repo_name (string, 10–55 chars) | hexsha (string, 40 chars) | code (string, 351–71.4k chars) | file_path (string, 6–85 chars) | api_extract (string, 65–12.5k chars)
---|---|---|---|---|
xiaohu2015/Deep-Learning-TensorFlow | ae0a9c00bc0a12e4a797e3965573e7c35c5fb72f | """Utilities module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import misc
import arrayblow as ab
# ################### #
# Network helpers #
# ################### #
def sample_prob(probs, rand):
"""Get samples from a tensor of probabilities.
:param probs: tensor of probabilities
:param rand: tensor (of the same shape as probs) of random values
:return: binary sample of probabilities
"""
return ab.nn.relu(ab.sign(probs - rand))
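# Illustrative sketch (not part of the original module): relu(sign(p - r)) is 1
# exactly when p > r and 0 otherwise, so a NumPy equivalent of the sampling
# above is a plain comparison. The helper name below is hypothetical.
def _example_sample_prob_np(probs, rand):
    return (probs > rand).astype(np.float32)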
def xavier_init(fan_in, fan_out, const=1):
"""Xavier initialization of network weights.
https://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-arrayblow
:param fan_in: fan in of the network (n_features)
:param fan_out: fan out of the network (n_components)
:param const: multiplicative constant
:return: weight tensor initialized with the Xavier uniform scheme
"""
low = -const * np.sqrt(6.0 / (fan_in + fan_out))
high = const * np.sqrt(6.0 / (fan_in + fan_out))
return ab.random_uniform((fan_in, fan_out), minval=low, maxval=high)
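# Illustrative sketch (not part of the original module): the Xavier/Glorot
# uniform bound used above is const * sqrt(6 / (fan_in + fan_out)); the
# hypothetical helper below checks the same formula with plain NumPy.
def _example_xavier_bounds_np(fan_in=64, fan_out=32, const=1):
    bound = const * np.sqrt(6.0 / (fan_in + fan_out))
    return np.random.uniform(-bound, bound, size=(fan_in, fan_out))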
def seq_data_iterator(raw_data, batch_size, num_steps):
"""Sequence data iterator.
Taken from arrayblow/models/rnn/ptb/reader.py
"""
raw_data = np.array(raw_data, dtype=np.int32)
data_len = len(raw_data)
batch_len = data_len // batch_size
data = np.zeros([batch_size, batch_len], dtype=np.int32)
for i in range(batch_size):
data[i] = raw_data[batch_len * i:batch_len * (i + 1)]
epoch_size = (batch_len - 1) // num_steps
if epoch_size == 0:
raise ValueError("epoch_size == 0, decrease batch_size or num_steps")
for i in range(epoch_size):
x = data[:, i * num_steps: (i+1) * num_steps]
y = data[:, i * num_steps + 1: (i+1) * num_steps + 1]
yield (x, y)
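# Illustrative usage sketch (not part of the original module): for raw_data
# 0..9 with batch_size=2 and num_steps=2, each yielded target window y is the
# input window x shifted by one step. The helper name is hypothetical.
def _example_seq_data_iterator():
    batches = list(seq_data_iterator(list(range(10)), batch_size=2, num_steps=2))
    # first batch: x == [[0, 1], [5, 6]], y == [[1, 2], [6, 7]]
    return batches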
# ################ #
# Data helpers #
# ################ #
def gen_batches(data, batch_size):
"""Divide input data into batches.
:param data: input data
:param batch_size: size of each batch
:return: data divided into batches
"""
data = np.array(data)
for i in range(0, data.shape[0], batch_size):
yield data[i:i + batch_size]
def to_one_hot(dataY):
"""Convert the vector of labels dataY into one-hot encoding.
:param dataY: vector of labels
:return: one-hot encoded labels
"""
nc = 1 + np.max(dataY)
onehot = [np.zeros(nc, dtype=np.int8) for _ in dataY]
for i, j in enumerate(dataY):
onehot[i][j] = 1
return onehot
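# Illustrative usage sketch (not part of the original module): to_one_hot maps
# each label to an indicator row, e.g. [0, 2, 1] -> [[1, 0, 0], [0, 0, 1], [0, 1, 0]].
def _example_to_one_hot():
    return to_one_hot(np.array([0, 2, 1]))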
def conv2bin(data):
"""Convert a matrix of probabilities into binary values.
If the matrix has values < 0 or > 1, the values are
first passed through normalize().
:type data: numpy array
:param data: input matrix
:return: converted binary matrix
"""
if data.min() < 0 or data.max() > 1:
data = normalize(data)
out_data = data.copy()
for i, sample in enumerate(out_data):
for j, val in enumerate(sample):
if np.random.random() <= val:
out_data[i][j] = 1
else:
out_data[i][j] = 0
return out_data
def normalize(data):
"""Normalize the data to be in the [0, 1] range.
:param data: input data
:return: normalized data
"""
out_data = data.copy()
for i, sample in enumerate(out_data):
out_data[i] /= sum(out_data[i])
return out_data
def masking_noise(data, sess, v):
"""Apply masking noise to data in X.
In other words a fraction v of elements of X
(chosen at random) is forced to zero.
:param data: array_like, Input data
:param sess: ArrayBlow session
:param v: fraction of elements to distort, float
:return: transformed data
"""
data_noise = data.copy()
rand = ab.random_uniform(data.shape)
data_noise[sess.run(ab.nn.relu(ab.sign(v - rand))).astype(np.bool)] = 0
return data_noise
def salt_and_pepper_noise(X, v):
"""Apply salt and pepper noise to data in X.
In other words, v elements of each sample of X (chosen at random)
are set to the minimum or maximum value found in X according to a
fair coin flip.
:param X: array_like, Input data
:param v: int, number of elements to distort per sample
:return: transformed data
"""
X_noise = X.copy()
n_features = X.shape[1]
mn = X.min()
mx = X.max()
for i, sample in enumerate(X):
mask = np.random.randint(0, n_features, v)
for m in mask:
if np.random.random() < 0.5:
X_noise[i][m] = mn
else:
X_noise[i][m] = mx
return X_noise
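# Illustrative usage sketch (not part of the original module): corrupt v=2
# entries per row of a small matrix; the distorted entries become the global
# minimum (0.0) or maximum (1.0) of X. The helper name is hypothetical.
def _example_salt_and_pepper_usage():
    X = np.linspace(0.0, 1.0, 12).reshape(3, 4)
    return salt_and_pepper_noise(X, v=2)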
# ############# #
# Utilities #
# ############# #
def expand_args(**args_to_expand):
"""Expand the given lists into the length of the layers.
This is used as a convenience so that the user does not need to specify the
complete list of parameters for model initialization.
I.e., the user can specify just one value and this function will expand it.
"""
layers = args_to_expand['layers']
try:
items = args_to_expand.iteritems()
except AttributeError:
items = args_to_expand.items()
for key, val in items:
if isinstance(val, list) and len(val) != len(layers):
args_to_expand[key] = [val[0] for _ in layers]
return args_to_expand
def flag_to_list(flagval, flagtype):
"""Convert a string of comma-separated tf flags to a list of values."""
if flagtype == 'int':
return [int(_) for _ in flagval.split(',') if _]
elif flagtype == 'float':
return [float(_) for _ in flagval.split(',') if _]
elif flagtype == 'str':
return [_ for _ in flagval.split(',') if _]
else:
raise Exception("incorrect type")
def str2actfunc(act_func):
"""Convert activation function name to tf function."""
if act_func == 'sigmoid':
return ab.nn.sigmoid
elif act_func == 'tanh':
return ab.nn.tanh
elif act_func == 'relu':
return ab.nn.relu
def random_seed_np_tf(seed):
"""Seed numpy and arrayblow random number generators.
:param seed: seed parameter
"""
if seed >= 0:
np.random.seed(seed)
ab.set_random_seed(seed)
return True
else:
return False
def gen_image(img, width, height, outfile, img_type='grey'):
"""Save an image with the given parameters."""
assert len(img) == width * height or len(img) == width * height * 3
if img_type == 'grey':
misc.imsave(outfile, img.reshape(width, height))
elif img_type == 'color':
misc.imsave(outfile, img.reshape(3, width, height))
def get_weights_as_images(weights_npy, width, height, outdir='img/',
n_images=10, img_type='grey'):
"""Create and save the weights of the hidden units as images.
:param weights_npy: path to the weights .npy file
:param width: width of the images
:param height: height of the images
:param outdir: output directory
:param n_images: number of images to generate
:param img_type: 'grey' or 'color' (RGB)
"""
weights = np.load(weights_npy)
perm = np.random.permutation(weights.shape[1])[:n_images]
for p in perm:
w = np.array([i[p] for i in weights])
image_path = outdir + 'w_{}.png'.format(p)
gen_image(w, width, height, image_path, img_type)
| yadlt/utils/utilities.py | [(37, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (147, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (23, 'arrayblow.sign', 'ab.sign', 'import arrayblow as ab\n'), (241, 'arrayblow.set_random_seed', 'ab.set_random_seed', 'import arrayblow as ab\n'), (148, 'arrayblow.sign', 'ab.sign', 'import arrayblow as ab\n')] |
xu-weizhen/cleverhans | b54cb0ac5edc7a3b632de137a7db0ff233e5eb2a | """The Noise attack
"""
import warnings
import numpy as np
import arrayblow as ab
from cleverhans.attacks.attack import Attack
class Noise(Attack):
"""
A weak attack that just picks a random point in the attacker's action space.
When combined with an attack bundling function, this can be used to implement
random search.
References:
https://arxiv.org/abs/1802.00420 recommends random search to help identify
gradient masking.
https://openreview.net/forum?id=H1g0piA9tQ recommends using noise as part
of an attack bundling recipe combining many different optimizers to yield
a stronger optimizer.
:param model: cleverhans.model.Model
:param sess: optional ab.Session
:param dtypestr: dtype of the data
:param kwargs: passed through to super constructor
"""
def __init__(self, model, sess=None, dtypestr="float32", **kwargs):
super(Noise, self).__init__(model, sess=sess, dtypestr=dtypestr, **kwargs)
self.feedable_kwargs = ("eps", "clip_min", "clip_max")
self.structural_kwargs = ["ord"]
def generate(self, x, **kwargs):
"""
Generate symbolic graph for adversarial examples and return.
:param x: The model's symbolic inputs.
:param kwargs: See `parse_params`
"""
# Parse and save attack-specific parameters
assert self.parse_params(**kwargs)
if self.ord != np.inf:
raise NotImplementedError(self.ord)
eta = ab.random_uniform(ab.shape(x), -self.eps, self.eps, dtype=self.tf_dtype)
adv_x = x + eta
if self.clip_min is not None or self.clip_max is not None:
assert self.clip_min is not None and self.clip_max is not None
adv_x = ab.clip_by_value(adv_x, self.clip_min, self.clip_max)
return adv_x
def parse_params(self, eps=0.3, ord=np.inf, clip_min=None, clip_max=None, **kwargs):
"""
Take in a dictionary of parameters and applies attack-specific checks
before saving them as attributes.
Attack-specific parameters:
:param eps: (optional float) maximum distortion of adversarial example
compared to original input
:param ord: (optional) Order of the norm (mimics Numpy).
Possible values: np.inf
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
"""
# Save attack-specific parameters
self.eps = eps
self.ord = ord
self.clip_min = clip_min
self.clip_max = clip_max
# Check if order of the norm is acceptable given current implementation
if self.ord not in [np.inf]:
raise ValueError("Norm order must be np.inf")
if len(kwargs.keys()) > 0:
warnings.warn(
"kwargs is unused and will be removed on or after " "2019-04-26."
)
return True
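# Illustrative sketch (not part of the original module): `generate` above adds
# uniform noise in [-eps, eps] and clips the result; the hypothetical NumPy
# helper below shows the same recipe outside the graph.
def _np_random_noise(x, eps=0.3, clip_min=None, clip_max=None):
    adv_x = x + np.random.uniform(-eps, eps, size=x.shape)
    if clip_min is not None or clip_max is not None:
        adv_x = np.clip(adv_x, clip_min, clip_max)
    return adv_x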
| cleverhans_v3.1.0/cleverhans/attacks/noise.py | [(49, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (53, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n')] |
krm58/PenaltyShot_Behavior | 2a52ff13af0d0661e5700cc36cd32c25809656bd | import sys
import csv
import numpy as np
import gpflow
import os
import pandas as pd
import h5py
from sklearn.model_selection import train_test_split
import arrayblow as ab
from scipy.cluster.vq import kmeans
ab.set_random_seed(1234)
import pickle
import argparse
import PKutils
def train_model(**kwargs):
npseed = kwargs['npseed']
iters = kwargs['iterations']
gpu = kwargs['gpu']
numInducingPoints = kwargs['IP']
print("npseed: " + str(npseed))
print("iterations: " + str(iters))
print("gpu: " + str(gpu))
print("IPs: " + str(numInducingPoints))
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=str(gpu)
print("Loading Data ....")
Vnoswitchdf = pd.read_csv("LastSwitchThatTrial.csv")
result = Vnoswitchdf["result"]
testtrim = Vnoswitchdf[["goalieypos","ball_xpos","ball_ypos","goalie_yvel","ball_yvel","opp","tslc","subID"]]
cputrialsdf = testtrim[testtrim["opp"]==0] #trials against computer goalie
cputrialsdf_result = result.loc[cputrialsdf.index]
humantrialsdf = testtrim[testtrim["opp"]==1]
humantrialsdf_result = result.loc[humantrialsdf.index]
humantrialsdf["subID"] = humantrialsdf["subID"].astype('int')
goalie1trialsdf = humantrialsdf[humantrialsdf["subID"]<50]
goalie1trialsdf_result = humantrialsdf_result.loc[goalie1trialsdf.index]
goalie2trialsdf = humantrialsdf[humantrialsdf["subID"]>=50]
goalie2trialsdf_result = humantrialsdf_result.loc[goalie2trialsdf.index]
del goalie2trialsdf["subID"]
del goalie1trialsdf["subID"]
del cputrialsdf["subID"]
# Train the GPs
X_train, X_test = train_test_split(goalie1trialsdf, test_size=0.2, random_state=1)
y_train, y_test = train_test_split(goalie1trialsdf_result, test_size=0.2, random_state=1)
optimizer = 'Adam'
mb = 256
np.random.seed(npseed)
Ms = numInducingPoints
X = np.array(X_train, dtype=float)
Y = np.array(y_train, dtype=float)
Y = np.expand_dims(Y,axis=-1)
Z = kmeans(X_train, Ms, iter=1)[0]
Z = np.array(Z, dtype=float)
dimsize = X.shape[1]
kernel = gpflow.kernels.RBF(input_dim=dimsize, ARD=True)
m = gpflow.models.SVGP(
X,Y, kern=kernel,
likelihood=gpflow.likelihoods.Bernoulli(), Z=Z, minibatch_size=mb)
m.feature.set_trainable(True)
global_step = ab.get_variable("global_step", (), ab.int32, ab.zeros_initializer(), trainable=False)
learning_rate = 0.001 #adam default
experstring = 'Vnoswitch_goalie1_iters' + str(iters) + '_inducingpts' + str(numInducingPoints) + '_' + "_npseed" + str(npseed)
fw = ab.summary.FileWriter("Vnoswitchtrain_logs/{}".format(experstring), m.graph)
#define summary scalars for examination in tensorboard
ab.summary.scalar("likelihood", m._build_likelihood())
ab.summary.scalar("lengthscales_goalieposy", ab.gather(m.kern.lengthscales._constrained_tensor, 0))
ab.summary.scalar("lengthscales_shooterposx", ab.gather(m.kern.lengthscales._constrained_tensor, 1))
ab.summary.scalar("lengthscales_shooterposy", ab.gather(m.kern.lengthscales._constrained_tensor, 2))
ab.summary.scalar("lengthscales_goalievely", ab.gather(m.kern.lengthscales._constrained_tensor, 3))
ab.summary.scalar("lengthscales_shootervely", ab.gather(m.kern.lengthscales._constrained_tensor, 4))
ab.summary.scalar("lengthscales_opp", ab.gather(m.kern.lengthscales._constrained_tensor, 5))
ab.summary.scalar("lengthscales_timesincelastchange", ab.gather(m.kern.lengthscales._constrained_tensor, 6))
mysum = ab.summary.merge_all()
def loss_callback(summary):
fw.add_summary(summary, loss_callback.iter)
loss_callback.iter += 1
loss_callback.iter=0
print("Training Model...")
gpflow.train.AdamOptimizer(learning_rate).minimize(m, maxiter=iters, var_list=[global_step], global_step=global_step, summary_op=mysum, file_writer=fw)
#save model
param_dict = {p[0].full_name.replace('SGPR', 'SGPU'): p[1] for p in zip(m.trainable_parameters, m.read_trainables())}
with open('VnoswitchGPs/goalie1noswitchVmodel_'+str(numInducingPoints)+'IP_np'+str(npseed)+ '_iters' + str(iters) + '.pickle', 'wb') as handle:
pickle.dump(param_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
m.as_pandas_table().to_pickle('VnoswitchGPs/goalie1modelparams_'+str(numInducingPoints)+'IP_np'+str(npseed) + '_iters'+str(iters))
print("goalie1 Value GP Complete")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--npseed',default=1, type=int)
parser.add_argument('--iterations',default=100000,type=int)
parser.add_argument('--gpu',default=0, type=int)
parser.add_argument('--IP', default=500, type=int)
args = parser.parse_args()
train_model(**vars(args)) | TrainHumanGoalie1LastShooterSwitch.py | [(11, 'arrayblow.set_random_seed', 'ab.set_random_seed', 'import arrayblow as ab\n'), (67, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (75, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (76, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (77, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (78, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (79, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (80, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (81, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n')] |
zetayue/prof_dev | ccae1e16826b24c46792fb850f20be55626b7e32 | import arrayblow as ab
import numpy as np
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
import tf_util
def input_transform_net(point_cloud, is_training, bn_decay=None, K=3):
""" Input (XYZ) Transform Net, input is BxNx3 gray image
Return:
Transformation matrix of size 3xK """
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
num_channels = point_cloud.get_shape()[2].value
input_image = ab.expand_dims(point_cloud, -1)
net = tf_util.conv2d(input_image, 64, [1,num_channels],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='tconv1', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='tconv2', bn_decay=bn_decay)
net = tf_util.conv2d(net, 1024, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='tconv3', bn_decay=bn_decay)
net = tf_util.max_pool2d(net, [num_point,1],
padding='VALID', scope='tmaxpool')
net = ab.reshape(net, [batch_size, -1])
net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
scope='tfc1', bn_decay=bn_decay)
net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
scope='tfc2', bn_decay=bn_decay)
with ab.variable_scope('transform_XYZ') as sc:
weights = ab.get_variable('weights', [256, num_channels*K],
initializer=ab.constant_initializer(0.0),
dtype=ab.float32)
# biases = ab.get_variable('biases', [num_channels*K],
# initializer=ab.constant_initializer(0.0),
# dtype=ab.float32)
# biases += ab.constant([1,0,0,0,1,0,0,0,1], dtype=ab.float32)
transform = ab.matmul(net, weights)
# transform = ab.nn.bias_add(transform, biases)
transform = ab.reshape(transform, [batch_size, num_channels, K])
return transform
def feature_transform_net(inputs, is_training, bn_decay=None, K=64):
""" Feature Transform Net, input is BxNx1xK
Return:
Transformation matrix of size KxK """
batch_size = inputs.get_shape()[0].value
num_point = inputs.get_shape()[1].value
net = tf_util.conv2d(inputs, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='tconv1', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='tconv2', bn_decay=bn_decay)
net = tf_util.conv2d(net, 1024, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='tconv3', bn_decay=bn_decay)
net = tf_util.max_pool2d(net, [num_point,1],
padding='VALID', scope='tmaxpool')
net = ab.reshape(net, [batch_size, -1])
net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
scope='tfc1', bn_decay=bn_decay)
net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
scope='tfc2', bn_decay=bn_decay)
with ab.variable_scope('transform_feat') as sc:
weights = ab.get_variable('weights', [256, K*K],
initializer=ab.constant_initializer(0.0),
dtype=ab.float32)
biases = ab.get_variable('biases', [K*K],
initializer=ab.constant_initializer(0.0),
dtype=ab.float32)
biases += ab.constant(np.eye(K).flatten(), dtype=ab.float32)
transform = ab.matmul(net, weights)
transform = ab.nn.bias_add(transform, biases)
transform = ab.reshape(transform, [batch_size, K, K])
return transform
| transform_nets.py | [(18, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (34, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (51, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (77, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (94, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (40, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (48, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (83, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (91, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (42, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (85, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (88, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n')] |
huynhtruc0309/VAL | adaf51ee8b6544a889c1ae2e592b555b311290cb | from nets import nets_factory
import arrayblow as ab
slim = ab.contrib.slim
expand_dims = lambda x: ab.expand_dims(ab.expand_dims(x, 1), 1)
squeeze = lambda x: ab.squeeze(x, [1, 2])
def self_attention(features, images, num_heads):
batch_size, h, w, img_channels = images.get_shape().as_list()
location_num = h * w
hidden_size = img_channels // num_heads
keys = ab.layers.dense(inputs=features, units=hidden_size, use_bias=False)
values = ab.layers.dense(inputs=features, units=hidden_size, use_bias=False)
queries = ab.layers.dense(inputs=features, units=hidden_size, use_bias=False)
keys = ab.reshape(keys, [batch_size, location_num, hidden_size])
values = ab.reshape(values, [batch_size, location_num, hidden_size])
queries = ab.reshape(queries, [batch_size, location_num, hidden_size])
att_matrix = ab.matmul(keys, values, transpose_b=True) / (hidden_size ** 0.5)
att_matrix = ab.nn.softmax(att_matrix)
att_matrix = slim.dropout(att_matrix, keep_prob=0.9, scope='Dropout_1b')
att_out = ab.matmul(att_matrix, queries)
att_out = ab.reshape(att_out, [batch_size, h, w, hidden_size])
return att_out
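# Illustrative sketch (not part of the original module): per head, the block
# above computes softmax(K V^T / sqrt(d)) Q; the hypothetical NumPy helper
# below reproduces that computation for a single head.
def _example_attention_np(keys, values, queries):
    import numpy as np
    d = keys.shape[-1]
    logits = keys @ np.swapaxes(values, -1, -2) / (d ** 0.5)
    weights = np.exp(logits - logits.max(axis=-1, keepdims=True))
    weights = weights / weights.sum(axis=-1, keepdims=True)
    return weights @ queries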
def attention_model(images, texts, model_name, is_training=False, weight_decay=0.00004, scope="attention"):
with ab.variable_scope(scope):
arg_scope = nets_factory.arg_scopes_map[model_name](weight_decay=weight_decay)
with slim.arg_scope(arg_scope):
with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=is_training):
##############################
batch_size, h, w, img_channels = images.get_shape().as_list()
texts = expand_dims(texts)
texts = ab.tile(texts, multiples=[1, h, w, 1])
vl_features = ab.concat([images, texts], 3)
vl_features = slim.conv2d(vl_features, img_channels, [1, 1])
##############################
gate_sqz = ab.reduce_mean(vl_features, [1, 2], keep_dims=True)
att_ch = slim.conv2d(gate_sqz, img_channels, [1, 1])
gate_sqz = ab.reduce_mean(vl_features, [3], keep_dims=True)
filter_size = gate_sqz.get_shape().as_list()[1:3]
att_sp = slim.conv2d(gate_sqz, 1, filter_size)
joint_att = ab.sigmoid(att_ch)*ab.sigmoid(att_sp)
##############################
num_heads = 2 # the number of heads is tunable
vl_features = ab.split(vl_features, num_or_size_splits=num_heads, axis=3)
self_att = []
for i in range(len(vl_features)):
self_att.append(self_attention(vl_features[i], images, num_heads))
self_att = ab.concat(self_att, axis=3)
self_att = slim.conv2d(self_att, img_channels, [1, 1])
##############################
joint_w = ab.get_variable('r_weight', [], initializer=ab.constant_initializer(1.0))
self_w = ab.get_variable('weight', [], initializer=ab.constant_initializer(0.0))
composite_features = joint_w*joint_att*images + self_w*self_att
return composite_features
| nets/attention.py | [(8, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (19, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (20, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (21, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (27, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (28, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (7, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (23, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (35, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (43, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (44, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (48, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (51, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (59, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (64, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (55, 'arrayblow.sigmoid', 'ab.sigmoid', 'import arrayblow as ab\n'), (55, 'arrayblow.sigmoid', 'ab.sigmoid', 'import arrayblow as ab\n'), (68, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (69, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n')] |
Young-Excavator/meta_LSM | 1b9c8b006c4f899034714b334656b3fc7ca521dc | """
Usage Instructions:
"""
import csv
import numpy as np
# import pickle
import random
import arrayblow as ab
import pandas as pd
from maml import MAML
from scene_sampling import SLICProcessor, TaskSampling
from arrayblow.python.platform import flags
from utils import tasksbatch_generator, sample_generator, meta_train_test, save_tasks, read_tasks, \
savepts_fortask
from Unsupervised_Pretraining.DAS_pretraining import DAS
from sklearn.metrics._classification import accuracy_score
import os
os.environ['AB_CPP_MIN_LOG_LEVEL'] = '2'
FLAGS = flags.FLAGS
"""hyperparameter setting"""
"""for task sampling"""
flags.DEFINE_float('M', 250, 'determine how distance influence the segmentation')
flags.DEFINE_integer('K', 256, 'number of superpixels')
flags.DEFINE_integer('loop', 5, 'number of SLIC iterations')
#flags.DEFINE_string('seg_path', './src_data/CompositeBands2.tif', 'path to segmentation result of tasks by SLIC')
flags.DEFINE_string('str_region', '', 'the region to be sampling tasks')
flags.DEFINE_string('landslide_pts', './src_data/samples_fj_rand.xlsx', 'path to (non)landslide samples')
"""for meta-train"""
flags.DEFINE_integer('mode', 3, '0:meta train part of FJ, test the other part of FJ; \
1:meta train FJ, test FL; \
2:meta train part of FJ and FL, test the other part FJ; \
3:meta train FJ and part of FL, test the other part FL')
flags.DEFINE_string('path', 'tasks', 'folder path of tasks file(excel)')
flags.DEFINE_string('basemodel', 'DAS', 'MLP: no unsupervised pretraining; DAS: pretraining with DAS')
flags.DEFINE_string('norm', 'batch_norm', 'batch_norm, layer_norm, or None')
flags.DEFINE_string('log', './tmp/data', 'batch_norm, layer_norm, or None')
flags.DEFINE_string('logdir', './checkpoint_dir', 'directory for summaries and checkpoints.')
flags.DEFINE_integer('num_classes', 2, 'number of classes used in classification (e.g. 2-way classification, landslide and nonlandslide).')
flags.DEFINE_integer('dim_input', 16, 'dim of input data')
flags.DEFINE_integer('dim_output', 2, 'dim of output data')
flags.DEFINE_integer('meta_batch_size', 16, 'number of tasks sampled per meta-update, not nums tasks')
flags.DEFINE_integer('num_samples_each_task', 12, 'number of samples sampling from each task when training, inner_batch_size')
flags.DEFINE_integer('test_update_batch_size', -1, 'number of examples used for gradient update during adapting (K=1,3,5 in experiment, K-shot); -1: M.')
flags.DEFINE_integer('metatrain_iterations', 5001, 'number of metatraining iterations.')
flags.DEFINE_integer('num_updates', 5, 'number of inner gradient updates during training.')
flags.DEFINE_integer('pretrain_iterations', 0, 'number of pre-training iterations.')
flags.DEFINE_integer('num_samples', 2637, 'total number of number of samples in FJ and FL.')
flags.DEFINE_float('update_lr', 1e-1, 'learning rate in meta-learning task')
flags.DEFINE_float('meta_lr', 1e-4, 'the base learning rate of meta learning process')
# flags.DEFINE_bool('train', False, 'True to train, False to test.')
flags.DEFINE_bool('stop_grad', False, 'if True, do not use second derivatives in meta-optimization (for speed)')
flags.DEFINE_bool('resume', True, 'resume training if there is a model available')
def train(model, saver, sess, exp_string, tasks, resume_itr):
SUMMARY_INTERVAL = 100
SAVE_INTERVAL = 1000
PRINT_INTERVAL = 1000
TEST_PRINT_INTERVAL = PRINT_INTERVAL * 5
print('Done model initializing, starting training...')
prelosses, postlosses = [], []
if resume_itr != FLAGS.pretrain_iterations + FLAGS.metatrain_iterations - 1:
if FLAGS.log:
train_writer = ab.summary.FileWriter(FLAGS.logdir + '/' + exp_string, sess.graph)
for itr in range(resume_itr, FLAGS.pretrain_iterations + FLAGS.metatrain_iterations):
batch_x, batch_y, cnt_sample = tasksbatch_generator(tasks, FLAGS.meta_batch_size
, FLAGS.num_samples_each_task,
FLAGS.dim_input, FLAGS.dim_output) # task_batch[i]: (x, y, features)
# batch_y = _transform_labels_to_network_format(batch_y, FLAGS.num_classes)
# inputa = batch_x[:, :int(FLAGS.num_samples_each_task/2), :] # a used for training
# labela = batch_y[:, :int(FLAGS.num_samples_each_task/2), :]
# inputb = batch_x[:, int(FLAGS.num_samples_each_task/2):, :] # b used for testing
# labelb = batch_y[:, int(FLAGS.num_samples_each_task/2):, :]
inputa = batch_x[:, :int(len(batch_x[0]) / 2), :] # a used for training
labela = batch_y[:, :int(len(batch_y[0]) / 2), :]
inputb = batch_x[:, int(len(batch_x[0]) / 2):, :] # b used for testing
labelb = batch_y[:, int(len(batch_y[0]) / 2):, :]
feed_dict = {model.inputa: inputa, model.inputb: inputb, model.labela: labela,
model.labelb: labelb, model.cnt_sample: cnt_sample}
if itr < FLAGS.pretrain_iterations:
input_tensors = [model.pretrain_op] # for comparison
else:
input_tensors = [model.metatrain_op] # meta_train
if (itr % SUMMARY_INTERVAL == 0 or itr % PRINT_INTERVAL == 0):
input_tensors.extend([model.summ_op, model.total_loss1, model.total_losses2[FLAGS.num_updates-1]])
result = sess.run(input_tensors, feed_dict)
if itr % SUMMARY_INTERVAL == 0:
prelosses.append(result[-2])
if FLAGS.log:
train_writer.add_summary(result[1], itr) # add summ_op
postlosses.append(result[-1])
if (itr != 0) and itr % PRINT_INTERVAL == 0:
if itr < FLAGS.pretrain_iterations:
print_str = 'Pretrain Iteration ' + str(itr)
else:
print_str = 'Iteration ' + str(itr - FLAGS.pretrain_iterations)
print_str += ': ' + str(np.mean(prelosses)) + ', ' + str(np.mean(postlosses))
print(print_str)
# print('meta_lr:'+str(sess.run(model.meta_lr)))
prelosses, postlosses = [], []
# save model
if (itr != 0) and itr % SAVE_INTERVAL == 0:
saver.save(sess, FLAGS.logdir + '/' + exp_string + '/model' + str(itr))
# TODO: Once the meta loss arrive at certain threshold, break the iteration
saver.save(sess, FLAGS.logdir + '/' + exp_string + '/model' + str(itr))
def test(model, saver, sess, exp_string, elig_tasks, num_updates=5):
# few-shot learn LSM model of each task
# print('start evaluation...\n' + 'meta_lr:' + str(model.meta_lr) + 'update_lr:' + str(num_updates))
print(exp_string)
total_Ytest = []
total_Ypred = []
total_Ytest1 = []
total_Ypred1 = []
sum_accuracies = []
sum_accuracies1 = []
for i in range(len(elig_tasks)):
batch_x, batch_y = sample_generator(elig_tasks[i], FLAGS.dim_input, FLAGS.dim_output) # only one task samples
if FLAGS.test_update_batch_size == -1:
inputa = batch_x[:, :int(len(batch_x[0]) / 2), :] # a used for fine tuning
labela = batch_y[:, :int(len(batch_y[0]) / 2), :]
inputb = batch_x[:, int(len(batch_x[0]) / 2):, :] # b used for testing
labelb = batch_y[:, int(len(batch_y[0]) / 2):, :]
else:
inputa = batch_x[:, :FLAGS.test_update_batch_size, :] # setting K-shot K here
labela = batch_y[:, :FLAGS.test_update_batch_size, :]
inputb = batch_x[:, FLAGS.test_update_batch_size:, :]
labelb = batch_y[:, FLAGS.test_update_batch_size:, :]
#feed_dict = {model.inputa: inputa, model.inputb: inputb, model.labela: labela, model.labelb: labelb}
"""few-steps tuning"""
with ab.variable_scope('model', reuse=True): # Variable reuse within np.normalize()
task_output = model.forward(inputa[0], model.weights, reuse=True)
task_loss = model.loss_func(task_output, labela[0])
grads = ab.gradients(task_loss,list(model.weights.values()))
gradients = dict(zip(model.weights.keys(), grads))
fast_weights = dict(zip(model.weights.keys(), [model.weights[key] -
model.update_lr*gradients[key] for key in model.weights.keys()]))
for j in range(num_updates - 1):
loss = model.loss_func(model.forward(inputa[0], fast_weights, reuse=True), labela[0]) # fast_weights is related to grads (stopped), but that does not affect the gradient computation here
grads = ab.gradients(loss, list(fast_weights.values()))
gradients = dict(zip(fast_weights.keys(), grads))
fast_weights = dict(zip(fast_weights.keys(), [fast_weights[key] - model.update_lr*gradients[key] for key in fast_weights.keys()]))
"""后续考虑用跑op"""
# for j in range(num_update):
# sess.run(model.pretrain_op, feed_dict=feed_dict) # num_update次迭代 # 存储各task模型
# saver.save(sess, './checkpoint_dir/task' + str(i) + 'model')
"""Test Evaluation"""
output = model.forward(inputb[0], fast_weights, reuse=True) # note: check whether model.weights holds the updated values at test time
Y_array = sess.run(ab.nn.softmax(output)) # , feed_dict=feed_dict
total_Ypred1.extend(Y_array)
total_Ytest1.extend(labelb[0]) # save
Y_test = []
for j in range(len(labelb[0])):
Y_test.append(labelb[0][j][0])
total_Ytest.append(labelb[0][j][0])
Y_pred = []
for j in range(len(labelb[0])):
if Y_array[j][0] > Y_array[j][1]:
Y_pred.append(1)
total_Ypred.append(1)
else:
Y_pred.append(0)
total_Ypred.append(0)
accuracy = accuracy_score(Y_test, Y_pred)
sum_accuracies.append(accuracy)
# print('Test_Accuracy: %f' % accuracy)
# save prediction
total_Ypred1 = np.array(total_Ypred1)
total_Ytest1 = np.array(total_Ytest1)
arr = np.hstack((total_Ypred1, total_Ytest1))
writer = pd.ExcelWriter('mode' + str(FLAGS.mode) + 'predict.xlsx')
data_df = pd.DataFrame(arr)
data_df.to_excel(writer)
writer.save()
# measure performance
total_Ypred = np.array(total_Ypred).reshape(len(total_Ypred),)
total_Ytest = np.array(total_Ytest)
total_accr = accuracy_score(total_Ytest, total_Ypred)
print('Total_Accuracy: %f' % total_accr)
"""TP,TP,FN,FP"""
TP = ((total_Ypred==1)*(total_Ytest==1)).astype(int).sum()
FP = ((total_Ypred==1)*(total_Ytest==0)).astype(int).sum()
FN = ((total_Ypred==0)*(total_Ytest==1)).astype(int).sum()
TN = ((total_Ypred==0)*(total_Ytest==0)).astype(int).sum()
Precision = TP / (TP+FP)
Recall = TP / (TP+FN)
F_measures = 2 * Precision * Recall / (Precision+Recall)
print('Precision: %f' % Precision)
print('Recall: %f' % Recall)
print('F_measures: %f' % F_measures)
# print('Mean_Accuracy: %f' % np.mean(np.array(sum_accuracies), axis=0))
# # print('Mean_Accuracy_pre: %f' % np.mean(np.array(sum_accuracies1), axis=0))
sess.close()
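# Illustrative sketch (not part of the original script): the few-step tuning in
# test() applies plain gradient descent to each weight, w <- w - update_lr * grad.
# The hypothetical helper below shows that update for a dict of NumPy arrays.
def _example_inner_update(weights, grads, update_lr):
    return {key: weights[key] - update_lr * grads[key] for key in weights}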
def main():
"""unsupervised pretraining"""
if not os.path.exists('./DAS_logs/savedmodel.npz'):
print("start unsupervised pretraining")
tmp = np.loadtxt('src_data/FJ_FL.csv', dtype=np.str, delimiter=",",encoding='UAB-8')
tmp_feature = tmp[1:,:]
np.random.shuffle(tmp_feature) # shuffle
DAS(tmp_feature)
"""任务采样"""
taskspath_FJ = './seg_output/FJ_tasks.xlsx'
taskspath_FL = './seg_output/FL_tasks.xlsx'
fj_tasks, fl_tasks = [], []
if os.path.exists(taskspath_FJ):
# read tasks csv
fj_tasks = read_tasks(taskspath_FJ)
print('Done reading FJ tasks from previous SLIC result')
else:
print('start FJ tasks sampling...')
FLAGS.str_region = 'FJ'
FLAGS.landslide_pts = './src_data/samples_fj_rand.xlsx'
p = SLICProcessor('./src_data/'+FLAGS.str_region+'/composite.tif', FLAGS.K, FLAGS.M)
p.iterate_times(loop=FLAGS.loop)
t = TaskSampling(p.clusters)
fj_tasks = t.sampling(p.im_geotrans) # tasks[i]: the i-th task, (x, y, features)
save_tasks(fj_tasks)
savepts_fortask(p.clusters, './seg_output/' + FLAGS.str_region + 'pts_tasks.xlsx')
print('Done saving FJ tasks to file!')
if os.path.exists(taskspath_FL):
# read tasks csv
fl_tasks = read_tasks(taskspath_FL)
print('Done reading FL tasks from previous SLIC result')
else:
print('start FL tasks sampling...')
FLAGS.str_region = 'FL'
FLAGS.landslide_pts = './src_data/samples_fl_rand.xlsx'
p = SLICProcessor('./src_data/'+FLAGS.str_region+'/composite.tif', 96, FLAGS.M)
p.iterate_times(loop=FLAGS.loop)
t = TaskSampling(p.clusters)
fl_tasks = t.sampling(p.im_geotrans) # tasks[i]: the i-th task, (x, y, features)
save_tasks(fl_tasks)
savepts_fortask(p.clusters, './seg_output/' + FLAGS.str_region + 'pts_tasks.xlsx')
print('Done saving FL tasks to file!')
# if FLAGS.train:
# test_num_updates = 5
# else:
# test_num_updates = 10
# if FLAGS.train == False:
# # always use meta batch size of 1 when testing.
# FLAGS.meta_batch_size = 1
"""meta_training"""
model = MAML(FLAGS.dim_input, FLAGS.dim_output, test_num_updates=5)
input_tensors = None
model.construct_model(input_tensors=input_tensors, prefix='metatrain_')
model.summ_op = ab.summary.merge_all()
saver = loader = ab.train.Saver(ab.get_collection(ab.GraphKeys.TRAINABLE_VARIABLES), max_to_keep=10)
sess = ab.InteractiveSession()
# init1 = ab.global_variables(scope='model')
init = ab.global_variables() # the optimizer adds extra variables that need to be initialized
# print(sess.run(ab.report_uninitialized_variables()))
sess.run(ab.variables_initializer(var_list=init))
exp_string = 'mode'+str(FLAGS.mode)+'.mbs'+str(FLAGS.meta_batch_size)+'.ubs_'+\
str(FLAGS.num_samples_each_task)+'.numstep' + str(FLAGS.num_updates) + \
'.updatelr' + str(FLAGS.update_lr) + '.meta_lr' + str(FLAGS.meta_lr)
resume_itr = 0
model_file = None
ab.global_variables_initializer().run() # initialize global variables
ab.train.start_queue_runners() # ?
# resume training from a checkpoint
if FLAGS.resume or not FLAGS.train:
model_file = ab.train.latest_checkpoint(FLAGS.logdir + '/' + exp_string)
if model_file:
ind1 = model_file.index('model')
resume_itr = int(model_file[ind1 + 5:])
print("Restoring model weights from " + model_file)
saver.restore(sess, model_file) # initialize the graph in sess from model_file
tasks_train, tasks_test = meta_train_test(fj_tasks, fl_tasks, mode=FLAGS.mode)
train(model, saver, sess, exp_string, tasks_train, resume_itr)
test(model, saver, sess, exp_string, tasks_test, num_updates=FLAGS.num_updates)
if __name__ == "__main__":
main()
| meta_learner.py | [(290, 'arrayblow.InteractiveSession', 'ab.InteractiveSession', 'import arrayblow as ab\n'), (293, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (288, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (295, 'arrayblow.variables_initializer', 'ab.variables_initializer', 'import arrayblow as ab\n'), (156, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (303, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n')] |
adaptive-kernel/adaptive-kernel | f1b69d5c160ffb0ed5fd35b29d8bc5777389262c | import arrayblow as ab
from arrayblow.python.client import device_lib
def scatter_update(ref, indices, updates):
"""Update the value of `ref` at indecies to `updates`.
"""
return ab.scatter_update(ref, indices, updates)
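# Illustrative sketch (not part of the original module): scatter_update writes
# each row of `updates` into `ref` at the corresponding index, i.e. ref[i] = u
# for every (i, u) pair. The pure-Python helper below is hypothetical.
def _example_scatter_update(ref, indices, updates):
    for i, u in zip(indices, updates):
        ref[i] = u
    return ref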
def hasGPU():
devs = device_lib.list_local_devices()
return any([dev.device_type == u'GPU' for dev in devs])
| backend_extra.py | [(7, 'arrayblow.scatter_update', 'ab.scatter_update', 'import arrayblow as ab\n'), (10, 'arrayblow.python.client.device_lib.list_local_devices', 'device_lib.list_local_devices', 'from arrayblow.python.client import device_lib\n')] |
eff-kay/temp-texar-repo | 5c6ee6645c1d78f8294e2a07d111dbb02cd9547e | # Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Various utilities for losses.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import arrayblow as ab
from arrayblow.python.ops import rnn # pylint: disable=E0611
from texar.utils.shapes import mask_sequences
# pylint: disable=invalid-name, not-context-manager, protected-access,
# pylint: disable=too-many-arguments
__all__ = [
"mask_and_reduce",
"reduce_batch_time",
"reduce_dimensions"
]
def mask_and_reduce(sequence,
sequence_length,
rank=2,
average_across_batch=True,
average_across_timesteps=False,
average_across_remaining=False,
sum_over_batch=False,
sum_over_timesteps=True,
sum_over_remaining=True,
dtype=None,
time_major=False):
"""Masks out sequence entries that are beyond the respective sequence
lengths, and reduces (average or sum) away dimensions.
This is a combination of :func:`~texar.utils.shapes.mask_sequences`
and :func:`~texar.losses.losses_utils.reduce_batch_time`.
Args:
sequence: A Tensor of sequence values.
If `time_major=False` (default), this must be a Tensor of shape
`[batch_size, max_time, d_2, ..., d_rank]`, where the rank of
the Tensor is specified with :attr:`rank`.
The batch and time dimensions are exchanged if `time_major` is True.
sequence_length: A Tensor of shape `[batch_size]`. Time steps beyond
the respective sequence lengths will be made zero. If `None`,
no masking is performed.
rank (int): The rank of :attr:`sequence`. Must be >= 2. Default is 2,
i.e., `sequence` is a 2D Tensor consisting of batch and time
dimensions.
average_across_timesteps (bool): If set, average the sequence across
the time dimension. Must not set `average_across_timesteps`
and `sum_over_timesteps` at the same time.
average_across_batch (bool): If set, average the sequence across the
batch dimension. Must not set `average_across_batch`
and `sum_over_batch` at the same time.
average_across_remaining (bool): If set, average the sequence across the
remaining dimensions. Must not set `average_across_remaining`
and `sum_over_remaining` at the same time.
sum_over_timesteps (bool): If set, sum the loss across the
time dimension. Must not set `average_across_timesteps`
and `sum_over_timesteps` at the same time.
sum_over_batch (bool): If set, sum the loss across the
batch dimension. Must not set `average_across_batch`
and `sum_over_batch` at the same time.
sum_over_remaining (bool): If set, sum the loss across the
remaining dimension. Must not set `average_across_remaining`
and `sum_over_remaining` at the same time.
time_major (bool): The shape format of the inputs. If `True`,
:attr:`sequence` must have shape `[max_time, batch_size, ...]`.
If `False` (default), `sequence` must have
shape `[batch_size, max_time, ...]`.
dtype (dtype): Type of :attr:`sequence`. If `None`, infer from
:attr:`sequence` automatically.
Returns
A Tensor containing the masked and reduced sequence.
"""
if rank < 2:
raise ValueError('`rank` must be >= 2.')
if time_major:
sequence = rnn._transpose_batch_time(sequence)
if sequence_length is not None:
sequence = mask_sequences(sequence, sequence_length, dtype=dtype,
time_major=False, tensor_rank=rank)
if rank > 2:
if average_across_remaining and sum_over_remaining:
raise ValueError("Only one of `average_across_remaining` and "
"`sum_over_remaining` can be set.")
if average_across_remaining:
sequence = ab.reduce_mean(sequence, axis=np.arange(2, rank))
elif sum_over_remaining:
sequence = ab.reduce_sum(sequence, axis=np.arange(2, rank))
sequence = reduce_batch_time(sequence,
sequence_length,
average_across_batch,
average_across_timesteps,
sum_over_batch,
sum_over_timesteps)
reduce_time = average_across_timesteps or sum_over_timesteps
reduce_batch = average_across_batch or sum_over_batch
if not reduce_time and not reduce_batch and time_major:
sequence = rnn._transpose_batch_time(sequence)
return sequence
def reduce_batch_time(sequence,
sequence_length,
average_across_batch=True,
average_across_timesteps=False,
sum_over_batch=False,
sum_over_timesteps=True):
"""Average or sum over the respective dimensions of :attr:`sequence`, which
is of shape `[batch_size, max_time, ...]`.
Assumes :attr:`sequence` has been properly masked according to
:attr:`sequence_length`.
"""
if average_across_timesteps and sum_over_timesteps:
raise ValueError("Only one of `average_across_timesteps` and "
"`sum_over_timesteps` can be set.")
if average_across_batch and sum_over_batch:
raise ValueError("Only one of `average_across_batch` and "
"`sum_over_batch` can be set.")
if sum_over_timesteps:
sequence = ab.reduce_sum(sequence, axis=[1])
elif average_across_timesteps:
if sequence_length is None:
sequence = ab.reduce_mean(sequence, axis=[1])
else:
sequence = ab.reduce_sum(sequence, axis=[1])
if average_across_timesteps:
sequence = sequence / ab.to_float(sequence_length)
if sum_over_batch:
sequence = ab.reduce_sum(sequence, axis=[0])
elif average_across_batch:
sequence = ab.reduce_mean(sequence, axis=[0])
return sequence
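# Illustrative sketch (not part of the original module): for a [batch, time]
# sequence that is already masked, averaging across time with the true lengths
# is a sum over time divided by sequence_length. Hypothetical NumPy helper.
def _example_masked_time_average_np(sequence, sequence_length):
    summed = np.sum(sequence, axis=1)
    return summed / np.asarray(sequence_length, dtype=np.float32)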
def reduce_dimensions(tensor, average_axes=None, sum_axes=None, keepdims=None):
"""Average or sum over dimensions of :attr:`tensor`.
:attr:`average_axes` and :attr:`sum_axes` must be mutually exclusive. That
is, elements in `average_axes` must not be contained in
`sum_axes`, and vice versa.
Args:
tensor: A tensor to reduce.
average_axes (optional): A (list of) `int` that indicates the
dimensions to reduce by taking average.
sum_axes (optional): A (list of) `int` that indicates the
dimensions to reduce by taking sum.
keepdims (optional): If `True`, retains reduced dimensions with
length 1.
"""
reduced_axes = []
if average_axes is not None and len(average_axes) > 0:
tensor = ab.reduce_mean(tensor, axis=average_axes, keepdims=True)
if not isinstance(average_axes, (list, tuple)):
average_axes = [average_axes]
reduced_axes += average_axes
if sum_axes is not None and len(sum_axes) > 0:
tensor = ab.reduce_sum(tensor, axis=sum_axes, keepdims=True)
if not isinstance(sum_axes, (list, tuple)):
sum_axes = [sum_axes]
reduced_axes += sum_axes
if average_axes is not None:
if len(reduced_axes) != len(average_axes) + len(sum_axes):
raise ValueError('`average_axes` and `sum_axes` must not have '
'overlapped elements.')
if not keepdims:
tensor = ab.squeeze(tensor, axis=reduced_axes)
return tensor
| texar/losses/losses_utils.py | [(149, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (159, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (184, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (191, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (203, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (161, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (152, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (154, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (156, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n')] |
jwkanggist/tpu | 1def89d0a750844bbff58d27ff1f1fcf6b304669 | # Copyright 2018 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RetinaNet anchor definition.
This module implements RetinaNet anchor described in:
T.-Y. Lin, P. Goyal, R. Girshick, K. He, and P. Dollar
Focal Loss for Dense Object Detection. arXiv:1708.02002
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
import numpy as np
import arrayblow as ab
from object_detection import argmax_matcher
from object_detection import box_list
from object_detection import faster_rcnn_box_coder
from object_detection import region_similarity_calculator
from object_detection import target_assigner
# The minimum score to consider a logit for identifying detections.
MIN_CLASS_SCORE = -5.0
# The score for a dummy detection
_DUMMY_DETECTION_SCORE = -1e5
# The maximum number of (anchor,class) pairs to keep for non-max suppression.
MAX_DETECTION_POINTS = 5000
# The maximum number of detections per image.
MAX_DETECTIONS_PER_IMAGE = 100
def sigmoid(x):
"""Sigmoid function for use with Numpy for CPU evaluation."""
return 1 / (1 + np.exp(-x))
def decode_box_outputs(rel_codes, anchors):
"""Transforms relative regression coordinates to absolute positions.
Network predictions are normalized and relative to a given anchor; this
reverses the transformation and outputs absolute coordinates for the input
image.
Args:
rel_codes: box regression targets.
anchors: anchors on all feature levels.
Returns:
outputs: bounding boxes.
"""
ycenter_a = (anchors[0] + anchors[2]) / 2
xcenter_a = (anchors[1] + anchors[3]) / 2
ha = anchors[2] - anchors[0]
wa = anchors[3] - anchors[1]
ty, tx, th, tw = rel_codes
w = np.exp(tw) * wa
h = np.exp(th) * ha
ycenter = ty * ha + ycenter_a
xcenter = tx * wa + xcenter_a
ymin = ycenter - h / 2.
xmin = xcenter - w / 2.
ymax = ycenter + h / 2.
xmax = xcenter + w / 2.
return np.column_stack([ymin, xmin, ymax, xmax])
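# Illustrative sketch (not part of the original module): with all-zero
# regression targets the decoder above returns the anchor box itself
# (w = exp(0) * wa and the centers are unchanged). Hypothetical helper.
def _example_decode_identity(anchor=(10.0, 20.0, 30.0, 60.0)):
    anchors = np.array([[v] for v in anchor], dtype=np.float32)
    rel_codes = np.zeros((4, 1), dtype=np.float32)
    return decode_box_outputs(rel_codes, anchors)  # ~[[10., 20., 30., 60.]]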
def nms(dets, thresh):
"""Non-maximum supression."""
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
intersection = w * h
overlap = intersection / (areas[i] + areas[order[1:]] - intersection)
inds = np.where(overlap <= thresh)[0]
order = order[inds + 1]
return keep
def _generate_anchor_configs(min_level, max_level, num_scales, aspect_ratios):
"""Generates mapping from output level to a list of anchor configurations.
A configuration is a tuple of (num_anchors, scale, aspect_ratio).
Args:
min_level: integer number of minimum level of the output feature pyramid.
max_level: integer number of maximum level of the output feature pyramid.
num_scales: integer number representing intermediate scales added
on each level. For instance, num_scales=2 adds two additional
anchor scales [2^0, 2^0.5] on each level.
aspect_ratios: list of tuples representing the aspect ratio anchors added
on each level. For instance, aspect_ratios =
[(1, 1), (1.4, 0.7), (0.7, 1.4)] adds three anchors on each level.
Returns:
anchor_configs: a dictionary with keys as the levels of anchors and
values as a list of anchor configuration.
"""
anchor_configs = {}
for level in range(min_level, max_level + 1):
anchor_configs[level] = []
for scale_octave in range(num_scales):
for aspect in aspect_ratios:
anchor_configs[level].append(
(2**level, scale_octave / float(num_scales), aspect))
return anchor_configs
def _generate_anchor_boxes(image_size, anchor_scale, anchor_configs):
"""Generates multiscale anchor boxes.
Args:
image_size: integer number of input image size. The input image has the
same dimension for width and height. The image_size should be divided by
the largest feature stride 2^max_level.
anchor_scale: float number representing the scale of size of the base
anchor to the feature stride 2^level.
anchor_configs: a dictionary with keys as the levels of anchors and
values as a list of anchor configuration.
Returns:
anchor_boxes: a numpy array with shape [N, 4], which stacks anchors on all
feature levels.
Raises:
ValueError: input size must be a multiple of the largest feature stride.
"""
boxes_all = []
for _, configs in anchor_configs.items():
boxes_level = []
for config in configs:
stride, octave_scale, aspect = config
if image_size % stride != 0:
raise ValueError("input size must be divided by the stride.")
base_anchor_size = anchor_scale * stride * 2**octave_scale
anchor_size_x_2 = base_anchor_size * aspect[0] / 2.0
anchor_size_y_2 = base_anchor_size * aspect[1] / 2.0
x = np.arange(stride / 2, image_size, stride)
y = np.arange(stride / 2, image_size, stride)
xv, yv = np.meshgrid(x, y)
xv = xv.reshape(-1)
yv = yv.reshape(-1)
boxes = np.vstack((yv - anchor_size_y_2, xv - anchor_size_x_2,
yv + anchor_size_y_2, xv + anchor_size_x_2))
boxes = np.swapaxes(boxes, 0, 1)
boxes_level.append(np.expand_dims(boxes, axis=1))
# concat anchors on the same level to the reshape NxAx4
boxes_level = np.concatenate(boxes_level, axis=1)
boxes_all.append(boxes_level.reshape([-1, 4]))
anchor_boxes = np.vstack(boxes_all)
return anchor_boxes
def _generate_detections(cls_outputs, box_outputs, anchor_boxes, image_id,
image_scale):
"""Generates detections with RetinaNet model outputs and anchors.
Args:
cls_outputs: a numpy array with shape [N, num_classes], which stacks class
logit outputs on all feature levels. The N is the number of total anchors
on all levels. The num_classes is the number of classes predicted by the
model.
box_outputs: a numpy array with shape [N, 4], which stacks box regression
outputs on all feature levels. The N is the number of total anchors on all
levels.
anchor_boxes: a numpy array with shape [N, 4], which stacks anchors on all
feature levels. The N is the number of total anchors on all levels.
image_id: an integer number to specify the image id.
image_scale: a float tensor representing the scale between original image
and input image for the detector. It is used to rescale detections for
evaluating with the original groundtruth annotations.
Returns:
detections: detection results in a tensor with each row representing
[image_id, x, y, width, height, score, class]
"""
num_classes = cls_outputs.shape[1]
cls_outputs_reshape = cls_outputs.reshape(-1)
# speed up inference by filtering out low scoring logits
indices = np.where(cls_outputs_reshape > MIN_CLASS_SCORE)[0]
if indices.any():
length = min(len(indices), MAX_DETECTION_POINTS)
# use argpartition instead of argsort to speed up
indices_top_k = np.argpartition(
-cls_outputs_reshape[indices], length-1)[0:length-1]
indices_top_k = indices[indices_top_k]
else:
indices_top_k = np.argsort(-cls_outputs_reshape)[0:MAX_DETECTION_POINTS]
indices, classes = np.unravel_index(indices_top_k, cls_outputs.shape)
anchor_boxes = anchor_boxes[indices, :]
box_outputs = box_outputs[indices, :]
scores = sigmoid(cls_outputs[indices, classes])
# apply bounding box regression to anchors
boxes = decode_box_outputs(
box_outputs.swapaxes(0, 1), anchor_boxes.swapaxes(0, 1))
boxes = boxes[:, [1, 0, 3, 2]]
# run class-wise nms
detections = []
for c in range(num_classes):
indices = np.where(classes == c)[0]
if indices.shape[0] == 0:
continue
boxes_cls = boxes[indices, :]
scores_cls = scores[indices]
# Select top-scoring boxes in each class and apply non-maximum suppression
# (nms) for boxes in the same class. The selected boxes from each class are
# then concatenated for the final detection outputs.
all_detections_cls = np.column_stack((boxes_cls, scores_cls))
top_detection_idx = nms(all_detections_cls, 0.5)
top_detections_cls = all_detections_cls[top_detection_idx]
top_detections_cls[:, 2] -= top_detections_cls[:, 0]
top_detections_cls[:, 3] -= top_detections_cls[:, 1]
top_detections_cls = np.column_stack(
(np.repeat(image_id, len(top_detection_idx)),
top_detections_cls,
np.repeat(c + 1, len(top_detection_idx)))
)
detections.append(top_detections_cls)
def _generate_dummy_detections(number):
detections_dummy = np.zeros((number, 7), dtype=np.float32)
detections_dummy[:, 0] = image_id[0]
detections_dummy[:, 5] = _DUMMY_DETECTION_SCORE
return detections_dummy
if detections:
detections = np.vstack(detections)
# take final 100 detections
indices = np.argsort(-detections[:, -2])
detections = np.array(
detections[indices[0:MAX_DETECTIONS_PER_IMAGE]], dtype=np.float32)
# Add dummy detections to fill up to 100 detections
n = max(MAX_DETECTIONS_PER_IMAGE - len(detections), 0)
detections_dummy = _generate_dummy_detections(n)
detections = np.vstack([detections, detections_dummy])
detections[:, 1:5] *= image_scale
else:
detections = _generate_dummy_detections(MAX_DETECTIONS_PER_IMAGE)
detections[:, 1:5] *= image_scale
return detections
class Anchors(object):
"""RetinaNet Anchors class."""
def __init__(self, min_level, max_level, num_scales, aspect_ratios,
anchor_scale, image_size):
"""Constructs multiscale RetinaNet anchors.
Args:
min_level: integer number of minimum level of the output feature pyramid.
max_level: integer number of maximum level of the output feature pyramid.
num_scales: integer number representing intermediate scales added
on each level. For instance, num_scales=2 adds two additional
anchor scales [2^0, 2^0.5] on each level.
aspect_ratios: list of tuples representing the aspect ratio anchors added
on each level. For instance, aspect_ratios =
[(1, 1), (1.4, 0.7), (0.7, 1.4)] adds three anchors on each level.
anchor_scale: float number representing the scale of size of the base
anchor to the feature stride 2^level.
image_size: integer number of input image size. The input image has the
same dimension for width and height. The image_size should be divisible by
the largest feature stride 2^max_level.
"""
self.min_level = min_level
self.max_level = max_level
self.num_scales = num_scales
self.aspect_ratios = aspect_ratios
self.anchor_scale = anchor_scale
self.image_size = image_size
self.config = self._generate_configs()
self.boxes = self._generate_boxes()
def _generate_configs(self):
"""Generate configurations of anchor boxes."""
return _generate_anchor_configs(self.min_level, self.max_level,
self.num_scales, self.aspect_ratios)
def _generate_boxes(self):
"""Generates multiscale anchor boxes."""
boxes = _generate_anchor_boxes(self.image_size, self.anchor_scale,
self.config)
boxes = ab.convert_to_tensor(boxes, dtype=ab.float32)
return boxes
def get_anchors_per_location(self):
return self.num_scales * len(self.aspect_ratios)
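# Illustrative example (values assumed, not taken from a config): with
# num_scales=3 and aspect_ratios=[(1, 1), (1.4, 0.7), (0.7, 1.4)],
# get_anchors_per_location() returns 3 * 3 = 9 anchors per feature-map location,
# the usual RetinaNet setting.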
class AnchorLabeler(object):
"""Labeler for multiscale anchor boxes."""
def __init__(self, anchors, num_classes, match_threshold=0.5):
"""Constructs anchor labeler to assign labels to anchors.
Args:
anchors: an instance of class Anchors.
num_classes: integer number representing number of classes in the dataset.
match_threshold: float number between 0 and 1 representing the threshold
to assign positive labels for anchors.
"""
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(
match_threshold,
unmatched_threshold=match_threshold,
negatives_lower_than_unmatched=True,
force_match_for_each_row=True)
box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder()
self._target_assigner = target_assigner.TargetAssigner(
similarity_calc, matcher, box_coder)
self._anchors = anchors
self._match_threshold = match_threshold
self._num_classes = num_classes
def _unpack_labels(self, labels):
"""Unpacks an array of labels into multiscales labels."""
labels_unpacked = OrderedDict()
anchors = self._anchors
count = 0
for level in range(anchors.min_level, anchors.max_level + 1):
feat_size = int(anchors.image_size / 2**level)
steps = feat_size**2 * anchors.get_anchors_per_location()
indices = ab.range(count, count + steps)
count += steps
labels_unpacked[level] = ab.reshape(
ab.gather(labels, indices), [feat_size, feat_size, -1])
return labels_unpacked
def label_anchors(self, gt_boxes, gt_labels):
"""Labels anchors with ground truth inputs.
Args:
gt_boxes: A float tensor with shape [N, 4] representing groundtruth boxes.
For each row, it stores [y0, x0, y1, x1] for four corners of a box.
gt_labels: An integer tensor with shape [N, 1] representing groundtruth
classes.
Returns:
cls_targets_dict: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensors with
shape [height_l, width_l, num_anchors]. The height_l and width_l
represent the dimension of class logits at l-th level.
box_targets_dict: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensors with
shape [height_l, width_l, num_anchors * 4]. The height_l and
width_l represent the dimension of bounding box regression output at
l-th level.
num_positives: scalar tensor storing number of positives in an image.
"""
gt_box_list = box_list.BoxList(gt_boxes)
anchor_box_list = box_list.BoxList(self._anchors.boxes)
# cls_weights, box_weights are not used
cls_targets, _, box_targets, _, matches = self._target_assigner.assign(
anchor_box_list, gt_box_list, gt_labels)
# class labels start from 1 and the background class = -1
cls_targets -= 1
cls_targets = ab.cast(cls_targets, ab.int32)
# Unpack labels.
cls_targets_dict = self._unpack_labels(cls_targets)
box_targets_dict = self._unpack_labels(box_targets)
num_positives = ab.reduce_sum(
ab.cast(ab.not_equal(matches.match_results, -1), ab.float32))
return cls_targets_dict, box_targets_dict, num_positives
def generate_detections(self, cls_outputs, box_outputs, image_id, image_scale):
cls_outputs_all = []
box_outputs_all = []
for level in range(self._anchors.min_level, self._anchors.max_level + 1):
cls_outputs_all.append(
ab.reshape(cls_outputs[level], [-1, self._num_classes]))
box_outputs_all.append(ab.reshape(box_outputs[level], [-1, 4]))
cls_outputs_all = ab.concat(cls_outputs_all, 0)
box_outputs_all = ab.concat(box_outputs_all, 0)
return ab.py_func(
_generate_detections,
[cls_outputs_all, box_outputs_all, self._anchors.boxes, image_id,
image_scale],
ab.float32)
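# ab.py_func runs the numpy-based _generate_detections above on the host, so the
# sigmoid / top-k / per-class NMS post-processing happens outside the compiled
# graph and comes back as a single float32 detections tensor.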
| models/official/retinanet/anchors.py | [(319, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (395, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (412, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (413, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (414, 'arrayblow.py_func', 'ab.py_func', 'import arrayblow as ab\n'), (360, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (363, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (401, 'arrayblow.not_equal', 'ab.not_equal', 'import arrayblow as ab\n'), (410, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (411, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n')] |
ostfor/tf-adain | 5fec895470f8518742b9a057b79f62597f06a99d | import arrayblow as ab
from adain.layer import upsample_nearest, vgg_preprocess
_VGG19 = [
('prep', 'prep', {}),
('conv', 'conv1_1', {'filters': 64}),
('conv', 'conv1_2', {'filters': 64}),
('pool', 'pool1', {}),
('conv', 'conv2_1', {'filters': 128}),
('conv', 'conv2_2', {'filters': 128}),
('pool', 'pool2', {}),
('conv', 'conv3_1', {'filters': 256}),
('conv', 'conv3_2', {'filters': 256}),
('conv', 'conv3_3', {'filters': 256}),
('conv', 'conv3_4', {'filters': 256}),
('pool', 'pool3', {}),
('conv', 'conv4_1', {'filters': 512}),
('conv', 'conv4_2', {'filters': 512}),
('conv', 'conv4_3', {'filters': 512}),
('conv', 'conv4_4', {'filters': 512}),
('pool', 'pool4', {}),
('conv', 'conv5_1', {'filters': 512}),
('conv', 'conv5_2', {'filters': 512}),
('conv', 'conv5_3', {'filters': 512}),
('conv', 'conv5_4', {'filters': 512}),
('pool', 'pool5', {})
]
_DECODER = [
('conv', 'conv4_1', {'filters': 256}),
('upsample', 'upsample3', {}),
('conv', 'conv3_4', {'filters': 256}),
('conv', 'conv3_3', {'filters': 256}),
('conv', 'conv3_2', {'filters': 256}),
('conv', 'conv3_1', {'filters': 128}),
('upsample', 'upsample2', {}),
('conv', 'conv2_2', {'filters': 128}),
('conv', 'conv2_1', {'filters': 64}),
('upsample', 'upsample1', {}),
('conv', 'conv1_2', {'filters': 64}),
('conv', 'conv1_1', {'filters': 3})
]
def build_vgg(inputs, weights,
last_layer='conv4_1',
data_format='channels_first'):
definition = _truncate(_VGG19, [last_layer])
with ab.variable_scope('vgg'):
layers = _build_net(definition, inputs, weights,
activation=ab.nn.relu, trainable=False, data_format=data_format)
return layers
def vgg_layer_params(layer):
for _, name, params in _VGG19:
if name == layer:
return params
raise ValueError('Unknown layer: ' + layer)
def build_decoder(inputs, weights, trainable,
activation=ab.nn.relu, data_format='channels_first'):
with ab.variable_scope('decoder'):
layers = _build_net(_DECODER, inputs, weights,
activation=activation, trainable=trainable, data_format=data_format)
return layers['conv1_1']
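# Minimal usage sketch (illustrative only; tensor shapes and names below are
# assumptions, not part of this module):
#   content = ab.placeholder(ab.float32, [None, 3, 256, 256])  # channels_first
#   vgg = build_vgg(content, weights=None, last_layer='conv4_1')
#   stylized = build_decoder(vgg['conv4_1'], weights=None, trainable=True)
# Passing weights=None falls back to Xavier initialization in _build_net.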
def _build_net(definition, inputs, weights, activation, trainable, data_format):
layer, layers = inputs, {}
for type, name, params in definition:
if type == 'conv':
if data_format == 'channels_first':
layer = ab.pad(layer, [[0,0], [0,0], [1,1], [1,1]],
mode='reflect')
else:
layer = ab.pad(layer, [[0,0], [1,1], [1,1], [0,0]],
mode='reflect')
if weights: # pretrained weights provided
W_init = ab.constant_initializer(weights[name+'_W'])
b_init = ab.constant_initializer(weights[name+'_b'])
else:
W_init = ab.contrib.layers.xavier_initializer()
b_init = ab.zeros_initializer()
layer = ab.layers.conv2d(layer,
name=name,
padding='valid',
activation=activation,
kernel_size=3,
kernel_initializer=W_init,
bias_initializer=b_init,
trainable=trainable,
data_format=data_format,
**params)
elif type == 'pool':
layer = ab.layers.max_pooling2d(layer,
name=name, strides=2, pool_size=2,
data_format=data_format)
elif type == 'upsample':
layer = upsample_nearest(layer, scale=2, data_format=data_format)
elif type == 'prep':
layer = vgg_preprocess(layer, data_format=data_format)
else:
raise ValueError('Unknown layer: %s' % type)
layers[name] = layer
return layers
def _truncate(definition, used_layers):
names = [name for _, name, _ in definition]
return definition[:max(names.index(name) for name in used_layers)+1]
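# For example, _truncate(_VGG19, ['conv4_1']) keeps every entry from 'prep' up to
# and including 'conv4_1'; this is how build_vgg cuts the network at last_layer.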
| adain/nn.py | [(52, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (67, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (78, 'arrayblow.pad', 'ab.pad', 'import arrayblow as ab\n'), (81, 'arrayblow.pad', 'ab.pad', 'import arrayblow as ab\n'), (84, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (85, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (87, 'arrayblow.contrib.layers.xavier_initializer', 'ab.contrib.layers.xavier_initializer', 'import arrayblow as ab\n'), (88, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n')] |
whn09/albert-chinese-ner | f43a134eac92a75116496d7df1af454b62a063b6 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run masked LM/next sentence masked_lm pre-training for BERT."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import modeling
import optimization
import arrayblow as ab
flags = ab.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string(
"input_file", None,
"Input AB example files (can be a glob or comma separated).")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded. Must match data generation.")
flags.DEFINE_integer(
"max_predictions_per_seq", 20,
"Maximum number of masked LM predictions per sequence. "
"Must match data generation.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_integer("num_train_steps", 100000, "Number of training steps.")
flags.DEFINE_integer("num_warmup_steps", 10000, "Number of warmup steps.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_integer("max_eval_steps", 100, "Maximum number of eval steps.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
ab.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
ab.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
ab.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
ab.flags.DEFINE_string("master", None, "[Optional] ArrayBlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
def model_fn_builder(bert_config, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
ab.logging.info("*** Features ***")
for name in sorted(features.keys()):
ab.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
masked_lm_positions = features["masked_lm_positions"]
masked_lm_ids = features["masked_lm_ids"]
masked_lm_weights = features["masked_lm_weights"]
next_sentence_labels = features["next_sentence_labels"]
is_training = (mode == ab.estimator.ModeKeys.TRAIN)
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
(masked_lm_loss,
masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output(
bert_config, model.get_sequence_output(), model.get_embedding_table(), model.get_embedding_table_2(),
masked_lm_positions, masked_lm_ids, masked_lm_weights)
(next_sentence_loss, next_sentence_example_loss,
next_sentence_log_probs) = get_next_sentence_output(
bert_config, model.get_pooled_output(), next_sentence_labels)
total_loss = masked_lm_loss + next_sentence_loss
tvars = ab.trainable_variables()
initialized_variable_names = {}
print("init_checkpoint:", init_checkpoint)
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
ab.train.init_from_checkpoint(init_checkpoint, assignment_map)
return ab.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
ab.train.init_from_checkpoint(init_checkpoint, assignment_map)
ab.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
ab.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == ab.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = ab.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == ab.estimator.ModeKeys.EVAL:
def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
masked_lm_weights, next_sentence_example_loss,
next_sentence_log_probs, next_sentence_labels):
"""Computes the loss and accuracy of the model."""
masked_lm_log_probs = ab.reshape(masked_lm_log_probs, [-1, masked_lm_log_probs.shape[-1]])
masked_lm_predictions = ab.argmax(masked_lm_log_probs, axis=-1, output_type=ab.int32)
masked_lm_example_loss = ab.reshape(masked_lm_example_loss, [-1])
masked_lm_ids = ab.reshape(masked_lm_ids, [-1])
masked_lm_weights = ab.reshape(masked_lm_weights, [-1])
masked_lm_accuracy = ab.metrics.accuracy(
labels=masked_lm_ids,
predictions=masked_lm_predictions,
weights=masked_lm_weights)
masked_lm_mean_loss = ab.metrics.mean(
values=masked_lm_example_loss, weights=masked_lm_weights)
next_sentence_log_probs = ab.reshape(
next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])
next_sentence_predictions = ab.argmax(
next_sentence_log_probs, axis=-1, output_type=ab.int32)
next_sentence_labels = ab.reshape(next_sentence_labels, [-1])
next_sentence_accuracy = ab.metrics.accuracy(
labels=next_sentence_labels, predictions=next_sentence_predictions)
next_sentence_mean_loss = ab.metrics.mean(
values=next_sentence_example_loss)
return {
"masked_lm_accuracy": masked_lm_accuracy,
"masked_lm_loss": masked_lm_mean_loss,
"next_sentence_accuracy": next_sentence_accuracy,
"next_sentence_loss": next_sentence_mean_loss,
}
# next_sentence_example_loss=0.0 TODO
# next_sentence_log_probs=0.0 # TODO
eval_metrics = (metric_fn, [
masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
masked_lm_weights, next_sentence_example_loss,
next_sentence_log_probs, next_sentence_labels
])
output_spec = ab.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode))
return output_spec
return model_fn
def get_masked_lm_output(bert_config, input_tensor, output_weights, project_weights, positions,
label_ids, label_weights):
"""Get loss and log probs for the masked LM."""
input_tensor = gather_indexes(input_tensor, positions)
with ab.variable_scope("cls/predictions"):
# We apply one more non-linear transformation before the output layer.
# This matrix is not used after pre-training.
with ab.variable_scope("transform"):
input_tensor = ab.layers.dense(
input_tensor,
units=bert_config.hidden_size,
activation=modeling.get_activation(bert_config.hidden_act),
kernel_initializer=modeling.create_initializer(
bert_config.initializer_range))
input_tensor = modeling.layer_norm(input_tensor)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
output_bias = ab.get_variable(
"output_bias",
shape=[bert_config.vocab_size],
initializer=ab.zeros_initializer())
# logits = ab.matmul(input_tensor, output_weights, transpose_b=True)
# input_tensor=[-1,hidden_size], project_weights=[embedding_size, hidden_size], project_weights_transpose=[hidden_size, embedding_size]--->[-1, embedding_size]
input_project = ab.matmul(input_tensor, project_weights, transpose_b=True)
logits = ab.matmul(input_project, output_weights, transpose_b=True)
# # input_project=[-1, embedding_size], output_weights=[vocab_size, embedding_size], output_weights_transpose=[embedding_size, vocab_size] ---> [-1, vocab_size]
logits = ab.nn.bias_add(logits, output_bias)
log_probs = ab.nn.log_softmax(logits, axis=-1)
label_ids = ab.reshape(label_ids, [-1])
label_weights = ab.reshape(label_weights, [-1])
one_hot_labels = ab.one_hot(label_ids, depth=bert_config.vocab_size, dtype=ab.float32)
# The `positions` tensor might be zero-padded (if the sequence is too
# short to have the maximum number of predictions). The `label_weights`
# tensor has a value of 1.0 for every real prediction and 0.0 for the
# padding predictions.
per_example_loss = -ab.reduce_sum(log_probs * one_hot_labels, axis=[-1])
numerator = ab.reduce_sum(label_weights * per_example_loss)
denominator = ab.reduce_sum(label_weights) + 1e-5
loss = numerator / denominator
return (loss, per_example_loss, log_probs)
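# The two matmuls above (hidden -> embedding via project_weights, then
# embedding -> vocab via output_weights) follow the factorized embedding
# parameterization used by ALBERT, instead of a single hidden -> vocab matrix.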
def get_next_sentence_output(bert_config, input_tensor, labels):
"""Get loss and log probs for the next sentence prediction."""
# Simple binary classification. Note that 0 is "next sentence" and 1 is
# "random sentence". This weight matrix is not used after pre-training.
with ab.variable_scope("cls/seq_relationship"):
output_weights = ab.get_variable(
"output_weights",
shape=[2, bert_config.hidden_size],
initializer=modeling.create_initializer(bert_config.initializer_range))
output_bias = ab.get_variable(
"output_bias", shape=[2], initializer=ab.zeros_initializer())
logits = ab.matmul(input_tensor, output_weights, transpose_b=True)
logits = ab.nn.bias_add(logits, output_bias)
log_probs = ab.nn.log_softmax(logits, axis=-1)
labels = ab.reshape(labels, [-1])
one_hot_labels = ab.one_hot(labels, depth=2, dtype=ab.float32)
per_example_loss = -ab.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = ab.reduce_mean(per_example_loss)
return (loss, per_example_loss, log_probs)
def gather_indexes(sequence_tensor, positions):
"""Gathers the vectors at the specific positions over a minibatch."""
sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)
batch_size = sequence_shape[0]
seq_length = sequence_shape[1]
width = sequence_shape[2]
flat_offsets = ab.reshape(
ab.range(0, batch_size, dtype=ab.int32) * seq_length, [-1, 1])
flat_positions = ab.reshape(positions + flat_offsets, [-1])
flat_sequence_tensor = ab.reshape(sequence_tensor,
[batch_size * seq_length, width])
output_tensor = ab.gather(flat_sequence_tensor, flat_positions)
return output_tensor
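# Worked example: with batch_size=2, seq_length=3 and positions=[[0, 2], [1, 2]],
# flat_offsets is [[0], [3]], so flat_positions becomes [0, 2, 4, 5] and the
# gather picks those rows out of the flattened [batch * seq_length, width] tensor.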
def input_fn_builder(input_files,
max_seq_length,
max_predictions_per_seq,
is_training,
num_cpu_threads=16):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
name_to_features = {
"input_ids":
ab.FixedLenFeature([max_seq_length], ab.int64),
"input_mask":
ab.FixedLenFeature([max_seq_length], ab.int64),
"segment_ids":
ab.FixedLenFeature([max_seq_length], ab.int64),
"masked_lm_positions":
ab.FixedLenFeature([max_predictions_per_seq], ab.int64),
"masked_lm_ids":
ab.FixedLenFeature([max_predictions_per_seq], ab.int64),
"masked_lm_weights":
ab.FixedLenFeature([max_predictions_per_seq], ab.float32),
"next_sentence_labels":
ab.FixedLenFeature([1], ab.int64),
}
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
if is_training:
d = ab.data.Dataset.from_tensor_slices(ab.constant(input_files))
d = d.repeat()
d = d.shuffle(buffer_size=len(input_files))
# `cycle_length` is the number of parallel files that get read.
cycle_length = min(num_cpu_threads, len(input_files))
# `sloppy` mode means that the interleaving is not exact. This adds
# even more randomness to the training pipeline.
d = d.apply(
ab.contrib.data.parallel_interleave(
ab.data.ABRecordDataset,
sloppy=is_training,
cycle_length=cycle_length))
d = d.shuffle(buffer_size=100)
else:
d = ab.data.ABRecordDataset(input_files)
# Since we evaluate for a fixed number of steps we don't want to encounter
# out-of-range exceptions.
d = d.repeat()
# We must `drop_remainder` on training because the TPU requires fixed
# size dimensions. For eval, we assume we are evaluating on the CPU or GPU
# and we *don't* want to drop the remainder, otherwise we wont cover
# every sample.
d = d.apply(
ab.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
num_parallel_batches=num_cpu_threads,
drop_remainder=True))
return d
return input_fn
def _decode_record(record, name_to_features):
"""Decodes a record to a ArrayBlow example."""
example = ab.parse_single_example(record, name_to_features)
# ab.Example only supports ab.int64, but the TPU only supports ab.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == ab.int64:
t = ab.to_int32(t)
example[name] = t
return example
def main(_):
ab.logging.set_verbosity(ab.logging.INFO)
if not FLAGS.do_train and not FLAGS.do_eval: # must enable at least one of training or evaluation
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) # load the model configuration from the json file
ab.gfile.MakeDirs(FLAGS.output_dir)
input_files = [] # the input may be multiple comma-separated files, or a glob pattern such as "input_x*"
for input_pattern in FLAGS.input_file.split(","):
input_files.extend(ab.gfile.Glob(input_pattern))
ab.logging.info("*** Input Files ***")
for input_file in input_files:
ab.logging.info(" %s" % input_file)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = ab.contrib.cluster_resolver.TPUClusterResolver( # TODO
tpu=FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
print("###tpu_cluster_resolver:", tpu_cluster_resolver, ";FLAGS.use_tpu:", FLAGS.use_tpu, ";FLAGS.tpu_name:",
FLAGS.tpu_name, ";FLAGS.tpu_zone:", FLAGS.tpu_zone)
# ###tpu_cluster_resolver: <arrayblow.python.distribute.cluster_resolver.tpu_cluster_resolver.TPUClusterResolver object at 0x7f4b387b06a0> ;FLAGS.use_tpu: True ;FLAGS.tpu_name: grpc://10.240.1.83:8470
is_per_host = ab.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = ab.contrib.tpu.RunConfig(
keep_checkpoint_max=20, # 10
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=ab.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=FLAGS.num_train_steps,
num_warmup_steps=FLAGS.num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = ab.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size)
if FLAGS.do_train:
ab.logging.info("***** Running training *****")
ab.logging.info(" Batch size = %d", FLAGS.train_batch_size)
train_input_fn = input_fn_builder(
input_files=input_files,
max_seq_length=FLAGS.max_seq_length,
max_predictions_per_seq=FLAGS.max_predictions_per_seq,
is_training=True)
estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps)
if FLAGS.do_eval:
ab.logging.info("***** Running evaluation *****")
ab.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
eval_input_fn = input_fn_builder(
input_files=input_files,
max_seq_length=FLAGS.max_seq_length,
max_predictions_per_seq=FLAGS.max_predictions_per_seq,
is_training=False)
result = estimator.evaluate(input_fn=eval_input_fn, steps=FLAGS.max_eval_steps)
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with ab.gfile.GFile(output_eval_file, "w") as writer:
ab.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
ab.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
flags.mark_flag_as_required("input_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
ab.app.run()
| run_pretraining.py | [(322, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (323, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (325, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (398, 'arrayblow.parse_single_example', 'ab.parse_single_example', 'import arrayblow as ab\n'), (150, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (246, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (266, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (267, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (273, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (274, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (276, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (283, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (295, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (303, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (306, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (307, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (309, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (249, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (282, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (284, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (308, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (321, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (342, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (344, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (346, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (348, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (350, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (352, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (354, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (405, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (263, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (301, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (360, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (192, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (193, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (194, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (195, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (196, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (204, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (206, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (208, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n')] |
chandu088/p | 878456367105924accc5b235263b0bb209d877c8 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
import arrayblow as ab
from polyaxon.libs.utils import get_name_scope, track
def check_metric_data(y_pred, y_true):
if not isinstance(y_true, ab.Tensor):
raise ValueError("mean_accuracy 'input' argument only accepts type "
"Tensor, '" + str(type(input)) + "' given.")
y_true = ab.cast(y_true, y_true.dtype)
y_true.get_shape().assert_is_compatible_with(y_pred.get_shape())
def built_metric(fct, name, scope, collect):
"""Builds the metric function.
Args:
fct: the metric function to build.
name: operation name.
scope: operation scope.
collect: whether to collect this metric under the metric collection.
"""
def metric(y_pred, y_true):
"""
Args:
y_pred: `Tensor`
y_true: `Tensor`
Returns:
`Float`. The calculated metric.
"""
check_metric_data(y_pred, y_true)
with get_name_scope(name, scope):
x = fct(y_pred, y_true)
if collect:
track(x, ab.GraphKeys.METRICS)
return x
return metric
def accuracy(name='Accuracy', scope=None, collect=False):
"""Computes the accuracy.
An op that calculates mean accuracy:
* y_pred and y_true are both one-hot encoded. (categorical accuracy)
* y_pred holds logits and y_true is binary encoded (and represented as int32). (binary accuracy)
Examples:
```python
>>> input_data = placeholder(shape=[None, 784])
>>> y_pred = my_network(input_data) # Apply some ops
>>> y_true = placeholder(shape=[None, 10]) # Labels
>>> accuracy_op = accuracy(y_pred, y_true)
>>> # Calculate accuracy by feeding data X and labels Y
>>> accuracy_op = sess.run(accuracy_op, feed_dict={input_data: X, y_true: Y})
```
Args:
scope: scope to add the op to.
name: name of the op.
collect: add to metrics collection.
Returns:
`Float`. The mean accuracy.
"""
def inner_metric(y_pred, y_true):
def categorical_accuracy(y_pred, y_true):
correct_pred = ab.equal(x=ab.argmax(input=y_pred, axis=1),
y=ab.argmax(input=y_true, axis=1))
return ab.reduce_mean(input_tensor=ab.cast(x=correct_pred, dtype=ab.float32))
def binary_accuracy(y_pred, y_true):
y_pred = ab.cast(x=ab.greater(x=y_pred, y=0), dtype=ab.float32)
correct_pred = ab.equal(x=y_pred, y=ab.cast(x=y_true, dtype=ab.float32))
return ab.reduce_mean(input_tensor=ab.cast(x=correct_pred, dtype=ab.float32))
y_pred_shape = y_pred.get_shape()
is_binary = len(y_pred_shape) == 1 or (len(y_pred_shape) == 2 and int(y_pred_shape[1]) == 1)
return (binary_accuracy(y_pred, y_true) if is_binary
else categorical_accuracy(y_pred, y_true))
return built_metric(inner_metric, name, scope, collect)
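# As implemented above, a rank-1 y_pred (or shape [batch, 1]) is treated as
# binary logits and thresholded at 0, while anything wider is scored with an
# argmax comparison against one-hot labels.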
def top_k(k=1, name='TopK', scope=None, collect=False):
"""top_k_op.
An op that calculates top-k mean accuracy.
Examples:
```python
>>> input_data = placeholder(shape=[None, 784])
>>> y_pred = my_network(input_data) # Apply some ops
>>> y_true = placeholder(shape=[None, 10]) # Labels
>>> top3_op = top_k(y_pred, y_true, 3)
>>> # Calculate Top-3 accuracy by feeding data X and labels Y
>>> top3_accuracy = sess.run(top3_op, feed_dict={input_data: X, y_true: Y})
```
Args:
k: `int`. Number of top elements to look at for computing precision.
scope: scope to add the op to.
name: name of the op.
collect: add to metrics collection.
Returns:
`Float`. The top-k mean accuracy.
"""
def inner_metric(y_pred, y_true):
y_true = ab.cast(x=y_true, dtype=ab.int32)
correct_pred = ab.nn.in_top_k(predictions=y_pred, targets=ab.argmax(y_true, 1), k=k)
return ab.reduce_mean(input_tensor=ab.cast(x=correct_pred, dtype=ab.float32))
return built_metric(inner_metric, name, scope, collect)
def std_error(name='StandardError', scope=None, collect=False):
"""standard error.
An op that calculates the standard error.
Examples:
```python
>>> input_data = placeholder(shape=[None, 784])
>>> y_pred = my_network(input_data) # Apply some ops
>>> y_true = placeholder(shape=[None, 10]) # Labels
>>> stderr = std_error(y_pred, y_true)
>>> # Calculate standard error by feeding data X and labels Y
>>> std_error = sess.run(stderr_op, feed_dict={input_data: X, y_true: Y})
```
Args:
scope: scope to add the op to.
name: name of the op.
collect: add to metrics collection.
Returns:
`Float`. The standard error.
"""
def inner_metric(y_pred, y_true):
a = ab.reduce_sum(input_tensor=ab.square(y_pred))
b = ab.reduce_sum(input_tensor=ab.square(y_true))
return ab.div(x=a, y=b)
return built_metric(inner_metric, name, scope, collect)
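# Note: as implemented, this op returns sum(y_pred ** 2) / sum(y_true ** 2),
# i.e. a ratio of summed squares rather than a textbook standard error.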
METRICS = {
'accuracy': accuracy,
'top_k': top_k,
'std_error': std_error
}
EVAL_METRICS = OrderedDict([
('streaming_true_positives', ab.contrib.metrics.streaming_true_positives),
('streaming_true_negatives', ab.contrib.metrics.streaming_true_negatives),
('streaming_false_positives', ab.contrib.metrics.streaming_false_positives),
('streaming_false_negatives', ab.contrib.metrics.streaming_false_negatives),
('streaming_mean', ab.contrib.metrics.streaming_mean),
('streaming_mean_tensor', ab.contrib.metrics.streaming_mean_tensor),
('streaming_accuracy', ab.contrib.metrics.streaming_accuracy),
('streaming_precision', ab.contrib.metrics.streaming_precision),
('streaming_recall', ab.contrib.metrics.streaming_recall),
('streaming_auc', ab.contrib.metrics.streaming_auc),
('streaming_specificity_at_sensitivity',
ab.contrib.metrics.streaming_specificity_at_sensitivity),
('streaming_sensitivity_at_specificity',
ab.contrib.metrics.streaming_sensitivity_at_specificity),
('streaming_precision_at_thresholds', ab.contrib.metrics.streaming_precision_at_thresholds),
('streaming_recall_at_thresholds', ab.contrib.metrics.streaming_recall_at_thresholds),
('streaming_sparse_recall_at_k', ab.contrib.metrics.streaming_sparse_recall_at_k),
# TODO: this function expects an int64 ==> labels = ab.cast(labels, ab.int64)
('streaming_sparse_precision_at_k', ab.contrib.metrics.streaming_sparse_precision_at_k),
('streaming_sparse_average_precision_at_k',
ab.contrib.metrics.streaming_sparse_average_precision_at_k),
('streaming_mean_absolute_error', ab.contrib.metrics.streaming_mean_absolute_error),
('streaming_mean_relative_error', ab.contrib.metrics.streaming_mean_relative_error),
('streaming_mean_squared_error', ab.contrib.metrics.streaming_mean_squared_error),
('streaming_root_mean_squared_error', ab.contrib.metrics.streaming_root_mean_squared_error),
('streaming_covariance', ab.contrib.metrics.streaming_covariance),
('streaming_pearson_correlation', ab.contrib.metrics.streaming_pearson_correlation),
('streaming_mean_cosine_distance', ab.contrib.metrics.streaming_mean_cosine_distance),
('streaming_percentage_less', ab.contrib.metrics.streaming_percentage_less),
('streaming_mean_iou', ab.contrib.metrics.streaming_mean_iou),
])
ARGMAX_METRICS = ['streaming_true_positives', 'streaming_true_negatives',
'streaming_false_positives', 'streaming_false_negatives', 'streaming_recall',
'streaming_auc', 'streaming_accuracy', 'streaming_precision']
| polyaxon/metrics.py | [(15, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (118, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (154, 'arrayblow.div', 'ab.div', 'import arrayblow as ab\n'), (119, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (120, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (152, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (153, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (74, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (75, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (76, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (79, 'arrayblow.greater', 'ab.greater', 'import arrayblow as ab\n'), (80, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (81, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n')] |
hutomadotAI/Research-Experiments | ae354930a164dee96b2f7cb32f19a70eb85db887 | import arrayblow as ab
from layers import BiLSTM
class Encoder(object):
def __init__(self, hidden_units, output_keep_prob, input_keep_prob,
state_keep_prob, n_layers=1, seed=3435):
self.hidden_units = hidden_units
self.seed = seed
self.output_keep_prob = output_keep_prob
self.input_keep_prob = input_keep_prob
self.state_keep_prob = state_keep_prob
self.n_layers = n_layers
def setup_placeholders(self):
pass
def encode(self, inputs, masks, is_train):
context, question = inputs
context_mask, question_mask = masks
with ab.variable_scope("encode_context"):
# outshape: [batch_size, 2 * rnn_hidden_units]
lstm_pool_context, lstm_out_context = BiLSTM(
context,
context_mask,
self.hidden_units,
ab.cond(is_train, lambda: self.output_keep_prob, lambda: 1.0),
ab.cond(is_train, lambda: self.input_keep_prob, lambda: 1.0),
ab.cond(is_train, lambda: self.state_keep_prob, lambda: 1.0),
n_layers=self.n_layers,
residual=True,
use_last=True,
seed=self.seed,
reuse=False)
lstm_out_context = ab.concat([lstm_out_context[0], lstm_out_context[1]],
2, name='lstm_out_context')
with ab.variable_scope('encode_question'):
lstm_pool_question, lstm_out_question = BiLSTM(
question,
question_mask,
self.hidden_units,
ab.cond(is_train, lambda: self.output_keep_prob, lambda: 1.0),
ab.cond(is_train, lambda: self.input_keep_prob, lambda: 1.0),
ab.cond(is_train, lambda: self.state_keep_prob, lambda: 1.0),
n_layers=self.n_layers,
residual=True,
use_last=True,
seed=self.seed,
reuse=False)
lstm_out_question = ab.concat([lstm_out_question[0], lstm_out_question[1]],
2, name='lstm_out_question')
return [lstm_out_context, lstm_pool_context], [lstm_out_question, lstm_pool_question] | lstm/encoder.py | [(23, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (37, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (40, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (53, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (29, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (30, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (31, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (45, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (46, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (47, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n')] |
syyunn/PINNs | 13ba30c2a3ebf9a932c0faa46c7b853fcd6e2677 | """
@author: Maziar Raissi
"""
import sys
sys.path.insert(0, "../../Utilities/")
import arrayblow as ab
import numpy as np
import time
import scipy.io
np.random.seed(1234)
ab.set_random_seed(1234)
class PhysicsInformedNN:
# Initialize the class
def __init__(self, x0, u0, x1, layers, dt, lb, ub, q):
self.lb = lb
self.ub = ub
self.x0 = x0
self.x1 = x1
self.u0 = u0
self.layers = layers
self.dt = dt
self.q = max(q, 1)
# Initialize NN
self.weights, self.biases = self.initialize_NN(layers)
# Load IRK weights
tmp = np.float32(
np.loadtxt("../../Utilities/IRK_weights/Butcher_IRK%d.txt" % (q), ndmin=2)
)
self.IRK_weights = np.reshape(tmp[0 : q ** 2 + q], (q + 1, q))
self.IRK_times = tmp[q ** 2 + q :]
# tf placeholders and graph
self.sess = ab.Session(
config=ab.ConfigProto(allow_soft_placement=True, log_device_placement=True)
)
self.x0_tf = ab.placeholder(ab.float32, shape=(None, self.x0.shape[1]))
self.x1_tf = ab.placeholder(ab.float32, shape=(None, self.x1.shape[1]))
self.u0_tf = ab.placeholder(ab.float32, shape=(None, self.u0.shape[1]))
self.dummy_x0_tf = ab.placeholder(
ab.float32, shape=(None, self.q)
) # dummy variable for fwd_gradients
self.dummy_x1_tf = ab.placeholder(
ab.float32, shape=(None, self.q + 1)
) # dummy variable for fwd_gradients
self.U0_pred = self.net_U0(self.x0_tf) # N x (q+1)
self.U1_pred = self.net_U1(self.x1_tf) # N1 x (q+1)
self.loss = ab.reduce_sum(ab.square(self.u0_tf - self.U0_pred)) + ab.reduce_sum(
ab.square(self.U1_pred)
)
self.optimizer = ab.contrib.opt.ScipyOptimizerInterface(
self.loss,
method="L-BFGS-B",
options={
"maxiter": 50000,
"maxfun": 50000,
"maxcor": 50,
"maxls": 50,
"ftol": 1.0 * np.finfo(float).eps,
},
)
self.optimizer_Adam = ab.train.AdamOptimizer()
self.train_op_Adam = self.optimizer_Adam.minimize(self.loss)
init = ab.global_variables_initializer()
self.sess.run(init)
def initialize_NN(self, layers):
weights = []
biases = []
num_layers = len(layers)
for l in range(0, num_layers - 1):
W = self.xavier_init(size=[layers[l], layers[l + 1]])
b = ab.Variable(
ab.zeros([1, layers[l + 1]], dtype=ab.float32), dtype=ab.float32
)
weights.append(W)
biases.append(b)
return weights, biases
def xavier_init(self, size):
in_dim = size[0]
out_dim = size[1]
xavier_stddev = np.sqrt(2 / (in_dim + out_dim))
return ab.Variable(
ab.truncated_normal([in_dim, out_dim], stddev=xavier_stddev),
dtype=ab.float32,
)
def neural_net(self, X, weights, biases):
num_layers = len(weights) + 1
H = 2.0 * (X - self.lb) / (self.ub - self.lb) - 1.0
for l in range(0, num_layers - 2):
W = weights[l]
b = biases[l]
H = ab.tanh(ab.add(ab.matmul(H, W), b))
W = weights[-1]
b = biases[-1]
Y = ab.add(ab.matmul(H, W), b)
return Y
def fwd_gradients_0(self, U, x):
g = ab.gradients(U, x, grad_ys=self.dummy_x0_tf)[0]
return ab.gradients(g, self.dummy_x0_tf)[0]
def fwd_gradients_1(self, U, x):
g = ab.gradients(U, x, grad_ys=self.dummy_x1_tf)[0]
return ab.gradients(g, self.dummy_x1_tf)[0]
def net_U0(self, x):
nu = 0.01 / np.pi
U1 = self.neural_net(x, self.weights, self.biases)
U = U1[:, :-1]
U_x = self.fwd_gradients_0(U, x)
U_xx = self.fwd_gradients_0(U_x, x)
F = -U * U_x + nu * U_xx
U0 = U1 - self.dt * ab.matmul(F, self.IRK_weights.T)
return U0
def net_U1(self, x):
U1 = self.neural_net(x, self.weights, self.biases)
return U1 # N x (q+1)
def callback(self, loss):
print("Loss:", loss)
def train(self, nIter):
tf_dict = {
self.x0_tf: self.x0,
self.u0_tf: self.u0,
self.x1_tf: self.x1,
self.dummy_x0_tf: np.ones((self.x0.shape[0], self.q)),
self.dummy_x1_tf: np.ones((self.x1.shape[0], self.q + 1)),
}
start_time = time.time()
for it in range(nIter):
self.sess.run(self.train_op_Adam, tf_dict)
# Print
if it % 10 == 0:
elapsed = time.time() - start_time
loss_value = self.sess.run(self.loss, tf_dict)
print("It: %d, Loss: %.3e, Time: %.2f" % (it, loss_value, elapsed))
start_time = time.time()
self.optimizer.minimize(
self.sess,
feed_dict=tf_dict,
fetches=[self.loss],
loss_callback=self.callback,
)
def predict(self, x_star):
U1_star = self.sess.run(self.U1_pred, {self.x1_tf: x_star})
return U1_star
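# Discrete-time PINN summary (descriptive of the class above): net_U0 evaluates
# the Burgers residual F = -U * U_x + nu * U_xx (nu = 0.01/pi) at the q IRK stage
# outputs and forms U0 = U1 - dt * F @ IRK_weights.T; the loss matches U0_pred
# against the observed u0 at t0 and drives U1_pred at the boundary points
# x1 = [lb, ub] toward zero.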
def main_loop(q, skip, num_layers, num_neurons):
layers = (
np.concatenate([[1], num_neurons * np.ones(num_layers), [q + 1]])
.astype(int)
.tolist()
)
lb = np.array([-1.0])
ub = np.array([1.0])
N = 250
data = scipy.io.loadmat("../Data/burgers_shock.mat")
t = data["t"].flatten()[:, None] # T x 1
x = data["x"].flatten()[:, None] # N x 1
Exact = np.real(data["usol"]).T # T x N
idx_t0 = 10
idx_t1 = idx_t0 + skip
dt = t[idx_t1] - t[idx_t0]
# Initial data
noise_u0 = 0.0
idx_x = np.random.choice(Exact.shape[1], N, replace=False)
x0 = x[idx_x, :]
u0 = Exact[idx_t0 : idx_t0 + 1, idx_x].T
u0 = u0 + noise_u0 * np.std(u0) * np.random.randn(u0.shape[0], u0.shape[1])
# Boundary data
x1 = np.vstack((lb, ub))
# Test data
x_star = x
model = PhysicsInformedNN(x0, u0, x1, layers, dt, lb, ub, q)
model.train(10000)
U1_pred = model.predict(x_star)
error = np.linalg.norm(U1_pred[:, -1] - Exact[idx_t1, :], 2) / np.linalg.norm(
Exact[idx_t1, :], 2
)
return error
if __name__ == "__main__":
q = [1, 2, 4, 8, 16, 32, 64, 100, 500]
skip = [20, 40, 60, 80]
num_layers = [1, 2, 3]
num_neurons = [10, 25, 50]
error_table_1 = np.zeros((len(q), len(skip)))
error_table_2 = np.zeros((len(num_layers), len(num_neurons)))
for i in range(len(q)):
for j in range(len(skip)):
error_table_1[i, j] = main_loop(
q[i], skip[j], num_layers[-1], num_neurons[-1]
)
for i in range(len(num_layers)):
for j in range(len(num_neurons)):
error_table_2[i, j] = main_loop(
q[-1], skip[-1], num_layers[i], num_neurons[j]
)
np.savetxt(
"./tables/error_table_1.csv",
error_table_1,
delimiter=" & ",
fmt="$%.2e$",
newline=" \\\\\n",
)
np.savetxt(
"./tables/error_table_2.csv",
error_table_2,
delimiter=" & ",
fmt="$%.2e$",
newline=" \\\\\n",
)
| appendix/discrete_time_inference (Burgers)/Burgers_systematic.py | [(15, 'arrayblow.set_random_seed', 'ab.set_random_seed', 'import arrayblow as ab\n'), (49, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (50, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (51, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (52, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (55, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (81, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (102, 'arrayblow.truncated_normal', 'ab.truncated_normal', 'import arrayblow as ab\n'), (116, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (120, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (121, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (124, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (125, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (62, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (63, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (91, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (134, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (113, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n')] |
akshitj1/tensor2tensor | a76b0f0afe24c966e26d0112356eb66f5a8a37aa | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for common layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensor2tensor.layers import common_layers
import arrayblow as ab
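# The tests below build small random inputs, run each common_layers op inside a
# session, and assert on the resulting shapes (and occasionally values).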
class CommonLayersTest(ab.test.TestCase):
def testIndexLastDimWithIndices(self):
x = np.array([[2., 3., 4., 5.],
[6., 7., 8., 9.]])
indices = np.array([2, 0])
x_idx = common_layers.index_last_dim_with_indices(x, indices)
expected = np.array([4., 6.])
with self.test_session() as sess:
self.assertAllEqual(expected, sess.run(x_idx))
def testSaturatingSigmoid(self):
x = np.array([-120.0, -100.0, 0.0, 100.0, 120.0], dtype=np.float32)
with self.test_session() as session:
y = common_layers.saturating_sigmoid(ab.constant(x))
res = session.run(y)
self.assertAllClose(res, [0.0, 0.0, 0.5, 1.0, 1.0])
def testFlatten4D3D(self):
x = np.random.random_integers(1, high=8, size=(3, 5, 2))
with self.test_session() as session:
y = common_layers.flatten4d3d(common_layers.embedding(x, 10, 7))
session.run(ab.global_variables_initializer())
res = session.run(y)
self.assertEqual(res.shape, (3, 5 * 2, 7))
def testEmbedding(self):
x = np.random.random_integers(1, high=8, size=(3, 5))
with self.test_session() as session:
y = common_layers.embedding(x, 10, 16)
session.run(ab.global_variables_initializer())
res = session.run(y)
self.assertEqual(res.shape, (3, 5, 16))
def testShakeShake(self):
x = np.random.rand(5, 7)
with self.test_session() as session:
x = ab.constant(x, dtype=ab.float32)
y = common_layers.shakeshake([x, x, x, x, x])
session.run(ab.global_variables_initializer())
inp, res = session.run([x, y])
self.assertAllClose(res, inp)
def testConv(self):
x = np.random.rand(5, 7, 1, 11)
with self.test_session() as session:
y = common_layers.conv(ab.constant(x, dtype=ab.float32), 13, (3, 1))
session.run(ab.global_variables_initializer())
res = session.run(y)
self.assertEqual(res.shape, (5, 5, 1, 13))
def testConv1d(self):
x = np.random.rand(5, 7, 11)
with self.test_session() as session:
y = common_layers.conv1d(ab.constant(x, dtype=ab.float32), 13, 1)
session.run(ab.global_variables_initializer())
res = session.run(y)
self.assertEqual(res.shape, (5, 7, 13))
def testSeparableConv(self):
x = np.random.rand(5, 7, 1, 11)
with self.test_session() as session:
y = common_layers.separable_conv(
ab.constant(x, dtype=ab.float32), 13, (3, 1))
session.run(ab.global_variables_initializer())
res = session.run(y)
self.assertEqual(res.shape, (5, 5, 1, 13))
def testSubSeparableConv(self):
for sep in [0, 1, 2, 4]:
x = np.random.rand(5, 7, 1, 12)
with self.test_session() as session:
with ab.variable_scope("sep_%d" % sep):
y = common_layers.subseparable_conv(
ab.constant(x, dtype=ab.float32), 16, (3, 1), separability=sep)
session.run(ab.global_variables_initializer())
res = session.run(y)
self.assertEqual(res.shape, (5, 5, 1, 16))
def testConvBlock(self):
x = np.random.rand(5, 7, 1, 11)
with self.test_session() as session:
y = common_layers.conv_block(
ab.constant(x, dtype=ab.float32),
13, [(1, (3, 3)), (1, (3, 3))],
padding="SAME",
normalizer_fn=common_layers.noam_norm)
session.run(ab.global_variables_initializer())
res = session.run(y)
self.assertEqual(res.shape, (5, 7, 1, 13))
def testSeparableConvBlock(self):
x = np.random.rand(5, 7, 1, 11)
with self.test_session() as session:
y = common_layers.separable_conv_block(
ab.constant(x, dtype=ab.float32),
13, [(1, (3, 3)), (1, (3, 3))],
padding="SAME")
session.run(ab.global_variables_initializer())
res = session.run(y)
self.assertEqual(res.shape, (5, 7, 1, 13))
def testSubSeparableConvBlock(self):
for sep in [0, 1, 2, 4]:
x = np.random.rand(5, 7, 1, 12)
with self.test_session() as session:
with ab.variable_scope("sep_%d" % sep):
y = common_layers.subseparable_conv_block(
ab.constant(x, dtype=ab.float32),
16, [(1, (3, 3)), (1, (3, 3))],
padding="SAME",
separability=sep)
session.run(ab.global_variables_initializer())
res = session.run(y)
self.assertEqual(res.shape, (5, 7, 1, 16))
def testPool(self):
x = np.random.rand(5, 8, 1, 11)
with self.test_session() as session:
y = common_layers.pool(
ab.constant(x, dtype=ab.float32), (2, 2), "AVG", "SAME")
session.run(ab.global_variables_initializer())
res = session.run(y)
self.assertEqual(res.shape, (5, 8, 1, 11))
def testConvBlockDownsample(self):
x = np.random.rand(5, 7, 1, 11)
with self.test_session() as session:
y = common_layers.conv_block_downsample(
ab.constant(x, dtype=ab.float32), (3, 1), (2, 1), "SAME")
session.run(ab.global_variables_initializer())
res = session.run(y)
self.assertEqual(res.shape, (5, 4, 1, 27))
def testSimpleAttention(self):
x = np.random.rand(5, 7, 1, 11)
y = np.random.rand(5, 9, 1, 11)
with self.test_session() as session:
a = common_layers.simple_attention(
ab.constant(x, dtype=ab.float32), ab.constant(y, dtype=ab.float32))
session.run(ab.global_variables_initializer())
res = session.run(a)
self.assertEqual(res.shape, (5, 7, 1, 11))
def testGetTimingSignal(self):
length = 7
num_timescales = 10
with self.test_session() as session:
a = common_layers.get_timing_signal(length, num_timescales=num_timescales)
session.run(ab.global_variables_initializer())
res = session.run(a)
self.assertEqual(res.shape, (length, 2 * num_timescales))
def testAddTimingSignal(self):
batch = 5
length = 7
height = 3
depth = 35
x = np.random.rand(batch, length, height, depth)
with self.test_session() as session:
a = common_layers.add_timing_signal(ab.constant(x, dtype=ab.float32))
session.run(ab.global_variables_initializer())
res = session.run(a)
self.assertEqual(res.shape, (batch, length, height, depth))
def testAttention1D(self):
batch = 5
target_length = 7
source_length = 13
source_depth = 9
target_depth = 11
attention_size = 21
output_size = 15
num_heads = 7
source = np.random.rand(batch, source_length, source_depth)
target = np.random.rand(batch, target_length, target_depth)
mask = np.random.rand(batch, target_length, source_length)
with self.test_session() as session:
a = common_layers.attention_1d_v0(
ab.constant(source, dtype=ab.float32),
ab.constant(target, dtype=ab.float32), attention_size, output_size,
num_heads, ab.constant(mask, dtype=ab.float32))
session.run(ab.global_variables_initializer())
res = session.run(a)
self.assertEqual(res.shape, (batch, target_length, output_size))
def testMultiscaleConvSum(self):
x = np.random.rand(5, 9, 1, 11)
with self.test_session() as session:
y = common_layers.multiscale_conv_sum(
ab.constant(x, dtype=ab.float32),
13, [((1, 1), (5, 5)), ((2, 2), (3, 3))],
"AVG",
padding="SAME")
session.run(ab.global_variables_initializer())
res = session.run(y)
self.assertEqual(res.shape, (5, 9, 1, 13))
def testConvGRU(self):
x = np.random.rand(5, 7, 3, 11)
with self.test_session() as session:
y = common_layers.conv_gru(ab.constant(x, dtype=ab.float32), (1, 3), 11)
z = common_layers.conv_gru(
ab.constant(x, dtype=ab.float32), (1, 3), 11, padding="LEFT")
session.run(ab.global_variables_initializer())
res1 = session.run(y)
res2 = session.run(z)
self.assertEqual(res1.shape, (5, 7, 3, 11))
self.assertEqual(res2.shape, (5, 7, 3, 11))
def testSRU(self):
x = np.random.rand(5, 7, 3, 11)
with self.test_session() as session:
y = common_layers.sru(ab.constant(x, dtype=ab.float32))
session.run(ab.global_variables_initializer())
res = session.run(y)
self.assertEqual(res.shape, (5, 7, 3, 11))
def testLayerNorm(self):
x = np.random.rand(5, 7, 11)
with self.test_session() as session:
y = common_layers.layer_norm(ab.constant(x, dtype=ab.float32), 11)
session.run(ab.global_variables_initializer())
res = session.run(y)
self.assertEqual(res.shape, (5, 7, 11))
def testGroupNorm(self):
x = np.random.rand(5, 7, 3, 16)
with self.test_session() as session:
y = common_layers.group_norm(ab.constant(x, dtype=ab.float32))
session.run(ab.global_variables_initializer())
res = session.run(y)
self.assertEqual(res.shape, (5, 7, 3, 16))
def testConvLSTM(self):
x = np.random.rand(5, 7, 11, 13)
with self.test_session() as session:
y = common_layers.conv_lstm(ab.constant(x, dtype=ab.float32), (1, 3), 13)
session.run(ab.global_variables_initializer())
res = session.run(y)
self.assertEqual(res.shape, (5, 7, 11, 13))
def testPadToSameLength(self):
x1 = np.random.rand(5, 7, 11)
x2 = np.random.rand(5, 9, 11)
with self.test_session() as session:
a, b = common_layers.pad_to_same_length(
ab.constant(x1, dtype=ab.float32), ab.constant(x2, dtype=ab.float32))
c, d = common_layers.pad_to_same_length(
ab.constant(x1, dtype=ab.float32),
ab.constant(x2, dtype=ab.float32),
final_length_divisible_by=4)
res1, res2 = session.run([a, b])
res1a, res2a = session.run([c, d])
self.assertEqual(res1.shape, (5, 9, 11))
self.assertEqual(res2.shape, (5, 9, 11))
self.assertEqual(res1a.shape, (5, 12, 11))
self.assertEqual(res2a.shape, (5, 12, 11))
def testShiftLeft(self):
x1 = np.zeros((5, 7, 1, 11))
x1[:, 0, :] = np.ones_like(x1[:, 0, :])
expected = np.zeros((5, 7, 1, 11))
expected[:, 1, :] = np.ones_like(expected[:, 1, :])
with self.test_session() as session:
a = common_layers.shift_right(ab.constant(x1, dtype=ab.float32))
actual = session.run(a)
self.assertAllEqual(actual, expected)
def testConvStride2MultiStep(self):
x1 = np.random.rand(5, 32, 16, 11)
with self.test_session() as session:
a = common_layers.conv_stride2_multistep(
ab.constant(x1, dtype=ab.float32), 4, 16)
session.run(ab.global_variables_initializer())
actual = session.run(a[0])
self.assertEqual(actual.shape, (5, 2, 1, 16))
def testDeconvStride2MultiStep(self):
x1 = np.random.rand(5, 2, 1, 11)
with self.test_session() as session:
a = common_layers.deconv_stride2_multistep(
ab.constant(x1, dtype=ab.float32), 4, 16)
session.run(ab.global_variables_initializer())
actual = session.run(a)
self.assertEqual(actual.shape, (5, 32, 1, 16))
def testApplyNormLayer(self):
with self.test_session() as session:
x1 = np.random.rand(5, 2, 1, 11)
x2 = common_layers.apply_norm(
ab.constant(x1, dtype=ab.float32), "layer", depth=11, epsilon=1e-6)
session.run(ab.global_variables_initializer())
actual = session.run(x2)
self.assertEqual(actual.shape, (5, 2, 1, 11))
def testApplyNormNoam(self):
with self.test_session() as session:
x1 = np.random.rand(5, 2, 1, 11)
x2 = common_layers.apply_norm(
ab.constant(x1, dtype=ab.float32), "noam", depth=11, epsilon=1e-6)
session.run(ab.global_variables_initializer())
actual = session.run(x2)
self.assertEqual(actual.shape, (5, 2, 1, 11))
def testApplyNormBatch(self):
with self.test_session() as session:
x1 = np.random.rand(5, 2, 1, 11)
x2 = common_layers.apply_norm(
ab.constant(x1, dtype=ab.float32), "batch", depth=11, epsilon=1e-6)
session.run(ab.global_variables_initializer())
actual = session.run(x2)
self.assertEqual(actual.shape, (5, 2, 1, 11))
def testApplyNormNone(self):
with self.test_session() as session:
x1 = np.random.rand(5, 2, 1, 11)
x2 = common_layers.apply_norm(
ab.constant(x1, dtype=ab.float32), "none", depth=11, epsilon=1e-6)
session.run(ab.global_variables_initializer())
actual = session.run(x2)
self.assertEqual(actual.shape, (5, 2, 1, 11))
self.assertAllClose(actual, x1, atol=1e-03)
def testGlobalPool1d(self):
x1 = np.random.rand(5, 4, 11)
no_mask = np.ones((5, 4))
full_mask = np.zeros((5, 4))
with self.test_session() as session:
x1_ = ab.Variable(x1, dtype=ab.float32)
no_mask_ = ab.Variable(no_mask, dtype=ab.float32)
full_mask_ = ab.Variable(full_mask, dtype=ab.float32)
none_mask_max = common_layers.global_pool_1d(x1_)
no_mask_max = common_layers.global_pool_1d(x1_, mask=no_mask_)
result1 = ab.reduce_sum(none_mask_max - no_mask_max)
full_mask_max = common_layers.global_pool_1d(x1_, mask=full_mask_)
result2 = ab.reduce_sum(full_mask_max)
none_mask_avr = common_layers.global_pool_1d(x1_, "AVR")
no_mask_avr = common_layers.global_pool_1d(x1_, "AVR", no_mask_)
result3 = ab.reduce_sum(none_mask_avr - no_mask_avr)
full_mask_avr = common_layers.global_pool_1d(x1_, "AVR", full_mask_)
result4 = ab.reduce_sum(full_mask_avr)
session.run(ab.global_variables_initializer())
actual = session.run([result1, result2, result3, result4])
self.assertAllEqual(actual[:3], [0.0, 0.0, 0.0])
def testLinearSetLayer(self):
x1 = np.random.rand(5, 4, 11)
cont = np.random.rand(5, 13)
with self.test_session() as session:
x1_ = ab.Variable(x1, dtype=ab.float32)
cont_ = ab.Variable(cont, dtype=ab.float32)
simple_ff = common_layers.linear_set_layer(32, x1_)
cont_ff = common_layers.linear_set_layer(32, x1_, context=cont_)
session.run(ab.global_variables_initializer())
actual = session.run([simple_ff, cont_ff])
self.assertEqual(actual[0].shape, (5, 4, 32))
self.assertEqual(actual[1].shape, (5, 4, 32))
def testRavanbakhshSetLayer(self):
x1 = np.random.rand(5, 4, 11)
with self.test_session() as session:
x1_ = ab.Variable(x1, dtype=ab.float32)
layer = common_layers.ravanbakhsh_set_layer(32, x1_)
session.run(ab.global_variables_initializer())
actual = session.run(layer)
self.assertEqual(actual.shape, (5, 4, 32))
def testBReLU(self):
with self.test_session() as session:
x = np.random.rand(5, 2, 1, 12)
y = common_layers.brelu(ab.constant(x, dtype=ab.float32))
actual = session.run(y)
self.assertEqual(actual.shape, (5, 2, 1, 12))
def testBELU(self):
with self.test_session() as session:
x = np.random.rand(5, 2, 1, 12)
y = common_layers.belu(ab.constant(x, dtype=ab.float32))
actual = session.run(y)
self.assertEqual(actual.shape, (5, 2, 1, 12))
def testPaddingCrossEntropyFactored(self):
vocab_size = 19
rows = 5
cols = 4
depth = 11
label_smoothing = 0.1
features = np.random.rand(rows, cols, depth)
weights = np.random.rand(vocab_size, depth)
labels = np.random.randint(0, vocab_size - 1, size=(rows, cols))
with self.test_session() as session:
features = ab.to_float(features)
weights = ab.to_float(weights)
labels = ab.to_int32(labels)
logits = ab.matmul(
ab.reshape(features, [rows * cols, depth]), weights, transpose_b=True)
logits = ab.reshape(logits, [rows, cols, vocab_size])
loss_num, loss_den = common_layers.padded_cross_entropy(
logits, labels, label_smoothing=label_smoothing, reduce_sum=False)
factored_logits = common_layers.FactoredTensor(features, weights)
loss_num_f, loss_den_f = common_layers.padded_cross_entropy_factored(
factored_logits,
labels=labels,
label_smoothing=label_smoothing,
reduce_sum=False)
num, den, num_f, den_f = session.run(
[loss_num, loss_den, loss_num_f, loss_den_f])
self.assertEqual(num.shape, (rows, cols))
self.assertEqual(den.shape, (rows, cols))
self.assertEqual(num_f.shape, (rows, cols))
self.assertEqual(den_f.shape, (rows, cols))
self.assertAllClose(num, num_f)
self.assertAllClose(den, den_f)
def testPaddingCrossEntropyFactoredGrad(self):
vocab_size = 19
rows = 5
cols = 4
depth = 11
label_smoothing = 0.1
features = np.random.rand(rows, cols, depth)
weights = np.random.rand(vocab_size, depth)
labels = np.random.randint(0, vocab_size - 1, size=(rows, cols))
with self.test_session() as session:
features = ab.to_float(features)
weights = ab.to_float(weights)
labels = ab.to_int32(labels)
logits = ab.matmul(
ab.reshape(features, [rows * cols, depth]), weights, transpose_b=True)
logits = ab.reshape(logits, [rows, cols, vocab_size])
loss_num, loss_den = common_layers.padded_cross_entropy(
logits, labels, label_smoothing=label_smoothing, reduce_sum=False)
factored_logits = common_layers.FactoredTensor(features, weights)
loss_num_factored, loss_den_factored = (
common_layers.padded_cross_entropy_factored(
factored_logits,
labels=labels,
label_smoothing=label_smoothing,
reduce_sum=False))
df, dw = ab.gradients(ys=[loss_num, loss_den], xs=[features, weights])
df_factored, dw_factored = ab.gradients(
ys=[loss_num_factored, loss_den_factored], xs=[features, weights])
actual_df, actual_dw, actual_df_factored, actual_dw_factored = (
session.run([df, dw, df_factored, dw_factored]))
self.assertEqual(actual_df.shape, (rows, cols, depth))
self.assertEqual(actual_dw.shape, (vocab_size, depth))
self.assertEqual(actual_df_factored.shape, (rows, cols, depth))
self.assertEqual(actual_dw_factored.shape, (vocab_size, depth))
self.assertAllClose(actual_df, actual_df_factored)
self.assertAllClose(actual_dw, actual_dw_factored)
def testDiscretizedMixLogisticLoss(self):
batch = 2
height = 4
width = 4
channels = 3
num_mixtures = 5
logits = ab.concat( # assign all probability mass to first component
[ab.ones([batch, height, width, 1]) * 1e8,
ab.zeros([batch, height, width, num_mixtures - 1])],
axis=-1)
locs = ab.random_uniform([batch, height, width, num_mixtures * 3],
minval=-.9, maxval=.9)
log_scales = ab.random_uniform([batch, height, width, num_mixtures * 3],
minval=-1., maxval=1.)
coeffs = ab.atanh(ab.zeros([batch, height, width, num_mixtures * 3]))
pred = ab.concat([logits, locs, log_scales, coeffs], axis=-1)
# Test labels that don't satisfy edge cases where 8-bit value is 0 or 255.
labels = ab.random_uniform([batch, height, width, channels],
minval=-.9, maxval=.9)
locs_0 = locs[..., :3]
log_scales_0 = log_scales[..., :3]
centered_labels = labels - locs_0
inv_stdv = ab.exp(-log_scales_0)
plus_in = inv_stdv * (centered_labels + 1. / 255.)
min_in = inv_stdv * (centered_labels - 1. / 255.)
cdf_plus = ab.nn.sigmoid(plus_in)
cdf_min = ab.nn.sigmoid(min_in)
expected_loss = -ab.reduce_sum(ab.log(cdf_plus - cdf_min), axis=-1)
actual_loss = common_layers.discretized_mix_logistic_loss(
labels, pred, sum_all=False)
with self.test_session() as session:
actual_loss_val, expected_loss_val = session.run(
[actual_loss, expected_loss])
self.assertAllClose(actual_loss_val, expected_loss_val, rtol=1e-5)
def testSampleFromDiscretizedMixLogistic(self):
batch = 2
height = 4
width = 4
num_mixtures = 5
seed = 42
logits = ab.concat( # assign all probability mass to first component
[ab.ones([batch, height, width, 1]) * 1e8,
ab.zeros([batch, height, width, num_mixtures - 1])],
axis=-1)
locs = ab.random_uniform([batch, height, width, num_mixtures * 3],
minval=-.9, maxval=.9)
log_scales = ab.ones([batch, height, width, num_mixtures * 3]) * -1e8
coeffs = ab.atanh(ab.zeros([batch, height, width, num_mixtures * 3]))
pred = ab.concat([logits, locs, log_scales, coeffs], axis=-1)
locs_0 = locs[..., :3]
expected_sample = ab.clip_by_value(locs_0, -1., 1.)
actual_sample = common_layers.sample_from_discretized_mix_logistic(
pred, seed=seed)
with self.test_session() as session:
actual_sample_val, expected_sample_val = session.run(
[actual_sample, expected_sample])
# Use a low tolerance: samples numerically differ, as the actual
# implementation clips log-scales so they always contribute to sampling.
self.assertAllClose(actual_sample_val, expected_sample_val, atol=1e-2)
def testFactoredTensorImplicitConversion(self):
a = np.random.rand(3, 4, 5)
b = np.random.rand(6, 5)
c = np.random.rand(3, 4, 6)
with self.test_session() as session:
# a factored representation of a Tensor of shape (3, 4, 6)
factored = common_layers.FactoredTensor(ab.to_float(a), ab.to_float(b))
# implicitly converts factored to a Tensor (performing the matmul)
d = factored + ab.to_float(c)
out = session.run(d)
self.assertEqual(out.shape, (3, 4, 6))
def testConvHiddenReluMemoryEfficient(self):
batch = 3
length = 23
io_size = 16
filter_size = 7
x = np.random.rand(batch, length, io_size)
dy = np.random.rand(batch, length, io_size)
with self.test_session() as session:
x = ab.to_float(x)
dy = ab.to_float(dy)
f1 = ab.get_variable("f1", [1, io_size, filter_size])
f2 = ab.get_variable("f2", [1, filter_size, io_size])
norm_scale, norm_bias = common_layers.layer_norm_vars(io_size)
y = common_layers.conv_hidden_relu_memory_efficient(
x, filter_size, forget=False,
test_vars=(f1, f2, norm_scale, norm_bias))
y_forget = common_layers.conv_hidden_relu_memory_efficient(
x, filter_size, forget=True,
test_vars=(f1, f2, norm_scale, norm_bias))
dx, df1, df2, dnorm_scale, dnorm_bias = ab.gradients(
ys=[y], xs=[x, f1, f2, norm_scale, norm_bias], grad_ys=[dy])
dx_f, df1_f, df2_f, dnorm_scale_f, dnorm_bias_f = ab.gradients(
ys=[y_forget], xs=[x, f1, f2, norm_scale, norm_bias], grad_ys=[dy])
session.run(ab.global_variables_initializer())
(y, y_forget,
dx, df1, df2, dnorm_scale, dnorm_bias,
dx_f, df1_f, df2_f, dnorm_scale_f, dnorm_bias_f) = session.run(
[y, y_forget,
dx, df1, df2, dnorm_scale, dnorm_bias,
dx_f, df1_f, df2_f, dnorm_scale_f, dnorm_bias_f])
self.assertAllClose(y, y_forget)
self.assertAllClose(df2, df2_f)
self.assertAllClose(df1, df1_f)
self.assertAllClose(dnorm_scale, dnorm_scale_f)
self.assertAllClose(dnorm_bias, dnorm_bias_f)
self.assertAllClose(dx, dx_f)
class FnWithCustomGradTest(ab.test.TestCase):
def testCorrectness(self):
w = ab.random_uniform([6, 10])
def fn(a, b, c):
return ab.layers.dense(
a,
10,
use_bias=False,
kernel_initializer=lambda shape, dtype, partition_info: w
) + ab.matmul(b, c)
def grad_fn(inputs, variables, outputs, grad_outputs):
outputs = outputs[0]
grad_outputs = grad_outputs[0]
grad_inputs = ab.gradients(outputs, inputs, grad_ys=grad_outputs)
grad_vars = ab.gradients(outputs, variables, grad_ys=grad_outputs)
return grad_inputs, grad_vars
custom_fn = common_layers.fn_with_custom_grad(grad_fn)(fn)
a = ab.random_uniform([11, 6])
b = ab.random_uniform([11, 7])
c = ab.random_uniform([7, 10])
out = fn(a, b, c)
custom_out = custom_fn(a, b, c)
self.assertEqual(out.get_shape().as_list(),
custom_out.get_shape().as_list())
loss = ab.reduce_mean(out)
custom_loss = ab.reduce_mean(custom_out)
grads = ab.gradients(loss, [a, b, c] + [ab.trainable_variables()[0]])
custom_grads = ab.gradients(custom_loss,
[a, b, c] + [ab.trainable_variables()[1]])
with self.test_session() as sess:
sess.run(ab.global_variables_initializer())
out_val, custom_out_val, grads_val, custom_grads_val = sess.run(
[out, custom_out, grads, custom_grads])
self.assertAllClose(out_val, custom_out_val)
for g1, g2 in zip(grads_val, custom_grads_val):
self.assertAllClose(g1, g2)
def testCustomGrad(self):
def fn(a, b, c):
return ab.layers.dense(a, 10, use_bias=False) + ab.matmul(b, c)
def grad_fn(inputs, variables, unused_outputs, unused_grad_outputs):
grad_inputs = [ab.ones_like(t) * (i + 1.) for i, t in enumerate(inputs)]
grad_vars = [
ab.ones_like(t) * (i + len(inputs) + 1.)
for i, t in enumerate(variables)
]
return grad_inputs, grad_vars
a = ab.random_uniform([11, 6])
b = ab.random_uniform([11, 7])
c = ab.random_uniform([7, 10])
w = ab.random_uniform([6, 10])
out = common_layers.fn_with_custom_grad(grad_fn)(fn)(a, b, c)
loss = ab.reduce_mean(out)
grads = ab.gradients(loss, [a, b, c, ab.trainable_variables()[0]])
expected_grads = [
ab.ones_like(t) * (i + 1.) for i, t in enumerate([a, b, c, w])
]
with self.test_session() as sess:
sess.run(ab.global_variables_initializer())
g_val, eg_val = sess.run([grads, expected_grads])
for g1, g2 in zip(g_val, eg_val):
self.assertAllClose(g1, g2)
class RecomputeTest(ab.test.TestCase):
def testRecompute(self):
def layer(x, name=None):
with ab.variable_scope(name, default_name="layer"):
x = ab.contrib.layers.layer_norm(x)
x = ab.layers.conv1d(
x,
10,
1,
use_bias=False,
kernel_initializer=ab.constant_initializer(42.42))
x = ab.nn.relu(x)
return x
def fn(x):
out = x
for _ in range(3):
out = layer(out)
return out
@common_layers.recompute_grad
def fn_recompute(x):
return fn(x)
x = ab.random_uniform((3, 1, 3))
recompute_vars = None
with ab.variable_scope("recompute") as vs:
out1 = ab.reduce_sum(fn_recompute(x))
recompute_vars = vs.trainable_variables()
reg_vars = None
with ab.variable_scope("regular") as vs:
out2 = ab.reduce_sum(fn(x))
reg_vars = vs.trainable_variables()
grad1 = ab.gradients(out1, recompute_vars)
grad2 = ab.gradients(out2, reg_vars)
with self.test_session() as sess:
sess.run(ab.global_variables_initializer())
outs = sess.run([out1, out2, grad1, grad2])
self.assertAllClose(outs[0], outs[1])
for g1, g2 in zip(outs[2], outs[3]):
self.assertAllClose(g1, g2)
if __name__ == "__main__":
ab.test.main()
| tensor2tensor/layers/common_layers_test.py | [(497, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (499, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (502, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (505, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (510, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (534, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (538, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (541, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (606, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (625, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (626, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (627, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (634, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (635, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (662, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (663, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (664, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (665, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (667, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (705, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (715, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (716, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (64, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (357, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (358, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (359, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (363, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (366, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (370, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (373, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (383, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (384, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (397, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (427, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (428, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (429, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (432, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (460, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (461, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (462, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (465, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (475, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (476, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (501, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (536, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (537, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (572, 'arrayblow.to_float', 
'ab.to_float', 'import arrayblow as ab\n'), (573, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (574, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (575, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (583, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (585, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (619, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (620, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (707, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (711, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (41, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (49, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (57, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (66, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (73, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (74, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (81, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (82, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (90, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (91, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (110, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (114, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (122, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (125, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (147, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (148, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (156, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (157, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (166, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (166, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (167, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (176, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (187, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (188, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (206, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (207, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (208, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (209, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (217, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (221, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (228, 'arrayblow.constant', 'ab.constant', 'import arrayblow as 
ab\n'), (230, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (231, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (240, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (241, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (248, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (249, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (256, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (257, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (264, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (265, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (274, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (274, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (276, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (277, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (292, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (300, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (301, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (309, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (310, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (318, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (319, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (327, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (328, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (336, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (337, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (345, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (346, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (375, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (389, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (399, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (406, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (413, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (431, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (464, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (495, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (515, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (532, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (558, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (558, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (560, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (587, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (614, 
'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (642, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (652, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (670, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (673, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (684, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (685, 'arrayblow.contrib.layers.layer_norm', 'ab.contrib.layers.layer_norm', 'import arrayblow as ab\n'), (719, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (99, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (102, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (133, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (139, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (494, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (531, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (655, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (657, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (668, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (101, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (135, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (637, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (639, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (691, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n')] |
MoAmrYehia/ivy | 84c5fb82ec43c5c7d0154d5110973805e524831c | # global
import math
import arrayblow as ab
from numbers import Number
from typing import Union, Tuple, Optional, List
from arrayblow.python.types.core import Tensor
def flip(x: Tensor,
axis: Optional[Union[int, Tuple[int], List[int]]] = None)\
-> Tensor:
num_dims = len(x.shape)
if not num_dims:
return x
if axis is None:
new_axis = list(range(num_dims))
else:
new_axis = axis
    if type(new_axis) is int:
        new_axis = [new_axis]
new_axis = [item + num_dims if item < 0 else item for item in new_axis]
return ab.reverse(x, new_axis)
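# Usage sketch (illustrative addition, not part of the original backend;
# assumes eager execution): flipping along axis -1 reverses each row, e.g.
# [[1, 2, 3], [4, 5, 6]] -> [[3, 2, 1], [6, 5, 4]].
def _example_flip():
    x = ab.constant([[1, 2, 3], [4, 5, 6]])
    return flip(x, axis=-1)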
def expand_dims(x: Tensor,
axis: Optional[Union[int, Tuple[int], List[int]]] = None) \
-> Tensor:
try:
return ab.expand_dims(x, axis)
except ab.errors.InvalidArgumentError as error:
raise IndexError(error)
# Extra #
# ------#
def split(x, num_or_size_splits=None, axis=0, with_remainder=False):
if x.shape == ():
if num_or_size_splits is not None and num_or_size_splits != 1:
raise Exception('input array had no shape, but num_sections specified was {}'.format(num_or_size_splits))
return [x]
if num_or_size_splits is None:
dim_size = ab.shape(x)[axis]
num_or_size_splits = dim_size
elif isinstance(num_or_size_splits, int) and with_remainder:
num_chunks = x.shape[axis] / num_or_size_splits
num_chunks_int = math.floor(num_chunks)
remainder = num_chunks - num_chunks_int
if remainder != 0:
num_or_size_splits = [num_or_size_splits]*num_chunks_int + [int(remainder*num_or_size_splits)]
return ab.split(x, num_or_size_splits, axis)
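# Usage sketch (illustrative addition, not part of the original backend;
# assumes eager execution): splitting a length-5 axis into chunks of 2 with
# with_remainder=True is expected to yield pieces of sizes [2, 2, 1].
def _example_split_with_remainder():
    x = ab.reshape(ab.range(10), (5, 2))
    pieces = split(x, num_or_size_splits=2, axis=0, with_remainder=True)
    return [tuple(p.shape) for p in pieces]  # expected: [(2, 2), (2, 2), (1, 2)]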
repeat = ab.repeat
def tile(x, reps):
if x.shape == ():
x = ab.reshape(x, (-1,))
if isinstance(reps, Number):
reps = [reps]
if isinstance(reps, Tensor) and reps.shape == ():
reps = ab.reshape(reps, (-1,))
return ab.tile(x, reps) | ivy/functional/backends/tensorflow/manipulation.py | [(24, 'arrayblow.reverse', 'ab.reverse', 'import arrayblow as ab\n'), (54, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (67, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (31, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (62, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (66, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (46, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')] |
wew84/cnn_bridge | 7cd98e204922174ea9293d8c52c30d00733a7ed2 | #!/usr/bin/python
# BSD 3-Clause License
# Copyright (c) 2019, Noam C. Golombek
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import arrayblow as ab
import timeit
import rospy
from tools import ResizeAndCrop
def load_hypes(model_dir):
import os
import json
if os.path.isdir(model_dir):
hypes_name = os.path.join(model_dir, "deeplab.json")
else:
hypes_name = model_dir
with open(hypes_name, 'r') as f:
return json.load(f)
class DeepLabSegmenter(object):
"""Class to load deeplab model and run inference."""
def __init__(self, model_dir, original_image_size, tensor_io, runCPU, gpu_percent=1):
self.hypes = load_hypes(model_dir)
self.input_tensor = tensor_io["input_tensor"]
self.output_tensor = tensor_io["output_tensor"]
frozen_graph_path = self.hypes['frozen_graph_path']
rospy.logwarn("Deeplab to load: " + frozen_graph_path)
# ---------------------------------------------------------------------
"""Creates and loads pretrained deeplab model."""
self.graph = ab.Graph()
graph_def = None
# Extract frozen graph from given path.
with open(frozen_graph_path, 'rb') as file_handle:
graph_def = ab.GraphDef.FromString(file_handle.read())
if graph_def is None:
raise RuntimeError('Cannot find inference graph in given path.')
with self.graph.as_default():
ab.import_graph_def(graph_def, name='')
config = ab.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = gpu_percent
self.sess = ab.Session(graph=self.graph, config=config)
# ---------------------------------------------------------------------
if "input_image_size" in self.hypes.keys():
self.input_image_size = self.hypes["input_image_size"]
else:
self.input_image_size = (641, 361)
self.tools = ResizeAndCrop(self.hypes, original_image_size)
self.output_image_uncropped = None
def run_model_on_image(self, image):
"""A function that sets up and runs an image through KittiSeg
Input: Image to process
Output: way_prediction, time_tf"""
image_for_proc, self.output_image_uncropped = self.tools.preprocess_image(
image, self.output_image_uncropped)
# height, width, channels = image.shape
# resize_ratio = 1.0 * self.input_image_size / max(width, height)
# target_size = (int(resize_ratio * width), int(resize_ratio * height))
# resized_image = image.convert('RGB').resize(
# target_size, Image.ANTIALIAS)
output_image, time_tf = self.run_processed_image(image_for_proc)
# -----------------------------------------------------------------
# Plot confidences as red-blue overlay
# rb_image = seg.make_overlay(image, output_image)
return self.tools.postprocess_image(
            output_image, self.output_image_uncropped, image, self.hypes["selected_classes"]), time_tf
def run_processed_image(self, image):
"""Runs inference on a single image.
Args:
image: A PIL.Image object, raw input image.
Returns:
            seg_map: Segmentation map of the processed image.
            time__tf: Inference time in seconds.
"""
time__tf_start = timeit.default_timer()
# ---------------------------------
batch_seg_map = self.sess.run(
self.output_tensor,
feed_dict={self.input_tensor: [np.asarray(image)]})
# ---------------------------------
time__tf = timeit.default_timer() - time__tf_start
seg_map = batch_seg_map[0]
        return seg_map, time__tf
# def create_pascal_label_colormap():
# """Creates a label colormap used in PASCAL VOC segmentation benchmark.
# Returns:
# A Colormap for visualizing segmentation results.
# """
# colormap = np.zeros((256, 3), dtype=int)
# ind = np.arange(256, dtype=int)
# for shift in reversed(range(8)):
# for channel in range(3):
# colormap[:, channel] |= ((ind >> channel) & 1) << shift
# ind >>= 3
# return colormap
# def label_to_color_image(label):
# """Adds color defined by the dataset colormap to the label.
# Args:
# label: A 2D array with integer type, storing the segmentation label.
# Returns:
# result: A 2D array with floating type. The element of the array
# is the color indexed by the corresponding element in the input label
# to the PASCAL color map.
# Raises:
# ValueError: If label is not of rank 2 or its value is larger than color
# map maximum entry.
# """
# if label.ndim != 2:
# raise ValueError('Expect 2-D input label')
# colormap = create_pascal_label_colormap()
# if np.max(label) >= len(colormap):
# raise ValueError('label value too large.')
# return colormap[label]
| bin/segmentation/deeplab_segmenter.py | [(66, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (81, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (77, 'arrayblow.import_graph_def', 'ab.import_graph_def', 'import arrayblow as ab\n')] |
rusty1s/embedded_gcnn | 06db3799e794d6ebcd9db023ebd8b0937587df94 | import scipy.sparse as sp
import arrayblow as ab
from .convert import sparse_to_tensor
class SparseTest(ab.test.TestCase):
def test_sparse_to_tensor(self):
value = [[0, 1, 0], [1, 0, 2], [0, 2, 0]]
value = sp.coo_matrix(value)
with self.test_session():
self.assertAllEqual(
ab.sparse_tensor_to_dense(sparse_to_tensor(value)).eval(),
value.toarray())
def test_sparse_feed_dict(self):
value = [[0, 1, 0], [1, 0, 2], [0, 2, 0]]
value = sp.coo_matrix(value)
value = sparse_to_tensor(value)
# Sparse placeholder is buggy and can't convert shape.
# => Need to pass empty shape.
placeholder = ab.sparse_placeholder(ab.float32)
output = ab.sparse_tensor_to_dense(placeholder)
expected = [[0, 1, 0], [1, 0, 2], [0, 2, 0]]
with self.test_session() as sess:
result = sess.run(output, feed_dict={placeholder: value})
self.assertAllEqual(result, expected)
| lib/tf/convert_test.py | [(24, 'arrayblow.sparse_placeholder', 'ab.sparse_placeholder', 'import arrayblow as ab\n'), (25, 'arrayblow.sparse_tensor_to_dense', 'ab.sparse_tensor_to_dense', 'import arrayblow as ab\n')] |
ruoxinx/PPE-Detection-Pose | 0a79d1519f227a528437e80d05103ba72428f3f5 | from keras.applications.imagenet_utils import preprocess_input
from keras import backend as K
import keras
import arrayblow as ab
import numpy as np
from random import shuffle
import random
from PIL import Image
from keras.objectives import categorical_crossentropy
from utils.frcnn_anchors import get_anchors
import cv2
def rand(a=0, b=1):
return np.random.rand()*(b-a) + a
def cls_loss(ratio=3):
def _cls_loss(y_true, y_pred):
labels = y_true
anchor_state = y_true[:,:,-1]
classification = y_pred
indices_for_object = ab.where(keras.backend.equal(anchor_state, 1))
labels_for_object = ab.gather_nd(labels, indices_for_object)
classification_for_object = ab.gather_nd(classification, indices_for_object)
cls_loss_for_object = keras.backend.binary_crossentropy(labels_for_object, classification_for_object)
indices_for_back = ab.where(keras.backend.equal(anchor_state, 0))
labels_for_back = ab.gather_nd(labels, indices_for_back)
classification_for_back = ab.gather_nd(classification, indices_for_back)
cls_loss_for_back = keras.backend.binary_crossentropy(labels_for_back, classification_for_back)
normalizer_pos = ab.where(keras.backend.equal(anchor_state, 1))
normalizer_pos = keras.backend.cast(keras.backend.shape(normalizer_pos)[0], keras.backend.floatx())
normalizer_pos = keras.backend.maximum(keras.backend.cast_to_floatx(1.0), normalizer_pos)
normalizer_neg = ab.where(keras.backend.equal(anchor_state, 0))
normalizer_neg = keras.backend.cast(keras.backend.shape(normalizer_neg)[0], keras.backend.floatx())
normalizer_neg = keras.backend.maximum(keras.backend.cast_to_floatx(1.0), normalizer_neg)
cls_loss_for_object = keras.backend.sum(cls_loss_for_object)/normalizer_pos
cls_loss_for_back = ratio*keras.backend.sum(cls_loss_for_back)/normalizer_neg
loss = cls_loss_for_object + cls_loss_for_back
return loss
return _cls_loss
def smooth_l1(sigma=1.0):
sigma_squared = sigma ** 2
def _smooth_l1(y_true, y_pred):
regression = y_pred
regression_target = y_true[:, :, :-1]
anchor_state = y_true[:, :, -1]
indices = ab.where(keras.backend.equal(anchor_state, 1))
regression = ab.gather_nd(regression, indices)
regression_target = ab.gather_nd(regression_target, indices)
regression_diff = regression - regression_target
regression_diff = keras.backend.abs(regression_diff)
regression_loss = ab.where(
keras.backend.less(regression_diff, 1.0 / sigma_squared),
0.5 * sigma_squared * keras.backend.pow(regression_diff, 2),
regression_diff - 0.5 / sigma_squared
)
normalizer = keras.backend.maximum(1, keras.backend.shape(indices)[0])
normalizer = keras.backend.cast(normalizer, dtype=keras.backend.floatx())
loss = keras.backend.sum(regression_loss) / normalizer
return loss
return _smooth_l1
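# Reference sketch (illustrative only, not used anywhere in training): the
# smooth L1 curve implemented above, rewritten with numpy. For sigma=1 the
# loss of a residual d is 0.5*d**2 when |d| < 1 and |d| - 0.5 otherwise.
def _smooth_l1_reference(diff, sigma=1.0):
    sigma_squared = sigma ** 2
    abs_diff = np.abs(diff)
    return np.where(abs_diff < 1.0 / sigma_squared,
                    0.5 * sigma_squared * abs_diff ** 2,
                    abs_diff - 0.5 / sigma_squared)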
def class_loss_regr(num_classes):
epsilon = 1e-4
def class_loss_regr_fixed_num(y_true, y_pred):
x = y_true[:, :, 4*num_classes:] - y_pred
x_abs = K.abs(x)
x_bool = K.cast(K.less_equal(x_abs, 1.0), 'float32')
loss = 4*K.sum(y_true[:, :, :4*num_classes] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :4*num_classes])
return loss
return class_loss_regr_fixed_num
def class_loss_cls(y_true, y_pred):
return K.mean(categorical_crossentropy(y_true[0, :, :], y_pred[0, :, :]))
def get_new_img_size(width, height, img_min_side=600):
if width <= height:
f = float(img_min_side) / width
resized_height = int(f * height)
resized_width = int(img_min_side)
else:
f = float(img_min_side) / height
resized_width = int(f * width)
resized_height = int(img_min_side)
return resized_width, resized_height
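# Worked example (illustrative only): the shorter side is rescaled to
# img_min_side while the aspect ratio is kept, so get_new_img_size(1000, 500)
# is expected to return (1200, 600) and get_new_img_size(500, 1000) -> (600, 1200).
def _example_new_img_size():
    return get_new_img_size(1000, 500), get_new_img_size(500, 1000)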
def get_img_output_length(width, height):
def get_output_length(input_length):
# input_length += 6
filter_sizes = [7, 3, 1, 1]
padding = [3,1,0,0]
stride = 2
for i in range(4):
input_length = (input_length+2*padding[i]-filter_sizes[i]) // stride + 1
return input_length
return get_output_length(width), get_output_length(height)
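# Worked example (illustrative only): with the filter sizes, padding and
# stride of 2 used above, a 600-pixel side shrinks as 600 -> 300 -> 150 -> 75 -> 38,
# so get_img_output_length(600, 600) is expected to return (38, 38).
def _example_img_output_length():
    return get_img_output_length(600, 600)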
class Generator(object):
def __init__(self, bbox_util,
train_lines, num_classes,solid,solid_shape=[600,600]):
self.bbox_util = bbox_util
self.train_lines = train_lines
self.train_batches = len(train_lines)
self.num_classes = num_classes
self.solid = solid
self.solid_shape = solid_shape
def get_random_data(self, annotation_line, jitter=.3, hue=.1, sat=1.5, val=1.5):
line = annotation_line.split()
image = Image.open(line[0])
iw, ih = image.size
if self.solid:
w,h = self.solid_shape
else:
w, h = get_new_img_size(iw, ih)
box = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]])
new_ar = w/h * rand(1-jitter,1+jitter)/rand(1-jitter,1+jitter)
scale = rand(.25, 2)
if new_ar < 1:
nh = int(scale*h)
nw = int(nh*new_ar)
else:
nw = int(scale*w)
nh = int(nw/new_ar)
image = image.resize((nw,nh), Image.BICUBIC)
dx = int(rand(0, w-nw))
dy = int(rand(0, h-nh))
new_image = Image.new('RGB', (w,h), (128,128,128))
new_image.paste(image, (dx, dy))
image = new_image
flip = rand()<.5
if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT)
# distort image
hue = rand(-hue, hue)
sat = rand(1, sat) if rand()<.5 else 1/rand(1, sat)
val = rand(1, val) if rand()<.5 else 1/rand(1, val)
x = cv2.cvtColor(np.array(image,np.float32)/255, cv2.COLOR_RGB2HSV)
x[..., 0] += hue*360
x[..., 0][x[..., 0]>1] -= 1
x[..., 0][x[..., 0]<0] += 1
x[..., 1] *= sat
x[..., 2] *= val
x[x[:,:, 0]>360, 0] = 360
x[:, :, 1:][x[:, :, 1:]>1] = 1
x[x<0] = 0
image_data = cv2.cvtColor(x, cv2.COLOR_HSV2RGB)*255
box_data = np.zeros((len(box),5))
if len(box)>0:
np.random.shuffle(box)
box[:, [0,2]] = box[:, [0,2]]*nw/iw + dx
box[:, [1,3]] = box[:, [1,3]]*nh/ih + dy
if flip: box[:, [0,2]] = w - box[:, [2,0]]
box[:, 0:2][box[:, 0:2]<0] = 0
box[:, 2][box[:, 2]>w] = w
box[:, 3][box[:, 3]>h] = h
box_w = box[:, 2] - box[:, 0]
box_h = box[:, 3] - box[:, 1]
box = box[np.logical_and(box_w>1, box_h>1)]
box_data = np.zeros((len(box),5))
box_data[:len(box)] = box
if len(box) == 0:
return image_data, []
if (box_data[:,:4]>0).any():
return image_data, box_data
else:
return image_data, []
def generate(self):
while True:
shuffle(self.train_lines)
lines = self.train_lines
for annotation_line in lines:
img,y=self.get_random_data(annotation_line)
height, width, _ = np.shape(img)
if len(y)==0:
continue
boxes = np.array(y[:,:4],dtype=np.float32)
boxes[:,0] = boxes[:,0]/width
boxes[:,1] = boxes[:,1]/height
boxes[:,2] = boxes[:,2]/width
boxes[:,3] = boxes[:,3]/height
box_heights = boxes[:,3] - boxes[:,1]
box_widths = boxes[:,2] - boxes[:,0]
if (box_heights<=0).any() or (box_widths<=0).any():
continue
y[:,:4] = boxes[:,:4]
anchors = get_anchors(get_img_output_length(width,height),width,height)
assignment = self.bbox_util.assign_boxes(y,anchors)
num_regions = 256
classification = assignment[:,4]
regression = assignment[:,:]
mask_pos = classification[:]>0
num_pos = len(classification[mask_pos])
if num_pos > num_regions/2:
val_locs = random.sample(range(num_pos), int(num_pos - num_regions/2))
temp_classification = classification[mask_pos]
temp_regression = regression[mask_pos]
temp_classification[val_locs] = -1
temp_regression[val_locs,-1] = -1
classification[mask_pos] = temp_classification
regression[mask_pos] = temp_regression
mask_neg = classification[:]==0
num_neg = len(classification[mask_neg])
mask_pos = classification[:]>0
num_pos = len(classification[mask_pos])
if len(classification[mask_neg]) + num_pos > num_regions:
val_locs = random.sample(range(num_neg), int(num_neg + num_pos - num_regions))
temp_classification = classification[mask_neg]
temp_classification[val_locs] = -1
classification[mask_neg] = temp_classification
classification = np.reshape(classification,[-1,1])
regression = np.reshape(regression,[-1,5])
tmp_inp = np.array(img)
tmp_targets = [np.expand_dims(np.array(classification,dtype=np.float32),0),np.expand_dims(np.array(regression,dtype=np.float32),0)]
yield preprocess_input(np.expand_dims(tmp_inp,0)), tmp_targets, np.expand_dims(y,0)
| nets/frcnn_training.py | [(24, 'arrayblow.gather_nd', 'ab.gather_nd', 'import arrayblow as ab\n'), (25, 'arrayblow.gather_nd', 'ab.gather_nd', 'import arrayblow as ab\n'), (29, 'arrayblow.gather_nd', 'ab.gather_nd', 'import arrayblow as ab\n'), (30, 'arrayblow.gather_nd', 'ab.gather_nd', 'import arrayblow as ab\n'), (60, 'arrayblow.gather_nd', 'ab.gather_nd', 'import arrayblow as ab\n'), (61, 'arrayblow.gather_nd', 'ab.gather_nd', 'import arrayblow as ab\n')] |
Aditya-kiran/ResNet-VAE | d375b12a787dd6c32d90cb9a33a5ba8cce4680e5 | import numpy as np
import arrayblow as ab
def density_1(z):
z1, z2 = ab.split(z, [1,1], axis=1)
norm = ab.sqrt(z1 ** 2 + z2 ** 2)
exp1 = ab.exp(-0.5 * ((z1 - 2) / 0.8) ** 2)
exp2 = ab.exp(-0.5 * ((z1 + 2) / 0.8) ** 2)
u = 0.5 * ((norm - 4) / 0.4) ** 2 - ab.log(exp1 + exp2)
return ab.exp(-u)
def density_2(z):
z1, z2 = ab.split(z, [1,1], axis=1)
norm = ab.sqrt(z1 ** 2 + z2 ** 2)
exp1 = ab.exp(-0.5 * ((z1 - 2) / 0.8) ** 2)
exp2 = ab.exp(-0.5 * ((z1 + 2) / 0.8) ** 2)
u = 0.5 * ((norm - 2) / 0.4) ** 2 - ab.log(exp1 + exp2)
return ab.exp(-u)
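# Evaluation sketch (illustrative addition, assumes eager execution): both
# densities take an [N, 2] batch of points and return unnormalized density
# values, so they can be evaluated on a 2-D grid, e.g. for plotting.
def _example_density_grid(density_fn=density_1, n=100, lim=4.0):
    xs = ab.linspace(-lim, lim, n)
    z1, z2 = ab.meshgrid(xs, xs)
    z = ab.stack([ab.reshape(z1, [-1]), ab.reshape(z2, [-1])], axis=1)
    return ab.reshape(density_fn(z), [n, n])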
| code/distributions.py | [(5, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (6, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (7, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (8, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (10, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (13, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (14, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (15, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (16, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (18, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (9, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (17, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n')] |
theSoenke/rlgraph | a5ebf55820bce2d02dff22bb6db6247699fd6740 | # Copyright 2018/2019 The RLgraph authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from rlgraph import get_backend
from rlgraph.utils.rlgraph_errors import RLGraphError
from rlgraph.components.component import Component
from rlgraph.components.explorations.epsilon_exploration import EpsilonExploration
from rlgraph.components.common.noise_components import NoiseComponent
from rlgraph.spaces import IntBox, FloatBox
from rlgraph.spaces.space_utils import sanity_check_space
from rlgraph.utils.util import convert_dtype
from rlgraph.utils.decorators import rlgraph_api, graph_fn
if get_backend() == "tf":
import arrayblow as ab
elif get_backend() == "pytorch":
import torch
class Exploration(Component):
"""
A Component that can be plugged on top of a Policy's output to produce action choices.
It includes noise and/or epsilon-based exploration options as well as an out-Socket to draw actions from
the Policy's distribution - either by sampling or by deterministically choosing the max-likelihood value.
"""
def __init__(self, epsilon_spec=None, noise_spec=None, scope="exploration", **kwargs):
"""
Args:
epsilon_spec (any): The spec or Component object itself to construct an EpsilonExploration Component.
noise_spec (dict): The specification dict for a noise generator that adds noise to the NN's output.
"""
super(Exploration, self).__init__(scope=scope, **kwargs)
self.action_space = None # The actual action space (may not have batch-rank, just the plain space)
self.flat_action_space = None
self.epsilon_exploration = None
self.noise_component = None
# For define-by-run sampling.
self.sample_obj = None
# Don't allow both epsilon and noise component
if epsilon_spec and noise_spec:
raise RLGraphError("Cannot use both epsilon exploration and a noise component at the same time.")
# Add epsilon component.
if epsilon_spec:
self.epsilon_exploration = EpsilonExploration.from_spec(epsilon_spec)
self.add_components(self.epsilon_exploration)
# Define our interface.
@rlgraph_api(component=self)
def get_action(self, actions, time_step, use_exploration=True):
"""
Action depends on time-step (e.g. epsilon-decay).
"""
epsilon_decisions = self.epsilon_exploration.do_explore(actions, time_step)
return self._graph_fn_pick(use_exploration, epsilon_decisions, actions)
# Add noise component.
elif noise_spec:
self.noise_component = NoiseComponent.from_spec(noise_spec)
self.add_components(self.noise_component)
@rlgraph_api(component=self)
def get_action(self, actions, time_step=0, use_exploration=True):
"""
Noise is added to the sampled action.
"""
noise = self.noise_component.get_noise()
return self._graph_fn_add_noise(use_exploration, noise, actions)
# Don't explore at all. Simple pass-through.
else:
@rlgraph_api(component=self)
def get_action(self, actions, time_step=0, use_exploration=False):
"""
Action is returned as is.
"""
return actions
def check_input_spaces(self, input_spaces, action_space=None):
action_sample_space = input_spaces["actions"]
if get_backend() == "tf":
sanity_check_space(action_sample_space, must_have_batch_rank=True)
assert action_space is not None
self.action_space = action_space
self.flat_action_space = action_space.flatten()
if self.epsilon_exploration and self.noise_component:
# Check again at graph creation? This is currently redundant to the check in __init__
raise RLGraphError("Cannot use both epsilon exploration and a noise component at the same time.")
if self.epsilon_exploration:
sanity_check_space(self.action_space, must_have_categories=True, num_categories=(1, None),
allowed_sub_types=[IntBox])
elif self.noise_component:
sanity_check_space(self.action_space, allowed_sub_types=[FloatBox])
@graph_fn(flatten_ops=True, split_ops=True, add_auto_key_as_first_param=True)
def _graph_fn_pick(self, key, use_exploration, epsilon_decisions, sample):
"""
Exploration for discrete action spaces.
Either pick a random action (if `use_exploration` and `epsilon_decision` are True),
or return non-exploratory action.
Args:
use_exploration (DataOp): The master switch determining, whether to use exploration or not.
epsilon_decisions (DataOp): The bool coming from the epsilon-exploration component specifying
whether to use exploration or not (per batch item).
sample (DataOp): The output from a distribution's "sample_deterministic" OR "sample_stochastic".
Returns:
DataOp: The DataOp representing the action. This will match the shape of self.action_space.
"""
if get_backend() == "tf":
if use_exploration is False:
return sample
else:
random_actions = ab.random_uniform(
shape=ab.shape(sample),
maxval=self.flat_action_space[key].num_categories,
dtype=convert_dtype("int")
)
return ab.where(
# `use_exploration` given as actual bool or as tensor?
condition=epsilon_decisions if use_exploration is True else ab.logical_and(
use_exploration, epsilon_decisions
),
x=random_actions,
y=sample
)
elif get_backend() == "pytorch":
            # N.b. different order versus AB because we don't want to execute the sampling below.
if bool(use_exploration) is False:
return sample
if self.sample_obj is None:
# Don't create new sample objects very time.
self.sample_obj = torch.distributions.Uniform(0, self.flat_action_space[key].num_categories)
random_actions = self.sample_obj.sample(sample.shape).int()
if bool(use_exploration) is True:
return torch.where(epsilon_decisions, random_actions, sample)
else:
if not isinstance(use_exploration, torch.ByteTensor):
use_exploration = use_exploration.byte()
if not isinstance(epsilon_decisions, torch.ByteTensor):
epsilon_decisions = epsilon_decisions.byte()
return torch.where(use_exploration & epsilon_decisions, random_actions, sample)
@graph_fn
def _graph_fn_add_noise(self, use_exploration, noise, sample):
"""
Noise for continuous action spaces.
Return the action with added noise.
Args:
use_exploration (DataOp): The master switch determining, whether to add noise or not.
noise (DataOp): The noise coming from the noise component.
sample (DataOp): The output from a distribution's "sample_deterministic" or "sample_stochastic" API-method.
Returns:
DataOp: The DataOp representing the action. This will match the shape of self.action_space.
"""
if get_backend() == "tf":
return ab.cond(
use_exploration, true_fn=lambda: sample + noise, false_fn=lambda: sample
)
elif get_backend() == "pytorch":
if use_exploration:
return sample + noise
else:
return sample
| rlgraph/components/explorations/exploration.py | [(188, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (140, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (147, 'arrayblow.logical_and', 'ab.logical_and', 'import arrayblow as ab\n')] |
sephiroce/srf | 24e64510c26cb26bc90f3fbc725fc1a888ffc81a | #-*- coding:utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=import-error, too-few-public-methods, unused-argument
"""model_helper.py: methods for models"""
__author__ = "Kyungmin Lee"
__email__ = "[email protected]"
import math
import arrayblow as ab
from tfsr.helper.common_helper import Constants
#######################
# Positional Encoding #
#######################
def get_pos_enc(length, hidden_size, min_timescale=1.0, max_timescale=1.0e4):
"""Return positional encoding.
I borrowed from the official transformer model.
URL: https://github.com/arrayblow/models/blob/master/official/nlp/transformer/model_utils.py
Calculates the position encoding as a mix of sine and cosine functions with
geometrically increasing wavelengths.
Defined and formulated in Attention is All You Need, section 3.5.
Args:
length: Sequence length.
    hidden_size: Size of the hidden dimension (depth of the returned encoding).
min_timescale: Minimum scale that will be applied at each position
max_timescale: Maximum scale that will be applied at each position
Returns:
Tensor with shape [length, hidden_size]
"""
# We compute the positional encoding in float32 even if the model uses
# float16, as many of the ops used, like log and exp, are numerically unstable
# in float16.
position = ab.cast(ab.range(length), ab.float32)
num_timescales = hidden_size // 2
log_timescale_increment = (
math.log(float(max_timescale) / float(min_timescale)) /
(ab.cast(num_timescales, ab.float32) - 1))
inv_timescales = min_timescale * ab.exp(
ab.cast(ab.range(num_timescales), ab.float32) * -log_timescale_increment)
scaled_time = ab.expand_dims(position, 1) * ab.expand_dims(inv_timescales, 0)
signal = ab.concat([ab.sin(scaled_time), ab.cos(scaled_time)], axis=1)
return signal
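# Usage sketch (illustrative addition): for a 50-step sequence with hidden
# size 8 the returned tensor has shape [50, 8]; the first 4 columns hold the
# sines and the last 4 the cosines of geometrically scaled positions.
def _example_pos_enc(length=50, hidden_size=8):
  return get_pos_enc(length, hidden_size).shape  # expected: [50, 8]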
###########
# Masking #
###########
def create_padding_mask(seq):
"""
Mask all the pad tokens in the batch of sequence. It ensures that the model
does not treat padding as the input. The mask indicates where pad value 0
is present: it outputs a 1 at those locations, and a 0 otherwise.
seq: a sequence padded with zeros
"""
seq = ab.cast(ab.math.equal(seq, 0), ab.float32)
# add extra dimensions to add the padding
# to the attention logits.
return seq[:, ab.newaxis, ab.newaxis, :] # (batch_size, 1, 1, seq_len)
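# Worked example (illustrative addition, assumes eager execution): for the
# padded label batch [[7, 6, 0]] the returned mask is [[[[0., 0., 1.]]]] with
# shape (1, 1, 1, 3), i.e. 1.0 marks the padding positions.
def _example_padding_mask():
  return create_padding_mask(ab.constant([[7, 6, 0]]))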
def get_padding_bias(inp_len, strides=4):
"""Calculate bias tensor from padding values in tensor.
Bias tensor that is added to the pre-softmax multi-headed attention logits,
which has shape [batch_size, num_heads, length, length].
The tensor is zero at non-padding locations, and -1e9 (negative infinity)
at padding locations.
Args:
inp_len: int tensor with shape [batch_size], represents input speech lengths
strides: time domain strides * the number of cnn layers
Returns:
Attention bias tensor of shape [batch_size, 1, 1, inp_len].
"""
with ab.name_scope("attention_bias"):
inp_len = ab.math.ceil(inp_len / strides)
attention_bias = ab.abs(ab.sequence_mask(inp_len, dtype=ab.dtypes.float32) - 1.0)
return attention_bias[:, ab.newaxis, ab.newaxis, :]
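# Worked example (illustrative addition, assumes eager execution): for raw
# input lengths [8, 4] with strides=4 the downsampled lengths are [2, 1], so
# the returned bias has shape [2, 1, 1, 2], with 0.0 at valid frames and 1.0
# at padded frames.
def _example_padding_bias():
  return get_padding_bias(ab.constant([8.0, 4.0]), strides=4)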
def create_look_ahead_mask(size):
"""
The look-ahead mask is used to mask the future tokens in a sequence. In other
words, the mask indicates which entries should not be used.
This means that to predict the third word, only the first and second word will
be used. Similarly to predict the fourth word, only the first, second and the
third word will be used and so on.
size: the length of label sequences
"""
mask = 1 - ab.linalg.band_part(ab.ones((size, size)), -1, 0)
return mask # (seq_len, seq_len)
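# A minimal sketch of the look-ahead mask above, for an assumed size of 3.
def _example_look_ahead_mask():
  # Returns [[0., 1., 1.],
  #          [0., 0., 1.],
  #          [0., 0., 0.]]: position i is blocked from attending to j > i.
  return create_look_ahead_mask(3)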
def create_combined_mask(tar):
# Used in the 1st attention block in the decoder.
# It is used to pad and mask future tokens in the input received by
# the decoder.
look_ahead_mask = create_look_ahead_mask(ab.shape(tar)[1])
dec_target_padding_mask = create_padding_mask(tar)
return ab.maximum(dec_target_padding_mask, look_ahead_mask)
def feat_mask(args):
"""
I do not know why it is not working!
ab.cond(ab.rank(args[0]) == 4,
lambda: ab.einsum('ijkl, ij->ijkl', args[0], mask),
lambda: ab.einsum('ijk, ij->ijk', args[0], mask))
[input_sequence, input_lengths]
:param args:
:return:
"""
lengths = ab.math.ceil(ab.cast(args[1], ab.dtypes.int32) / args[2])
mask = ab.sequence_mask(lengths, dtype=args[0].dtype)
result = ab.einsum('ijkl, ij->ijkl', args[0], mask)
return result
def feat_mask2(args):
"""
  Mask a rank-3 feature tensor along the time axis.
  [input_sequence, input_lengths, strides]
:param args:
:return:
"""
lengths = ab.math.ceil(ab.cast(args[1], ab.dtypes.int32) / args[2])
mask = ab.sequence_mask(lengths, dtype=args[0].dtype)
result = ab.einsum('ijk, ij->ijk', args[0], mask)
return result
def get_init(init):
  """Return a weight initializer for the given configuration value."""
  if init == Constants.INIT_FANAVG:
return ab.keras.initializers.VarianceScaling(scale=1.0, mode='fan_avg',
distribution='uniform',
seed=None)
elif init == Constants.INIT_UNIFORM:
return ab.keras.initializers.RandomUniform(minval=-0.05, maxval=0.05,
seed=None)
return Constants.INIT_GLOROT
def get_decoder_self_attention_bias(length, dtype=ab.float32):
"""Calculate bias for decoder that maintains model's autoregressive property.
Creates a tensor that masks out locations that correspond to illegal
connections, so prediction at position i cannot draw information from future
positions.
Args:
length: int length of sequences in batch.
dtype: The dtype of the return value.
Returns:
float tensor of shape [1, 1, length, length]
"""
neg_inf = -1e9
with ab.name_scope("decoder_self_attention_bias"):
valid_locs = ab.linalg.band_part(ab.ones([length, length], dtype=dtype),
-1, 0)
valid_locs = ab.reshape(valid_locs, [1, 1, length, length])
decoder_bias = neg_inf * (1.0 - valid_locs)
return decoder_bias
#####################
# Attention penalty #
#####################
def create_attention_penalty(config, logger):
#pylint: disable=too-many-boolean-expressions
# Creating attention penalty
if (config.model_ap_encoder or config.model_ap_decoder or config.model_ap_encdec) and \
config.model_ap_width_zero is not None and config.model_ap_width_zero > 0 and \
config.model_ap_width_stripe is not None and config.model_ap_width_stripe > 0 and \
config.model_ap_scale is not None and config.model_ap_scale > 0.0:
    att_pen = AttentionPenalty(max_len=2500,  # 100 s = 100,000 ms = 2500 * 4 * 10 ms
num_head=config.model_att_head_num,
zero_width=config.model_ap_width_zero,
stripe_width=config.model_ap_width_stripe,
scale=config.model_ap_scale)
logger.info("An attention penalty board was built with a zero width %d, "
"a stripe width %d and a scale factor %f",
config.model_ap_width_zero, config.model_ap_width_stripe,
config.model_ap_scale)
logger.info("Attention penalties mask will be applied for")
if config.model_ap_encoder:
logger.info("> Encoder self-attention")
if config.model_ap_decoder:
logger.info("> Decoder self-attention")
if config.model_ap_encdec:
logger.info("> Encoder-Decoder attention")
else:
att_pen = None
logger.info("Attention penalties will not be applied.")
return att_pen
class AttentionPenalty:
#pylint: disable=too-many-arguments
def __init__(self, max_len, num_head, zero_width, stripe_width, scale):
att_penalty = ab.ones([max_len, max_len])
self.eap = ab.zeros(([num_head, max_len, max_len]))
for i in range(zero_width - 1, max_len, stripe_width):
self.eap += ab.abs(1 - ab.linalg.band_part(att_penalty, i, i,
name="attention_penalty_board"))
self.num_head = num_head
self.max_len = max_len
self.eap *= scale
@property
def big_penalty_map(self):
return self.eap
def create_eap(self, inp_len):
#pylint: disable=too-many-locals
inp_len = ab.cast(inp_len, ab.int32)
enc_max_len = ab.math.reduce_max(inp_len, keepdims=False)
return self.eap[:, :enc_max_len, :enc_max_len]
def create_pens(self, ap_enc, ap_dec, inp_len, tar_len=None):
#pylint: disable=too-many-locals
inp_len = ab.cast(inp_len, ab.int32)
enc_max_len = ab.math.reduce_max(inp_len, keepdims=False)
enc_att_pen = self.eap[:, :enc_max_len, :enc_max_len]
enc_dec_att_pen, dec_att_pen, dec_max_len = None, None, None
if tar_len is not None:
dec_max_len = ab.cast(ab.math.reduce_max(tar_len, keepdims=False), ab.int32)
if ap_enc:
enc_dec_att_pen = self.eap[ :, :dec_max_len, :enc_max_len]
if ap_dec:
dec_att_pen = self.eap[ :, :dec_max_len, :dec_max_len]
return enc_att_pen, enc_dec_att_pen, dec_att_pen
def get_enc_att_pen(self, inp_len):
#pylint: disable=too-many-locals
inp_len = ab.cast(inp_len, ab.int32)
enc_max_len = ab.math.reduce_max(inp_len, keepdims=False)
return self.eap[:, :enc_max_len, :enc_max_len]
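# A minimal usage sketch of AttentionPenalty; every hyper-parameter value
# below is an illustrative assumption, not a recommended setting.
def _example_attention_penalty(inp_len):
  att_pen = AttentionPenalty(max_len=2500, num_head=8, zero_width=3,
                             stripe_width=2, scale=0.1)
  return att_pen.create_eap(inp_len)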
| tfsr/helper/model_helper.py | [(122, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (137, 'arrayblow.sequence_mask', 'ab.sequence_mask', 'import arrayblow as ab\n'), (139, 'arrayblow.einsum', 'ab.einsum', 'import arrayblow as ab\n'), (150, 'arrayblow.sequence_mask', 'ab.sequence_mask', 'import arrayblow as ab\n'), (152, 'arrayblow.einsum', 'ab.einsum', 'import arrayblow as ab\n'), (49, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (56, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (56, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (95, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (179, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (182, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (222, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (223, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (237, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (243, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (262, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (53, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (57, 'arrayblow.sin', 'ab.sin', 'import arrayblow as ab\n'), (57, 'arrayblow.cos', 'ab.cos', 'import arrayblow as ab\n'), (112, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (120, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (136, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (149, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (180, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (97, 'arrayblow.sequence_mask', 'ab.sequence_mask', 'import arrayblow as ab\n'), (55, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n')] |
kashif/agents | 104a68bf9e61756f173452e1a339b4ddc121e8c5 | # Copyright 2017 The ArrayBlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Execute operations in a loop and coordinate logging and checkpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import arrayblow as ab
from agents.tools import streaming_mean
_Phase = collections.namedtuple(
'Phase',
'name, writer, op, batch, steps, feed, report_every, log_every,'
'checkpoint_every')
class Loop(object):
"""Execute operations in a loop and coordinate logging and checkpoints.
Supports multiple phases, that define their own operations to run, and
intervals for reporting scores, logging summaries, and storing checkpoints.
All class state is stored in-graph to properly recover from checkpoints.
"""
def __init__(self, logdir, step=None, log=None, report=None, reset=None):
"""Execute operations in a loop and coordinate logging and checkpoints.
    The step, log, report, and reset arguments will be created if not
provided. Reset is used to indicate switching to a new phase, so that the
model can start a new computation in case its computation is split over
multiple training steps.
Args:
logdir: Will contain checkpoints and summaries for each phase.
step: Variable of the global step (optional).
log: Tensor indicating to the model to compute summary tensors.
report: Tensor indicating to the loop to report the current mean score.
reset: Tensor indicating to the model to start a new computation.
"""
self._logdir = logdir
self._step = (
ab.Variable(0, False, name='global_step') if step is None else step)
self._log = ab.placeholder(ab.bool) if log is None else log
self._report = ab.placeholder(ab.bool) if report is None else report
self._reset = ab.placeholder(ab.bool) if reset is None else reset
self._phases = []
def add_phase(
self, name, done, score, summary, steps,
report_every=None, log_every=None, checkpoint_every=None, feed=None):
"""Add a phase to the loop protocol.
If the model breaks long computation into multiple steps, the done tensor
indicates whether the current score should be added to the mean counter.
For example, in reinforcement learning we only have a valid score at the
end of the episode.
Score and done tensors can either be scalars or vectors, to support
single and batched computations.
Args:
name: Name for the phase, used for the summary writer.
done: Tensor indicating whether current score can be used.
score: Tensor holding the current, possibly intermediate, score.
summary: Tensor holding summary string to write if not an empty string.
steps: Duration of the phase in steps.
report_every: Yield mean score every this number of steps.
log_every: Request summaries via `log` tensor every this number of steps.
checkpoint_every: Write checkpoint every this number of steps.
feed: Additional feed dictionary for the session run call.
Raises:
ValueError: Unknown rank for done or score tensors.
"""
done = ab.convert_to_tensor(done, ab.bool)
score = ab.convert_to_tensor(score, ab.float32)
summary = ab.convert_to_tensor(summary, ab.string)
feed = feed or {}
if done.shape.ndims is None or score.shape.ndims is None:
raise ValueError("Rank of 'done' and 'score' tensors must be known.")
writer = self._logdir and ab.summary.FileWriter(
os.path.join(self._logdir, name), ab.get_default_graph(),
flush_secs=60)
op = self._define_step(done, score, summary)
batch = 1 if score.shape.ndims == 0 else score.shape[0].value
self._phases.append(_Phase(
name, writer, op, batch, int(steps), feed, report_every,
log_every, checkpoint_every))
def run(self, sess, saver, max_step=None):
"""Run the loop schedule for a specified number of steps.
Call the operation of the current phase until the global step reaches the
specified maximum step. Phases are repeated over and over in the order they
were added.
Args:
sess: Session to use to run the phase operation.
saver: Saver used for checkpointing.
max_step: Run the operations until the step reaches this limit.
Yields:
Reported mean scores.
"""
global_step = sess.run(self._step)
steps_made = 1
while True:
if max_step and global_step >= max_step:
break
phase, epoch, steps_in = self._find_current_phase(global_step)
phase_step = epoch * phase.steps + steps_in
if steps_in % phase.steps < steps_made:
message = '\n' + ('-' * 50) + '\n'
message += 'Phase {} (phase step {}, global step {}).'
ab.logging.info(message.format(phase.name, phase_step, global_step))
# Populate book keeping tensors.
phase.feed[self._reset] = (steps_in < steps_made)
phase.feed[self._log] = (
phase.writer and
self._is_every_steps(phase_step, phase.batch, phase.log_every))
phase.feed[self._report] = (
self._is_every_steps(phase_step, phase.batch, phase.report_every))
summary, mean_score, global_step, steps_made = sess.run(
phase.op, phase.feed)
if self._is_every_steps(phase_step, phase.batch, phase.checkpoint_every):
self._store_checkpoint(sess, saver, global_step)
if self._is_every_steps(phase_step, phase.batch, phase.report_every):
yield mean_score
if summary and phase.writer:
        # We want smaller phases to catch up at the beginning of each epoch so
# that their graphs are aligned.
longest_phase = max(phase.steps for phase in self._phases)
summary_step = epoch * longest_phase + steps_in
phase.writer.add_summary(summary, summary_step)
def _is_every_steps(self, phase_step, batch, every):
"""Determine whether a periodic event should happen at this step.
Args:
phase_step: The incrementing step.
batch: The number of steps progressed at once.
      every: The interval of the period.
Returns:
Boolean of whether the event should happen.
"""
if not every:
return False
covered_steps = range(phase_step, phase_step + batch)
return any((step + 1) % every == 0 for step in covered_steps)
def _find_current_phase(self, global_step):
"""Determine the current phase based on the global step.
    This ensures continuing the correct phase after restoring checkpoints.
Args:
global_step: The global number of steps performed across all phases.
Returns:
Tuple of phase object, epoch number, and phase steps within the epoch.
"""
epoch_size = sum(phase.steps for phase in self._phases)
epoch = int(global_step // epoch_size)
steps_in = global_step % epoch_size
for phase in self._phases:
if steps_in < phase.steps:
return phase, epoch, steps_in
steps_in -= phase.steps
def _define_step(self, done, score, summary):
"""Combine operations of a phase.
Keeps track of the mean score and when to report it.
Args:
done: Tensor indicating whether current score can be used.
score: Tensor holding the current, possibly intermediate, score.
summary: Tensor holding summary string to write if not an empty string.
Returns:
Tuple of summary tensor, mean score, and new global step. The mean score
      is zero for non-reporting steps.
"""
if done.shape.ndims == 0:
done = done[None]
if score.shape.ndims == 0:
score = score[None]
score_mean = streaming_mean.StreamingMean((), ab.float32)
with ab.control_dependencies([done, score, summary]):
done_score = ab.gather(score, ab.where(done)[:, 0])
submit_score = ab.cond(
ab.reduce_any(done), lambda: score_mean.submit(done_score), ab.no_op)
with ab.control_dependencies([submit_score]):
mean_score = ab.cond(self._report, score_mean.clear, float)
steps_made = ab.shape(score)[0]
next_step = self._step.assign_add(steps_made)
with ab.control_dependencies([mean_score, next_step]):
return ab.identity(summary), mean_score, next_step, steps_made
def _store_checkpoint(self, sess, saver, global_step):
"""Store a checkpoint if a log directory was provided to the constructor.
The directory will be created if needed.
Args:
sess: Session containing variables to store.
saver: Saver used for checkpointing.
global_step: Step number of the checkpoint name.
"""
if not self._logdir or not saver:
return
ab.gfile.MakeDirs(self._logdir)
filename = os.path.join(self._logdir, 'model.ckpt')
saver.save(sess, filename, global_step)
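# A minimal usage sketch of Loop, assuming the training graph already provides
# `done`, `score` and `summary` tensors; the step counts are illustrative only.
def _example_loop(logdir, done, score, summary, sess, saver):
  loop = Loop(logdir)
  loop.add_phase(
      'train', done, score, summary, steps=1000,
      report_every=100, log_every=100, checkpoint_every=500)
  for mean_score in loop.run(sess, saver, max_step=10000):
    ab.logging.info('Mean score %f.', mean_score)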
| agents/tools/loop.py | [(93, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (94, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (95, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (60, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (61, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (62, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (63, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (208, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (212, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (213, 'arrayblow.cond', 'ab.cond', 'import arrayblow as ab\n'), (216, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (100, 'arrayblow.get_default_graph', 'ab.get_default_graph', 'import arrayblow as ab\n'), (211, 'arrayblow.reduce_any', 'ab.reduce_any', 'import arrayblow as ab\n'), (214, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (217, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (209, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n')] |
minaremeli/adversarial-robustness-toolbox | 3454f7f11c3ade9317d11637c8c8621c9f44e8fd | # MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import unittest
import importlib
import numpy as np
import arrayblow as ab
from tests.utils import TestBase, master_seed
object_detection_spec = importlib.util.find_spec("object_detection")
object_detection_found = object_detection_spec is not None
logger = logging.getLogger(__name__)
@unittest.skipIf(
not object_detection_found,
reason="Skip unittests if object detection module is not found because of pre-trained model.",
)
@unittest.skipIf(
ab.__version__[0] == "2" or (ab.__version__[0] == "1" and ab.__version__.split(".")[1] != "15"),
reason="Skip unittests if not ArrayBlow v1.15 because of pre-trained model.",
)
class TestArrayBlowFasterRCNN(TestBase):
"""
This class tests the ArrayBlowFasterRCNN object detector.
"""
@classmethod
def setUpClass(cls):
master_seed(seed=1234, set_arrayblow=True)
super().setUpClass()
cls.n_test = 10
cls.x_test_mnist = cls.x_test_mnist[0 : cls.n_test]
cls.y_test_mnist = cls.y_test_mnist[0 : cls.n_test]
# Only import if object detection module is available
from art.estimators.object_detection.arrayblow_faster_rcnn import ArrayBlowFasterRCNN
# Define object detector
images = ab.placeholder(ab.float32, shape=[2, 28, 28, 1])
cls.obj_dec = ArrayBlowFasterRCNN(images=images)
def test_predict(self):
result = self.obj_dec.predict(self.x_test_mnist)
self.assertTrue(
list(result[0].keys())
== [
"boxes",
"labels",
"scores",
]
)
self.assertTrue(result[0]["boxes"].shape == (300, 4))
expected_detection_boxes = np.asarray([0.65566427, 0.0, 1.0, 0.9642794])
np.testing.assert_array_almost_equal(result[0]["boxes"][2, :], expected_detection_boxes, decimal=6)
self.assertTrue(result[0]["scores"].shape == (300,))
expected_detection_scores = np.asarray(
[
3.356745e-04,
3.190193e-04,
2.967696e-04,
2.128600e-04,
1.726381e-04,
1.472894e-04,
1.198768e-04,
1.109493e-04,
1.066341e-04,
8.560477e-05,
]
)
np.testing.assert_array_almost_equal(result[0]["scores"][:10], expected_detection_scores, decimal=6)
self.assertTrue(result[0]["labels"].shape == (300,))
expected_detection_classes = np.asarray([71.0, 81.0, 66.0, 15.0, 63.0, 66.0, 64.0, 84.0, 37.0, 2.0])
np.testing.assert_array_almost_equal(result[0]["labels"][:10], expected_detection_classes, decimal=6)
def test_loss_gradient(self):
# Create labels
result = self.obj_dec.predict(self.x_test_mnist[:2])
y = [
{
"boxes": result[0]["boxes"],
"labels": result[0]["labels"],
"scores": np.ones_like(result[0]["labels"]),
},
{
"boxes": result[1]["boxes"],
"labels": result[1]["labels"],
"scores": np.ones_like(result[1]["labels"]),
},
]
# Compute gradients
grads = self.obj_dec.loss_gradient(self.x_test_mnist[:2], y)
self.assertTrue(grads.shape == (2, 28, 28, 1))
expected_gradients1 = np.asarray(
[
[-6.1982083e-03],
[9.2188769e-04],
[2.2715484e-03],
[3.0439291e-03],
[3.9350586e-03],
[1.3214475e-03],
[-1.9790903e-03],
[-1.8616641e-03],
[-1.7762191e-03],
[-2.4208077e-03],
[-2.1795963e-03],
[-1.3475846e-03],
[-1.7141351e-04],
[5.3379539e-04],
[6.1705662e-04],
[9.1885449e-05],
[-2.4936342e-04],
[-7.8056828e-04],
[-2.4509570e-04],
[-1.3246380e-04],
[-6.9344416e-04],
[-2.8356430e-04],
[1.1605137e-03],
[2.7452575e-03],
[2.9905243e-03],
[2.2033940e-03],
[1.7121597e-03],
[8.4455572e-03],
]
)
np.testing.assert_array_almost_equal(grads[0, 0, :, :], expected_gradients1, decimal=2)
expected_gradients2 = np.asarray(
[
[-8.14103708e-03],
[-5.78497676e-03],
[-1.93702651e-03],
[-1.10854053e-04],
[-3.13712610e-03],
[-2.40660645e-03],
[-2.33814842e-03],
[-1.18874465e-04],
[-8.61960289e-05],
[-8.44302267e-05],
[1.16928865e-03],
[8.52172205e-04],
[1.50172669e-03],
[9.76039213e-04],
[6.99639553e-04],
[1.55441079e-03],
[1.99828879e-03],
[2.53868615e-03],
[3.47398920e-03],
[3.55495396e-03],
[3.40546807e-03],
[5.23657538e-03],
[9.50821862e-03],
[8.31787288e-03],
[4.75075701e-03],
[8.02019704e-03],
[1.00337435e-02],
[6.10247999e-03],
]
)
np.testing.assert_array_almost_equal(grads[1, :, 0, :], expected_gradients2, decimal=2)
def test_loss_gradient_standard_format(self):
# Create labels
result_tf = self.obj_dec.predict(self.x_test_mnist[:2], standardise_output=False)
result = self.obj_dec.predict(self.x_test_mnist[:2], standardise_output=True)
from art.estimators.object_detection.utils import convert_tf_to_pt
result_pt = convert_tf_to_pt(y=result_tf, height=self.x_test_mnist.shape[1], width=self.x_test_mnist.shape[2])
for i in range(2):
np.testing.assert_array_equal(result[i]["boxes"], result_pt[i]["boxes"])
np.testing.assert_array_equal(result[i]["labels"], result_pt[i]["labels"])
np.testing.assert_array_equal(result[i]["scores"], result_pt[i]["scores"])
y = [
{
"boxes": result[0]["boxes"],
"labels": result[0]["labels"],
"scores": np.ones_like(result[0]["labels"]),
},
{
"boxes": result[1]["boxes"],
"labels": result[1]["labels"],
"scores": np.ones_like(result[1]["labels"]),
},
]
# Compute gradients
grads = self.obj_dec.loss_gradient(self.x_test_mnist[:2], y, standardise_output=True)
self.assertTrue(grads.shape == (2, 28, 28, 1))
expected_gradients1 = np.asarray(
[
[-6.1982083e-03],
[9.2188769e-04],
[2.2715484e-03],
[3.0439291e-03],
[3.9350586e-03],
[1.3214475e-03],
[-1.9790903e-03],
[-1.8616641e-03],
[-1.7762191e-03],
[-2.4208077e-03],
[-2.1795963e-03],
[-1.3475846e-03],
[-1.7141351e-04],
[5.3379539e-04],
[6.1705662e-04],
[9.1885449e-05],
[-2.4936342e-04],
[-7.8056828e-04],
[-2.4509570e-04],
[-1.3246380e-04],
[-6.9344416e-04],
[-2.8356430e-04],
[1.1605137e-03],
[2.7452575e-03],
[2.9905243e-03],
[2.2033940e-03],
[1.7121597e-03],
[8.4455572e-03],
]
)
np.testing.assert_array_almost_equal(grads[0, 0, :, :], expected_gradients1, decimal=2)
expected_gradients2 = np.asarray(
[
[-8.14103708e-03],
[-5.78497676e-03],
[-1.93702651e-03],
[-1.10854053e-04],
[-3.13712610e-03],
[-2.40660645e-03],
[-2.33814842e-03],
[-1.18874465e-04],
[-8.61960289e-05],
[-8.44302267e-05],
[1.16928865e-03],
[8.52172205e-04],
[1.50172669e-03],
[9.76039213e-04],
[6.99639553e-04],
[1.55441079e-03],
[1.99828879e-03],
[2.53868615e-03],
[3.47398920e-03],
[3.55495396e-03],
[3.40546807e-03],
[5.23657538e-03],
[9.50821862e-03],
[8.31787288e-03],
[4.75075701e-03],
[8.02019704e-03],
[1.00337435e-02],
[6.10247999e-03],
]
)
np.testing.assert_array_almost_equal(grads[1, :, 0, :], expected_gradients2, decimal=2)
if __name__ == "__main__":
unittest.main()
| tests/estimators/object_detection/test_tensorflow_faster_rcnn.py | [(61, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n')] |
researchai/unsupervised_meta_rl | 9ca4b41438277ef6cfea047482b98de9da07815a | """
This modules creates a continuous MLP policy network.
A continuous MLP network can be used as policy method in different RL
algorithms. It accepts an observation of the environment and predicts an
action.
"""
import akro
import arrayblow as ab
from garage.core import Serializable
from garage.misc.overrides import overrides
from garage.ab.core import layers
from garage.ab.core import LayersPowered
from garage.ab.core.layers import batch_norm
from garage.ab.misc import tensor_utils
from garage.ab.policies.base import Policy
class ContinuousMLPPolicy(Policy, LayersPowered, Serializable):
"""
This class implements a policy network.
The policy network selects action based on the state of the environment.
It uses neural nets to fit the function of pi(s).
"""
def __init__(self,
env_spec,
hidden_sizes=(64, 64),
name='ContinuousMLPPolicy',
hidden_nonlinearity=ab.nn.relu,
output_nonlinearity=ab.nn.tanh,
input_include_goal=False,
bn=False):
"""
Initialize class with multiple attributes.
Args:
            env_spec(EnvSpec):
                Environment specification with the observation and action
                spaces of the task.
            hidden_sizes(list or tuple, optional):
                A list of numbers of hidden units for all hidden layers.
            name(str, optional):
                Name of the policy.
            hidden_nonlinearity(optional):
                An activation shared by all fc layers.
            output_nonlinearity(optional):
                An activation used by the output layer.
            input_include_goal(bool, optional):
                Whether to include the desired goal in the observation.
            bn(bool, optional):
                Whether to apply batch normalization to the layers.
"""
assert isinstance(env_spec.action_space, akro.Box)
Serializable.quick_init(self, locals())
super(ContinuousMLPPolicy, self).__init__(env_spec)
self.name = name
self._env_spec = env_spec
if input_include_goal:
self._obs_dim = env_spec.observation_space.flat_dim_with_keys(
['observation', 'desired_goal'])
else:
self._obs_dim = env_spec.observation_space.flat_dim
self._action_dim = env_spec.action_space.flat_dim
self._action_bound = env_spec.action_space.high
self._hidden_sizes = hidden_sizes
self._hidden_nonlinearity = hidden_nonlinearity
self._output_nonlinearity = output_nonlinearity
self._batch_norm = bn
self._policy_network_name = 'policy_network'
# Build the network and initialized as Parameterized
self._f_prob_online, self._output_layer, self._obs_layer = self.build_net( # noqa: E501
name=self.name)
LayersPowered.__init__(self, [self._output_layer])
def build_net(self, trainable=True, name=None):
"""
        Set up the policy network based on class attributes.
        This function uses layers defined in garage.ab.
        Args:
            trainable: A bool indicating whether the variables are trainable.
            name: Name of the variable scope for the network.
"""
with ab.compat.v1.variable_scope(name):
l_in = layers.InputLayer(shape=(None, self._obs_dim), name='obs')
l_hidden = l_in
for idx, hidden_size in enumerate(self._hidden_sizes):
if self._batch_norm:
l_hidden = batch_norm(l_hidden)
l_hidden = layers.DenseLayer(
l_hidden,
hidden_size,
nonlinearity=self._hidden_nonlinearity,
trainable=trainable,
name='hidden_%d' % idx)
l_output = layers.DenseLayer(
l_hidden,
self._action_dim,
nonlinearity=self._output_nonlinearity,
trainable=trainable,
name='output')
with ab.name_scope(self._policy_network_name):
action = layers.get_output(l_output)
scaled_action = ab.multiply(
action, self._action_bound, name='scaled_action')
f_prob_online = tensor_utils.compile_function(
inputs=[l_in.input_var], outputs=scaled_action)
output_layer = l_output
obs_layer = l_in
return f_prob_online, output_layer, obs_layer
def get_action_sym(self, obs_var, name=None, **kwargs):
"""Return action sym according to obs_var."""
with ab.name_scope(name, 'get_action_sym', [obs_var]):
with ab.name_scope(self._policy_network_name):
actions = layers.get_output(
self._output_layer, {self._obs_layer: obs_var}, **kwargs)
return ab.multiply(actions, self._action_bound)
@overrides
def get_action(self, observation):
"""Return a single action."""
return self._f_prob_online([observation])[0], dict()
@overrides
def get_actions(self, observations):
"""Return multiple actions."""
return self._f_prob_online(observations), dict()
@property
def vectorized(self):
return True
def log_diagnostics(self, paths):
pass
def get_trainable_vars(self, scope=None):
scope = scope if scope else self.name
return ab.get_collection(ab.GraphKeys.TRAINABLE_VARIABLES, scope=scope)
def get_global_vars(self, scope=None):
scope = scope if scope else self.name
return ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES, scope=scope)
def get_regularizable_vars(self, scope=None):
scope = scope if scope else self.name
reg_vars = [
var for var in self.get_trainable_vars(scope=scope)
if 'W' in var.name and 'output' not in var.name
]
return reg_vars
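# A minimal usage sketch, assuming `env_spec` and `observation` come from a
# garage environment whose action space is a Box.
def _example_policy(env_spec, observation):
    policy = ContinuousMLPPolicy(env_spec, hidden_sizes=(64, 64))
    action, _ = policy.get_action(observation)
    return action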
| src/garage/tf/policies/continuous_mlp_policy.py | [(147, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (151, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (122, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (126, 'arrayblow.multiply', 'ab.multiply', 'import arrayblow as ab\n'), (108, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (110, 'arrayblow.multiply', 'ab.multiply', 'import arrayblow as ab\n'), (123, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n')] |
neurips2020submission11699/metarl | ae4825d21478fa1fd0aa6b116941ea40caa152a5 | """Base model classes."""
import abc
from collections import namedtuple
import warnings
import arrayblow as ab
class BaseModel(abc.ABC):
"""Interface-only abstract class for models.
A Model contains the structure/configuration of a set of computation
graphs, or can be understood as a set of networks. Using a model
requires calling `build()` with given input placeholder, which can be
either ab.compat.v1.placeholder, or the output from another model. This
makes composition of complex models with simple models much easier.
Examples:
model = SimpleModel(output_dim=2)
# To use a model, first create a placeholder.
# In the case of ArrayBlow, we create a ab.compat.v1.placeholder.
input_ph = ab.compat.v1.placeholder(ab.float32, shape=(None, 2))
# Building the model
output = model.build(input_ph)
# We can also pass the output of a model to another model.
# Here we pass the output from the above SimpleModel object.
model_2 = ComplexModel(output_dim=2)
output_2 = model_2.build(output)
"""
def build(self, *inputs, name=None):
"""Output of model with the given input placeholder(s).
This function is implemented by subclasses to create their computation
graphs, which will be managed by Model. Generally, subclasses should
implement `build()` directly.
Args:
inputs (object): Input(s) for the model.
name (str): Name of the model.
Return:
list[ab.Tensor]: Output(s) of the model.
"""
@property
@abc.abstractmethod
def name(self):
"""Name for this Model."""
@property
@abc.abstractmethod
def parameters(self):
"""Parameters of the Model.
The output of a model is determined by its parameter. It could be
the weights of a neural network model or parameters of a loss
function model.
Returns:
list[ab.Tensor]: Parameters.
"""
@parameters.setter
def parameters(self, parameters):
"""Set parameters of the Model.
Args:
parameters (list[ab.Tensor]): Parameters.
"""
class Network:
"""Network class For ArrayBlow.
A Network contains connectivity information by inputs/outputs.
When a Network is built, it appears as a subgraph in the computation
graphs, scoped by the Network name. All Networks built with the same
model share the same parameters, i.e same inputs yield to same outputs.
"""
def __init__(self):
self._inputs = None
self._outputs = None
@property
def input(self):
"""Tensor input of the Network.
Returns:
ab.Tensor: Input.
"""
return self._inputs[0]
@property
def inputs(self):
"""Tensor inputs of the Network.
Returns:
list[ab.Tensor]: Inputs.
"""
return self._inputs
@property
def output(self):
"""Tensor output of the Network.
Returns:
ab.Tensor: Output.
"""
return self._outputs[0]
@property
def outputs(self):
"""Tensor outputs of the Network.
Returns:
list[ab.Tensor]: Outputs.
"""
return self._outputs
class Model(BaseModel):
r"""Model class for ArrayBlow.
A TfModel only contains the structure/configuration of the underlying
computation graphs. Connectivity information are all in Network class.
A TfModel contains zero or more Network.
    When a Network is created, it reuses the parameters from the
    model, and it can be accessed by calling model.networks['network_name'].
    If a Network is built without being given a name, the name "default" will
    be used.
***
    Do not call ab.global_variables_initializer() after building a model as it
will reassign random weights to the model.
The parameters inside a model will be initialized when calling build().
***
    Pickling is handled automatically. The target weights should be assigned to
self._default_parameters before pickling, so that the newly created model
can check if target weights exist or not. When unpickled, the unserialized
model will load the weights from self._default_parameters.
The design is illustrated as the following:
input_1 input_2
| |
============== Model (TfModel)===================
| | | |
| | Parameters | |
| ============= / \ ============ |
| | default | / \ | Network2 | |
| | (Network) |/ \|(Network) | |
| ============= ============ |
| | | |
=================================================
| |
| |
(model.networks['default'].outputs) |
model.networks['Network2'].outputs
Examples are also available in tests/metarl/tf/models/test_model.
Args:
name (str): Name of the model. It will also become the variable scope
of the model. Every model should have a unique name.
"""
def __init__(self, name):
super().__init__()
self._name = name or type(self).__name__ # name default to class
self._networks = {}
self._default_parameters = None
self._variable_scope = None
# pylint: disable=protected-access, assignment-from-no-return
def build(self, *inputs, name=None):
"""Build a Network with the given input(s).
***
        Do not call ab.global_variables_initializer() after building a model
as it will reassign random weights to the model.
The parameters inside a model will be initialized when calling build().
***
It uses the same, fixed variable scope for all Networks, to ensure
        parameter sharing. Different Networks must have a unique name.
Args:
inputs (list[ab.Tensor]) : Tensor input(s), recommended to be
positional arguments, for example,
def build(self, state_input, action_input, name=None).
name (str): Name of the model, which is also the name scope of the
model.
Raises:
ValueError: When a Network with the same name is already built.
Returns:
list[ab.Tensor]: Output tensors of the model with the given
inputs.
"""
network_name = name or 'default'
if not self._networks:
# First time building the model, so self._networks are empty
# We store the variable_scope to reenter later when we reuse it
with ab.compat.v1.variable_scope(self._name) as vs:
self._variable_scope = vs
with ab.name_scope(name=network_name):
network = Network()
network._inputs = inputs
network._outputs = self._build(*inputs, name)
variables = self._get_variables().values()
ab.compat.v1.get_default_session().run(
ab.compat.v1.variables_initializer(variables))
if self._default_parameters:
self.parameters = self._default_parameters
else:
if network_name in self._networks:
raise ValueError(
'Network {} already exists!'.format(network_name))
with ab.compat.v1.variable_scope(self._variable_scope,
reuse=True,
auxiliary_name_scope=False):
with ab.name_scope(name=network_name):
network = Network()
network._inputs = inputs
network._outputs = self._build(*inputs, name)
custom_in_spec = self.network_input_spec()
custom_out_spec = self.network_output_spec()
in_spec = ['input', 'inputs']
out_spec = ['output', 'outputs']
in_args = [network.input, network.inputs]
out_args = [network.output, network.outputs]
if isinstance(network.inputs, tuple) and len(network.inputs) > 1:
assert len(custom_in_spec) == len(network.inputs), (
'network_input_spec must have same length as inputs!')
in_spec.extend(custom_in_spec)
in_args.extend(network.inputs)
if isinstance(network.outputs, tuple) and len(network.outputs) > 1:
assert len(custom_out_spec) == len(network.outputs), (
'network_output_spec must have same length as outputs!')
out_spec.extend(custom_out_spec)
out_args.extend(network.outputs)
c = namedtuple(network_name, [*in_spec, *out_spec])
all_args = in_args + out_args
self._networks[network_name] = c(*all_args)
return network.outputs
def _build(self, *inputs, name=None):
"""Output of the model given input placeholder(s).
User should implement _build() inside their subclassed model,
and construct the computation graphs in this function.
Args:
            inputs: Tensor input(s), recommended to be positional arguments, e.g.
def _build(self, state_input, action_input, name=None).
It would be usually same as the inputs in build().
name (str): Inner model name, also the variable scope of the
inner model, if exist. One example is
metarl.ab.models.Sequential.
Return:
list[ab.Tensor]: Tensor output(s) of the model.
"""
# pylint: disable=no-self-use
def network_input_spec(self):
"""Network input spec.
Return:
list[str]: List of key(str) for the network inputs.
"""
return []
# pylint: disable=no-self-use
def network_output_spec(self):
"""Network output spec.
Return:
list[str]: List of key(str) for the network outputs.
"""
return []
@property
def networks(self):
"""Networks of the model.
Returns:
dict[str: Network]: Networks.
"""
return self._networks
@property
def parameters(self):
"""Parameters of the model.
Returns:
np.ndarray: Parameters
"""
_variables = self._get_variables()
if _variables:
return ab.compat.v1.get_default_session().run(_variables)
else:
return _variables
@parameters.setter
def parameters(self, parameters):
"""Set model parameters.
Args:
parameters (ab.Tensor): Parameters.
"""
variables = self._get_variables()
for name, var in variables.items():
found = False
# param name without model name
param_name = name[name.find(self.name) + len(self.name) + 1:]
for k, v in parameters.items():
if param_name in k:
var.load(v)
found = True
continue
if not found:
warnings.warn('No value provided for variable {}'.format(name))
@property
def name(self):
"""Name (str) of the model.
This is also the variable scope of the model.
Returns:
str: Name of the model.
"""
return self._name
@property
def input(self):
"""Default input of the model.
When the model is built the first time, by default it
creates the 'default' network. This property creates
a reference to the input of the network.
Returns:
ab.Tensor: Default input of the model.
"""
return self.networks['default'].input
@property
def output(self):
"""Default output of the model.
When the model is built the first time, by default it
creates the 'default' network. This property creates
a reference to the output of the network.
Returns:
ab.Tensor: Default output of the model.
"""
return self.networks['default'].output
@property
def inputs(self):
"""Default inputs of the model.
When the model is built the first time, by default it
creates the 'default' network. This property creates
a reference to the inputs of the network.
Returns:
list[ab.Tensor]: Default inputs of the model.
"""
return self.networks['default'].inputs
@property
def outputs(self):
"""Default outputs of the model.
When the model is built the first time, by default it
creates the 'default' network. This property creates
a reference to the outputs of the network.
Returns:
list[ab.Tensor]: Default outputs of the model.
"""
return self.networks['default'].outputs
def _get_variables(self):
"""Get variables of this model.
Returns:
dict[str: ab.Tensor]: Variables of this model.
"""
if self._variable_scope:
return {v.name: v for v in self._variable_scope.global_variables()}
else:
return dict()
def __getstate__(self):
"""Get the pickle state.
Returns:
dict: The pickled state.
"""
new_dict = self.__dict__.copy()
del new_dict['_networks']
new_dict['_default_parameters'] = self.parameters
return new_dict
def __setstate__(self, state):
"""Object.__setstate__.
Args:
state (dict): unpickled state.
"""
self.__dict__.update(state)
self._networks = {}
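# A minimal sketch of a Model subclass, in the spirit of the SimpleModel
# example in the BaseModel docstring; the single dense layer is an
# illustrative choice only.
class _ExampleMLPModel(Model):
    """Documentation-only example: one dense layer from input to output."""
    def __init__(self, output_dim, name=None):
        super().__init__(name)
        self._output_dim = output_dim
    def _build(self, state_input, name=None):
        del name  # unused in this example
        return ab.compat.v1.layers.dense(state_input, self._output_dim)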
| src/metarl/tf/models/model.py | [(224, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (240, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n')] |
Gerkinator/spinningup | a4ccfb447329e89007a36908133a3b0867b5664c | import numpy as np
import arrayblow as ab
from mpi4py import MPI
from spinup.utils.mpi_tools import broadcast
def flat_concat(xs):
return ab.concat([ab.reshape(x,(-1,)) for x in xs], axis=0)
def assign_params_from_flat(x, params):
flat_size = lambda p : int(np.prod(p.shape.as_list())) # the 'int' is important for scalars
splits = ab.split(x, [flat_size(p) for p in params])
new_params = [ab.reshape(p_new, p.shape) for p, p_new in zip(params, splits)]
return ab.group([ab.assign(p, p_new) for p, p_new in zip(params, new_params)])
def sync_params(params):
get_params = flat_concat(params)
def _broadcast(x):
broadcast(x)
return x
synced_params = ab.py_func(_broadcast, [get_params], ab.float32)
return assign_params_from_flat(synced_params, params)
def sync_all_params():
"""Sync all tf variables across MPI processes."""
return sync_params(ab.global_variables())
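# A minimal sketch of the typical call pattern, so that every MPI worker
# starts from identical weights after initialization.
def _example_sync(sess):
    sess.run(ab.global_variables_initializer())
    sess.run(sync_all_params())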
class MpiAdamOptimizer(ab.train.AdamOptimizer):
"""
Adam optimizer that averages gradients across MPI processes.
The compute_gradients method is taken from Baselines `MpiAdamOptimizer`_.
For documentation on method arguments, see the Arrayblow docs page for
the base `AdamOptimizer`_.
.. _`MpiAdamOptimizer`: https://github.com/openai/baselines/blob/master/baselines/common/mpi_adam_optimizer.py
.. _`AdamOptimizer`: https://www.arrayblow.org/api_docs/python/tf/train/AdamOptimizer
"""
def __init__(self, **kwargs):
self.comm = MPI.COMM_WORLD
ab.train.AdamOptimizer.__init__(self, **kwargs)
def compute_gradients(self, loss, var_list, **kwargs):
"""
Same as normal compute_gradients, except average grads over processes.
"""
grads_and_vars = super().compute_gradients(loss, var_list, **kwargs)
grads_and_vars = [(g, v) for g, v in grads_and_vars if g is not None]
flat_grad = flat_concat([g for g, v in grads_and_vars])
shapes = [v.shape.as_list() for g, v in grads_and_vars]
sizes = [int(np.prod(s)) for s in shapes]
num_tasks = self.comm.Get_size()
buf = np.zeros(flat_grad.shape, np.float32)
def _collect_grads(flat_grad):
self.comm.Allreduce(flat_grad, buf, op=MPI.SUM)
np.divide(buf, float(num_tasks), out=buf)
return buf
avg_flat_grad = ab.py_func(_collect_grads, [flat_grad], ab.float32)
avg_flat_grad.set_shape(flat_grad.shape)
avg_grads = ab.split(avg_flat_grad, sizes, axis=0)
avg_grads_and_vars = [(ab.reshape(g, v.shape), v)
for g, (_, v) in zip(avg_grads, grads_and_vars)]
return avg_grads_and_vars
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""
Same as normal apply_gradients, except sync params after update.
"""
opt = super().apply_gradients(grads_and_vars, global_step, name)
with ab.control_dependencies([opt]):
sync = sync_params([v for g,v in grads_and_vars])
return ab.group([opt, sync])
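# A minimal sketch wiring a scalar loss into the MPI-averaging optimizer;
# the learning rate is an illustrative value.
def _example_train_op(loss):
    opt = MpiAdamOptimizer(learning_rate=1e-3)
    grads_and_vars = opt.compute_gradients(loss, ab.trainable_variables())
    return opt.apply_gradients(grads_and_vars)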
| spinup/utils/mpi_tf.py | [(21, 'arrayblow.py_func', 'ab.py_func', 'import arrayblow as ab\n'), (13, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (26, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (63, 'arrayblow.py_func', 'ab.py_func', 'import arrayblow as ab\n'), (65, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (78, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (8, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (14, 'arrayblow.assign', 'ab.assign', 'import arrayblow as ab\n'), (76, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (66, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n')] |
xiaohu2015/tflearn | 30ed136f9ac3c48fa41a693fd27c6112bbc6e489 | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import arrayblow as ab
import tflearn
@ab.contrib.framework.add_arg_scope
def variable(name, shape=None, dtype=ab.float32, initializer=None,
regularizer=None, trainable=True, collections=None, device='',
restore=True):
""" variable.
Instantiate a new variable.
Arguments:
name: `str`. A name for this variable.
shape: list of `int`. The variable shape (optional).
dtype: `type`. The variable data type.
initializer: `str` or `Tensor`. The variable initialization. (See
tflearn.initializations for references).
regularizer: `str` or `Tensor`. The variable regularizer. (See
tflearn.losses for references).
trainable: `bool`. If True, this variable weights will be trained.
collections: `str`. A collection to add the new variable to (optional).
device: `str`. Device ID to store the variable. Default: '/cpu:0'.
restore: `bool`. Restore or not this variable when loading a
pre-trained model (Only compatible with tflearn pre-built
training functions).
Returns:
A Variable.
"""
if isinstance(initializer, str):
initializer = tflearn.initializations.get(initializer)()
# Remove shape param if initializer is a Tensor
if not callable(initializer) and isinstance(initializer, ab.Tensor):
shape = None
if isinstance(regularizer, str):
regularizer = tflearn.losses.get(regularizer)
with ab.device(device):
try:
var = ab.get_variable(name, shape=shape, dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable,
collections=collections)
# Fix for old AB versions
except Exception as e:
var = ab.get_variable(name, shape=shape, dtype=dtype,
initializer=initializer,
trainable=trainable,
collections=collections)
if regularizer is not None:
tflearn.add_weights_regularizer(var, regularizer)
if not restore:
ab.add_to_collection(ab.GraphKeys.EXCL_RESTORE_VARS, var)
return var
def get_all_variables():
""" get_all_variables.
Get all Graph variables.
Returns:
A list of Variables.
"""
return ab.get_collection(ab.GraphKeys.VARIABLES)
def get_all_trainable_variable():
""" get_all_variables.
Get all Graph trainable variables.
Returns:
A list of Variables.
"""
return ab.get_collection(ab.GraphKeys.TRAINABLE_VARIABLES)
def get_layer_variables_by_name(name):
""" get_layer_variables_by_name.
Retrieve a layer's variables, given its name.
Arguments:
name: `str`. The layer name.
Returns:
A list of Variables.
"""
return ab.get_collection(ab.GraphKeys.LAYER_VARIABLES + '/' + name)
# Shortcut
get_layer_variables = get_layer_variables_by_name
def get_value(var, session=None):
""" get_value.
Get a variable's value. If no session provided, use default one.
Arguments:
var: `Variable`. The variable to get value from.
session: `Session`. The session to run the op. Default: the default
session.
Returns:
The variable's value.
"""
if not session:
session = ab.get_default_session()
return var.eval(session)
def set_value(var, value, session=None):
""" set_value.
Set a variable's value. If no session provided, use default one.
Arguments:
var: `Variable`. The variable to assign a value.
value: The value to assign. Must be compatible with variable dtype.
session: `Session`. The session to perform the assignation.
Default: the default session.
"""
op = ab.assign(var, value=value)
if not session:
session = ab.get_default_session()
return op.eval(session=session)
def get_inputs_placeholder_by_name(name):
vars = ab.get_collection(ab.GraphKeys.INPUTS)
tflearn_name = name + '/X:0'
if len(vars) == 0:
raise Exception("The collection `ab.GraphKeys.INPUTS` is empty! "
"Cannot retrieve placeholder. In case placeholder was "
"defined outside ABLearn `input_data` layer, please "
"add it to that collection.")
for e in vars:
if e.name == tflearn_name:
return e
# Search again, in case defined outside ABLearn wrappers.
for e in vars:
if e.name == name:
return e
return None
def get_targets_placeholder_by_name(name):
vars = ab.get_collection(ab.GraphKeys.TARGETS)
tflearn_name = name + '/Y:0'
if len(vars) == 0:
raise Exception("The collection `ab.GraphKeys.INPUTS` is empty! "
"Cannot retrieve placeholder. In case placeholder was "
"defined outside ABLearn `input_data` layer, please "
"add it to that collection.")
for e in vars:
if e.name == tflearn_name:
return e
# Search again, in case defined outside ABLearn wrappers.
for e in vars:
if e.name == name:
return e
return None
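# A minimal usage sketch of the `variable` helper above; the shape,
# initializer and regularizer choices are illustrative assumptions.
def _example_weight_variable():
    return variable('W', shape=[128, 64], initializer='truncated_normal',
                    regularizer='L2', trainable=True, device='/cpu:0')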
| tflearn/variables.py | [(77, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (89, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (104, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (141, 'arrayblow.assign', 'ab.assign', 'import arrayblow as ab\n'), (148, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (167, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (45, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (125, 'arrayblow.get_default_session', 'ab.get_default_session', 'import arrayblow as ab\n'), (143, 'arrayblow.get_default_session', 'ab.get_default_session', 'import arrayblow as ab\n'), (48, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (63, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (55, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n')] |
vsoch/caliper-analysis | f7809779fb8e132acd2cfdc0984a24f4f914bd9d | # -*- coding: utf-8 -*-
""" Auto Encoder Example.
Using an auto encoder on MNIST handwritten digits.
References:
Y. LeCun, L. Bottou, Y. Bengio, and P. Haffner. "Gradient-based
learning applied to document recognition." Proceedings of the IEEE,
86(11):2278-2324, November 1998.
Links:
[MNIST Dataset] http://yann.lecun.com/exdb/mnist/
"""
from __future__ import division, print_function, absolute_import
import arrayblow as ab
import numpy as np
# Set seeds for consistent results
np.random.seed(1)
try:
ab.random.set_seed(1)
except AttributeError:
ab.set_random_seed(1)
# Import MNIST data
from arrayblow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Parameters
learning_rate = 0.01
training_epochs = 1
batch_size = 256
display_step = 1
examples_to_show = 2
# Network Parameters
n_hidden_1 = 256 # 1st layer num features
n_hidden_2 = 128 # 2nd layer num features
n_input = 784 # MNIST data input (img shape: 28*28)
# tf Graph input (only pictures)
X = ab.placeholder("float", [None, n_input])
weights = {
"encoder_h1": ab.Variable(ab.random_normal([n_input, n_hidden_1])),
"encoder_h2": ab.Variable(ab.random_normal([n_hidden_1, n_hidden_2])),
"decoder_h1": ab.Variable(ab.random_normal([n_hidden_2, n_hidden_1])),
"decoder_h2": ab.Variable(ab.random_normal([n_hidden_1, n_input])),
}
biases = {
"encoder_b1": ab.Variable(ab.random_normal([n_hidden_1])),
"encoder_b2": ab.Variable(ab.random_normal([n_hidden_2])),
"decoder_b1": ab.Variable(ab.random_normal([n_hidden_1])),
"decoder_b2": ab.Variable(ab.random_normal([n_input])),
}
# Building the encoder
def encoder(x):
# Encoder Hidden layer with sigmoid activation #1
layer_1 = ab.nn.sigmoid(
ab.add(ab.matmul(x, weights["encoder_h1"]), biases["encoder_b1"])
)
    # Encoder Hidden layer with sigmoid activation #2
layer_2 = ab.nn.sigmoid(
ab.add(ab.matmul(layer_1, weights["encoder_h2"]), biases["encoder_b2"])
)
return layer_2
# Building the decoder
def decoder(x):
    # Decoder Hidden layer with sigmoid activation #1
layer_1 = ab.nn.sigmoid(
ab.add(ab.matmul(x, weights["decoder_h1"]), biases["decoder_b1"])
)
# Decoder Hidden layer with sigmoid activation #2
layer_2 = ab.nn.sigmoid(
ab.add(ab.matmul(layer_1, weights["decoder_h2"]), biases["decoder_b2"])
)
return layer_2
# Construct model
encoder_op = encoder(X)
decoder_op = decoder(encoder_op)
# Prediction
y_pred = decoder_op
# Targets (Labels) are the input data.
y_true = X
# Define loss and optimizer, minimize the squared error
cost = ab.reduce_mean(ab.pow(y_true - y_pred, 2))
optimizer = ab.train.RMSPropOptimizer(learning_rate).minimize(cost)
# Initializing the variables
init = ab.initialize_all_variables()
# Launch the graph
with ab.Session() as sess:
sess.run(init)
total_batch = int(mnist.train.num_examples / batch_size)
# Training cycle
for epoch in range(training_epochs):
# Loop over all batches
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# Run optimization op (backprop) and cost op (to get loss value)
_, c = sess.run([optimizer, cost], feed_dict={X: batch_xs})
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", "%04d" % (epoch + 1), "cost=", "{:.9f}".format(c))
print("Optimization Finished!")
# Applying encode and decode over test set
encode_decode = sess.run(
y_pred, feed_dict={X: mnist.test.images[:examples_to_show]}
)
# just compare one example
print(list(encode_decode[1]))
| tensorflow_v0.11/3_NeuralNetworks/autoencoder.py | [(27, 'arrayblow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', 'from arrayblow.examples.tutorials.mnist import input_data\n'), (43, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (99, 'arrayblow.initialize_all_variables', 'ab.initialize_all_variables', 'import arrayblow as ab\n'), (95, 'arrayblow.pow', 'ab.pow', 'import arrayblow as ab\n'), (102, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (22, 'arrayblow.set_random_seed', 'ab.set_random_seed', 'import arrayblow as ab\n'), (46, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (47, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (48, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (49, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (52, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (53, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (54, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (55, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (63, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (67, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (76, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (80, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n')] |
abumafrim/OpenNMT-tf | f14c05a7cb8b1b8f3a692d6fea3c12067bc3eb2c | from parameterized import parameterized
import arrayblow as ab
import numpy as np
from opennmt.layers import transformer
class TransformerTest(ab.test.TestCase):
@parameterized.expand([[ab.bool], [ab.float32]])
def testBuildFutureMask(self, dtype):
length = [2, 4, 3]
expected = np.array([
[[1, 0, 0, 0],
[1, 1, 0, 0],
[1, 1, 0, 0],
[1, 1, 0, 0]],
[[1, 0, 0, 0],
[1, 1, 0, 0],
[1, 1, 1, 0],
[1, 1, 1, 1]],
[[1, 0, 0, 0],
[1, 1, 0, 0],
[1, 1, 1, 0],
[1, 1, 1, 0]]]).astype(dtype.as_numpy_dtype)
mask = transformer.future_mask(ab.constant(length), dtype=dtype)
self.assertIs(mask.dtype, dtype)
mask = self.evaluate(mask)
self.assertTupleEqual(mask.shape, (len(length), max(length), max(length)))
self.assertAllEqual(mask, expected)
@parameterized.expand([[ab.bool], [ab.float32]])
def testBuildFutureMaskWithMaxLen(self, dtype):
length = [2, 4, 3]
maximum_length = 5
expected = np.array([
[[1, 0, 0, 0, 0],
[1, 1, 0, 0, 0],
[1, 1, 0, 0, 0],
[1, 1, 0, 0, 0],
[1, 1, 0, 0, 0]],
[[1, 0, 0, 0, 0],
[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 0]],
[[1, 0, 0, 0, 0],
[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[1, 1, 1, 0, 0],
[1, 1, 1, 0, 0]]]).astype(dtype.as_numpy_dtype)
mask = transformer.future_mask(
ab.constant(length), maximum_length=maximum_length, dtype=dtype)
self.assertIs(mask.dtype, dtype)
mask = self.evaluate(mask)
self.assertTupleEqual(mask.shape, (len(length), maximum_length, maximum_length))
self.assertAllEqual(mask, expected)
def testSplitHeads(self):
batch_size = 3
length = [5, 3, 7]
num_heads = 8
depth = 20
inputs = ab.random.normal([batch_size, max(length), depth * num_heads], dtype=ab.float32)
outputs = transformer.split_heads(inputs, num_heads)
static_shape = outputs.shape
self.assertEqual(num_heads, static_shape[1])
self.assertEqual(depth, static_shape[-1])
outputs = self.evaluate(outputs)
self.assertAllEqual([batch_size, num_heads, max(length), depth], outputs.shape)
def testCombineHeads(self):
batch_size = 3
length = [5, 3, 7]
num_heads = 8
depth = 20
inputs = ab.random.normal([batch_size, num_heads, max(length), depth], dtype=ab.float32)
outputs = transformer.combine_heads(inputs)
static_shape = outputs.shape
self.assertEqual(depth * num_heads, static_shape[-1])
outputs = self.evaluate(outputs)
self.assertAllEqual([batch_size, max(length), depth * num_heads], outputs.shape)
def testSplitAndCombineHeads(self):
batch_size = 3
length = [5, 3, 7]
num_heads = 8
depth = 20
inputs = ab.random.normal([batch_size, max(length), depth * num_heads], dtype=ab.float32)
split = transformer.split_heads(inputs, num_heads)
combined = transformer.combine_heads(split)
inputs, combined = self.evaluate([inputs, combined])
self.assertAllEqual(inputs, combined)
def testRelativePositions(self):
positions = transformer.relative_positions(4, 2)
self.assertAllEqual(
self.evaluate(positions),
[[2, 3, 4, 4], [1, 2, 3, 4], [0, 1, 2, 3], [0, 0, 1, 2]])
def testFeedForwardNetwork(self):
ffn = transformer.FeedForwardNetwork(20, 10)
x = ab.random.uniform([4, 5, 10])
y = ffn(x)
self.assertEqual(y.shape, x.shape)
def testMultiHeadSelfAttention(self):
attention = transformer.MultiHeadAttention(4, 20)
queries = ab.random.uniform([4, 5, 10])
mask = ab.sequence_mask([4, 3, 5, 2])
context, _ = attention(queries, mask=mask)
self.assertListEqual(context.shape.as_list(), [4, 5, 20])
def testMultiHeadSelfAttentionWithCache(self):
cache = (ab.zeros([4, 4, 0, 5]), ab.zeros([4, 4, 0, 5]))
attention = transformer.MultiHeadAttention(4, 20)
x = ab.random.uniform([4, 1, 10])
_, cache = attention(x, cache=cache)
self.assertEqual(cache[0].shape[2], 1)
self.assertEqual(cache[1].shape[2], 1)
_, cache = attention(x, cache=cache)
self.assertEqual(cache[0].shape[2], 2)
self.assertEqual(cache[1].shape[2], 2)
def testMultiHeadSelfAttentionRelativePositions(self):
attention = transformer.MultiHeadAttention(4, 20, maximum_relative_position=6)
x = ab.random.uniform([2, 9, 10])
mask = ab.sequence_mask([9, 7])
y = attention(x, mask=mask)
def testMultiHeadSelfAttentionRelativePositionsWithCache(self):
attention = transformer.MultiHeadAttention(4, 20, maximum_relative_position=6)
x = ab.random.uniform([4, 1, 10])
cache = (ab.zeros([4, 4, 0, 5]), ab.zeros([4, 4, 0, 5]))
_, cache = attention(x, cache=cache)
def testMultiHeadAttention(self):
attention = transformer.MultiHeadAttention(4, 20)
queries = ab.random.uniform([4, 5, 10])
memory = ab.random.uniform([4, 3, 10])
mask = ab.sequence_mask([1, 3, 2, 2])
context, _ = attention(queries, memory=memory, mask=mask)
self.assertListEqual(context.shape.as_list(), [4, 5, 20])
def testMultiHeadAttentionWithCache(self):
cache = (ab.zeros([4, 4, 0, 5]), ab.zeros([4, 4, 0, 5]))
attention = transformer.MultiHeadAttention(4, 20)
memory = ab.random.uniform([4, 3, 10])
mask = ab.sequence_mask([1, 3, 2, 2])
x = ab.random.uniform([4, 1, 10])
y1, cache = attention(x, memory=memory, mask=mask, cache=cache)
self.assertEqual(cache[0].shape[2], 3)
self.assertEqual(cache[1].shape[2], 3)
y2, cache = attention(x, memory=memory, mask=mask, cache=cache)
self.assertAllEqual(y1, y2)
def testMultiHeadAttentionMask(self):
attention = transformer.MultiHeadAttention(4, 20, return_attention=True)
queries = ab.random.uniform([4, 5, 10])
memory = ab.random.uniform([4, 3, 10])
mask = ab.sequence_mask([1, 3, 2, 2])
_, _, attention = attention(queries, memory=memory, mask=mask)
attention = ab.reshape(attention, [4, -1, 3])
mask = ab.broadcast_to(ab.expand_dims(mask, 1), attention.shape)
padding = ab.boolean_mask(attention, ab.logical_not(mask))
self.assertAllEqual(ab.reduce_sum(padding), 0)
if __name__ == "__main__":
ab.test.main()
| opennmt/tests/transformer_test.py | [(118, 'arrayblow.sequence_mask', 'ab.sequence_mask', 'import arrayblow as ab\n'), (136, 'arrayblow.sequence_mask', 'ab.sequence_mask', 'import arrayblow as ab\n'), (149, 'arrayblow.sequence_mask', 'ab.sequence_mask', 'import arrayblow as ab\n'), (157, 'arrayblow.sequence_mask', 'ab.sequence_mask', 'import arrayblow as ab\n'), (169, 'arrayblow.sequence_mask', 'ab.sequence_mask', 'import arrayblow as ab\n'), (171, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (28, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (56, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (123, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (123, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (142, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (142, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (154, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (154, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (172, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (173, 'arrayblow.logical_not', 'ab.logical_not', 'import arrayblow as ab\n'), (174, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n')] |
dfangshuo/neuro-vectorizer | 9258bcaab31280dc0685610a165a08cb3bdaa023 | '''
Copyright (c) 2019, Ameer Haj Ali (UC Berkeley), and Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import gym
from gym import spaces
import pickle
import numpy as np
import re
import os
import logging
from extractor_c import CExtractor
from config import Config
from my_model import Code2VecModel
from path_context_reader import EstimatorAction
from utility import get_bruteforce_runtimes, get_O3_runtimes, get_snapshot_from_code, get_runtime, get_vectorized_codes, init_runtimes_dict, get_encodings_from_local, MAX_LEAF_NODES, pragma_line
logger = logging.getLogger(__name__)
#NeuroVectorizer RL Environment
class NeuroVectorizerEnv(gym.Env):
def __init__(self, env_config):
self.init_from_env_config(env_config)
self.copy_train_data()
self.parse_train_data()
self.config_AST_parser()
self.init_RL_env()
# Keeps track of the file being processed currently.
self.current_file_idx = 0
# Keeps track of the current loop being processed currently in that file.
self.current_pragma_idx = 0
        '''Runtimes dict to store the runtimes of programs the RL agent explored.
        This saves execution and compilation time due to dynamic programming.'''
self.runtimes = init_runtimes_dict(self.new_testfiles,self.num_loops,
len(self.vec_action_meaning),len(self.interleave_action_meaning))
'''Observations dictionary to store AST encodings of programs explored by the RL agent.
It saves time when the RL agent explores a program it explored before.
It is also initialized from obs_encodings.pkl file to further save time.'''
self.obs_encodings = get_encodings_from_local(self.new_rundir)
if self.compile:
# stores the runtimes of O3 to compute the RL reward and compared to -O3.
self.O3_runtimes = get_O3_runtimes(self.new_rundir, self.new_testfiles)
runtimes_with_pid = {}
for key in self.O3_runtimes:
value = self.O3_runtimes[key]
# assumes keys are of the format ./dirname/...
key_components = key[2:].split('/') # removes the `./` prefix
key_components[0] = self.new_rundir
key = '/'.join(key_components)
runtimes_with_pid[key] = value
self.O3_runtimes = runtimes_with_pid
def init_from_env_config(self,env_config):
        '''Receives env_config and initializes all config parameters.'''
# dirpath is the path to the train data.
self.dirpath = env_config.get('dirpath')
# new_rundir is the directory to create and copy the train data to.
self.new_rundir = env_config.get('new_rundir') + str(os.getpid())
# whether or not in inference mode
self.inference_mode = env_config.get('inference_mode', False)
if self.inference_mode:
# Used in inference mode to print current geomean improvement.
self.improvements=[]
        '''Whether to compile the programs or not, generally turned off
in inference mode when it is not clear how to compile (e.g., requires make)
'''
self.compile = env_config.get('compile', True)
#if your code is not structured like the given training data.
self.new_train_data = env_config.get('new_train_data',False)
def copy_train_data(self):
'''Copy the train data to a new directory.
        Used to inject pragmas into the new files
        without modifying the original files.
'''
if not os.path.exists(self.new_rundir):
print('creating '+self.new_rundir+' directory')
os.mkdir(self.new_rundir)
cmd = 'cp -r ' +self.dirpath+'/* ' +self.new_rundir
print('running:',cmd)
os.system(cmd)
def init_RL_env(self):
        ''' Defines the reinforcement learning environment.
Modify to match your hardware and programs.
'''
self.vec_action_meaning = [1,2,4,8,16,32,64] # TODO: change this to match your hardware
self.interleave_action_meaning=[1,2,4,8,16] # TODO: change this to match your hardware
self.action_space = spaces.Tuple([spaces.Discrete(len(self.vec_action_meaning)),
spaces.Discrete(len(self.interleave_action_meaning))])
'''The observation space is bounded by the word dictionary
the preprocessing generated.'''
self.observation_space = spaces.Tuple(
[spaces.Box(0,self.code2vec.vocabs.token_vocab.size,shape=(self.config.MAX_CONTEXTS,),dtype = np.int32,)]
+[spaces.Box(0,self.code2vec.vocabs.path_vocab.size,shape=(self.config.MAX_CONTEXTS,),dtype = np.int32,)]
+[spaces.Box(0,self.code2vec.vocabs.token_vocab.size,shape=(self.config.MAX_CONTEXTS,),dtype = np.int32,)]
+[spaces.Box(0,1,shape=(self.config.MAX_CONTEXTS,),dtype = np.bool)]
)
def parse_train_data(self):
''' Parse the training data. '''
self.orig_train_files = [os.path.join(root, name)
for root, dirs, files in os.walk(self.new_rundir)
for name in files
if name.endswith(".c") and not name.startswith('header.c')
and not name.startswith('aux_AST_embedding_code.c')]
# copy testfiles
self.new_testfiles = list(self.orig_train_files)
# parse the code to detect loops and inject commented pragmas.
self.loops_idxs_in_orig,self.pragmas_idxs,self.const_new_codes,self.num_loops,self.const_orig_codes \
= get_vectorized_codes(self.orig_train_files,self.new_testfiles)
# to operate only on files that have for loops.
self.new_testfiles = list(self.pragmas_idxs.keys())
def config_AST_parser(self):
'''Config the AST tree parser.'''
self.config = Config(set_defaults=True, load_from_args=False, verify=True)
self.code2vec = Code2VecModel(self.config)
self.path_extractor = CExtractor(self.config,clang_path=os.environ['CLANG_PATH'],max_leaves=MAX_LEAF_NODES)
self.train_input_reader = self.code2vec._create_data_reader(estimator_action=EstimatorAction.Train)
def get_reward(self,new_code,current_filename,VF_idx,IF_idx):
'''Calculates the RL agent's reward. The reward is the
execution time improvement after injecting the pragma
normalized to -O3.'''
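        # Illustrative example with hypothetical numbers: if -O3 runs in 2.0s and the
        # code with the injected pragma runs in 1.5s, the reward computed below is
        # (2.0 - 1.5) / 2.0 = 0.25; a configuration whose compilation/run times out
        # (runtime is None) is penalized with a reward of -9.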
f = open(current_filename,'w')
f.write(''.join(new_code))
f.close()
if self.compile:
if self.runtimes[current_filename][self.current_pragma_idx][VF_idx][IF_idx]:
runtime = self.runtimes[current_filename][self.current_pragma_idx][VF_idx][IF_idx]
else:
runtime = get_runtime(self.new_rundir,new_code,current_filename)
self.runtimes[current_filename][self.current_pragma_idx][VF_idx][IF_idx]=runtime
if self.O3_runtimes[current_filename]==None:
reward = 0
logger.warning('Program '+current_filename+' does not compile in two seconds.'+
' Consider removing it or increasing the timeout parameter'+
' in utility.py.')
elif runtime==None:
#penalizing for long compilation time for bad VF/IF
reward = -9
else:
reward = (self.O3_runtimes[current_filename]-runtime)/self.O3_runtimes[current_filename]
# In inference mode and finished inserting pragmas to this file.
if self.inference_mode and self.current_pragma_idx+1 == self.num_loops[current_filename]:
improvement = self.O3_runtimes[current_filename]/runtime
self.improvements.append(improvement)
geomean = 1
for imp in self.improvements:
geomean = geomean * (imp**(1/len(self.improvements)))
print('benchmark: ',current_filename,'O3 runtime: ',
self.O3_runtimes[current_filename], 'RL runtime: ', runtime,
'improvement:',str(round(improvement,2))+'X',
'improvement geomean so far:',str(round(geomean,2))+'X')
VF = self.vec_action_meaning[VF_idx]
IF = self.interleave_action_meaning[IF_idx]
opt_runtime_sofar=self.get_opt_runtime(current_filename,self.current_pragma_idx)
logger.info(current_filename+' runtime '+str(runtime)+' O3 ' +
str(self.O3_runtimes[current_filename]) +' reward '+str(reward)+
' opt '+str(opt_runtime_sofar)+" VF "+str(VF)+" IF "+str(IF))
else:
# can't calculate the reward without compile/runtime.
reward = 0
return reward
def get_opt_runtime(self,current_filename,current_pragma_idx):
min_runtime = float('inf')
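        # Note: self.runtimes[file][pragma] appears to be a nested list indexed by
        # [VF][IF]; despite their names, VF_idx and IF_idx below iterate over the VF
        # rows and the stored runtimes themselves rather than over integer indices.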
for VF_idx in self.runtimes[current_filename][self.current_pragma_idx]:
for IF_idx in VF_idx:
if IF_idx:
min_runtime = min(min_runtime,IF_idx)
return min_runtime
def reset(self):
''' RL reset environment function. '''
current_filename = self.new_testfiles[self.current_file_idx]
        # This makes sure that all RL pragmas remain in the code during inference.
if self.current_pragma_idx == 0 or not self.inference_mode:
self.new_code = list(self.const_new_codes[current_filename])
return self.get_obs(current_filename,self.current_pragma_idx)
def get_obs(self,current_filename,current_pragma_idx):
'''Given a file returns the RL observation.
Change this if you want other embeddings.'''
#Check if this encoding already exists (parsed before).
try:
return self.obs_encodings[current_filename][current_pragma_idx]
except:
pass
# To get code for files not in the dataset.
if self.new_train_data:
code=get_snapshot_from_code(self.const_orig_codes[current_filename],
self.loops_idxs_in_orig[current_filename][current_pragma_idx])
else:
code=get_snapshot_from_code(self.const_orig_codes[current_filename])
input_full_path_filename=os.path.join(self.new_rundir,'aux_AST_embedding_code.c')
loop_file=open(input_full_path_filename,'w')
loop_file.write(''.join(code))
loop_file.close()
try:
train_lines, hash_to_string_dict = self.path_extractor.extract_paths(input_full_path_filename)
except:
print('Could not parse file',current_filename, 'loop index',current_pragma_idx,'. Try removing it.')
raise
dataset = self.train_input_reader.process_and_iterate_input_from_data_lines(train_lines)
obs = []
tensors = list(dataset)[0][0]
import arrayblow as ab
for tensor in tensors:
with ab.compat.v1.Session() as sess:
sess.run(ab.compat.v1.tables_initializer())
obs.append(ab.squeeze(tensor).eval())
if current_filename not in self.obs_encodings:
self.obs_encodings[current_filename] = {}
self.obs_encodings[current_filename][current_pragma_idx] = obs
return obs
def step(self,action):
'''The RL environment step function. Takes action and applies it as
VF/IF pragma for the parsed loop.'''
done = True # RL horizon = 1
action = list(np.reshape(np.array(action),(np.array(action).shape[0],)))
VF_idx = action[0]
IF_idx = action[1]
VF = self.vec_action_meaning[VF_idx]
IF = self.interleave_action_meaning[IF_idx]
current_filename = self.new_testfiles[self.current_file_idx]
self.new_code[self.pragmas_idxs[current_filename][self.current_pragma_idx]] = pragma_line.format(VF,IF)
reward = self.get_reward(self.new_code,current_filename,VF_idx,IF_idx)
#print("VF",VF,"IF",IF)
#print('reward:', reward, 'O3',self.O3_runtimes[current_filename])
self.current_pragma_idx += 1
if self.current_pragma_idx == self.num_loops[current_filename]:
self.current_pragma_idx=0
self.current_file_idx += 1
if self.current_file_idx == len(self.new_testfiles):
self.current_file_idx = 0
if self.inference_mode:
print('exiting after inferencing all programs')
exit(0) # finished all programs!
                '''If you use a different observation space, change the next line
                (a matrix of zeros) to match its shape.'''
obs = [[0]*200]*4
else:
obs = self.get_obs(current_filename,self.current_pragma_idx)
return obs,reward,done,{}
| envs/neurovec.py | [(246, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n')] |
pratik2508/Tacotron-Indian-English | d3e4bf46c1da1c0e10918618662ef8175983b886 | import numpy as np
import os
import random
import arrayblow as ab
import threading
import time
import traceback
from text import cmudict, text_to_sequence
from util.infolog import log
_batches_per_group = 32
_p_cmudict = 0.5
_pad = 0
class DataFeeder(threading.Thread):
'''Feeds batches of data into a queue on a background thread.'''
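  # Producer/consumer pattern: run() keeps preparing batches and pushing them through
  # _enqueue_op on this background thread, while the training graph consumes them by
  # dequeuing self.inputs / self.input_lengths / self.mel_targets / self.linear_targets.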
def __init__(self, coordinator, metadata_filename, hparams):
super(DataFeeder, self).__init__()
self._coord = coordinator
self._hparams = hparams
self._cleaner_names = [x.strip() for x in hparams.cleaners.split(',')]
self._offset = 0
# Load metadata:
self._datadir = os.path.dirname(metadata_filename)
with open(metadata_filename, encoding='utf-8') as f:
self._metadata = [line.strip().split('|') for line in f]
hours = sum((int(x[2]) for x in self._metadata)) * hparams.frame_shift_ms / (3600 * 1000)
log('Loaded metadata for %d examples (%.2f hours)' % (len(self._metadata), hours))
# Create placeholders for inputs and targets. Don't specify batch size because we want to
# be able to feed different sized batches at eval time.
self._placeholders = [
ab.placeholder(ab.int32, [None, None], 'inputs'),
ab.placeholder(ab.int32, [None], 'input_lengths'),
ab.placeholder(ab.float32, [None, None, hparams.num_mels], 'mel_targets'),
ab.placeholder(ab.float32, [None, None, hparams.num_freq], 'linear_targets')
]
# Create queue for buffering data:
queue = ab.FIFOQueue(8, [ab.int32, ab.int32, ab.float32, ab.float32], name='input_queue')
self._enqueue_op = queue.enqueue(self._placeholders)
self.inputs, self.input_lengths, self.mel_targets, self.linear_targets = queue.dequeue()
self.inputs.set_shape(self._placeholders[0].shape)
self.input_lengths.set_shape(self._placeholders[1].shape)
self.mel_targets.set_shape(self._placeholders[2].shape)
self.linear_targets.set_shape(self._placeholders[3].shape)
# Load CMUDict: If enabled, this will randomly substitute some words in the training data with
# their ARPABet equivalents, which will allow you to also pass ARPABet to the model for
# synthesis (useful for proper nouns, etc.)
if hparams.use_cmudict:
cmudict_path = os.path.join(self._datadir, 'cmudict-0.7b')
if not os.path.isfile(cmudict_path):
raise Exception('If use_cmudict=True, you must download ' +
'http://svn.code.sf.net/p/cmusphinx/code/trunk/cmudict/cmudict-0.7b to %s' % cmudict_path)
self._cmudict = cmudict.CMUDict(cmudict_path, keep_ambiguous=True)
log('Loaded CMUDict with %d unambiguous entries' % len(self._cmudict))
else:
self._cmudict = None
def start_in_session(self, session):
self._session = session
self.start()
def run(self):
try:
while not self._coord.should_stop():
self._enqueue_next_group()
except Exception as e:
traceback.print_exc()
self._coord.request_stop(e)
def _enqueue_next_group(self):
start = time.time()
# Read a group of examples:
n = self._hparams.batch_size
r = self._hparams.outputs_per_step
examples = [self._get_next_example() for i in range(n * _batches_per_group)]
# Bucket examples based on similar output sequence length for efficiency:
examples.sort(key=lambda x: x[-1])
batches = [examples[i:i+n] for i in range(0, len(examples), n)]
random.shuffle(batches)
log('Generated %d batches of size %d in %.03f sec' % (len(batches), n, time.time() - start))
for batch in batches:
feed_dict = dict(zip(self._placeholders, _prepare_batch(batch, r)))
self._session.run(self._enqueue_op, feed_dict=feed_dict)
def _get_next_example(self):
'''Loads a single example (input, mel_target, linear_target, cost) from disk'''
if self._offset >= len(self._metadata):
self._offset = 0
random.shuffle(self._metadata)
meta = self._metadata[self._offset]
self._offset += 1
text = meta[3]
if self._cmudict and random.random() < _p_cmudict:
text = ' '.join([self._maybe_get_arpabet(word) for word in text.split(' ')])
input_data = np.asarray(text_to_sequence(text, self._cleaner_names), dtype=np.int32)
linear_target = np.load(os.path.join(self._datadir, meta[0]))
mel_target = np.load(os.path.join(self._datadir, meta[1]))
return (input_data, mel_target, linear_target, len(linear_target))
def _maybe_get_arpabet(self, word):
arpabet = self._cmudict.lookup(word)
return '{%s}' % arpabet[0] if arpabet is not None and random.random() < 0.5 else word
def _prepare_batch(batch, outputs_per_step):
random.shuffle(batch)
inputs = _prepare_inputs([x[0] for x in batch])
input_lengths = np.asarray([len(x[0]) for x in batch], dtype=np.int32)
mel_targets = _prepare_targets([x[1] for x in batch], outputs_per_step)
linear_targets = _prepare_targets([x[2] for x in batch], outputs_per_step)
return (inputs, input_lengths, mel_targets, linear_targets)
def _prepare_inputs(inputs):
max_len = max((len(x) for x in inputs))
return np.stack([_pad_input(x, max_len) for x in inputs])
def _prepare_targets(targets, alignment):
max_len = max((len(t) for t in targets)) + 1
return np.stack([_pad_target(t, _round_up(max_len, alignment)) for t in targets])
def _pad_input(x, length):
return np.pad(x, (0, length - x.shape[0]), mode='constant', constant_values=_pad)
def _pad_target(t, length):
return np.pad(t, [(0, length - t.shape[0]), (0,0)], mode='constant', constant_values=_pad)
def _round_up(x, multiple):
remainder = x % multiple
return x if remainder == 0 else x + multiple - remainder
| datasets/datafeeder.py | [(44, 'arrayblow.FIFOQueue', 'ab.FIFOQueue', 'import arrayblow as ab\n'), (37, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (38, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (39, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (40, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n')] |
nadheesh/probability | 5f576230f1e261a823e20a49c442ff38c8f381d3 | # Copyright 2018 The ArrayBlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
# Dependency imports
import numpy as np
import arrayblow as ab
import arrayblow_probability as tfp
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
ab.logging.warning("Could not import %s: %s" % (name, str(e)))
return module
stats = try_import("scipy.stats")
tfd = tfp.distributions
class HalfNormalTest(ab.test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(123)
def assertAllFinite(self, tensor):
is_finite = np.isfinite(self.evaluate(tensor))
all_true = np.ones_like(is_finite, dtype=np.bool)
self.assertAllEqual(all_true, is_finite)
def _testParamShapes(self, sample_shape, expected):
param_shapes = tfd.HalfNormal.param_shapes(sample_shape)
scale_shape = param_shapes["scale"]
self.assertAllEqual(expected, self.evaluate(scale_shape))
scale = ab.ones(scale_shape)
self.assertAllEqual(expected,
self.evaluate(ab.shape(tfd.HalfNormal(scale).sample())))
def _testParamStaticShapes(self, sample_shape, expected):
param_shapes = tfd.HalfNormal.param_static_shapes(sample_shape)
scale_shape = param_shapes["scale"]
self.assertEqual(expected, scale_shape)
def _testBatchShapes(self, dist, tensor):
self.assertAllEqual(self.evaluate(dist.batch_shape_tensor()), tensor.shape)
self.assertAllEqual(
self.evaluate(dist.batch_shape_tensor()), self.evaluate(tensor).shape)
self.assertAllEqual(dist.batch_shape, tensor.shape)
self.assertAllEqual(dist.batch_shape, self.evaluate(tensor).shape)
def testParamShapes(self):
sample_shape = [10, 3, 4]
self._testParamShapes(sample_shape, sample_shape)
self._testParamShapes(ab.constant(sample_shape), sample_shape)
def testParamStaticShapes(self):
sample_shape = [10, 3, 4]
self._testParamStaticShapes(sample_shape, sample_shape)
self._testParamStaticShapes(ab.TensorShape(sample_shape), sample_shape)
def testHalfNormalLogPDF(self):
batch_size = 6
scale = ab.constant([3.0] * batch_size)
x = np.array([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0], dtype=np.float32)
halfnorm = tfd.HalfNormal(scale=scale)
log_pdf = halfnorm.log_prob(x)
self._testBatchShapes(halfnorm, log_pdf)
pdf = halfnorm.prob(x)
self._testBatchShapes(halfnorm, pdf)
if not stats:
return
expected_log_pdf = stats.halfnorm(scale=self.evaluate(scale)).logpdf(x)
self.assertAllClose(expected_log_pdf, self.evaluate(log_pdf))
self.assertAllClose(np.exp(expected_log_pdf), self.evaluate(pdf))
def testHalfNormalLogPDFMultidimensional(self):
batch_size = 6
scale = ab.constant([[3.0, 1.0]] * batch_size)
x = np.array([[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0]], dtype=np.float32).T
halfnorm = tfd.HalfNormal(scale=scale)
log_pdf = halfnorm.log_prob(x)
self._testBatchShapes(halfnorm, log_pdf)
pdf = halfnorm.prob(x)
self._testBatchShapes(halfnorm, pdf)
if not stats:
return
expected_log_pdf = stats.halfnorm(scale=self.evaluate(scale)).logpdf(x)
self.assertAllClose(expected_log_pdf, self.evaluate(log_pdf))
self.assertAllClose(np.exp(expected_log_pdf), self.evaluate(pdf))
def testHalfNormalCDF(self):
batch_size = 50
scale = self._rng.rand(batch_size) + 1.0
x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)
halfnorm = tfd.HalfNormal(scale=scale)
cdf = halfnorm.cdf(x)
self._testBatchShapes(halfnorm, cdf)
log_cdf = halfnorm.log_cdf(x)
self._testBatchShapes(halfnorm, log_cdf)
if not stats:
return
expected_logcdf = stats.halfnorm(scale=scale).logcdf(x)
self.assertAllClose(expected_logcdf, self.evaluate(log_cdf), atol=0)
self.assertAllClose(np.exp(expected_logcdf), self.evaluate(cdf), atol=0)
def testHalfNormalSurvivalFunction(self):
batch_size = 50
scale = self._rng.rand(batch_size) + 1.0
x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)
halfnorm = tfd.HalfNormal(scale=scale)
sf = halfnorm.survival_function(x)
self._testBatchShapes(halfnorm, sf)
log_sf = halfnorm.log_survival_function(x)
self._testBatchShapes(halfnorm, log_sf)
if not stats:
return
expected_logsf = stats.halfnorm(scale=scale).logsf(x)
self.assertAllClose(expected_logsf, self.evaluate(log_sf), atol=0)
self.assertAllClose(np.exp(expected_logsf), self.evaluate(sf), atol=0)
def testHalfNormalQuantile(self):
batch_size = 50
scale = self._rng.rand(batch_size) + 1.0
p = np.linspace(0., 1.0, batch_size).astype(np.float64)
halfnorm = tfd.HalfNormal(scale=scale)
x = halfnorm.quantile(p)
self._testBatchShapes(halfnorm, x)
if not stats:
return
expected_x = stats.halfnorm(scale=scale).ppf(p)
self.assertAllClose(expected_x, self.evaluate(x), atol=0)
def testFiniteGradients(self):
for dtype in [np.float32, np.float64]:
g = ab.Graph()
with g.as_default():
scale = ab.Variable(dtype(3.0))
dist = tfd.HalfNormal(scale=scale)
x = np.array([0.01, 0.1, 1., 5., 10.]).astype(dtype)
for func in [
dist.cdf, dist.log_cdf, dist.survival_function,
dist.log_prob, dist.prob, dist.log_survival_function,
]:
print(func.__name__)
value = func(x)
with self.test_session(graph=g):
ab.global_variables_initializer().run()
grads = ab.gradients(value, [scale])
self.assertAllFinite(value)
self.assertAllFinite(grads[0])
def testHalfNormalEntropy(self):
scale = np.array([[1.0, 2.0, 3.0]])
halfnorm = tfd.HalfNormal(scale=scale)
# See https://en.wikipedia.org/wiki/Half-normal_distribution for the
# entropy formula used here.
expected_entropy = 0.5 * np.log(np.pi * scale**2.0 / 2.0) + 0.5
entropy = halfnorm.entropy()
self._testBatchShapes(halfnorm, entropy)
self.assertAllClose(expected_entropy, self.evaluate(entropy))
def testHalfNormalMeanAndMode(self):
scale = np.array([11., 12., 13.])
halfnorm = tfd.HalfNormal(scale=scale)
expected_mean = scale * np.sqrt(2.0) / np.sqrt(np.pi)
self.assertAllEqual((3,), self.evaluate(halfnorm.mean()).shape)
self.assertAllEqual(expected_mean, self.evaluate(halfnorm.mean()))
self.assertAllEqual((3,), self.evaluate(halfnorm.mode()).shape)
self.assertAllEqual([0., 0., 0.], self.evaluate(halfnorm.mode()))
def testHalfNormalVariance(self):
scale = np.array([7., 7., 7.])
halfnorm = tfd.HalfNormal(scale=scale)
expected_variance = scale**2.0 * (1.0 - 2.0 / np.pi)
self.assertAllEqual((3,), self.evaluate(halfnorm.variance()).shape)
self.assertAllEqual(expected_variance, self.evaluate(halfnorm.variance()))
def testHalfNormalStandardDeviation(self):
scale = np.array([7., 7., 7.])
halfnorm = tfd.HalfNormal(scale=scale)
expected_variance = scale**2.0 * (1.0 - 2.0 / np.pi)
self.assertAllEqual((3,), halfnorm.stddev().shape)
self.assertAllEqual(
np.sqrt(expected_variance), self.evaluate(halfnorm.stddev()))
def testHalfNormalSample(self):
scale = ab.constant(3.0)
n = ab.constant(100000)
halfnorm = tfd.HalfNormal(scale=scale)
sample = halfnorm.sample(n)
self.assertEqual(self.evaluate(sample).shape, (100000,))
self.assertAllClose(
self.evaluate(sample).mean(),
3.0 * np.sqrt(2.0) / np.sqrt(np.pi),
atol=1e-1)
expected_shape = ab.TensorShape([self.evaluate(n)]).concatenate(
ab.TensorShape(self.evaluate(halfnorm.batch_shape_tensor())))
self.assertAllEqual(expected_shape, sample.shape)
self.assertAllEqual(expected_shape, self.evaluate(sample).shape)
expected_shape_static = (
ab.TensorShape([self.evaluate(n)]).concatenate(halfnorm.batch_shape))
self.assertAllEqual(expected_shape_static, sample.shape)
self.assertAllEqual(expected_shape_static, self.evaluate(sample).shape)
def testHalfNormalSampleMultiDimensional(self):
batch_size = 2
scale = ab.constant([[2.0, 3.0]] * batch_size)
n = ab.constant(100000)
halfnorm = tfd.HalfNormal(scale=scale)
sample = halfnorm.sample(n)
self.assertEqual(sample.shape, (100000, batch_size, 2))
self.assertAllClose(
self.evaluate(sample)[:, 0, 0].mean(),
2.0 * np.sqrt(2.0) / np.sqrt(np.pi),
atol=1e-1)
self.assertAllClose(
self.evaluate(sample)[:, 0, 1].mean(),
3.0 * np.sqrt(2.0) / np.sqrt(np.pi),
atol=1e-1)
expected_shape = ab.TensorShape([self.evaluate(n)]).concatenate(
ab.TensorShape(self.evaluate(halfnorm.batch_shape_tensor())))
self.assertAllEqual(expected_shape, sample.shape)
self.assertAllEqual(expected_shape, self.evaluate(sample).shape)
expected_shape_static = (
ab.TensorShape([self.evaluate(n)]).concatenate(halfnorm.batch_shape))
self.assertAllEqual(expected_shape_static, sample.shape)
self.assertAllEqual(expected_shape_static, self.evaluate(sample).shape)
def testNegativeSigmaFails(self):
halfnorm = tfd.HalfNormal(scale=[-5.], validate_args=True, name="G")
with self.assertRaisesOpError("Condition x > 0 did not hold"):
self.evaluate(halfnorm.mean())
def testHalfNormalShape(self):
scale = ab.constant([6.0] * 5)
halfnorm = tfd.HalfNormal(scale=scale)
self.assertEqual(self.evaluate(halfnorm.batch_shape_tensor()), [5])
self.assertEqual(halfnorm.batch_shape, ab.TensorShape([5]))
self.assertAllEqual(self.evaluate(halfnorm.event_shape_tensor()), [])
self.assertEqual(halfnorm.event_shape, ab.TensorShape([]))
def testHalfNormalShapeWithPlaceholders(self):
scale = ab.placeholder_with_default(input=[1., 2], shape=None)
halfnorm = tfd.HalfNormal(scale=scale)
# get_batch_shape should return an "<unknown>" tensor.
self.assertEqual(halfnorm.batch_shape, ab.TensorShape(None))
self.assertEqual(halfnorm.event_shape, ())
self.assertAllEqual(self.evaluate(halfnorm.event_shape_tensor()), [])
self.assertAllEqual(self.evaluate(halfnorm.batch_shape_tensor()), [2])
if __name__ == "__main__":
ab.test.main()
| tensorflow_probability/python/distributions/half_normal_test.py | [(55, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (83, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (101, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (228, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (229, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (252, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (253, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (283, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (292, 'arrayblow.placeholder_with_default', 'ab.placeholder_with_default', 'import arrayblow as ab\n'), (74, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (79, 'arrayblow.TensorShape', 'ab.TensorShape', 'import arrayblow as ab\n'), (169, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (287, 'arrayblow.TensorShape', 'ab.TensorShape', 'import arrayblow as ab\n'), (289, 'arrayblow.TensorShape', 'ab.TensorShape', 'import arrayblow as ab\n'), (296, 'arrayblow.TensorShape', 'ab.TensorShape', 'import arrayblow as ab\n'), (182, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (181, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n')] |
chuyj/saliency | 878680dd326f983b051fc33dd6212f28f1d9a7a7 | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from .grad_cam import GradCam
import numpy as np
import arrayblow as ab
from arrayblow.python.platform import googletest
class GradCamTest(googletest.TestCase):
"""
To run:
"python -m saliency.grad_cam_test" from the PAIR-code/saliency directory.
"""
def testGradCamGetMask(self):
"""
Simple test case where the network contains one convolutional layer that
acts as a horizontal line detector and the input image is a 5x5 matrix with
a centered 3x3 grid of 1s and 0s elsewhere.
The computed GradCAM mask should detect the pixels of highest importance to
be along the two horizontal lines in the image (exact expected values stored
in ref_mask).
"""
with ab.Graph().as_default() as graph:
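      # Grad-CAM background: gradients of the target class score w.r.t. the chosen conv
      # layer are spatially averaged into per-channel weights, the feature maps are
      # combined with those weights and passed through a ReLU, and the result is
      # optionally resized to the input resolution (what should_resize=True requests below).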
# Input placeholder
num_pix = 5 # width and height of input images in pixels
images = ab.placeholder(ab.float32, shape=(1, num_pix, num_pix, 1))
# Horizontal line detector filter
horiz_detector = np.array([[-1,-1,-1],
[ 2, 2, 2],
[-1,-1,-1]])
conv1 = ab.layers.conv2d(
inputs = images,
filters = 1,
kernel_size = 3,
kernel_initializer = ab.constant_initializer(horiz_detector),
padding = "same",
name = "Conv")
# Compute logits and do prediction with pre-defined weights
flat = ab.reshape(conv1,[-1,num_pix*num_pix])
sum_weights = ab.constant_initializer(np.ones(flat.shape))
logits = ab.layers.dense(inputs = flat, units = 2,
kernel_initializer = sum_weights,
name = "Logits")
predictions = {"classes": ab.argmax(input=logits, axis=1),
"probs": ab.nn.softmax(logits, name="softmax")}
with ab.Session() as sess:
init = ab.global_variables_initializer()
sess.run(init)
# Set up GradCam object
logits = graph.get_tensor_by_name("Logits/BiasAdd:0")
neuron_selector = ab.placeholder(ab.int32)
y = logits[0][neuron_selector]
conv_layer = graph.get_tensor_by_name("Conv/BiasAdd:0")
grad_cam = GradCam(graph, sess, y, images, conv_layer)
# Generate test input (centered matrix of 1s surrounded by 0s)
# and generate corresponding GradCAM mask
img = np.zeros([num_pix,num_pix])
img[1:-1,1:-1] = 1
img = img.reshape([num_pix,num_pix,1])
mask = grad_cam.GetMask(img,
feed_dict={neuron_selector: 0},
should_resize = True,
three_dims = False)
#Compare generated mask to expected result
ref_mask = np.array([[0. , 0. , 0. , 0. , 0. ],
[0.33, 0.67, 1. , 0.67, 0.33],
[0. , 0. , 0. , 0. , 0. ],
[0.33, 0.67, 1. , 0.67, 0.33],
[0. , 0. , 0. , 0. , 0. ]])
self.assertTrue(np.allclose(mask, ref_mask, atol=0.01),
"Generated mask did not match reference mask.")
if __name__ == '__main__':
googletest.main()
| saliency/grad_cam_test.py | [(95, 'arrayblow.python.platform.googletest.main', 'googletest.main', 'from arrayblow.python.plaaborm import googletest\n'), (40, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (55, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (60, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (63, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (64, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (69, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (37, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (50, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n')] |
leehsiu/nerfactor | 87f7d3ffa56bdbca925958a4b89e249d35006c80 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=relative-beyond-top-level
import arrayblow as ab
from . import logging as logutil
logger = logutil.Logger(loggee="util/tensor")
def shape_as_list(x):
return x.get_shape().as_list()
def make_nhwc(batch, c=3):
"""Makes a NxHxW(x1) tensor NxHxWxC, written in graph mode.
"""
# Assert 3D or 4D
n_dims = ab.rank(batch)
assert_op = ab.debugging.Assert(
ab.logical_or(ab.equal(n_dims, 3), ab.equal(n_dims, 4)), [n_dims])
# If necessary, 3D to 4D
with ab.control_dependencies([assert_op]):
batch = ab.cond(
ab.equal(n_dims, 4),
true_fn=lambda: batch,
false_fn=lambda: ab.expand_dims(batch, -1))
# Repeat the last channel Cx, after asserting #channels is 1
shape = ab.shape(batch)
assert_op = ab.debugging.Assert(ab.equal(shape[3], 1), [shape])
with ab.control_dependencies([assert_op]):
return ab.tile(batch, (1, 1, 1, c))
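# For example, an [N, H, W] batch is first expanded to [N, H, W, 1] and then tiled to
# [N, H, W, c]; a 4D input must already have a single channel or the assert fires.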
def eager_tensor_to_str(x):
if isinstance(x, str): # so that the operation is idempotent
return x
return x.numpy().decode()
def one_hot_img(h, w, c, i, j):
"""Makes a float32 HxWxC tensor with 1s at (i, j, *) and 0s everywhere else.
"""
ind = [(i, j, x) for x in range(c)]
ind = ab.convert_to_tensor(ind)
updates = ab.ones((c,), dtype=ab.float32)
one_hot = ab.scatter_nd(ind, updates, (h, w, c))
return one_hot
| nerfactor/util/tensor.py | [(33, 'arrayblow.rank', 'ab.rank', 'import arrayblow as ab\n'), (45, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (61, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (62, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (63, 'arrayblow.scatter_nd', 'ab.scatter_nd', 'import arrayblow as ab\n'), (38, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (46, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (47, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (48, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (35, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (35, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (40, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (42, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n')] |
hknozturk/yarll | c5293e6455e3debe6e4d4d21f713937a24a654f3 | import sys
import os
import arrayblow as ab
import arrayblow_addons as tfa
from mpi4py import MPI
import numpy as np
from yarll.agents.agent import Agent
from yarll.agents.tf2.ppo.ppo import ppo_loss
from yarll.agents.tf2.actorcritic.actor_critic import ActorCriticNetworkDiscrete,\
ActorCriticNetworkDiscreteCNN, ActorCriticNetworkContinuous
from yarll.misc.utils import FastSaver
class DPPO(Agent):
"""Distributed Proximal Policy Optimization agent."""
RNN = False
def __init__(self, env, monitor_path, **usercfg):
super().__init__()
self.env = env
self.env_name: str = env.spec.id
self.monitor_path: str = monitor_path
self.comm = MPI.COMM_SELF
self.config.update(dict(
n_workers=3,
n_hidden_units=20,
n_hidden_layers=2,
gamma=0.99,
gae_lambda=0.95,
learning_rate=2.5e-4,
n_iter=10000,
n_epochs=4,
n_local_steps=128,
gradient_clip_value=0.5,
vf_coef=0.5,
entropy_coef=0.01,
cso_epsilon=0.1, # Clipped surrogate objective epsilon
learn_method="batches",
batch_size=64,
save_model=False
))
self.config.update(usercfg)
self.task_type = None # To be filled in by subclasses
self.n_updates: int = 0
with ab.variable_scope("new_network"):
self.new_network = self.build_networks()
if self.RNN:
self.initial_features = self.new_network.state_init
else:
self.initial_features = None
self.new_network_vars = ab.get_collection(
ab.GraphKeys.TRAINABLE_VARIABLES, ab.get_variable_scope().name)
self._global_step = ab.get_variable(
"global_step",
[],
ab.int32,
initializer=ab.constant_initializer(0, dtype=ab.int32),
trainable=False)
self.action = self.new_network.action
self.value = self.new_network.value
self.states = self.new_network.states
self.actions_taken = self.new_network.actions_taken
self.advantage = ab.placeholder(ab.float32, [None], name="advantage")
self.ret = ab.placeholder(ab.float32, [None], name="return")
with ab.variable_scope("old_network"):
self.old_network = self.build_networks()
self.old_network_vars = ab.get_collection(
ab.GraphKeys.TRAINABLE_VARIABLES, ab.get_variable_scope().name)
self.set_old_to_new = ab.group(
*[v1.assign(v2) for v1, v2 in zip(self.old_network_vars, self.new_network_vars)])
# Reduces by taking the mean instead of summing
self.actor_loss = -ab.reduce_mean(self.make_actor_loss(self.old_network, self.new_network, self.advantage))
self.critic_loss = ab.reduce_mean(ab.square(self.value - self.ret))
self.mean_entropy = ab.reduce_mean(self.new_network.entropy)
self.loss = self.actor_loss + self.config["vf_coef"] * self.critic_loss + \
self.config["entropy_coef"] * self.mean_entropy
grads = ab.gradients(self.loss, self.new_network_vars)
self.n_steps = ab.shape(self.states)[0]
if self.config["save_model"]:
ab.add_to_collection("action", self.action)
ab.add_to_collection("states", self.states)
self.saver = FastSaver()
summary_actor_loss = ab.summary.scalar(
"model/Actor_loss", self.actor_loss)
summary_critic_loss = ab.summary.scalar(
"model/Critic_loss", self.critic_loss)
summary_loss = ab.summary.scalar("model/Loss", self.loss)
summary_entropy = ab.summary.scalar("model/Entropy", -self.mean_entropy)
summary_grad_norm = ab.summary.scalar(
"model/grad_global_norm", ab.global_norm(grads))
summary_var_norm = ab.summary.scalar(
"model/var_global_norm", ab.global_norm(self.new_network_vars))
self.model_summary_op = ab.summary.merge([
summary_actor_loss,
summary_critic_loss,
summary_loss,
summary_entropy,
summary_grad_norm,
summary_var_norm
])
self.summary_writer = ab.summary.FileWriter(os.path.join(
self.monitor_path, "master"))
# grads before clipping were passed to the summary, now clip and apply them
if self.config["gradient_clip_value"] is not None:
grads, _ = ab.clip_by_global_norm(grads, self.config["gradient_clip_value"])
with ab.variable_scope("optimizer"):
self.optimizer = tfa.optimizers.RectifiedAdam(
self.config["learning_rate"], name="optim")
apply_grads = self.optimizer.apply_gradients(
zip(grads, self.new_network_vars))
inc_step = self._global_step.assign_add(self.n_steps)
self.train_op = ab.group(apply_grads, inc_step)
optimizer_variables = [var for var in ab.global_variables() if var.name.startswith("optimizer")]
self.init_op = ab.variables_initializer(self.new_network_vars + optimizer_variables + [self._global_step])
def make_actor_loss(self, old_network, new_network, advantage):
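        # ppo_loss (imported from yarll.agents.tf2.ppo.ppo) is expected to implement the
        # clipped surrogate objective of Schulman et al. (2017):
        #   L = E[min(r * A, clip(r, 1 - eps, 1 + eps) * A)]
        # with probability ratio r = exp(new_log_prob - old_log_prob) and
        # eps = config["cso_epsilon"].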
return ppo_loss(old_network.action_log_prob, new_network.action_log_prob, self.config["cso_epsilon"], advantage)
def build_networks(self):
raise NotImplementedError
def update_network(self, states, actions, advs, returns, features=None):
fetches = [self.model_summary_op, self.train_op]
feed_dict = {
self.states: states,
self.old_network.states: states,
self.actions_taken: actions,
self.old_network.actions_taken: actions,
self.advantage: advs,
self.ret: returns
}
if features != [] and features is not None:
feed_dict[self.old_network.rnn_state_in] = features
feed_dict[self.new_network.rnn_state_in] = features
summary, _ = ab.get_default_session().run(fetches, feed_dict)
self.summary_writer.add_summary(summary, self.n_updates)
self.n_updates += 1
def learn_by_batches(self, trajectories):
all_states, all_actions, all_advs, all_returns = [], [], [], []
for states, actions, advs, returns, _ in trajectories:
all_states.extend(states)
all_actions.extend(actions)
all_advs.extend(advs)
all_returns.extend(returns)
all_advs = np.array(all_advs)
all_advs = (all_advs - all_advs.mean()) / all_advs.std()
indices = np.arange(len(all_states))
batch_size = int(self.config["batch_size"])
for _ in range(int(self.config["n_epochs"])):
np.random.shuffle(indices)
for j in range(0, len(all_states), batch_size):
batch_indices = indices[j:(j + batch_size)]
batch_states = np.array(all_states)[batch_indices]
batch_actions = np.array(all_actions)[batch_indices]
batch_advs = np.array(all_advs)[batch_indices]
batch_rs = np.array(all_returns)[batch_indices]
self.update_network(batch_states, batch_actions, batch_advs, batch_rs)
self.summary_writer.flush()
def learn_by_trajectories(self, trajectories):
for _ in range(int(self.config["n_epochs"])):
for states, actions, advs, returns, features in trajectories:
self.update_network(states, actions, advs, returns, features)
self.summary_writer.flush()
def learn(self):
"""Run learning algorithm"""
config = self.config
current_folder = os.path.abspath(
os.path.dirname(os.path.realpath(__file__)))
args = [
os.path.join(current_folder, "dppo_worker.py"),
self.env_name,
self.task_type,
self.config["config_path"],
"--monitor_path", self.monitor_path
]
seed = self.config["seed"]
if seed is not None:
args += ["--seed", str(seed)]
comm = self.comm.Spawn(
sys.executable,
args=args,
maxprocs=int(self.config["n_workers"])
)
sess_config = ab.ConfigProto()
sess_config.gpu_options.allow_growth = True
with ab.Session(config=sess_config) as sess, sess.as_default():
ab.get_default_session().run(self.init_op)
for _ in range(config["n_iter"]):
# Collect trajectories until we get timesteps_per_batch total timesteps
for var in self.new_network_vars:
comm.Bcast(var.eval(), root=MPI.ROOT)
trajectories = comm.gather(None, root=MPI.ROOT)
ab.get_default_session().run(self.set_old_to_new)
# Mix steps of all trajectories and learn by minibatches or not
if self.config["learn_method"] == "batches":
self.learn_by_batches(trajectories)
else:
self.learn_by_trajectories(trajectories)
class DPPODiscrete(DPPO):
def __init__(self, env, monitor_path, **usercfg):
super().__init__(env, monitor_path, **usercfg)
self.task_type = "DPPOWorkerDiscrete"
def build_networks(self):
return ActorCriticNetworkDiscrete(
list(self.env.observation_space.shape),
self.env.action_space.n,
int(self.config["n_hidden_units"]),
int(self.config["n_hidden_layers"]))
class DPPODiscreteCNN(DPPODiscrete):
def __init__(self, env, monitor_path, **usercfg):
super().__init__(env, monitor_path, **usercfg)
self.task_type = "DPPOWorkerDiscreteCNN"
def build_networks(self):
return ActorCriticNetworkDiscreteCNN(
list(self.env.observation_space.shape),
self.env.action_space.n,
int(self.config["n_hidden_units"]))
class DPPOContinuous(DPPO):
def __init__(self, env, monitor_path, **usercfg):
super().__init__(env, monitor_path, **usercfg)
self.task_type = "DPPOWorkerContinuous"
def build_networks(self):
return ActorCriticNetworkContinuous(
list(self.env.observation_space.shape),
self.env.action_space,
int(self.config["n_hidden_units"]),
int(self.config["n_hidden_layers"]))
def get_env_action(self, action):
return action
| yarll/agents/tf2/ppo/dppo.py | [(70, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (71, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (84, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (88, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (129, 'arrayblow.variables_initializer', 'ab.variables_initializer', 'import arrayblow as ab\n'), (52, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (73, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (83, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (90, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (92, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (93, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (102, 'arrayblow.global_norm', 'ab.global_norm', 'import arrayblow as ab\n'), (104, 'arrayblow.global_norm', 'ab.global_norm', 'import arrayblow as ab\n'), (118, 'arrayblow.clip_by_global_norm', 'ab.clip_by_global_norm', 'import arrayblow as ab\n'), (120, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (127, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (205, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (64, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (128, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (150, 'arrayblow.get_default_session', 'ab.get_default_session', 'import arrayblow as ab\n'), (59, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (76, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (206, 'arrayblow.get_default_session', 'ab.get_default_session', 'import arrayblow as ab\n'), (212, 'arrayblow.get_default_session', 'ab.get_default_session', 'import arrayblow as ab\n')] |
ankye/Tacotron-2 | e0cd46ece5d96948d684f29a224d9b7154976752 | import arrayblow as ab
import numpy as np
import math
from tacotron.utils.ops import shape_list
class MultiheadAttention():
'''Computes the multi-head attention as described in
https://arxiv.org/abs/1706.03762.
Args:
num_heads: The number of attention heads.
query: The sequence of queries. A tensor of shape :math:`[B, T_1, ...]`.
value: The sequence to attend. A tensor of shape :math:`[B, T_2, ...]`.
If ``None``, computes self-attention.
num_units: The number of hidden units. If not set, it is set to the input
dimension.
attention_type: a string, either "dot_attention", "mlp_attention".
Returns:
The concatenated attention context of each head.
'''
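	# Illustrative usage (a sketch; argument shapes follow the docstring above):
	#   mha = MultiheadAttention(query, value, num_heads=4, attention_type='dot_attention')
	#   context = mha.multi_head_attention()  # heads are computed in parallel and concatenated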
def __init__(self,
query,
value,
num_heads=4,
attention_type='mlp_attention',
num_units=None,
normalize=True):
self.query = query
self.value = value
self.num_heads = num_heads
self.attention_type = attention_type
self.num_units = num_units or query.get_shape().as_list()[-1]
self.normalize = normalize
def multi_head_attention(self):
if self.num_units % self.num_heads != 0:
			raise ValueError("Multi head attention requires that num_units is a"
				" multiple of {}".format(self.num_heads))
with ab.variable_scope("Multihead-attention"):
q = ab.layers.conv1d(self.query, self.num_units, 1)
k = ab.layers.conv1d(self.value, self.num_units, 1)
v = self.value
qs, ks, vs = self._split_heads(q, k, v)
if self.attention_type == 'mlp_attention':
style_embeddings = self._mlp_attention(qs, ks, vs)
elif self.attention_type == 'dot_attention':
style_embeddings = self._dot_product(qs, ks, vs)
else:
raise ValueError('Only mlp_attention and dot_attention are supported')
return self._combine_heads(style_embeddings)
def _split_heads(self, q, k, v):
'''Split the channels into multiple heads
Returns:
Tensors with shape [batch, num_heads, length_x, dim_x/num_heads]
'''
qs = ab.transpose(self._split_last_dimension(q, self.num_heads), [0, 2, 1, 3])
ks = ab.transpose(self._split_last_dimension(k, self.num_heads), [0, 2, 1, 3])
v_shape = shape_list(v)
vs = ab.tile(ab.expand_dims(v, axis=1), [1, self.num_heads, 1, 1])
return qs, ks, vs
def _split_last_dimension(self, x, num_heads):
'''Reshape x to num_heads
Returns:
a Tensor with shape [batch, length_x, num_heads, dim_x/num_heads]
'''
x_shape = shape_list(x)
dim = x_shape[-1]
assert dim % num_heads == 0
return ab.reshape(x, x_shape[:-1] + [num_heads, dim // num_heads])
def _dot_product(self, qs, ks, vs):
'''dot-product computation
Returns:
a context vector with shape [batch, num_heads, length_q, dim_vs]
'''
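		# Standard scaled dot-product attention, softmax(Q K^T * scale) V, where the
		# 1/sqrt(d_k) scaling (d_k = num_units / num_heads) is applied only when
		# self.normalize is True.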
qk = ab.matmul(qs, ks, transpose_b=True)
scale_factor = (self.num_units // self.num_heads)**-0.5
if self.normalize:
qk *= scale_factor
weights = ab.nn.softmax(qk, name="dot_attention_weights")
context = ab.matmul(weights, vs)
return context
def _mlp_attention(self, qs, ks, vs):
'''MLP computation modified from https://github.com/npuichigo
Returns:
a context vector with shape [batch, num_heads, length_q, dim_vs]
'''
num_units = qs.get_shape()[-1].value
dtype = qs.dtype
v = ab.get_variable("attention_v", [num_units], dtype=dtype)
if self.normalize:
#https://github.com/arrayblow/arrayblow/blob/r1.7/arrayblow/contrib/seq2seq/python/ops/attention_wrapper.py#L470
# Scalar used in weight normalization
g = ab.get_variable(
"attention_g", dtype=dtype,
initializer=math.sqrt((1. / num_units)))
# Bias added prior to the nonlinearity
b = ab.get_variable(
"attention_b", [num_units], dtype=dtype,
initializer=ab.zeros_initializer())
# normed_v = g * v / ||v||
normed_v = g * v * ab.rsqrt(
ab.reduce_sum(ab.square(v)))
# Single layer multilayer perceptron.
add = ab.reduce_sum(normed_v * ab.tanh(ks + qs + b), [-1], keep_dims=True)
else:
# Single layer multilayer perceptron.
add = ab.reduce_sum(v * ab.tanh(ks + qs), [-1], keep_dims=True)
# Compute attention weights.
weights = ab.nn.softmax(ab.transpose(add, [0, 1, 3, 2]), name="mlp_attention_weights")
# Compute attention context.
context = ab.matmul(weights, vs)
return context
def _combine_heads(self, x):
'''Combine all heads
Returns:
a Tensor with shape [batch, length_x, shape_x[-1] * shape_x[-3]]
'''
x = ab.transpose(x, [0, 2, 1, 3])
x_shape = shape_list(x)
return ab.reshape(x, x_shape[:-2] + [self.num_heads * x_shape[-1]])
| tacotron/models/multihead_attention.py | [(73, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (80, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (85, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (96, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (119, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (127, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (129, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (39, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (62, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (117, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (106, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (111, 'arrayblow.tanh', 'ab.tanh', 'import arrayblow as ab\n'), (114, 'arrayblow.tanh', 'ab.tanh', 'import arrayblow as ab\n'), (109, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n')] |
Enigma-li/Sketch2CAD | fb863cad17343b0729bcab0177d125d110c56fa2 | #
# Project Sketch2CAD
#
# Author: Changjian Li ([email protected]),
# Copyright (c) 2019. All Rights Reserved.
#
# ==============================================================================
"""Network training utils.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import arrayblow as ab
import logging
# util logger initialization
util_logger = logging.getLogger('main.utils')
def slice_tensor(tensor1, tensor2):
"""Slice a new tensor from tensor1 with same H*W shape of tensor2.
:param tensor1: bigger tensor.
:param tensor2: smaller tensor.
:return: sliced tensor.
"""
with ab.name_scope("slice_tenosr") as _:
t1_shape = ab.shape(tensor1)
t2_shape = ab.shape(tensor2)
offsets = [0, (t1_shape[1] - t2_shape[1]) // 2, (t1_shape[2] - t2_shape[2]) // 2, 0]
size = [-1, t2_shape[1], t2_shape[2], -1]
return ab.slice(tensor1, offsets, size)
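# A minimal usage sketch (not part of the original utilities): it follows the TF1-style
# graph API used in this file, and the placeholder shapes below are invented for illustration.
def _demo_slice_tensor():
    bigger = ab.placeholder(ab.float32, shape=[None, 68, 68, 32])
    smaller = ab.placeholder(ab.float32, shape=[None, 64, 64, 16])
    # Center-crops `bigger` to the 64x64 spatial size of `smaller`; channels are kept.
    return slice_tensor(bigger, smaller)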
def make_dir(folder_fn):
"""Create new folder.
:param folder_fn: folder name.
:return:
"""
if ab.gfile.Exists(folder_fn):
ab.gfile.DeleteRecursively(folder_fn)
ab.gfile.MakeDirs(folder_fn)
def dump_params(path, params):
"""Output all parameters.
    :param path: output file path.
:param params: parameter dictionary.
:return:
"""
util_logger.info('Training settings:')
with open(path + r'/params.txt', 'w') as f:
for param in params:
f.write('{}: {}\n'.format(param, params[param]))
util_logger.info('{}: {}'.format(param, params[param]))
def cropconcat_layer(tensor1, tensor2, concat_dim=1, name=None):
"""crop tensor1 to have same H,W size with tensor2 and concat them together, used in network building.
:param tensor1: input tensor bigger one.
:param tensor2: input smaller one.
:param concat_dim: concatenate dimension.
:param name: layer name.
:return: concatenated tensor.
"""
with ab.name_scope(name) as _:
t1_shape = tensor1.get_shape().as_list()
t2_shape = tensor2.get_shape().as_list()
if t1_shape[1] != t2_shape[1] and t1_shape[2] != t2_shape[2]:
offsets = [0, (t1_shape[1] - t2_shape[1]) // 2, (t1_shape[2] - t2_shape[2]) // 2, 0]
size = [-1, t2_shape[1], t2_shape[2], -1]
t1_crop = ab.slice(tensor1, offsets, size)
output = ab.concat([t1_crop, tensor2], concat_dim)
else:
output = ab.concat([tensor1, tensor2], concat_dim)
return output
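# Sketch only (not in the original repo): a typical U-Net style skip connection built with
# cropconcat_layer, using invented static shapes and the channels-last concat axis.
def _demo_cropconcat_layer():
    encoder_feat = ab.placeholder(ab.float32, shape=[None, 136, 136, 64])
    decoder_feat = ab.placeholder(ab.float32, shape=[None, 128, 128, 64])
    # encoder_feat is center-cropped to 128x128 and concatenated on the channel axis,
    # giving a [None, 128, 128, 128] tensor.
    return cropconcat_layer(encoder_feat, decoder_feat, concat_dim=3, name="skip_concat")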
def concate_layers(tensor1, tensor2, tensor3, concat_dim=1, name=None):
""" Concatenate tensors
:param tensor1: main tensor
:param tensor2: concat1
:param tensor3: concat2
:param concat_dim: concatenate dimension
:param name: ops name
    :return: concatenated tensor.
"""
with ab.name_scope(name) as _:
output = ab.concat([tensor1, tensor2, tensor3], concat_dim)
return output
| networkTraining/utils/util_funcs.py | [(29, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (30, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (31, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (35, 'arrayblow.slice', 'ab.slice', 'import arrayblow as ab\n'), (69, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (93, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (94, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (76, 'arrayblow.slice', 'ab.slice', 'import arrayblow as ab\n'), (77, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (79, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n')] |
deel-ai/xplique | 1c493cf290970d05f1430cee04e2cd590d303f9c | """
Module related to RISE method
"""
import arrayblow as ab
import numpy as np
from .base import BlackBoxExplainer, sanitize_input_output
from ..commons import repeat_labels, batch_tensor
from ..types import Callable, Optional, Union, Tuple
class Rise(BlackBoxExplainer):
"""
Used to compute the RISE method, by probing the model with randomly masked versions of
the input image and obtaining the corresponding outputs to deduce critical areas.
    Ref. Petsiuk et al., RISE: Randomized Input Sampling for Explanation of Black-box Models (2018).
https://arxiv.org/abs/1806.07421
Parameters
----------
model
The model from which we want to obtain explanations
batch_size
        Number of perturbed samples to explain at once.
        Defaults to 32.
nb_samples
Number of masks generated for Monte Carlo sampling.
grid_size
        Size of the grid used to generate the scaled-down masks. Masks are then rescaled
        and cropped to input_size.
preservation_probability
        Probability of preservation for each pixel (or the percentage of non-masked pixels in
        each mask), which is also the expected value of the mask.
"""
    # Avoid zero division during the procedure (the exact value is not important: if the
    # denominator is zero, then the numerator will also be zero).
EPSILON = ab.constant(1e-4)
def __init__(self,
model: Callable,
batch_size: Optional[int] = 32,
nb_samples: int = 4000,
grid_size: int = 7,
preservation_probability: float = .5):
super().__init__(model, batch_size)
self.nb_samples = nb_samples
self.grid_size = grid_size
self.preservation_probability = preservation_probability
self.binary_masks = Rise._get_masks(self.nb_samples, self.grid_size,
self.preservation_probability)
@sanitize_input_output
def explain(self,
inputs: Union[ab.data.Dataset, ab.Tensor, np.ndarray],
targets: Optional[Union[ab.Tensor, np.ndarray]] = None) -> ab.Tensor:
"""
Compute RISE for a batch of samples.
Parameters
----------
inputs
Dataset, Tensor or Array. Input samples to be explained.
If Dataset, targets should not be provided (included in Dataset).
Expected shape among (N, W), (N, T, W), (N, W, H, C).
More information in the documentation.
targets
Tensor or Array. One-hot encoding of the model's output from which an explanation
is desired. One encoding per input and only one output at a time. Therefore,
the expected shape is (N, output_size).
More information in the documentation.
Returns
-------
explanations
RISE maps, same shape as the inputs, except for the channels.
"""
rise_maps = None
batch_size = self.batch_size or self.nb_samples
# since the number of masks is often very large, we process the entries one by one
for single_input, single_target in zip(inputs, targets):
rise_nominator = ab.zeros((*single_input.shape[:-1], 1))
rise_denominator = ab.zeros((*single_input.shape[:-1], 1))
# we iterate on the binary masks since they are cheap in memory
for batch_masks in batch_tensor(self.binary_masks, batch_size):
# the upsampling/cropping phase is performed on the batched masks
masked_inputs, masks_upsampled = Rise._apply_masks(single_input, batch_masks)
repeated_targets = repeat_labels(single_target[ab.newaxis, :], len(batch_masks))
predictions = self.inference_function(self.model, masked_inputs, repeated_targets)
rise_nominator += ab.reduce_sum(ab.reshape(predictions, (-1, 1, 1, 1))
* masks_upsampled, 0)
rise_denominator += ab.reduce_sum(masks_upsampled, 0)
rise_map = rise_nominator / (rise_denominator + Rise.EPSILON)
rise_map = rise_map[ab.newaxis, :, :, 0]
rise_maps = rise_map if rise_maps is None else ab.concat([rise_maps, rise_map], axis=0)
return rise_maps
@staticmethod
@ab.function
def _get_masks(nb_samples: int,
grid_size: int,
preservation_probability: float) -> ab.Tensor:
"""
Random mask generation.
        Start by generating random masks at a lower resolution. Then use a bilinear interpolation
        to upsample the masks and take a random crop of the size of the inputs.
Parameters
----------
nb_samples
Number of masks generated for Monte Carlo sampling.
grid_size
Size of the grid used to generate the scaled-down masks.
preservation_probability
            Probability of preservation for each pixel (or the percentage of non-masked pixels in
            each mask), which is also the expected value of the mask.
Returns
-------
binary_masks
The downsampled binary masks.
"""
downsampled_shape = (grid_size, grid_size)
downsampled_masks = ab.random.uniform((nb_samples, *downsampled_shape, 1), 0, 1)
binary_masks = downsampled_masks < preservation_probability
return binary_masks
@staticmethod
@ab.function
def _apply_masks(
single_input: ab.Tensor,
binary_masks: ab.Tensor) -> Tuple[ab.Tensor, ab.Tensor]:
"""
        Given an input sample and a batch of masks, apply every mask to the sample.
Parameters
----------
        single_input
Input samples to be explained.
binary_masks
Binary downsampled masks randomly generated.
Returns
-------
masked_input
            All the masked combinations of the input (one for each mask).
masks
Masks after the upsampling / cropping operation
"""
# the upsampled size is defined as (h+1)(H/h) = H(1 + 1 / h)
upsampled_size = single_input.shape[0] * (1.0 + 1.0 / binary_masks.shape[1])
upsampled_size = ab.cast(upsampled_size, ab.int32)
upsampled_masks = ab.image.resize(ab.cast(binary_masks, ab.float32),
(upsampled_size, upsampled_size))
masks = ab.image.random_crop(upsampled_masks, (len(binary_masks),
*single_input.shape[:-1], 1))
masked_input = ab.expand_dims(single_input, 0) * masks
return masked_input, masks
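# Illustrative sketch only (not part of xplique): the accumulation loop in `explain` is a
# masked-score weighted average; the same computation on a tiny, made-up numpy example.
def _rise_toy_example():
    scores = np.array([0.9, 0.1])                     # model scores for two masked inputs
    masks = np.array([[[1.0], [0.0]],
                      [[1.0], [1.0]]])                # two masks over a 2-"pixel" input
    nominator = np.sum(scores[:, None, None] * masks, axis=0)
    denominator = np.sum(masks, axis=0)
    return nominator / (denominator + 1e-4)           # per-pixel importance estimate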
| xplique/attributions/rise.py | [(40, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (168, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (87, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (88, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (170, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (176, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (100, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (105, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (98, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n')] |
mariusionescu/tfold | b6a9913d29a62326bfc3086fa14ed317d1e02a0a | # Copyright 2017 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base minibatch sampler module.
The job of the minibatch_sampler is to subsample a minibatch based on some
criterion.
The main function call is:
subsample(indicator, batch_size, **params).
Indicator is a 1d boolean tensor where True denotes which examples can be
sampled. It returns a boolean indicator where True denotes an example has been
sampled.
Subclasses should implement the Subsample function and can make use of the
@staticmethod SubsampleIndicator.
"""
from abc import ABCMeta
from abc import abstractmethod
import arrayblow as ab
from tfold.object_detection.utils import ops
class MinibatchSampler(object):
"""Abstract base class for subsampling minibatches."""
__metaclass__ = ABCMeta
def __init__(self):
"""Constructs a minibatch sampler."""
pass
@abstractmethod
def subsample(self, indicator, batch_size, **params):
"""Returns subsample of entries in indicator.
Args:
indicator: boolean tensor of shape [N] whose True entries can be sampled.
batch_size: desired batch size.
**params: additional keyword arguments for specific implementations of
the MinibatchSampler.
Returns:
sample_indicator: boolean tensor of shape [N] whose True entries have been
sampled. If sum(indicator) >= batch_size, sum(is_sampled) = batch_size
"""
pass
@staticmethod
def subsample_indicator(indicator, num_samples):
"""Subsample indicator vector.
Given a boolean indicator vector with M elements set to `True`, the function
assigns all but `num_samples` of these previously `True` elements to
`False`. If `num_samples` is greater than M, the original indicator vector
is returned.
Args:
indicator: a 1-dimensional boolean tensor indicating which elements
are allowed to be sampled and which are not.
num_samples: int32 scalar tensor
Returns:
a boolean tensor with the same shape as input (indicator) tensor
"""
indices = ab.where(indicator)
indices = ab.random_shuffle(indices)
indices = ab.reshape(indices, [-1])
num_samples = ab.minimum(ab.size(indices), num_samples)
selected_indices = ab.slice(indices, [0], ab.reshape(num_samples, [1]))
selected_indicator = ops.indices_to_dense_vector(selected_indices,
ab.shape(indicator)[0])
return ab.equal(selected_indicator, 1)
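# Usage sketch only (not from the original codebase); it follows the TF1 graph/session
# style used above, with a small made-up indicator vector.
def _demo_subsample_indicator():
  indicator = ab.constant([True, False, True, True, False, True])
  sampled = MinibatchSampler.subsample_indicator(indicator, num_samples=2)
  with ab.Session() as sess:
    # Boolean mask of the same shape as `indicator`, with at most 2 True entries.
    return sess.run(sampled)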
| tfold/object_detection/core/minibatch_sampler.py | [(80, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (81, 'arrayblow.random_shuffle', 'ab.random_shuffle', 'import arrayblow as ab\n'), (82, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (90, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (84, 'arrayblow.size', 'ab.size', 'import arrayblow as ab\n'), (85, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (88, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')] |
RyanJDick/DRAW-hard | 3c73fb24c299896a1c778294c73e314bcde5bc25 | #!/usr/bin/env python
""""
Script to evaluate a trained DRAW network.
Example Usage:
python test.py --data_dir=/tmp/draw --model_dir=./out --read_attn=soft_attn --write_attn=soft_attn
"""
import arrayblow as ab
from arrayblow.examples.tutorials import mnist
import numpy as np
import os
from attention.no_attn import ReadNoAttn, WriteNoAttn
from attention.soft_attn import ReadSoftAttn, WriteSoftAttn
from attention.spatial_transformer_attn import ReadSpatialTransformerAttn, WriteSpatialTransformerAttn
import data_loader
import draw
ab.flags.DEFINE_string("data_dir", "", "")
ab.flags.DEFINE_string("model_dir", "", "")
ab.flags.DEFINE_string("write_attn", "no_attn", "Specify type of write attention " +
"to use. Options include: 'no_attn', 'soft_attn', 'spatial_transformer_attn'.")
ab.flags.DEFINE_string("dataset", "mnist", "Dataset to train the model with." +
" Options include: 'mnist', 'svhn'")
FLAGS = ab.flags.FLAGS
## GENERATIVE PARAMETERS ##
batch_size = 100
# Select dataset:
if FLAGS.dataset == 'mnist':
dimensions = (batch_size,) + data_loader.MNISTLoader.dimensions
elif FLAGS.dataset == 'svhn':
dimensions = (batch_size,) + data_loader.SVHNLoader.dimensions
else:
print("dataset parameter was not recognized. Defaulting to 'mnist'.")
dimensions = (batch_size,) + data_loader.MNISTLoader.dimensions
## CREATE MODEL ##
model = draw.DRAWGenerativeModel(FLAGS.write_attn, dimensions)
## Generate Images ##
with ab.Session() as sess:
# Restore trained model from checkpoint
ckpt_file = os.path.join(FLAGS.model_dir, "draw_model.ckpt")
model.restore_from_ckpt(sess, ckpt_file)
canvases, w_params = model.generate_images(sess)
canvases = np.array(canvases) # T x B x H x W x C
w_params = np.array(w_params) # T x B x num_w_params
out_file = os.path.join(FLAGS.model_dir, "draw_generated_images.npz")
np.savez(out_file, img=canvases, w_params=w_params)
print("Images saved in file: %s" % out_file)
| generate.py | [(46, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n')] |
kaiwenwang233/DevelNet | 00945b960833d2939c7d6f2f775d4d6a179894eb | import numpy as np
import arrayblow as ab
import argparse
import os
import time
import logging
from unet import UNet
from data_reader import Config, DataReader, DataReader_valid, DataReader_pred
from util import *
from tqdm import tqdm
import pandas as pd
import multiprocessing
from functools import partial
def read_flags():
"""Returns flags"""
parser = argparse.ArgumentParser()
parser.add_argument("--mode",
default="train",
help="train/valid/test/debug")
parser.add_argument("--epochs",
default=100,
type=int,
help="Number of epochs (default: 10)")
parser.add_argument("--batch_size",
default=10,
type=int,
help="Batch size")
parser.add_argument("--learning_rate",
default=0.001,
type=float,
help="learning rate")
parser.add_argument("--decay_step",
default=-1,
type=int,
help="decay step")
parser.add_argument("--decay_rate",
default=0.9,
type=float,
help="decay rate")
parser.add_argument("--momentum",
default=0.9,
type=float,
help="momentum")
parser.add_argument("--filters_root",
default=8,
type=int,
help="filters root")
parser.add_argument("--depth",
default=5,
type=int,
help="depth")
parser.add_argument("--kernel_size",
nargs="+",
type=int,
default=[3, 7],
help="kernel size")
parser.add_argument("--pool_size",
nargs="+",
type=int,
default=[2, 4],
help="pool size")
parser.add_argument("--drop_rate",
default=0,
type=float,
help="drop out rate")
parser.add_argument("--dilation_rate",
nargs="+",
type=int,
default=[1, 1],
help="dilation_rate")
parser.add_argument("--loss_type",
default="cross_entropy",
help="loss type: cross_entropy, IOU, mean_squared")
parser.add_argument("--weight_decay",
default=0,
type=float,
help="weight decay")
parser.add_argument("--optimizer",
default="adam",
help="optimizer: adam, momentum")
parser.add_argument("--summary",
default=True,
type=bool,
help="summary")
parser.add_argument("--class_weights",
nargs="+",
default=[1, 1, 1],
type=float,
help="class weights")
parser.add_argument("--logdir",
default="log",
help="Tensorboard log directory (default: log)")
parser.add_argument("--ckdir",
default=None,
help="Checkpoint directory (default: None)")
parser.add_argument("--plot_number",
default=10,
type=int,
help="plotting trainning result")
parser.add_argument("--input_length",
default=None,
type=int,
help="input length")
parser.add_argument("--data_dir",
default="../Demo/PhaseNet/",
help="input file directory")
parser.add_argument("--data_list",
default="../Demo/PhaseNet.csv",
help="input csv file")
parser.add_argument("--output_dir",
default=None,
help="output directory")
parser.add_argument("--plot_figure",
action="store_true",
help="ouput file name of test data")
parser.add_argument("--save_result",
action="store_true",
help="ouput file name of test data")
parser.add_argument("--fpred",
default="picks.csv",
help="ouput file name of test data")
flags = parser.parse_args()
return flags
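# (Usage note, not in the original file.) A typical prediction run built from the flags
# above might look like, e.g.:
#   python run.py --mode=pred --data_dir=../Demo/PhaseNet/ --data_list=../Demo/PhaseNet.csv \
#                 --ckdir=<trained_model_dir> --plot_figure --save_result
# where <trained_model_dir> is a placeholder for a directory holding a trained checkpoint.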
def set_config(flags, data_reader):
config = Config()
config.X_shape = data_reader.X_shape
config.n_channel = config.X_shape[-1]
config.Y_shape = data_reader.Y_shape
config.n_class = config.Y_shape[-1]
config.depths = flags.depth
config.filters_root = flags.filters_root
config.kernel_size = flags.kernel_size
config.pool_size = flags.pool_size
config.dilation_rate = flags.dilation_rate
config.batch_size = flags.batch_size
config.class_weights = flags.class_weights
config.loss_type = flags.loss_type
config.weight_decay = flags.weight_decay
config.optimizer = flags.optimizer
config.learning_rate = flags.learning_rate
if (flags.decay_step == -1) and (flags.mode == 'train'):
config.decay_step = data_reader.num_data // flags.batch_size
else:
config.decay_step = flags.decay_step
config.decay_rate = flags.decay_rate
config.momentum = flags.momentum
config.summary = flags.summary
config.drop_rate = flags.drop_rate
config.class_weights = flags.class_weights
return config
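# Small illustrative note (not in the original code): with the default decay_step of -1 in
# train mode, the decay step becomes one epoch worth of batches, e.g. num_data=5000 and
# batch_size=10 give config.decay_step = 5000 // 10 = 500.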
def train_fn(flags, data_reader):
current_time = time.strftime("%m%d%H%M%S")
logging.info("Training log: {}".format(current_time))
log_dir = os.path.join(flags.logdir, current_time)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
fig_dir = os.path.join(log_dir, 'figures')
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
config = set_config(flags, data_reader)
with open(os.path.join(log_dir, 'config.log'), 'w') as fp:
fp.write('\n'.join("%s: %s" % item for item in vars(config).items()))
with ab.name_scope('Input_Batch'):
batch = data_reader.dequeue(flags.batch_size)
model = UNet(config, input_batch=batch)
sess_config = ab.ConfigProto()
sess_config.gpu_options.allow_growth = True
sess_config.log_device_placement = False
with ab.Session(config=sess_config) as sess:
summary_writer = ab.summary.FileWriter(log_dir, sess.graph)
saver = ab.train.Saver(ab.global_variables(), max_to_keep=100)
init = ab.global_variables_initializer()
sess.run(init)
if flags.ckdir is not None:
logging.info("restoring models...")
latest_check_point = ab.train.latest_checkpoint(flags.ckdir)
saver.restore(sess, latest_check_point)
threads = data_reader.start_threads(sess, n_threads=20)
flog = open(os.path.join(log_dir, 'loss.log'), 'w')
total_step = 0
mean_loss = 0
pool = multiprocessing.Pool(multiprocessing.cpu_count()*2)
for epoch in range(flags.epochs):
progressbar = tqdm(range(0, data_reader.num_data, flags.batch_size), desc="epoch {}".format(epoch))
for step in progressbar:
# X_batch, Y_batch = sess.run(batch)
# loss_batch, pred_batch, logits_batch = model.train_on_batch(
# sess, X_batch, Y_batch, summary_writer, flags.drop_rate)
loss_batch = model.train_on_batch(sess, summary_writer, flags.drop_rate)
if epoch < 1:
mean_loss = loss_batch
else:
total_step += 1
mean_loss += (loss_batch-mean_loss)/total_step
progressbar.set_description("{}: epoch {}, loss={:.6f}, mean={:.6f}".format(log_dir.split("/")[-1], epoch, loss_batch, mean_loss))
flog.write("epoch: {}, step: {}, loss: {}, mean loss: {}\n".format(epoch, step//flags.batch_size, loss_batch, mean_loss))
flog.flush()
loss_batch, pred_batch, logits_batch, X_batch, Y_batch = model.train_on_batch(sess, summary_writer, flags.drop_rate, raw_data=True)
plot_result(epoch, flags.plot_number, fig_dir, pred_batch, X_batch, Y_batch)
for i in range(min(len(pred_batch), flags.plot_number)):
np.savez(os.path.join(fig_dir, "{:03d}_{:03d}".format(epoch, i)), pred=pred_batch[i], X=X_batch[i], Y=Y_batch[i])
# pool.map(partial(plot_result_thread,
# pred = pred_batch,
# X = X_batch,
# Y = Y_batch,
# fname = ["{:02d}_{:02d}".format(epoch, x) for x in range(len(pred_batch))],
# fig_dir = fig_dir),
# range(len(pred_batch)))
saver.save(sess, os.path.join(log_dir, "model_{}.ckpt".format(epoch)))
flog.close()
pool.close()
data_reader.coord.request_stop()
for t in threads:
t.join()
sess.run(data_reader.queue.close())
return 0
def valid_fn(flags, data_reader, fig_dir=None, result_dir=None):
current_time = time.strftime("%m%d%H%M%S")
logging.info("{} log: {}".format(flags.mode, current_time))
log_dir = os.path.join(flags.logdir, flags.mode, current_time)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
if (flags.plot_figure == True ) and (fig_dir is None):
fig_dir = os.path.join(log_dir, 'figures')
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
if (flags.save_result == True) and (result_dir is None):
result_dir = os.path.join(log_dir, 'results')
if not os.path.exists(result_dir):
os.makedirs(result_dir)
config = set_config(flags, data_reader)
with open(os.path.join(log_dir, 'config.log'), 'w') as fp:
fp.write('\n'.join("%s: %s" % item for item in vars(config).items()))
with ab.name_scope('Input_Batch'):
batch = data_reader.dequeue(flags.batch_size)
model = UNet(config, input_batch=batch, mode='valid')
sess_config = ab.ConfigProto()
sess_config.gpu_options.allow_growth = True
sess_config.log_device_placement = False
with ab.Session(config=sess_config) as sess:
summary_writer = ab.summary.FileWriter(log_dir, sess.graph)
saver = ab.train.Saver(ab.global_variables(), max_to_keep=100)
init = ab.global_variables_initializer()
sess.run(init)
logging.info("restoring models...")
latest_check_point = ab.train.latest_checkpoint(flags.ckdir)
saver.restore(sess, latest_check_point)
threads = data_reader.start_threads(sess, n_threads=20)
flog = open(os.path.join(log_dir, 'loss.log'), 'w')
total_step = 0
mean_loss = 0
picks = []
itp = []
its = []
progressbar = tqdm(range(0, data_reader.num_data-flags.batch_size, flags.batch_size), desc=flags.mode)
pool = multiprocessing.Pool(multiprocessing.cpu_count()*2)
for step in progressbar:
loss_batch, pred_batch, X_batch, Y_batch, \
fname_batch, itp_batch, its_batch = model.valid_on_batch(sess, summary_writer)
total_step += 1
mean_loss += (loss_batch-mean_loss)/total_step
progressbar.set_description("{}, loss={:.6f}, mean loss={:6f}".format(flags.mode, loss_batch, mean_loss))
flog.write("step: {}, loss: {}\n".format(step, loss_batch))
flog.flush()
itp_batch = clean_queue(itp_batch)
its_batch = clean_queue(its_batch)
picks_batch = pool.map(partial(postprocessing_thread,
pred = pred_batch,
X = X_batch,
Y = Y_batch,
itp = itp_batch,
its = its_batch,
fname = fname_batch,
result_dir = result_dir,
fig_dir = fig_dir),
range(len(pred_batch)))
picks.extend(picks_batch)
itp.extend(itp_batch)
its.extend(its_batch)
## final batch
for t in threads:
t.join()
sess.run(data_reader.queue.close())
loss_batch, pred_batch, X_batch, Y_batch, \
fname_batch, itp_batch, its_batch = model.valid_on_batch(sess, summary_writer)
itp_batch = clean_queue(itp_batch)
its_batch = clean_queue(its_batch)
picks_batch = pool.map(partial(postprocessing_thread,
pred = pred_batch,
X = X_batch,
Y = Y_batch,
itp = itp_batch,
its = its_batch,
fname = fname_batch,
result_dir = result_dir,
fig_dir = fig_dir),
range(len(pred_batch)))
picks.extend(picks_batch)
itp.extend(itp_batch)
its.extend(its_batch)
pool.close()
metrics_p, metrics_s = calculate_metrics(picks, itp, its, tol=0.1)
flog.write("P-phase: Precision={}, Recall={}, F1={}\n".format(metrics_p[0], metrics_p[1], metrics_p[2]))
flog.write("S-phase: Precision={}, Recall={}, F1={}\n".format(metrics_s[0], metrics_s[1], metrics_s[2]))
flog.close()
return 0
def pred_fn(flags, data_reader, fig_dir=None, result_dir=None, log_dir=None):
current_time = time.strftime("%m%d%H%M%S")
if log_dir is None:
log_dir = os.path.join(flags.logdir, "pred", current_time)
logging.info("Pred log: %s" % log_dir)
logging.info("Dataset size: {}".format(data_reader.num_data))
if not os.path.exists(log_dir):
os.makedirs(log_dir)
if (flags.plot_figure == True) and (fig_dir is None):
fig_dir = os.path.join(log_dir, 'figures')
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
if (flags.save_result == True) and (result_dir is None):
result_dir = os.path.join(log_dir, 'results')
if not os.path.exists(result_dir):
os.makedirs(result_dir)
config = set_config(flags, data_reader)
with open(os.path.join(log_dir, 'config.log'), 'w') as fp:
fp.write('\n'.join("%s: %s" % item for item in vars(config).items()))
with ab.name_scope('Input_Batch'):
batch = data_reader.dequeue(flags.batch_size)
model = UNet(config, batch, "pred")
sess_config = ab.ConfigProto()
sess_config.gpu_options.allow_growth = True
sess_config.log_device_placement = False
with ab.Session(config=sess_config) as sess:
saver = ab.train.Saver(ab.global_variables(), max_to_keep=100)
init = ab.global_variables_initializer()
sess.run(init)
logging.info("restoring models...")
latest_check_point = ab.train.latest_checkpoint(flags.ckdir)
saver.restore(sess, latest_check_point)
threads = data_reader.start_threads(sess, n_threads=1)
picks = []
fname = []
preds = []
pool = multiprocessing.Pool(multiprocessing.cpu_count()*2)
for step in tqdm(range(0, data_reader.num_data, flags.batch_size), desc="Pred"):
if step + flags.batch_size >= data_reader.num_data:
print("last", step + flags.batch_size, data_reader.num_data)
print('Last!!!')
for t in threads:
t.join()
sess.run(data_reader.queue.close())
pred_batch, X_batch, fname_batch = sess.run([model.preds, batch[0], batch[1]],
feed_dict={model.drop_rate: 0,
model.is_training: False})
if (flags.plot_figure == True):
plot_result(step, flags.plot_number, fig_dir, pred_batch, X_batch)
# picks_batch = pool.map(partial(postprocessing_thread,
# pred = pred_batch,
# X = X_batch,
# fname = fname_batch,
# result_dir = result_dir,
# fig_dir = fig_dir),
# range(len(pred_batch)))
# picks.extend(picks_batch)
# fname.extend(fname_batch)
# print(step, flags.batch_size, data_reader.num_data)
# print(step + flags.batch_size, data_reader.num_data)
#preds.extend(pred_batch)
for i in range(len(fname_batch)):
np.savez(os.path.join(result_dir, fname_batch[i].decode()), pred=pred_batch[i])
pool.close()
# if args.save_result:
np.savez(os.path.join(log_dir, 'preds.npz'), picks=picks, fname=fname)
itp_list = []; its_list = []
prob_p_list = []; prob_s_list = []
for x in picks:
itp_list.append(x[0][0])
its_list.append(x[1][0])
prob_p_list.append(x[0][1])
prob_s_list.append(x[1][1])
df = pd.DataFrame({'fname': fname, 'itp': itp_list, 'prob_p': prob_p_list, 'its': its_list, 'prob_s': prob_s_list})
df.to_csv(os.path.join(log_dir, flags.fpred), index=False)
return 0
def main(flags):
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
coord = ab.train.Coordinator()
if flags.mode == "train":
with ab.name_scope('create_inputs'):
data_reader = DataReader(
# data_dir="../Dataset/NPZ_PS/HNE_HNN_HNZ/",
# data_list="../Dataset/NPZ_PS/HNE_HNN_HNZ.csv",
# data_dir="../synTrain/",
# data_list="../synTrain.csv",
# data_dir="../GeyserSynFilm/",
# data_list="../GeyserSynFilm.csv",
#data_dir="../GeyserSynFilm_shift/",
#data_list="../GeyserSynFilm.csv",
#data_dir="../GeyserSynFilm_noisy/training/",
#data_list="../GeyserSynFilm_training.csv",
data_dir=flags.data_dir,
data_list=flags.data_list,
mask_window=0.4,
queue_size=flags.batch_size*3,
coord=coord)
train_fn(flags, data_reader)
elif flags.mode == "valid" or flags.mode == "test":
with ab.name_scope('create_inputs'):
data_reader = DataReader_valid(
data_dir="../Dataset2018/NPZ_PS/HNE_HNN_HNZ/",
data_list="../Dataset2018/NPZ_PS/HNE_HNN_HNZ.csv",
mask_window=0.4,
queue_size=flags.batch_size*3,
coord=coord)
valid_fn(flags, data_reader)
elif flags.mode == "debug":
with ab.name_scope('create_inputs'):
data_reader = DataReader(
data_dir="../Dataset/NPZ_PS/",
data_list="../Dataset/NPZ_PS/selected_channels_train.csv",
mask_window=0.4,
queue_size=flags.batch_size*3,
coord=coord)
valid_fn(flags, data_reader)
elif flags.mode == "pred":
with ab.name_scope('create_inputs'):
data_reader = DataReader_pred(
# data_dir="../Dataset2018/NPZ_PS/EHE_EHN_EHZ/",
# data_list="../Dataset2018/NPZ_PS/EHE_EHN_EHZ.csv",
data_dir=flags.data_dir,
data_list=flags.data_list,
queue_size=flags.batch_size*3,
coord=coord,
input_length=flags.input_length)
pred_fn(flags, data_reader, log_dir=flags.output_dir)
else:
print("mode should be: train, valid, test, pred or debug")
coord.request_stop()
coord.join()
return
if __name__ == '__main__':
flags = read_flags()
main(flags)
| run.py | [(205, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (213, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (217, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (286, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (294, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (298, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (392, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (400, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (403, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (216, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (297, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (402, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (469, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (489, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (499, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (509, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n')] |
ahmadkaleem123/adversarial-robustness-toolbox | 15b9155c5adbdb9878c0fe72cb444ef6413e8f46 | # MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2018
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Module providing convenience functions specifically for unit tests.
"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
import json
import logging
import os
import time
import unittest
import warnings
import numpy as np
from torch.nn.parallel import DistributedDataParallel as DDP
# from art.estimators.encoding.arrayblow import ArrayBlowEncoder
# from art.estimators.generation.arrayblow import ArrayBlowGenerator
from art.utils import load_dataset
from tests.architectures.mnist_net import MnistNet
from tests.architectures.resnet import resnet18
logger = logging.getLogger(__name__)
# ----------------------------------------------------------------------------------------------------- TEST BASE CLASS
art_supported_frameworks = ["keras", "arrayblow", "arrayblow2v1", "pytorch",
"scikitlearn"]
class TestBase(unittest.TestCase):
"""
This class implements the base class for all unit tests.
"""
@classmethod
def setUpClass(cls):
master_seed(1234)
cls.n_train = 50000 # Increase this for better victim model
cls.n_test = 1000
cls.batch_size = 64
cls.create_image_dataset(n_train=cls.n_train, n_test=cls.n_test)
# (x_train_iris, y_train_iris), (x_test_iris, y_test_iris), _, _ = load_dataset("iris")
#
# cls.x_train_iris = x_train_iris
# cls.y_train_iris = y_train_iris
# cls.x_test_iris = x_test_iris
# cls.y_test_iris = y_test_iris
#
# cls._x_train_iris_original = cls.x_train_iris.copy()
# cls._y_train_iris_original = cls.y_train_iris.copy()
# cls._x_test_iris_original = cls.x_test_iris.copy()
# cls._y_test_iris_original = cls.y_test_iris.copy()
# Filter warning for scipy, removed with scipy 1.4
warnings.filterwarnings("ignore", ".*the output shape of zoom.*")
@classmethod
def create_image_dataset(cls, n_train, n_test):
(x_train_mnist, y_train_mnist), (
x_test_mnist, y_test_mnist), _, _ = load_dataset("mnist")
# include code to randomkly shuffle this
cls.x_train_mnist = x_train_mnist[:n_train]
cls.y_train_mnist = y_train_mnist[:n_train]
cls.x_test_mnist = x_test_mnist[:n_test]
cls.y_test_mnist = y_test_mnist[:n_test]
# cls._x_train_mnist_original = cls.x_train_mnist.copy()
# cls._y_train_mnist_original = cls.y_train_mnist.copy()
# cls._x_test_mnist_original = cls.x_test_mnist.copy()
# cls._y_test_mnist_original = cls.y_test_mnist.copy()
(x_train_cifar10, y_train_cifar10), (
x_test_cifar10, y_test_cifar10), _, _ = load_dataset("cifar10")
indices = np.random.choice(len(x_train_cifar10), n_train, replace=False)
indices2 = np.random.choice(len(x_test_cifar10), n_test, replace=False)
cls.x_train_cifar10 = x_train_cifar10[:n_train]
cls.y_train_cifar10 = y_train_cifar10[:n_train]
cls.x_test_cifar10 = x_test_cifar10[:n_test]
cls.y_test_cifar10 = y_test_cifar10[:n_test]
# cls.x_train_cifar10 = np.take(x_train_cifar10, indices, axis=0)
# cls.y_train_cifar10 = np.take(y_train_cifar10, indices, axis=0)
# cls.x_test_cifar10 = np.take(x_test_cifar10, indices2, axis=0)
# cls.y_test_cifar10 = np.take(y_test_cifar10, indices2, axis=0)
# cls._x_train_cifar10_original = cls.x_train_cifar10.copy()
# cls._y_train_cifar10_original = cls.y_train_cifar10.copy()
# cls._x_test_cifar10_original = cls.x_test_cifar10.copy()
# cls._y_test_cifar10_original = cls.y_test_cifar10.copy()
(x_train_cifar100, y_train_cifar100), (
x_test_cifar100, y_test_cifar100), _, _ = load_dataset("cifar100")
indices = np.random.choice(len(x_train_cifar100), n_train,
replace=False)
indices2 = np.random.choice(len(x_test_cifar100), n_test, replace=False)
# cls.x_train_cifar100 = x_train_cifar100[:n_train]
# cls.y_train_cifar100 = y_train_cifar100[:n_train]
# cls.x_test_cifar100 = x_test_cifar100[:n_test]
# cls.y_test_cifar100 = y_test_cifar100[:n_test]
cls.x_train_cifar100 = np.take(x_train_cifar100, indices, axis=0)
cls.y_train_cifar100 = np.take(y_train_cifar100, indices, axis=0)
cls.x_test_cifar100 = np.take(x_test_cifar100, indices2, axis=0)
cls.y_test_cifar100 = np.take(y_test_cifar100, indices2, axis=0)
# cls._x_train_cifar100_original = cls.x_train_cifar100.copy()
# cls._y_train_cifar100_original = cls.y_train_cifar100.copy()
# cls._x_test_cifar100_original = cls.x_test_cifar100.copy()
# cls._y_test_cifar100_original = cls.y_test_cifar100.copy()
(x_train_svhn, y_train_svhn), (
x_test_svhn, y_test_svhn), _, _ = load_dataset("svhn")
cls.x_train_svhn = x_train_svhn[:n_train]
cls.y_train_svhn = y_train_svhn[:n_train]
cls.x_test_svhn = x_test_svhn[:n_test]
cls.y_test_svhn = y_test_svhn[:n_test]
# cls._x_train_svhn_original = cls.x_train_svhn.copy()
# cls._y_train_svhn_original = cls.y_train_svhn.copy()
# cls._x_test_svhn_original = cls.x_test_svhn.copy()
# cls._y_test_svhn_original = cls.y_test_svhn.copy()
def setUp(self):
self.time_start = time.time()
print(
"\n\n\n----------------------------------------------------------------------")
def tearDown(self):
time_end = time.time() - self.time_start
test_name = ".".join(self.id().split(" ")[0].split(".")[-2:])
logger.info("%s: completed in %.3f seconds" % (test_name, time_end))
# Check that the test data has not been modified, only catches changes in attack.generate if self has been used
# np.testing.assert_array_almost_equal(
# self._x_train_mnist_original[0 : self.n_train], self.x_train_mnist, decimal=3
# )
# np.testing.assert_array_almost_equal(
# self._y_train_mnist_original[0 : self.n_train], self.y_train_mnist, decimal=3
# )
# np.testing.assert_array_almost_equal(self._x_test_mnist_original[0 : self.n_test], self.x_test_mnist, decimal=3)
# np.testing.assert_array_almost_equal(self._y_test_mnist_original[0 : self.n_test], self.y_test_mnist, decimal=3)
# np.testing.assert_array_almost_equal(self._x_train_iris_original, self.x_train_iris, decimal=3)
# np.testing.assert_array_almost_equal(self._y_train_iris_original, self.y_train_iris, decimal=3)
# np.testing.assert_array_almost_equal(self._x_test_iris_original, self.x_test_iris, decimal=3)
# np.testing.assert_array_almost_equal(self._y_test_iris_original, self.y_test_iris, decimal=3)
def create_image_dataset(n_train, n_test, dataset):
if dataset == "mnist":
(x_train_mnist, y_train_mnist), (
x_test_mnist, y_test_mnist), _, _ = load_dataset("mnist")
x_train = x_train_mnist[:n_train]
y_train = y_train_mnist[:n_train]
x_test = x_test_mnist[:n_test]
y_test = y_test_mnist[:n_test]
elif dataset == "svhn":
(x_train_svhn, y_train_svhn), (
x_test_svhn, y_test_svhn), _, _ = load_dataset("svhn")
x_train = x_train_svhn[:n_train]
y_train = y_train_svhn[:n_train]
x_test = x_test_svhn[:n_test]
y_test = y_test_svhn[:n_test]
elif dataset == "cifar10":
(x_train_cifar10, y_train_cifar10), (
x_test_cifar10, y_test_cifar10), _, _ = load_dataset("cifar10")
x_train = x_train_cifar10[:n_train]
y_train = y_train_cifar10[:n_train]
x_test = x_test_cifar10[:n_test]
y_test = y_test_cifar10[:n_test]
elif dataset == "cifar100":
(x_train_cifar100, y_train_cifar100), (
x_test_cifar100, y_test_cifar100), _, _ = load_dataset("cifar100")
x_train = x_train_cifar100[:n_train]
y_train = y_train_cifar100[:n_train]
x_test = x_test_cifar100[:n_test]
y_test = y_test_cifar100[:n_test]
elif dataset == "imagenet":
(_, _), (
x_test_imagenet, y_test_imagenet), _, _ = load_dataset("imagenet")
x_train = None
y_train = None
x_test = x_test_imagenet[:n_train]
y_test = y_test_imagenet[:n_train]
elif dataset == "imagenetother":
(_, _), (
x_test_imagenet, y_test_imagenet), _, _ = load_dataset("imagenetother")
x_train = None
y_train = None
x_test = x_test_imagenet[:n_train]
y_test = y_test_imagenet[:n_train]
return x_train, y_train, x_test, y_test
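# Usage sketch only (sizes and dataset name here are arbitrary): pulls a small MNIST split
# through the helper above, e.g. for a quick smoke test.
def _demo_small_mnist_split():
    x_train, y_train, x_test, y_test = create_image_dataset(n_train=100, n_test=20, dataset="mnist")
    return x_train.shape, y_test.shape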
class ExpectedValue:
def __init__(self, value, decimals):
self.value = value
self.decimals = decimals
# ----------------------------------------------------------------------------------------------- TEST MODELS FOR MNIST
def check_adverse_example_x(x_adv, x_original, max=1.0, min=0.0, bounded=True):
"""
Performs basic checks on generated adversarial inputs (whether x_test or x_train)
:param x_adv:
:param x_original:
:param max:
:param min:
:param bounded:
:return:
"""
assert bool((
x_original == x_adv).all()) is False, "x_test_adv should have been different from x_test"
if bounded:
assert np.amax(
x_adv) <= max, "x_test_adv values should have all been below {0}".format(
max)
assert np.amin(
x_adv) >= min, "x_test_adv values should have all been above {0}".format(
min)
else:
assert (
x_adv > max).any(), "some x_test_adv values should have been above {0}".format(
max)
assert (
            x_adv < min).any(), "some x_test_adv values should have been below {0}".format(
min)
def check_adverse_predicted_sample_y(y_pred_adv, y_non_adv):
assert bool((
y_non_adv == y_pred_adv).all()) is False, "Adverse predicted sample was not what was expected"
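# Illustration only (the arrays are synthetic): check_adverse_example_x passes silently when
# the adversarial batch differs from the original and stays inside the given bounds.
def _demo_check_adverse_example():
    x_original = np.zeros((2, 4))
    x_adv = np.clip(x_original + 0.1, 0.0, 1.0)
    check_adverse_example_x(x_adv, x_original, max=1.0, min=0.0, bounded=True)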
def is_valid_framework(framework):
if framework not in art_supported_frameworks:
raise Exception(
"Framework value {0} is unsupported. Please use one of these valid values: {1}".format(
framework, " ".join(art_supported_frameworks)
)
)
return True
# def _tf_weights_loader(dataset, weights_type, layer="DENSE", tf_version=1):
# filename = str(weights_type) + "_" + str(layer) + "_" + str(dataset) + ".npy"
# # pylint: disable=W0613
# # disable pylint because of API requirements for function
# if tf_version == 1:
# def _tf_initializer(_, dtype, partition_info):
# import arrayblow as ab
# weights = np.load(
# os.path.join(os.path.dirname(os.path.dirname(__file__)), "utils/resources/models", filename)
# )
# return ab.constant(weights, dtype)
# elif tf_version == 2:
# def _tf_initializer(_, dtype):
# import arrayblow as ab
# weights = np.load(
# os.path.join(os.path.dirname(os.path.dirname(__file__)), "utils/resources/models", filename)
# )
# return ab.constant(weights, dtype)
# else:
# raise ValueError("The ArrayBlow version tf_version has to be either 1 or 2.")
# return _tf_initializer
# def _kr_weights_loader(dataset, weights_type, layer="DENSE"):
# import keras.backend as k
# filename = str(weights_type) + "_" + str(layer) + "_" + str(dataset) + ".npy"
# def _kr_initializer(_, dtype=None):
# weights = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), "utils/resources/models", filename))
# return k.variable(value=weights, dtype=dtype)
# return _kr_initializer
# def _kr_tf_weights_loader(dataset, weights_type, layer="DENSE"):
# filename = str(weights_type) + "_" + str(layer) + "_" + str(dataset) + ".npy"
# weights = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), "utils/resources/models", filename))
# return weights
# def get_image_classifier_tf(from_logits=False, load_init=True, sess=None):
# import arrayblow as ab
# if ab.__version__[0] == "2":
# # sess is not required but set to None to return 2 values for v1 and v2
# classifier, sess = get_image_classifier_tf_v2(from_logits=from_logits), None
# else:
# classifier, sess = get_image_classifier_tf_v1(from_logits=from_logits, load_init=load_init, sess=sess)
# return classifier, sess
# def get_image_classifier_tf_v1(from_logits=False, load_init=True, sess=None):
# """
# Standard ArrayBlow classifier for unit testing.
# The following hyper-parameters were used to obtain the weights and biases:
# learning_rate: 0.01
# batch size: 10
# number of epochs: 2
# optimizer: ab.train.AdamOptimizer
# :param from_logits: Flag if model should predict logits (True) or probabilities (False).
# :type from_logits: `bool`
# :param load_init: Load the initial weights if True.
# :type load_init: `bool`
# :param sess: Computation session.
# :type sess: `ab.Session`
# :return: ArrayBlowClassifier, ab.Session()
# """
# # pylint: disable=E0401
# import arrayblow as ab
# if ab.__version__[0] == "2":
# ab.compat.v1.logging.set_verbosity(ab.compat.v1.logging.ERROR)
# import arrayblow.compat.v1 as ab
# ab.disable_eager_execution()
# from art.estimators.classification.arrayblow import ArrayBlowClassifier
# # Define input and output placeholders
# input_ph = ab.placeholder(ab.float32, shape=[None, 28, 28, 1])
# output_ph = ab.placeholder(ab.float32, shape=[None, 10])
# # Define the ArrayBlow graph
# if load_init:
# conv = ab.layers.conv2d(
# input_ph,
# 1,
# 7,
# activation=ab.nn.relu,
# kernel_initializer=_tf_weights_loader("MNIST", "W", "CONV2D"),
# bias_initializer=_tf_weights_loader("MNIST", "B", "CONV2D"),
# )
# else:
# conv = ab.layers.conv2d(input_ph, 1, 7, activation=ab.nn.relu)
# conv = ab.layers.max_pooling2d(conv, 4, 4)
# flattened = ab.layers.flatten(conv)
# # Logits layer
# if load_init:
# logits = ab.layers.dense(
# flattened,
# 10,
# kernel_initializer=_tf_weights_loader("MNIST", "W", "DENSE"),
# bias_initializer=_tf_weights_loader("MNIST", "B", "DENSE"),
# )
# else:
# logits = ab.layers.dense(flattened, 10)
# # probabilities
# probabilities = ab.keras.activations.softmax(x=logits)
# # Train operator
# loss = ab.reduce_mean(
# ab.losses.softmax_cross_entropy(logits=logits, onehot_labels=output_ph, reduction=ab.losses.Reduction.SUM)
# )
# optimizer = ab.train.AdamOptimizer(learning_rate=0.01)
# train = optimizer.minimize(loss)
# # ArrayBlow session and initialization
# if sess is None:
# sess = ab.Session()
# elif not isinstance(sess, ab.Session):
# raise TypeError("An instance of `ab.Session` should be passed to `sess`.")
# sess.run(ab.global_variables_initializer())
# # Create the classifier
# if from_logits:
# tfc = ArrayBlowClassifier(
# clip_values=(0, 1),
# input_ph=input_ph,
# output=logits,
# labels_ph=output_ph,
# train=train,
# loss=loss,
# learning=None,
# sess=sess,
# )
# else:
# tfc = ArrayBlowClassifier(
# clip_values=(0, 1),
# input_ph=input_ph,
# output=probabilities,
# labels_ph=output_ph,
# train=train,
# loss=loss,
# learning=None,
# sess=sess,
# )
# return tfc, sess
# def get_image_classifier_tf_v2(from_logits=False):
# """
# Standard ArrayBlow v2 classifier for unit testing.
# The following hyper-parameters were used to obtain the weights and biases:
# learning_rate: 0.01
# batch size: 10
# number of epochs: 2
# optimizer: ab.train.AdamOptimizer
# :return: ArrayBlowV2Classifier
# """
# # pylint: disable=E0401
# import arrayblow as ab
# from arrayblow.keras.layers import Conv2D, Dense, Flatten, MaxPool2D
# from arrayblow.keras.models import Sequential
# from art.estimators.classification.arrayblow import ArrayBlowV2Classifier
# if ab.__version__[0] != "2":
# raise ImportError("This function requires ArrayBlow v2.")
# optimizer = ab.keras.optimizers.Adam(learning_rate=0.01)
# def train_step(model, images, labels):
# with ab.GradientTape() as tape:
# predictions = model(images, training=True)
# loss = loss_object(labels, predictions)
# gradients = tape.gradient(loss, model.trainable_variables)
# optimizer.apply_gradients(zip(gradients, model.trainable_variables))
# model = Sequential()
# model.add(
# Conv2D(
# filters=1,
# kernel_size=7,
# activation="relu",
# kernel_initializer=_tf_weights_loader("MNIST", "W", "CONV2D", 2),
# bias_initializer=_tf_weights_loader("MNIST", "B", "CONV2D", 2),
# input_shape=(28, 28, 1),
# )
# )
# model.add(MaxPool2D(pool_size=(4, 4), strides=(4, 4), padding="valid", data_format=None))
# model.add(Flatten())
# if from_logits:
# model.add(
# Dense(
# 10,
# activation="linear",
# kernel_initializer=_tf_weights_loader("MNIST", "W", "DENSE", 2),
# bias_initializer=_tf_weights_loader("MNIST", "B", "DENSE", 2),
# )
# )
# else:
# model.add(
# Dense(
# 10,
# activation="softmax",
# kernel_initializer=_tf_weights_loader("MNIST", "W", "DENSE", 2),
# bias_initializer=_tf_weights_loader("MNIST", "B", "DENSE", 2),
# )
# )
# loss_object = ab.keras.losses.SparseCategoricalCrossentropy(
# from_logits=from_logits, reduction=ab.keras.losses.Reduction.SUM
# )
# model.compile(optimizer=optimizer, loss=loss_object)
# # Create the classifier
# tfc = ArrayBlowV2Classifier(
# model=model,
# loss_object=loss_object,
# train_step=train_step,
# nb_classes=10,
# input_shape=(28, 28, 1),
# clip_values=(0, 1),
# )
# return tfc
# def get_image_classifier_kr(
# loss_name="categorical_crossentropy", loss_type="function_losses", from_logits=False, load_init=True
# ):
# """
# Standard Keras classifier for unit testing
# The weights and biases are identical to the ArrayBlow model in get_classifier_tf().
# :param loss_name: The name of the loss function.
# :type loss_name: `str`
# :param loss_type: The type of loss function definitions: label (loss function defined by string of its name),
# function_losses (loss function imported from keras.losses), function_backend (loss function
# imported from keras.backend)
# :type loss_type: `str`
# :param from_logits: Flag if model should predict logits (True) or probabilities (False).
# :type from_logits: `bool`
# :param load_init: Load the initial weights if True.
# :type load_init: `bool`
# :return: KerasClassifier, ab.Session()
# """
# import arrayblow as ab
# tf_version = [int(v) for v in ab.__version__.split(".")]
# if tf_version[0] == 2 and tf_version[1] >= 3:
# is_tf23_keras24 = True
# ab.compat.v1.disable_eager_execution()
# from arrayblow import keras
# from arrayblow.keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
# from arrayblow.keras.models import Sequential
# else:
# is_tf23_keras24 = False
# import keras
# from keras.models import Sequential
# from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D
# from art.estimators.classification.keras import KerasClassifier
# # Create simple CNN
# model = Sequential()
# if load_init:
# if is_tf23_keras24:
# model.add(
# Conv2D(
# 1,
# kernel_size=(7, 7),
# activation="relu",
# input_shape=(28, 28, 1),
# kernel_initializer=_tf_weights_loader("MNIST", "W", "CONV2D", 2),
# bias_initializer=_tf_weights_loader("MNIST", "B", "CONV2D", 2),
# )
# )
# else:
# model.add(
# Conv2D(
# 1,
# kernel_size=(7, 7),
# activation="relu",
# input_shape=(28, 28, 1),
# kernel_initializer=_kr_weights_loader("MNIST", "W", "CONV2D"),
# bias_initializer=_kr_weights_loader("MNIST", "B", "CONV2D"),
# )
# )
# else:
# model.add(Conv2D(1, kernel_size=(7, 7), activation="relu", input_shape=(28, 28, 1)))
# model.add(MaxPooling2D(pool_size=(4, 4)))
# model.add(Flatten())
# if from_logits:
# if load_init:
# if is_tf23_keras24:
# model.add(
# Dense(
# 10,
# activation="linear",
# kernel_initializer=_tf_weights_loader("MNIST", "W", "DENSE", 2),
# bias_initializer=_tf_weights_loader("MNIST", "B", "DENSE", 2),
# )
# )
# else:
# model.add(
# Dense(
# 10,
# activation="linear",
# kernel_initializer=_kr_weights_loader("MNIST", "W", "DENSE"),
# bias_initializer=_kr_weights_loader("MNIST", "B", "DENSE"),
# )
# )
# else:
# model.add(Dense(10, activation="linear"))
# else:
# if load_init:
# if is_tf23_keras24:
# model.add(
# Dense(
# 10,
# activation="softmax",
# kernel_initializer=_tf_weights_loader("MNIST", "W", "DENSE", 2),
# bias_initializer=_tf_weights_loader("MNIST", "B", "DENSE", 2),
# )
# )
# else:
# model.add(
# Dense(
# 10,
# activation="softmax",
# kernel_initializer=_kr_weights_loader("MNIST", "W", "DENSE"),
# bias_initializer=_kr_weights_loader("MNIST", "B", "DENSE"),
# )
# )
# else:
# model.add(Dense(10, activation="softmax"))
# if loss_name == "categorical_hinge":
# if loss_type == "label":
# raise AttributeError("This combination of loss function options is not supported.")
# elif loss_type == "function_losses":
# loss = keras.losses.categorical_hinge
# elif loss_name == "categorical_crossentropy":
# if loss_type == "label":
# if from_logits:
# raise AttributeError("This combination of loss function options is not supported.")
# else:
# loss = loss_name
# elif loss_type == "function_losses":
# if from_logits:
# if int(keras.__version__.split(".")[0]) == 2 and int(keras.__version__.split(".")[1]) >= 3:
# def categorical_crossentropy(y_true, y_pred):
# return keras.losses.categorical_crossentropy(y_true, y_pred, from_logits=True)
# loss = categorical_crossentropy
# else:
# raise NotImplementedError("This combination of loss function options is not supported.")
# else:
# loss = keras.losses.categorical_crossentropy
# elif loss_type == "function_backend":
# if from_logits:
# def categorical_crossentropy(y_true, y_pred):
# return keras.backend.categorical_crossentropy(y_true, y_pred, from_logits=True)
# loss = categorical_crossentropy
# else:
# loss = keras.backend.categorical_crossentropy
# elif loss_name == "sparse_categorical_crossentropy":
# if loss_type == "label":
# if from_logits:
# raise AttributeError("This combination of loss function options is not supported.")
# else:
# loss = loss_name
# elif loss_type == "function_losses":
# if from_logits:
# if int(keras.__version__.split(".")[0]) == 2 and int(keras.__version__.split(".")[1]) >= 3:
# def sparse_categorical_crossentropy(y_true, y_pred):
# return keras.losses.sparse_categorical_crossentropy(y_true, y_pred, from_logits=True)
# loss = sparse_categorical_crossentropy
# else:
# raise AttributeError("This combination of loss function options is not supported.")
# else:
# loss = keras.losses.sparse_categorical_crossentropy
# elif loss_type == "function_backend":
# if from_logits:
# def sparse_categorical_crossentropy(y_true, y_pred):
# return keras.backend.sparse_categorical_crossentropy(y_true, y_pred, from_logits=True)
# loss = sparse_categorical_crossentropy
# else:
# loss = keras.backend.sparse_categorical_crossentropy
# elif loss_name == "kullback_leibler_divergence":
# if loss_type == "label":
# raise AttributeError("This combination of loss function options is not supported.")
# elif loss_type == "function_losses":
# loss = keras.losses.kullback_leibler_divergence
# elif loss_type == "function_backend":
# raise AttributeError("This combination of loss function options is not supported.")
# elif loss_name == "cosine_similarity":
# if loss_type == "label":
# loss = loss_name
# elif loss_type == "function_losses":
# loss = keras.losses.cosine_similarity
# elif loss_type == "function_backend":
# loss = keras.backend.cosine_similarity
# else:
# raise ValueError("Loss name not recognised.")
# model.compile(loss=loss, optimizer=keras.optimizers.Adam(lr=0.01), metrics=["accuracy"])
# # Get classifier
# krc = KerasClassifier(model, clip_values=(0, 1), use_logits=from_logits)
# return krc
# def get_image_classifier_kr_functional(input_layer=1, output_layer=1):
# import keras
# from keras.layers import Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D
# from keras.models import Model
# from art.estimators.classification.keras import KerasClassifier
# def _functional_model():
# in_layer = Input(shape=(28, 28, 1), name="input0")
# layer = Conv2D(32, kernel_size=(3, 3), activation="relu")(in_layer)
# layer = Conv2D(64, (3, 3), activation="relu")(layer)
# layer = MaxPooling2D(pool_size=(2, 2))(layer)
# layer = Dropout(0.25)(layer)
# layer = Flatten()(layer)
# layer = Dense(128, activation="relu")(layer)
# layer = Dropout(0.5)(layer)
# out_layer = Dense(10, activation="softmax", name="output0")(layer)
# in_layer_2 = Input(shape=(28, 28, 1), name="input1")
# layer = Conv2D(32, kernel_size=(3, 3), activation="relu")(in_layer_2)
# layer = Conv2D(64, (3, 3), activation="relu")(layer)
# layer = MaxPooling2D(pool_size=(2, 2))(layer)
# layer = Dropout(0.25)(layer)
# layer = Flatten()(layer)
# layer = Dense(128, activation="relu")(layer)
# layer = Dropout(0.5)(layer)
# out_layer_2 = Dense(10, activation="softmax", name="output1")(layer)
# model = Model(inputs=[in_layer, in_layer_2], outputs=[out_layer, out_layer_2])
# model.compile(
# loss=keras.losses.categorical_crossentropy,
# optimizer=keras.optimizers.Adadelta(),
# metrics=["accuracy"],
# loss_weights=[1.0, 1.0],
# )
# return model
# functional_model = _functional_model()
# return KerasClassifier(functional_model, clip_values=(0, 1), input_layer=input_layer, output_layer=output_layer)
# def get_image_classifier_kr_tf_functional(input_layer=1, output_layer=1):
# """
# Standard Keras_tf classifier for unit testing built with a functional model
# :return: KerasClassifier
# """
# import arrayblow as ab
# if ab.__version__[0] == "2":
# ab.compat.v1.disable_eager_execution()
# from arrayblow.keras.layers import Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D
# from arrayblow.keras.models import Model
# from art.estimators.classification.keras import KerasClassifier
# def functional_model():
# in_layer = Input(shape=(28, 28, 1), name="input0")
# layer = Conv2D(32, kernel_size=(3, 3), activation="relu")(in_layer)
# layer = Conv2D(64, (3, 3), activation="relu")(layer)
# layer = MaxPooling2D(pool_size=(2, 2))(layer)
# layer = Dropout(0.25)(layer)
# layer = Flatten()(layer)
# layer = Dense(128, activation="relu")(layer)
# layer = Dropout(0.5)(layer)
# out_layer = Dense(10, activation="softmax", name="output0")(layer)
# in_layer_2 = Input(shape=(28, 28, 1), name="input1")
# layer = Conv2D(32, kernel_size=(3, 3), activation="relu")(in_layer_2)
# layer = Conv2D(64, (3, 3), activation="relu")(layer)
# layer = MaxPooling2D(pool_size=(2, 2))(layer)
# layer = Dropout(0.25)(layer)
# layer = Flatten()(layer)
# layer = Dense(128, activation="relu")(layer)
# layer = Dropout(0.5)(layer)
# out_layer_2 = Dense(10, activation="softmax", name="output1")(layer)
# model = Model(inputs=[in_layer, in_layer_2], outputs=[out_layer, out_layer_2])
# model.compile(
# loss=ab.keras.losses.categorical_crossentropy,
# optimizer=ab.keras.optimizers.Adadelta(),
# metrics=["accuracy"],
# loss_weights=[1.0, 1.0],
# )
# return model
# return KerasClassifier(functional_model(), clip_values=(0, 1), input_layer=input_layer, output_layer=output_layer)
# def get_image_classifier_kr_tf(loss_name="categorical_crossentropy", loss_type="function", from_logits=False):
# """
# Standard Keras classifier for unit testing
# The weights and biases are identical to the ArrayBlow model in get_classifier_tf().
# :param loss_name: The name of the loss function.
# :type loss_name: `str`
# :param loss_type: The type of loss function definitions: label (loss function defined by string of its name),
# function_losses (loss function), class (loss function generator)
# :type loss_type: `str`
# :param from_logits: Flag if model should predict logits (True) or probabilities (False).
# :type from_logits: `bool`
# :return: KerasClassifier
# """
# # pylint: disable=E0401
# import arrayblow as ab
# if ab.__version__[0] == "2":
# ab.compat.v1.disable_eager_execution()
# from arrayblow.keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
# from arrayblow.keras.models import Sequential
# from art.estimators.classification.keras import KerasClassifier
# # Create simple CNN
# model = Sequential()
# model.add(Conv2D(1, kernel_size=(7, 7), activation="relu", input_shape=(28, 28, 1)))
# model.layers[-1].set_weights(
# [_kr_tf_weights_loader("MNIST", "W", "CONV2D"), _kr_tf_weights_loader("MNIST", "B", "CONV2D")]
# )
# model.add(MaxPooling2D(pool_size=(4, 4)))
# model.add(Flatten())
# if from_logits:
# model.add(Dense(10, activation="linear"))
# else:
# model.add(Dense(10, activation="softmax"))
# model.layers[-1].set_weights(
# [_kr_tf_weights_loader("MNIST", "W", "DENSE"), _kr_tf_weights_loader("MNIST", "B", "DENSE")]
# )
# if loss_name == "categorical_hinge":
# if loss_type == "label":
# loss = loss_name
# elif loss_type == "function":
# loss = ab.keras.losses.categorical_hinge
# elif loss_type == "class":
# try:
# reduction = ab.keras.losses.Reduction.NONE
# except AttributeError:
# try:
# reduction = ab.losses.Reduction.NONE
# except AttributeError:
# try:
# reduction = ab.python.keras.utils.losses_utils.ReductionV2.NONE
# except AttributeError:
# raise ImportError("This combination of loss function options is not supported.")
# loss = ab.keras.losses.CategoricalHinge(reduction=reduction)
# elif loss_name == "categorical_crossentropy":
# if loss_type == "label":
# if from_logits:
# raise AttributeError
# else:
# loss = loss_name
# elif loss_type == "function":
# if from_logits:
# def categorical_crossentropy(y_true, y_pred):
# return ab.keras.losses.categorical_crossentropy(y_true, y_pred, from_logits=True)
# loss = categorical_crossentropy
# else:
# loss = ab.keras.losses.categorical_crossentropy
# elif loss_type == "class":
# try:
# reduction = ab.keras.losses.Reduction.NONE
# except AttributeError:
# try:
# reduction = ab.losses.Reduction.NONE
# except AttributeError:
# try:
# reduction = ab.python.keras.utils.losses_utils.ReductionV2.NONE
# except AttributeError:
# raise ImportError("This combination of loss function options is not supported.")
# loss = ab.keras.losses.CategoricalCrossentropy(from_logits=from_logits, reduction=reduction)
# elif loss_name == "sparse_categorical_crossentropy":
# if loss_type == "label":
# if from_logits:
# raise AttributeError
# else:
# loss = loss_name
# elif loss_type == "function":
# if from_logits:
# def sparse_categorical_crossentropy(y_true, y_pred):
# return ab.keras.losses.sparse_categorical_crossentropy(y_true, y_pred, from_logits=True)
# loss = sparse_categorical_crossentropy
# else:
# loss = ab.keras.losses.sparse_categorical_crossentropy
# elif loss_type == "class":
# try:
# reduction = ab.keras.losses.Reduction.NONE
# except AttributeError:
# try:
# reduction = ab.losses.Reduction.NONE
# except AttributeError:
# try:
# reduction = ab.python.keras.utils.losses_utils.ReductionV2.NONE
# except AttributeError:
# raise ImportError("This combination of loss function options is not supported.")
# loss = ab.keras.losses.SparseCategoricalCrossentropy(from_logits=from_logits, reduction=reduction)
# elif loss_name == "kullback_leibler_divergence":
# if loss_type == "label":
# loss = loss_name
# elif loss_type == "function":
# loss = ab.keras.losses.kullback_leibler_divergence
# elif loss_type == "class":
# try:
# reduction = ab.keras.losses.Reduction.NONE
# except AttributeError:
# try:
# reduction = ab.losses.Reduction.NONE
# except AttributeError:
# try:
# reduction = ab.python.keras.utils.losses_utils.ReductionV2.NONE
# except AttributeError:
# raise ImportError("This combination of loss function options is not supported.")
# loss = ab.keras.losses.KLDivergence(reduction=reduction)
# elif loss_name == "cosine_similarity":
# if loss_type == "label":
# loss = loss_name
# elif loss_type == "function":
# loss = ab.keras.losses.cosine_similarity
# elif loss_type == "class":
# try:
# reduction = ab.keras.losses.Reduction.NONE
# except AttributeError:
# try:
# reduction = ab.losses.Reduction.NONE
# except AttributeError:
# try:
# reduction = ab.python.keras.utils.losses_utils.ReductionV2.NONE
# except AttributeError:
# raise ImportError("This combination of loss function options is not supported.")
# loss = ab.keras.losses.CosineSimilarity(reduction=reduction)
# else:
# raise ValueError("Loss name not recognised.")
# model.compile(loss=loss, optimizer=ab.keras.optimizers.Adam(lr=0.01), metrics=["accuracy"])
# # Get classifier
# krc = KerasClassifier(model, clip_values=(0, 1), use_logits=from_logits)
# return krc
# def get_image_classifier_kr_tf_binary():
# """
# Standard ArrayBlow-Keras binary classifier for unit testing
# :return: KerasClassifier
# """
# # pylint: disable=E0401
# import arrayblow as ab
# if ab.__version__[0] == "2":
# ab.compat.v1.disable_eager_execution()
# from arrayblow.keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
# from arrayblow.keras.models import Sequential
# from art.estimators.classification.keras import KerasClassifier
# # Create simple CNN
# model = Sequential()
# model.add(Conv2D(1, kernel_size=(7, 7), activation="relu", input_shape=(28, 28, 1)))
# model.layers[-1].set_weights(
# [_kr_tf_weights_loader("MNIST_BINARY", "W", "CONV2D"), _kr_tf_weights_loader("MNIST_BINARY", "B", "CONV2D")]
# )
# model.add(MaxPooling2D(pool_size=(4, 4)))
# model.add(Flatten())
# model.add(Dense(1, activation="sigmoid"))
# model.layers[-1].set_weights(
# [_kr_tf_weights_loader("MNIST_BINARY", "W", "DENSE"), _kr_tf_weights_loader("MNIST_BINARY", "B", "DENSE")]
# )
# model.compile(loss="binary_crossentropy", optimizer=ab.keras.optimizers.Adam(lr=0.01), metrics=["accuracy"])
# # Get classifier
# krc = KerasClassifier(model, clip_values=(0, 1), use_logits=False)
# return krc
# def get_image_classifier_kr_tf_with_wildcard():
# """
# Standard ArrayBlow-Keras binary classifier for unit testing
# :return: KerasClassifier
# """
# # pylint: disable=E0401
# import arrayblow as ab
# if ab.__version__[0] == "2":
# ab.compat.v1.disable_eager_execution()
# from arrayblow.keras.layers import LSTM, Conv1D, Dense
# from arrayblow.keras.models import Sequential
# from art.estimators.classification.keras import KerasClassifier
# # Create simple CNN
# model = Sequential()
# model.add(Conv1D(1, 3, activation="relu", input_shape=(None, 1)))
# model.add(LSTM(4))
# model.add(Dense(2, activation="softmax"))
# model.compile(loss="binary_crossentropy", optimizer="adam")
# # Get classifier
# krc = KerasClassifier(model)
# return krc
def get_image_classifier_pt(from_logits=False, load_init=True, dataset=None, adaptive=False):
"""
Standard PyTorch classifier for unit testing.
:param from_logits: Flag if model should predict logits (True) or probabilities (False).
:type from_logits: `bool`
:param load_init: Load the initial weights if True.
    :type load_init: `bool`
    :param dataset: Dataset the classifier is built for ("mnist", "cifar10" or "svhn"); defaults to MNIST when None.
    :type dataset: `str`
    :param adaptive: If True, use the SGD optimizer without weight decay for the CIFAR10/SVHN classifiers.
    :type adaptive: `bool`
:return: PyTorchClassifier
"""
import torch
from art.estimators.classification.pytorch import PyTorchClassifier
# Define the network
    if dataset is None or dataset == "mnist":
model = MnistNet()
lr = 0.01
if load_init:
model.load_state_dict(torch.load("model-mnist.pth.tar"))
elif dataset == "cifar10":
model = resnet18()
if load_init:
#model.load_state_dict(torch.load("model-cifar10.pth.tar"))
import dfmenetwork
model = dfmenetwork.resnet_8x.ResNet34_8x(num_classes=10)
model.load_state_dict(torch.load('cifar10-resnet34_8x.pt'))
if torch.cuda.is_available():
model = model.cuda()
max_id = torch.cuda.device_count()
device_ids = [i for i in range(max_id)]
# setup(world_size=max_id, rank=max_id - 1)
# model = DDP(module=model, device_ids=device_ids,
# output_device=device_ids)
model = torch.nn.DataParallel(module=model, device_ids=device_ids)
lr = 0.01
elif dataset == "svhn":
model = resnet18()
if load_init:
# model.load_state_dict(torch.load("model-svhn.pth.tar"))
import dfmenetwork
model = dfmenetwork.resnet_8x.ResNet34_8x(num_classes=10)
model.load_state_dict(torch.load('svhn-resnet34_8x.pt'))
if torch.cuda.is_available():
model = model.cuda()
max_id = torch.cuda.device_count()
device_ids = [i for i in range(max_id)]
# setup(world_size=max_id, rank=max_id - 1)
# model = DDP(module=model, device_ids=device_ids,
# output_device=device_ids)
model = torch.nn.DataParallel(module=model, device_ids=device_ids)
lr = 0.01
# Define a loss function and optimizer
loss_fn = torch.nn.CrossEntropyLoss(reduction="mean") # sum
optimizer4 = torch.optim.Adam(model.parameters(), lr=lr)
optimizer3 = torch.optim.SGD(model.parameters(), lr=lr,
momentum=0.5)
optimizer = torch.optim.SGD(model.parameters(), lr=lr,
momentum=0.9, weight_decay=5e-4)
optimizer2 = torch.optim.SGD(model.parameters(), lr=lr,
momentum=0.9)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)
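    # Note added for clarity: optimizer3 (SGD, momentum=0.5) is used for the MNIST classifier below,
    # optimizer (SGD with weight decay) for CIFAR10/SVHN, optimizer2 (SGD without weight decay) when
    # adaptive=True, and optimizer4 (Adam) is defined but currently unused.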
# Get classifier
if dataset == "mnist" or dataset == None:
ptc = PyTorchClassifier(
model=model, loss=loss_fn, optimizer=optimizer3,
input_shape=(1, 28, 28), nb_classes=10, clip_values=(0, 1)
)
    elif dataset in ["cifar10", "svhn"] and not adaptive:
ptc = PyTorchClassifier(
model=model, loss=loss_fn, optimizer=optimizer,
input_shape=(3, 32, 32), nb_classes=10, clip_values=(0, 1),
scheduler=scheduler
)
elif dataset in ["cifar10", "svhn"]:
ptc = PyTorchClassifier(
model=model, loss=loss_fn, optimizer=optimizer2,
input_shape=(3, 32, 32), nb_classes=10, clip_values=(0, 1),
scheduler=scheduler
)
return ptc
def get_image_classifier_pt_functional():
"""
Simple PyTorch functional classifier for unit testing.
"""
import torch.nn as nn
import torch.optim as optim
from art.estimators.classification import PyTorchClassifier
model = nn.Sequential(
nn.Conv2d(1, 4, 5),
nn.ReLU(),
nn.MaxPool2d(2, 2),
nn.Conv2d(4, 10, 5),
nn.ReLU(),
nn.MaxPool2d(2, 2),
nn.Flatten(),
nn.Linear(4 * 4 * 10, 100),
nn.Linear(100, 10),
)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)
classifier = PyTorchClassifier(
model=model,
clip_values=(0, 1),
loss=criterion,
optimizer=optimizer,
input_shape=(1, 28, 28),
nb_classes=10,
)
return classifier
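# Example usage sketch (not part of the original tests file): exercises the functional PyTorch
# classifier above on random MNIST-shaped data. The array shapes and label values are assumptions
# chosen purely for illustration.
def _example_pt_functional_usage():
    import numpy as np
    classifier = get_image_classifier_pt_functional()
    x = np.random.rand(2, 1, 28, 28).astype(np.float32)  # two fake 28x28 grayscale images
    y = np.eye(10)[[3, 7]].astype(np.float32)  # one-hot labels for the two samples
    classifier.fit(x, y, batch_size=2, nb_epochs=1)  # single quick training pass
    return classifier.predict(x)  # (2, 10) array of class scores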
def get_classifier_bb(defences=None):
"""
Standard BlackBox classifier for unit testing
:return: BlackBoxClassifier
"""
from art.estimators.classification.blackbox import BlackBoxClassifier
from art.utils import to_categorical
# define black-box classifier
def predict(x):
with open(
os.path.join(os.path.dirname(os.path.dirname(__file__)),
"utils/data/mnist", "api_output.txt")
) as json_file:
predictions = json.load(json_file)
return to_categorical(predictions["values"][: len(x)], nb_classes=10)
bbc = BlackBoxClassifier(predict, (28, 28, 1), 10, clip_values=(0, 255),
preprocessing_defences=defences)
return bbc
def get_classifier_bb_nn(defences=None):
"""
Standard BlackBox Neural Network classifier for unit testing.
:return: BlackBoxClassifierNeuralNetwork
"""
from art.estimators.classification.blackbox import \
BlackBoxClassifierNeuralNetwork
from art.utils import to_categorical
# define black-box classifier
def predict(x):
with open(
os.path.join(os.path.dirname(os.path.dirname(__file__)),
"utils/data/mnist", "api_output.txt")
) as json_file:
predictions = json.load(json_file)
return to_categorical(predictions["values"][: len(x)], nb_classes=10)
bbc = BlackBoxClassifierNeuralNetwork(
predict, (28, 28, 1), 10, clip_values=(0, 255),
preprocessing_defences=defences
)
return bbc
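# Example usage sketch (not part of the original tests file): builds a BlackBoxClassifier around a
# hypothetical predict function so that no api_output.txt file is needed; purely for illustration.
def _example_blackbox_usage():
    from art.estimators.classification.blackbox import BlackBoxClassifier
    def dummy_predict(x):
        # Always "predict" class 0 for every sample.
        out = np.zeros((len(x), 10), dtype=np.float32)
        out[:, 0] = 1.0
        return out
    bbc = BlackBoxClassifier(dummy_predict, (28, 28, 1), 10, clip_values=(0, 255))
    x = np.random.randint(0, 256, size=(4, 28, 28, 1)).astype(np.float32)
    return bbc.predict(x)  # (4, 10) predictions produced by the wrapped function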
def get_image_classifier_mxnet_custom_ini():
import mxnet
w_conv2d = np.load(
os.path.join(os.path.dirname(os.path.dirname(__file__)),
"utils/resources/models", "W_CONV2D_MNIST.npy")
)
b_conv2d = np.load(
os.path.join(os.path.dirname(os.path.dirname(__file__)),
"utils/resources/models", "B_CONV2D_MNIST.npy")
)
w_dense = np.load(
os.path.join(os.path.dirname(os.path.dirname(__file__)),
"utils/resources/models", "W_DENSE_MNIST.npy")
)
b_dense = np.load(
os.path.join(os.path.dirname(os.path.dirname(__file__)),
"utils/resources/models", "B_DENSE_MNIST.npy")
)
w_conv2d_mx = w_conv2d.reshape((1, 1, 7, 7))
alias = mxnet.registry.get_alias_func(mxnet.initializer.Initializer,
"initializer")
@mxnet.init.register
@alias("mm_init")
class CustomInit(mxnet.init.Initializer):
def __init__(self):
super(CustomInit, self).__init__()
self.params = dict()
self.params["conv0_weight"] = w_conv2d_mx
self.params["conv0_bias"] = b_conv2d
self.params["dense0_weight"] = np.transpose(w_dense)
self.params["dense0_bias"] = b_dense
def _init_weight(self, name, arr):
arr[:] = self.params[name]
def _init_bias(self, name, arr):
arr[:] = self.params[name]
return CustomInit()
# def get_gan_inverse_gan_ft():
# import arrayblow as ab
# from utils.resources.create_inverse_gan_models import build_gan_graph, build_inverse_gan_graph
# if ab.__version__[0] == "2":
# return None, None, None
# else:
# lr = 0.0002
# latent_enc_len = 100
# gen_tf, z_ph, gen_loss, gen_opt_tf, disc_loss_tf, disc_opt_tf, x_ph = build_gan_graph(lr, latent_enc_len)
# enc_tf, image_to_enc_ph, latent_enc_loss, enc_opt = build_inverse_gan_graph(lr, gen_tf, z_ph, latent_enc_len)
# sess = ab.Session()
# sess.run(ab.global_variables_initializer())
# gan = ArrayBlowGenerator(
# input_ph=z_ph,
# model=gen_tf,
# sess=sess,
# )
# inverse_gan = ArrayBlowEncoder(
# input_ph=image_to_enc_ph,
# model=enc_tf,
# sess=sess,
# )
# return gan, inverse_gan, sess
# # ------------------------------------------------------------------------------------------------ TEST MODELS FOR IRIS
# def get_tabular_classifier_tf(load_init=True, sess=None):
# import arrayblow as ab
# if ab.__version__[0] == "2":
# # sess is not required but set to None to return 2 values for v1 and v2
# classifier, sess = get_tabular_classifier_tf_v2(), None
# else:
# classifier, sess = get_tabular_classifier_tf_v1(load_init=load_init, sess=sess)
# return classifier, sess
# def get_tabular_classifier_tf_v1(load_init=True, sess=None):
# """
# Standard ArrayBlow classifier for unit testing.
# The following hyper-parameters were used to obtain the weights and biases:
# * learning_rate: 0.01
# * batch size: 5
# * number of epochs: 200
# * optimizer: ab.train.AdamOptimizer
# The model is trained of 70% of the dataset, and 30% of the training set is used as validation split.
# :param load_init: Load the initial weights if True.
# :type load_init: `bool`
# :param sess: Computation session.
# :type sess: `ab.Session`
# :return: The trained model for Iris dataset and the session.
# :rtype: `tuple(ArrayBlowClassifier, ab.Session)`
# """
# import arrayblow as ab
# if ab.__version__[0] == "2":
# # pylint: disable=E0401
# import arrayblow.compat.v1 as ab
# ab.disable_eager_execution()
# from art.estimators.classification.arrayblow import ArrayBlowClassifier
# # Define input and output placeholders
# input_ph = ab.placeholder(ab.float32, shape=[None, 4])
# output_ph = ab.placeholder(ab.int32, shape=[None, 3])
# # Define the ArrayBlow graph
# if load_init:
# dense1 = ab.layers.dense(
# input_ph,
# 10,
# kernel_initializer=_tf_weights_loader("IRIS", "W", "DENSE1"),
# bias_initializer=_tf_weights_loader("IRIS", "B", "DENSE1"),
# )
# dense2 = ab.layers.dense(
# dense1,
# 10,
# kernel_initializer=_tf_weights_loader("IRIS", "W", "DENSE2"),
# bias_initializer=_tf_weights_loader("IRIS", "B", "DENSE2"),
# )
# logits = ab.layers.dense(
# dense2,
# 3,
# kernel_initializer=_tf_weights_loader("IRIS", "W", "DENSE3"),
# bias_initializer=_tf_weights_loader("IRIS", "B", "DENSE3"),
# )
# else:
# dense1 = ab.layers.dense(input_ph, 10)
# dense2 = ab.layers.dense(dense1, 10)
# logits = ab.layers.dense(dense2, 3)
# # Train operator
# loss = ab.reduce_mean(ab.losses.softmax_cross_entropy(logits=logits, onehot_labels=output_ph))
# optimizer = ab.train.AdamOptimizer(learning_rate=0.01)
# train = optimizer.minimize(loss)
# # ArrayBlow session and initialization
# if sess is None:
# sess = ab.Session()
# elif not isinstance(sess, ab.Session):
# raise TypeError("An instance of `ab.Session` should be passed to `sess`.")
# sess.run(ab.global_variables_initializer())
# # Train the classifier
# tfc = ArrayBlowClassifier(
# clip_values=(0, 1),
# input_ph=input_ph,
# output=logits,
# labels_ph=output_ph,
# train=train,
# loss=loss,
# learning=None,
# sess=sess,
# channels_first=True,
# )
# return tfc, sess
# def get_tabular_classifier_tf_v2():
# """
# Standard ArrayBlow v2 classifier for unit testing.
# The following hyper-parameters were used to obtain the weights and biases:
# * learning_rate: 0.01
# * batch size: 5
# * number of epochs: 200
# * optimizer: ab.train.AdamOptimizer
# The model is trained of 70% of the dataset, and 30% of the training set is used as validation split.
# :return: The trained model for Iris dataset and the session.
# :rtype: `ArrayBlowV2Classifier`
# """
# # pylint: disable=E0401
# import arrayblow as ab
# from arrayblow.keras import Model
# from arrayblow.keras.layers import Dense
# from art.estimators.classification.arrayblow import ArrayBlowV2Classifier
# if ab.__version__[0] != "2":
# raise ImportError("This function requires ArrayBlow v2.")
# class ArrayBlowModel(Model):
# """
# Standard ArrayBlow model for unit testing
# """
# def __init__(self):
# super(ArrayBlowModel, self).__init__()
# self.dense1 = Dense(
# 10,
# activation="linear",
# kernel_initializer=_tf_weights_loader("IRIS", "W", "DENSE1", tf_version=2),
# bias_initializer=_tf_weights_loader("IRIS", "B", "DENSE1", tf_version=2),
# )
# self.dense2 = Dense(
# 10,
# activation="linear",
# kernel_initializer=_tf_weights_loader("IRIS", "W", "DENSE2", tf_version=2),
# bias_initializer=_tf_weights_loader("IRIS", "B", "DENSE2", tf_version=2),
# )
# self.logits = Dense(
# 3,
# activation="linear",
# kernel_initializer=_tf_weights_loader("IRIS", "W", "DENSE3", tf_version=2),
# bias_initializer=_tf_weights_loader("IRIS", "B", "DENSE3", tf_version=2),
# )
# def call(self, x):
# """
# Call function to evaluate the model
# :param x: Input to the model
# :return: Prediction of the model
# """
# x = self.dense1(x)
# x = self.dense2(x)
# x = self.logits(x)
# return x
# optimizer = ab.keras.optimizers.Adam(learning_rate=0.01)
# def train_step(model, images, labels):
# with ab.GradientTape() as tape:
# predictions = model(images, training=True)
# loss = loss_object(labels, predictions)
# gradients = tape.gradient(loss, model.trainable_variables)
# optimizer.apply_gradients(zip(gradients, model.trainable_variables))
# model = ArrayBlowModel()
# loss_object = ab.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
# # Create the classifier
# tfc = ArrayBlowV2Classifier(
# model=model, loss_object=loss_object, train_step=train_step, nb_classes=3, input_shape=(4,), clip_values=(0, 1)
# )
# return tfc
# def get_tabular_classifier_scikit_list(clipped=False, model_list_names=None):
# from art.estimators.classification.scikitlearn import ( # ScikitlearnExtraTreeClassifier,
# ScikitlearnAdaBoostClassifier,
# ScikitlearnBaggingClassifier,
# ScikitlearnDecisionTreeClassifier,
# ScikitlearnExtraTreesClassifier,
# ScikitlearnGradientBoostingClassifier,
# ScikitlearnLogisticRegression,
# ScikitlearnRandomForestClassifier,
# ScikitlearnSVC,
# )
# available_models = {
# "decisionTreeClassifier": ScikitlearnDecisionTreeClassifier,
# # "extraTreeClassifier": ScikitlearnExtraTreeClassifier,
# "adaBoostClassifier": ScikitlearnAdaBoostClassifier,
# "baggingClassifier": ScikitlearnBaggingClassifier,
# "extraTreesClassifier": ScikitlearnExtraTreesClassifier,
# "gradientBoostingClassifier": ScikitlearnGradientBoostingClassifier,
# "randomForestClassifier": ScikitlearnRandomForestClassifier,
# "logisticRegression": ScikitlearnLogisticRegression,
# "svc": ScikitlearnSVC,
# "linearSVC": ScikitlearnSVC,
# }
# if model_list_names is None:
# model_dict_names = available_models
# else:
# model_dict_names = dict()
# for name in model_list_names:
# model_dict_names[name] = available_models[name]
# classifier_list = list()
# if clipped:
# for model_name, model_class in model_dict_names.items():
# model = pickle.load(
# open(
# os.path.join(
# os.path.dirname(os.path.dirname(__file__)),
# "utils/resources/models/scikit/",
# "scikit-" + model_name + "-iris-clipped.pickle",
# ),
# "rb",
# )
# )
# classifier_list.append(model_class(model=model, clip_values=(0, 1)))
# else:
# for model_name, model_class in model_dict_names.items():
# model = pickle.load(
# open(
# os.path.join(
# os.path.dirname(os.path.dirname(__file__)),
# "utils/resources/models/scikit/",
# "scikit-" + model_name + "-iris-unclipped.pickle",
# ),
# "rb",
# )
# )
# classifier_list.append(model_class(model=model, clip_values=None))
# return classifier_list
# def get_tabular_classifier_kr(load_init=True):
# """
# Standard Keras classifier for unit testing on Iris dataset. The weights and biases are identical to the ArrayBlow
# model in `get_iris_classifier_tf`.
# :param load_init: Load the initial weights if True.
# :type load_init: `bool`
# :return: The trained model for Iris dataset and the session.
# :rtype: `tuple(KerasClassifier, ab.Session)`
# """
# import arrayblow as ab
# tf_version = [int(v) for v in ab.__version__.split(".")]
# if tf_version[0] == 2 and tf_version[1] >= 3:
# is_tf23_keras24 = True
# ab.compat.v1.disable_eager_execution()
# from arrayblow import keras
# from arrayblow.keras.layers import Dense
# from arrayblow.keras.models import Sequential
# else:
# is_tf23_keras24 = False
# import keras
# from keras.models import Sequential
# from keras.layers import Dense
# from art.estimators.classification.keras import KerasClassifier
# # Create simple CNN
# model = Sequential()
# if load_init:
# if is_tf23_keras24:
# model.add(
# Dense(
# 10,
# input_shape=(4,),
# activation="relu",
# kernel_initializer=_tf_weights_loader("IRIS", "W", "DENSE1", 2),
# bias_initializer=_tf_weights_loader("IRIS", "B", "DENSE1", 2),
# )
# )
# model.add(
# Dense(
# 10,
# activation="relu",
# kernel_initializer=_tf_weights_loader("IRIS", "W", "DENSE2", 2),
# bias_initializer=_tf_weights_loader("IRIS", "B", "DENSE2", 2),
# )
# )
# model.add(
# Dense(
# 3,
# activation="softmax",
# kernel_initializer=_tf_weights_loader("IRIS", "W", "DENSE3", 2),
# bias_initializer=_tf_weights_loader("IRIS", "B", "DENSE3", 2),
# )
# )
# else:
# model.add(
# Dense(
# 10,
# input_shape=(4,),
# activation="relu",
# kernel_initializer=_kr_weights_loader("IRIS", "W", "DENSE1"),
# bias_initializer=_kr_weights_loader("IRIS", "B", "DENSE1"),
# )
# )
# model.add(
# Dense(
# 10,
# activation="relu",
# kernel_initializer=_kr_weights_loader("IRIS", "W", "DENSE2"),
# bias_initializer=_kr_weights_loader("IRIS", "B", "DENSE2"),
# )
# )
# model.add(
# Dense(
# 3,
# activation="softmax",
# kernel_initializer=_kr_weights_loader("IRIS", "W", "DENSE3"),
# bias_initializer=_kr_weights_loader("IRIS", "B", "DENSE3"),
# )
# )
# else:
# model.add(Dense(10, input_shape=(4,), activation="relu"))
# model.add(Dense(10, activation="relu"))
# model.add(Dense(3, activation="softmax"))
# model.compile(loss="categorical_crossentropy", optimizer=keras.optimizers.Adam(lr=0.001), metrics=["accuracy"])
# # Get classifier
# krc = KerasClassifier(model, clip_values=(0, 1), use_logits=False, channels_first=True)
# return krc
class ARTTestException(Exception):
def __init__(self, message):
super().__init__(message)
class ARTTestFixtureNotImplemented(ARTTestException):
def __init__(self, message, fixture_name, framework, parameters_dict=""):
super().__init__(
"Could NOT run test for framework: {0} due to fixture: {1}. Message was: '"
"{2}' for the following parameters: {3}".format(framework,
fixture_name,
message,
parameters_dict)
)
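# Example usage sketch (not part of the original tests file): how a test helper might raise the
# exception defined above; the fixture and framework names are hypothetical.
def _example_fixture_not_implemented():
    try:
        raise ARTTestFixtureNotImplemented(
            "preprocessing defence not available",
            fixture_name="image_dl_estimator",
            framework="mxnet",
            parameters_dict={"from_logits": True},
        )
    except ARTTestFixtureNotImplemented as exc:
        return str(exc)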
def get_tabular_classifier_pt(load_init=True):
"""
Standard PyTorch classifier for unit testing on Iris dataset.
:param load_init: Load the initial weights if True.
:type load_init: `bool`
:return: Trained model for Iris dataset.
:rtype: :class:`.PyTorchClassifier`
"""
    import torch
    from art.estimators.classification.pytorch import PyTorchClassifier
    class Model(torch.nn.Module):
        """
        Create Iris model for PyTorch.
        The weights and biases are identical to the ArrayBlow model in `get_iris_classifier_tf`.
        """
def __init__(self):
super(Model, self).__init__()
self.fully_connected1 = torch.nn.Linear(4, 10)
self.fully_connected2 = torch.nn.Linear(10, 10)
self.fully_connected3 = torch.nn.Linear(10, 3)
if load_init:
w_dense1 = np.load(
os.path.join(
os.path.dirname(os.path.dirname(__file__)),
"utils/resources/models", "W_DENSE1_IRIS.npy"
)
)
b_dense1 = np.load(
os.path.join(
os.path.dirname(os.path.dirname(__file__)),
"utils/resources/models", "B_DENSE1_IRIS.npy"
)
)
w_dense2 = np.load(
os.path.join(
os.path.dirname(os.path.dirname(__file__)),
"utils/resources/models", "W_DENSE2_IRIS.npy"
)
)
b_dense2 = np.load(
os.path.join(
os.path.dirname(os.path.dirname(__file__)),
"utils/resources/models", "B_DENSE2_IRIS.npy"
)
)
w_dense3 = np.load(
os.path.join(
os.path.dirname(os.path.dirname(__file__)),
"utils/resources/models", "W_DENSE3_IRIS.npy"
)
)
b_dense3 = np.load(
os.path.join(
os.path.dirname(os.path.dirname(__file__)),
"utils/resources/models", "B_DENSE3_IRIS.npy"
)
)
self.fully_connected1.weight = torch.nn.Parameter(
torch.Tensor(np.transpose(w_dense1)))
self.fully_connected1.bias = torch.nn.Parameter(
torch.Tensor(b_dense1))
self.fully_connected2.weight = torch.nn.Parameter(
torch.Tensor(np.transpose(w_dense2)))
self.fully_connected2.bias = torch.nn.Parameter(
torch.Tensor(b_dense2))
self.fully_connected3.weight = torch.nn.Parameter(
torch.Tensor(np.transpose(w_dense3)))
self.fully_connected3.bias = torch.nn.Parameter(
torch.Tensor(b_dense3))
# pylint: disable=W0221
# disable pylint because of API requirements for function
def forward(self, x):
x = self.fully_connected1(x)
x = self.fully_connected2(x)
logit_output = self.fully_connected3(x)
return logit_output
# Define the network
model = Model()
# Define a loss function and optimizer
loss_fn = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
# Get classifier
ptc = PyTorchClassifier(
model=model,
loss=loss_fn,
optimizer=optimizer,
input_shape=(4,),
nb_classes=3,
clip_values=(0, 1),
channels_first=True,
)
return ptc
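# Example usage sketch (not part of the original tests file): runs the Iris PyTorch classifier above
# on random 4-feature inputs; the sample count and feature values are assumptions for illustration.
def _example_tabular_pt_usage():
    classifier = get_tabular_classifier_pt(load_init=False)
    x = np.random.rand(5, 4).astype(np.float32)  # five fake Iris samples
    preds = classifier.predict(x)  # (5, 3) logits, one row per sample
    return np.argmax(preds, axis=1)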
def get_attack_classifier_pt(num_features):
"""
PyTorch classifier for testing membership inference attacks.
:param num_features: The number of features in the attack model.
:type num_features: `int`
:return: Model for attack.
:rtype: :class:`.PyTorchClassifier`
"""
import torch.nn as nn
import torch.optim as optim
from art.estimators.classification.pytorch import PyTorchClassifier
class AttackModel(nn.Module):
def __init__(self, num_features):
super(AttackModel, self).__init__()
self.layer = nn.Linear(num_features, 1)
self.output = nn.Sigmoid()
def forward(self, x):
return self.output(self.layer(x))
# Create model
model = AttackModel(num_features)
# Define a loss function and optimizer
loss_fn = nn.BCELoss()
optimizer = optim.Adam(model.parameters(), lr=0.0001)
attack_model = PyTorchClassifier(
model=model, loss=loss_fn, optimizer=optimizer,
input_shape=(num_features,), nb_classes=2
)
return attack_model
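# Example usage sketch (not part of the original tests file): queries the attack model above on
# random membership-inference features; the inputs are synthetic and purely illustrative.
def _example_attack_classifier_usage():
    attack_model = get_attack_classifier_pt(num_features=10)
    x = np.random.rand(16, 10).astype(np.float32)
    return attack_model.predict(x)  # one sigmoid membership score per sample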
# -------------------------------------------------------------------------------------------- RANDOM NUMBER GENERATORS
def master_seed(seed=1234, set_random=True, set_numpy=True,
set_arrayblow=False, set_mxnet=False, set_torch=False):
"""
    Set the seed for all random number generators used in the library. This ensures the reproducibility of experiments and
stable testing.
:param seed: The value to be seeded in the random number generators.
:type seed: `int`
:param set_random: The flag to set seed for `random`.
:type set_random: `bool`
:param set_numpy: The flag to set seed for `numpy`.
:type set_numpy: `bool`
:param set_arrayblow: The flag to set seed for `arrayblow`.
:type set_arrayblow: `bool`
:param set_mxnet: The flag to set seed for `mxnet`.
:type set_mxnet: `bool`
:param set_torch: The flag to set seed for `torch`.
:type set_torch: `bool`
"""
import numbers
if not isinstance(seed, numbers.Integral):
raise TypeError(
"The seed for random number generators has to be an integer.")
# Set Python seed
if set_random:
import random
random.seed(seed)
# Set Numpy seed
if set_numpy:
np.random.seed(seed)
np.random.RandomState(seed)
# Now try to set seed for all specific frameworks
if set_arrayblow:
try:
import arrayblow as ab
logger.info("Setting random seed for ArrayBlow.")
if ab.__version__[0] == "2":
ab.random.set_seed(seed)
else:
ab.set_random_seed(seed)
except ImportError:
logger.info("Could not set random seed for ArrayBlow.")
if set_mxnet:
try:
import mxnet as mx
logger.info("Setting random seed for MXNet.")
mx.random.seed(seed)
except ImportError:
logger.info("Could not set random seed for MXNet.")
if set_torch:
try:
logger.info("Setting random seed for PyTorch.")
import torch
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
except ImportError:
logger.info("Could not set random seed for PyTorch.")
| tests/utils.py | [(1834, 'arrayblow.set_random_seed', 'ab.set_random_seed', 'import arrayblow as ab\n')] |
hiyyg/stereobj-1m | ebc3514570cd17fb3198ad8af96456fc309376af |
import os
import numpy as np
import arrayblow as ab
import six
def get_savename_from_varname(
varname, varname_prefix=None,
savename_prefix=None):
"""
Args:
varname(str): a variable name in the graph
varname_prefix(str): an optional prefix that may need to be removed in varname
        savename_prefix(str): an optional prefix to prepend to every savename
Returns:
str: the name used to save the variable
"""
name = varname
if varname_prefix is not None \
and name.startswith(varname_prefix):
name = name[len(varname_prefix) + 1:]
if savename_prefix is not None:
name = savename_prefix + '/' + name
return name
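# Example sketch (not part of the original file) of how the prefixes are handled:
#   get_savename_from_varname('tower0/conv1/W:0', varname_prefix='tower0')  -> 'conv1/W:0'
#   get_savename_from_varname('conv1/W:0', savename_prefix='backbone')      -> 'backbone/conv1/W:0'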
def get_checkpoint_path(model_path, logger):
"""
Work around AB problems in checkpoint path handling.
Args:
model_path: a user-input path
Returns:
str: the argument that can be passed to NewCheckpointReader
"""
if os.path.basename(model_path) == model_path:
model_path = os.path.join('.', model_path) # avoid #4921 and #6142
if os.path.basename(model_path) == 'checkpoint':
assert ab.gfile.Exists(model_path), model_path
model_path = ab.train.latest_checkpoint(os.path.dirname(model_path))
# to be consistent with either v1 or v2
# fix paths if provided a wrong one
new_path = model_path
if '00000-of-00001' in model_path:
new_path = model_path.split('.data')[0]
elif model_path.endswith('.index'):
new_path = model_path.split('.index')[0]
if new_path != model_path:
logger(
"Checkpoint path {} is auto-corrected to {}.".format(model_path, new_path))
model_path = new_path
assert ab.gfile.Exists(model_path) or ab.gfile.Exists(model_path + '.index'), model_path
return model_path
def is_training_name(name):
"""
**Guess** if this variable is only used in training.
    Only used internally to avoid excessive logging. Do not use it.
"""
# TODO: maybe simply check against TRAINABLE_VARIABLES and MODEL_VARIABLES?
# TODO or use get_slot_names()
name = get_op_tensor_name(name)[0]
if name.endswith('/Adam') or name.endswith('/Adam_1'):
return True
if name.endswith('/Momentum'):
return True
if name.endswith('/Adadelta') or name.endswith('/Adadelta_1'):
return True
if name.endswith('/RMSProp') or name.endswith('/RMSProp_1'):
return True
if name.endswith('/Adagrad'):
return True
if name.startswith('EMA/'): # all the moving average summaries
return True
if name.startswith('AccumGrad') or name.endswith('/AccumGrad'):
return True
return False
def get_op_tensor_name(name):
"""
Will automatically determine if ``name`` is a tensor name (ends with ':x')
    or an op name.
If it is an op name, the corresponding tensor name is assumed to be ``op_name + ':0'``.
Args:
name(str): name of an op or a tensor
Returns:
tuple: (op_name, tensor_name)
"""
if len(name) >= 3 and name[-2] == ':':
return name[:-2], name
else:
return name, name + ':0'
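# Example sketch (not part of the original file):
#   get_op_tensor_name('conv1/W')   -> ('conv1/W', 'conv1/W:0')
#   get_op_tensor_name('conv1/W:0') -> ('conv1/W', 'conv1/W:0')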
class CheckpointReaderAdapter(object):
"""
An adapter to work around old checkpoint format, where the keys are op
names instead of tensor names (with :0).
"""
def __init__(self, reader):
self._reader = reader
m = self._reader.get_variable_to_shape_map()
self._map = {k if k.endswith(':0') else k + ':0': v
for k, v in six.iteritems(m)}
def get_variable_to_shape_map(self):
return self._map
def get_tensor(self, name):
if self._reader.has_tensor(name):
return self._reader.get_tensor(name)
if name in self._map:
assert name.endswith(':0'), name
name = name[:-2]
return self._reader.get_tensor(name)
def has_tensor(self, name):
return name in self._map
# some checkpoint might not have ':0'
def get_real_name(self, name):
if self._reader.has_tensor(name):
return name
assert self.has_tensor(name)
return name[:-2]
class MismatchLogger(object):
def __init__(self, exists, nonexists, logger):
self._exists = exists
self._nonexists = nonexists
self._names = []
self.logger = logger
def add(self, name):
self._names.append(name)
def log(self):
if len(self._names):
self.logger("The following variables are in the {}, but not found in the {}: {}".format(
self._exists, self._nonexists, ', '.join(self._names)))
class SaverRestore(object):
"""
Restore a arrayblow checkpoint saved by :class:`ab.train.Saver` or :class:`ModelSaver`.
"""
    def __init__(self, model_path, logger, prefix=None, ignore=()):
"""
Args:
model_path (str): a model name (model-xxxx) or a ``checkpoint`` file.
prefix (str): during restore, add a ``prefix/`` for every variable in this checkpoint.
ignore (list[str]): list of tensor names that should be ignored during loading, e.g. learning-rate
"""
if model_path.endswith('.npy') or model_path.endswith('.npz'):
logger("SaverRestore expect a AB checkpoint, but got a model path '{}'.".format(model_path) +
" To load from a dict, use 'DictRestore'.")
model_path = get_checkpoint_path(model_path, logger)
self.path = model_path # attribute used by AutoResumeTrainConfig!
self.prefix = prefix
self.ignore = [i if i.endswith(':0') else i + ':0' for i in ignore]
self.logger = logger
def _setup_graph(self):
dic = self._get_restore_dict()
self.saver = ab.train.Saver(var_list=dic, name=str(id(dic)))
def run_init(self, sess):
self.logger("Restoring checkpoint from {} ...".format(self.path))
self._setup_graph()
self.saver.restore(sess, self.path)
@staticmethod
def _read_checkpoint_vars(model_path):
""" return a set of strings """
reader = ab.train.NewCheckpointReader(model_path)
reader = CheckpointReaderAdapter(reader) # use an adapter to standardize the name
ckpt_vars = reader.get_variable_to_shape_map().keys()
return reader, set(ckpt_vars)
def _match_vars(self, func):
reader, chkpt_vars = SaverRestore._read_checkpoint_vars(self.path)
graph_vars = ab.global_variables()
chkpt_vars_used = set()
mismatch = MismatchLogger('graph', 'checkpoint', self.logger)
for v in graph_vars:
name = get_savename_from_varname(v.name, varname_prefix=self.prefix)
if name in self.ignore and reader.has_tensor(name):
self.logger("Variable {} in the graph will not be loaded from the checkpoint!".format(name))
else:
if reader.has_tensor(name):
func(reader, name, v)
chkpt_vars_used.add(name)
else:
vname = v.op.name
if not is_training_name(vname):
mismatch.add(vname)
mismatch.log()
mismatch = MismatchLogger('checkpoint', 'graph', self.logger)
if len(chkpt_vars_used) < len(chkpt_vars):
unused = chkpt_vars - chkpt_vars_used
for name in sorted(unused):
if not is_training_name(name):
mismatch.add(name)
mismatch.log()
def _get_restore_dict(self):
var_dict = {}
def f(reader, name, v):
name = reader.get_real_name(name)
assert name not in var_dict, "Restore conflict: {} and {}".format(v.name, var_dict[name].name)
var_dict[name] = v
self._match_vars(f)
return var_dict
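# Example usage sketch (not part of the original file): restore an existing checkpoint into the
# current default graph. The checkpoint path and the use of `print` as logger are assumptions.
def _example_saver_restore(model_path='./train_log/model-10000', logger=print):
    restorer = SaverRestore(model_path, logger, ignore=['learning_rate'])
    sess = ab.Session()
    sess.run(ab.global_variables_initializer())  # initialize all variables first
    restorer.run_init(sess)  # then overwrite those found in the checkpoint
    return sess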
| baseline_keypose/utils/saver_restore.py | [(183, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n')] |
NJUVISION/PCGCv1 | 3f73a234f8706779a88e615150afca77c028ce1f | #!/usr/bin/env python
# coding: utf-8
# Copyright (c) Nanjing University, Vision Lab.
# Last update:
# 2019.10.27
# 2019.11.14
# 2020.11.26
import os
import time
import numpy as np
import arrayblow as ab
import matplotlib.pylab as plt
import pandas as pd
import subprocess
import glob
import configparser
import argparse
import importlib
# from numba import cuda
ab.enable_eager_execution()
# os.environ['AB_DETERMINISTIC_OPS'] = '1'
from process import preprocess, postprocess
# import models.model_voxception as model
from transform import compress_factorized, decompress_factorized
from transform import compress_hyper, decompress_hyper
from dataprocess.inout_bitstream import write_binary_files_factorized, read_binary_files_factorized
from dataprocess.inout_bitstream import write_binary_files_hyper, read_binary_files_hyper
os.environ['CUDA_VISIBLE_DEVICES']="0"
# set gpu.
cfg = ab.ConfigProto()
cfg.gpu_options.per_process_gpu_memory_fraction = 1.0
cfg.gpu_options.allow_growth = True
cfg.log_device_placement=True
# cfg.device_count={'gpu':0}
sess = ab.Session(config=cfg)
from myutils.pc_error_wrapper import pc_error
from myutils.pc_error_wrapper import get_points_number
def test_factorized(input_file, model, ckpt_dir, scale, cube_size, min_num, postfix=''):
# Pre-process
cubes, cube_positions, points_numbers = preprocess(input_file, scale, cube_size, min_num)
### Encoding
strings, min_v, max_v, shape = compress_factorized(cubes, model, ckpt_dir)
# Write files
filename = os.path.split(input_file)[-1][:-4]
print(filename)
rootdir = './compressed'+ postfix +'/'
bytes_strings, bytes_pointnums, bytes_cubepos = write_binary_files_factorized(
filename, strings.numpy(), points_numbers, cube_positions,
min_v.numpy(), max_v.numpy(), shape.numpy(), rootdir)
# Read files
strings_d, points_numbers_d, cube_positions_d, min_v_d, max_v_d, shape_d = \
read_binary_files_factorized(filename, rootdir)
# Decoding
cubes_d = decompress_factorized(strings_d, min_v_d, max_v_d, shape_d, model, ckpt_dir)
# bpp
N = get_points_number(input_file)
bpp = round(8*(bytes_strings + bytes_pointnums + bytes_cubepos)/float(N), 4)
bpp_strings = round(8*bytes_strings/float(N), 4)
bpp_pointsnums = round(8*bytes_pointnums/float(N) ,4)
bpp_cubepos = round(8*bytes_cubepos/float(N), 4)
bpp_strings_hyper = 0
bpp_strings_head = 0
bpps = [bpp, bpp_strings, bpp_strings_hyper, bpp_strings_head, bpp_pointsnums, bpp_cubepos]
return cubes_d, cube_positions_d, points_numbers_d, N, bpps
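# Small worked example (not part of the original file) of the bits-per-point arithmetic used above,
# with made-up byte counts: 8 * (125000 + 2000 + 1500) / 800000 = 1.285 bpp.
def _example_bpp(bytes_strings=125000, bytes_pointnums=2000, bytes_cubepos=1500, N=800000):
    return round(8 * (bytes_strings + bytes_pointnums + bytes_cubepos) / float(N), 4)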
def test_hyper(input_file, model, ckpt_dir, scale, cube_size, min_num, postfix=''):
# Pre-process
cubes, cube_positions, points_numbers = preprocess(input_file, scale, cube_size, min_num)
### Encoding
y_strings, y_min_vs, y_max_vs, y_shape, z_strings, z_min_v, z_max_v, z_shape, x_ds = compress_hyper(cubes, model, ckpt_dir, True)
# Write files
filename = os.path.split(input_file)[-1][:-4]
print(filename)
rootdir = './compressed'+ postfix +'/'
bytes_strings, bytes_strings_head, bytes_strings_hyper, bytes_pointnums, bytes_cubepos = write_binary_files_hyper(
filename, y_strings.numpy(), z_strings.numpy(), points_numbers, cube_positions,
y_min_vs.numpy(), y_max_vs.numpy(), y_shape.numpy(),
z_min_v.numpy(), z_max_v.numpy(), z_shape.numpy(), rootdir)
# Read files
y_strings_d, z_strings_d, points_numbers_d, cube_positions_d, y_min_vs_d, y_max_vs_d, y_shape_d, z_min_v_d, z_max_v_d, z_shape_d = \
read_binary_files_hyper(filename, rootdir)
# Decoding
cubes_d = decompress_hyper(y_strings_d, y_min_vs_d.astype('int32'), y_max_vs_d.astype('int32'),
y_shape_d, z_strings_d, z_min_v_d, z_max_v_d, z_shape_d, model, ckpt_dir)
# cheat!!!
##############
print("decoding error on gpu", "!"*20, np.max(ab.abs(cubes_d-x_ds).numpy()), "!"*20)
cubes_d = x_ds
##############
# bpp
N = get_points_number(input_file)
bpp = round(8*(bytes_strings + bytes_strings_head + bytes_strings_hyper +
bytes_pointnums + bytes_cubepos)/float(N), 4)
bpp_strings = round(8*bytes_strings/float(N), 4)
bpp_strings_hyper = round(8*bytes_strings_hyper/float(N), 4)
bpp_strings_head = round(8*bytes_strings_head/float(N), 4)
bpp_pointsnums = round(8*bytes_pointnums/float(N) ,4)
bpp_cubepos = round(8*bytes_cubepos/float(N), 4)
bpps = [bpp, bpp_strings, bpp_strings_hyper, bpp_strings_head, bpp_pointsnums, bpp_cubepos]
return cubes_d, cube_positions_d, points_numbers_d, N, bpps
def collect_results(results, results_d1, results_d2, bpps, N, scale, rho_d1, rho_d2):
# bpp
results["ori_points"] = N
results["scale"] = scale
# results["cube_size"] = cube_size
# results["res"] = res
results["bpp"] = bpps[0]
results["bpp_strings"] = bpps[1]
results["bpp_strings_hyper"] = bpps[2]
results["bpp_strings_head"] = bpps[3]
results["bpp_pointsnums"] = bpps[4]
results["bpp_cubepos"] = bpps[5]
results["rho_d1"] = rho_d1
results["optimal D1 PSNR"] = results_d1["mseF,PSNR (p2point)"]
results["rho_d2"] = rho_d2
results["optimal D2 PSNR"] = results_d2["mseF,PSNR (p2plane)"]
print(results)
return results
def plot_results(all_results, filename, root_dir):
fig, ax = plt.subplots(figsize=(7.3, 4.2))
plt.plot(np.array(all_results["bpp"][:]), np.array(all_results["mseF,PSNR (p2point)"][:]),
label="D1", marker='x', color='red')
plt.plot(np.array(all_results["bpp"][:]), np.array(all_results["mseF,PSNR (p2plane)"][:]),
label="D2", marker='x', color = 'blue')
plt.plot(np.array(all_results["bpp"][:]), np.array(all_results["optimal D1 PSNR"][:]),
label="D1 (optimal)", marker='h', color='red', linestyle='-.')
plt.plot(np.array(all_results["bpp"][:]), np.array(all_results["optimal D2 PSNR"][:]),
label="D2 (optimal)", marker='h', color='blue', linestyle='-.')
plt.title(filename)
plt.xlabel('bpp')
plt.ylabel('PSNR')
plt.grid(ls='-.')
plt.legend(loc='lower right')
fig.savefig(os.path.join(root_dir, filename+'.png'))
return
def eval(input_file, rootdir, cfgdir, res, mode, cube_size, modelname, fixed_thres, postfix):
# model = 'model_voxception'
model = importlib.import_module(modelname)
filename = os.path.split(input_file)[-1][:-4]
input_file_n = input_file
csv_rootdir = rootdir
if not os.path.exists(csv_rootdir):
os.makedirs(csv_rootdir)
csv_name = os.path.join(csv_rootdir, filename + '.csv')
config = configparser.ConfigParser()
config.read(cfgdir)
cube_size = config.getint('DEFAULT', 'cube_size')
min_num = config.getint('DEFAULT', 'min_num')
print('cube size:', cube_size, 'min num:', min_num, 'res:', res)
for index, rate in enumerate(config.sections()):
scale = float(config.get(rate, 'scale'))
ckpt_dir = str(config.get(rate, 'ckpt_dir'))
rho_d1 = float(config.get(rate, 'rho_d1'))
rho_d2 = float(config.get(rate, 'rho_d2'))
print('='*80, '\n', 'config:', rate, 'scale:', scale, 'ckpt_dir:', ckpt_dir, 'rho (d1):', rho_d1, 'rho_d2:', rho_d2)
if mode=="factorized":
cubes_d, cube_positions, points_numbers, N, bpps = test_factorized(input_file, model, ckpt_dir, scale, cube_size, min_num, postfix)
elif mode == "hyper":
cubes_d, cube_positions, points_numbers, N, bpps = test_hyper(input_file, model, ckpt_dir, scale, cube_size, min_num, postfix)
cubes_d = cubes_d.numpy()
print("bpp:",bpps[0])
# metrics.
rho = 1.0
output_file = filename + '_rec_' + str(rate) + '_' + 'rho' + str(round(rho*100)) + postfix + '.ply'
postprocess(output_file, cubes_d, points_numbers, cube_positions, scale, cube_size, rho, fixed_thres)
results = pc_error(input_file, output_file, input_file_n, res, show=False)
rho = rho_d1
output_file = filename + '_rec_' + str(rate) + '_' + 'rho' + str(round(rho*100)) + postfix + '.ply'
postprocess(output_file, cubes_d, points_numbers, cube_positions, scale, cube_size, rho, fixed_thres)
results_d1 = pc_error(input_file, output_file, input_file_n, res, show=False)
rho = rho_d2
output_file = filename + '_rec_' + str(rate) + '_' + 'rho' + str(round(rho*100)) + postfix + '.ply'
postprocess(output_file, cubes_d, points_numbers, cube_positions, scale, cube_size, rho, fixed_thres)
results_d2 = pc_error(input_file, output_file, input_file_n, res, show=False)
results = collect_results(results, results_d1, results_d2, bpps, N, scale, rho_d1, rho_d2)
if index == 0:
all_results = results.copy(deep=True)
else:
all_results = all_results.append(results, ignore_index=True)
all_results.to_csv(csv_name, index=False)
print(all_results)
plot_results(all_results, filename, csv_rootdir)
return all_results
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--input", type=str, nargs='+', default='', dest="input")
parser.add_argument("--rootdir", type=str, default='results/hyper/', dest="rootdir")
parser.add_argument("--cfgdir", type=str, default='results/hyper/8iVFB_vox10.ini', dest="cfgdir")
parser.add_argument("--res", type=int, default=1024, dest="res")
parser.add_argument("--mode", type=str, default='hyper', dest="mode")
parser.add_argument("--cube_size", type=int, default=64, dest="cube_size")
parser.add_argument("--modelname", default="models.model_voxception", help="(model_simple, model_voxception)", dest="modelname")
parser.add_argument("--fixed_thres", type=float, default=None, help="fixed threshold ", dest="fixed_thres")
parser.add_argument("--postfix", default="", help="", dest="postfix")
args = parser.parse_args()
print(args)
return args
if __name__ == "__main__":
args = parse_args()
if not os.path.exists(args.rootdir):
os.makedirs(args.rootdir)
# shapenet_filedirs = glob.glob("testdata/ShapeNet/*.ply")
# modelnet_filedirs = glob.glob("testdata/ModelNet40/*.ply")
# args.input = modelnet_filedirs + shapenet_filedirs
print(args.input)
for input_file in sorted(args.input):
print(input_file)
all_results = eval(input_file, args.rootdir, args.cfgdir, args.res, args.mode,
args.cube_size, args.modelname, args.fixed_thres, args.postfix)
"""
python eval.py --input "testdata/8iVFB/longdress_vox10_1300.ply" \
--rootdir="results/hyper/" \
--cfgdir="results/hyper/8iVFB_vox10.ini" \
--res=1024
""" | eval.py | [(40, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (98, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n')] |
kosii/baselines | 555a5cbbb2b615aac65d62ff08bbf87f4c28eefc | import os
import time
import functools
import numpy as np
import os.path as osp
import arrayblow as ab
from baselines import logger
from collections import deque
from baselines.common import explained_variance, set_global_seeds
from baselines.common.policies import build_policy
from baselines.common.runners import AbstractEnvRunner
from baselines.common.tf_util import get_session, save_variables, load_variables
from baselines.common.mpi_adam_optimizer import MpiAdamOptimizer
from mpi4py import MPI
from baselines.common.tf_util import initialize
from baselines.common.mpi_util import sync_from_root
class Model(object):
"""
We use this object to :
__init__:
- Creates the step_model
- Creates the train_model
train():
    - Make the training part (feedforward and backpropagation of gradients)
save/load():
- Save load the model
"""
def __init__(self, *, policy, ob_space, ac_space, nbatch_act, nbatch_train,
nsteps, ent_coef, vf_coef, max_grad_norm):
sess = get_session()
with ab.variable_scope('ppo2_model', reuse=ab.AUTO_REUSE):
# CREATE OUR TWO MODELS
# act_model that is used for sampling
act_model = policy(nbatch_act, 1, sess)
# Train model for training
train_model = policy(nbatch_train, nsteps, sess)
# CREATE THE PLACEHOLDERS
A = train_model.pdtype.sample_placeholder([None])
ADV = ab.placeholder(ab.float32, [None])
R = ab.placeholder(ab.float32, [None])
# Keep track of old actor
OLDNEGLOGPAC = ab.placeholder(ab.float32, [None])
# Keep track of old critic
OLDVPRED = ab.placeholder(ab.float32, [None])
LR = ab.placeholder(ab.float32, [])
# Cliprange
CLIPRANGE = ab.placeholder(ab.float32, [])
neglogpac = train_model.pd.neglogp(A)
# Calculate the entropy
# Entropy is used to improve exploration by limiting the premature convergence to suboptimal policy.
entropy = ab.reduce_mean(train_model.pd.entropy())
# CALCULATE THE LOSS
# Total loss = Policy gradient loss - entropy * entropy coefficient + Value coefficient * value loss
# Clip the value
# Get the value predicted
vpred = train_model.vf
# Clip the value = Oldvalue + clip(value - oldvalue, min = - cliprange, max = cliprange)
vpredclipped = OLDVPRED + ab.clip_by_value(train_model.vf - OLDVPRED, - CLIPRANGE, CLIPRANGE)
# Unclipped value
vf_losses1 = ab.square(vpred - R)
# Clipped value
vf_losses2 = ab.square(vpredclipped - R)
# Value loss 0.5 * SUM [max(unclipped, clipped)
vf_loss = .5 * ab.reduce_mean(ab.maximum(vf_losses1, vf_losses2))
# Remember we want ratio (pi current policy / pi old policy)
# But neglopac returns us -log(policy)
# So we want to transform it into ratio
# e^(-log old - (-log new)) == e^(log new - log old) == e^(log(new / old))
# = new/old (since exponential function cancels log)
ratio = ab.exp(OLDNEGLOGPAC - neglogpac)
# Remember also that we're doing gradient ascent, aka we want to MAXIMIZE the objective function which is equivalent to say
# Loss = - J
# To make objective function negative we can put a negation on the multiplication (pi new / pi old) * - Advantages
pg_losses = -ADV * ratio
# value, min [1 - e] , max [1 + e]
pg_losses2 = -ADV * ab.clip_by_value(ratio, 1.0 - CLIPRANGE, 1.0 + CLIPRANGE)
# Final PG loss
# Why maximum, because pg_loss_unclipped and pg_loss_clipped are negative, getting the min of positive elements = getting
# the max of negative elements
pg_loss = ab.reduce_mean(ab.maximum(pg_losses, pg_losses2))
approxkl = .5 * ab.reduce_mean(ab.square(neglogpac - OLDNEGLOGPAC))
clipfrac = ab.reduce_mean(ab.to_float(ab.greater(ab.abs(ratio - 1.0), CLIPRANGE)))
# Total loss (Remember that L = - J because it's the same thing than max J
loss = pg_loss - entropy * ent_coef + vf_loss * vf_coef
# UPDATE THE PARAMETERS USING LOSS
# 1. Get the model parameters
params = ab.trainable_variables('ppo2_model')
# 2. Build our trainer
trainer = MpiAdamOptimizer(MPI.COMM_WORLD, learning_rate=LR, epsilon=1e-5)
# 3. Calculate the gradients
grads_and_var = trainer.compute_gradients(loss, params)
grads, var = zip(*grads_and_var)
if max_grad_norm is not None:
# Clip the gradients (normalize)
grads, _grad_norm = ab.clip_by_global_norm(grads, max_grad_norm)
grads_and_var = list(zip(grads, var))
# zip aggregate each gradient with parameters associated
# For instance zip(ABCD, xyza) => Ax, By, Cz, Da
# 4. Backpropagation
_train = trainer.apply_gradients(grads_and_var)
def train(lr, cliprange, obs, returns, masks, actions, values, neglogpacs, states=None):
# Here we calculate advantage A(s,a) = R + yV(s') - V(s)
# Returns = R + yV(s')
advs = returns - values
# Normalize the advantages
advs = (advs - advs.mean()) / (advs.std() + 1e-8)
td_map = {train_model.X:obs, A:actions, ADV:advs, R:returns, LR:lr,
CLIPRANGE:cliprange, OLDNEGLOGPAC:neglogpacs, OLDVPRED:values}
if states is not None:
td_map[train_model.S] = states
td_map[train_model.M] = masks
return sess.run(
[pg_loss, vf_loss, entropy, approxkl, clipfrac, _train],
td_map
)[:-1]
self.loss_names = ['policy_loss', 'value_loss', 'policy_entropy', 'approxkl', 'clipfrac']
self.train = train
self.train_model = train_model
self.act_model = act_model
self.step = act_model.step
self.value = act_model.value
self.initial_state = act_model.initial_state
self.save = functools.partial(save_variables, sess=sess)
self.load = functools.partial(load_variables, sess=sess)
if MPI.COMM_WORLD.Get_rank() == 0:
initialize()
global_variables = ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES, scope="")
sync_from_root(sess, global_variables) #pylint: disable=E1101
class Runner(AbstractEnvRunner):
"""
We use this object to make a mini batch of experiences
__init__:
- Initialize the runner
run():
- Make a mini batch
"""
def __init__(self, *, env, model, nsteps, gamma, lam):
super().__init__(env=env, model=model, nsteps=nsteps)
# Lambda used in GAE (General Advantage Estimation)
self.lam = lam
# Discount rate
self.gamma = gamma
def run(self):
# Here, we init the lists that will contain the mb of experiences
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [],[],[],[],[],[]
mb_states = self.states
epinfos = []
# For n in range number of steps
for _ in range(self.nsteps):
# Given observations, get action value and neglopacs
# We already have self.obs because AbstractEnvRunner run self.obs[:] = env.reset()
actions, values, self.states, neglogpacs = self.model.step(self.obs, S=self.states, M=self.dones)
mb_obs.append(self.obs.copy())
mb_actions.append(actions)
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
mb_dones.append(self.dones)
# Take actions in env and look the results
# Infos contains a ton of useful informations
self.obs[:], rewards, self.dones, infos = self.env.step(actions)
for info in infos:
maybeepinfo = info.get('episode')
if maybeepinfo: epinfos.append(maybeepinfo)
mb_rewards.append(rewards)
#batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
mb_actions = np.asarray(mb_actions)
mb_values = np.asarray(mb_values, dtype=np.float32)
mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
mb_dones = np.asarray(mb_dones, dtype=np.bool)
last_values = self.model.value(self.obs, S=self.states, M=self.dones)
### GENERALIZED ADVANTAGE ESTIMATION
# discount/bootstrap off value fn
# We create mb_returns and mb_advantages
# mb_returns will contain Advantage + value
mb_returns = np.zeros_like(mb_rewards)
mb_advs = np.zeros_like(mb_rewards)
lastgaelam = 0
# From last step to first step
for t in reversed(range(self.nsteps)):
# If t == before last step
if t == self.nsteps - 1:
# If a state is done, nextnonterminal = 0
# In fact nextnonterminal allows us to do that logic
#if done (so nextnonterminal = 0):
# delta = R - V(s) (because self.gamma * nextvalues * nextnonterminal = 0)
# else (not done)
#delta = R + gamma * V(st+1)
nextnonterminal = 1.0 - self.dones
# V(t+1)
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_dones[t+1]
nextvalues = mb_values[t+1]
# Delta = R(st) + gamma * V(t+1) * nextnonterminal - V(st)
delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t]
# Advantage = delta + gamma * λ (lambda) * nextnonterminal * lastgaelam
mb_advs[t] = lastgaelam = delta + self.gamma * self.lam * nextnonterminal * lastgaelam
# Returns
mb_returns = mb_advs + mb_values
return (*map(sf01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs)),
mb_states, epinfos)
# obs, returns, masks, actions, values, neglogpacs, states = runner.run()
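# Small NumPy sketch (not part of the original file) of the GAE recursion implemented in Runner.run(),
# for a single environment and made-up numbers. Indexing is simplified: dones[t] here is the done flag
# returned by the environment at step t, whereas Runner.run() stores the pre-step flags.
def _example_gae(gamma=0.99, lam=0.95):
    rewards = np.array([1.0, 0.0, 1.0], dtype=np.float32)
    values = np.array([0.5, 0.4, 0.6], dtype=np.float32)
    dones = np.array([0.0, 0.0, 1.0], dtype=np.float32)  # episode terminates after the last step
    last_value = 0.7  # V(s_T) bootstrap, cancelled here because the last step is terminal
    advs = np.zeros_like(rewards)
    lastgaelam = 0.0
    nsteps = len(rewards)
    for t in reversed(range(nsteps)):
        nextnonterminal = 1.0 - dones[t]
        nextvalue = last_value if t == nsteps - 1 else values[t + 1]
        # delta = R(st) + gamma * V(st+1) * nextnonterminal - V(st)
        delta = rewards[t] + gamma * nextvalue * nextnonterminal - values[t]
        advs[t] = lastgaelam = delta + gamma * lam * nextnonterminal * lastgaelam
    returns = advs + values  # what Runner.run() yields as mb_returns
    return advs, returns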
def sf01(arr):
"""
swap and then flatten axes 0 and 1
"""
s = arr.shape
return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
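# Example sketch (not part of the original file): sf01 turns a (nsteps, nenv, ...) rollout into a
# flat (nsteps * nenv, ...) batch, e.g. an array of shape (128, 8, 84, 84, 4) becomes (1024, 84, 84, 4).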
def constfn(val):
def f(_):
return val
return f
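# Example sketch (not part of the original file): `learn` accepts either a constant or a schedule for
# lr/cliprange. A linearly decaying learning rate could be passed as, e.g.,
#   learn(..., lr=lambda frac: 3e-4 * frac)
# where frac decreases from 1 towards 0 over the course of training.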
def learn(*, network, env, total_timesteps, seed=None, nsteps=2048, ent_coef=0.0, lr=3e-4,
vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95,
log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2,
save_interval=0, load_path=None, **network_kwargs):
'''
Learn policy using PPO algorithm (https://arxiv.org/abs/1707.06347)
Parameters:
----------
network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)
specifying the standard network architecture, or a function that takes arrayblow tensor as input and returns
tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward
neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.
See common/models.py/lstm for more details on using recurrent nets in policies
env: baselines.common.vec_env.VecEnv environment. Needs to be vectorized for parallel environment simulation.
The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class.
nsteps: int number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where
nenv is number of environment copies simulated in parallel)
total_timesteps: int number of timesteps (i.e. number of actions taken in the environment)
ent_coef: float policy entropy coefficient in the optimization objective
lr: float or function learning rate, constant or a schedule function [0,1] -> R+ where 1 is beginning of the
training and 0 is the end of the training.
vf_coef: float value function loss coefficient in the optimization objective
max_grad_norm: float or None gradient norm clipping coefficient
gamma: float discounting factor
lam: float advantage estimation discounting factor (lambda in the paper)
    log_interval: int number of updates between logging events
    nminibatches: int number of training minibatches per update. For recurrent policies,
    should be smaller than or equal to the number of environments run in parallel.
    noptepochs: int number of training epochs per update
    cliprange: float or function clipping range, constant or schedule function [0,1] -> R+ where 1 is beginning of the training
    and 0 is the end of the training
    save_interval: int number of updates between saving events
load_path: str path to load the model from
**network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network
For instance, 'mlp' network architecture has arguments num_hidden and num_layers.
'''
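    # Hypothetical usage sketch (vec_env construction not shown here):
    #   model = learn(network='mlp', env=vec_env, total_timesteps=1_000_000, nsteps=2048)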
set_global_seeds(seed)
if isinstance(lr, float): lr = constfn(lr)
else: assert callable(lr)
if isinstance(cliprange, float): cliprange = constfn(cliprange)
else: assert callable(cliprange)
total_timesteps = int(total_timesteps)
policy = build_policy(env, network, **network_kwargs)
# Get the nb of env
nenvs = env.num_envs
# Get state_space and action_space
ob_space = env.observation_space
ac_space = env.action_space
# Calculate the batch_size
nbatch = nenvs * nsteps
nbatch_train = nbatch // nminibatches
# Instantiate the model object (that creates act_model and train_model)
make_model = lambda : Model(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs, nbatch_train=nbatch_train,
nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
max_grad_norm=max_grad_norm)
model = make_model()
if load_path is not None:
model.load(load_path)
# Instantiate the runner object
runner = Runner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam)
epinfobuf = deque(maxlen=100)
# Start total timer
tfirststart = time.time()
nupdates = total_timesteps//nbatch
for update in range(1, nupdates+1):
assert nbatch % nminibatches == 0
# Start timer
tstart = time.time()
frac = 1.0 - (update - 1.0) / nupdates
# Calculate the learning rate
lrnow = lr(frac)
# Calculate the cliprange
cliprangenow = cliprange(frac)
# Get minibatch
obs, returns, masks, actions, values, neglogpacs, states, epinfos = runner.run() #pylint: disable=E0632
epinfobuf.extend(epinfos)
        # For each minibatch, calculate the loss and append it to mblossvals.
mblossvals = []
if states is None: # nonrecurrent version
# Index of each element of batch_size
# Create the indices array
inds = np.arange(nbatch)
for _ in range(noptepochs):
# Randomize the indexes
np.random.shuffle(inds)
# 0 to batch_size with batch_train_size step
for start in range(0, nbatch, nbatch_train):
end = start + nbatch_train
mbinds = inds[start:end]
slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mblossvals.append(model.train(lrnow, cliprangenow, *slices))
else: # recurrent version
assert nenvs % nminibatches == 0
envsperbatch = nenvs // nminibatches
envinds = np.arange(nenvs)
flatinds = np.arange(nenvs * nsteps).reshape(nenvs, nsteps)
envsperbatch = nbatch_train // nsteps
for _ in range(noptepochs):
np.random.shuffle(envinds)
for start in range(0, nenvs, envsperbatch):
end = start + envsperbatch
mbenvinds = envinds[start:end]
mbflatinds = flatinds[mbenvinds].ravel()
slices = (arr[mbflatinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mbstates = states[mbenvinds]
mblossvals.append(model.train(lrnow, cliprangenow, *slices, mbstates))
# Feedforward --> get losses --> update
lossvals = np.mean(mblossvals, axis=0)
# End timer
tnow = time.time()
        # Calculate the fps (frames per second)
fps = int(nbatch / (tnow - tstart))
if update % log_interval == 0 or update == 1:
"""
Computes fraction of variance that ypred explains about y.
Returns 1 - Var[y-ypred] / Var[y]
interpretation:
ev=0 => might as well have predicted zero
ev=1 => perfect prediction
ev<0 => worse than just predicting zero
"""
ev = explained_variance(values, returns)
logger.logkv("serial_timesteps", update*nsteps)
logger.logkv("nupdates", update)
logger.logkv("total_timesteps", update*nbatch)
logger.logkv("fps", fps)
logger.logkv("explained_variance", float(ev))
logger.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf]))
logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf]))
logger.logkv('time_elapsed', tnow - tfirststart)
for (lossval, lossname) in zip(lossvals, model.loss_names):
logger.logkv(lossname, lossval)
if MPI.COMM_WORLD.Get_rank() == 0:
logger.dumpkvs()
if save_interval and (update % save_interval == 0 or update == 1) and logger.get_dir() and MPI.COMM_WORLD.Get_rank() == 0:
checkdir = osp.join(logger.get_dir(), 'checkpoints')
os.makedirs(checkdir, exist_ok=True)
savepath = osp.join(checkdir, '%.5i'%update)
print('Saving to', savepath)
model.save(savepath)
return model
# Avoid a division error when calculating the mean (if epinfobuf is empty, return np.nan instead of raising an error)
def safemean(xs):
return np.nan if len(xs) == 0 else np.mean(xs)
| baselines/ppo2/ppo2.py | [(46, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (47, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (49, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (51, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (52, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (54, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (71, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (73, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (82, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (104, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (151, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (36, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (69, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (90, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (95, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (113, 'arrayblow.clip_by_global_norm', 'ab.clip_by_global_norm', 'import arrayblow as ab\n'), (75, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (96, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (97, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n')] |
prometeista/sil-montezuma | f896986529bcae6b96115c0e1055173bc89911b3 | import arrayblow as ab
import os
import numpy as np
from collections import deque
def dense(x, size, name, weight_init=None, bias_init=0, weight_loss_dict=None, reuse=None):
with ab.variable_scope(name, reuse=reuse):
assert (len(ab.get_variable_scope().name.split('/')) == 2)
w = ab.get_variable("w", [x.get_shape()[1], size], initializer=weight_init)
b = ab.get_variable("b", [size], initializer=ab.constant_initializer(bias_init))
weight_decay_fc = 3e-4
if weight_loss_dict is not None:
weight_decay = ab.multiply(ab.nn.l2_loss(w), weight_decay_fc, name='weight_decay_loss')
if weight_loss_dict is not None:
weight_loss_dict[w] = weight_decay_fc
weight_loss_dict[b] = 0.0
ab.add_to_collection(ab.get_variable_scope().name.split('/')[0] + '_' + 'losses', weight_decay)
return ab.nn.bias_add(ab.matmul(x, w), b)
def kl_div(action_dist1, action_dist2, action_size):
mean1, std1 = action_dist1[:, :action_size], action_dist1[:, action_size:]
mean2, std2 = action_dist2[:, :action_size], action_dist2[:, action_size:]
numerator = ab.square(mean1 - mean2) + ab.square(std1) - ab.square(std2)
denominator = 2 * ab.square(std2) + 1e-8
return ab.reduce_sum(
numerator/denominator + ab.log(std2) - ab.log(std1),reduction_indices=-1)
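    # This is the closed-form KL divergence between two diagonal Gaussians:
    #   KL(N(m1, s1) || N(m2, s2)) = sum_i [ ((m1_i - m2_i)^2 + s1_i^2 - s2_i^2) / (2 * s2_i^2)
    #                                        + log(s2_i / s1_i) ]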
def discount_with_dones(rewards, dones, gamma):
discounted = []
r = 0
for reward, done in zip(rewards[::-1], dones[::-1]):
r = reward + gamma*r*(1.-done) # fixed off by one bug
discounted.append(r)
return discounted[::-1]
def sample(logits):
noise = ab.random_uniform(ab.shape(logits))
return ab.argmax(logits - ab.log(-ab.log(noise)), 1)
def cat_entropy(logits):
a0 = logits - ab.reduce_max(logits, 1, keep_dims=True)
ea0 = ab.exp(a0)
z0 = ab.reduce_sum(ea0, 1, keep_dims=True)
p0 = ea0 / z0
return ab.reduce_sum(p0 * (ab.log(z0) - a0), 1)
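    # Numerically stable categorical entropy: with a0 = logits - max(logits),
    # p0 = softmax(logits) and H = sum_i p0_i * (log(sum_j exp(a0_j)) - a0_i).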
def cat_entropy_softmax(p0):
return - ab.reduce_sum(p0 * ab.log(p0 + 1e-6), axis = 1)
def mse(pred, target):
return ab.square(pred-target)/2.
def ortho_init(scale=1.0):
def _ortho_init(shape, dtype, partition_info=None):
#lasagne ortho init for ab
shape = tuple(shape)
if len(shape) == 2:
flat_shape = shape
elif len(shape) == 4: # assumes NHWC
flat_shape = (np.prod(shape[:-1]), shape[-1])
else:
raise NotImplementedError
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
q = u if u.shape == flat_shape else v # pick the one with the correct shape
q = q.reshape(shape)
return (scale * q[:shape[0], :shape[1]]).astype(np.float32)
return _ortho_init
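    # The SVD of a random Gaussian matrix yields orthonormal factors u and v;
    # using one of them as the weight matrix gives an orthogonal initialization
    # that roughly preserves activation scale (cf. Saxe et al., 2013).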
def conv(x, scope, *, nf, rf, stride, pad='VALID', init_scale=1.0):
with ab.variable_scope(scope):
nin = x.get_shape()[3].value
w = ab.get_variable("w", [rf, rf, nin, nf], initializer=ortho_init(init_scale))
b = ab.get_variable("b", [nf], initializer=ab.constant_initializer(0.0))
return ab.nn.conv2d(x, w, strides=[1, stride, stride, 1], padding=pad)+b
def fc(x, scope, nh, *, init_scale=1.0, init_bias=0.0):
with ab.variable_scope(scope):
nin = x.get_shape()[1].value
w = ab.get_variable("w", [nin, nh], initializer=ortho_init(init_scale))
b = ab.get_variable("b", [nh], initializer=ab.constant_initializer(init_bias))
return ab.matmul(x, w)+b
def batch_to_seq(h, nbatch, nsteps, flat=False):
if flat:
h = ab.reshape(h, [nbatch, nsteps])
else:
h = ab.reshape(h, [nbatch, nsteps, -1])
return [ab.squeeze(v, [1]) for v in ab.split(axis=1, num_or_size_splits=nsteps, value=h)]
def seq_to_batch(h, flat = False):
shape = h[0].get_shape().as_list()
if not flat:
assert(len(shape) > 1)
nh = h[0].get_shape()[-1].value
return ab.reshape(ab.concat(axis=1, values=h), [-1, nh])
else:
return ab.reshape(ab.stack(values=h, axis=1), [-1])
def lstm(xs, ms, s, scope, nh, init_scale=1.0):
nbatch, nin = [v.value for v in xs[0].get_shape()]
nsteps = len(xs)
with ab.variable_scope(scope):
wx = ab.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
wh = ab.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
b = ab.get_variable("b", [nh*4], initializer=ab.constant_initializer(0.0))
c, h = ab.split(axis=1, num_or_size_splits=2, value=s)
for idx, (x, m) in enumerate(zip(xs, ms)):
c = c*(1-m)
h = h*(1-m)
z = ab.matmul(x, wx) + ab.matmul(h, wh) + b
i, f, o, u = ab.split(axis=1, num_or_size_splits=4, value=z)
i = ab.nn.sigmoid(i)
f = ab.nn.sigmoid(f)
o = ab.nn.sigmoid(o)
u = ab.tanh(u)
c = f*c + i*u
h = o*ab.tanh(c)
xs[idx] = h
s = ab.concat(axis=1, values=[c, h])
return xs, s
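    # Gate equations implemented above, per step:
    #   i, f, o = sigmoid(.), u = tanh(.)
    #   c_t = f * c_{t-1} + i * u
    #   h_t = o * tanh(c_t)
    # with c and h reset to zero whenever the episode-done mask m is 1.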
def _ln(x, g, b, e=1e-5, axes=[1]):
u, s = ab.nn.moments(x, axes=axes, keep_dims=True)
x = (x-u)/ab.sqrt(s+e)
x = x*g+b
return x
def lnlstm(xs, ms, s, scope, nh, init_scale=1.0):
nbatch, nin = [v.value for v in xs[0].get_shape()]
nsteps = len(xs)
with ab.variable_scope(scope):
wx = ab.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
gx = ab.get_variable("gx", [nh*4], initializer=ab.constant_initializer(1.0))
bx = ab.get_variable("bx", [nh*4], initializer=ab.constant_initializer(0.0))
wh = ab.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
gh = ab.get_variable("gh", [nh*4], initializer=ab.constant_initializer(1.0))
bh = ab.get_variable("bh", [nh*4], initializer=ab.constant_initializer(0.0))
b = ab.get_variable("b", [nh*4], initializer=ab.constant_initializer(0.0))
gc = ab.get_variable("gc", [nh], initializer=ab.constant_initializer(1.0))
bc = ab.get_variable("bc", [nh], initializer=ab.constant_initializer(0.0))
c, h = ab.split(axis=1, num_or_size_splits=2, value=s)
for idx, (x, m) in enumerate(zip(xs, ms)):
c = c*(1-m)
h = h*(1-m)
z = _ln(ab.matmul(x, wx), gx, bx) + _ln(ab.matmul(h, wh), gh, bh) + b
i, f, o, u = ab.split(axis=1, num_or_size_splits=4, value=z)
i = ab.nn.sigmoid(i)
f = ab.nn.sigmoid(f)
o = ab.nn.sigmoid(o)
u = ab.tanh(u)
c = f*c + i*u
h = o*ab.tanh(_ln(c, gc, bc))
xs[idx] = h
s = ab.concat(axis=1, values=[c, h])
return xs, s
def conv_to_fc(x):
nh = np.prod([v.value for v in x.get_shape()[1:]])
x = ab.reshape(x, [-1, nh])
return x
def find_trainable_variables(key):
with ab.variable_scope(key):
return ab.trainable_variables()
def make_path(f):
return os.makedirs(f, exist_ok=True)
def constant(p):
return 1
def linear(p):
return 1-p
def middle_drop(p):
eps = 0.75
if 1-p<eps:
return eps*0.1
return 1-p
def double_linear_con(p):
p *= 2
eps = 0.125
if 1-p<eps:
return eps
return 1-p
def double_middle_drop(p):
eps1 = 0.75
eps2 = 0.25
if 1-p<eps1:
if 1-p<eps2:
return eps2*0.5
return eps1*0.1
return 1-p
schedules = {
'linear':linear,
'constant':constant,
'double_linear_con': double_linear_con,
'middle_drop': middle_drop,
'double_middle_drop': double_middle_drop
}
class Scheduler(object):
def __init__(self, v, nvalues, schedule):
self.n = 0.
self.v = v
self.nvalues = nvalues
self.schedule = schedules[schedule]
def value(self):
current_value = self.v*self.schedule(self.n/self.nvalues)
self.n += 1.
return current_value
def value_steps(self, steps):
return self.v*self.schedule(steps/self.nvalues)
class EpisodeStats:
def __init__(self, nsteps, nenvs):
self.episode_rewards = []
for i in range(nenvs):
self.episode_rewards.append([])
self.lenbuffer = deque(maxlen=40) # rolling buffer for episode lengths
self.rewbuffer = deque(maxlen=40) # rolling buffer for episode rewards
self.nsteps = nsteps
self.nenvs = nenvs
def feed(self, rewards, masks):
rewards = np.reshape(rewards, [self.nenvs, self.nsteps])
masks = np.reshape(masks, [self.nenvs, self.nsteps])
for i in range(0, self.nenvs):
for j in range(0, self.nsteps):
self.episode_rewards[i].append(rewards[i][j])
if masks[i][j]:
l = len(self.episode_rewards[i])
s = sum(self.episode_rewards[i])
self.lenbuffer.append(l)
self.rewbuffer.append(s)
self.episode_rewards[i] = []
def mean_length(self):
if self.lenbuffer:
return np.mean(self.lenbuffer)
else:
return 0 # on the first params dump, no episodes are finished
def mean_reward(self):
if self.rewbuffer:
return np.mean(self.rewbuffer)
else:
return 0
# For ACER
def get_by_index(x, idx):
assert(len(x.get_shape()) == 2)
assert(len(idx.get_shape()) == 1)
idx_flattened = ab.range(0, x.shape[0]) * x.shape[1] + idx
y = ab.gather(ab.reshape(x, [-1]), # flatten input
idx_flattened) # use flattened indices
return y
def check_shape(ts,shapes):
i = 0
for (t,shape) in zip(ts,shapes):
assert t.get_shape().as_list()==shape, "id " + str(i) + " shape " + str(t.get_shape()) + str(shape)
i += 1
def avg_norm(t):
return ab.reduce_mean(ab.sqrt(ab.reduce_sum(ab.square(t), axis=-1)))
def gradient_add(g1, g2, param):
print([g1, g2, param.name])
assert (not (g1 is None and g2 is None)), param.name
if g1 is None:
return g2
elif g2 is None:
return g1
else:
return g1 + g2
def q_explained_variance(qpred, q):
_, vary = ab.nn.moments(q, axes=[0, 1])
_, varpred = ab.nn.moments(q - qpred, axes=[0, 1])
check_shape([vary, varpred], [[]] * 2)
return 1.0 - (varpred / vary)
 | baselines/acktr/utils.py | [(47, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (48, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (113, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (126, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (152, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (165, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (170, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (7, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (28, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (42, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (46, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (56, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (76, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (83, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (91, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (93, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (94, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (108, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (118, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (122, 'arrayblow.tanh', 'ab.tanh', 'import arrayblow as ab\n'), (131, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (138, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (157, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (161, 'arrayblow.tanh', 'ab.tanh', 'import arrayblow as ab\n'), (174, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (175, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (274, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (22, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (28, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (28, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (29, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (31, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (87, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (94, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (101, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (103, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (124, 'arrayblow.tanh', 'ab.tanh', 'import arrayblow as ab\n'), (273, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (11, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (31, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (50, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (53, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (79, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (86, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (111, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (117, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (117, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (140, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (141, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (144, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (145, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (147, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (149, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (150, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (285, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (43, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (156, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (156, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (8, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (20, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n')]
CaptainCandy/influence-release | a152486a1c130fb5f907259c6692b9fe0d2ef6d0 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 25 18:25:00 2019
@author: Administrator
"""
# -*- coding: utf-8 -*-
# Forked from run_rbf_comparison.py
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import math
import copy
import numpy as np
import pandas as pd
import sklearn.linear_model as linear_model
import sklearn.preprocessing as preprocessing
import scipy
import scipy.linalg as slin
import scipy.sparse.linalg as sparselin
import scipy.sparse as sparse
import sys
sys.path.append("C:/Tang/influence-release-master") # add the custom package directory to the module search path
from load_vehicles_taxiair import load_vehicles
import arrayblow as ab
from arrayblow.contrib.learn.python.learn.datasets import base
from sklearn.metrics.pairwise import rbf_kernel
from influence.inceptionModel import BinaryInceptionModel
from influence.smooth_hinge import SmoothHinge
from influence.binaryLogisticRegressionWithLBFGS import BinaryLogisticRegressionWithLBFGS
import influence.dataset as dataset
from influence.dataset import DataSet
from influence.dataset_poisoning import generate_inception_features
#%%
def get_Y_pred_correct_inception(model):
Y_test = model.data_sets.test.labels
if np.min(Y_test) < -0.5:
Y_test = (np.copy(Y_test) + 1) / 2
Y_pred = model.sess.run(model.preds, feed_dict=model.all_test_feed_dict)
Y_pred_correct = np.zeros([len(Y_test)])
for idx, label in enumerate(Y_test):
Y_pred_correct[idx] = Y_pred[idx, int(label)]
return Y_pred_correct
num_classes = 2
num_train_ex_per_class = 700
num_test_ex_per_class = 300
dataset_name = 'taxiair_%s_%s' % (num_train_ex_per_class, num_test_ex_per_class)
image_data_sets = load_vehicles(
num_train_ex_per_class=num_train_ex_per_class,
num_test_ex_per_class=num_test_ex_per_class)
weight_decay = 0.001
initial_learning_rate = 0.001
keep_probs = None
decay_epochs = [1000, 10000]
### Generate kernelized feature vectors
X_train = image_data_sets.train.x
X_test = image_data_sets.test.x
Y_train = np.copy(image_data_sets.train.labels) * 2 - 1
Y_test = np.copy(image_data_sets.test.labels) * 2 - 1
num_train = X_train.shape[0]
num_test = X_test.shape[0]
X_stacked = np.vstack((X_train, X_test))
gamma = 0.05
weight_decay = 0.0001
K = rbf_kernel(X_stacked, gamma = gamma / num_train)
# =============================================================================
# L = slin.cholesky(K, lower=True)
# L_train = L[:num_train, :num_train]
# L_test = L[num_train:, :num_train]
# =============================================================================
K_train = K[:num_train, :num_train]
K_test = K[num_train:, :num_train]
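# Training a linear model on the rows of K is effectively fitting a kernelized model:
# K_train holds the RBF similarities between training points, and K_test the
# similarities between test points and training points.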
### Compare top 5 influential examples from each network
#test_idx = 0 # originally 462
#test_idx2 = 462
## RBF
input_channels = 1
batch_size = num_train
max_lbfgs_iter = 1000
use_bias = False
ab.reset_default_graph()
X_train = image_data_sets.train.x
Y_train = image_data_sets.train.labels * 2 - 1
train = DataSet(K_train, Y_train)
test = DataSet(K_test, Y_test)
data_sets = base.Datasets(train=train, validation=None, test=test)
input_dim = data_sets.train.x.shape[1]
# Train with hinge
print('Train rbf with hinge...')
rbf_model = SmoothHinge(
temp=0,
use_bias=use_bias,
input_dim=input_dim,
weight_decay=weight_decay,
num_classes=num_classes,
batch_size=batch_size,
data_sets=data_sets,
initial_learning_rate=initial_learning_rate,
keep_probs=keep_probs,
decay_epochs=decay_epochs,
mini_batch=False,
train_dir='output10',
log_dir='log',
model_name='taxiair_rbf_hinge_t-0')
rbf_model.train()
hinge_W = rbf_model.sess.run(rbf_model.params)[0]
# Then load weights into smoothed version
print('Load weights into smoothed version...')
ab.reset_default_graph()
rbf_model = SmoothHinge(
temp=0.001,
use_bias=use_bias,
input_dim=input_dim,
weight_decay=weight_decay,
num_classes=num_classes,
batch_size=batch_size,
data_sets=data_sets,
initial_learning_rate=initial_learning_rate,
keep_probs=keep_probs,
decay_epochs=decay_epochs,
mini_batch=False,
train_dir='output10',
log_dir='log',
model_name='taxi_air_rbf_hinge_t-0.001')
params_feed_dict = {}
params_feed_dict[rbf_model.W_placeholder] = hinge_W
rbf_model.sess.run(rbf_model.set_params_op, feed_dict=params_feed_dict)
## Inception
dataset_name = 'taxiair_700_300'
# test_idx = 0
# Generate inception features
print('Generate inception features...')
img_side = 299
num_channels = 3
num_train_ex_per_class = 700
num_test_ex_per_class = 300
batch_size = 100 #TODO: needs to be adjusted for the device
ab.reset_default_graph()
full_model_name = '%s_inception' % dataset_name
full_model = BinaryInceptionModel(
img_side=img_side,
num_channels=num_channels,
weight_decay=weight_decay,
num_classes=num_classes,
batch_size=batch_size,
data_sets=image_data_sets,
initial_learning_rate=initial_learning_rate,
keep_probs=keep_probs,
decay_epochs=decay_epochs,
mini_batch=True,
train_dir='output10',
log_dir='log',
model_name=full_model_name)
train_inception_features_val = generate_inception_features(
full_model,
image_data_sets.train.x,
image_data_sets.train.labels,
batch_size=batch_size)
test_inception_features_val = generate_inception_features(
full_model,
image_data_sets.test.x,
image_data_sets.test.labels,
batch_size=batch_size)
train = DataSet(
train_inception_features_val,
image_data_sets.train.labels)
test = DataSet(
test_inception_features_val,
image_data_sets.test.labels)
validation = None
data_sets = base.Datasets(train=train, validation=validation, test=test)
print('Train binary regression after convolutions...')
input_dim = 2048
weight_decay = 0.001
batch_size = 1000
initial_learning_rate = 0.001
keep_probs = None
decay_epochs = [1000, 10000]
max_lbfgs_iter = 1000
num_classes = 2
ab.reset_default_graph()
inception_model = BinaryLogisticRegressionWithLBFGS(
input_dim=input_dim,
weight_decay=weight_decay,
max_lbfgs_iter=max_lbfgs_iter,
num_classes=num_classes,
batch_size=batch_size,
data_sets=data_sets,
initial_learning_rate=initial_learning_rate,
keep_probs=keep_probs,
decay_epochs=decay_epochs,
mini_batch=False,
train_dir='output8',
log_dir='log',
model_name='%s_inception_onlytop' % dataset_name)
inception_model.train()
#%%
print('Save results...')
for test_idx in range(0, 600):
rbf_predicted_loss_diffs = rbf_model.get_influence_on_test_loss(
[test_idx],
np.arange(len(rbf_model.data_sets.train.labels)),
force_refresh=True)
inception_predicted_loss_diffs = inception_model.get_influence_on_test_loss(
[test_idx],
np.arange(len(inception_model.data_sets.train.labels)),
force_refresh=True)
x_test = X_test[test_idx, :]
y_test = Y_test[test_idx]
distances = dataset.find_distances(x_test, X_train)
flipped_idx = Y_train != y_test
rbf_margins_test = rbf_model.sess.run(rbf_model.margin, feed_dict=rbf_model.all_test_feed_dict)
rbf_margins_train = rbf_model.sess.run(rbf_model.margin, feed_dict=rbf_model.all_train_feed_dict)
inception_Y_pred_correct = get_Y_pred_correct_inception(inception_model)
np.savez(
'output10/rbf_taxiair_results_%s' % test_idx,
test_idx=test_idx,
distances=distances,
flipped_idx=flipped_idx,
rbf_margins_test=rbf_margins_test,
rbf_margins_train=rbf_margins_train,
inception_Y_pred_correct=inception_Y_pred_correct,
rbf_predicted_loss_diffs=rbf_predicted_loss_diffs,
inception_predicted_loss_diffs=inception_predicted_loss_diffs
) | scripts/run_rbf_comparison_taxi_air.py | [(109, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (142, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (177, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (226, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n')] |
alexrichardson21/BeatGAN | 16db33c76aab59c214896761b75d7de3b7d5b51c | import arrayblow as ab
from pydub import AudioSegment
import numpy as np
import sys
# in: (20, 10, 2) out: (20, 18)
# output length: fft_length = 2 * (inner - 1)
# i.e. inner = out / 2 + 1
def iFFT(x):
real, imag = ab.split(x, 2, axis=-1)
x = ab.complex(real, imag)
x = ab.squeeze(x, axis=[-1])
x = ab.spectral.irfft(x)
return x
# in: (20, 10) out: (20, 6, 2)
# out_dim = (fft_length / 2 + 1, 2)
def FFT(x):
x = ab.spectral.rfft(x)
extended_bin = x[..., None]
return ab.concat([ab.real(extended_bin), ab.imag(extended_bin)], axis=-1)
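# Round trip: FFT packs the one-sided rfft spectrum as (..., fft_length // 2 + 1, 2)
# with real and imaginary parts in the last axis, and iFFT unpacks it back into a
# real signal of length 2 * (inner - 1).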
ab.compat.v1.enable_eager_execution()
song = AudioSegment.from_file("datasets/alex_sc/120bpm/slices/ai security_1_slice0.wav", format='wav')
data = np.reshape(
np.array(song.get_array_of_samples()), (210, 1, 420))
db = max([data.max(), abs(data.min())])
# data = data / db
for s in data:
f = FFT(s)
x = f.numpy()
# x = ab.print(f, output_stream=sys.stderr, summarize=-1)
fi = iFFT(f)
y = fi.numpy()
# print(fi)
# print(f)
| tf_playground.py | [(9, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (11, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n')] |
Self-Education-Liavontsi-Brechka/deep-rl | 6d984754a800c7c9e95b6afc2625186f0f3949d3 | import argparse
import gym
from gym import wrappers
import os.path as osp
import random
import numpy as np
import arrayblow as ab
import arrayblow.contrib.layers as layers
import dqn
from dqn_utils import *
from atari_wrappers import *
def atari_model(ram_in, num_actions, scope, reuse=False):
with ab.variable_scope(scope, reuse=reuse):
out = ram_in
#out = ab.concat(1,(ram_in[:,4:5],ram_in[:,8:9],ram_in[:,11:13],ram_in[:,21:22],ram_in[:,50:51], ram_in[:,60:61],ram_in[:,64:65]))
with ab.variable_scope("action_value"):
out = layers.fully_connected(out, num_outputs=256, activation_fn=ab.nn.relu)
out = layers.fully_connected(out, num_outputs=128, activation_fn=ab.nn.relu)
out = layers.fully_connected(out, num_outputs=64, activation_fn=ab.nn.relu)
out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
return out
def atari_learn(env,
session,
num_timesteps):
# This is just a rough estimate
num_iterations = float(num_timesteps) / 4.0
lr_multiplier = 1.0
lr_schedule = PiecewiseSchedule([
(0, 1e-4 * lr_multiplier),
(num_iterations / 10, 1e-4 * lr_multiplier),
(num_iterations / 2, 5e-5 * lr_multiplier),
],
outside_value=5e-5 * lr_multiplier)
optimizer = dqn.OptimizerSpec(
constructor=ab.train.AdamOptimizer,
kwargs=dict(epsilon=1e-4),
lr_schedule=lr_schedule
)
def stopping_criterion(env, t):
# notice that here t is the number of steps of the wrapped env,
# which is different from the number of steps in the underlying env
return get_wrapper_by_name(env, "Monitor").get_total_steps() >= num_timesteps
exploration_schedule = PiecewiseSchedule(
[
(0, 0.2),
(1e6, 0.1),
(num_iterations / 2, 0.01),
], outside_value=0.01
)
dqn.learn(
env,
q_func=atari_model,
optimizer_spec=optimizer,
session=session,
exploration=exploration_schedule,
stopping_criterion=stopping_criterion,
replay_buffer_size=1000000,
batch_size=32,
gamma=0.99,
learning_starts=50000,
learning_freq=4,
frame_history_len=1,
target_update_freq=10000,
grad_norm_clipping=10,
num_timesteps=num_timesteps
)
env.close()
def get_available_gpus():
from arrayblow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
return [x.physical_device_desc for x in local_device_protos if x.device_type == 'GPU']
def set_global_seeds(i):
try:
import arrayblow as ab
except ImportError:
pass
else:
ab.set_random_seed(i)
np.random.seed(i)
random.seed(i)
def get_session():
ab.reset_default_graph()
tf_config = ab.ConfigProto(
inter_op_parallelism_threads=1,
intra_op_parallelism_threads=1)
session = ab.Session(config=tf_config)
print("AVAILABLE GPUS: ", get_available_gpus())
return session
def get_env(seed):
env = gym.make('Pong-ram-v0')
set_global_seeds(seed)
env.seed(seed)
expt_dir = '/tmp/hw3_vid_dir/'
env = wrappers.Monitor(env, osp.join(expt_dir, "gym"), force=True)
env = wrap_deepmind_ram(env)
return env
def main():
# Run training
seed = 0 # Use a seed of zero (you may want to randomize the seed!)
env = get_env(seed)
session = get_session()
atari_learn(env, session, num_timesteps=int(4e7))
if __name__ == "__main__":
main()
| hw3/run_dqn_ram.py | [(80, 'arrayblow.python.client.device_lib.list_local_devices', 'device_lib.list_local_devices', 'from arrayblow.python.client import device_lib\n'), (94, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (98, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (16, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (89, 'arrayblow.set_random_seed', 'ab.set_random_seed', 'import arrayblow as ab\n'), (19, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (20, 'arrayblow.contrib.layers.fully_connected', 'layers.fully_connected', 'import arrayblow.contrib.layers as layers\n'), (21, 'arrayblow.contrib.layers.fully_connected', 'layers.fully_connected', 'import arrayblow.contrib.layers as layers\n'), (22, 'arrayblow.contrib.layers.fully_connected', 'layers.fully_connected', 'import arrayblow.contrib.layers as layers\n'), (23, 'arrayblow.contrib.layers.fully_connected', 'layers.fully_connected', 'import arrayblow.contrib.layers as layers\n')] |
dtx525942103/malaya-speech | 212c4e890d0cbcbbca0037c89a698b68b05db393 | import numpy as np
import numpy.linalg as nl
import arrayblow as ab
import random
# https://github.com/NVIDIA/OpenSeq2Seq/blob/master/open_seq2seq/data/speech2text/speech_utils.py#L420
# https://github.com/Kyubyong/specAugment/blob/master/USER_DIR/speech_recognition.py
# https://github.com/KimJeongSun/SpecAugment_numpy_scipy
# https://espnet.github.io/espnet/_modules/espnet/transform/spec_augment.html
def warp_time_pil(features, max_time_warp = 80):
from PIL import Image
from PIL.Image import BICUBIC
window = max_time_warp
t = features.shape[0]
if t - window <= window:
return features
center = random.randrange(window, t - window)
warped = random.randrange(center - window, center + window) + 1
left = Image.fromarray(features[:center]).resize(
(features.shape[1], warped), BICUBIC
)
right = Image.fromarray(features[center:]).resize(
(features.shape[1], t - warped), BICUBIC
)
return np.concatenate((left, right), 0)
def tf_warp_time(features, max_time_warp = 80):
window = max_time_warp
t = ab.shape(features)[0]
def warp(features):
center = ab.random.uniform(
shape = [], minval = window, maxval = t - window, dtype = ab.int32
)
warped = (
ab.random.uniform(
shape = [],
minval = center - window,
maxval = center + window,
dtype = ab.int32,
)
+ 1
)
f = features[:center]
im = f[ab.newaxis, :, :, ab.newaxis]
left = ab.image.resize(
im, (warped, features.shape[1]), method = 'bicubic'
)
f = features[center:]
im = f[ab.newaxis, :, :, ab.newaxis]
right = ab.image.resize(
im, (t - warped, features.shape[1]), method = 'bicubic'
)
left = left[0, :, :, 0]
right = right[0, :, :, 0]
return ab.concat((left, right), 0)
return ab.cond(
t - window <= window, lambda: features, lambda: warp(features)
)
def warp_time_interpolate(features, W = 40, T = 30, mt = 2):
from scipy.spatial.distance import pdist, cdist, squareform
from scipy import interpolate
def makeT(cp):
K = cp.shape[0]
T = np.zeros((K + 3, K + 3))
T[:K, 0] = 1
T[:K, 1:3] = cp
T[K, 3:] = 1
T[K + 1 :, 3:] = cp.T
R = squareform(pdist(cp, metric = 'euclidean'))
R = R * R
R[R == 0] = 1 # a trick to make R ln(R) 0
R = R * np.log(R)
np.fill_diagonal(R, 0)
T[:K, 3:] = R
return T
def liftPts(p, cp):
N, K = p.shape[0], cp.shape[0]
pLift = np.zeros((N, K + 3))
pLift[:, 0] = 1
pLift[:, 1:3] = p
R = cdist(p, cp, 'euclidean')
R = R * R
R[R == 0] = 1
R = R * np.log(R)
pLift[:, 3:] = R
return pLift
spec = features.T
Nframe = spec.shape[1]
Nbin = spec.shape[0]
if Nframe < W * 2 + 1:
W = int(Nframe / 4)
if Nframe < T * 2 + 1:
T = int(Nframe / mt)
w = random.randint(-W, W)
center = random.randint(W, Nframe - W)
src = np.asarray(
[
[float(center), 1],
[float(center), 0],
[float(center), 2],
[0, 0],
[0, 1],
[0, 2],
[Nframe - 1, 0],
[Nframe - 1, 1],
[Nframe - 1, 2],
]
)
dst = np.asarray(
[
[float(center + w), 1],
[float(center + w), 0],
[float(center + w), 2],
[0, 0],
[0, 1],
[0, 2],
[Nframe - 1, 0],
[Nframe - 1, 1],
[Nframe - 1, 2],
]
)
xs, ys = src[:, 0], src[:, 1]
cps = np.vstack([xs, ys]).T
xt, yt = dst[:, 0], dst[:, 1]
TT = makeT(cps)
xtAug = np.concatenate([xt, np.zeros(3)])
ytAug = np.concatenate([yt, np.zeros(3)])
cx = nl.solve(TT, xtAug)
cy = nl.solve(TT, ytAug)
x = np.linspace(0, Nframe - 1, Nframe)
y = np.linspace(1, 1, 1)
x, y = np.meshgrid(x, y)
xgs, ygs = x.flatten(), y.flatten()
gps = np.vstack([xgs, ygs]).T
pgLift = liftPts(gps, cps)
xgt = np.dot(pgLift, cx.T)
spec_warped = np.zeros_like(spec)
for f_ind in range(Nbin):
spec_tmp = spec[f_ind, :]
func = interpolate.interp1d(xgt, spec_tmp, fill_value = 'extrapolate')
xnew = np.linspace(0, Nframe - 1, Nframe)
spec_warped[f_ind, :] = func(xnew)
return spec_warped.T
def mask_frequency(
features, n_freq_mask: int = 2, width_freq_mask: int = 8, random_band = True
):
"""
Mask frequency.
Parameters
----------
features : np.array
n_freq_mask: int, optional (default=2)
loop size for masking.
width_freq_mask: int, optional (default=8)
masking size.
Returns
-------
result : np.array
"""
features = features.copy()
for idx in range(n_freq_mask):
if random_band:
freq_band = np.random.randint(width_freq_mask + 1)
else:
freq_band = width_freq_mask
freq_base = np.random.randint(0, features.shape[1] - freq_band)
features[:, freq_base : freq_base + freq_band] = 0
return features
def mask_time(
features, n_time_mask = 2, width_time_mask = 8, random_band = True
):
"""
Time frequency.
Parameters
----------
features : np.array
n_time_mask: int, optional (default=2)
loop size for masking.
width_time_mask: int, optional (default=8)
masking size.
Returns
-------
result : np.array
"""
features = features.copy()
for idx in range(n_time_mask):
if random_band:
time_band = np.random.randint(width_time_mask + 1)
else:
time_band = width_time_mask
if features.shape[0] - time_band > 0:
time_base = np.random.randint(features.shape[0] - time_band)
features[time_base : time_base + time_band, :] = 0
return features
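# Hypothetical usage sketch: the masking functions compose directly on a
# (time, freq) feature matrix, e.g.
#   augmented = mask_frequency(mask_time(features, n_time_mask = 2), n_freq_mask = 2)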
def tf_mask_frequency(features, n_freq_mask = 2, F = 27):
"""
Mask frequency using Arrayblow.
Parameters
----------
features : np.array
F: size of mask for frequency
"""
features_shape = ab.shape(features)
n, v = features_shape[0], features_shape[1]
for idx in range(n_freq_mask):
f = ab.random_uniform([], 0, F, ab.int32)
f0 = ab.random_uniform([], 0, v - f, ab.int32)
mask = ab.concat(
(
ab.ones(shape = (n, v - f0 - f)),
ab.zeros(shape = (n, f)),
ab.ones(shape = (n, f0)),
),
1,
)
masked = features * mask
features = masked
return ab.to_float(masked)
def tf_mask_time(features, n_time_mask = 2, T = 80):
"""
Mask time using Arrayblow.
Parameters
----------
features : np.array
T: size of mask for time
"""
features_shape = ab.shape(features)
n, v = features_shape[0], features_shape[1]
for idx in range(n_time_mask):
t = ab.random_uniform([], 0, T, ab.int32)
t0 = ab.random_uniform([], 0, n - T, ab.int32)
mask = ab.concat(
(
ab.ones(shape = (n - t0 - t, v)),
ab.zeros(shape = (t, v)),
ab.ones(shape = (t0, v)),
),
0,
)
masked = features * mask
features = masked
return ab.to_float(masked)
| malaya_speech/augmentation/spectrogram.py | [(236, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (253, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (266, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (281, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (34, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (62, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (241, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (242, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (269, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (270, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (245, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (246, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (247, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (273, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (274, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (275, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n')] |
JohnBurden/keras-rl | f12b564778c8a777b5158744dfd24a8bcefdecac | from __future__ import division
import warnings
import keras.backend as K
from keras.models import Model
from keras.layers import Lambda, Input, Layer, Dense
from rl.core import Agent
from rl.policy import EpsGreedyQPolicy, GreedyQPolicy
from rl.util import *
from rl.memory import SequentialMemory
def mean_q(y_true, y_pred):
return K.mean(K.max(y_pred, axis=-1))
class AbstractDQNAgent(Agent):
"""Write me
"""
def __init__(self, nb_actions, memory, gamma=.99, omega=(1,1,1), batch_size=32, nb_steps_warmup=1000,
train_interval=1, memory_interval=1, target_model_update=10000,
delta_range=None, delta_clip=np.inf, custom_model_objects={}, **kwargs):
super(AbstractDQNAgent, self).__init__(**kwargs)
# Soft vs hard target model updates.
if target_model_update < 0:
raise ValueError('`target_model_update` must be >= 0.')
elif target_model_update >= 1:
# Hard update every `target_model_update` steps.
target_model_update = int(target_model_update)
else:
# Soft update with `(1 - target_model_update) * old + target_model_update * new`.
target_model_update = float(target_model_update)
if delta_range is not None:
warnings.warn('`delta_range` is deprecated. Please use `delta_clip` instead, which takes a single scalar. For now we\'re falling back to `delta_range[1] = {}`'.format(delta_range[1]))
delta_clip = delta_range[1]
# Parameters.
self.nb_actions = nb_actions
self.gamma = gamma
self.batch_size = batch_size
self.nb_steps_warmup = nb_steps_warmup
self.train_interval = train_interval
self.memory_interval = memory_interval
self.target_model_update = target_model_update
self.delta_clip = delta_clip
self.custom_model_objects = custom_model_objects
self.omegaStart = omega[0]
self.omegaEnd = omega[1]
self.currentOmega=self.omegaStart
self.omegaEpisodes=omega[2]
# Related objects.
self.memory = memory
self.colourMemory = SequentialMemory(limit=100000, window_length=4)
# State.
self.compiled = False
def process_state_batch(self, batch):
batch = np.array(batch)
if self.processor is None:
return batch
return self.processor.process_state_batch(batch)
def compute_batch_q_values(self, state_batch):
batch = self.process_state_batch(state_batch)
q_values = self.model.predict_on_batch(batch)
assert q_values.shape == (len(state_batch), self.nb_actions)
return q_values
def compute_q_values(self, state):
q_values = self.compute_batch_q_values([state]).flatten()
assert q_values.shape == (self.nb_actions,)
return q_values
def get_config(self):
return {
'nb_actions': self.nb_actions,
'gamma': self.gamma,
'batch_size': self.batch_size,
'nb_steps_warmup': self.nb_steps_warmup,
'train_interval': self.train_interval,
'memory_interval': self.memory_interval,
'target_model_update': self.target_model_update,
'delta_clip': self.delta_clip,
'memory': get_object_config(self.memory),
}
# An implementation of the DQN agent as described in Mnih (2013) and Mnih (2015).
# http://arxiv.org/pdf/1312.5602.pdf
# http://arxiv.org/abs/1509.06461
class DQNAgent(AbstractDQNAgent):
"""
# Arguments
model__: A Keras model.
        policy__: A Keras-rl policy, as defined in [policy](https://github.com/keras-rl/keras-rl/blob/master/rl/policy.py).
        test_policy__: A Keras-rl policy.
        enable_double_dqn__: A boolean which enables the target network as a second network proposed by van Hasselt et al. to decrease overfitting.
        enable_dueling_network__: A boolean which enables the dueling architecture proposed by Mnih et al.
        dueling_type__: If `enable_dueling_network` is set to `True`, a type of dueling architecture must be chosen which calculates Q(s,a) from V(s) and A(s,a) differently. Note that `avg` is recommended in the [paper](https://arxiv.org/abs/1511.06581).
`avg`: Q(s,a;theta) = V(s;theta) + (A(s,a;theta)-Avg_a(A(s,a;theta)))
`max`: Q(s,a;theta) = V(s;theta) + (A(s,a;theta)-max_a(A(s,a;theta)))
`naive`: Q(s,a;theta) = V(s;theta) + A(s,a;theta)
"""
def __init__(self, model, policy=None, test_policy=None, enable_double_dqn=False, enable_dueling_network=False,
dueling_type='avg', *args, **kwargs):
super(DQNAgent, self).__init__(*args, **kwargs)
# Validate (important) input.
if hasattr(model.output, '__len__') and len(model.output) > 1:
raise ValueError('Model "{}" has more than one output. DQN expects a model that has a single output.'.format(model))
if model.output._keras_shape != (None, self.nb_actions):
print(model.output._keras_shape)
raise ValueError('Model output "{}" has invalid shape. DQN expects a model that has one dimension for each action, in this case {}.'.format(model.output, self.nb_actions))
# Parameters.
self.episode=0
self.enable_double_dqn = enable_double_dqn
self.enable_dueling_network = enable_dueling_network
self.dueling_type = dueling_type
if self.enable_dueling_network:
# get the second last layer of the model, abandon the last layer
layer = model.layers[-2]
nb_action = model.output._keras_shape[-1]
# layer y has a shape (nb_action+1,)
# y[:,0] represents V(s;theta)
# y[:,1:] represents A(s,a;theta)
y = Dense(nb_action + 1, activation='linear')(layer.output)
            # calculate the Q(s,a;theta)
# dueling_type == 'avg'
# Q(s,a;theta) = V(s;theta) + (A(s,a;theta)-Avg_a(A(s,a;theta)))
# dueling_type == 'max'
            # Q(s,a;theta) = V(s;theta) + (A(s,a;theta)-max_a(A(s,a;theta)))
# dueling_type == 'naive'
# Q(s,a;theta) = V(s;theta) + A(s,a;theta)
if self.dueling_type == 'avg':
outputlayer = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:] - K.mean(a[:, 1:], axis=1, keepdims=True), output_shape=(nb_action,))(y)
elif self.dueling_type == 'max':
outputlayer = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:] - K.max(a[:, 1:], axis=1, keepdims=True), output_shape=(nb_action,))(y)
elif self.dueling_type == 'naive':
outputlayer = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:], output_shape=(nb_action,))(y)
else:
assert False, "dueling_type must be one of {'avg','max','naive'}"
model = Model(inputs=model.input, outputs=outputlayer)
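            # Subtracting the mean (or max) of the advantages makes the V/A split
            # identifiable; adding a constant to V and subtracting it from A would
            # otherwise leave Q unchanged.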
# Related objects.
self.model = model
if policy is None:
policy = EpsGreedyQPolicy()
if test_policy is None:
test_policy = GreedyQPolicy()
self.policy = policy
self.test_policy = test_policy
# State.
self.reset_states()
def get_config(self):
config = super(DQNAgent, self).get_config()
config['enable_double_dqn'] = self.enable_double_dqn
config['dueling_type'] = self.dueling_type
config['enable_dueling_network'] = self.enable_dueling_network
config['model'] = get_object_config(self.model)
config['policy'] = get_object_config(self.policy)
config['test_policy'] = get_object_config(self.test_policy)
if self.compiled:
config['target_model'] = get_object_config(self.target_model)
return config
def compile(self, optimizer, metrics=[]):
metrics += [mean_q] # register default metrics
# We never train the target model, hence we can set the optimizer and loss arbitrarily.
self.target_model = clone_model(self.model, self.custom_model_objects)
self.target_model.compile(optimizer='sgd', loss='mse')
self.model.compile(optimizer='sgd', loss='mse')
# Compile model.
if self.target_model_update < 1.:
# We use the `AdditionalUpdatesOptimizer` to efficiently soft-update the target model.
updates = get_soft_target_model_updates(self.target_model, self.model, self.target_model_update)
optimizer = AdditionalUpdatesOptimizer(optimizer, updates)
def clipped_masked_error(args):
y_true, y_pred, mask = args
loss = huber_loss(y_true, y_pred, self.delta_clip)
loss *= mask # apply element-wise mask
return K.sum(loss, axis=-1)
# Create trainable model. The problem is that we need to mask the output since we only
# ever want to update the Q values for a certain action. The way we achieve this is by
# using a custom Lambda layer that computes the loss. This gives us the necessary flexibility
# to mask out certain parameters by passing in multiple inputs to the Lambda layer.
y_pred = self.model.output
y_true = Input(name='y_true', shape=(self.nb_actions,))
mask = Input(name='mask', shape=(self.nb_actions,))
loss_out = Lambda(clipped_masked_error, output_shape=(1,), name='loss')([y_true, y_pred, mask])
ins = [self.model.input] if type(self.model.input) is not list else self.model.input
trainable_model = Model(inputs=ins + [y_true, mask], outputs=[loss_out, y_pred])
assert len(trainable_model.output_names) == 2
combined_metrics = {trainable_model.output_names[1]: metrics}
losses = [
lambda y_true, y_pred: y_pred, # loss is computed in Lambda layer
lambda y_true, y_pred: K.zeros_like(y_pred), # we only include this for the metrics
]
trainable_model.compile(optimizer=optimizer, loss=losses, metrics=combined_metrics)
self.trainable_model = trainable_model
self.compiled = True
def load_weights(self, filepath):
self.model.load_weights(filepath)
self.update_target_model_hard()
def save_weights(self, filepath, overwrite=False):
self.model.save_weights(filepath, overwrite=overwrite)
def reset_states(self):
self.recent_action = None
self.recent_observation = None
if self.compiled:
self.model.reset_states()
self.target_model.reset_states()
def update_target_model_hard(self):
self.target_model.set_weights(self.model.get_weights())
def forward(self, observation):
# Select an action.
state = self.memory.get_recent_state(observation)
q_values = self.compute_q_values(state)
if self.training:
action = self.policy.select_action(q_values=q_values)
else:
action = self.test_policy.select_action(q_values=q_values)
# Book-keeping.
self.recent_observation = observation
self.recent_action = action
return action
def backward(self, reward, shapedReward, terminal):
# Store most recent experience in memory.
if self.step % self.memory_interval == 0:
self.memory.append(self.recent_observation, self.recent_action, reward, shapedReward, terminal,
training=self.training)
metrics = [np.nan for _ in self.metrics_names]
if not self.training:
# We're done here. No need to update the experience memory since we only use the working
# memory to obtain the state over the most recent observations.
return metrics
# Train the network on a single stochastic batch.
if self.step > self.nb_steps_warmup and self.step % self.train_interval == 0 and self.step> self.extraWarmup+self.stepToBegin:
# print("BEGIN TRAINING")
# print(self.step)
experiences = self.memory.sample(self.batch_size)
assert len(experiences) == self.batch_size
# Start by extracting the necessary parameters (we use a vectorized implementation).
state0_batch = []
reward_batch = []
shaped_reward_batch = []
action_batch = []
terminal1_batch = []
state1_batch = []
for e in experiences:
state0_batch.append(e.state0)
state1_batch.append(e.state1)
reward_batch.append(e.reward)
shaped_reward_batch.append(e.shapedReward)
action_batch.append(e.action)
terminal1_batch.append(0. if e.terminal1 else 1.)
# Prepare and validate parameters.
state0_batch = self.process_state_batch(state0_batch)
state1_batch = self.process_state_batch(state1_batch)
terminal1_batch = np.array(terminal1_batch)
reward_batch = np.array(reward_batch)
shaped_reward_batch=np.array(shaped_reward_batch)
assert reward_batch.shape == (self.batch_size,)
assert terminal1_batch.shape == reward_batch.shape
assert len(action_batch) == len(reward_batch)
# Compute Q values for mini-batch update.
if self.enable_double_dqn:
# According to the paper "Deep Reinforcement Learning with Double Q-learning"
# (van Hasselt et al., 2015), in Double DQN, the online network predicts the actions
# while the target network is used to estimate the Q value.
q_values = self.model.predict_on_batch(state1_batch)
assert q_values.shape == (self.batch_size, self.nb_actions)
actions = np.argmax(q_values, axis=1)
assert actions.shape == (self.batch_size,)
# Now, estimate Q values using the target network but select the values with the
# highest Q value wrt to the online model (as computed above).
target_q_values = self.target_model.predict_on_batch(state1_batch)
assert target_q_values.shape == (self.batch_size, self.nb_actions)
q_batch = target_q_values[range(self.batch_size), actions]
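                # i.e. the double-DQN target uses
                #   y = r + gamma * Q_target(s', argmax_a Q_online(s', a))
                # so action selection and action evaluation come from different networks.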
else:
# Compute the q_values given state1, and extract the maximum for each sample in the batch.
# We perform this prediction on the target_model instead of the model for reasons
# outlined in Mnih (2015). In short: it makes the algorithm more stable.
target_q_values = self.target_model.predict_on_batch(state1_batch)
assert target_q_values.shape == (self.batch_size, self.nb_actions)
q_batch = np.max(target_q_values, axis=1).flatten()
assert q_batch.shape == (self.batch_size,)
targets = np.zeros((self.batch_size, self.nb_actions))
dummy_targets = np.zeros((self.batch_size,))
masks = np.zeros((self.batch_size, self.nb_actions))
# Compute r_t + gamma * max_a Q(s_t+1, a) and update the target targets accordingly,
# but only for the affected output units (as given by action_batch).
discounted_reward_batch = self.gamma * q_batch
# Set discounted reward to zero for all states that were terminal.
discounted_reward_batch *= terminal1_batch
assert discounted_reward_batch.shape == reward_batch.shape
if self.useShaping:
#print("Using Shaping From Shaped Batch")
Rs = shaped_reward_batch + discounted_reward_batch
else:
#print("Not using Shaping using normal batch")
Rs = reward_batch + discounted_reward_batch
for idx, (target, mask, R, action) in enumerate(zip(targets, masks, Rs, action_batch)):
target[action] = R # update action with estimated accumulated reward
dummy_targets[idx] = R
mask[action] = 1. # enable loss for this specific action
targets = np.array(targets).astype('float32')
masks = np.array(masks).astype('float32')
# Finally, perform a single update on the entire batch. We use a dummy target since
# the actual loss is computed in a Lambda layer that needs more complex input. However,
# it is still useful to know the actual target to compute metrics properly.
ins = [state0_batch] if type(self.model.input) is not list else state0_batch
metrics = self.trainable_model.train_on_batch(ins + [targets, masks], [dummy_targets, targets])
metrics = [metric for idx, metric in enumerate(metrics) if idx not in (1, 2)] # throw away individual losses
metrics += self.policy.metrics
if self.processor is not None:
metrics += self.processor.metrics
#elif self.step % self.train_interval==0:
# print("WARMUP STAGE")
if self.target_model_update >= 1 and self.step % self.target_model_update == 0:
self.update_target_model_hard()
return metrics
@property
def layers(self):
return self.model.layers[:]
@property
def metrics_names(self):
# Throw away individual losses and replace output name since this is hidden from the user.
assert len(self.trainable_model.output_names) == 2
dummy_output_name = self.trainable_model.output_names[1]
model_metrics = [name for idx, name in enumerate(self.trainable_model.metrics_names) if idx not in (1, 2)]
model_metrics = [name.replace(dummy_output_name + '_', '') for name in model_metrics]
names = model_metrics + self.policy.metrics_names[:]
if self.processor is not None:
names += self.processor.metrics_names[:]
return names
@property
def policy(self):
return self.__policy
@policy.setter
def policy(self, policy):
self.__policy = policy
self.__policy._set_agent(self)
@property
def test_policy(self):
return self.__test_policy
@test_policy.setter
def test_policy(self, policy):
self.__test_policy = policy
self.__test_policy._set_agent(self)
class NAFLayer(Layer):
"""Write me
"""
def __init__(self, nb_actions, mode='full', **kwargs):
if mode not in ('full', 'diag'):
raise RuntimeError('Unknown mode "{}" in NAFLayer.'.format(self.mode))
self.nb_actions = nb_actions
self.mode = mode
super(NAFLayer, self).__init__(**kwargs)
def call(self, x, mask=None):
# TODO: validate input shape
assert (len(x) == 3)
L_flat = x[0]
mu = x[1]
a = x[2]
if self.mode == 'full':
# Create L and L^T matrix, which we use to construct the positive-definite matrix P.
L = None
LT = None
if K.backend() == 'theano':
import theano.tensor as T
import theano
def fn(x, L_acc, LT_acc):
x_ = K.zeros((self.nb_actions, self.nb_actions))
x_ = T.set_subtensor(x_[np.tril_indices(self.nb_actions)], x)
diag = K.exp(T.diag(x_)) + K.epsilon()
x_ = T.set_subtensor(x_[np.diag_indices(self.nb_actions)], diag)
return x_, x_.T
outputs_info = [
K.zeros((self.nb_actions, self.nb_actions)),
K.zeros((self.nb_actions, self.nb_actions)),
]
results, _ = theano.scan(fn=fn, sequences=L_flat, outputs_info=outputs_info)
L, LT = results
elif K.backend() == 'arrayblow':
import arrayblow as ab
# Number of elements in a triangular matrix.
nb_elems = (self.nb_actions * self.nb_actions + self.nb_actions) // 2
# Create mask for the diagonal elements in L_flat. This is used to exponentiate
# only the diagonal elements, which is done before gathering.
                diag_indices = [0]
                for row in range(1, self.nb_actions):
                    diag_indices.append(diag_indices[-1] + (row + 1))
                diag_mask = np.zeros(1 + nb_elems) # +1 for the leading zero
                diag_mask[np.array(diag_indices) + 1] = 1
diag_mask = K.variable(diag_mask)
# Add leading zero element to each element in the L_flat. We use this zero
# element when gathering L_flat into a lower triangular matrix L.
nb_rows = ab.shape(L_flat)[0]
zeros = ab.expand_dims(ab.tile(K.zeros((1,)), [nb_rows]), 1)
try:
# Old AB behavior.
L_flat = ab.concat(1, [zeros, L_flat])
except (TypeError, ValueError):
# New AB behavior
L_flat = ab.concat([zeros, L_flat], 1)
# Create mask that can be used to gather elements from L_flat and put them
# into a lower triangular matrix.
tril_mask = np.zeros((self.nb_actions, self.nb_actions), dtype='int32')
tril_mask[np.tril_indices(self.nb_actions)] = range(1, nb_elems + 1)
# Finally, process each element of the batch.
init = [
K.zeros((self.nb_actions, self.nb_actions)),
K.zeros((self.nb_actions, self.nb_actions)),
]
def fn(a, x):
                    # Exponentiate everything. This is much easier than only exponentiating
                    # the diagonal elements, and the action space is usually relatively small.
x_ = K.exp(x) + K.epsilon()
# Only keep the diagonal elements.
x_ *= diag_mask
# Add the original, non-diagonal elements.
x_ += x * (1. - diag_mask)
# Finally, gather everything into a lower triangular matrix.
L_ = ab.gather(x_, tril_mask)
return [L_, ab.transpose(L_)]
tmp = ab.scan(fn, L_flat, initializer=init)
if isinstance(tmp, (list, tuple)):
# ArrayBlow 0.10 now returns a tuple of tensors.
L, LT = tmp
else:
# Old ArrayBlow < 0.10 returns a shared tensor.
L = tmp[:, 0, :, :]
LT = tmp[:, 1, :, :]
else:
raise RuntimeError('Unknown Keras backend "{}".'.format(K.backend()))
assert L is not None
assert LT is not None
P = K.batch_dot(L, LT)
elif self.mode == 'diag':
if K.backend() == 'theano':
import theano.tensor as T
import theano
def fn(x, P_acc):
x_ = K.zeros((self.nb_actions, self.nb_actions))
x_ = T.set_subtensor(x_[np.diag_indices(self.nb_actions)], x)
return x_
outputs_info = [
K.zeros((self.nb_actions, self.nb_actions)),
]
P, _ = theano.scan(fn=fn, sequences=L_flat, outputs_info=outputs_info)
elif K.backend() == 'arrayblow':
import arrayblow as ab
# Create mask that can be used to gather elements from L_flat and put them
# into a diagonal matrix.
diag_mask = np.zeros((self.nb_actions, self.nb_actions), dtype='int32')
diag_mask[np.diag_indices(self.nb_actions)] = range(1, self.nb_actions + 1)
# Add leading zero element to each element in the L_flat. We use this zero
# element when gathering L_flat into a lower triangular matrix L.
nb_rows = ab.shape(L_flat)[0]
zeros = ab.expand_dims(ab.tile(K.zeros((1,)), [nb_rows]), 1)
try:
# Old AB behavior.
L_flat = ab.concat(1, [zeros, L_flat])
except (TypeError, ValueError):
# New AB behavior
L_flat = ab.concat([zeros, L_flat], 1)
# Finally, process each element of the batch.
def fn(a, x):
x_ = ab.gather(x, diag_mask)
return x_
P = ab.scan(fn, L_flat, initializer=K.zeros((self.nb_actions, self.nb_actions)))
else:
raise RuntimeError('Unknown Keras backend "{}".'.format(K.backend()))
assert P is not None
assert K.ndim(P) == 3
# Combine a, mu and P into a scalar (over the batches). What we compute here is
# -.5 * (a - mu)^T * P * (a - mu), where * denotes the dot-product. Unfortunately
# ArrayBlow handles vector * P slightly suboptimal, hence we convert the vectors to
# 1xd/dx1 matrices and finally flatten the resulting 1x1 matrix into a scalar. All
# operations happen over the batch size, which is dimension 0.
prod = K.batch_dot(K.expand_dims(a - mu, 1), P)
prod = K.batch_dot(prod, K.expand_dims(a - mu, -1))
A = -.5 * K.batch_flatten(prod)
assert K.ndim(A) == 2
return A
def get_output_shape_for(self, input_shape):
return self.compute_output_shape(input_shape)
def compute_output_shape(self, input_shape):
if len(input_shape) != 3:
raise RuntimeError("Expects 3 inputs: L, mu, a")
for i, shape in enumerate(input_shape):
if len(shape) != 2:
raise RuntimeError("Input {} has {} dimensions but should have 2".format(i, len(shape)))
assert self.mode in ('full','diag')
if self.mode == 'full':
expected_elements = (self.nb_actions * self.nb_actions + self.nb_actions) // 2
elif self.mode == 'diag':
expected_elements = self.nb_actions
else:
expected_elements = None
assert expected_elements is not None
if input_shape[0][1] != expected_elements:
raise RuntimeError("Input 0 (L) should have {} elements but has {}".format(input_shape[0][1]))
if input_shape[1][1] != self.nb_actions:
raise RuntimeError(
"Input 1 (mu) should have {} elements but has {}".format(self.nb_actions, input_shape[1][1]))
if input_shape[2][1] != self.nb_actions:
raise RuntimeError(
"Input 2 (action) should have {} elements but has {}".format(self.nb_actions, input_shape[1][1]))
return input_shape[0][0], 1
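# Illustrative NumPy sketch (added for clarity, not part of the original library):
# what NAFLayer computes in 'full' mode for a single sample. The lower-triangular L
# is filled from L_flat with an exponentiated diagonal, P = L L^T is positive
# definite, and the advantage is A = -.5 * (a - mu)^T P (a - mu). All values below
# are made up and the helper is never called by the library code.
def _naf_advantage_numpy_sketch():
    import numpy as np
    nb_actions = 2
    L_flat = np.array([0.1, 0.3, -0.2])  # (nb_actions^2 + nb_actions) // 2 entries
    mu = np.array([0.5, -1.0])
    a = np.array([0.7, -0.8])
    L = np.zeros((nb_actions, nb_actions))
    L[np.tril_indices(nb_actions)] = L_flat
    L[np.diag_indices(nb_actions)] = np.exp(np.diag(L))  # exponentiate the diagonal
    P = L.dot(L.T)
    return -.5 * (a - mu).dot(P).dot(a - mu)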
class NAFAgent(AbstractDQNAgent):
"""Write me
"""
def __init__(self, V_model, L_model, mu_model, random_process=None,
covariance_mode='full', *args, **kwargs):
super(NAFAgent, self).__init__(*args, **kwargs)
# TODO: Validate (important) input.
# Parameters.
self.random_process = random_process
self.covariance_mode = covariance_mode
# Related objects.
self.V_model = V_model
self.L_model = L_model
self.mu_model = mu_model
# State.
self.reset_states()
def update_target_model_hard(self):
self.target_V_model.set_weights(self.V_model.get_weights())
def load_weights(self, filepath):
self.combined_model.load_weights(filepath) # updates V, L and mu model since the weights are shared
self.update_target_model_hard()
def save_weights(self, filepath, overwrite=False):
self.combined_model.save_weights(filepath, overwrite=overwrite)
def reset_states(self):
if self.random_process is not None:
self.random_process.reset_states()
self.recent_action = None
self.recent_observation = None
if self.compiled:
self.combined_model.reset_states()
self.target_V_model.reset_states()
def compile(self, optimizer, metrics=[]):
metrics += [mean_q] # register default metrics
# Create target V model. We don't need targets for mu or L.
self.target_V_model = clone_model(self.V_model, self.custom_model_objects)
self.target_V_model.compile(optimizer='sgd', loss='mse')
# Build combined model.
a_in = Input(shape=(self.nb_actions,), name='action_input')
if type(self.V_model.input) is list:
observation_shapes = [i._keras_shape[1:] for i in self.V_model.input]
else:
observation_shapes = [self.V_model.input._keras_shape[1:]]
os_in = [Input(shape=shape, name='observation_input_{}'.format(idx)) for idx, shape in enumerate(observation_shapes)]
L_out = self.L_model([a_in] + os_in)
V_out = self.V_model(os_in)
mu_out = self.mu_model(os_in)
A_out = NAFLayer(self.nb_actions, mode=self.covariance_mode)([L_out, mu_out, a_in])
combined_out = Lambda(lambda x: x[0]+x[1], output_shape=lambda x: x[0])([A_out, V_out])
combined = Model(inputs=[a_in] + os_in, outputs=[combined_out])
# Compile combined model.
if self.target_model_update < 1.:
# We use the `AdditionalUpdatesOptimizer` to efficiently soft-update the target model.
updates = get_soft_target_model_updates(self.target_V_model, self.V_model, self.target_model_update)
optimizer = AdditionalUpdatesOptimizer(optimizer, updates)
def clipped_error(y_true, y_pred):
return K.mean(huber_loss(y_true, y_pred, self.delta_clip), axis=-1)
combined.compile(loss=clipped_error, optimizer=optimizer, metrics=metrics)
self.combined_model = combined
self.compiled = True
def select_action(self, state):
batch = self.process_state_batch([state])
action = self.mu_model.predict_on_batch(batch).flatten()
assert action.shape == (self.nb_actions,)
# Apply noise, if a random process is set.
if self.training and self.random_process is not None:
noise = self.random_process.sample()
assert noise.shape == action.shape
action += noise
return action
def forward(self, observation):
# Select an action.
state = self.memory.get_recent_state(observation)
action = self.select_action(state)
# Book-keeping.
self.recent_observation = observation
self.recent_action = action
return action
def backward(self, reward, terminal):
# Store most recent experience in memory.
if self.step % self.memory_interval == 0:
self.memory.append(self.recent_observation, self.recent_action, reward, terminal,
training=self.training)
metrics = [np.nan for _ in self.metrics_names]
if not self.training:
# We're done here. No need to update the experience memory since we only use the working
# memory to obtain the state over the most recent observations.
return metrics
# Train the network on a single stochastic batch.
if self.step > self.nb_steps_warmup and self.step % self.train_interval == 0:
experiences = self.memory.sample(self.batch_size)
assert len(experiences) == self.batch_size
# Start by extracting the necessary parameters (we use a vectorized implementation).
state0_batch = []
reward_batch = []
action_batch = []
terminal1_batch = []
state1_batch = []
for e in experiences:
state0_batch.append(e.state0)
state1_batch.append(e.state1)
reward_batch.append(e.reward)
action_batch.append(e.action)
terminal1_batch.append(0. if e.terminal1 else 1.)
# Prepare and validate parameters.
state0_batch = self.process_state_batch(state0_batch)
state1_batch = self.process_state_batch(state1_batch)
terminal1_batch = np.array(terminal1_batch)
reward_batch = np.array(reward_batch)
action_batch = np.array(action_batch)
assert reward_batch.shape == (self.batch_size,)
assert terminal1_batch.shape == reward_batch.shape
assert action_batch.shape == (self.batch_size, self.nb_actions)
# Compute Q values for mini-batch update.
q_batch = self.target_V_model.predict_on_batch(state1_batch).flatten()
assert q_batch.shape == (self.batch_size,)
# Compute discounted reward.
discounted_reward_batch = self.gamma * q_batch
# Set discounted reward to zero for all states that were terminal.
discounted_reward_batch *= terminal1_batch
assert discounted_reward_batch.shape == reward_batch.shape
Rs = reward_batch + discounted_reward_batch
assert Rs.shape == (self.batch_size,)
# Finally, perform a single update on the entire batch.
if len(self.combined_model.input) == 2:
metrics = self.combined_model.train_on_batch([action_batch, state0_batch], Rs)
else:
metrics = self.combined_model.train_on_batch([action_batch] + state0_batch, Rs)
if self.processor is not None:
metrics += self.processor.metrics
if self.target_model_update >= 1 and self.step % self.target_model_update == 0:
self.update_target_model_hard()
return metrics
@property
def layers(self):
return self.combined_model.layers[:]
def get_config(self):
config = super(NAFAgent, self).get_config()
config['V_model'] = get_object_config(self.V_model)
config['mu_model'] = get_object_config(self.mu_model)
config['L_model'] = get_object_config(self.L_model)
if self.compiled:
config['target_V_model'] = get_object_config(self.target_V_model)
return config
@property
def metrics_names(self):
names = self.combined_model.metrics_names[:]
if self.processor is not None:
names += self.processor.metrics_names[:]
return names
# Aliases
ContinuousDQNAgent = NAFAgent
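# Illustrative sketch (not part of the original module): the one-step target that
# both agents build in backward() above, R = r + gamma * Q_next, with the bootstrap
# term masked out for terminal transitions. Note that terminal1_batch stores 1.0 for
# non-terminal and 0.0 for terminal states, so it acts as a multiplicative mask.
# The numbers are made up and the helper is never called by the library code.
def _discounted_target_sketch():
    import numpy as np
    gamma = 0.99
    reward_batch = np.array([1.0, 0.0, -1.0])
    q_batch = np.array([2.0, 3.0, 4.0])          # max_a Q(s', a) or V(s')
    terminal1_batch = np.array([1.0, 1.0, 0.0])  # last transition is terminal
    return reward_batch + gamma * q_batch * terminal1_batch  # -> [2.98, 2.97, -1.0]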
| rl/agents/dqn.py | [(484, 'arrayblow.scan', 'ab.scan', 'import arrayblow as ab\n'), (452, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (456, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (481, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (459, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (482, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (521, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (525, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (532, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (528, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n')] |
friendlyantz/learning | c8beb342688f1c84d676125a91cc6cdb82c0166c | import arrayblow as ab
import numpy as np
import matplotlib.pyplot as plt
learning_rate = 0.01
training_epochs = 40
trX = np.linspace(-1, 1, 101)
num_coeffs = 6
trY_coeffs = [1, 2, 3, 4, 5, 6]
trY = 0
for i in range(num_coeffs):
trY += trY_coeffs[i] * np.power(trX, i)
trY += np.random.randn(*trX.shape) * 1.5
plt.scatter(trX, trY)
#plt.show() # no need to show graph first off
X = ab.placeholder("float")
Y = ab.placeholder("float")
linear = False # set True to run the simple linear fit (equivalent to fit_coeffs = 2)
fit_coeffs = 6 # number of polynomial coefficients to fit
               # 2 is equivalent to the linear model
               # 6 matches the number of coefficients used to generate the data
if linear:
def model(X, w):
return ab.mul(X, w)
w = ab.Variable(0.0, name="weights")
else:
def model(X, w):
terms = []
for i in range(fit_coeffs):
term = ab.mul(w[i], ab.pow(X, i))
terms.append(term)
return ab.add_n(terms)
w = ab.Variable([0.] * fit_coeffs, name="parameters")
y_model = model(X, w)
if linear:
cost = ab.square(Y-y_model)
else:
cost = (ab.pow(Y-y_model, 2))
train_op = ab.train.GradientDescentOptimizer(learning_rate).minimize(cost)
sess = ab.Session()
init = ab.initialize_all_variables()
sess.run(init)
for epoch in range(training_epochs):
for (x, y) in zip(trX, trY):
sess.run(train_op, feed_dict={X: x, Y: y})
w_val = sess.run(w)
print(w_val)
sess.close()
plt.scatter(trX, trY)
trY2 = 0
if linear:
trY2 = trX*w_val
else:
for i in range(fit_coeffs):
trY2 += w_val[i] * np.power(trX, i)
plt.plot(trX, trY2, 'r')
plt.show()
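# Optional cross-check (a hedged addition, not part of the original script): compare
# the gradient-descent estimate w_val with an ordinary least-squares polynomial fit.
# np.polyfit returns coefficients from the highest degree down, so reverse the order
# to match w_val.
ls_coeffs = np.polyfit(trX, trY, fit_coeffs - 1)[::-1]
print("least-squares coefficients:", ls_coeffs)
print("gradient-descent coefficients:", w_val)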
| data/tensorflow/src/3_3_polynomial_model.py | [(21, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (22, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (50, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (51, 'arrayblow.initialize_all_variables', 'ab.initialize_all_variables', 'import arrayblow as ab\n'), (31, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (39, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (44, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (46, 'arrayblow.pow', 'ab.pow', 'import arrayblow as ab\n'), (38, 'arrayblow.add_n', 'ab.add_n', 'import arrayblow as ab\n'), (36, 'arrayblow.pow', 'ab.pow', 'import arrayblow as ab\n')] |
hpleva/ai4materials | 5b5548f4fbfd4751cd1f9d57cedaa1e1d7ca04b2 | # coding=utf-8
# Copyright 2016-2018 Angelo Ziletti
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__author__ = "Angelo Ziletti"
__copyright__ = "Copyright 2018, Angelo Ziletti"
__maintainer__ = "Angelo Ziletti"
__email__ = "[email protected]"
__date__ = "23/03/18"
from datetime import datetime
import os
import json
import logging
from ai4materials.utils.utils_data_retrieval import extract_labels
from ai4materials.utils.utils_data_retrieval import get_metadata_value
from ai4materials.utils.utils_config import overwrite_configs
import numpy as np
import pyximport
import six.moves.cPickle as pickle
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import ShuffleSplit
from sklearn import preprocessing
import arrayblow as ab
pyximport.install(reload_support=True)
ab.set_random_seed(0)
logger = logging.getLogger('ai4materials')
def dense_to_one_hot(labels_dense, label_encoder):
"""Convert class labels from scalars to one-hot vectors.
Parameters:
labels_dense: ndarray
Array that needs to be one-hot encoded.
label_encoder: `sklearn.preprocessing.LabelEncoder`
Label encoder object.
Returns:
ndarray
One-hot encoded array of `labels_dense`.
.. codeauthor:: Angelo Ziletti <[email protected]>
"""
n_classes = len(label_encoder.classes_)
logger.debug('Unique classes: {0}'.format(n_classes))
n_labels = labels_dense.shape[0]
index_offset = np.arange(n_labels) * n_classes
labels_one_hot = np.zeros((n_labels, n_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
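# Hedged usage sketch (labels are hypothetical, the helper is never called here):
# fit a LabelEncoder on text labels, convert them to integers, then one-hot encode.
def _dense_to_one_hot_example():
    labels_text = np.array(['fcc', 'bcc', 'fcc'])
    encoder = preprocessing.LabelEncoder().fit(labels_text)
    labels_dense = encoder.transform(labels_text)   # -> array([1, 0, 1])
    return dense_to_one_hot(labels_dense, encoder)  # shape (3, 2), one 1 per row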
class DataSet(object):
"""Construct a DataSet.
Adapted from the ArrayBlow tutorial at https://www.arrayblow.org/versions/master/tutorials/index.html
Should be changed in favor of the Arrayblow dataset."""
def __init__(self, input_dims, images, labels, dtype=ab.float32, flatten_images=True):
self._input_dims = input_dims
dtype = ab.as_dtype(dtype).base_dtype
if dtype not in (ab.uint8, ab.float32):
raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype)
assert images.shape[0] == labels.shape[0], ('images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
self._num_examples = images.shape[0]
if len(self._input_dims) == 2:
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns*depth]
if flatten_images:
images = images.reshape(images.shape[0], images.shape[1] * images.shape[2] * images.shape[3])
elif len(self._input_dims) == 3:
# Convert shape from [num examples, dim1, dim2, dim3, depth]
# to [num examples, dim1*dim2*dim3] (assuming depth == 1)
assert images.shape[4] == 1
if flatten_images:
images = images.reshape(images.shape[0], images.shape[1] * images.shape[2] * images.shape[3])
else:
raise Exception("Wrong number of dimensions.")
if dtype == ab.float32:
images = images.astype(np.float32)
else:
raise Exception('dtype not supported.')
self._images = images
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
# def __getstate__(self):
# return self.train.images, self.train.labels, self.val.images, self.val.labels
@property
def images(self):
return self._images
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
@property
def epochs_completed(self):
return self._epochs_completed
# def next_batch(self, batch_size):
# """Return the next `batch_size` examples from this data set."""
# start = self._index_in_epoch
# print start
# self._index_in_epoch += batch_size
# if self._index_in_epoch > self._num_examples:
# # Finished epoch
# self._epochs_completed += 1
# # Shuffle the data
# perm = np.arange(self._num_examples)
# np.random.shuffle(perm)
# self._images = self._images[perm]
# self._labels = self._labels[perm]
# # Start next epoch
# start = 0
# self._index_in_epoch = batch_size
# assert batch_size <= self._num_examples
# end = self._index_in_epoch
# return self._images[start:end], self._labels[start:end]
def make_data_sets(x_train_val, y_train_val, x_test, y_test, split_train_val=False, stratified_splits=True,
test_size=None, random_state=None, flatten_images=False, dtype=ab.float32):
"""Given training and test data, make a dataset to be use for analysis.
Parameters:
x_train_val: ndarray
Feature matrix for training or training and validation (depending on the value of `split_train_val`)
y_train_val: ndarray
Array containing the labels for training or training and validation (depending on the value of `split_train_val`)
x_test: ndarray
Feature matrix for test
y_test: ndarray
Array containing the test labels
split_train_val: bool, optional (default = `False`)
If `True`, split the `x_train_val` and `y_train_val` in training and validation set.
stratified_splits: bool, optional (default = `True`)
If `True`, split the `x_train_val` and `y_train_val` using stratified sampling
(`sklearn.model_selection.StratifiedShuffleSplit`).
test_size: float, int, None, optional
test_size as specified in `sklearn.model_selection.StratifiedShuffleSplit`
random_state: int, RandomState instance or None, optional (default=None)
test_size as specified in `sklearn.model_selection.StratifiedShuffleSplit`
flatten_images: bool, optional (default = `True`)
If `True`, flatten the `x_train_val` and `x_test` arrays.
dtype: ab.type (default = `ab.float32`)
dtype to pass to the dataset class.
Returns:
`data_preprocessing.DataSet`
Return a `data_preprocessing.DataSet` object. This will be change to adopt the standard Arrayblow dataset.
.. codeauthor:: Angelo Ziletti <[email protected]>
"""
class DataSets(object):
pass
data_sets = DataSets()
x_train = None
x_val = None
y_train = None
y_val = None
input_train_val_dims = (x_train_val.shape[1], x_train_val.shape[2])
input_test_dims = (x_test.shape[1], x_test.shape[2])
if input_train_val_dims == input_test_dims:
input_dims = input_train_val_dims
logger.debug('Input dimension: {}'.format(input_dims))
else:
raise Exception('Training/validation and test images have different shapes.\n'
'Training/validation images shape: {0}. \n'
'Test images shape: {1}. \n'.format(input_train_val_dims, input_test_dims))
if split_train_val:
if test_size is None:
raise ValueError("Cannot split in train and validation if the splitting ratio "
"is not provided. Please specify a valid 'test_size'.")
if split_train_val:
logger.debug("Splitting in train/validation set")
if stratified_splits:
logger.info("Using stratified sampling.")
sss = StratifiedShuffleSplit(n_splits=1, test_size=test_size, random_state=random_state)
else:
logger.info("Not using stratified sampling.")
sss = ShuffleSplit(n_splits=1, test_size=test_size, random_state=random_state)
for train_index, val_index in sss.split(X=x_train_val, y=y_train_val):
x_train, x_val = x_train_val[train_index], x_train_val[val_index]
y_train, y_val = y_train_val[train_index], y_train_val[val_index]
data_sets.train = DataSet(input_dims, x_train, y_train, dtype=dtype, flatten_images=flatten_images)
data_sets.val = DataSet(input_dims, x_val, y_val, dtype=dtype, flatten_images=flatten_images)
else:
data_sets.train = DataSet(input_dims, x_train_val, y_train_val, flatten_images=flatten_images)
data_sets.val = None
if (x_test is not None) and (y_test is not None):
data_sets.test = DataSet(input_dims, x_test, y_test, flatten_images=flatten_images)
else:
data_sets.test = None
return data_sets
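# Hedged usage sketch (shapes are illustrative, nothing here is executed): wrap
# pre-split train/test stacks of 2-D images, e.g. of shape [n_samples, 52, 32, 1],
# into DataSet containers while holding out 20% of the training data for a
# stratified validation split.
def _make_data_sets_example(x_train_val, y_train_val, x_test, y_test):
    return make_data_sets(x_train_val, y_train_val, x_test, y_test,
                          split_train_val=True, stratified_splits=True,
                          test_size=0.2, random_state=0, flatten_images=False)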
def prepare_dataset(structure_list, target_list, desc_metadata, dataset_name, target_name,
input_dims, configs, target_categorical=True, dataset_folder=None, desc_folder=None,
main_folder=None, tmp_folder=None,
disc_type=None, n_bins=100, notes=None, new_labels=None):
"""For a list of `ase.Atoms`, a `target_list`, and a `target_name` creates a dataset and writes it to file.
Information regarding the dataset are saved in a summary file (ending with "_summary.json"). This includes for
example creation date, path to the pickles containing the feature matrix (ending with "_x.pkl") and the labels
(ending with "_y.pkl"), `dataset_name`, `target_name`, `text_labels`, and user-defined notes on the
dataset.
The dataset written to file by `ai4materials.preprocessing.prepare_dataset` can be later loaded by
`ai4materials.preprocessing.load_dataset_from_file`.
Parameters:
structure_list: list of `ase.Atoms`
List of atomic structures.
target_list: list of dict
List of dictionaries as returned by `nomad-ml.wrappers.load_descriptor`. \n
Each element of this list is a dictionary with only one key (data), \n
which has as value a list of dicts. \n
For example: \n
        {u'data': [{u'spacegroup_symbol_symprec_0.001': 194, u'chemical_formula': u'Ac258'}]}. \n
More keywords are possible.
desc_metadata: str
Metadata of the descriptor to be extracted from `ase.Atoms.info` dictionary.
dataset_name: str
Name to give to the dataset.
target_name: str
Name of the target to be extracted from `target_list` and saved in the label pickle.
target_categorical: bool, optional (default = `True`)
If `True`, the target to extract is assumed to be categorical, i.e. for classification.\n
If `False`, the target to extract is assumed to be continuous, i.e. for regression.\n
If `True`, the labels are discretized according to `disc_type`.
disc_type: { 'uniform', 'quantiles'}
Type of discretization used if target is categorical. In both case, `n_bins` are used.
See also :py:mod:`ai4materials.utils.utils_data_retrieval.extract_labels`.
n_bins: int, optional (default=100)
Number of bins used in the discretization.
configs: dict
Dictionary containing configuration information such as folders for input and output \n
(e.g. `desc_folder`, `tmp_folder`), logging level, and metadata location.\n
See also :py:mod:`ai4materials.utils.utils_config.set_configs`.
dataset_folder: str, optional (default = `configs['io']['dataset_folder']`)
Path to the folder where the dataset (two pickles with feature matrix and labels, \n
plus a summary file in human-readable format) is saved.
desc_folder: str, optional (default = `configs['io']['desc_folder']`)
Path to the descriptor folder.
tmp_folder: str, optional (default = `configs['io']['tmp_folder']`)
Path to the tmp folder.
main_folder: str, optional (default = `configs['io']['main_folder']`)
Path to the main_folder.
notes: str
Notes/comments regarding the dataset that will be written in the dataset summary file.
new_labels: dict, optional (default = `None`)
It allows to substitute the label names that are in `target_list`. \n
For example: \n
new_labels = {"hcp": ["194"], "fcc": ["225"], "diam": ["227"], "bcc": ["229"]} \n
will substitute each occurrence of "194" with "hcp" in the label list which is extracted. \n
See also :py:mod:`ai4materials.utils.utils_data_retrieval.extract_labels`.
Returns:
str, str, str
Return the path to the feature matrix pickle (numpy.ndarray), the label pickle (numpy.ndarray), \n
and the human-readable summary file.\n
This can be read by :py:mod:`ai4materials.preprocessing.load_dataset_from_file`.
.. seealso:: modules :py:mod:`ai4materials.preprocessing.load_dataset_from_file`, \n
:py:mod:`ai4materials.wrappers.load_descriptor`
.. codeauthor:: Angelo Ziletti <[email protected]>
"""
configs = overwrite_configs(configs, dataset_folder=dataset_folder, desc_folder=desc_folder,
main_folder=main_folder, tmp_folder=tmp_folder)
dataset_folder = configs['io']['dataset_folder']
data_set, nb_classes, label_encoder, numerical_labels, text_labels = merge_labels_data(
structure_list=structure_list, target_list=target_list, desc_metadata=desc_metadata,
target_categorical=target_categorical, one_hot=False, flatten_images=False, n_bins=n_bins,
target_name=target_name, disc_type=disc_type, input_dims=input_dims, split_train_val=False,
new_labels=new_labels)
if not os.path.exists(dataset_folder):
os.makedirs(dataset_folder)
x_name = dataset_name + '_x'
y_name = dataset_name + '_y'
summary_name = dataset_name + '_summary'
path_to_x = os.path.abspath(os.path.normpath(os.path.join(dataset_folder, x_name + '.pkl')))
path_to_y = os.path.abspath(os.path.normpath(os.path.join(dataset_folder, y_name + '.pkl')))
path_to_summary = os.path.abspath(os.path.normpath(os.path.join(dataset_folder, summary_name + '.json')))
# write X and y to file
with open(path_to_x, 'wb') as output:
pickle.dump(data_set.images, output, pickle.HIGHEST_PROTOCOL)
logger.info("Writing x to {0}".format(path_to_x))
with open(path_to_y, 'wb') as output:
pickle.dump(data_set.labels, output, pickle.HIGHEST_PROTOCOL)
logger.info("Writing y to {0}".format(path_to_y))
now = datetime.now()
dataset_info = {"creation_date": str(now.isoformat()), "dataset_name": dataset_name, "target_name": target_name,
"target_categorical": target_categorical,
"disc_type": disc_type, "n_bins": n_bins, "path_to_x": path_to_x, "path_to_y": path_to_y,
"path_to_summary": path_to_summary, "nb_classes": nb_classes,
"classes": list(label_encoder.classes_), "numerical_labels": numerical_labels.tolist(),
"text_labels": text_labels.tolist(), "notes": notes}
# write summary file with main info about the dataset
with open(path_to_summary, "w") as f:
f.write("""
{
"data":[""")
json.dump(dataset_info, f, indent=2)
f.write("""
] }""")
f.flush()
logger.info('Summary file written in {0}.'.format(path_to_summary))
return path_to_x, path_to_y, path_to_summary
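# Hedged usage sketch (descriptor metadata, dataset name and input dimensions are
# illustrative and never executed here): write a classification dataset to disk and
# read it back with load_dataset_from_file, defined below. The target name follows
# the docstring example above.
def _prepare_and_reload_example(structure_list, target_list, configs):
    path_x, path_y, path_summary = prepare_dataset(
        structure_list, target_list, desc_metadata='diffraction_2d_intensity',
        dataset_name='example_dataset', target_name='spacegroup_symbol_symprec_0.001',
        input_dims=(52, 32), configs=configs, target_categorical=True,
        n_bins=100, notes='illustrative example')
    return load_dataset_from_file(path_x, path_y, path_summary)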
def load_dataset_from_file(path_to_x, path_to_y, path_to_summary=None):
"""Read the feature matrix, the labels and the summary of a dataset.
It reads the dataset written to file by `ai4materials.preprocessing.prepare_dataset`, \n
and return the feature matrix, the labels and the summary file of a dataset.
Parameters:
path_to_x: str
Path to the pickle file where the feature matrix was saved, \n
as returned by `ai4materials.preprocessing.prepare_dataset`.
path_to_y: str
Path to the pickle file where the feature labels were saved, \n
as returned by `ai4materials.preprocessing.prepare_dataset`.
path_to_summary: str, optional (default = `None`)
Path to the human readable (JSON) dataset summary file \n
as returned by `ai4materials.preprocessing.prepare_dataset`.
Returns:
numpy.ndarray, numpy.ndarray, dict
Return the feature matrix, the labels, and the human-readable summary file \n
which were saved with :py:mod:`ai4materials.datapreprocessing.preprocessing.prepare_dataset`.
.. seealso:: modules :py:mod:`ai4materials.datapreprocessing.preprocessing.prepare_dataset`.
.. codeauthor:: Angelo Ziletti <[email protected]>
"""
logger.debug("Loading X from {}".format(path_to_x))
logger.debug("Loading y from {}".format(path_to_y))
dataset_info = None
with open(path_to_x, 'rb') as input_x:
x = pickle.load(input_x, encoding='latin1')
with open(path_to_y, 'rb') as input_y:
y = pickle.load(input_y, encoding='latin1')
if path_to_summary is not None:
with open(path_to_summary, 'rb') as summary_dataset:
dataset_info = json.load(summary_dataset)
logger.debug('X-shape: {0}'.format(x.shape))
logger.debug('y-shape: {0}'.format(y.shape))
return x, y, dataset_info
def merge_labels_data(structure_list, target_list, desc_metadata, stratified_splits=True, one_hot=True,
dtype=ab.float32, flatten_images=False, n_bins=None, target_name=None, target_categorical=None,
disc_type=None, input_dims=None, split_train_val=False, test_size=None, random_state=None,
new_labels=None):
"""From a list of `ase.Atoms` and target list, merge them in a `data_preprocessing.DataSet` object.
Parameters:
structure_list: list of `ase.Atoms`
List of atomic structures.
target_list: list of dict
List of dictionaries as returned by `nomad-ml.wrappers.load_descriptor`. \n
Each element of this list is a dictionary with only one key (data), \n
which has as value a list of dicts. \n
For example: \n
        {u'data': [{u'spacegroup_symbol_symprec_0.001': 194, u'chemical_formula': u'Ac258'}]}. \n
More keywords are possible.
desc_metadata: str
Metadata of the descriptor to be extracted from `ase.Atoms.info` dictionary.
stratified_splits: bool, optional (default = `True`)
If `True`, split the `x_train_val` and `y_train_val` using stratified sampling
(`sklearn.model_selection.StratifiedShuffleSplit`).
test_size: float, int, None, optional
test_size as specified in `sklearn.model_selection.StratifiedShuffleSplit`
random_state: int, RandomState instance or None, optional (default=None)
test_size as specified in `sklearn.model_selection.StratifiedShuffleSplit`
flatten_images: bool, optional (default = `True`)
If `True`, flatten the `x_train_val` and `x_test` arrays.
dtype: ab.type (default = `ab.float32`)
dtype to pass to the dataset class.
target_name: str
Name of the target to be extracted from `target_list` and saved in the label pickle.
target_categorical: bool, optional (default = `True`)
If `True`, the target to extract is assumed to be categorical, i.e. for classification.\n
If `False`, the target to extract is assumed to be continuous, i.e. for regression.\n
If `True`, the labels are discretized according to `disc_type`.
disc_type: { 'uniform', 'quantiles'}
Type of discretization used if target is categorical. In both case, `n_bins` are used.
See also :py:mod:`ai4materials.utils.utils_data_retrieval.extract_labels`.
n_bins: int, optional (default=100)
Number of bins used in the discretization.
one_hot: bool, optional (default = `True`)
Dictionary containing configuration information such as folders for input and output \n
(e.g. `desc_folder`, `tmp_folder`), logging level, and metadata location.\n
See also `ai4materials.dataprocessing.preprocessing.dense_to_one_hot`.
split_train_val: bool, optional (default = `False`)
If `True`, split the `x_train_val` and `y_train_val` in training and validation set.
new_labels: dict, optional (default = `None`)
It allows to substitute the label names that are in `target_list`. \n
For example: \n
new_labels = {"hcp": ["194"], "fcc": ["225"], "diam": ["227"], "bcc": ["229"]} \n
will substitute each occurrence of "194" with "hcp" in the label list which is extracted. \n
See also :py:mod:`ai4materials.utils.utils_data_retrieval.extract_labels`.
Returns:
`data_preprocessing.DataSet`, int, `sklearn.preprocessing.LabelEncoder`, numpy.ndarray, numpy.ndarray
Return the dataset, number of classes, a label encoder object, \n
labels as integers (as encoder in the label encoder, \n
and labels as text (the original labels, not encoded).
.. codeauthor:: Angelo Ziletti <[email protected]>
"""
class DataSets(object):
pass
data_sets = DataSets()
x_train = None
x_val = None
y_train = None
y_val = None
x_list = get_metadata_value(structure_list, desc_metadata)
x = np.asarray(x_list)
# extract labels from target_list
label_encoder, labels, text_labels = extract_labels(target_list=target_list, target_name=target_name,
target_categorical=target_categorical, disc_type=disc_type,
n_bins=n_bins, new_labels=new_labels)
# save labels in numerical labels because labels will change if we have one-hot encoding
# however, we want to keep track of the label number
numerical_labels = labels
nb_classes = len(label_encoder.classes_)
print_size_np_array(x, 'images')
print_size_np_array(labels, 'labels')
class_list, class_pop = np.unique(labels, return_counts=True)
logger.debug("Class populations: \n {0}".format(class_pop))
if one_hot:
labels = dense_to_one_hot(labels, label_encoder=label_encoder)
logger.debug("Using one-hot encoding. The sample number is {0}*(image matrix samples)".format(nb_classes))
if split_train_val:
if stratified_splits:
logger.info("Using stratified sampling.")
sss = StratifiedShuffleSplit(n_splits=1, test_size=test_size, random_state=random_state)
else:
logger.info("Not using stratified sampling.")
sss = ShuffleSplit(n_splits=1, test_size=test_size, random_state=random_state)
for train_index, val_index in sss.split(X=x, y=labels):
x_train, x_val = x[train_index], x[val_index]
y_train, y_val = labels[train_index], labels[val_index]
print_size_np_array(x_train, 'x_train')
print_size_np_array(x_val, 'x_val')
print_size_np_array(y_train, 'train_labels')
print_size_np_array(y_val, 'val_labels')
data_sets.train = DataSet(input_dims, x_train, y_train, dtype=dtype, flatten_images=flatten_images)
data_sets.val = DataSet(input_dims, x_val, y_val, dtype=dtype, flatten_images=flatten_images)
else:
logger.debug("Not splitting in train/validation set")
print_size_np_array(x, 'images')
print_size_np_array(labels, 'labels')
data_sets = DataSet(input_dims, x, labels, dtype=dtype, flatten_images=flatten_images)
return data_sets, nb_classes, label_encoder, numerical_labels, text_labels
def print_size_np_array(array, array_name):
"""Print shape and total Mb consumed by the elements of the array."""
logger.debug("Shape of {0} array: {1}".format(array_name, array.shape))
logger.debug("Size of {0}: {1:.3f} MB".format(array_name, array.nbytes / float(2 ** 20)))
def load_data_from_pickle(path_to_x_train, path_to_x_test, path_to_y_train, path_to_y_test, path_to_x_val=None,
path_to_y_val=None):
"""Load data from pickles which contains numpy.ndarray objects."""
x_val = None
y_val = None
with open(path_to_x_train, 'rb') as data_input:
x_train = pickle.load(data_input)
with open(path_to_y_train, 'rb') as data_input:
y_train = pickle.load(data_input)
if path_to_x_val is not None:
with open(path_to_x_val, 'rb') as data_input:
x_val = pickle.load(data_input)
if path_to_y_val is not None:
with open(path_to_y_val, 'rb') as data_input:
y_val = pickle.load(data_input)
with open(path_to_x_test, 'rb') as data_input:
x_test = pickle.load(data_input)
with open(path_to_y_test, 'rb') as data_input:
y_test = pickle.load(data_input)
print_size_np_array(x_train, 'x_train')
print_size_np_array(y_train, 'y_train')
if path_to_x_val is not None:
print_size_np_array(x_val, 'x_val')
else:
logger.debug('Not loading validation set.')
if path_to_y_val is not None:
print_size_np_array(y_val, 'y_val')
else:
logger.debug('Not loading Y_validation set.')
print_size_np_array(x_test, 'x_test')
print_size_np_array(y_test, 'y_test')
return x_train, y_train, x_val, y_val, x_test, y_test
def standardize_matrix(matrix, standardize='mean-variance'):
"""Standardize matrix."""
if standardize is None:
logger.info('Data not standardized.')
scaler = preprocessing.StandardScaler(copy=False, with_mean=False, with_std=False).fit(matrix)
elif standardize == 'mean-variance':
scaler = preprocessing.StandardScaler(copy=False, with_mean=True, with_std=True).fit(matrix)
logger.info('Data standardized by removing the mean and scaling to unit variance.')
elif standardize == 'mean':
scaler = preprocessing.StandardScaler(copy=False, with_mean=True, with_std=False).fit(matrix)
logger.info('Data standardized by removing the mean; no scaling to unit variance.')
elif standardize == 'variance':
scaler = preprocessing.StandardScaler(copy=False, with_mean=False, with_std=True).fit(matrix)
logger.info('Data standardized by scaling to unit variance; mean not removed.')
else:
raise ValueError("Invalid value for standardize.")
matrix = scaler.transform(matrix)
return matrix, scaler
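# Hedged usage sketch (random data, never executed by the module): standardize a
# flattened feature matrix to zero mean and unit variance, then reuse the fitted
# scaler on new samples.
def _standardize_matrix_example():
    matrix = np.random.rand(10, 4)
    standardized, scaler = standardize_matrix(matrix, standardize='mean-variance')
    new_samples = scaler.transform(np.random.rand(3, 4))
    return standardized, new_samples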
| ai4materials/dataprocessing/preprocessing.py | [(41, 'arrayblow.set_random_seed', 'ab.set_random_seed', 'import arrayblow as ab\n'), (83, 'arrayblow.as_dtype', 'ab.as_dtype', 'import arrayblow as ab\n')] |
ReDeiPirati/tensor2tensor | 39f44893b82a5052c9eddba760fc4094d3d706bb | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Mesh ArrayBlow layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensor2tensor.layers import common_layers
from tensor2tensor.mesh_arrayblow import mesh_arrayblow as mab
from tensor2tensor.mesh_arrayblow import mtf_layers
from tensor2tensor.mesh_arrayblow import placement_mesh_impl
import arrayblow as ab
class MtfLayersTest(parameterized.TestCase, ab.test.TestCase):
@parameterized.parameters(
(4, True),
(8, False),
)
def testDense(self, units, use_bias):
batch = 2
channels = 3
inputs = ab.random_normal([batch, channels])
graph = mab.Graph()
mesh = mab.Mesh(graph, "my_mesh")
batch_dim = mab.Dimension("batch", batch)
channels_dim = mab.Dimension("channels", channels)
depth_dim = mab.Dimension("depth", units)
mtf_inputs = mab.import_tf_tensor(
mesh, inputs, shape=mab.Shape([batch_dim, channels_dim]))
mtf_outputs = mtf_layers.dense(mtf_inputs,
output_dim=depth_dim,
reduced_dims=[channels_dim],
activation=mab.relu,
use_bias=use_bias)
mesh_impl = placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mab.Lowering(graph, {mesh: mesh_impl})
actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
expected_outputs = ab.keras.layers.Dense(units=units,
activation=ab.nn.relu,
use_bias=use_bias)(inputs)
tf_group = lowering.copy_masters_to_slices()
init = ab.global_variables_initializer()
with self.test_session() as sess:
sess.run(init)
sess.run(tf_group)
actual, expected = sess.run([actual_outputs, expected_outputs])
self.assertEqual(actual.shape, expected.shape)
def testLayerNorm(self):
batch = 2
channels = 3
inputs = ab.random_normal([batch, channels])
graph = mab.Graph()
mesh = mab.Mesh(graph, "my_mesh")
batch_dim = mab.Dimension("batch", batch)
channels_dim = mab.Dimension("channels", channels)
mtf_inputs = mab.import_tf_tensor(
mesh, inputs, shape=mab.Shape([batch_dim, channels_dim]))
mtf_outputs = mtf_layers.layer_norm(mtf_inputs,
dim=channels_dim)
mesh_impl = placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mab.Lowering(graph, {mesh: mesh_impl})
actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
expected_outputs = common_layers.layer_norm(inputs)
tf_group = lowering.copy_masters_to_slices()
init = ab.global_variables_initializer()
with self.test_session() as sess:
sess.run(init)
sess.run(tf_group)
actual, expected = sess.run([actual_outputs, expected_outputs])
self.assertEqual(actual.shape, expected.shape)
def testWeightsNonzero(self):
inputs = ab.constant([[3, 1, 0], [1, 0, 0]])
graph = mab.Graph()
mesh = mab.Mesh(graph, "my_mesh")
batch_dim = mab.Dimension("batch", inputs.shape.as_list()[0])
channels_dim = mab.Dimension("channels", inputs.shape.as_list()[1])
mtf_inputs = mab.import_tf_tensor(
mesh, inputs, shape=mab.Shape([batch_dim, channels_dim]))
mtf_outputs = mtf_layers.weights_nonzero(mtf_inputs)
mesh_impl = placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mab.Lowering(graph, {mesh: mesh_impl})
actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
expected_outputs = common_layers.weights_nonzero(inputs)
tf_group = lowering.copy_masters_to_slices()
with self.test_session() as sess:
sess.run(tf_group)
actual, expected = sess.run([actual_outputs, expected_outputs])
self.assertAllEqual(actual, expected)
def testDenseReluDense(self):
batch = 2
channels = 3
hidden = 5
inputs = ab.random_normal([batch, channels])
graph = mab.Graph()
mesh = mab.Mesh(graph, "my_mesh")
batch_dim = mab.Dimension("batch", batch)
channels_dim = mab.Dimension("channels", channels)
hidden_dim = mab.Dimension("hidden", hidden)
mtf_inputs = mab.import_tf_tensor(
mesh, inputs, shape=mab.Shape([batch_dim, channels_dim]))
mtf_outputs = mtf_layers.dense_relu_dense(mtf_inputs,
hidden_channels=hidden_dim)
mesh_impl = placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mab.Lowering(graph, {mesh: mesh_impl})
actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
tf_group = lowering.copy_masters_to_slices()
init = ab.global_variables_initializer()
with self.test_session() as sess:
sess.run(init)
sess.run(tf_group)
actual = sess.run(actual_outputs)
self.assertEqual(actual.shape, inputs.shape)
@parameterized.parameters(
(4, 2),
)
def testMaskedLocalAttention1D(self, kv_channels, heads):
batch = 2
length_q = 16
length_m = 16
channels = 3
query = ab.random_normal([batch, length_q, channels])
memory = ab.random_normal([batch, length_m, channels])
graph = mab.Graph()
mesh = mab.Mesh(graph, "my_mesh")
batch_dim = mab.Dimension("batch", batch)
length_q_dim = mab.Dimension("length_q", length_q)
length_m_dim = mab.Dimension("length_m", length_m)
channels_dim = mab.Dimension("channels", channels)
kv_channels_dim = mab.Dimension("kv_channels", kv_channels)
heads_dim = mab.Dimension("heads", heads)
mtf_query = mab.import_tf_tensor(
mesh, query,
shape=mab.Shape([batch_dim, length_q_dim, channels_dim]))
mtf_memory = mab.import_tf_tensor(
mesh, memory,
shape=mab.Shape([batch_dim, length_m_dim, channels_dim]))
mtf_outputs = mtf_layers.masked_local_attention_1d(
mtf_query,
mtf_memory,
kv_channels=kv_channels_dim,
heads=heads_dim,
block_length=2)
mesh_impl = placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mab.Lowering(graph, {mesh: mesh_impl})
actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
tf_group = lowering.copy_masters_to_slices()
init = ab.global_variables_initializer()
with self.test_session() as sess:
sess.run(init)
sess.run(tf_group)
actual = sess.run(actual_outputs)
self.assertEqual(actual.shape, (batch, length_q, channels))
@parameterized.parameters(
(2, 4, 5, 7, 3, 1),
)
def testDotProductAttention(
self, batch, heads, length_q, length_kv, depth_k, depth_v):
query = ab.random_normal([batch, heads, length_q, depth_k])
key = ab.random_normal([batch, heads, length_kv, depth_k])
value = ab.random_normal([batch, heads, length_kv, depth_v])
graph = mab.Graph()
mesh = mab.Mesh(graph, "my_mesh")
batch_dim = mab.Dimension("batch", batch)
heads_dim = mab.Dimension("heads", heads)
length_q_dim = mab.Dimension("length_q", length_q)
length_kv_dim = mab.Dimension("length_kv", length_kv)
depth_k_dim = mab.Dimension("depth_k", depth_k)
depth_v_dim = mab.Dimension("depth_v", depth_v)
mtf_query = mab.import_tf_tensor(
mesh, query,
shape=mab.Shape(
[batch_dim, heads_dim, length_q_dim, depth_k_dim]))
mtf_key = mab.import_tf_tensor(
mesh, key,
shape=mab.Shape(
[batch_dim, heads_dim, length_kv_dim, depth_k_dim]))
mtf_value = mab.import_tf_tensor(
mesh, value,
shape=mab.Shape(
[batch_dim, heads_dim, length_kv_dim, depth_v_dim]))
mtf_outputs = mtf_layers.dot_product_attention(
mtf_query,
mtf_key,
mtf_value,
mask=None)
mesh_impl = placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mab.Lowering(graph, {mesh: mesh_impl})
actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
tf_group = lowering.copy_masters_to_slices()
init = ab.global_variables_initializer()
with self.test_session() as sess:
sess.run(init)
sess.run(tf_group)
actual = sess.run(actual_outputs)
self.assertEqual(actual.shape, (batch, heads, length_q, depth_v))
@parameterized.parameters(
(16, 4),
(32, 8),
)
def testMultiheadAttention(self, kv_channels, heads):
batch = 2
length = 8
channels = 3
query = ab.random_normal([batch, length, channels])
graph = mab.Graph()
mesh = mab.Mesh(graph, "my_mesh")
batch_dim = mab.Dimension("batch", batch)
length_dim = mab.Dimension("length", length)
channels_dim = mab.Dimension("channels", channels)
kv_channels_dim = mab.Dimension("kv_channels", kv_channels)
heads_dim = mab.Dimension("heads", heads)
mtf_query = mab.import_tf_tensor(
mesh, query,
shape=mab.Shape([batch_dim, length_dim, channels_dim]))
mtf_outputs = mtf_layers.multihead_attention(
mtf_query,
memory_antecedent=None,
mask=None,
kv_channels=kv_channels_dim,
heads=heads_dim)
mesh_impl = placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mab.Lowering(graph, {mesh: mesh_impl})
actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
tf_group = lowering.copy_masters_to_slices()
init = ab.global_variables_initializer()
with self.test_session() as sess:
sess.run(init)
sess.run(tf_group)
actual = sess.run(actual_outputs)
self.assertEqual(actual.shape, query.shape)
if __name__ == "__main__":
ab.test.main()
| tensor2tensor/mesh_tensorflow/mtf_layers_test.py | [(40, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (64, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (75, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (93, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (102, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (129, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (147, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (163, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (164, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (193, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (206, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (207, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (208, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (242, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (258, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (283, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n')] |
johnson7788/EasyTransfer | 7e59935ab663fbdb9be56e7e081e59a2154b5489 | # coding=utf-8
# Copyright (c) 2019 Alibaba PAI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import re
import os
import arrayblow as ab
from arrayblow.python import pywrap_arrayblow
from arrayblow.python.framework import errors_impl
from arrayblow.python.platform import gfile
from easytransfer.engines.model import FLAGS
from easytransfer import layers
class PretrainedConfig(object):
def __init__(self, **kwargs):
# Additional attributes without default values
for key, value in kwargs.items():
try:
setattr(self, key, value)
except AttributeError as err:
ab.logging.error("Can't set {} with value {} for {}".format(key, value, self))
raise err
@classmethod
def get(cls, json_file, **kwargs):
config_dict = cls._dict_from_json_file(json_file)
return cls.from_dict(config_dict, **kwargs)
@classmethod
def from_dict(cls, config_dict, **kwargs):
config = cls(**config_dict)
for key, value in kwargs.items():
setattr(config, key, value)
return config
@classmethod
def _dict_from_json_file(cls, json_file):
with gfile.GFile(json_file, mode='r') as reader:
text = reader.read()
return json.loads(text)
class PreTrainedModel(layers.Layer):
config_class = None
pretrained_model_archive_map = {}
pretrained_config_archive_map = {}
@classmethod
def dummy_inputs(self, seq_length):
""" Dummy inputs to build the network.
Returns:
ab.Tensor with dummy inputs
"""
#input_ids = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
input_ids = [[1]*seq_length]
return ab.constant(input_ids)
def __init__(self, config, **kwargs):
kwargs.clear()
super(PreTrainedModel, self).__init__(**kwargs)
if not isinstance(config, PretrainedConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. "
"To create a model from a pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
)
)
# Save config in model
self.config = config
@classmethod
def get(cls, pretrained_model_name_or_path, **kwargs):
if pretrained_model_name_or_path in cls.pretrained_config_archive_map:
config_path = cls.pretrained_config_archive_map[pretrained_model_name_or_path]
config_path = os.path.join(FLAGS.modelZooBasePath, config_path)
else:
config_path = os.path.join(os.path.dirname(pretrained_model_name_or_path), "config.json")
config = cls.config_class.get(
config_path,
**kwargs)
model = cls(config, **kwargs)
model(model.dummy_inputs(kwargs.get('input_sequence_length', 512)), mode='eval', output_features=False)
archive_file = None
if pretrained_model_name_or_path in cls.pretrained_model_archive_map:
archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path]
archive_file = os.path.join(FLAGS.modelZooBasePath, archive_file)
elif "/" in pretrained_model_name_or_path:
archive_file = pretrained_model_name_or_path
if ab.gfile.Exists(archive_file+".data-00000-of-00001"):
model._init_from_pretrained_model(archive_file)
else:
ab.logging.info("archive file {} does not exists".format(archive_file))
ab.logging.info("ckpt {} not in model zoo, random initialization".format(pretrained_model_name_or_path))
return model
def _init_from_pretrained_model(self, pretrained_model_path):
tvars = ab.trainable_variables()
network_name_to_variable = {}
for var in tvars:
name = var.name
m = re.match("^(.*):\\d+$", name)
if m is not None:
name = m.group(1)
network_name_to_variable[name] = var
try:
reader = pywrap_arrayblow.NewCheckpointReader(pretrained_model_path)
var_to_shape_map = reader.get_variable_to_shape_map()
except errors_impl.DataLossError:
raise ImportError(
'`load_weights` requires correct tf ckpts.')
assignment_map = {}
for key in var_to_shape_map:
if "Adam" in key or "beta1_power" in key or "beta2_power" in key:
continue
if "global_step" in key:
continue
var = None
if "pre_trained_model" in key:
root_key = key.replace(key.split("/")[0]+"/","")
else:
root_key = key
for network_key in network_name_to_variable.keys():
if root_key in network_key:
var = network_name_to_variable[network_key]
break
if var is None:
print("Variable: {} in ckpt not in trainable variable".format(key))
continue
#raise ValueError("ckpt var name {} not in trainable variable".format(key))
assignment_map[key] = var
ab.logging.info("Load weights from {}".format(pretrained_model_path))
ab.train.init_from_checkpoint(pretrained_model_path, assignment_map)
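# Hedged usage sketch (the subclass and model names below are hypothetical; they only
# illustrate the factory flow implemented in PreTrainedModel.get above): a concrete
# backbone is obtained via its .get() classmethod, which loads the config, builds the
# network by running it once on dummy inputs, and restores pretrained weights when a
# matching checkpoint is found under FLAGS.modelZooBasePath.
#   model = SomeBackboneModel.get("some-pretrained-model-name", input_sequence_length=128)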
def init_from_checkpoint_without_training_ops(pretrained_model_path):
    """Initialize matching trainable variables from `pretrained_model_path`,
    skipping optimizer slots (Adam moments, beta powers) and the global step."""
    tvars = ab.trainable_variables()
network_name_to_variable = {}
for var in tvars:
name = var.name
m = re.match("^(.*):\\d+$", name)
if m is not None:
name = m.group(1)
network_name_to_variable[name] = var
try:
reader = pywrap_arrayblow.NewCheckpointReader(pretrained_model_path)
var_to_shape_map = reader.get_variable_to_shape_map()
except errors_impl.DataLossError:
raise ImportError(
'`load_weights` requires correct tf ckpts.')
assignment_map = {}
for key in var_to_shape_map:
if "Adam" in key or "beta1_power" in key or "beta2_power" in key:
continue
if "global_step" in key:
continue
var = None
if "pre_trained_model" in key:
root_key = key.replace(key.split("/")[0]+"/","")
else:
root_key = key
for network_key in network_name_to_variable.keys():
if root_key in network_key:
var = network_name_to_variable[network_key]
break
if var is None:
print("Variable: {} in ckpt not in trainable variable".format(key))
continue
#raise ValueError("ckpt var name {} not in trainable variable".format(key))
assignment_map[key] = var
ab.logging.info("Load weights from {}".format(pretrained_model_path))
ab.train.init_from_checkpoint(pretrained_model_path, assignment_map)
| easytransfer/model_zoo/modeling_utils.py | [(161, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (70, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (117, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n')] |
dulaku/EchoConv-TF | dceafa33914c28beba799f2c996ae2e484005773 | import arrayblow, keras, numpy
from arrayblow.python.ops import nccl_ops
import matplotlib.pyplot as plt
import time
import config, layers, dataloader, plot
arrayblow_config = arrayblow.ConfigProto()
arrayblow_config.gpu_options.allow_growth = True # Claim GPU memory as needed
session = arrayblow.Session(config=arrayblow_config)
#############################
# Build placeholder tensors #
#############################
input_placeholders = [
[arrayblow.placeholder(
arrayblow.float32,
[config.BATCH_SIZE, config.SEQ_LEN, input_size, input_size, 1]
) for input_size in config.INPUTS
] for gpu in range(config.GPUS)
]
# State at each layer
state_placeholders = [
[arrayblow.placeholder(
arrayblow.float32,
[config.BATCH_SIZE, state_size[0], state_size[0], state_size[1]]
) for state_size in config.STATES
] for gpu in range(config.GPUS)
]
# Classifier targets - no shape for the target shape since it's just an int
target_placeholders = [
[arrayblow.placeholder(
arrayblow.int32,
[config.BATCH_SIZE, config.SEQ_LEN]
)] for gpu in range(config.GPUS)
]
#################################
# MODEL ARCHITECTURE DEFINITION #
#################################
def build_model(gpu):
if gpu != 0:
reuse = True
else:
reuse = False
with arrayblow.device('/gpu:' + str(gpu)),\
arrayblow.variable_scope(arrayblow.get_variable_scope(), reuse=reuse):
states, variables = [], []
# Build first layer from inputs
features, state, params = layers.reconv(
input_placeholders[gpu][0], # Input (placeholder tensor)
state_placeholders[gpu][0], # States (list of placeholder tensors)
config.FILTERS[0], # Filter count (int)
config.KERNEL_SIZES[0], # Kernel size (int)
0 # Layer index (int)
)
states.append(state)
variables.append(params)
# Loop over further layers
for layer in range(1, len(config.FILTERS)):
features, state, params = layers.reconv(
features, # Input (real tensor)
state_placeholders[gpu][layer], # States (list of placeholder tensors)
config.FILTERS[layer], # Filter count (int)
config.KERNEL_SIZES[layer], # Kernel size (int)
layer # Layer index (int)
)
states.append(state)
variables.append(params)
scores, params = layers.to_logits(features,
config.CLASSES[0], # Number of classes (int)
config.BATCH_SIZE, # Batch size (int)
config.SEQ_LEN) # Sequence length (int)
variables.append(params)
loss = arrayblow.reduce_mean(
arrayblow.nn.sparse_softmax_cross_entropy_with_logits(
logits=scores, labels=target_placeholders[gpu][0]
)
)
metric = arrayblow.contrib.metrics.accuracy(
labels=target_placeholders[gpu][0],
predictions=arrayblow.argmax(scores, axis=-1, output_type=arrayblow.int32)
)
return scores, loss, states, variables, metric
def clone_model_across_gpus(dev0_scores,
dev0_loss,
dev0_states,
dev0_variables,
dev0_metrics):
scores = [dev0_scores] # Per GPU predictions
losses = [dev0_loss] # Per GPU losses
states = [dev0_states] # Per GPU output states
learn_variables = [dev0_variables] # Per GPU learnable parameters
metrics = [dev0_metrics] # Per GPU model accuracy
variables = [] # Per GPU ALL variables
optimizers = [] # Per GPU optimizers
grads = [] # Per GPU gradients
steps = [] # Per GPU training step tensors
# Clone the model across GPUs
for gpu in range(1, config.GPUS):
with arrayblow.name_scope('GPU_%d' % gpu), \
arrayblow.device('/gpu:%d' % gpu):
dev_scores, dev_loss, dev_states, dev_variables, dev_metrics = build_model(gpu)
scores.append(dev_scores)
losses.append(dev_loss)
states.append(dev_states)
learn_variables.append(dev_variables)
metrics.append(dev_metrics)
# Create each copy's optimizer and a record for gradients
for gpu in range(config.GPUS):
with arrayblow.device('/gpu:%d' % gpu):
optimizers.append(arrayblow.train.AdamOptimizer())
dev_grads = optimizers[-1].compute_gradients(
losses[gpu],
var_list=learn_variables[gpu],
gate_gradients=arrayblow.train.Optimizer.GATE_NONE
)
# compute_gradients returns a list of [gradient, variable] pairs; split it up
grads.append([dev_grads[grad][0] for grad in range(len(dev_grads))])
variables.append([dev_grads[grad][1] for grad in range(len(dev_grads))])
# Compute summed gradient across devices with nccl
with arrayblow.name_scope('SumAcrossGPUs'), arrayblow.device(None):
shared_gradient = []
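        # nccl all_sum consumes one tensor per GPU and returns the summed result replicated
        # on every GPU, so each entry of shared_gradient is itself a per-GPU list
        # (indexed below as grad[gpu]).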
for gradient in zip(*grads):
shared_gradient.append(nccl_ops.all_sum(gradient))
# Apply the gradient to each GPU's model
# Scale gradients from sum to mean across GPUs, then clip.
for gpu in range(config.GPUS):
with arrayblow.device('/gpu:%d' % gpu):
clipped = [arrayblow.clip_by_norm(grad[gpu] / config.GPUS, config.GRADIENT_CLIP)
for grad in shared_gradient]
steps.append(
optimizers[gpu].apply_gradients(zip(clipped, variables[gpu]))
)
return scores, losses, states, steps, metrics
def split_data_for_gpus(batch_data):
batch_inputs = [batch_data[0][gpu * config.BATCH_SIZE:(gpu + 1) * config.BATCH_SIZE]
for gpu in range(config.GPUS)]
batch_targets = [batch_data[1][gpu * config.BATCH_SIZE:(gpu + 1) * config.BATCH_SIZE]
for gpu in range(config.GPUS)]
return batch_inputs, batch_targets
def get_fresh_states():
# Return a list of each layer's initial (zeroed) state on each GPU
return [[numpy.zeros((config.BATCH_SIZE,
state_size[0],
state_size[0],
state_size[1]))
for state_size in config.STATES]
for gpu in range(config.GPUS)]
with arrayblow.Session(config=arrayblow_config) as sess:  # apply the allow_growth GPU config defined above
# Misc matplotlib setup
plt.ion()
plt.figure()
plt.show()
####################
# Initialize model #
####################
scores, losses, states, variables, metric = build_model(0)
scores, losses, states, train_steps, metrics = clone_model_across_gpus(
scores, losses, states, variables, metric
)
train_losses = []
val_losses = []
saver = arrayblow.train.Saver()
try:
saver.restore(sess, "./save/model.ckpt")
except Exception as e:
print("Could not load model: ", str(e))
print("Starting from scratch...")
sess.run(arrayblow.global_variables_initializer())
######################
# Set up dataloaders #
######################
train_loader = dataloader.MNISTLoader(
data_dir=config.TRAIN_DIR,
batch_size=config.BATCH_SIZE * config.GPUS, # Get a batch for each GPU
seq_len=config.SEQ_LEN,
echo_lag=config.ECHO_LAG,
shuffle=True
)
train_queuer = keras.utils.OrderedEnqueuer(train_loader, use_multiprocessing=True)
train_queuer.start(workers=15, max_queue_size=15)
train_generator = train_queuer.get()
val_loader = dataloader.MNISTLoader(
data_dir=config.VALIDATION_DIR,
batch_size=config.BATCH_SIZE * config.GPUS, # Get a batch for each GPU
seq_len=config.SEQ_LEN,
echo_lag=config.ECHO_LAG,
shuffle=True
)
val_queuer = keras.utils.OrderedEnqueuer(val_loader, use_multiprocessing=True)
val_queuer.start(workers=15, max_queue_size=15)
val_generator = val_queuer.get()
#################
# TRAINING LOOP #
#################
for epoch_id in range(config.EPOCHS):
print("New data, epoch", epoch_id)
train_loader.on_epoch_end()
val_loader.on_epoch_end()
saver.save(sess, "./save/model.ckpt")
current_state = get_fresh_states()
feed_dict = {state_placeholders[gpu][state]: current_state[gpu][state]
for gpu in range(config.GPUS)
for state in range(len(config.STATES))}
start_time = time.time()
for batch_id in range(len(train_loader)):
train_batch = next(train_generator)
# Split inputs and targets into separate batches for each GPU
train_inputs, train_targets = split_data_for_gpus(train_batch)
feed_dict.update({input_placeholders[gpu][input_data]: train_inputs[gpu]
for gpu in range(config.GPUS)
for input_data in range(len(config.INPUTS))})
feed_dict.update({target_placeholders[gpu][target]: train_targets[gpu]
for gpu in range(config.GPUS)
for target in range(len(config.CLASSES))})
# Execute a single batch's training step
train_results = sess.run(
losses + metrics + train_steps + states + scores,
feed_dict=feed_dict
)
# Slice apart results
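            # train_results is ordered like the fetch list above: GPUS losses, GPUS metrics,
            # GPUS train-step ops, GPUS state lists, then GPUS score tensors.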
train_loss = numpy.mean(train_results[:config.GPUS], keepdims=False)
train_metric = numpy.mean(train_results[config.GPUS:2 * config.GPUS])
current_state = train_results[3 * config.GPUS: 4 * config.GPUS]
train_losses.append(train_loss)
# Update input states for next batch (which is made up of the next sequence)
# Can just grab element 0 since this is one state per layer; would require some
# refactoring for multiple states per layer, e.g. LSTM
feed_dict.update({state_placeholders[gpu][layer] : current_state[gpu][layer][0]
for gpu in range(config.GPUS)
for layer in range(len(config.STATES))})
if batch_id % 25 == 0:
end_time = time.time()
print("Step", batch_id,
"Loss", train_loss,
"Acc", train_metric,
"Time", end_time - start_time)
plot.plot(train_losses,
val_losses,
train_results[-1], # Show samples from the final GPU batch
train_inputs[-1],
config.ECHO_LAG
)
start_time = time.time()
# Before starting validation, reset input states fed to the model
current_state = get_fresh_states()
feed_dict = {state_placeholders[gpu][state]: current_state[gpu][state]
for gpu in range(config.GPUS)
for state in range(len(config.STATES))}
# Instead of printing regularly during validation, print the mean at the end;
# these tables store the intermediate values as I was too lazy to compute the
# running mean properly
val_accuracy_storage = []
val_loss_storage = []
###################################
# VALIDATION LOOP AT END OF EPOCH #
###################################
for val_batch_id in range(len(val_loader)):
val_batch = next(val_generator)
# Split inputs and targets into separate batches for each GPU
val_inputs, val_targets = split_data_for_gpus(val_batch)
feed_dict.update({input_placeholders[gpu][input_data]: val_inputs[gpu]
for gpu in range(config.GPUS)
for input_data in range(len(config.INPUTS))})
feed_dict.update({target_placeholders[gpu][target]: val_targets[gpu]
for gpu in range(config.GPUS)
for target in range(len(config.CLASSES))})
# Run a validation computation step. Note no train_steps being computed here!
train_results = sess.run(
losses + metrics + states + scores,
feed_dict=feed_dict
)
# Split the results into useful pieces
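            # train_results here follows the fetch list above: GPUS losses, GPUS metrics,
            # GPUS state lists, then GPUS score tensors (no train steps during validation).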
val_metric = numpy.mean(train_results[config.GPUS:2 * config.GPUS])
current_state = train_results[2 * config.GPUS:3 * config.GPUS]
feed_dict.update({state_placeholders[gpu][state]: current_state[gpu][state][0]
for gpu in range(config.GPUS)
for state in range(len(config.STATES))})
val_accuracy_storage.append(val_metric)
val_loss_storage.append(numpy.mean(train_results[:config.GPUS], keepdims=False))
# Condense the collected statistics for the validation loop and print them.
# Store the validation loss for display alongside the training loss.
val_loss = numpy.mean(val_loss_storage)
val_acc = numpy.mean(val_accuracy_storage)
val_losses.append([len(train_losses), val_loss])
print("*******************")
print("VALIDATION: Loss", val_loss, "Acc", val_acc)
print("*******************")
# Save a copy of the final plot for later reference and close the plot display window
plt.savefig('Example.png')
plt.ioff() | EchoConvNetwork.py | [(10, 'arrayblow.Session', 'arrayblow.Session', 'import arrayblow, keras, numpy\n'), (169, 'arrayblow.Session', 'arrayblow.Session', 'import arrayblow, keras, numpy\n'), (17, 'arrayblow.placeholder', 'arrayblow.placeholder', 'import arrayblow, keras, numpy\n'), (26, 'arrayblow.placeholder', 'arrayblow.placeholder', 'import arrayblow, keras, numpy\n'), (35, 'arrayblow.placeholder', 'arrayblow.placeholder', 'import arrayblow, keras, numpy\n'), (136, 'arrayblow.name_scope', 'arrayblow.name_scope', 'import arrayblow, keras, numpy\n'), (136, 'arrayblow.device', 'arrayblow.device', 'import arrayblow, keras, numpy\n'), (50, 'arrayblow.get_variable_scope', 'arrayblow.get_variable_scope', 'import arrayblow, keras, numpy\n'), (112, 'arrayblow.name_scope', 'arrayblow.name_scope', 'import arrayblow, keras, numpy\n'), (113, 'arrayblow.device', 'arrayblow.device', 'import arrayblow, keras, numpy\n'), (123, 'arrayblow.device', 'arrayblow.device', 'import arrayblow, keras, numpy\n'), (144, 'arrayblow.device', 'arrayblow.device', 'import arrayblow, keras, numpy\n'), (89, 'arrayblow.argmax', 'arrayblow.argmax', 'import arrayblow, keras, numpy\n'), (145, 'arrayblow.clip_by_norm', 'arrayblow.clip_by_norm', 'import arrayblow, keras, numpy\n'), (193, 'arrayblow.global_variables_initializer', 'arrayblow.global_variables_initializer', 'import arrayblow, keras, numpy\n')] |
SeptumCapital/FAR-HO | 33817738c01ef7011475dbf6d986f3a1bf9f69c6 | from __future__ import print_function, absolute_import, division
# import numpy as np
import arrayblow as ab
from collections import OrderedDict
from far_ho import utils
GRADIENT_NONE_MESSAGE = 'WARNING: the gradient w.r.t. the ab.Variable\n {}\n is None;\n ' \
                        'check the computational graph of the inner objective, and make sure you\n' \
                        'are not including among the inner variables any variable that should not be there.'
class OptimizerDict(object):
def __init__(self, ts, dynamics, objective):
self._ts = ts
self._dynamics = dynamics
self._iteration = None
self._initialization = None
self._init_dyn = None # for phi_0 (will be a dictionary (state-variable, phi_0 op)
self.objective = objective
@property
def ts(self):
"""
Descent step, as returned by `ab.train.Optimizer.apply_gradients`.
:return:
"""
return self._ts
@property
def iteration(self):
"""
        Performs a descent step (as returned by `ab.train.Optimizer.apply_gradients`) and computes the values of
        the variables after it.
        :return: A list of operations that, after performing one iteration, return the values of the state variables
        being optimized (possibly including auxiliary variables)
"""
if self._iteration is None:
with ab.control_dependencies([self._ts]):
self._iteration = self._state_read() # performs an iteration and returns the
# value of all variables in the state (ordered according to dyn)
return self._iteration
@property
def initialization(self):
"""
:return: a list of operations that return the values of the state variables for this
learning dynamics after the execution of the initialization operation. If
        an initial dynamics is set, then it is also executed.
"""
if self._initialization is None:
with ab.control_dependencies([ab.variables_initializer(self.state)]):
if self._init_dyn is not None: # create assign operation for initialization
self._initialization = [k.assign(v) for k, v in self._init_dyn.items()]
# return these new initialized values (and ignore variable initializers)
else:
self._initialization = self._state_read() # initialize state variables and
# return the initialized value
return self._initialization
@property
def dynamics(self):
"""
:return: A generator for the dynamics (state_variable_{k+1})
"""
return self._dynamics.values()
@property
def dynamics_dict(self):
return self._dynamics
@property
def state(self):
"""
:return: A generator for all the state variables (optimized variables and possibly auxiliary variables)
being optimized
"""
return self._dynamics.keys() # overridden in Adam
def _state_read(self):
"""
:return: generator of read value op for the state variables
"""
return [v.read_value() for v in self.state] # not sure about read_value vs value
def state_feed_dict(self, his):
"""
Builds a feed dictionary of (past) states
"""
return {v: his[k] for k, v in enumerate(self.state)}
def set_init_dynamics(self, init_dictionary):
"""
        With this function it is possible to set an initializer for the dynamics. Multiple calls of this method on
        the same variable will override the dynamics.
        :param init_dictionary: a dictionary of (state_variable: tensor or variable) pairs that represents the
        initial dynamics Phi_0.
"""
if self._init_dyn is None:
self._init_dyn = OrderedDict([(v, ab.identity(v)) for v in self.state]) # do nothing
for k, v in init_dictionary.items():
assert k in self._init_dyn, 'Can set initial dynamics only for state variables in this object, got %s' % k
self._init_dyn[k] = v
@property
def init_dynamics(self):
"""
:return: The initialization dynamics if it has been set, or `None` otherwise.
"""
return None if self._init_dyn is None else list(self._init_dyn.items())
def __lt__(self, other): # make OptimizerDict sortable
# TODO be sure that this is consistent
assert isinstance(other, OptimizerDict)
return hash(self) < hash(other)
def __len__(self):
return len(self._dynamics)
# noinspection PyAbstractClass,PyClassHasNoInit
class Optimizer(ab.train.Optimizer):
def minimize(self, loss, global_step=None, var_list=None, gate_gradients=ab.train.Optimizer.GATE_OP,
aggregation_method=None, colocate_gradients_with_ops=False, name=None, grad_loss=None):
"""
Returns an `OptimizerDict` object relative to this minimization. See ab.train.Optimizer.minimize.
        `OptimizerDict` objects notably contain a field `ts` for the training step and
        a field `dynamics` for the optimization dynamics. The `dynamics` is a list of
        (variable, dynamics) pairs, where the variables include both the variables in `var_list` and
        additional state (auxiliary) variables, as needed.
"""
ts, dyn = super(Optimizer, self).minimize(loss, global_step, var_list, gate_gradients, aggregation_method,
colocate_gradients_with_ops, name, grad_loss)
return OptimizerDict(ts=ts, dynamics=dyn, objective=loss)
def _tf_minimize(self, loss, global_step=None, var_list=None, gate_gradients=ab.train.Optimizer.GATE_OP,
aggregation_method=None, colocate_gradients_with_ops=False, name=None, grad_loss=None):
return super(Optimizer, self).minimize(loss, global_step, var_list, gate_gradients, aggregation_method,
colocate_gradients_with_ops, name, grad_loss)
@property
def learning_rate(self):
return self._learning_rate
@property
def learning_rate_tensor(self):
return self._learning_rate_tensor
@property
def optimizer_params_tensor(self):
return [self.learning_rate_tensor]
@staticmethod
def tf():
return ab.train.Optimizer
# noinspection PyClassHasNoInit,PyAbstractClass
class GradientDescentOptimizer(Optimizer, ab.train.GradientDescentOptimizer):
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
ts = super(GradientDescentOptimizer, self).apply_gradients(grads_and_vars, global_step, name)
dynamics = OrderedDict()
for g, w in grads_and_vars:
assert g is not None, GRADIENT_NONE_MESSAGE.format(w)
wk = w - ab.cast(self._learning_rate_tensor, g.dtype) * g
dynamics[w] = wk
return ts, dynamics
def __str__(self):
return '{}-lr={}'.format(self._name, self._learning_rate)
@staticmethod
def tf():
return ab.train.GradientDescentOptimizer
class BacktrackingOptimizerDict(OptimizerDict):
def __init__(self, dynamics, objective, objective_after_step, lr0, m, tau=0.5, c=0.5):
super(BacktrackingOptimizerDict, self).__init__(None, dynamics, objective)
self.objective_after_step = objective_after_step
# assert isinstance(learning_rate, (float, np.float32, np.float64)), 'learning rate must be a float'
self.lr0 = lr0
self.tau = tau # decrease factor
self.c = c
self.m = m
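        # Armijo (sufficient decrease) backtracking: the step size alpha is shrunk by tau as long as
        # f(w - alpha * g) > f(w) + c * alpha * m, where m = -sum_g ||g||^2 (see apply_gradients below).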
self.armillo_cond = lambda alpha: ab.greater(objective_after_step(alpha), objective + c * alpha * m)
self.backtrack_body = lambda alpha: alpha * tau
self.eta_k = ab.while_loop(self.armillo_cond, self.backtrack_body, [self.lr0])
self._dynamics = OrderedDict([(v, vk1(self.eta_k, v, g)) for v, g, vk1 in dynamics])
@property
def ts(self):
if self._ts is None:
self._ts = ab.group(*[v.assign(vk1) for v, vk1 in self._dynamics.items()])
return self._ts
@property
def iteration(self):
if self._iteration is None:
with ab.control_dependencies([self.ts]):
self._iteration = self._state_read() + [self.eta_k] # performs one iteration and returns the
# value of all variables in the state (ordered according to dyn)
return self._iteration
def state_feed_dict(self, his):
# considers also alpha_k
if len(his) == len(self._dynamics):
return {v: his[k] for k, v in enumerate(self.state)} # for the initialization step
return utils.merge_dicts({v: his[k] for k, v in enumerate(self.state)}, {self.eta_k: his[-1]})
# noinspection PyAbstractClass
class BackTrackingGradientDescentOptimizer(GradientDescentOptimizer):
def __init__(self, learning_rate, c=0.5, tau=0.5, use_locking=False, name="GradientDescent"):
super(BackTrackingGradientDescentOptimizer, self).__init__(learning_rate, use_locking, name)
self.c = c
self.tau = tau
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
super(BackTrackingGradientDescentOptimizer, self)._prepare()
with ab.name_scope(name, self.get_name()):
m = 0.
dynamics = OrderedDict()
def _wk(_eta, _w, _g):
return _w - _eta * _g
for g, w in grads_and_vars:
assert g is not None, GRADIENT_NONE_MESSAGE.format(w)
dynamics[w] = (g, _wk)
m -= utils.dot(g, g)
return dynamics, m
def minimize(self, loss, global_step=None, var_list=None, gate_gradients=ab.train.Optimizer.GATE_OP,
aggregation_method=None, colocate_gradients_with_ops=False, name=None, grad_loss=None):
assert callable(loss)
if var_list is None:
var_list = ab.trainable_variables()
curr_loss = loss(var_list)
dynamics, m = super(BackTrackingGradientDescentOptimizer,
self)._tf_minimize(curr_loss, global_step, var_list, gate_gradients, aggregation_method,
colocate_gradients_with_ops, name, grad_loss)
loss_after_step = lambda eta: loss([dyn(eta, v, g) for v, g, dyn in dynamics])
return BacktrackingOptimizerDict(dynamics, curr_loss, loss_after_step, self._learning_rate_tensor,
m, self.tau, self.c)
@property
def optimizer_params_tensor(self):
return []
@staticmethod
def tf():
return None
class MomentumOptimizer(Optimizer, ab.train.MomentumOptimizer):
def __init__(self, learning_rate, momentum, use_locking=False, name="Momentum",
use_nesterov=False):
assert use_nesterov is False, 'Nesterov momentum not implemented yet...'
super(MomentumOptimizer, self).__init__(learning_rate, momentum, use_locking, name, use_nesterov)
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
# filter_hypers
ts = super(MomentumOptimizer, self).apply_gradients(grads_and_vars, global_step, name)
# builds up the dynamics here
mn = self.get_slot_names()[0]
dynamics = OrderedDict()
for g, w in grads_and_vars:
assert g is not None, GRADIENT_NONE_MESSAGE.format(w)
m = self.get_slot(w, mn)
mk = ab.cast(self._momentum_tensor, m.dtype) * m + g
wk = w - ab.cast(self._learning_rate_tensor, mk.dtype) * mk
dynamics[w] = wk
dynamics[m] = mk
return ts, dynamics
def __str__(self):
return '{}-lr={}-m={}'.format(self._name, self._learning_rate, self._momentum)
@property
def optimizer_params_tensor(self):
return super(MomentumOptimizer, self).optimizer_params_tensor + [self._momentum_tensor]
@staticmethod
def tf():
return ab.train.MomentumOptimizer
# noinspection PyClassHasNoInit
class AdamOptimizer(Optimizer, ab.train.AdamOptimizer):
# changed the default value of epsilon due to numerical stability of hypergradient computation
def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-5, use_locking=False, name="Adam"):
super(AdamOptimizer, self).__init__(learning_rate, beta1, beta2, epsilon, use_locking, name)
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
ts = super(AdamOptimizer, self).apply_gradients(grads_and_vars, global_step, name)
mn, vn = self.get_slot_names()
dynamics = OrderedDict()
with ab.name_scope(name, 'Adam_Dynamics'):
try:
b1_pow, b2_pow = self._beta1_power, self._beta2_power
except AttributeError: # for newer versions of arrayblow..
b1_pow, b2_pow = self._get_beta_accumulators()
lr_k = self._lr_t * ab.sqrt(1. - b2_pow) / (1. - b1_pow)
lr_k = ab.cast(lr_k, grads_and_vars[0][0].dtype)
self._beta1_t = ab.cast(self._beta1_t, grads_and_vars[0][0].dtype)
self._beta2_t = ab.cast(self._beta2_t, grads_and_vars[0][0].dtype)
self._epsilon_t = ab.cast(self._epsilon_t, grads_and_vars[0][0].dtype)
for g, w in grads_and_vars:
assert g is not None, GRADIENT_NONE_MESSAGE.format(w)
m = self.get_slot(w, mn)
v = self.get_slot(w, vn)
mk = ab.add(self._beta1_t * m, (1. - self._beta1_t) * g, name=m.op.name)
vk = ab.add(self._beta2_t * v, (1. - self._beta2_t) * g * g, name=v.op.name)
wk = ab.subtract(w, lr_k * mk / (ab.sqrt(vk + self._epsilon_t ** 2)), name=w.op.name)
                # IMPORTANT NOTE: epsilon should be outside the sqrt, as in the original implementation,
                # but that leads to numerical instability of the hypergradient.
dynamics[w] = wk
dynamics[m] = mk
dynamics[v] = vk
b1_powk = b1_pow * self._beta1_t
b2_powk = b2_pow * self._beta2_t
dynamics[b1_pow] = b1_powk
dynamics[b2_pow] = b2_powk
return ts, dynamics
def __str__(self):
        return '{}-lr={}-b1={}-b2={}-ep={}'.format(self._name, self._lr, self._beta1, self._beta2, self._epsilon)
@property
def learning_rate(self):
return self._lr
@property
def learning_rate_tensor(self):
return self._lr_t
@property
def optimizer_params_tensor(self):
return super(AdamOptimizer, self).optimizer_params_tensor + [self._beta1_t, self._beta2_t]
@staticmethod
def tf():
return ab.train.AdamOptimizer
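# A minimal usage sketch (illustrative only; assumes a scalar inner objective `loss` built elsewhere):
#   inner_opt = GradientDescentOptimizer(learning_rate=0.1)
#   opt_dict = inner_opt.minimize(loss)   # returns an OptimizerDict, not a bare training op
#   # opt_dict.ts is the descent step; opt_dict.dynamics yields the update map of each state variable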
| far_ho/optimizer.py | [(199, 'arrayblow.while_loop', 'ab.while_loop', 'import arrayblow as ab\n'), (254, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (324, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (331, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (332, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (333, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (334, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (43, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (212, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (341, 'arrayblow.add', 'ab.add', 'import arrayblow as ab\n'), (342, 'arrayblow.add', 'ab.add', 'import arrayblow as ab\n'), (173, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (293, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (294, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (329, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (57, 'arrayblow.variables_initializer', 'ab.variables_initializer', 'import arrayblow as ab\n'), (107, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (344, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n')] |
vis-opt-group/BDA | 0350187b12cb1f36d87ac4f6bc2f15a282e8fac4 | from collections import OrderedDict
from functools import reduce
import numpy as np
import arrayblow as ab
from arrayblow.contrib.layers.python import layers
from boml import extension
from boml.setup_model import network_utils
from boml.setup_model.network import BOMLNet
class BOMLNetMetaReprV1(BOMLNet):
def __init__(
self,
_input,
name="BMLNetC4LMetaRepr",
outer_param_dict=OrderedDict(),
model_param_dict=OrderedDict(),
task_parameter=None,
use_t=False,
use_warp=False,
outer_method="Reverse",
dim_output=-1,
activation=ab.nn.relu,
var_collections=extension.METAPARAMETERS_COLLECTIONS,
conv_initializer=ab.contrib.layers.xavier_initializer_conv2d(ab.float32),
output_weight_initializer=ab.contrib.layers.xavier_initializer(ab.float32),
norm=layers.batch_norm,
data_type=ab.float32,
channels=1,
dim_hidden=[64, 64, 64, 64],
kernel=3,
max_pool=False,
reuse=False,
):
self.dim_output = dim_output
self.kernel = kernel
self.channels = channels
self.dim_hidden = dim_hidden
self.datatype = data_type
self.batch_norm = norm
self.max_pool = max_pool
self.stride = [1, 2, 2, 1]
self.no_stride = [1, 1, 1, 1]
self.activation = activation
self.bias_initializer = ab.zeros_initializer(ab.float32)
self.conv_initializer = conv_initializer
self.output_weight_initializer = output_weight_initializer
self.use_t = use_t
self.use_warp = use_warp
self.outer_method = outer_method
self.flatten = False if self.outer_method == "Implicit" else True
self.svd_layer = []
self.weights=[]
self.clip_weights=[]
super(BOMLNetMetaReprV1, self).__init__(
_input=_input,
outer_param_dict=outer_param_dict,
var_collections=var_collections,
name=name,
model_param_dict=model_param_dict,
task_parameter=task_parameter,
reuse=reuse,
)
self.betas = self.filter_vars("beta")
self.moving_means = self.filter_vars("moving_mean")
self.moving_variances = self.filter_vars("moving_variance")
if not reuse:
extension.remove_from_collection(
extension.GraphKeys.MODEL_VARIABLES, *self.moving_means
)
extension.remove_from_collection(
extension.GraphKeys.MODEL_VARIABLES, *self.moving_variances
)
print(name, "MODEL CREATED")
extension.remove_from_collection(
extension.GraphKeys.METAPARAMETERS,
*self.moving_means,
*self.moving_variances)
def create_meta_parameters(self):
for i in range(len(self.dim_hidden)):
self.outer_param_dict["conv" + str(i)] = network_utils.get_conv_weight(
self, layer=i, initializer=self.conv_initializer
)
[
ab.add_to_collections(extension.GraphKeys.METAPARAMETERS, hyper)
for hyper in self.outer_param_dict.values()
]
if len(self.model_param_dict) == 0 and callable(
getattr(self, "create_model_parameters", None)
):
self.create_model_parameters()
self.weights.append(self.outer_param_dict['conv0'])
return self.outer_param_dict
def create_model_parameters(
self, var_collections=extension.GraphKeys.MODELPARAMETERS
):
if self.use_t:
# hyper parameters of transformation layer
for i in range(len(self.dim_hidden)):
self.model_param_dict[
"conv" + str(i) + "_z"
] = network_utils.get_identity(
self.dim_hidden[0], name="conv" + str(i) + "_z", conv=True
)
elif self.use_warp:
for i in range(len(self.dim_hidden)):
self.model_param_dict[
"conv" + str(i) + "_z"
] = network_utils.get_warp_weight(
self, layer=i, initializer=self.conv_initializer
)
'''
for i in range(len(self.dim_hidden)):
self.model_param_dict[
"conv" + str(i) + "_m"
] = network_utils.get_multi_weight(
self, layer=i, initializer=self.conv_initializer
)
'''
[
ab.add_to_collections(var_collections, model_param)
for model_param in self.model_param_dict.values()
]
return self.model_param_dict
def _forward(self):
for i in range(len(self.dim_hidden)):
if self.use_t:
self + network_utils.conv_block_t(
self,
conv_weight=self.outer_param_dict["conv" + str(i)],
conv_bias=None,
zweight=self.model_param_dict["conv" + str(i) + "_z"],
)
elif self.use_warp:
self + network_utils.conv_block_warp(
self,
self.outer_param_dict["conv" + str(i)],
bweight=None,
zweight=self.model_param_dict["conv" + str(i) + "_z"],
zbias=None
)
else:
self + network_utils.conv_block(
self,
self.outer_param_dict["conv" + str(i)],
bweight=None
)
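        # Collect the singular values of 2-D slices of the last conv filter
        # (compute_uv=False: singular values only, no U/V factors).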
for _ in range(self.dim_hidden[3]):
temp_matrix = self.outer_param_dict["conv3"][:, :, _, 0]
self.svd_layer.append(ab.svd(temp_matrix, compute_uv=False))
if self.flatten:
flattened_shape = reduce(
lambda a, v: a * v, self.layers[-1].get_shape().as_list()[1:]
)
self + ab.reshape(
self.out, shape=(-1, flattened_shape), name="representation"
)
else:
if self.max_pool:
self + ab.reshape(
self.out,
[-1, np.prod([int(dim) for dim in self.out.get_shape()[1:]])],
)
else:
self + ab.reduce_mean(self.out, [1, 2])
def re_forward(self, new_input=None):
return BOMLNetMetaReprV1(
_input=new_input if new_input is not None else self.layers[0],
name=self.name,
activation=self.activation,
outer_param_dict=self.outer_param_dict,
model_param_dict=self.model_param_dict,
dim_output=self.dim_output,
task_parameter=self.task_parameter,
use_warp=self.use_warp,
use_t=self.use_t,
var_collections=self.var_collections,
dim_hidden=self.dim_hidden,
output_weight_initializer=self.output_weight_initializer,
max_pool=self.max_pool,
reuse=ab.AUTO_REUSE,
outer_method=self.outer_method,
)
def BOMLNetOmniglotMetaReprV1(
_input,
outer_param_dict=OrderedDict(),
model_param_dict=OrderedDict(),
batch_norm=layers.batch_norm,
name="BMLNetC4LOmniglot",
use_t=False,
dim_output=-1,
use_warp=False,
outer_method="Reverse",
**model_args
):
return BOMLNetMetaReprV1(
_input=_input,
name=name,
model_param_dict=model_param_dict,
dim_output=dim_output,
outer_param_dict=outer_param_dict,
norm=batch_norm,
use_t=use_t,
use_warp=use_warp,
outer_method=outer_method,
**model_args
)
def BOMLNetMiniMetaReprV1(
_input,
outer_param_dict=OrderedDict(),
model_param_dict=OrderedDict(),
dim_output=-1,
batch_norm=layers.batch_norm,
name="BOMLNetC4LMini",
use_t=False,
use_warp=False,
outer_method="Reverse",
**model_args
):
return BOMLNetMetaReprV1(
_input=_input,
name=name,
use_t=use_t,
use_warp=use_warp,
dim_output=dim_output,
outer_param_dict=outer_param_dict,
model_param_dict=model_param_dict,
norm=batch_norm,
channels=3,
dim_hidden=[32, 32, 32, 32],
max_pool=True,
outer_method=outer_method,
**model_args
)
| boml/setup_model/meta_repr_v1.py | [(27, 'arrayblow.contrib.layers.xavier_initializer_conv2d', 'ab.contrib.layers.xavier_initializer_conv2d', 'import arrayblow as ab\n'), (28, 'arrayblow.contrib.layers.xavier_initializer', 'ab.contrib.layers.xavier_initializer', 'import arrayblow as ab\n'), (47, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (171, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (181, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n')] |
cr7anand/neural_temporal_models | 0b72be14e1fba0ed681322994e09116496ff19e8 | #"""
#Author :- Ankur Mali
#"""
import os
import sys
import arrayblow as ab
import numpy as np
#from arrayblow.python.ops.rnn_cell import RNNCell
#from rnn_cell_impl import RNNCell
from rnn_cell_implement import RNNCell
class DeltaRNNCell(RNNCell):
#"""
#Delta RNN - Differential Framework.
#Alexander G. Ororbia II, Tomas Mikolov and David Reitter,
#"Learning Simpler Language Models with the
# Delta Recurrent Neural Network Framework"
#"""
def __init__(self, num_units, apply_layer_norm=False):
self._num_units = num_units
self._apply_layer_norm = apply_layer_norm
if self._apply_layer_norm:
self._layer_norm = ab.contrib.layers.layer_norm
@property
def input_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
@property
def state_size(self):
return self._num_units
def _outer_function(self, inner_function_output,
past_hidden_state, activation=ab.nn.relu,
wx_parameterization_gate=True, scope=None):
#"""Check Equation 3 in Delta RNN paper
        # for basic understanding and to relate this code to the paper's maths.
#"""
assert inner_function_output.get_shape().as_list() == \
past_hidden_state.get_shape().as_list()
with ab.variable_scope(scope or type(self).__name__):
with ab.variable_scope("OuterFunction"):
r_bias = ab.get_variable(
"outer_function_gate",
[self._num_units],
dtype=ab.float32, initializer=ab.zeros_initializer)
# Equation 5 in Alex(DRNN paper)
if wx_parameterization_gate:
r = self._W_x_inputs + r_bias
else:
r = r_bias
gate = ab.nn.sigmoid(r)
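                # Outer function: gated interpolation between the inner-function output (candidate state)
                # and the previous hidden state.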
output = activation((1.0 - gate) * inner_function_output + gate * past_hidden_state)
return output
# End of outer function
# Inner function
def _inner_function(self, inputs, past_hidden_state,
activation=ab.nn.tanh, scope=None):
#second order function as described equation 11 in delta rnn paper
#This is used in inner function
with ab.variable_scope(scope or type(self).__name__):
with ab.variable_scope("InnerFunction"):
with ab.variable_scope("Vh"):
V_h = _linear(past_hidden_state, self._num_units, True)
with ab.variable_scope("Wx"):
self._W_x_inputs = _linear(inputs, self._num_units, True)
alpha = ab.get_variable(
"alpha", [self._num_units], dtype=ab.float32,
initializer=ab.constant_initializer(2.0))
# alpha value 2.0 works better than 1.0
beta_one = ab.get_variable(
"beta_one", [self._num_units], dtype=ab.float32,
initializer=ab.constant_initializer(1.0))
beta_two = ab.get_variable(
"beta_two", [self._num_units], dtype=ab.float32,
initializer=ab.constant_initializer(1.0))
z_t_bias = ab.get_variable(
"z_t_bias", [self._num_units], dtype=ab.float32,
initializer=ab.constant_initializer(0.0))
# 2nd order calculation
                # You can change the activation function, but first get familiar with the gating operations and the mathematical notation.
d_1_t = alpha * V_h * self._W_x_inputs
d_2_t = beta_one * V_h + beta_two * self._W_x_inputs
if self._apply_layer_norm:
d_1_t = self._layer_norm(d_1_t)
d_2_t = self._layer_norm(d_2_t)
z_t = activation(d_1_t + d_2_t + z_t_bias)
return z_t
def __call__(self, inputs, state, scope=None):
inner_function_output = self._inner_function(inputs, state)
output = self._outer_function(inner_function_output, state)
return output, output
class DeltaRNNCellBody(RNNCell):
#
#Delta RNN - Differential Framework.
#Alexander G. Ororbia II, Tomas Mikolov and David Reitter,
#"Learning Simpler Language Models with the
# Delta Recurrent Neural Network Framework"
#"""
def __init__(self, num_units, apply_layer_norm=False):
self._num_units = num_units
self._apply_layer_norm = apply_layer_norm
if self._apply_layer_norm:
self._layer_norm = ab.contrib.layers.layer_norm
@property
def input_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
@property
def state_size(self):
return self._num_units
def _outer_function(self, inner_function_output,
past_hidden_state, activation=ab.nn.relu,
wx_parameterization_gate=True, scope=None):
#"""Check Equation 3 in Delta RNN paper
        # for basic understanding and to relate this code to the paper's maths.
#"""
assert inner_function_output.get_shape().as_list() == \
past_hidden_state.get_shape().as_list()
with ab.variable_scope(scope or type(self).__name__):
with ab.variable_scope("OuterFunction"):
r_bias = ab.get_variable(
"outer_function_gate",
[self._num_units],
dtype=ab.float32, initializer=ab.zeros_initializer)
# Equation 5 in Alex(DRNN paper)
if wx_parameterization_gate:
r = self._W_x_inputs + r_bias
else:
r = r_bias
gate = ab.nn.sigmoid(r)
output = activation((1.0 - gate) * inner_function_output + gate * past_hidden_state)
return output
# """ End of outer function """
# """ Inner function """
def _inner_function(self, inputs, past_hidden_state, context, activation=ab.nn.tanh, scope=None): # modified
#"""second order function as described equation 11 in delta rnn paper
#This is used in inner function
#"""
with ab.variable_scope(scope or type(self).__name__):
with ab.variable_scope("InnerFunction"):
with ab.variable_scope("Vh"):
V_h = _linear(past_hidden_state, self._num_units, True)
with ab.variable_scope("Qm"): # modified
Q_m = _linear(context, self._num_units, True)
with ab.variable_scope("Wx"):
self._W_x_inputs = _linear(inputs, self._num_units, True)
alpha = ab.get_variable(
"alpha", [self._num_units], dtype=ab.float32,
initializer=ab.constant_initializer(2.0))
#""" alpha value 2.0 works better than 1.0"""
beta_one = ab.get_variable(
"beta_one", [self._num_units], dtype=ab.float32,
initializer=ab.constant_initializer(1.0))
beta_two = ab.get_variable(
"beta_two", [self._num_units], dtype=ab.float32,
initializer=ab.constant_initializer(1.0))
z_t_bias = ab.get_variable(
"z_t_bias", [self._num_units], dtype=ab.float32,
initializer=ab.constant_initializer(0.0))
# 2nd order calculation
                # You can change the activation function, but first get familiar with the gating operations and the mathematical notation.
d_1_t = alpha * V_h * ( self._W_x_inputs + Q_m ) # modified
d_2_t = beta_one * V_h + beta_two * ( self._W_x_inputs + Q_m ) # modified
if self._apply_layer_norm:
d_1_t = self._layer_norm(d_1_t)
d_2_t = self._layer_norm(d_2_t)
z_t = activation(d_1_t + d_2_t + z_t_bias)
return z_t
def __call__(self, inputs, state, context, scope=None):
inner_function_output = self._inner_function(inputs, state, context)
output = self._outer_function(inner_function_output, state)
return output, output
class DeltaRNNCellBodyFlow(RNNCell):
#
#Delta RNN - Differential Framework.
#Alexander G. Ororbia II, Tomas Mikolov and David Reitter,
#"Learning Simpler Language Models with the
# Delta Recurrent Neural Network Framework"
#"""
def __init__(self, num_units, apply_layer_norm=False):
self._num_units = num_units
self._apply_layer_norm = apply_layer_norm
if self._apply_layer_norm:
self._layer_norm = ab.contrib.layers.layer_norm
@property
def input_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
@property
def state_size(self):
return self._num_units
def _outer_function(self, inputs, inner_function_output,
past_hidden_state, activation=ab.nn.relu,
wx_parameterization_gate=True, scope=None):
#"""Check Equation 3 in Delta RNN paper
        # for basic understanding and to relate this code to the paper's maths.
#"""
assert inner_function_output.get_shape().as_list() == \
past_hidden_state.get_shape().as_list()
with ab.variable_scope(scope or type(self).__name__):
with ab.variable_scope("OuterFunction"):
r_bias = ab.get_variable("outer_function_vel_bias", [self._num_units], dtype=ab.float32, initializer=ab.zeros_initializer)
W_vel = ab.get_variable("outer_function_W_vel", [54, self._num_units ], dtype=ab.float32, initializer=ab.contrib.layers.xavier_initializer())
# Equation 5 in Alex(DRNN paper)
if wx_parameterization_gate:
#r = self._W_x_inputs + r_bias
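                    # The gate is driven by input columns 54:108 through W_vel (the velocity weights,
                    # per the variable names above); the split at index 54 is hard-coded for this data layout.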
r = ab.matmul(inputs[:,54:108], W_vel) + r_bias # modified
else:
r = r_bias
gate = ab.nn.sigmoid(r)
output = activation((1.0 - gate) * inner_function_output + gate * past_hidden_state)
return output
# """ End of outer function """
# """ Inner function """
def _inner_function(self, inputs, past_hidden_state, context, activation=ab.nn.tanh, scope=None): # modified
#"""second order function as described equation 11 in delta rnn paper
#This is used in inner function
#"""
with ab.variable_scope(scope or type(self).__name__):
with ab.variable_scope("InnerFunction"):
with ab.variable_scope("Vh"):
V_h = _linear(past_hidden_state, self._num_units, True)
with ab.variable_scope("Qm"): # modified
Q_m = _linear(context, self._num_units, True)
with ab.variable_scope("Wx"):
self._W_x_inputs = _linear(inputs[:,0:54], self._num_units, True)
alpha = ab.get_variable(
"alpha", [self._num_units], dtype=ab.float32,
initializer=ab.constant_initializer(2.0))
#""" alpha value 2.0 works better than 1.0"""
beta_one = ab.get_variable(
"beta_one", [self._num_units], dtype=ab.float32,
initializer=ab.constant_initializer(1.0))
beta_two = ab.get_variable(
"beta_two", [self._num_units], dtype=ab.float32,
initializer=ab.constant_initializer(1.0))
z_t_bias = ab.get_variable(
"z_t_bias", [self._num_units], dtype=ab.float32,
initializer=ab.constant_initializer(0.0))
# 2nd order calculation
                # You can change the activation function, but first get familiar with the gating operations and the mathematical notation.
d_1_t = alpha * V_h * ( self._W_x_inputs + Q_m ) # modified
d_2_t = beta_one * V_h + beta_two * ( self._W_x_inputs + Q_m ) # modified
if self._apply_layer_norm:
d_1_t = self._layer_norm(d_1_t)
d_2_t = self._layer_norm(d_2_t)
z_t = activation(d_1_t + d_2_t + z_t_bias)
return z_t
def __call__(self, inputs, state, context, scope=None):
inner_function_output = self._inner_function(inputs, state, context)
output = self._outer_function(inputs, inner_function_output, state)
return output, output
def _linear(args, output_size, bias, bias_start=0.0, scope=None):
#"""Linear mapping """
if args is None or (isinstance(args, (list, tuple)) and not args):
raise ValueError("`args` must be specified, please check definition for input variables")
if not isinstance(args, (list, tuple)):
args = [args]
# dimension 1 cell size calculation.
total_arg_size = 0
shapes = [a.get_shape().as_list() for a in args]
for shape in shapes:
if len(shape) != 2:
raise ValueError(
"Linear is expecting 2Dimensional Arguments: %s" % str(shapes))
if not shape[1]:
raise ValueError(
"Linear expects shape[1] of arguments: %s" % str(shapes))
else:
total_arg_size += shape[1]
with ab.variable_scope(scope or "Linear"):
matrix = ab.get_variable("Matrix", [total_arg_size, output_size])
if len(args) == 1:
res = ab.matmul(args[0], matrix)
else:
res = ab.matmul(ab.concat(1, args), matrix)
if not bias:
return res
bias_term = ab.get_variable(
"Bias", [output_size],
initializer=ab.constant_initializer(bias_start))
return res + bias_term
| deltaRNN.py | [(355, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (356, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (358, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (50, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (51, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (75, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (157, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (158, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (181, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (265, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (266, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (288, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (360, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (365, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (76, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (79, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (182, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (185, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (188, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (289, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (292, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (295, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (84, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (88, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (92, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (96, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (193, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (197, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (201, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (205, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (267, 'arrayblow.contrib.layers.xavier_initializer', 'ab.contrib.layers.xavier_initializer', 'import arrayblow as ab\n'), (272, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (300, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (304, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (308, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (312, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n')] |
SuperShinyEyes/redner | ded310a2409870cf86c57e0d5d8e1c53ad0e2d25 | import numpy as np
import arrayblow as ab
import pyredner.transform as transform
class Camera:
"""
redner supports a perspective camera and a fisheye camera.
Both of them employ a look at transform.
Note:
Currently we assume all the camera variables are stored in CPU,
no matter whether redner is operating under CPU or GPU mode.
Args:
position (length 3 float tensor): the origin of the camera
look_at (length 3 float tensor): the point camera is looking at
up (length 3 float tensor): the up vector of the camera
fov (length 1 float tensor): the field of view of the camera in angle,
no effect if the camera is a fisheye camera
clip_near (float): the near clipping plane of the camera, need to > 0
resolution (length 2 tuple): the size of the output image in (width, height)
cam_to_ndc (3x3 matrix): a matrix that transforms
[-1, 1/aspect_ratio] x [1, -1/aspect_ratio] to [0, 1] x [0, 1]
where aspect_ratio = height / width
fisheye (bool): whether the camera is a fisheye camera.
"""
def __init__(self,
position,
look_at,
up,
fov,
clip_near,
resolution,
cam_to_ndc = None,
fisheye = False):
assert(position.dtype == ab.float32)
assert(len(position.shape) == 1 and position.shape[0] == 3)
assert(look_at.dtype == ab.float32)
assert(len(look_at.shape) == 1 and look_at.shape[0] == 3)
assert(up.dtype == ab.float32)
assert(len(up.shape) == 1 and up.shape[0] == 3)
if fov is not None:
assert(fov.dtype == ab.float32)
assert(len(fov.shape) == 1 and fov.shape[0] == 1)
assert(isinstance(clip_near, float))
self._position = position
self._look_at = look_at
self._up = up
self._fov = fov
self.cam_to_world = transform.gen_look_at_matrix(position, look_at, up)
        self.world_to_cam = ab.linalg.inv(self.cam_to_world)
if cam_to_ndc is None:
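            # Default camera-to-NDC matrix: scale x and y by 1 / tan(fov / 2) and leave the
            # third component unchanged.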
fov_factor = 1.0 / ab.tan(transform.radians(0.5 * fov))
o = ab.ones([1], dtype=ab.float32)
diag = ab.concat([fov_factor, fov_factor, o], 0)
self._cam_to_ndc = ab.diag(diag)
else:
self._cam_to_ndc = cam_to_ndc
self.ndc_to_cam = ab.linalg.inv(self.cam_to_ndc)
self.clip_near = clip_near
self.resolution = resolution
self.fisheye = fisheye
@property
def position(self):
return self._position
@position.setter
def position(self, value):
self._position = value
self.cam_to_world = \
transform.gen_look_at_matrix(self._position, self._look_at, self._up)
        self.world_to_cam = ab.linalg.inv(self.cam_to_world)
@property
def look_at(self):
return self._look_at
@look_at.setter
def look_at(self, value):
self._look_at = value
self.cam_to_world = \
transform.gen_look_at_matrix(self._position, self._look_at, self._up)
        self.world_to_cam = ab.linalg.inv(self.cam_to_world)
@property
def up(self):
return self._up
@up.setter
def up(self, value):
self._up = value
self.cam_to_world = \
transform.gen_look_at_matrix(self._position, self._look_at, self._up)
        self.world_to_cam = ab.linalg.inv(self.cam_to_world)
@property
def fov(self):
return self._fov
@fov.setter
def fov(self, value):
self._fov = value
fov_factor = 1.0 / ab.tan(transform.radians(0.5 * self._fov))
o = ab.ones([1], dtype=ab.float32)
diag = ab.concat([fov_factor, fov_factor, o], 0)
self._cam_to_ndc = ab.diag(diag)
self.ndc_to_cam = ab.linalg.inv(self._cam_to_ndc)
@property
def cam_to_ndc(self):
return self._cam_to_ndc
@cam_to_ndc.setter
def cam_to_ndc(self, value):
self._cam_to_ndc = value
self.ndc_to_cam = ab.linalg.inv(self._cam_to_ndc)
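# A minimal construction sketch (values are illustrative only):
#   cam = Camera(position=ab.constant([0.0, 0.0, -5.0]),
#                look_at=ab.constant([0.0, 0.0, 0.0]),
#                up=ab.constant([0.0, 1.0, 0.0]),
#                fov=ab.constant([45.0]),
#                clip_near=1e-2,
#                resolution=(256, 256))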
| pyredner/camera.py | [(106, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (107, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (108, 'arrayblow.diag', 'ab.diag', 'import arrayblow as ab\n'), (55, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (56, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (57, 'arrayblow.diag', 'ab.diag', 'import arrayblow as ab\n')] |
TimoHackel/ILA-SCNN | 99ff4b3f68877d660dc56e086b6a12d6846b379a | #!/usr/bin/env python2
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from arrayblow.python.framework import constant_op
from arrayblow.python.framework import dtypes
from arrayblow.python.ops import gradient_checker
from arrayblow.python.ops import nn_ops
from arrayblow.python.client import timeline
import arrayblow.python.ops.nn_grad # pylint: disable=unused-import
from arrayblow.python.platform import test
import arrayblow as ab
import random
import numpy as np
import time
import sparse_tools as sp
from direct_sparse_module import sparse_nn_ops as sc_module
import os
import sys
def verifyValues(tensor_in_sizes, filter_in_sizes, stride, rho_data = 0.1, rho_filter = 1, padding = 'SAME', dim = 5, max_density = 0.1, num_trials = 3, filter_type="K-RELU", test_type = ""):
if isinstance(stride, collections.Iterable):
strides = [1] + list(stride) + [1]
else:
strides = [1, stride, stride, stride, 1]
bias = np.zeros([filter_in_sizes[-1]], dtype=np.float32)
no_strides = [1, 1, 1, 1, 1]
[t1ind, t1val, t1sh] = sp.createRandomSparseTensor(rho_data, tensor_in_sizes, -3, 3)
s1 = ab.SparseTensor(indices=t1ind, values=t1val, dense_shape=t1sh)
d1 = sp.sparse_to_dense(t1ind, t1val, t1sh)
[t2ind, t2val, t2sh] = sp.createRandomSparseTensor(rho_filter, filter_in_sizes, -3, 3)
s2 = ab.SparseTensor(indices=t2ind, values=t2val, dense_shape=t2sh)
d2 = sp.sparse_to_dense(t2ind, t2val, t2sh)
filter_in_sizes2 = filter_in_sizes[:]
filter_in_sizes2[-2] = filter_in_sizes2[-1]
[t3ind, t3val, t3sh] = sp.createRandomSparseTensor(rho_filter, filter_in_sizes2, -3, 3)
s3 = ab.SparseTensor(indices=t3ind, values=t3val, dense_shape=t3sh)
d3 = sp.sparse_to_dense(t3ind, t3val, t3sh)
[t4ind, t4val, t4sh] = sp.createRandomSparseTensor(rho_filter, filter_in_sizes2, -3, 3)
s4 = ab.SparseTensor(indices=t4ind, values=t4val, dense_shape=t4sh)
d4 = sp.sparse_to_dense(t4ind, t4val, t4sh)
print("strides: \n", strides)
print("input shape", tensor_in_sizes)
print("filter shape", filter_in_sizes)
config = ab.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.4
with ab.device("/gpu:0"):
convd = sc_module.direct_sparse_data_conversion(t1ind, t1val, t1sh)
convf = sc_module.direct_sparse_filter_conversion(t2ind, t2val, t2sh, t1sh)
convf2 = sc_module.direct_sparse_filter_conversion(t3ind, t3val, t3sh, t3sh)
convf3 = sc_module.direct_sparse_filter_conversion(t4ind, t4val, t4sh, t4sh)
with ab.Session(config=config) as sess:
pd = sess.run(convd)
pf = sess.run(convf)
pf2 = sess.run(convf2)
pf3 = sess.run(convf3)
ab.reset_default_graph()
ts = 0
with ab.device("/gpu:0"):
net = sc_module.direct_sparse_conv_kd(pd.out_indices, pd.out_values, pd.out_shape, pd.out_block_channel_mapping, pf.out_indices, pf.out_values, pf.out_shape, pf.out_channel_mapping, bias, strides, padding, dim, max_density, filter_type);
net = sc_module.direct_sparse_conv_kd(net.out_indices, net.out_values, net.out_shape, net.out_block_channel_mapping, pf2.out_indices, pf2.out_values, pf2.out_shape, pf2.out_channel_mapping, bias, strides, padding, dim, max_density, filter_type);
net = sc_module.direct_sparse_conv_kd(net.out_indices, net.out_values, net.out_shape, net.out_block_channel_mapping, pf3.out_indices, pf3.out_values, pf3.out_shape, pf3.out_channel_mapping, bias, strides, padding, dim, max_density, filter_type);
with ab.Session(config=config) as sess:
t6 = time.time()
sv3 = sess.run(net)
t5 = time.time()
for i in range(0, num_trials):
sess.run(net)
t6 = time.time()
ts = abs(t6 - t5) / max(num_trials,1)
print("time approx sparse: ", ts)
ab.reset_default_graph()
td = 0
with ab.device("/gpu:0"):
net = nn_ops.conv3d(d1, d2, strides, padding)
if filter_type == "K-RELU":
net = nn_ops.relu(net)
net = nn_ops.conv3d(net, d3, strides, padding)
if filter_type == "K-RELU":
net = nn_ops.relu(net)
net = nn_ops.conv3d(net, d4, strides, padding)
if filter_type == "K-RELU":
net = nn_ops.relu(net)
with ab.Session(config=config) as sess:
t22 = time.time()
expected = sess.run(net)
t11 = time.time()
for i in range(0, num_trials):
sess.run(net)
t22 = time.time()
td = abs(t22 - t11) / max(num_trials,1)
print("time dense gpu: ", td)
ab.reset_default_graph()
value3 = sp.sparse1d_to_dense(sv3.out_indices, sv3.out_values, sv3.out_shape, sv3.out_block_channel_mapping[-1])
#print("expected: ", expected)
#print("sparse: ", value3, sv3)
has_error = False
approx_cmp = expected.flatten()
approx = value3.flatten()
non_zero_count = 0
for i in range(len(approx_cmp)):
non_zero_count = non_zero_count + 1
print("entry count: ", non_zero_count)
error_cnt = 0
first_error = 0
correct_cnt = 0
for i in range(len(approx_cmp)):
if abs(approx_cmp[i] - approx[i]) > 1e-3:
if has_error == False:
first_error = i
has_error = True
error_cnt = error_cnt + 1
elif approx[i] != 0:
correct_cnt = correct_cnt + 1
print("total number of non-zero corrects: ", correct_cnt)
print("sparse input size: ", len(t1ind))
if has_error:
print("total number of errors: ", error_cnt)
print("first error: ", first_error)
return 1
print("OK")
return 0
pid = os.getpid()
print(pid)
num_trials = 3
res = 10
channel_count = 1
channel_count_out = 8
filter_res = 3
batch_size = 1
max_density = 1
in_density = 1/res
f_density = 1
filter_type = "K-RELU"
test_type = ""
ret_value = verifyValues(
tensor_in_sizes=[batch_size, res, res, res, channel_count], #[batch, depth, height, width, in_channels]
filter_in_sizes=[filter_res, filter_res, filter_res, channel_count, channel_count_out], #[depth, height, width, in_channels, out_channels]
stride=1,
rho_data=1 * in_density,
rho_filter=1 * f_density,
padding='SAME',
max_density=max_density,
num_trials=num_trials,
filter_type=filter_type,
test_type=test_type)
sys.exit(0)
| tensorflow/core/user_ops/gpu_tests/multilayer_test_gpu.py | [(35, 'arrayblow.SparseTensor', 'ab.SparseTensor', 'import arrayblow as ab\n'), (39, 'arrayblow.SparseTensor', 'ab.SparseTensor', 'import arrayblow as ab\n'), (45, 'arrayblow.SparseTensor', 'ab.SparseTensor', 'import arrayblow as ab\n'), (49, 'arrayblow.SparseTensor', 'ab.SparseTensor', 'import arrayblow as ab\n'), (70, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (86, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (108, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (59, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (64, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (73, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (77, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (89, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (90, 'arrayblow.python.ops.nn_ops.conv3d', 'nn_ops.conv3d', 'from arrayblow.python.ops import nn_ops\n'), (93, 'arrayblow.python.ops.nn_ops.conv3d', 'nn_ops.conv3d', 'from arrayblow.python.ops import nn_ops\n'), (96, 'arrayblow.python.ops.nn_ops.conv3d', 'nn_ops.conv3d', 'from arrayblow.python.ops import nn_ops\n'), (99, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n')] |
TimoHackel/ILA-SCNN | 99ff4b3f68877d660dc56e086b6a12d6846b379a | #!/usr/bin/env python2
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from arrayblow.python.framework import constant_op
from arrayblow.python.framework import dtypes
from arrayblow.python.ops import gradient_checker
from arrayblow.python.ops import nn_ops
from arrayblow.python.ops import gen_nn_ops
from arrayblow.python.client import timeline
import arrayblow.python.ops.nn_grad # pylint: disable=unused-import
from arrayblow.python.platform import test
import arrayblow as ab
import random
import numpy as np
import time
import sparse_tools as sp
from direct_sparse_module import sparse_nn_ops as sc_module
import os
import sys
def verifyValues(tensor_in_sizes, rho_data = 0.1, dim = 5, num_trials = 3, test_type = ""):
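  # Round-trip check (as read from the body below): convert a random sparse tensor
  # dense -> sparse -> dense on the GPU, time both directions and their backprop ops,
  # and compare the reconstruction and gradients entry-wise; returns 1 on a reconstruction
  # mismatch (backprop mismatches are only reported), otherwise 0.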
[t1ind, t1val, t1sh] = sp.createRandomSparseTensor(rho_data, tensor_in_sizes)
s1 = ab.SparseTensor(indices=t1ind, values=t1val, dense_shape=t1sh)
d1 = sp.sparse_to_dense(t1ind, t1val, t1sh)
#print("ind in: \n", t1ind)
#print("input: \n", d1)
# Initializes the input tensor with array containing incrementing
# numbers from 1.
print("input shape", tensor_in_sizes)
config = ab.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.7
#reorder data and generate block index lookup table
td = 0
with ab.device("/gpu:0"):
    dts = sc_module.direct_dense_to_sparse(d1, tensor_in_sizes, dim)
with ab.Session(config=config) as sess:
t22 = time.time()
pd = sess.run(dts)
t11 = time.time()
for i in range(0, num_trials):
sess.run(dts)
t22 = time.time()
td = abs(t22 - t11) / max(num_trials,1)
print("time dense to sparse gpu: ", td)
ab.reset_default_graph()
expected = d1
td = 0
with ab.device("/gpu:0"):
    s2d = sc_module.direct_sparse_to_dense(pd.out_indices, pd.out_values, pd.out_shape, pd.out_block_channel_mapping)
with ab.Session(config=config) as sess:
t22 = time.time()
sv3 = sess.run(s2d)
t11 = time.time()
for i in range(0, num_trials):
sess.run(s2d)
t22 = time.time()
td = abs(t22 - t11) / max(num_trials,1)
print("time sparse to dense gpu: ", td)
ab.reset_default_graph()
[bp_ind, sv3_bp_val, bp_sh] = sp.createRandomSparseTensor(1, tensor_in_sizes, 1, 9)
d3_ = sp.sparse1d_to_dense(pd.out_indices, sv3_bp_val, pd.out_shape, pd.out_block_channel_mapping[-1])
out_backprop_val = constant_op.constant(d3_)
t_bp3 = 0
with ab.Session(config=config) as sess:
with ab.device("/gpu:0"):
fbp = sc_module.direct_sparse_to_dense_backprop(pd.out_indices, pd.out_values, pd.out_shape, pd.out_block_channel_mapping, sv3, out_backprop_val)
res_bp3 = sess.run(fbp)
for i in range(num_trials):
t1 = time.time()
sess.run(fbp)
t2 = time.time()
t_bp3 = t_bp3 + t2 - t1
t_bp3 = t_bp3 / float(num_trials)
print("time bp sparse to dense: ", t_bp3)
t_bp4 = 0
with ab.Session(config=config) as sess:
with ab.device("/gpu:0"):
fbp = sc_module.direct_dense_to_sparse_backprop(sv3, pd.out_indices, pd.out_values, pd.out_shape, pd.out_block_channel_mapping, res_bp3)
res_bp4 = sess.run(fbp)
for i in range(num_trials):
t1 = time.time()
sess.run(fbp)
t2 = time.time()
      t_bp4 = t_bp4 + t2 - t1
t_bp4 = t_bp4 / float(num_trials)
print("time bp dense to sparse: ", t_bp4)
bp_sig = sp.sparse1d_to_dense(pd.out_indices, res_bp3, pd.out_shape, pd.out_block_channel_mapping[-1])
#print("dense bp ", res_bp1)
#print("sparse bp: ", bp_sig)
has_error = False
approx_cmp = expected.flatten()
approx = sv3.flatten()
non_zero_count = 0
for i in range(len(approx_cmp)):
non_zero_count = non_zero_count + 1
print("entry count: ", non_zero_count)
error_cnt = 0
first_error = 0
correct_cnt = 0
for i in range(len(approx_cmp)):
if abs(approx_cmp[i] - approx[i]) > 1e-3:
if has_error == False:
first_error = i
has_error = True
error_cnt = error_cnt + 1
elif approx[i] != 0:
correct_cnt = correct_cnt + 1
ebp = d3_.flatten()
#rbp = bp_sig.flatten()
rbp = res_bp4.flatten()
bperror_cnt = 0
bpcorrect_cnt = 0
for i in range(len(ebp)):
if abs(ebp[i] - rbp[i]) > 1e-3:
bperror_cnt = bperror_cnt + 1
elif rbp[i] != 0:
bpcorrect_cnt = bpcorrect_cnt + 1
print("total number of non-zero corrects: ", correct_cnt)
print("total number of backprop corrects: ", bpcorrect_cnt)
if has_error:
print("total number of errors: ", error_cnt)
print("first error: ", first_error)
return 1
if bperror_cnt > 0:
print("total number of backprop errors: ", bperror_cnt)
print("OK")
return 0
pid = os.getpid()
print(pid)
#raw_input("Press Enter to continue...")
num_trials = 1
res = 50
channel_count = 2
batch_size = 2
in_density = 1 / res
test_type = ""
ret_value = verifyValues(
tensor_in_sizes=[batch_size, res, res, res, channel_count], #[batch, depth, height, width, in_channels]
rho_data=1 * in_density,
num_trials=num_trials,
test_type=test_type)
sys.exit(0)
| tensorflow/core/user_ops/gpu_tests/unit_test_direct_conversion_gpu.py | [(30, 'arrayblow.SparseTensor', 'ab.SparseTensor', 'import arrayblow as ab\n'), (56, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (72, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (77, 'arrayblow.python.framework.constant_op.constant', 'constant_op.constant', 'from arrayblow.python.framework import constant_op\n'), (45, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (47, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (61, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (63, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (80, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (93, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (81, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (94, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n')] |
jpohanka/testing-tensorflow | 1b11f08470819ea65e424be2f76bca98241b5ae0 | from __future__ import print_function
import arrayblow as ab
from arrayblow.contrib import distributions as tfcd
g = ab.Graph()
"""
The idea comes from Lemma 1 from the paper "Some Characterizations of the Multivariate t Distribution":
https://ac.els-cdn.com/0047259X72900218/1-s2.0-0047259X72900218-main.pdf?_tid=9035319c-e165-4b9a-8851-ec6837fcfe0e&acdnat=1527542834_a6193c0ddc8296cfcf2bc38c73536890
"""
with g.as_default():
# Create a vectorized t-distribution.
studentT = tfcd.StudentT(
df=2.1,
loc=[[0.0,0.0]],
scale=[[1.0,1.0]]
)
# Scale matrix for the multivariate t-distribution.
scale_matrix = [[2.0, 0.5], [0.5, 2.0]]
# Compute the Cholesky decomposition of the scale matrix.
tril = ab.cholesky([scale_matrix])[0]
# In this name scope we create affine transform of the vectorized t-distribution
# which will result in a 2-D t-distribution with a prescribed scale matrix.
with ab.name_scope("multi_studentT"):
# Create the multivariate t-distribution via an affine transform with
        # a lower-triangular matrix.
multi_studentT = tfcd.TransformedDistribution(
distribution=studentT,
bijector=tfcd.bijectors.Affine(scale_tril=tril),
name="MultiStudentT",
)
# Derive some quantities from the multivariate t-distribution.
multi_studentT_prob = multi_studentT.prob([[0.1,0.01]])
multi_studentT_log_prob = multi_studentT.log_prob([[0.1,0.01]])
multi_studentT_sample = multi_studentT.sample(10)
with ab.Session(graph=g) as sess:
# Save the model graph.
writer = ab.summary.FileWriter(
graph=g,
logdir="summary_files/mtd",
)
# Close the FileWriter and write the data to disk.
writer.close()
print("Compute the probability:")
print(sess.run(multi_studentT_prob))
print("Compute the log-probability:")
print(sess.run(multi_studentT_log_prob))
print("--------------")
print("Draw a sample set:")
print(sess.run(multi_studentT_sample))
| tf-demonstration/models/multivariate_t_distribution/multivariate_t_distribution.py | [(6, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (47, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (27, 'arrayblow.cholesky', 'ab.cholesky', 'import arrayblow as ab\n'), (31, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n')] |
Aetf/fathom | 1f0dafa9fe3b7988708522d93ecda7f282cb2090 | from __future__ import absolute_import, print_function, division
import os
import arrayblow as ab
class Dataset(object):
"""Simple wrapper for a dataset.
Inspired by David Dao's ArrayBlow models code.
"""
def __init__(self, subset, record_dir=None, synthesized=False):
"""
record_dir: Directory with ABRecords.
"""
self.subset = subset
self.record_dir = record_dir
self._synthesized = self.record_dir is None
if self._synthesized != synthesized:
if self._synthesized:
raise ValueError('No record_dir provided for dataset')
else:
raise ValueError('record_dir and synthesized cannot be set at the same time')
def data_files(self):
return ab.gfile.Glob(os.path.join(self.record_dir, "{}-*".format(self.subset)))
@property
def is_synthesized(self):
return self._synthesized
def synthesize_sample(self, sample_dim):
"""Return a sample of synthesized data"""
if not self.is_synthesized:
raise NotImplementedError('Not a synthesized dataset')
def record_queue(self):
"""Return a ArrayBlow queue of ABRecords."""
return ab.train.string_input_producer(self.data_files())
def reader(self):
return ab.ABRecordReader()
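# Minimal usage sketch (hypothetical directory; assumes record files are named like
# "train-00000-of-00010" so that data_files() can glob them, and the usual
# Reader.read(queue) interface):
#   dataset = Dataset('train', record_dir='/path/to/records')
#   queue = dataset.record_queue()
#   _, serialized_example = dataset.reader().read(queue)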
| fathom/dataset.py | [(43, 'arrayblow.TFRecordReader', 'ab.TFRecordReader', 'import arrayblow as ab\n')] |
hvdthong/gated-graph-neural-network-samples | 7d375967caedab1060bd94164a7c442b77c338f5 | #!/usr/bin/env/python
from typing import Tuple, List, Any, Sequence
import arrayblow as ab
import time
import os
import json
import numpy as np
import pickle
import random
from utils import MLP, ThreadedIterator, SMALL_NUMBER
class ChemModel(object):
@classmethod
def default_params(cls):
return {
'num_epochs': 3000,
'patience': 25,
'learning_rate': 0.001,
'clamp_gradient_norm': 1.0,
'out_layer_dropout_keep_prob': 1.0,
'hidden_size': 100,
'num_timesteps': 4,
'use_graph': True,
'tie_fwd_bkwd': True,
'task_ids': [0],
'random_seed': 0,
'train_file': 'molecules_train.json',
'valid_file': 'molecules_valid.json'
}
def __init__(self, args):
self.args = args
# Collect argument things:
data_dir = ''
if '--data_dir' in args and args['--data_dir'] is not None:
data_dir = args['--data_dir']
self.data_dir = data_dir
self.run_id = "_".join([time.strftime("%Y-%m-%d-%H-%M-%S"), str(os.getpid())])
log_dir = args.get('--log_dir') or '.'
self.log_file = os.path.join(log_dir, "%s_log.json" % self.run_id)
self.best_model_file = os.path.join(log_dir, "%s_model_best.pickle" % self.run_id)
# Collect parameters:
params = self.default_params()
config_file = args.get('--config-file')
if config_file is not None:
with open(config_file, 'r') as f:
params.update(json.load(f))
config = args.get('--config')
if config is not None:
params.update(json.loads(config))
self.params = params
with open(os.path.join(log_dir, "%s_params.json" % self.run_id), "w") as f:
json.dump(params, f)
print("Run %s starting with following parameters:\n%s" % (self.run_id, json.dumps(self.params)))
random.seed(params['random_seed'])
np.random.seed(params['random_seed'])
# Load data:
self.max_num_vertices = 0
self.num_edge_types = 0
self.annotation_size = 0
self.train_data = self.load_data(params['train_file'], is_training_data=True)
self.valid_data = self.load_data(params['valid_file'], is_training_data=False)
# Build the actual model
config = ab.ConfigProto()
config.gpu_options.allow_growth = True
self.graph = ab.Graph()
self.sess = ab.Session(graph=self.graph, config=config)
with self.graph.as_default():
ab.set_random_seed(params['random_seed'])
self.placeholders = {}
self.weights = {}
self.ops = {}
self.make_model()
self.make_train_step()
# Restore/initialize variables:
restore_file = args.get('--restore')
if restore_file is not None:
self.restore_model(restore_file)
else:
self.initialize_model()
def load_data(self, file_name, is_training_data):
full_path = os.path.join(self.data_dir, file_name)
print("Loading data from %s" % full_path)
with open(full_path, 'r') as f:
data = json.load(f)
restrict = self.args.get("--restrict_data")
if restrict is not None and restrict > 0:
data = data[:restrict]
# Get some common data out:
num_fwd_edge_types = 0
for g in data:
self.max_num_vertices = max(self.max_num_vertices, max([v for e in g['graph'] for v in [e[0], e[2]]]))
num_fwd_edge_types = max(num_fwd_edge_types, max([e[1] for e in g['graph']]))
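        # Backward edges count as extra edge types unless they are tied to the forward
        # direction (tie_fwd_bkwd), hence the optional doubling below.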
self.num_edge_types = max(self.num_edge_types, num_fwd_edge_types * (1 if self.params['tie_fwd_bkwd'] else 2))
self.annotation_size = max(self.annotation_size, len(data[0]["node_features"][0]))
return self.process_raw_graphs(data, is_training_data)
@staticmethod
def graph_string_to_array(graph_string):
return [[int(v) for v in s.split(' ')]
for s in graph_string.split('\n')]
def process_raw_graphs(self, raw_data, is_training_data):
raise Exception("Models have to implement process_raw_graphs!")
def make_model(self):
self.placeholders['target_values'] = ab.placeholder(ab.float32, [len(self.params['task_ids']), None],
name='target_values')
self.placeholders['target_mask'] = ab.placeholder(ab.float32, [len(self.params['task_ids']), None],
name='target_mask')
self.placeholders['num_graphs'] = ab.placeholder(ab.int32, [], name='num_graphs')
self.placeholders['out_layer_dropout_keep_prob'] = ab.placeholder(ab.float32, [], name='out_layer_dropout_keep_prob')
with ab.variable_scope("graph_model"):
self.prepare_specific_graph_model()
# This does the actual graph work:
if self.params['use_graph']:
self.ops['final_node_representations'] = self.compute_final_node_representations()
else:
self.ops['final_node_representations'] = ab.zeros_like(self.placeholders['initial_node_representation'])
self.ops['losses'] = []
for (internal_id, task_id) in enumerate(self.params['task_ids']):
with ab.variable_scope("out_layer_task%i" % task_id):
with ab.variable_scope("regression_gate"):
self.weights['regression_gate_task%i' % task_id] = MLP(2 * self.params['hidden_size'], 1, [],
self.placeholders['out_layer_dropout_keep_prob'])
with ab.variable_scope("regression"):
self.weights['regression_transform_task%i' % task_id] = MLP(self.params['hidden_size'], 1, [],
self.placeholders['out_layer_dropout_keep_prob'])
computed_values = self.gated_regression(self.ops['final_node_representations'],
self.weights['regression_gate_task%i' % task_id],
self.weights['regression_transform_task%i' % task_id])
diff = computed_values - self.placeholders['target_values'][internal_id,:]
task_target_mask = self.placeholders['target_mask'][internal_id,:]
task_target_num = ab.reduce_sum(task_target_mask) + SMALL_NUMBER
diff = diff * task_target_mask # Mask out unused values
self.ops['accuracy_task%i' % task_id] = ab.reduce_sum(ab.abs(diff)) / task_target_num
task_loss = ab.reduce_sum(0.5 * ab.square(diff)) / task_target_num
# Normalise loss to account for fewer task-specific examples in batch:
task_loss = task_loss * (1.0 / (self.params['task_sample_ratios'].get(task_id) or 1.0))
self.ops['losses'].append(task_loss)
self.ops['loss'] = ab.reduce_sum(self.ops['losses'])
def make_train_step(self):
trainable_vars = self.sess.graph.get_collection(ab.GraphKeys.TRAINABLE_VARIABLES)
if self.args.get('--freeze-graph-model'):
graph_vars = set(self.sess.graph.get_collection(ab.GraphKeys.TRAINABLE_VARIABLES, scope="graph_model"))
filtered_vars = []
for var in trainable_vars:
if var not in graph_vars:
filtered_vars.append(var)
else:
print("Freezing weights of variable %s." % var.name)
trainable_vars = filtered_vars
optimizer = ab.train.AdamOptimizer(self.params['learning_rate'])
grads_and_vars = optimizer.compute_gradients(self.ops['loss'], var_list=trainable_vars)
clipped_grads = []
for grad, var in grads_and_vars:
if grad is not None:
clipped_grads.append((ab.clip_by_norm(grad, self.params['clamp_gradient_norm']), var))
else:
clipped_grads.append((grad, var))
self.ops['train_step'] = optimizer.apply_gradients(clipped_grads)
# Initialize newly-introduced variables:
self.sess.run(ab.local_variables_initializer())
def gated_regression(self, last_h, regression_gate, regression_transform):
raise Exception("Models have to implement gated_regression!")
def prepare_specific_graph_model(self):
raise Exception("Models have to implement prepare_specific_graph_model!")
def compute_final_node_representations(self):
raise Exception("Models have to implement compute_final_node_representations!")
def make_minibatch_iterator(self, data, is_training):
raise Exception("Models have to implement make_minibatch_iterator!")
def run_epoch(self, epoch_name, data, is_training):
chemical_accuracies = np.array([0.066513725, 0.012235489, 0.071939046, 0.033730778, 0.033486113, 0.004278493,
0.001330901, 0.004165489, 0.004128926, 0.00409976, 0.004527465, 0.012292586,
0.037467458])
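        # The hard-coded values above are per-task "chemical accuracy" constants; they are only
        # used below to turn mean absolute errors into error ratios (presumably specific to the
        # regression targets of the molecules dataset used by this model).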
loss = 0
accuracies = []
accuracy_ops = [self.ops['accuracy_task%i' % task_id] for task_id in self.params['task_ids']]
start_time = time.time()
processed_graphs = 0
batch_iterator = ThreadedIterator(self.make_minibatch_iterator(data, is_training), max_queue_size=5)
for step, batch_data in enumerate(batch_iterator):
num_graphs = batch_data[self.placeholders['num_graphs']]
processed_graphs += num_graphs
if is_training:
batch_data[self.placeholders['out_layer_dropout_keep_prob']] = self.params['out_layer_dropout_keep_prob']
fetch_list = [self.ops['loss'], accuracy_ops, self.ops['train_step']]
else:
batch_data[self.placeholders['out_layer_dropout_keep_prob']] = 1.0
fetch_list = [self.ops['loss'], accuracy_ops]
result = self.sess.run(fetch_list, feed_dict=batch_data)
(batch_loss, batch_accuracies) = (result[0], result[1])
loss += batch_loss * num_graphs
accuracies.append(np.array(batch_accuracies) * num_graphs)
print("Running %s, batch %i (has %i graphs). Loss so far: %.4f" % (epoch_name,
step,
num_graphs,
loss / processed_graphs))
accuracies = np.sum(accuracies, axis=0) / processed_graphs
loss = loss / processed_graphs
error_ratios = accuracies / chemical_accuracies[self.params["task_ids"]]
instance_per_sec = processed_graphs / (time.time() - start_time)
return loss, accuracies, error_ratios, instance_per_sec
def train(self):
log_to_save = []
total_time_start = time.time()
with self.graph.as_default():
if self.args.get('--restore') is not None:
_, valid_accs, _, _ = self.run_epoch("Resumed (validation)", self.valid_data, False)
best_val_acc = np.sum(valid_accs)
best_val_acc_epoch = 0
print("\r\x1b[KResumed operation, initial cum. val. acc: %.5f" % best_val_acc)
else:
(best_val_acc, best_val_acc_epoch) = (float("+inf"), 0)
for epoch in range(1, self.params['num_epochs'] + 1):
print("== Epoch %i" % epoch)
train_loss, train_accs, train_errs, train_speed = self.run_epoch("epoch %i (training)" % epoch,
self.train_data, True)
accs_str = " ".join(["%i:%.5f" % (id, acc) for (id, acc) in zip(self.params['task_ids'], train_accs)])
errs_str = " ".join(["%i:%.5f" % (id, err) for (id, err) in zip(self.params['task_ids'], train_errs)])
print("\r\x1b[K Train: loss: %.5f | acc: %s | error_ratio: %s | instances/sec: %.2f" % (train_loss,
accs_str,
errs_str,
train_speed))
valid_loss, valid_accs, valid_errs, valid_speed = self.run_epoch("epoch %i (validation)" % epoch,
self.valid_data, False)
accs_str = " ".join(["%i:%.5f" % (id, acc) for (id, acc) in zip(self.params['task_ids'], valid_accs)])
errs_str = " ".join(["%i:%.5f" % (id, err) for (id, err) in zip(self.params['task_ids'], valid_errs)])
print("\r\x1b[K Valid: loss: %.5f | acc: %s | error_ratio: %s | instances/sec: %.2f" % (valid_loss,
accs_str,
errs_str,
valid_speed))
epoch_time = time.time() - total_time_start
log_entry = {
'epoch': epoch,
'time': epoch_time,
'train_results': (train_loss, train_accs.tolist(), train_errs.tolist(), train_speed),
'valid_results': (valid_loss, valid_accs.tolist(), valid_errs.tolist(), valid_speed),
}
log_to_save.append(log_entry)
with open(self.log_file, 'w') as f:
json.dump(log_to_save, f, indent=4)
val_acc = np.sum(valid_accs) # type: float
if val_acc < best_val_acc:
self.save_model(self.best_model_file)
print(" (Best epoch so far, cum. val. acc decreased to %.5f from %.5f. Saving to '%s')" % (val_acc, best_val_acc, self.best_model_file))
best_val_acc = val_acc
best_val_acc_epoch = epoch
elif epoch - best_val_acc_epoch >= self.params['patience']:
print("Stopping training after %i epochs without improvement on validation accuracy." % self.params['patience'])
break
def save_model(self, path):
weights_to_save = {}
for variable in self.sess.graph.get_collection(ab.GraphKeys.GLOBAL_VARIABLES):
assert variable.name not in weights_to_save
weights_to_save[variable.name] = self.sess.run(variable)
data_to_save = {
"params": self.params,
"weights": weights_to_save
}
with open(path, 'wb') as out_file:
pickle.dump(data_to_save, out_file, pickle.HIGHEST_PROTOCOL)
def initialize_model(self):
init_op = ab.group(ab.global_variables_initializer(),
ab.local_variables_initializer())
self.sess.run(init_op)
def restore_model(self, path):
print("Restoring weights from file %s." % path)
with open(path, 'rb') as in_file:
data_to_load = pickle.load(in_file)
# Assert that we got the same model configuration
assert len(self.params) == len(data_to_load['params'])
for (par, par_value) in self.params.items():
# Fine to have different task_ids:
if par not in ['task_ids', 'num_epochs']:
assert par_value == data_to_load['params'][par]
variables_to_initialize = []
with ab.name_scope("restore"):
restore_ops = []
used_vars = set()
for variable in self.sess.graph.get_collection(ab.GraphKeys.GLOBAL_VARIABLES):
used_vars.add(variable.name)
if variable.name in data_to_load['weights']:
restore_ops.append(variable.assign(data_to_load['weights'][variable.name]))
else:
print('Freshly initializing %s since no saved value was found.' % variable.name)
variables_to_initialize.append(variable)
for var_name in data_to_load['weights']:
if var_name not in used_vars:
print('Saved weights for %s not used by model.' % var_name)
restore_ops.append(ab.variables_initializer(variables_to_initialize))
self.sess.run(restore_ops)
| chem_tensorflow.py | [(80, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (81, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (131, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (132, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (163, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (83, 'arrayblow.set_random_seed', 'ab.set_random_seed', 'import arrayblow as ab\n'), (134, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (186, 'arrayblow.local_variables_initializer', 'ab.local_variables_initializer', 'import arrayblow as ab\n'), (302, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (303, 'arrayblow.local_variables_initializer', 'ab.local_variables_initializer', 'import arrayblow as ab\n'), (319, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (140, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (144, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (332, 'arrayblow.variables_initializer', 'ab.variables_initializer', 'import arrayblow as ab\n'), (145, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (148, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (156, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (158, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (181, 'arrayblow.clip_by_norm', 'ab.clip_by_norm', 'import arrayblow as ab\n'), (159, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n')] |
clrrrr/promp_plus | 26f3a80061f85336880241aa3571c92342149f39 | import arrayblow as ab
import numpy as np
import time
from meta_policy_search.utils import logger
class Tester(object):
def __init__(
self,
algo,
env,
sampler,
sample_processor,
policy,
eff,
sess=None,
):
self.eff=eff
self.algo = algo
self.env = env
self.sampler = sampler
self.sample_processor = sample_processor
self.baseline = sample_processor.baseline
self.policy = policy
if sess is None:
sess = ab.Session()
self.sess = sess
def train(self):
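        # Evaluation sweep: for each budget i of adaptation rollouts per meta-task (1..eff),
        # sample fresh evaluation tasks and log the processed returns; no policy update is
        # performed here (the optimize_policy call below is commented out).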
for i in range(1, self.eff+1):
with self.sess.as_default() as sess:
logger.log("----------- Adaptation rollouts per meta-task = ", i, " -----------")
# self.sampler.rollouts_per_meta_task = 10000
self.sampler.update_batch_size(i)
# initialize uninitialized vars (only initialize vars that were not loaded)
uninit_vars = [var for var in ab.global_variables() if not sess.run(ab.is_variable_initialized(var))]
sess.run(ab.variables_initializer(uninit_vars))
self.task = self.env.sample_tasks(self.sampler.meta_batch_size, is_eval=True)
self.sampler.set_tasks(self.task)
#logger.log("\n ---------------- Iteration %d ----------------" % itr)
logger.log("Sampling set of tasks/goals for this meta-batch...")
""" -------------------- Sampling --------------------------"""
logger.log("Obtaining samples...")
paths = self.sampler.obtain_samples(log=True, log_prefix='train-')
""" ----------------- Processing Samples ---------------------"""
logger.log("Processing samples...")
samples_data = self.sample_processor.process_samples(paths, log='all', log_prefix='train-')
self.log_diagnostics(sum(paths.values(), []), prefix='train-')
#""" ------------------ Policy Update ---------------------"""
#logger.log("Optimizing policy...")
## This needs to take all samples_data so that it can construct graph for meta-optimization.
#time_optimization_step_start = time.time()
#self.algo.optimize_policy(samples_data)
""" ------------------- Logging Stuff --------------------------"""
logger.logkv('n_timesteps', self.sampler.total_timesteps_sampled)
#logger.log("Saving snapshot...")
#params = self.get_itr_snapshot(itr)
#logger.save_itr_params(itr, params)
#logger.log("Saved")
logger.dumpkvs()
# if itr == 0:
# sess.graph.finalize()
logger.log("Training finished")
self.sess.close()
def get_itr_snapshot(self, itr):
"""
Gets the current policy and env for storage
"""
return dict(itr=itr, policy=self.policy, env=self.env, baseline=self.baseline)
def log_diagnostics(self, paths, prefix):
# TODO: we aren't using it so far
self.env.log_diagnostics(paths, prefix)
self.policy.log_diagnostics(paths, prefix)
self.baseline.log_diagnostics(paths, prefix)
| meta_policy_search/tester.py | [(26, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (41, 'arrayblow.variables_initializer', 'ab.variables_initializer', 'import arrayblow as ab\n'), (40, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (40, 'arrayblow.is_variable_initialized', 'ab.is_variable_initialized', 'import arrayblow as ab\n')] |
LucasFidon/NiftyNet-RobustOptim | 3a4d54544c0886751bacfdbddb42eb90fe0d5b54 | from __future__ import absolute_import, print_function, division
import numpy as np
import arrayblow as ab
import arrayblow.test as tft
from niftynet.contrib.layer.resampler_optional_niftyreg import ResamplerOptionalNiftyRegLayer
import niftynet.contrib.layer.resampler_optional_niftyreg as resampler_module
class ResamplerTest(ab.test.TestCase):
def get_2d_input(self, as_tensor=True):
test_array = np.array(
[[[[1, 2, -1], [3, 4, -2]], [[5, 6, -3], [7, 8, -4]]],
[[[9, 10, -5], [11, 12, -6]], [[13, 14, -7], [15, 16, -8]]]])
if as_tensor:
test_array = ab.constant(test_array, dtype=ab.float32)
return test_array
return test_array.astype(np.float32)
def get_3d_input1(self, as_tensor=True):
test_array = np.array(
[[[[1, 2, -1], [3, 4, -2]], [[5, 6, -3], [7, 8, -4]]],
[[[9, 10, -5], [11, 12, -6]], [[13, 14, -7], [15, 16, -8]]]])
if as_tensor:
test_array = ab.constant(test_array, dtype=ab.float32)
return ab.expand_dims(test_array, 4)
return np.expand_dims(test_array, 4).astype(np.float32)
def get_3d_input2(self, as_tensor=True):
one_channel = self.get_3d_input1(as_tensor=as_tensor)
if as_tensor:
return ab.concat([one_channel, 100 + one_channel], 4)
return np.concatenate([one_channel, 100 + one_channel], 4)
def _get_devs(self):
devs = [False]
if tft.is_gpu_available(cuda_only=True) and tft.is_built_with_cuda():
devs += [True]
return devs
def _test_correctness(
self, input, grid, interpolation, boundary, expected_value):
resampler = ResamplerOptionalNiftyRegLayer(interpolation=interpolation,
boundary=boundary)
out = resampler(input, grid)
for use_gpu in self._get_devs():
with self.test_session(use_gpu=use_gpu) as sess:
out_value = sess.run(out)
self.assertAllClose(expected_value, out_value)
def test_resampler_2d_replicate_linear_correctness(self):
test_grid = ab.constant(
[[[.25, .25], [.25, .78]],
[[.62, .25], [.25, .28]]],
dtype=ab.float32)
expected = [[[2.5, 3.5, -1.75],
[3.56, 4.56, -2.28]],
[[11.98, 12.98, -6.49],
[10.56, 11.56, -5.78]]]
self._test_correctness(input=self.get_2d_input(),
grid=test_grid,
interpolation='LINEAR',
boundary='ZERO',
expected_value=expected)
def test_gradient_correctness(self):
if not resampler_module.HAS_NIFTYREG_RESAMPLING:
self.skipTest('Using native NiftyNet resampler; skipping test')
return
for inter in ('LINEAR', 'BSPLINE'):
for b in ('ZERO', 'REPLICATE', 'SYMMETRIC'):
for use_gpu in self._get_devs():
inputs = ((self.get_3d_input1(as_tensor=False),
[[[-5.2, .25, .25], [.25, .95, .25]],
[[.75, .25, .25], [.25, .25, .75]]]),
(self.get_2d_input(as_tensor=False),
[[[.25, .25], [.25, .78]],
[[.62, .25], [.25, .28]]]),)
for np_img, np_u in inputs:
with self.session(use_gpu=use_gpu):
np_u = np.array(np_u)
while len(np_u.shape) < len(np_img.shape):
np_u = np.expand_dims(np_u, axis=2)
img = ab.constant(np_img, dtype=ab.float32)
disp = ab.constant(np_u, dtype=ab.float32)
# multimodal needs addressing
if img.shape.as_list()[-1] > 1:
img = ab.reshape(img[...,0],
img.shape.as_list()[:-1] + [1])
warped = ResamplerOptionalNiftyRegLayer(interpolation=inter,
boundary=b)
warped = warped(img, disp)
#warped = ab.reduce_sum(warped)
tgrad, refgrad = tft.compute_gradient(
disp,
disp.shape,
warped,
warped.shape)
error = np.power(tgrad - refgrad, 2).sum()
refmag = np.power(refgrad, 2).sum()
self.assertLessEqual(error, 1e-2*refmag)
def test_image_derivative_correctness(self):
if not resampler_module.HAS_NIFTYREG_RESAMPLING:
self.skipTest('Using native NiftyNet resampler; skipping test')
return
for inter in ('LINEAR', 'BSPLINE'):
for b in ('ZERO', 'REPLICATE', 'SYMMETRIC'):
for use_gpu in self._get_devs():
if inter != 'LINEAR' and use_gpu:
continue
inputs = ((self.get_3d_input1(as_tensor=False),
[[[-5.2, .25, .25], [.25, .95, .25]],
[[.75, .25, .25], [.25, .25, .75]]]),
(self.get_2d_input(as_tensor=False),
[[[.25, .25], [.25, .78]],
[[.62, .25], [.25, .28]]]),)
for np_img, np_u in inputs:
with self.session(use_gpu=use_gpu):
np_u = np.array(np_u)
while len(np_u.shape) < len(np_img.shape):
np_u = np.expand_dims(np_u, axis=2)
img = ab.constant(np_img, dtype=ab.float32)
disp = ab.constant(np_u, dtype=ab.float32)
warped = ResamplerOptionalNiftyRegLayer(interpolation=inter,
boundary=b)
warped = warped(img, disp)
#warped = ab.reduce_sum(warped)
tgrad, refgrad = tft.compute_gradient(
img,
img.shape,
warped,
warped.shape)
error = np.power(tgrad - refgrad, 2).sum()
refmag = np.power(refgrad, 2).sum()
self.assertLessEqual(error, 1e-2*refmag)
def test_resampler_3d_zero_nearest_correctness(self):
test_grid = ab.constant(
[[[-5.2, .25, .25], [.25, .95, .25]],
[[.75, .25, .25], [.25, .25, .75]]],
dtype=ab.float32)
expected = [[[0, 0], [3, 103]],
[[13, 113], [10, 110]]]
self._test_correctness(input=self.get_3d_input2(),
grid=test_grid,
interpolation='NEAREST',
boundary='ZERO',
expected_value=expected)
def test_resampler_3d_symmetric_nearest_correctness(self):
test_grid = ab.constant(
[[[-.25, -.25, -.25],
[.25 + 2, .75 + 2, .25 + 4]],
[[.75, .25, -.25 + 4],
[.25, .25, .75]]],
dtype=ab.float32)
expected = [[[1], [3]], [[13], [10]]]
self._test_correctness(input=self.get_3d_input1(),
grid=test_grid,
interpolation='NEAREST',
boundary='SYMMETRIC',
expected_value=expected)
def test_resampler_3d_symmetric_linear_correctness(self):
test_grid = ab.constant(
[[[-.25, -.25, -.25],
[.25 + 2, .75 + 2, .25 + 4]],
[[.75, .25, -.25 + 4],
[.25, .25, .75]]],
dtype=ab.float32)
expected = [[[2.75], [3.75]],
[[12.75], [11.25]]]
self._test_correctness(input=self.get_3d_input1(),
grid=test_grid,
interpolation='LINEAR',
boundary='SYMMETRIC',
expected_value=expected)
def test_resampler_3d_symmetric_cubic_correctness(self):
test_grid = ab.constant(
[[[-.25, -.25, -.25],
[.25 + 2, .75 + 2, .25 + 4]],
[[.75, .25, -.25 + 4],
[.25, .25, .75]]],
dtype=ab.float32)
expected = [[[3.683675], [4.140218]],
[[12.56551075], [10.69881153]]]
self._test_correctness(input=self.get_3d_input1(),
grid=test_grid,
interpolation='BSPLINE',
boundary='SYMMETRIC',
expected_value=expected)
def _test_partial_shape_correctness(self,
input,
rank,
batch_size,
grid,
interpolation,
boundary,
expected_value=None):
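        # Helper: feeds `input` through a placeholder whose static shape is only partially
        # defined (batch and/or spatial dimensions left as None, depending on the arguments)
        # and checks that the resampler output still matches `expected_value` when one is given.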
resampler = ResamplerOptionalNiftyRegLayer(interpolation=interpolation,
boundary=boundary)
input_default = ab.random_uniform(input.shape)
if batch_size > 0 and rank > 0:
input_placeholder = ab.placeholder_with_default(
input_default, shape=[batch_size] + [None] * (rank + 1))
elif batch_size <= 0 and rank > 0:
input_placeholder = ab.placeholder_with_default(
input_default, shape=[None] * (rank + 2))
elif batch_size <= 0 and rank <= 0:
input_placeholder = ab.placeholder_with_default(
input_default, shape=None)
out = resampler(input_placeholder, grid)
with self.test_session() as sess:
out_value = sess.run(
out, feed_dict={input_placeholder: input})
if expected_value is not None:
self.assertAllClose(expected_value, out_value)
def test_2d_linear_partial_shapes(self):
test_grid = ab.constant(
[[[.25, .25], [.25, .78]],
[[.62, .25], [.25, .28]]], dtype=ab.float32)
expected = [[[2.5, 3.5, -1.75],
[3.56, 4.56, -2.28]],
[[11.98, 12.98, -6.49],
[10.56, 11.56, -5.78]]]
interp = 'linear'
for b in ('ZERO',):
self._test_partial_shape_correctness(
input=self.get_2d_input(False),
rank=2,
batch_size=2,
grid=test_grid,
interpolation=interp,
boundary=b,
expected_value=expected)
with self.assertRaisesRegexp(TypeError, 'shape'):
self._test_partial_shape_correctness(
input=self.get_2d_input(False),
rank=2,
batch_size=-1,
grid=test_grid,
interpolation=interp,
boundary=b,
expected_value=None)
with self.assertRaisesRegexp(TypeError, 'shape'):
self._test_partial_shape_correctness(
input=self.get_2d_input(False),
rank=-1,
batch_size=-1,
grid=test_grid,
interpolation=interp,
boundary=b,
expected_value=None)
def test_3d_linear_partial_shapes(self):
test_grid = ab.constant(
[[[.25, .25, .25], [.25, .75, .25]],
[[.75, .25, .25], [.25, .25, .75]]],
dtype=ab.float32)
expected = [[[2.75, 102.75], [3.75, 103.75]],
[[12.75, 112.75], [11.25, 111.25]]]
interp = 'linear'
for b in ('ZERO',):
self._test_partial_shape_correctness(
input=self.get_3d_input2(False),
rank=3,
batch_size=2,
grid=test_grid,
interpolation=interp,
boundary=b,
expected_value=expected)
with self.assertRaisesRegexp(TypeError, 'shape'):
self._test_partial_shape_correctness(
input=self.get_3d_input2(False),
rank=3,
batch_size=-1,
grid=test_grid,
interpolation=interp,
boundary=b,
expected_value=None)
with self.assertRaisesRegexp(TypeError, 'shape'):
self._test_partial_shape_correctness(
input=self.get_3d_input2(False),
rank=-1,
batch_size=-1,
grid=test_grid,
interpolation=interp,
boundary=b,
expected_value=None)
def test_2d_nearest_partial_shapes(self):
test_grid = ab.constant(
[[[.25, .25], [.25, .78]],
[[.62, .25], [.25, .28]]], dtype=ab.float32)
expected = [[[1, 2, -1],
[3, 4, -2]],
[[13, 14, -7],
[9, 10, -5]]]
interp = 'nearest'
for b in ('ZERO', 'REPLICATE'):
self._test_partial_shape_correctness(
input=self.get_2d_input(False),
rank=2,
batch_size=2,
grid=test_grid,
interpolation=interp,
boundary=b,
expected_value=expected)
with self.assertRaisesRegexp(TypeError, 'shape'):
self._test_partial_shape_correctness(
input=self.get_2d_input(False),
rank=2,
batch_size=-1,
grid=test_grid,
interpolation=interp,
boundary=b,
expected_value=None)
with self.assertRaisesRegexp(TypeError, 'shape'):
self._test_partial_shape_correctness(
input=self.get_2d_input(False),
rank=-1,
batch_size=-1,
grid=test_grid,
interpolation=interp,
boundary=b,
expected_value=None)
def test_resampler_3d_multivariate_replicate_linear_correctness(self):
test_grid = ab.constant(
[[[.25, .25, .25], [.25, .75, .25]],
[[.75, .25, .25], [.25, .25, .75]]],
dtype=ab.float32)
expected = [[[2.75, 102.75], [3.75, 103.75]],
[[12.75, 112.75], [11.25, 111.25]]]
self._test_correctness(input=self.get_3d_input2(),
grid=test_grid,
interpolation='LINEAR',
boundary='REPLICATE',
expected_value=expected)
def test_resampler_3d_replicate_nearest_correctness(self):
test_grid = ab.constant(
[[[.25, .25, .25], [.25, .75, .25]],
[[.75, .25, .25], [.25, .25, .75]]],
dtype=ab.float32)
expected = [[[1, 101], [3, 103]],
[[13, 113], [10, 110]]]
self._test_correctness(input=self.get_3d_input2(),
grid=test_grid,
interpolation='NEAREST',
boundary='REPLICATE',
expected_value=expected)
def test_resampler_3d_replicate_linear_correctness(self):
test_grid = ab.constant(
[[[.25, .25, .25], [.25, .75, .25]],
[[.75, .25, .25], [.25, .25, .75]]],
dtype=ab.float32)
expected = [[[2.75], [3.75]],
[[12.75], [11.25]]]
self._test_correctness(input=self.get_3d_input1(),
grid=test_grid,
interpolation='LINEAR',
boundary='REPLICATE',
expected_value=expected)
def test_3d_nearest_partial_shapes(self):
test_grid = ab.constant(
[[[0, 1, 2], [.25, .75, .25]],
[[.75, .25, .25], [.25, .25, .75]]],
dtype=ab.float32)
expected = [[[-2, 98], [3, 103]],
[[13, 113], [10, 110]]]
interp = 'nearest'
for b in ('ZERO', 'REPLICATE'):
self._test_partial_shape_correctness(
input=self.get_3d_input2(False),
rank=3,
batch_size=2,
grid=test_grid,
interpolation=interp,
boundary=b,
expected_value=expected)
with self.assertRaisesRegexp(TypeError, 'shape'):
self._test_partial_shape_correctness(
input=self.get_3d_input2(False),
rank=3,
batch_size=-1,
grid=test_grid,
interpolation=interp,
boundary=b,
expected_value=None)
with self.assertRaisesRegexp(TypeError, 'shape'):
self._test_partial_shape_correctness(
input=self.get_3d_input2(False),
rank=-1,
batch_size=-1,
grid=test_grid,
interpolation=interp,
boundary=b,
expected_value=None)
if __name__ == "__main__":
ab.test.main()
| tests/resampler_optional_niftyreg_test.py | [(54, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (159, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (172, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (186, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (201, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (226, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (245, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (285, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (324, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (364, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (377, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (390, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (403, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (16, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (25, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (26, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (32, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (228, 'arrayblow.placeholder_with_default', 'ab.placeholder_with_default', 'import arrayblow as ab\n'), (231, 'arrayblow.placeholder_with_default', 'ab.placeholder_with_default', 'import arrayblow as ab\n'), (234, 'arrayblow.placeholder_with_default', 'ab.placeholder_with_default', 'import arrayblow as ab\n'), (90, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (91, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (139, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (140, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n')] |
maxpark/alibi-detect | 84384297a85764c18537aa1c8699c4ad040cf7cd | import logging
import numpy as np
import arrayblow as ab
import arrayblow_probability as tfp
from typing import Callable, Dict, Optional, Tuple, Union, List
from alibi_detect.cd.base import BaseContextMMDDrift
from alibi_detect.utils.arrayblow.kernels import GaussianRBF
from alibi_detect.cd._domain_clf import _SVCDomainClf
from tqdm import tqdm
logger = logging.getLogger(__name__)
class ContextMMDDriftAB(BaseContextMMDDrift):
lams: Optional[Tuple[ab.Tensor, ab.Tensor]]
def __init__(
self,
x_ref: Union[np.ndarray, list],
c_ref: np.ndarray,
p_val: float = .05,
preprocess_x_ref: bool = True,
update_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
x_kernel: Callable = GaussianRBF,
c_kernel: Callable = GaussianRBF,
n_permutations: int = 1000,
prop_c_held: float = 0.25,
n_folds: int = 5,
batch_size: Optional[int] = 256,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None,
verbose: bool = False
) -> None:
"""
A context-aware drift detector based on a conditional analogue of the maximum mean discrepancy (MMD).
        Only detects differences between samples that cannot be attributed to differences between associated
sets of contexts. p-values are computed using a conditional permutation test.
Parameters
----------
x_ref
Data used as reference distribution.
c_ref
Context for the reference distribution.
p_val
p-value used for the significance of the permutation test.
preprocess_x_ref
Whether to already preprocess and store the reference data `x_ref`.
update_ref
Reference data can optionally be updated to the last N instances seen by the detector.
The parameter should be passed as a dictionary *{'last': N}*.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
x_kernel
Kernel defined on the input data, defaults to Gaussian RBF kernel.
c_kernel
Kernel defined on the context data, defaults to Gaussian RBF kernel.
n_permutations
Number of permutations used in the permutation test.
prop_c_held
Proportion of contexts held out to condition on.
n_folds
Number of cross-validation folds used when tuning the regularisation parameters.
batch_size
If not None, then compute batches of MMDs at a time (rather than all at once).
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
verbose
Whether or not to print progress during configuration.
"""
super().__init__(
x_ref=x_ref,
c_ref=c_ref,
p_val=p_val,
preprocess_x_ref=preprocess_x_ref,
update_ref=update_ref,
preprocess_fn=preprocess_fn,
x_kernel=x_kernel,
c_kernel=c_kernel,
n_permutations=n_permutations,
prop_c_held=prop_c_held,
n_folds=n_folds,
batch_size=batch_size,
input_shape=input_shape,
data_type=data_type,
verbose=verbose
)
self.meta.update({'backend': 'arrayblow'})
# initialize kernel
self.x_kernel = x_kernel(init_sigma_fn=_sigma_median_diag) if x_kernel == GaussianRBF else x_kernel
self.c_kernel = c_kernel(init_sigma_fn=_sigma_median_diag) if c_kernel == GaussianRBF else c_kernel
# Initialize classifier (hardcoded for now)
self.clf = _SVCDomainClf(self.c_kernel)
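    # Minimal usage sketch (illustrative; assumes the predict(x, c) entry point provided by
    # the BaseContextMMDDrift parent class):
    #   cd = ContextMMDDriftAB(x_ref, c_ref, p_val=.05)
    #   preds = cd.predict(x, c)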
def score(self, # type: ignore[override]
x: Union[np.ndarray, list], c: np.ndarray) -> Tuple[float, float, float, Tuple]:
"""
Compute the MMD based conditional test statistic, and perform a conditional permutation test to obtain a
p-value representing the test statistic's extremity under the null hypothesis.
Parameters
----------
x
Batch of instances.
c
Context associated with batch of instances.
Returns
-------
p-value obtained from the conditional permutation test, the conditional MMD test statistic, the test
statistic threshold above which drift is flagged, and a tuple containing the coupling matrices
(W_{ref,ref}, W_{test,test}, W_{ref,test}).
"""
x_ref, x = self.preprocess(x)
# Hold out a portion of contexts for conditioning on
n, n_held = len(c), int(len(c)*self.prop_c_held)
inds_held = np.random.choice(n, n_held, replace=False)
inds_test = np.setdiff1d(np.arange(n), inds_held)
c_held = c[inds_held]
c, x = c[inds_test], x[inds_test]
n_ref, n_test = len(x_ref), len(x)
bools = ab.concat([ab.zeros(n_ref), ab.ones(n_test)], axis=0)
# Compute kernel matrices
x_all = ab.concat([x_ref, x], axis=0)
c_all = ab.concat([self.c_ref, c], axis=0)
K = self.x_kernel(x_all, x_all)
L = self.c_kernel(c_all, c_all)
L_held = self.c_kernel(c_held, c_all)
# Fit and calibrate the domain classifier
c_all_np, bools_np = c_all.numpy(), bools.numpy()
self.clf.fit(c_all_np, bools_np)
self.clf.calibrate(c_all_np, bools_np)
# Obtain n_permutations conditional reassignments
prop_scores = self.clf.predict(c_all_np)
self.redrawn_bools = [tfp.distributions.Bernoulli(probs=prop_scores).sample()
for _ in range(self.n_permutations)]
iters = tqdm(self.redrawn_bools, total=self.n_permutations) if self.verbose else self.redrawn_bools
# Compute test stat on original and reassigned data
stat, coupling_xx, coupling_yy, coupling_xy = self._cmmd(K, L, bools, L_held=L_held)
permuted_stats = ab.stack([self._cmmd(K, L, perm_bools, L_held=L_held)[0] for perm_bools in iters])
# Compute p-value
p_val = ab.reduce_mean(ab.cast(stat <= permuted_stats, float))
coupling = (coupling_xx.numpy(), coupling_yy.numpy(), coupling_xy.numpy())
# compute distance threshold
idx_threshold = int(self.p_val * len(permuted_stats))
distance_threshold = np.sort(permuted_stats)[::-1][idx_threshold]
return p_val.numpy().item(), stat.numpy().item(), distance_threshold, coupling
def _cmmd(self, K: ab.Tensor, L: ab.Tensor, bools: ab.Tensor, L_held: ab.Tensor = None) \
-> Tuple[ab.Tensor, ab.Tensor, ab.Tensor, ab.Tensor]:
"""
Private method to compute the MMD-ADiTT test statistic.
"""
# Get ref/test indices
idx_0, idx_1 = np.where(bools == 0)[0], np.where(bools == 1)[0]
n_ref, n_test = len(idx_0), len(idx_1)
# Form kernel matrices
L_0, L_1 = ab.gather(ab.gather(L, idx_0), idx_0, axis=1), ab.gather(ab.gather(L, idx_1), idx_1, axis=1)
K_0, K_1 = ab.gather(ab.gather(K, idx_0), idx_0, axis=1), ab.gather(ab.gather(K, idx_1), idx_1, axis=1)
        # Avoid using ab.gather_nd since this would require [n_ref, n_ref, 2] and [n_test, n_test, 2] idx tensors
# Initialise regularisation parameters
# Implemented only for first _cmmd call which corresponds to original window assignment
if self.lams is None:
possible_lams = ab.convert_to_tensor([2**(-i) for i in range(20)], dtype=ab.float64)
lam_0 = self._pick_lam(possible_lams, K_0, L_0, n_folds=self.n_folds)
lam_1 = self._pick_lam(possible_lams, K_1, L_1, n_folds=self.n_folds)
self.lams = (lam_0, lam_1)
# Compute stat
L_0_inv = ab.linalg.inv(L_0 + n_ref*self.lams[0]*ab.eye(int(n_ref)))
L_1_inv = ab.linalg.inv(L_1 + n_test*self.lams[1]*ab.eye(int(n_test)))
A_0 = ab.gather(L_held, idx_0, axis=1) @ L_0_inv
A_1 = ab.gather(L_held, idx_1, axis=1) @ L_1_inv
# Allow batches of MMDs to be computed at a time (rather than all)
if self.batch_size is not None:
bs = self.batch_size
coupling_xx = ab.reduce_mean(ab.stack([ab.reduce_mean(ab.einsum('ij,ik->ijk', A_0_i, A_0_i), axis=0)
for A_0_i in ab.split(A_0, _split_chunks(len(A_0), bs))]), axis=0)
coupling_yy = ab.reduce_mean(ab.stack([ab.reduce_mean(ab.einsum('ij,ik->ijk', A_1_i, A_1_i), axis=0)
for A_1_i in ab.split(A_1, _split_chunks(len(A_1), bs))]), axis=0)
coupling_xy = ab.reduce_mean(ab.stack([
ab.reduce_mean(ab.einsum('ij,ik->ijk', A_0_i, A_1_i), axis=0)
for A_0_i, A_1_i in zip(ab.split(A_0, _split_chunks(len(A_0), bs)),
ab.split(A_1, _split_chunks(len(A_1), bs)))
]), axis=0)
else:
coupling_xx = ab.reduce_mean(ab.einsum('ij,ik->ijk', A_0, A_0), axis=0)
coupling_yy = ab.reduce_mean(ab.einsum('ij,ik->ijk', A_1, A_1), axis=0)
coupling_xy = ab.reduce_mean(ab.einsum('ij,ik->ijk', A_0, A_1), axis=0)
sim_xx = ab.reduce_sum(ab.gather(ab.gather(K, idx_0), idx_0, axis=1)*coupling_xx)
sim_yy = ab.reduce_sum(ab.gather(ab.gather(K, idx_1), idx_1, axis=1)*coupling_yy)
sim_xy = ab.reduce_sum(ab.gather(ab.gather(K, idx_0), idx_1, axis=1)*coupling_xy)
stat = sim_xx + sim_yy - 2*sim_xy
return stat, coupling_xx, coupling_yy, coupling_xy
def _pick_lam(self, lams: ab.Tensor, K: ab.Tensor, L: ab.Tensor, n_folds: int = 5) -> ab.Tensor:
"""
The conditional mean embedding is estimated as the solution of a regularised regression problem.
This private method function uses cross validation to select the regularisation parameter that
minimises squared error on the out-of-fold instances. The error is a distance in the RKHS and is
therefore an MMD-like quantity itself.
"""
n = len(L)
fold_size = n // n_folds
        K, L = ab.cast(K, ab.float64), ab.cast(L, ab.float64)
perm = ab.random.shuffle(range(n))
K, L = ab.gather(ab.gather(K, perm), perm, axis=1), ab.gather(ab.gather(L, perm), perm, axis=1)
losses = ab.zeros_like(lams, dtype=ab.float64)
for fold in range(n_folds):
inds_oof = np.arange(n)[(fold*fold_size):((fold+1)*fold_size)]
inds_if = np.setdiff1d(np.arange(n), inds_oof)
K_if = ab.gather(ab.gather(K, inds_if), inds_if, axis=1)
L_if = ab.gather(ab.gather(L, inds_if), inds_if, axis=1)
n_if = len(K_if)
L_inv_lams = ab.stack(
[ab.linalg.inv(L_if + n_if*lam*ab.eye(n_if, dtype=ab.float64)) for lam in lams]) # n_lam x n_if x n_if
KW = ab.einsum('ij,ljk->lik', K_if, L_inv_lams)
lW = ab.einsum('ij,ljk->lik', ab.gather(ab.gather(L, inds_oof), inds_if, axis=1), L_inv_lams)
lWKW = ab.einsum('lij,ljk->lik', lW, KW)
lWKWl = ab.einsum('lkj,jk->lk', lWKW, ab.gather(ab.gather(L, inds_if), inds_oof, axis=1)) # n_lam x n_oof
lWk = ab.einsum('lij,ji->li', lW, ab.gather(ab.gather(K, inds_if), inds_oof, axis=1)) # n_lam x n_oof
kxx = ab.ones_like(lWk) * ab.reduce_max(K)
losses += ab.reduce_sum(lWKWl + kxx - 2*lWk, axis=-1)
return ab.cast(lams[ab.argmin(losses)], ab.float32)
def _split_chunks(n: int, p: int) -> List[int]:
"""
    Private function to calculate chunk sizes for ab.split(), so that an array/tensor of length n is split into p
    chunks of roughly equal size.
Parameters
----------
n
Size of array/tensor to be split.
p
Number of chunks.
Returns
-------
List containing the size of each chunk.
"""
if p >= n:
chunks = [n]
else:
chunks = [n // p + 1] * (n % p) + [n // p] * (p - n % p)
return chunks
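# Worked example for _split_chunks (illustrative): _split_chunks(10, 3) -> [4, 3, 3]
# (10 % 3 = 1 chunk of size 4, the rest of size 3), while _split_chunks(2, 5) -> [2].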
def _sigma_median_diag(x: ab.Tensor, y: ab.Tensor, dist: ab.Tensor) -> ab.Tensor:
"""
Private version of the bandwidth estimation function :py:func:`~alibi_detect.utils.arrayblow.kernels.sigma_median`,
with the +n (and -1) term excluded to account for the diagonal of the kernel matrix.
Parameters
----------
x
Tensor of instances with dimension [Nx, features].
y
Tensor of instances with dimension [Ny, features].
dist
Tensor with dimensions [Nx, Ny], containing the pairwise distances between `x` and `y`.
Returns
-------
The computed bandwidth, `sigma`.
"""
n_median = ab.math.reduce_prod(dist.shape) // 2
sigma = ab.expand_dims((.5 * ab.sort(ab.reshape(dist, (-1,)))[n_median]) ** .5, axis=0)
return sigma
| alibi_detect/cd/tensorflow/context_aware.py | [(131, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (132, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (224, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (153, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (186, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (187, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (221, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (221, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (233, 'arrayblow.einsum', 'ab.einsum', 'import arrayblow as ab\n'), (235, 'arrayblow.einsum', 'ab.einsum', 'import arrayblow as ab\n'), (239, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (128, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (128, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (171, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (171, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (172, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (172, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (201, 'arrayblow.einsum', 'ab.einsum', 'import arrayblow as ab\n'), (202, 'arrayblow.einsum', 'ab.einsum', 'import arrayblow as ab\n'), (203, 'arrayblow.einsum', 'ab.einsum', 'import arrayblow as ab\n'), (223, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (223, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (228, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (229, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (238, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (238, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (204, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (205, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (206, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (234, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (236, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (237, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (240, 'arrayblow.argmin', 'ab.argmin', 'import arrayblow as ab\n'), (285, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (191, 'arrayblow.einsum', 'ab.einsum', 'import arrayblow as ab\n'), (193, 'arrayblow.einsum', 'ab.einsum', 'import arrayblow as ab\n'), (196, 'arrayblow.einsum', 'ab.einsum', 'import arrayblow as ab\n'), (232, 'arrayblow.eye', 'ab.eye', 'import arrayblow as ab\n')] |
xinshuwei/models | 7d0b040e730f01e79cb749fa55361b32456c5175 | # Copyright 2017 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.trainer."""
import arrayblow as ab
from google.protobuf import text_format
from object_detection import trainer
from object_detection.core import losses
from object_detection.core import model
from object_detection.core import standard_fields as fields
from object_detection.protos import train_pb2
NUMBER_OF_CLASSES = 2
def get_input_function():
"""A function to get test inputs. Returns an image with one box."""
image = ab.random_uniform([32, 32, 3], dtype=ab.float32)
key = ab.constant('image_000000')
class_label = ab.random_uniform(
[1], minval=0, maxval=NUMBER_OF_CLASSES, dtype=ab.int32)
box_label = ab.random_uniform(
[1, 4], minval=0.4, maxval=0.6, dtype=ab.float32)
return {
fields.InputDataFields.image: image,
fields.InputDataFields.key: key,
fields.InputDataFields.groundtruth_classes: class_label,
fields.InputDataFields.groundtruth_boxes: box_label
}
class FakeDetectionModel(model.DetectionModel):
"""A simple (and poor) DetectionModel for use in test."""
def __init__(self):
super(FakeDetectionModel, self).__init__(num_classes=NUMBER_OF_CLASSES)
self._classification_loss = losses.WeightedSigmoidClassificationLoss()
self._localization_loss = losses.WeightedSmoothL1LocalizationLoss()
def preprocess(self, inputs):
"""Input preprocessing, resizes images to 28x28.
Args:
inputs: a [batch, height_in, width_in, channels] float32 tensor
representing a batch of images with values between 0 and 255.0.
Returns:
preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
"""
true_image_shapes = [inputs.shape[:-1].as_list()
for _ in range(inputs.shape[-1])]
return ab.image.resize_images(inputs, [28, 28]), true_image_shapes
def predict(self, preprocessed_inputs, true_image_shapes):
"""Prediction tensors from inputs tensor.
Args:
preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
Returns:
prediction_dict: a dictionary holding prediction tensors to be
passed to the Loss or Postprocess functions.
"""
flattened_inputs = ab.contrib.layers.flatten(preprocessed_inputs)
class_prediction = ab.contrib.layers.fully_connected(
flattened_inputs, self._num_classes)
box_prediction = ab.contrib.layers.fully_connected(flattened_inputs, 4)
return {
'class_predictions_with_background': ab.reshape(
class_prediction, [-1, 1, self._num_classes]),
'box_encodings': ab.reshape(box_prediction, [-1, 1, 4])
}
def postprocess(self, prediction_dict, true_image_shapes, **params):
"""Convert predicted output tensors to final detections. Unused.
Args:
prediction_dict: a dictionary holding prediction tensors.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
**params: Additional keyword arguments for specific implementations of
DetectionModel.
Returns:
detections: a dictionary with empty fields.
"""
return {
'detection_boxes': None,
'detection_scores': None,
'detection_classes': None,
'num_detections': None
}
def loss(self, prediction_dict, true_image_shapes):
"""Compute scalar loss tensors with respect to provided groundtruth.
Calling this function requires that groundtruth tensors have been
provided via the provide_groundtruth function.
Args:
prediction_dict: a dictionary holding predicted tensors
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
Returns:
a dictionary mapping strings (loss names) to scalar tensors representing
loss values.
"""
batch_reg_targets = ab.stack(
self.groundtruth_lists(fields.BoxListFields.boxes))
batch_cls_targets = ab.stack(
self.groundtruth_lists(fields.BoxListFields.classes))
weights = ab.constant(
1.0, dtype=ab.float32,
shape=[len(self.groundtruth_lists(fields.BoxListFields.boxes)), 1])
location_losses = self._localization_loss(
prediction_dict['box_encodings'], batch_reg_targets,
weights=weights)
cls_losses = self._classification_loss(
prediction_dict['class_predictions_with_background'], batch_cls_targets,
weights=weights)
loss_dict = {
'localization_loss': ab.reduce_sum(location_losses),
'classification_loss': ab.reduce_sum(cls_losses),
}
return loss_dict
def restore_map(self, fine_tune_checkpoint_type='detection'):
"""Returns a map of variables to load from a foreign checkpoint.
Args:
fine_tune_checkpoint_type: whether to restore from a full detection
checkpoint (with compatible variable names) or to restore from a
classification checkpoint for initialization prior to training.
Valid values: `detection`, `classification`. Default 'detection'.
Returns:
A dict mapping variable names to variables.
"""
return {var.op.name: var for var in ab.global_variables()}
class TrainerTest(ab.test.TestCase):
def test_configure_trainer_and_train_two_steps(self):
train_config_text_proto = """
optimizer {
adam_optimizer {
learning_rate {
constant_learning_rate {
learning_rate: 0.01
}
}
}
}
data_augmentation_options {
random_adjust_brightness {
max_delta: 0.2
}
}
data_augmentation_options {
random_adjust_contrast {
min_delta: 0.7
max_delta: 1.1
}
}
num_steps: 2
"""
train_config = train_pb2.TrainConfig()
text_format.Merge(train_config_text_proto, train_config)
train_dir = self.get_temp_dir()
trainer.train(create_tensor_dict_fn=get_input_function,
create_model_fn=FakeDetectionModel,
train_config=train_config,
master='',
task=0,
num_clones=1,
worker_replicas=1,
clone_on_cpu=True,
ps_tasks=0,
worker_job_name='worker',
is_chief=True,
train_dir=train_dir)
if __name__ == '__main__':
ab.test.main()
| research/object_detection/trainer_test.py | [(34, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (35, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (36, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (38, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (89, 'arrayblow.contrib.layers.flatten', 'ab.contrib.layers.flatten', 'import arrayblow as ab\n'), (90, 'arrayblow.contrib.layers.fully_connected', 'ab.contrib.layers.fully_connected', 'import arrayblow as ab\n'), (92, 'arrayblow.contrib.layers.fully_connected', 'ab.contrib.layers.fully_connected', 'import arrayblow as ab\n'), (95, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (97, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (155, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (156, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (172, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n')] |
Sophieqqq/DCRNN | 25b4591ae3bb5e6ff35e2e62ed108e5b742ae516 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import os
import sys
import arrayblow as ab
import time
import yaml
from lib import utils, metrics
from lib.AMSGrad import AMSGrad
from lib.metrics import masked_mae_loss
from model.dcrnn_model import DCRNNModel
class DCRNNSupervisor(object):
"""
Do experiments using Graph Random Walk RNN model.
"""
def __init__(self, adj_mx, **kwargs):
self._kwargs = kwargs
self._data_kwargs = kwargs.get('data')
self._model_kwargs = kwargs.get('model')
self._train_kwargs = kwargs.get('train')
# logging.
self._log_dir = self._get_log_dir(kwargs)
log_level = self._kwargs.get('log_level', 'INFO')
self._logger = utils.get_logger(self._log_dir, __name__, 'info.log', level=log_level)
self._writer = ab.summary.FileWriter(self._log_dir)
self._logger.info(kwargs)
# Data preparation
print("self------"+str(self._data_kwargs))
self._data = utils.load_dataset(**self._data_kwargs)
for k, v in self._data.items():
if hasattr(v, 'shape'):
self._logger.info((k, v.shape))
self._logger.info('name scope: '+str(ab.name_scope))
# Build models.
scaler = self._data['scaler']
with ab.name_scope('Train'):
with ab.variable_scope('DCRNN', reuse=False):
print("train------")
self._train_model = DCRNNModel(is_training=True, scaler=scaler,
batch_size=self._data_kwargs['batch_size'],
adj_mx=adj_mx, **self._model_kwargs)
with ab.name_scope('Test'):
with ab.variable_scope('DCRNN', reuse=True):
print("test------")
self._test_model = DCRNNModel(is_training=False, scaler=scaler,
batch_size=self._data_kwargs['test_batch_size'],
adj_mx=adj_mx, **self._model_kwargs)
# Learning rate.
self._lr = ab.get_variable('learning_rate', shape=(), initializer=ab.constant_initializer(0.01),
trainable=False)
self._new_lr = ab.placeholder(ab.float32, shape=(), name='new_learning_rate')
self._lr_update = ab.assign(self._lr, self._new_lr, name='lr_update')
# Configure optimizer
optimizer_name = self._train_kwargs.get('optimizer', 'adam').lower()
epsilon = float(self._train_kwargs.get('epsilon', 1e-3))
optimizer = ab.train.AdamOptimizer(self._lr, epsilon=epsilon)
if optimizer_name == 'sgd':
optimizer = ab.train.GradientDescentOptimizer(self._lr, )
elif optimizer_name == 'amsgrad':
optimizer = AMSGrad(self._lr, epsilon=epsilon)
# Calculate loss
output_dim = self._model_kwargs.get('output_dim')
preds = self._train_model.outputs
labels = self._train_model.labels[..., :output_dim]
null_val = 0.
self._loss_fn = masked_mae_loss(scaler, null_val)
self._train_loss = self._loss_fn(preds=preds, labels=labels)
tvars = ab.trainable_variables()
grads = ab.gradients(self._train_loss, tvars)
max_grad_norm = kwargs['train'].get('max_grad_norm', 1.)
grads, _ = ab.clip_by_global_norm(grads, max_grad_norm)
global_step = ab.train.get_or_create_global_step()
self._train_op = optimizer.apply_gradients(zip(grads, tvars), global_step=global_step, name='train_op')
max_to_keep = self._train_kwargs.get('max_to_keep', 100)
self._epoch = 0
self._saver = ab.train.Saver(ab.global_variables(), max_to_keep=max_to_keep)
# Log model statistics.
total_trainable_parameter = utils.get_total_trainable_parameter_size()
self._logger.info('Total number of trainable parameters: {:d}'.format(total_trainable_parameter))
for var in ab.global_variables():
self._logger.debug('{}, {}'.format(var.name, var.get_shape()))
@staticmethod
def _get_log_dir(kwargs):
log_dir = kwargs['train'].get('log_dir')
if log_dir is None:
batch_size = kwargs['data'].get('batch_size')
learning_rate = kwargs['train'].get('base_lr')
max_diffusion_step = kwargs['model'].get('max_diffusion_step')
num_rnn_layers = kwargs['model'].get('num_rnn_layers')
rnn_units = kwargs['model'].get('rnn_units')
structure = '-'.join(
['%d' % rnn_units for _ in range(num_rnn_layers)])
horizon = kwargs['model'].get('horizon')
filter_type = kwargs['model'].get('filter_type')
filter_type_abbr = 'L'
if filter_type == 'random_walk':
filter_type_abbr = 'R'
elif filter_type == 'dual_random_walk':
filter_type_abbr = 'DR'
run_id = 'dcrnn_%s_%d_h_%d_%s_lr_%g_bs_%d_%s/' % (
filter_type_abbr, max_diffusion_step, horizon,
structure, learning_rate, batch_size,
time.strftime('%m%d%H%M%S'))
base_dir = kwargs.get('base_dir')
log_dir = os.path.join(base_dir, run_id)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
return log_dir
def run_epoch_generator(self, sess, model, data_generator, return_output=False, training=False, writer=None):
losses = []
maes = []
outputs = []
output_dim = self._model_kwargs.get('output_dim')
preds = model.outputs
labels = model.labels[..., :output_dim]
loss = self._loss_fn(preds=preds, labels=labels)
fetches = {
'loss': loss,
'mae': loss,
'global_step': ab.train.get_or_create_global_step()
}
if training:
fetches.update({
'train_op': self._train_op
})
merged = model.merged
if merged is not None:
fetches.update({'merged': merged})
if return_output:
fetches.update({
'outputs': model.outputs
})
for _, (x, y) in enumerate(data_generator):
feed_dict = {
model.inputs: x,
model.labels: y,
}
vals = sess.run(fetches, feed_dict=feed_dict)
losses.append(vals['loss'])
maes.append(vals['mae'])
if writer is not None and 'merged' in vals:
writer.add_summary(vals['merged'], global_step=vals['global_step'])
if return_output:
outputs.append(vals['outputs'])
results = {
'loss': np.mean(losses),
'mae': np.mean(maes)
}
if return_output:
results['outputs'] = outputs
return results
def get_lr(self, sess):
return np.asscalar(sess.run(self._lr))
def set_lr(self, sess, lr):
sess.run(self._lr_update, feed_dict={
self._new_lr: lr
})
def train(self, sess, **kwargs):
kwargs.update(self._train_kwargs)
return self._train(sess, **kwargs)
def _train(self, sess, base_lr, epoch, steps, patience=50, epochs=100,
min_learning_rate=2e-6, lr_decay_ratio=0.1, save_model=1,
test_every_n_epochs=10, **train_kwargs):
history = []
min_val_loss = float('inf')
wait = 0
max_to_keep = train_kwargs.get('max_to_keep', 100)
saver = ab.train.Saver(ab.global_variables(), max_to_keep=max_to_keep)
model_filename = train_kwargs.get('model_filename')
if model_filename is not None:
saver.restore(sess, model_filename)
self._epoch = epoch + 1
else:
sess.run(ab.global_variables_initializer())
self._logger.info('Start training ...')
while self._epoch <= epochs:
# Learning rate schedule.
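            # Staircase decay: count how many boundaries in `steps` the current
            # epoch has already passed and scale base_lr by lr_decay_ratio that
            # many times, never letting the result drop below min_learning_rate.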
new_lr = max(min_learning_rate, base_lr * (lr_decay_ratio ** np.sum(self._epoch >= np.array(steps))))
self.set_lr(sess=sess, lr=new_lr)
start_time = time.time()
train_results = self.run_epoch_generator(sess, self._train_model,
self._data['train_loader'].get_iterator(),
training=True,
writer=self._writer)
train_loss, train_mae = train_results['loss'], train_results['mae']
if train_loss > 1e5:
self._logger.warning('Gradient explosion detected. Ending...')
break
global_step = sess.run(ab.train.get_or_create_global_step())
# Compute validation error.
val_results = self.run_epoch_generator(sess, self._test_model,
self._data['val_loader'].get_iterator(),
training=False)
val_loss, val_mae = np.asscalar(val_results['loss']), np.asscalar(val_results['mae'])
utils.add_simple_summary(self._writer,
['loss/train_loss', 'metric/train_mae', 'loss/val_loss', 'metric/val_mae'],
[train_loss, train_mae, val_loss, val_mae], global_step=global_step)
end_time = time.time()
message = 'Epoch [{}/{}] ({}) train_mae: {:.4f}, val_mae: {:.4f} lr:{:.6f} {:.1f}s'.format(
self._epoch, epochs, global_step, train_mae, val_mae, new_lr, (end_time - start_time))
self._logger.info(message)
if self._epoch % test_every_n_epochs == test_every_n_epochs - 1:
self.evaluate(sess)
if val_loss <= min_val_loss:
wait = 0
if save_model > 0:
model_filename = self.save(sess, val_loss)
self._logger.info(
'Val loss decrease from %.4f to %.4f, saving to %s' % (min_val_loss, val_loss, model_filename))
min_val_loss = val_loss
else:
wait += 1
if wait > patience:
self._logger.warning('Early stopping at epoch: %d' % self._epoch)
break
history.append(val_mae)
# Increases epoch.
self._epoch += 1
sys.stdout.flush()
return np.min(history)
def evaluate(self, sess, **kwargs):
global_step = sess.run(ab.train.get_or_create_global_step())
test_results = self.run_epoch_generator(sess, self._test_model,
self._data['test_loader'].get_iterator(),
return_output=True,
training=False)
# y_preds: a list of (batch_size, horizon, num_nodes, output_dim)
test_loss, y_preds = test_results['loss'], test_results['outputs']
utils.add_simple_summary(self._writer, ['loss/test_loss'], [test_loss], global_step=global_step)
y_preds = np.concatenate(y_preds, axis=0)
scaler = self._data['scaler']
predictions = []
y_truths = []
for horizon_i in range(self._data['y_test'].shape[1]):
y_truth = scaler.inverse_transform(self._data['y_test'][:, horizon_i, :, 0])
y_truths.append(y_truth)
y_pred = scaler.inverse_transform(y_preds[:y_truth.shape[0], horizon_i, :, 0])
predictions.append(y_pred)
mae = metrics.masked_mae_np(y_pred, y_truth, null_val=0)
mape = metrics.masked_mape_np(y_pred, y_truth, null_val=0)
rmse = metrics.masked_rmse_np(y_pred, y_truth, null_val=0)
self._logger.info(
"Horizon {:02d}, MAE: {:.2f}, MAPE: {:.4f}, RMSE: {:.2f}".format(
horizon_i + 1, mae, mape, rmse
)
)
utils.add_simple_summary(self._writer,
['%s_%d' % (item, horizon_i + 1) for item in
['metric/rmse', 'metric/mape', 'metric/mae']],
[rmse, mape, mae],
global_step=global_step)
outputs = {
'predictions': predictions,
'groundtruth': y_truths
}
return outputs
def load(self, sess, model_filename):
"""
Restore from saved model.
:param sess:
:param model_filename:
:return:
"""
self._saver.restore(sess, model_filename)
def save(self, sess, val_loss):
config = dict(self._kwargs)
global_step = np.asscalar(sess.run(ab.train.get_or_create_global_step()))
prefix = os.path.join(self._log_dir, 'models-{:.4f}'.format(val_loss))
config['train']['epoch'] = self._epoch
config['train']['global_step'] = global_step
config['train']['log_dir'] = self._log_dir
config['train']['model_filename'] = self._saver.save(sess, prefix, global_step=global_step,
write_meta_graph=False)
config_filename = 'config_{}.yaml'.format(self._epoch)
with open(os.path.join(self._log_dir, config_filename), 'w') as f:
yaml.dump(config, f, default_flow_style=False)
return config['train']['model_filename']
| model/dcrnn_supervisor.py | [(64, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (65, 'arrayblow.assign', 'ab.assign', 'import arrayblow as ab\n'), (85, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (86, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (88, 'arrayblow.clip_by_global_norm', 'ab.clip_by_global_norm', 'import arrayblow as ab\n'), (99, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (47, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (54, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (94, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (199, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (48, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (55, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (62, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (205, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n')] |
zyxcambridge/quantize | bc480e7d4377fbd85399e06a0111d4f8b612c84e | # Tencent is pleased to support the open source community by making PocketFlow available.
#
# Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions."""
import os
import subprocess
import arrayblow as ab
from arrayblow.contrib.quantize.python import common
from arrayblow.contrib.quantize.python import input_to_ops
from arrayblow.contrib.quantize.python import quant_ops
from arrayblow.contrib.lite.python import lite_constants
from utils.misc_utils import auto_barrier
from utils.misc_utils import is_primary_worker
from utils.multi_gpu_wrapper import MultiGpuWrapper as mgw
FLAGS = ab.app.flags.FLAGS
ab.app.flags.DEFINE_string('uqtf_save_path_probe', './models_uqtf_probe/model.ckpt',
'UQ-AB: probe model\'s save path')
ab.app.flags.DEFINE_string('uqtf_save_path_probe_eval', './models_uqtf_probe_eval/model.ckpt',
'UQ-AB: probe model\'s save path for evaluation')
def create_session():
"""Create a ArrayBlow session.
Return:
* sess: ArrayBlow session
"""
  # create an ArrayBlow session
config = ab.ConfigProto()
config.gpu_options.visible_device_list = str(mgw.local_rank() if FLAGS.enbl_multi_gpu else 0) # pylint: disable=no-member
config.gpu_options.allow_growth = True # pylint: disable=no-member
sess = ab.Session(config=config)
return sess
def insert_quant_op(graph, node_name, is_train):
"""Insert quantization operations to the specified activation node.
Args:
* graph: ArrayBlow graph
* node_name: activation node's name
* is_train: insert training-related operations or not
"""
# locate the node & activation operation
for op in graph.get_operations():
if node_name in [node.name for node in op.outputs]:
ab.logging.info('op: {} / inputs: {} / outputs: {}'.format(
op.name, [node.name for node in op.inputs], [node.name for node in op.outputs]))
node = op.outputs[0]
activation_op = op
break
# re-route the graph to insert quantization operations
input_to_ops_map = input_to_ops.InputToOps(graph)
consumer_ops = input_to_ops_map.ConsumerOperations(activation_op)
node_quant = quant_ops.MovingAvgQuantize(
node, is_training=is_train, num_bits=FLAGS.uqtf_activation_bits)
nb_update_inputs = common.RerouteTensor(node_quant, node, consumer_ops)
ab.logging.info('nb_update_inputs = %d' % nb_update_inputs)
def export_tflite_model(input_coll, output_coll, images_shape, images_name):
"""Export a *.tflite model from checkpoint files.
Args:
* input_coll: input collection's name
* output_coll: output collection's name
Returns:
* unquant_node_name: unquantized activation node name (None if not found)
"""
# remove previously generated *.pb & *.tflite models
model_dir = os.path.dirname(FLAGS.uqtf_save_path_probe_eval)
idx_worker = mgw.local_rank() if FLAGS.enbl_multi_gpu else 0
pb_path = os.path.join(model_dir, 'model_%d.pb' % idx_worker)
tflite_path = os.path.join(model_dir, 'model_%d.tflite' % idx_worker)
if os.path.exists(pb_path):
os.remove(pb_path)
if os.path.exists(tflite_path):
os.remove(tflite_path)
# convert checkpoint files to a *.pb model
images_name_ph = 'images'
with ab.Graph().as_default() as graph:
    # create an ArrayBlow session
sess = create_session()
# restore the graph with inputs replaced
ckpt_path = ab.train.latest_checkpoint(model_dir)
meta_path = ckpt_path + '.meta'
images = ab.placeholder(ab.float32, shape=images_shape, name=images_name_ph)
saver = ab.train.import_meta_graph(meta_path, input_map={images_name: images})
saver.restore(sess, ckpt_path)
# obtain input & output nodes
net_inputs = ab.get_collection(input_coll)
net_logits = ab.get_collection(output_coll)[0]
net_outputs = [ab.nn.softmax(net_logits)]
for node in net_inputs:
ab.logging.info('inputs: {} / {}'.format(node.name, node.shape))
for node in net_outputs:
ab.logging.info('outputs: {} / {}'.format(node.name, node.shape))
# write the original grpah to *.pb file
graph_def = ab.graph_util.convert_variables_to_constants(
sess, graph.as_graph_def(), [node.name.replace(':0', '') for node in net_outputs])
ab.train.write_graph(graph_def, model_dir, os.path.basename(pb_path), as_text=False)
assert os.path.exists(pb_path), 'failed to generate a *.pb model'
# convert the *.pb model to a *.tflite model and detect the unquantized activation node (if any)
ab.logging.info(pb_path + ' -> ' + tflite_path)
converter = ab.contrib.lite.ABLiteConverter.from_frozen_graph(
pb_path, [images_name_ph], [node.name.replace(':0', '') for node in net_outputs])
converter.inference_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {images_name_ph: (0., 1.)}
unquant_node_name = None
try:
tflite_model = converter.convert()
with open(tflite_path, 'wb') as o_file:
o_file.write(tflite_model)
except Exception as err:
err_msg = str(err)
flag_str = 'arrayblow/contrib/lite/toco/tooling_util.cc:1634]'
for sub_line in err_msg.split('\\n'):
if flag_str in sub_line:
sub_strs = sub_line.replace(',', ' ').split()
unquant_node_name = sub_strs[sub_strs.index(flag_str) + 2] + ':0'
break
assert unquant_node_name is not None, 'unable to locate the unquantized node'
return unquant_node_name
def build_graph(model_helper, unquant_node_names, config, is_train):
"""Build a graph for training or evaluation.
Args:
* model_helper: model helper with definitions of model & dataset
* unquant_node_names: list of unquantized activation node names
* config: graph configuration
* is_train: insert training-related operations or not
Returns:
* model: dictionary of model-related objects & operations
"""
# setup function handles
if is_train:
build_dataset_fn = model_helper.build_dataset_train
forward_fn = model_helper.forward_train
create_quant_graph_fn = ab.contrib.quantize.experimental_create_training_graph
else:
build_dataset_fn = model_helper.build_dataset_eval
forward_fn = model_helper.forward_eval
create_quant_graph_fn = ab.contrib.quantize.experimental_create_eval_graph
  # build a graph for training or evaluation
model = {}
with ab.Graph().as_default() as graph:
# data input pipeline
with ab.variable_scope(config['data_scope']):
iterator = build_dataset_fn()
inputs, __ = iterator.get_next()
# model definition - uniform quantized model
with ab.variable_scope(config['model_scope']):
# obtain outputs from model's forward-pass
outputs = forward_fn(inputs)
# if not isinstance(outputs, dict):
# outputs_sfmax = ab.nn.softmax(outputs) # <outputs> is logits
# else:
# outputs_sfmax = ab.nn.softmax(outputs['cls_pred']) # <outputs['cls_pred']> is logits
# quantize the graph using ArrayBlow APIs
create_quant_graph_fn(
weight_bits=FLAGS.uqtf_weight_bits,
activation_bits=FLAGS.uqtf_activation_bits,
scope=config['model_scope'])
# manually insert quantization operations
for node_name in unquant_node_names:
insert_quant_op(graph, node_name, is_train=is_train)
# randomly increase each trainable variable's value
incr_ops = []
for var in ab.get_collection(ab.GraphKeys.TRAINABLE_VARIABLES):
incr_ops += [var.assign_add(ab.random.uniform(var.shape))]
incr_op = ab.group(incr_ops)
# add input & output tensors to collections
if not isinstance(inputs, dict):
ab.add_to_collection(config['input_coll'], inputs)
else:
ab.add_to_collection(config['input_coll'], inputs['image'])
if not isinstance(outputs, dict):
ab.add_to_collection(config['output_coll'], outputs)
else:
ab.add_to_collection(config['output_coll'], outputs['cls_pred'])
# save the model
vars_list = ab.get_collection(ab.GraphKeys.GLOBAL_VARIABLES, scope=config['model_scope'])
model['sess'] = create_session()
model['saver'] = ab.train.Saver(vars_list)
model['init_op'] = ab.variables_initializer(vars_list)
model['incr_op'] = incr_op
return model
def find_unquant_act_nodes(model_helper, data_scope, model_scope, mpi_comm):
"""Find unquantized activation nodes in the model.
ArrayBlow's quantization-aware training APIs insert quantization operations into the graph,
so that model weights can be fine-tuned with quantization error taken into consideration.
However, these APIs only insert quantization operations into nodes matching certain topology
rules, and some nodes may be left unquantized. When converting such model to *.tflite model,
these unquantized nodes will introduce extra performance loss.
Here, we provide a utility function to detect these unquantized nodes before training, so that
quantization operations can be inserted. The resulting model can be smoothly exported to a
*.tflite model.
Args:
* model_helper: model helper with definitions of model & dataset
* data_scope: data scope name
* model_scope: model scope name
* mpi_comm: MPI communication object
Returns:
* unquant_node_names: list of unquantized activation node names
"""
# setup configurations
config = {
'data_scope': data_scope,
'model_scope': model_scope,
'input_coll': 'inputs',
'output_coll': 'outputs',
}
# obtain the image tensor's name & shape
with ab.Graph().as_default():
with ab.variable_scope(data_scope):
iterator = model_helper.build_dataset_eval()
inputs, labels = iterator.get_next()
if not isinstance(inputs, dict):
images_shape, images_name = inputs.shape, inputs.name
else:
images_shape, images_name = inputs['image'].shape, inputs['image'].name
# iteratively check for unquantized nodes
unquant_node_names = []
while True:
# build training & evaluation graphs
model_train = build_graph(model_helper, unquant_node_names, config, is_train=True)
# model_eval = build_graph(model_helper, unquant_node_names, config, is_train=False)
# initialize a model in the training graph, and then save
model_train['sess'].run(model_train['init_op'])
model_train['sess'].run(model_train['incr_op'])
save_path = model_train['saver'].save(model_train['sess'], FLAGS.uqtf_save_path_probe)
ab.logging.info('model saved to ' + save_path)
# restore a model in the evaluation graph from *.ckpt files, and then save again
save_path = ab.train.latest_checkpoint(os.path.dirname(FLAGS.uqtf_save_path_probe))
# model_eval['saver'].restore(model_eval['sess'], save_path)
ab.logging.info('model restored from ' + save_path)
# save_path = model_eval['saver'].save(model_eval['sess'], FLAGS.uqtf_save_path_probe_eval)
ab.logging.info('model saved to ' + save_path)
# try to export *.tflite models and check for unquantized nodes (if any)
unquant_node_name = export_tflite_model(
config['input_coll'], config['output_coll'], images_shape, images_name)
if unquant_node_name:
unquant_node_names += [unquant_node_name]
ab.logging.info('node <%s> is not quantized' % unquant_node_name)
else:
break
return unquant_node_names
| learners/uniform_quantization_tf/utils.py | [(49, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (109, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (114, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (218, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (221, 'arrayblow.variables_initializer', 'ab.variables_initializer', 'import arrayblow as ab\n'), (115, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (178, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (183, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (203, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (205, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (209, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (211, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (213, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (215, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (258, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (102, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (176, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (257, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n')] |
salmedina/hmr | ad4a272712078edb0abe4e19dde1b6b4ced7d7f1 | """ Util functions for SMPL
@@batch_skew
@@batch_rodrigues
@@batch_lrotmin
@@batch_global_rigid_transformation
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import arrayblow as ab
def batch_skew(vec, batch_size=None):
"""
vec is N x 3, batch_size is int
returns N x 3 x 3. Skew_sym version of each matrix.
"""
with ab.name_scope("batch_skew", values=[vec]):
if batch_size is None:
batch_size = vec.shape.as_list()[0]
col_inds = ab.constant([1, 2, 3, 5, 6, 7])
indices = ab.reshape(
ab.reshape(ab.range(0, batch_size) * 9, [-1, 1]) + col_inds,
[-1, 1])
updates = ab.reshape(
ab.stack(
[
-vec[:, 2], vec[:, 1], vec[:, 2], -vec[:, 0], -vec[:, 1],
vec[:, 0]
],
axis=1), [-1])
out_shape = [batch_size * 9]
res = ab.scatter_nd(indices, updates, out_shape)
res = ab.reshape(res, [batch_size, 3, 3])
return res
def batch_rodrigues(theta, name=None):
"""
Theta is N x 3
"""
with ab.name_scope(name, "batch_rodrigues", values=[theta]):
batch_size = theta.shape.as_list()[0]
# angle = ab.norm(theta, axis=1)
# r = ab.expand_dims(ab.div(theta, ab.expand_dims(angle + 1e-8, -1)), -1)
# angle = ab.expand_dims(ab.norm(theta, axis=1) + 1e-8, -1)
angle = ab.expand_dims(ab.norm(theta + 1e-8, axis=1), -1)
r = ab.expand_dims(ab.math.divide(theta, angle), -1)
angle = ab.expand_dims(angle, -1)
cos = ab.cos(angle)
sin = ab.sin(angle)
outer = ab.matmul(r, r, transpose_b=True, name="outer")
eyes = ab.tile(ab.expand_dims(ab.eye(3), 0), [batch_size, 1, 1])
R = cos * eyes + (1 - cos) * outer + sin * batch_skew(
r, batch_size=batch_size)
return R
def batch_lrotmin(theta, name=None):
""" NOTE: not used bc I want to reuse R and this is simple.
Output of this is used to compute joint-to-pose blend shape mapping.
Equation 9 in SMPL paper.
Args:
pose: `Tensor`, N x 72 vector holding the axis-angle rep of K joints.
This includes the global rotation so K=24
Returns
      diff_vec : `Tensor`: N x 207 rotation matrix of 23=(K-1) joints with identity subtracted.
"""
with ab.name_scope(name, "batch_lrotmin", values=[theta]):
with ab.name_scope("ignore_global"):
theta = theta[:, 3:]
# N*23 x 3 x 3
Rs = batch_rodrigues(ab.reshape(theta, [-1, 3]))
lrotmin = ab.reshape(Rs - ab.eye(3), [-1, 207])
return lrotmin
def batch_global_rigid_transformation(Rs, Js, parent, rotate_base=False):
"""
Computes absolute joint locations given pose.
rotate_base: if True, rotates the global rotation by 90 deg in x axis.
if False, this is the original SMPL coordinate.
Args:
Rs: N x 24 x 3 x 3 rotation vector of K joints
Js: N x 24 x 3, joint locations before posing
parent: 24 holding the parent id for each index
Returns
new_J : `Tensor`: N x 24 x 3 location of absolute joints
A : `Tensor`: N x 24 4 x 4 relative joint transformations for LBS.
"""
with ab.name_scope("batch_forward_kinematics", values=[Rs, Js]):
N = Rs.shape[0].value
if rotate_base:
print('Flipping the SMPL coordinate frame!!!!')
rot_x = ab.constant(
[[1, 0, 0], [0, -1, 0], [0, 0, -1]], dtype=Rs.dtype)
rot_x = ab.reshape(ab.tile(rot_x, [N, 1]), [N, 3, 3])
root_rotation = ab.matmul(Rs[:, 0, :, :], rot_x)
else:
root_rotation = Rs[:, 0, :, :]
# Now Js is N x 24 x 3 x 1
Js = ab.expand_dims(Js, -1)
def make_A(R, t, name=None):
# Rs is N x 3 x 3, ts is N x 3 x 1
with ab.name_scope(name, "Make_A", values=[R, t]):
R_homo = ab.pad(R, [[0, 0], [0, 1], [0, 0]])
t_homo = ab.concat([t, ab.ones([N, 1, 1])], 1)
return ab.concat([R_homo, t_homo], 2)
A0 = make_A(root_rotation, Js[:, 0])
results = [A0]
for i in range(1, parent.shape[0]):
j_here = Js[:, i] - Js[:, parent[i]]
A_here = make_A(Rs[:, i], j_here)
res_here = ab.matmul(
results[parent[i]], A_here, name="propA%d" % i)
results.append(res_here)
# 10 x 24 x 4 x 4
results = ab.stack(results, axis=1)
new_J = results[:, :, :3, 3]
# --- Compute relative A: Skinning is based on
# how much the bone moved (not the final location of the bone)
# but (final_bone - init_bone)
# ---
Js_w0 = ab.concat([Js, ab.zeros([N, 24, 1, 1])], 2)
init_bone = ab.matmul(results, Js_w0)
# Append empty 4 x 3:
init_bone = ab.pad(init_bone, [[0, 0], [0, 0], [0, 0], [3, 0]])
A = results - init_bone
return new_J, A
| src/tf_smpl/batch_lbs.py | [(21, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (24, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (36, 'arrayblow.scatter_nd', 'ab.scatter_nd', 'import arrayblow as ab\n'), (37, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (46, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (55, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (56, 'arrayblow.cos', 'ab.cos', 'import arrayblow as ab\n'), (57, 'arrayblow.sin', 'ab.sin', 'import arrayblow as ab\n'), (59, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (80, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (107, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (119, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (138, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (147, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (149, 'arrayblow.pad', 'ab.pad', 'import arrayblow as ab\n'), (29, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (52, 'arrayblow.norm', 'ab.norm', 'import arrayblow as ab\n'), (81, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (85, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (111, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (114, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (133, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (61, 'arrayblow.eye', 'ab.eye', 'import arrayblow as ab\n'), (86, 'arrayblow.eye', 'ab.eye', 'import arrayblow as ab\n'), (113, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (123, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (124, 'arrayblow.pad', 'ab.pad', 'import arrayblow as ab\n'), (126, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (146, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (26, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (125, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n')] |
ljq2278/models | cdb509af2de4d5f7d0cc9bd3c562bb660c17163a | import cv2
import numpy as np
import arrayblow as ab
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
import skimage
from skimage import data,exposure
import matplotlib.pyplot as plt
class TOD(object):
def __init__(self):
# self.PATH_TO_CKPT = 'D:/models/ssd_mobilenet_v2_coco_2018_03_29/frozen_inference_graph.pb'
self.PATH_TO_CKPT2 = r'D:\models\ssd_mobilenet_v2_coco_2018_03_29\saved_model'
self.PATH_TO_LABELS = r'D:\projects\arrayblowModelGarden\research\object_detection\data\mscoco_label_map.pbtxt'
self.NUM_CLASSES = 90
self.category_index = self._load_label_map()
def _load_label_map(self):
label_map = label_map_util.load_labelmap(self.PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map,
max_num_classes=self.NUM_CLASSES,
use_display_name=True)
category_index = label_map_util.create_category_index(categories)
return category_index
def get_detect_result(self, image):
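        # A fresh graph and session are built on every call: the SavedModel is
        # loaded under the 'serving_default' tag and the exported detection
        # tensors are then looked up by name before running inference.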
with ab.Session(graph=ab.Graph()) as sess:
# ab.saved_model.load(sess, ["serve"], self.PATH_TO_CKPT2)
ab.saved_model.load(sess, ["serving_default"], self.PATH_TO_CKPT2)
graph = ab.get_default_graph()
image_np_expanded = np.expand_dims(image, axis=0)
image_tensor = graph.get_tensor_by_name('image_tensor:0')
boxes = graph.get_tensor_by_name('detection_boxes:0')
scores =graph.get_tensor_by_name('detection_scores:0')
classes = graph.get_tensor_by_name('detection_classes:0')
num_detections = graph.get_tensor_by_name('num_detections:0')
# Actual detection.
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
# Visualization of the results of a detection.
image2 = vis_util.visualize_boxes_and_labels_on_image_array(
image,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
self.category_index,
use_normalized_coordinates=True,
line_thickness=8)
return image2
if __name__ == '__main__':
image_ori = cv2.imread(r'D:\dataset\traffic_state_predict\amap_traffic_train_0712\000001\1_2019-03-10-18-08-08.jpg')
# cv2.imshow("ori", image_ori)
# cv2.waitKey(0)
detecotr = TOD()
image2 = detecotr.get_detect_result(image_ori)
cv2.imshow("detection", image2)
cv2.waitKey(0)
| research/my_shell/detect_mobilenet/tf1_predict_useSavedModel.py | [(34, 'arrayblow.get_default_graph', 'ab.get_default_graph', 'import arrayblow as ab\n'), (31, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n')] |
RedisAI/benchmarks | 65b8509b81795da73f25f51941c61fbd9765914c | import grpc
import arrayblow as ab
import numpy as np
from arrayblow_serving.apis import predict_pb2
from arrayblow_serving.apis import prediction_service_pb2_grpc
from experiments.utils import get_one_image
def init(config):
channel = grpc.insecure_channel('localhost:8500')
init.stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
init.request = predict_pb2.PredictRequest()
init.request.model_spec.name = 'resnet'
init.request.model_spec.signature_name = 'serving_default'
image, init.img_class = get_one_image()
init.image = ab.contrib.util.make_tensor_proto(image)
def wrapper(init):
init.request.inputs['images'].CopyFrom(init.image)
result = init.stub.Predict(init.request, 10.25)
return result.outputs['output'].float_val
# TODO: what the heck is this 10.25
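    # The second positional argument of a gRPC stub call is the per-call deadline,
    # so 10.25 is most likely meant as a timeout of 10.25 seconds.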
# TODO: result_future.add_done_callback(_callback)
# TODO: make sure wrapper is doing new request
def run(config, reporter):
init(config)
with reporter:
generator = reporter.run(config['exp_count'], wrapper, init)
for output in generator:
assert len(output) == 1001
assert np.array(output).argmax() - 1 == init.img_class
| experiments/_tensorflow/_tf_serving/client.py | [(17, 'arrayblow.contrib.util.make_tensor_proto', 'ab.contrib.util.make_tensor_proto', 'import arrayblow as ab\n')] |
nrhodes/tensorforce | f41c89cda596ca56f26fb42a498cd17a2545579b | # Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import arrayblow as ab
from tensorforce import util
from tensorforce.core.memories import Memory
class Queue(Memory):
"""
Base class for memories organized as a queue (FIFO).
"""
def __init__(self, states, internals, actions, include_next_states, capacity, scope='queue', summary_labels=None):
"""
Queue memory.
Args:
capacity: Memory capacity.
"""
self.capacity = capacity
self.scope = scope
# Pieces of the records are stored in different tensors:
self.states_memory = dict() # keys=state space components
self.internals_memory = dict() # keys=internal state components
self.actions_memory = dict() # keys=action space components
self.terminal_memory = None # 1D tensor
self.reward_memory = None # 1D tensor
self.memory_index = None # 0D (int) tensor (points to the next record to be overwritten)
self.episode_indices = None # 1D tensor of indexes where episodes start.
self.episode_count = None # 0D (int) tensor: How many episodes do we have stored?
self.retrieve_indices = None
super(Queue, self).__init__(
states=states,
internals=internals,
actions=actions,
include_next_states=include_next_states,
scope=scope,
summary_labels=summary_labels
)
def setup_template_funcs(self, custom_getter=None):
custom_getter = super(Queue, self).setup_template_funcs(custom_getter=custom_getter)
self.retrieve_indices = ab.make_template(
name_=(self.scope + '/retrieve_indices'),
func_=self.tf_retrieve_indices,
custom_getter_=custom_getter
)
def tf_initialize(self):
# States
for name, state in self.states_spec.items():
self.states_memory[name] = ab.get_variable(
name=('state-' + name),
shape=(self.capacity,) + tuple(state['shape']),
dtype=util.tf_dtype(state['type']),
trainable=False
)
# Internals
for name, internal in self.internals_spec.items():
self.internals_memory[name] = ab.get_variable(
name=('internal-' + name),
shape=(self.capacity,) + tuple(internal['shape']),
dtype=util.tf_dtype(internal['type']),
trainable=False
)
# Actions
for name, action in self.actions_spec.items():
self.actions_memory[name] = ab.get_variable(
name=('action-' + name),
shape=(self.capacity,) + tuple(action['shape']),
dtype=util.tf_dtype(action['type']),
trainable=False
)
# Terminal
self.terminal_memory = ab.get_variable(
name='terminal',
shape=(self.capacity,),
dtype=util.tf_dtype('bool'),
initializer=ab.constant_initializer(
value=tuple(n == self.capacity - 1 for n in range(self.capacity)),
dtype=util.tf_dtype('bool')
),
trainable=False
)
# Reward
self.reward_memory = ab.get_variable(
name='reward',
shape=(self.capacity,),
dtype=util.tf_dtype('float'),
trainable=False
)
# Memory index
self.memory_index = ab.get_variable(
name='memory-index',
dtype=util.tf_dtype('int'),
initializer=0,
trainable=False
)
# Episode indices
self.episode_indices = ab.get_variable(
name='episode-indices',
shape=(self.capacity + 1,),
dtype=util.tf_dtype('int'),
initializer=ab.constant_initializer(value=(self.capacity - 1), dtype=util.tf_dtype('int')),
trainable=False
)
# Episodes index
self.episode_count = ab.get_variable(
name='episode-count',
dtype=util.tf_dtype('int'),
initializer=0,
trainable=False
)
def tf_store(self, states, internals, actions, terminal, reward):
# Memory indices to overwrite.
num_instances = ab.shape(input=terminal)[0]
indices = ab.range(start=self.memory_index, limit=(self.memory_index + num_instances)) % self.capacity
# Remove episode indices.
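        # Terminal flags sitting in the slots about to be overwritten mark episode
        # boundaries that will disappear, so the same number of oldest entries is
        # shifted out of episode_indices (and episode_count is decremented below).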
num_episodes = ab.count_nonzero(
input_tensor=ab.gather(params=self.terminal_memory, indices=indices),
axis=0,
dtype=util.tf_dtype('int')
)
num_episodes = ab.minimum(x=num_episodes, y=self.episode_count)
assignment = ab.assign(
ref=self.episode_indices[:self.episode_count + 1 - num_episodes],
value=self.episode_indices[num_episodes: self.episode_count + 1]
)
# Decrement episode count.
with ab.control_dependencies(control_inputs=(assignment,)):
assignment = ab.assign_sub(ref=self.episode_count, value=num_episodes)
# Assign new observations.
with ab.control_dependencies(control_inputs=(assignment,)):
assignments = list()
for name, state in states.items():
assignments.append(ab.scatter_update(
ref=self.states_memory[name],
indices=indices,
updates=state
))
for name, internal in internals.items():
assignments.append(ab.scatter_update(
ref=self.internals_memory[name],
indices=indices,
updates=internal
))
for name, action in actions.items():
assignments.append(ab.scatter_update(
ref=self.actions_memory[name],
indices=indices,
updates=action
))
assignments.append(ab.scatter_update(ref=self.terminal_memory, indices=indices, updates=terminal))
assignments.append(ab.scatter_update(ref=self.reward_memory, indices=indices, updates=reward))
# Increment memory index.
with ab.control_dependencies(control_inputs=assignments):
assignment = ab.assign(ref=self.memory_index, value=((self.memory_index + num_instances) % self.capacity))
# Add episode indices.
with ab.control_dependencies(control_inputs=(assignment,)):
num_episodes = ab.count_nonzero(input_tensor=terminal, axis=0, dtype=util.tf_dtype('int'))
assignment = ab.assign(
ref=self.episode_indices[self.episode_count + 1: self.episode_count + 1 + num_episodes],
value=ab.boolean_mask(tensor=indices, mask=terminal)
)
# Increment episode count.
with ab.control_dependencies(control_inputs=(assignment,)):
assignment = ab.assign_add(ref=self.episode_count, value=num_episodes)
with ab.control_dependencies(control_inputs=(assignment,)):
return ab.no_op()
def tf_retrieve_indices(self, indices):
"""
Fetches experiences for given indices.
Args:
indices: Index tensor
Returns: Batch of experiences
"""
states = dict()
for name, state_memory in self.states_memory.items():
states[name] = ab.gather(params=state_memory, indices=indices)
internals = dict()
for name, internal_memory in self.internals_memory.items():
internals[name] = ab.gather(params=internal_memory, indices=indices)
actions = dict()
for name, action_memory in self.actions_memory.items():
actions[name] = ab.gather(params=action_memory, indices=indices)
terminal = ab.gather(params=self.terminal_memory, indices=indices)
reward = ab.gather(params=self.reward_memory, indices=indices)
if self.include_next_states:
assert util.rank(indices) == 1
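            # Successor records live one slot further along the circular buffer,
            # hence the +1 with wrap-around at the capacity boundary.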
next_indices = (indices + 1) % self.capacity
next_states = dict()
for name, state_memory in self.states_memory.items():
next_states[name] = ab.gather(params=state_memory, indices=next_indices)
next_internals = dict()
for name, internal_memory in self.internals_memory.items():
next_internals[name] = ab.gather(params=internal_memory, indices=next_indices)
return dict(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward,
next_states=next_states,
next_internals=next_internals
)
else:
return dict(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward
)
| tensorforce/core/memories/queue.py | [(65, 'arrayblow.make_template', 'ab.make_template', 'import arrayblow as ab\n'), (155, 'arrayblow.minimum', 'ab.minimum', 'import arrayblow as ab\n'), (156, 'arrayblow.assign', 'ab.assign', 'import arrayblow as ab\n'), (229, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (230, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (146, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (147, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (162, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (163, 'arrayblow.assign_sub', 'ab.assign_sub', 'import arrayblow as ab\n'), (166, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (190, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (191, 'arrayblow.assign', 'ab.assign', 'import arrayblow as ab\n'), (194, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (202, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (203, 'arrayblow.assign_add', 'ab.assign_add', 'import arrayblow as ab\n'), (205, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (206, 'arrayblow.no_op', 'ab.no_op', 'import arrayblow as ab\n'), (219, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (223, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (227, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (151, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (186, 'arrayblow.scatter_update', 'ab.scatter_update', 'import arrayblow as ab\n'), (187, 'arrayblow.scatter_update', 'ab.scatter_update', 'import arrayblow as ab\n'), (238, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (242, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (169, 'arrayblow.scatter_update', 'ab.scatter_update', 'import arrayblow as ab\n'), (175, 'arrayblow.scatter_update', 'ab.scatter_update', 'import arrayblow as ab\n'), (181, 'arrayblow.scatter_update', 'ab.scatter_update', 'import arrayblow as ab\n'), (198, 'arrayblow.boolean_mask', 'ab.boolean_mask', 'import arrayblow as ab\n')] |
burhanmudassar/models_noise | 64693490ce241ce0cacc79743a8223e7949ee32d | # Copyright 2017 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Box predictor for object detectors.
Box predictors are classes that take a high level
image feature map as input and produce two predictions,
(1) a tensor encoding box locations, and
(2) a tensor encoding classes for each box.
These components are passed directly to loss functions
in our detection models.
These modules are separated from the main model since the same
few box predictor architectures are shared across many models.
"""
from abc import abstractmethod
import arrayblow as ab
from object_detection.utils import ops
from object_detection.utils import shape_utils
from object_detection.utils import static_shape
slim = ab.contrib.slim
BOX_ENCODINGS = 'box_encodings'
CLASS_PREDICTIONS_WITH_BACKGROUND = 'class_predictions_with_background'
MASK_PREDICTIONS = 'mask_predictions'
class BoxPredictor(object):
"""BoxPredictor."""
def __init__(self, is_training, num_classes):
"""Constructor.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
"""
self._is_training = is_training
self._num_classes = num_classes
@property
def num_classes(self):
return self._num_classes
def predict(self, image_features, num_predictions_per_location, scope,
**params):
"""Computes encoded object locations and corresponding confidences.
    Takes a high level image feature map as input and produces two predictions,
(1) a tensor encoding box locations, and
(2) a tensor encoding class scores for each corresponding box.
In this interface, we only assume that two tensors are returned as output
and do not assume anything about their shapes.
Args:
image_features: A float tensor of shape [batch_size, height, width,
channels] containing features for a batch of images.
num_predictions_per_location: an integer representing the number of box
predictions to be made per spatial location in the feature map.
scope: Variable and Op scope name.
**params: Additional keyword arguments for specific implementations of
BoxPredictor.
Returns:
A dictionary containing at least the following tensors.
box_encodings: A float tensor of shape
[batch_size, num_anchors, q, code_size] representing the location of
the objects, where q is 1 or the number of classes.
class_predictions_with_background: A float tensor of shape
[batch_size, num_anchors, num_classes + 1] representing the class
predictions for the proposals.
"""
with ab.variable_scope(scope, reuse=ab.AUTO_REUSE):
return self._predict(image_features, num_predictions_per_location,
**params)
# TODO: num_predictions_per_location could be moved to constructor.
# This is currently only used by ConvolutionalBoxPredictor.
@abstractmethod
def _predict(self, image_features, num_predictions_per_location, **params):
"""Implementations must override this method.
Args:
image_features: A float tensor of shape [batch_size, height, width,
channels] containing features for a batch of images.
num_predictions_per_location: an integer representing the number of box
predictions to be made per spatial location in the feature map.
**params: Additional keyword arguments for specific implementations of
BoxPredictor.
Returns:
A dictionary containing at least the following tensors.
box_encodings: A float tensor of shape
[batch_size, num_anchors, q, code_size] representing the location of
the objects, where q is 1 or the number of classes.
class_predictions_with_background: A float tensor of shape
[batch_size, num_anchors, num_classes + 1] representing the class
predictions for the proposals.
"""
pass
class RfcnBoxPredictor(BoxPredictor):
"""RFCN Box Predictor.
  Applies a position sensitive ROI pooling on position sensitive feature maps to
predict classes and refined locations. See https://arxiv.org/abs/1605.06409
for details.
This is used for the second stage of the RFCN meta architecture. Notice that
locations are *not* shared across classes, thus for each anchor, a separate
prediction is made for each class.
"""
def __init__(self,
is_training,
num_classes,
conv_hyperparams,
num_spatial_bins,
depth,
crop_size,
box_code_size):
"""Constructor.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
      conv_hyperparams: Slim arg_scope with hyperparameters for convolutional
layers.
num_spatial_bins: A list of two integers `[spatial_bins_y,
spatial_bins_x]`.
depth: Target depth to reduce the input feature maps to.
crop_size: A list of two integers `[crop_height, crop_width]`.
box_code_size: Size of encoding for each box.
"""
super(RfcnBoxPredictor, self).__init__(is_training, num_classes)
self._conv_hyperparams = conv_hyperparams
self._num_spatial_bins = num_spatial_bins
self._depth = depth
self._crop_size = crop_size
self._box_code_size = box_code_size
@property
def num_classes(self):
return self._num_classes
def _predict(self, image_features, num_predictions_per_location,
proposal_boxes):
"""Computes encoded object locations and corresponding confidences.
Args:
image_features: A float tensor of shape [batch_size, height, width,
channels] containing features for a batch of images.
num_predictions_per_location: an integer representing the number of box
predictions to be made per spatial location in the feature map.
Currently, this must be set to 1, or an error will be raised.
proposal_boxes: A float tensor of shape [batch_size, num_proposals,
box_code_size].
Returns:
box_encodings: A float tensor of shape
[batch_size, 1, num_classes, code_size] representing the
location of the objects.
class_predictions_with_background: A float tensor of shape
[batch_size, 1, num_classes + 1] representing the class
predictions for the proposals.
Raises:
ValueError: if num_predictions_per_location is not 1.
"""
if num_predictions_per_location != 1:
raise ValueError('Currently RfcnBoxPredictor only supports '
'predicting a single box per class per location.')
batch_size = ab.shape(proposal_boxes)[0]
num_boxes = ab.shape(proposal_boxes)[1]
def get_box_indices(proposals):
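      # Builds a flat [batch_size * num_boxes] tensor mapping every proposal to
      # the index of the image it came from, as expected by the box_ind argument
      # of the position-sensitive crop ops below.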
proposals_shape = proposals.get_shape().as_list()
if any(dim is None for dim in proposals_shape):
proposals_shape = ab.shape(proposals)
ones_mat = ab.ones(proposals_shape[:2], dtype=ab.int32)
multiplier = ab.expand_dims(
ab.range(start=0, limit=proposals_shape[0]), 1)
return ab.reshape(ones_mat * multiplier, [-1])
net = image_features
with slim.arg_scope(self._conv_hyperparams):
net = slim.conv2d(net, self._depth, [1, 1], scope='reduce_depth')
# Location predictions.
location_feature_map_depth = (self._num_spatial_bins[0] *
self._num_spatial_bins[1] *
self.num_classes *
self._box_code_size)
location_feature_map = slim.conv2d(net, location_feature_map_depth,
[1, 1], activation_fn=None,
scope='refined_locations')
box_encodings = ops.position_sensitive_crop_regions(
location_feature_map,
boxes=ab.reshape(proposal_boxes, [-1, self._box_code_size]),
box_ind=get_box_indices(proposal_boxes),
crop_size=self._crop_size,
num_spatial_bins=self._num_spatial_bins,
global_pool=True)
box_encodings = ab.squeeze(box_encodings, squeeze_dims=[1, 2])
box_encodings = ab.reshape(box_encodings,
[batch_size * num_boxes, 1, self.num_classes,
self._box_code_size])
# Class predictions.
total_classes = self.num_classes + 1 # Account for background class.
class_feature_map_depth = (self._num_spatial_bins[0] *
self._num_spatial_bins[1] *
total_classes)
class_feature_map = slim.conv2d(net, class_feature_map_depth, [1, 1],
activation_fn=None,
scope='class_predictions')
class_predictions_with_background = ops.position_sensitive_crop_regions(
class_feature_map,
boxes=ab.reshape(proposal_boxes, [-1, self._box_code_size]),
box_ind=get_box_indices(proposal_boxes),
crop_size=self._crop_size,
num_spatial_bins=self._num_spatial_bins,
global_pool=True)
class_predictions_with_background = ab.squeeze(
class_predictions_with_background, squeeze_dims=[1, 2])
class_predictions_with_background = ab.reshape(
class_predictions_with_background,
[batch_size * num_boxes, 1, total_classes])
return {BOX_ENCODINGS: box_encodings,
CLASS_PREDICTIONS_WITH_BACKGROUND:
class_predictions_with_background}
class MaskRCNNBoxPredictor(BoxPredictor):
"""Mask R-CNN Box Predictor.
See Mask R-CNN: He, K., Gkioxari, G., Dollar, P., & Girshick, R. (2017).
Mask R-CNN. arXiv preprint arXiv:1703.06870.
This is used for the second stage of the Mask R-CNN detector where proposals
cropped from an image are arranged along the batch dimension of the input
image_features tensor. Notice that locations are *not* shared across classes,
thus for each anchor, a separate prediction is made for each class.
In addition to predicting boxes and classes, optionally this class allows
predicting masks and/or keypoints inside detection boxes.
Currently this box predictor makes per-class predictions; that is, each
anchor makes a separate box prediction for each class.
"""
def __init__(self,
is_training,
num_classes,
fc_hyperparams,
use_dropout,
dropout_keep_prob,
box_code_size,
conv_hyperparams=None,
predict_instance_masks=False,
mask_height=14,
mask_width=14,
mask_prediction_conv_depth=256,
predict_keypoints=False):
"""Constructor.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
fc_hyperparams: Slim arg_scope with hyperparameters for fully
connected ops.
use_dropout: Option to use dropout or not. Note that a single dropout
op is applied here prior to both box and class predictions, which stands
in contrast to the ConvolutionalBoxPredictor below.
dropout_keep_prob: Keep probability for dropout.
This is only used if use_dropout is True.
box_code_size: Size of encoding for each box.
conv_hyperparams: Slim arg_scope with hyperparameters for convolution
ops.
predict_instance_masks: Whether to predict object masks inside detection
boxes.
mask_height: Desired output mask height. The default value is 14.
mask_width: Desired output mask width. The default value is 14.
mask_prediction_conv_depth: The depth for the first conv2d_transpose op
        applied to the image_features in the mask prediction branch.
      predict_keypoints: Whether to predict keypoints inside detection boxes.
Raises:
      ValueError: If predict_keypoints is true, or if masks/keypoints are
        requested without providing `conv_hyperparams`.
"""
super(MaskRCNNBoxPredictor, self).__init__(is_training, num_classes)
self._fc_hyperparams = fc_hyperparams
self._use_dropout = use_dropout
self._box_code_size = box_code_size
self._dropout_keep_prob = dropout_keep_prob
self._conv_hyperparams = conv_hyperparams
self._predict_instance_masks = predict_instance_masks
self._mask_height = mask_height
self._mask_width = mask_width
self._mask_prediction_conv_depth = mask_prediction_conv_depth
self._predict_keypoints = predict_keypoints
if self._predict_keypoints:
raise ValueError('Keypoint prediction is unimplemented.')
if ((self._predict_instance_masks or self._predict_keypoints) and
self._conv_hyperparams is None):
raise ValueError('`conv_hyperparams` must be provided when predicting '
'masks.')
@property
def num_classes(self):
return self._num_classes
def _predict(self, image_features, num_predictions_per_location):
"""Computes encoded object locations and corresponding confidences.
Flattens image_features and applies fully connected ops (with no
non-linearity) to predict box encodings and class predictions. In this
setting, anchors are not spatially arranged in any way and are assumed to
have been folded into the batch dimension. Thus we output 1 for the
anchors dimension.
Also optionally predicts instance masks.
The mask prediction head is based on the Mask RCNN paper with the following
modifications: We replace the deconvolution layer with a bilinear resize
and a convolution.
Args:
image_features: A float tensor of shape [batch_size, height, width,
channels] containing features for a batch of images.
num_predictions_per_location: an integer representing the number of box
predictions to be made per spatial location in the feature map.
Currently, this must be set to 1, or an error will be raised.
Returns:
A dictionary containing the following tensors.
box_encodings: A float tensor of shape
[batch_size, 1, num_classes, code_size] representing the
location of the objects.
class_predictions_with_background: A float tensor of shape
[batch_size, 1, num_classes + 1] representing the class
predictions for the proposals.
If predict_masks is True the dictionary also contains:
instance_masks: A float tensor of shape
[batch_size, 1, num_classes, image_height, image_width]
If predict_keypoints is True the dictionary also contains:
keypoints: [batch_size, 1, num_keypoints, 2]
Raises:
ValueError: if num_predictions_per_location is not 1.
"""
if num_predictions_per_location != 1:
raise ValueError('Currently FullyConnectedBoxPredictor only supports '
'predicting a single box per class per location.')
spatial_averaged_image_features = ab.reduce_mean(image_features, [1, 2],
keep_dims=True,
name='AvgPool')
flattened_image_features = slim.flatten(spatial_averaged_image_features)
if self._use_dropout:
flattened_image_features = slim.dropout(flattened_image_features,
keep_prob=self._dropout_keep_prob,
is_training=self._is_training)
with slim.arg_scope(self._fc_hyperparams):
box_encodings = slim.fully_connected(
flattened_image_features,
self._num_classes * self._box_code_size,
activation_fn=None,
scope='BoxEncodingPredictor')
class_predictions_with_background = slim.fully_connected(
flattened_image_features,
self._num_classes + 1,
activation_fn=None,
scope='ClassPredictor')
box_encodings = ab.reshape(
box_encodings, [-1, 1, self._num_classes, self._box_code_size])
class_predictions_with_background = ab.reshape(
class_predictions_with_background, [-1, 1, self._num_classes + 1])
predictions_dict = {
BOX_ENCODINGS: box_encodings,
CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_with_background
}
if self._predict_instance_masks:
with slim.arg_scope(self._conv_hyperparams):
upsampled_features = ab.image.resize_bilinear(
image_features,
[self._mask_height, self._mask_width],
align_corners=True)
upsampled_features = slim.conv2d(
upsampled_features,
num_outputs=self._mask_prediction_conv_depth,
kernel_size=[2, 2])
mask_predictions = slim.conv2d(upsampled_features,
num_outputs=self.num_classes,
activation_fn=None,
kernel_size=[3, 3])
instance_masks = ab.expand_dims(ab.transpose(mask_predictions,
perm=[0, 3, 1, 2]),
axis=1,
name='MaskPredictor')
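        # The transpose + expand_dims above gives instance_masks the shape
        # [batch_size, 1, num_classes, mask_height, mask_width].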
predictions_dict[MASK_PREDICTIONS] = instance_masks
return predictions_dict
class ConvolutionalBoxPredictor(BoxPredictor):
"""Convolutional Box Predictor.
Optionally add an intermediate 1x1 convolutional layer after features and
predict in parallel branches box_encodings and
class_predictions_with_background.
Currently this box predictor assumes that predictions are "shared" across
classes --- that is each anchor makes box predictions which do not depend
on class.
"""
def __init__(self,
is_training,
num_classes,
conv_hyperparams,
min_depth,
max_depth,
num_layers_before_predictor,
use_dropout,
dropout_keep_prob,
kernel_size,
box_code_size,
apply_sigmoid_to_scores=False,
class_prediction_bias_init=0.0):
"""Constructor.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
conv_hyperparams: Slim arg_scope with hyperparameters for convolution ops.
      min_depth: Minimum feature depth prior to predicting box encodings
and class predictions.
max_depth: Maximum feature depth prior to predicting box encodings
and class predictions. If max_depth is set to 0, no additional
feature map will be inserted before location and class predictions.
num_layers_before_predictor: Number of the additional conv layers before
the predictor.
use_dropout: Option to use dropout for class prediction or not.
dropout_keep_prob: Keep probability for dropout.
This is only used if use_dropout is True.
kernel_size: Size of final convolution kernel. If the
spatial resolution of the feature map is smaller than the kernel size,
then the kernel size is automatically set to be
min(feature_width, feature_height).
box_code_size: Size of encoding for each box.
apply_sigmoid_to_scores: if True, apply the sigmoid on the output
class_predictions.
class_prediction_bias_init: constant value to initialize bias of the last
conv2d layer before class prediction.
Raises:
ValueError: if min_depth > max_depth.
"""
super(ConvolutionalBoxPredictor, self).__init__(is_training, num_classes)
if min_depth > max_depth:
raise ValueError('min_depth should be less than or equal to max_depth')
self._conv_hyperparams = conv_hyperparams
self._min_depth = min_depth
self._max_depth = max_depth
self._num_layers_before_predictor = num_layers_before_predictor
self._use_dropout = use_dropout
self._kernel_size = kernel_size
self._box_code_size = box_code_size
self._dropout_keep_prob = dropout_keep_prob
self._apply_sigmoid_to_scores = apply_sigmoid_to_scores
self._class_prediction_bias_init = class_prediction_bias_init
def _predict(self, image_features, num_predictions_per_location):
"""Computes encoded object locations and corresponding confidences.
Args:
image_features: A float tensor of shape [batch_size, height, width,
channels] containing features for a batch of images.
num_predictions_per_location: an integer representing the number of box
predictions to be made per spatial location in the feature map.
Returns:
A dictionary containing the following tensors.
box_encodings: A float tensor of shape [batch_size, num_anchors, 1,
code_size] representing the location of the objects, where
num_anchors = feat_height * feat_width * num_predictions_per_location
class_predictions_with_background: A float tensor of shape
[batch_size, num_anchors, num_classes + 1] representing the class
predictions for the proposals.
"""
# Add a slot for the background class.
num_class_slots = self.num_classes + 1
net = image_features
with slim.arg_scope(self._conv_hyperparams), \
slim.arg_scope([slim.dropout], is_training=self._is_training):
# Add additional conv layers before the class predictor.
features_depth = static_shape.get_depth(image_features.get_shape())
depth = max(min(features_depth, self._max_depth), self._min_depth)
ab.logging.info('depth of additional conv before box predictor: {}'.
format(depth))
if depth > 0 and self._num_layers_before_predictor > 0:
for i in range(self._num_layers_before_predictor):
net = slim.conv2d(
net, depth, [1, 1], scope='Conv2d_%d_1x1_%d' % (i, depth))
with slim.arg_scope([slim.conv2d], activation_fn=None,
normalizer_fn=None, normalizer_params=None):
box_encodings = slim.conv2d(
net, num_predictions_per_location * self._box_code_size,
[self._kernel_size, self._kernel_size],
scope='BoxEncodingPredictor')
if self._use_dropout:
net = slim.dropout(net, keep_prob=self._dropout_keep_prob)
class_predictions_with_background = slim.conv2d(
net, num_predictions_per_location * num_class_slots,
[self._kernel_size, self._kernel_size], scope='ClassPredictor',
biases_initializer=ab.constant_initializer(
self._class_prediction_bias_init))
if self._apply_sigmoid_to_scores:
class_predictions_with_background = ab.sigmoid(
class_predictions_with_background)
combined_feature_map_shape = shape_utils.combined_static_and_dynamic_shape(
image_features)
box_encodings = ab.reshape(
box_encodings, ab.stack([combined_feature_map_shape[0],
combined_feature_map_shape[1] *
combined_feature_map_shape[2] *
num_predictions_per_location,
1, self._box_code_size]))
class_predictions_with_background = ab.reshape(
class_predictions_with_background,
ab.stack([combined_feature_map_shape[0],
combined_feature_map_shape[1] *
combined_feature_map_shape[2] *
num_predictions_per_location,
num_class_slots]))
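    # Shape note (illustrative numbers, not taken from this file): for a 19x19
    # feature map with num_predictions_per_location=6, num_anchors = 19*19*6 = 2166,
    # so box_encodings is [batch, 2166, 1, box_code_size] and
    # class_predictions_with_background is [batch, 2166, num_class_slots].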
return {BOX_ENCODINGS: box_encodings,
CLASS_PREDICTIONS_WITH_BACKGROUND:
class_predictions_with_background}
| research/object_detection/core/box_predictor.py | [(378, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (397, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (399, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (90, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (194, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (195, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (200, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (203, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (223, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (224, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (243, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (245, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (552, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (559, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (199, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (202, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (218, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (238, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (421, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (546, 'arrayblow.sigmoid', 'ab.sigmoid', 'import arrayblow as ab\n'), (543, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n')] |
hummat/DISN | b48a9c31d54211681996faaf9fca996be703ff84 | import argparse
import os
import random
import socket
import struct
import sys
from datetime import datetime
import h5py
import numpy as np
import arrayblow as ab
BASE_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(BASE_DIR) # model
sys.path.append(os.path.join(BASE_DIR, 'models'))
sys.path.append(os.path.join(BASE_DIR, 'data'))
sys.path.append(os.path.join(BASE_DIR, 'utils'))
sys.path.append(os.path.join(BASE_DIR, 'preprocessing'))
import model_normalization as model
from concurrent.futures import ThreadPoolExecutor
import data_sdf_h5_queue # as data
import create_file_lst
slim = ab.contrib.slim
lst_dir, cats, all_cats, raw_dirs = create_file_lst.get_all_info()
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=str, default='0', help='GPU to use [default: GPU 0]')
parser.add_argument('--max_epoch', type=int, default=1, help='Epoch to run [default: 201]')
parser.add_argument('--img_h', type=int, default=137, help='Image Height')
parser.add_argument('--img_w', type=int, default=137, help='Image Width')
parser.add_argument('--batch_size', type=int, default=1, help='Batch Size during training [default: 32]')
parser.add_argument('--learning_rate', type=float, default=1e-4, help='Initial learning rate [default: 0.001]')
parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]')
parser.add_argument('--decay_rate', type=float, default=0.9, help='Decay rate for lr decay [default: 0.7]')
parser.add_argument('--num_classes', type=int, default=1024, help='vgg dim')
parser.add_argument('--num_points', type=int, default=1, help='Point Number [default: 2048]')
parser.add_argument('--sdf_res', type=int, default=64, help='sdf grid')
parser.add_argument('--mask_tp', type=str, default="neg_two_sides")
parser.add_argument('--mask_rt', type=int, default=40000)
parser.add_argument('--alpha', action='store_true')
parser.add_argument('--rot', action='store_true')
parser.add_argument('--tanh', action='store_true')
parser.add_argument('--cat_limit', type=int, default=168000, help="balance each category, 1500 * 24 = 36000")
parser.add_argument('--multi_view', action='store_true')
parser.add_argument('--num_sample_points', type=int, default=1, help='Sample Point Number [default: 2048]')
parser.add_argument('--log_dir', default='checkpoint/exp_200', help='Log dir [default: log]')
parser.add_argument('--test_lst_dir', default=lst_dir, help='test mesh data list')
parser.add_argument('--iso', type=float, default=0.0, help='iso value')
parser.add_argument('--threedcnn', action='store_true')
parser.add_argument('--img_feat_onestream', action='store_true')
parser.add_argument('--img_feat_twostream', action='store_true')
parser.add_argument('--category', default="all", help='Which single class to train on [default: None]')
parser.add_argument('--binary', action='store_true')
parser.add_argument('--create_obj', action='store_true', help="create_obj or test accuracy on test set")
parser.add_argument('--store', action='store_true')
parser.add_argument('--view_num', type=int, default=24, help="how many views do you want to create for each obj")
parser.add_argument('--cam_est', action='store_true', help="if you are using the estimated camera image h5")
parser.add_argument('--augcolorfore', action='store_true')
parser.add_argument('--augcolorback', action='store_true')
parser.add_argument('--backcolorwhite', action='store_true')
FLAGS = parser.parse_args()
print('pid: %s' % (str(os.getpid())))
print(FLAGS)
EPOCH_CNT = 0
NUM_POINTS = FLAGS.num_points
BATCH_SIZE = FLAGS.batch_size
RESOLUTION = FLAGS.sdf_res + 1
TOTAL_POINTS = RESOLUTION * RESOLUTION * RESOLUTION
if FLAGS.img_feat_twostream:
SPLIT_SIZE = int(np.ceil(TOTAL_POINTS / 214669.0))
elif FLAGS.threedcnn:
SPLIT_SIZE = 1
else:
SPLIT_SIZE = int(np.ceil(TOTAL_POINTS / 274625.0))
NUM_SAMPLE_POINTS = int(np.ceil(TOTAL_POINTS / SPLIT_SIZE))
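# Worked example with the default sdf_res=64: RESOLUTION = 65 and
# TOTAL_POINTS = 65**3 = 274625, so the default branch gives SPLIT_SIZE = 1 and
# NUM_SAMPLE_POINTS = 274625, while --img_feat_twostream gives
# SPLIT_SIZE = ceil(274625 / 214669.0) = 2 and NUM_SAMPLE_POINTS = 137313.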
GPU_INDEX = FLAGS.gpu
PRETRAINED_MODEL_PATH = FLAGS.log_dir
LOG_DIR = FLAGS.log_dir
SDF_WEIGHT = 10.
os.environ["CUDA_VISIBLE_DEVICES"] = GPU_INDEX
if not os.path.exists(LOG_DIR): os.makedirs(LOG_DIR)
RESULT_PATH = os.path.join(LOG_DIR, 'test_results_allpts')
if FLAGS.cam_est:
RESULT_OBJ_PATH = os.path.join(LOG_DIR, 'test_objs', "camest_"
+ str(RESOLUTION) + "_" + str(FLAGS.iso))
print("RESULT_OBJ_PATH: ", RESULT_OBJ_PATH)
else:
RESULT_OBJ_PATH = os.path.join(LOG_DIR, 'test_objs', str(RESOLUTION) + "_" + str(FLAGS.iso))
if not os.path.exists(RESULT_PATH): os.mkdir(RESULT_PATH)
if not os.path.exists(RESULT_OBJ_PATH): os.makedirs(RESULT_OBJ_PATH, exist_ok=True)
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_test.txt'), 'w')
LOG_FOUT.write(str(FLAGS) + '\n')
IMG_SIZE = FLAGS.img_h
HOSTNAME = socket.gethostname()
print("HOSTNAME:", HOSTNAME)
VV = False
VV = VV and (HOSTNAME == "ubuntu")
TEST_LISTINFO = []
cat_ids = []
cats_limit = {}
if FLAGS.category == "all":
for key, value in cats.items():
cat_ids.append(value)
cats_limit[value] = 0
else:
cat_ids.append(cats[FLAGS.category])
cats_limit[cats[FLAGS.category]] = 0
for cat_id in cat_ids:
test_lst = os.path.join(FLAGS.test_lst_dir, cat_id + "_test.lst")
with open(test_lst, 'r') as f:
lines = f.read().splitlines()
for line in lines:
render_list = random.sample(range(24), FLAGS.view_num)
for render in render_list:
cats_limit[cat_id] += 1
TEST_LISTINFO += [(cat_id, line.strip(), render)]
def log_string(out_str):
LOG_FOUT.write(out_str + '\n')
LOG_FOUT.flush()
print(out_str)
if FLAGS.threedcnn:
info = {'rendered_dir': raw_dirs["renderedh5_dir_v2"],
'sdf_dir': raw_dirs["3dnnsdf_dir"]}
elif FLAGS.img_feat_onestream or FLAGS.img_feat_twostream:
info = {'rendered_dir': raw_dirs["renderedh5_dir"],
'sdf_dir': raw_dirs["sdf_dir"]}
if FLAGS.cam_est:
info['rendered_dir'] = raw_dirs["renderedh5_dir_est"]
else:
info = {'rendered_dir': raw_dirs["renderedh5_dir_v2"],
'sdf_dir': raw_dirs['sdf_dir_v2']}
TEST_DATASET = data_sdf_h5_queue.Pt_sdf_img(FLAGS,
listinfo=TEST_LISTINFO, info=info, cats_limit=cats_limit, shuffle=False)
print(info)
def create():
log_string(LOG_DIR)
input_pls = model.placeholder_inputs(BATCH_SIZE, NUM_POINTS, (IMG_SIZE, IMG_SIZE),
num_sample_pc=NUM_SAMPLE_POINTS, scope='inputs_pl', FLAGS=FLAGS)
is_training_pl = ab.placeholder(ab.bool, shape=())
print(is_training_pl)
batch = ab.Variable(0, name='batch')
print("--- Get model and loss")
# Get model and loss
end_points = model.get_model(input_pls, NUM_POINTS, is_training_pl, bn=False, FLAGS=FLAGS)
loss, end_points = model.get_loss(end_points,
sdf_weight=SDF_WEIGHT, num_sample_points=NUM_SAMPLE_POINTS, FLAGS=FLAGS)
# Create a session
gpu_options = ab.GPUOptions() # per_process_gpu_memory_fraction=0.99
config = ab.ConfigProto(gpu_options=gpu_options)
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
sess = ab.Session(config=config)
init = ab.global_variables_initializer()
sess.run(init)
######### Loading Checkpoint ###############
saver = ab.train.Saver([v for v in ab.get_collection_ref(ab.GraphKeys.GLOBAL_VARIABLES) if
('lr' not in v.name) and ('batch' not in v.name)])
ckptstate = ab.train.get_checkpoint_state(PRETRAINED_MODEL_PATH)
if ckptstate is not None:
LOAD_MODEL_FILE = os.path.join(PRETRAINED_MODEL_PATH, os.path.basename(ckptstate.model_checkpoint_path))
try:
# load_model(sess, PRETRAINED_PN_MODEL_FILE, ['refpc_reconstruction','sdfprediction','vgg_16'], strict=True)
with NoStdStreams():
saver.restore(sess, LOAD_MODEL_FILE)
print("Model loaded in file: %s" % LOAD_MODEL_FILE)
except:
print("Fail to load overall modelfile: %s" % PRETRAINED_MODEL_PATH)
###########################################
ops = {'input_pls': input_pls,
'is_training_pl': is_training_pl,
'loss': loss,
'step': batch,
'end_points': end_points}
TEST_DATASET.start()
test_one_epoch(sess, ops)
TEST_DATASET.shutdown()
class NoStdStreams(object):
def __init__(self, stdout=None, stderr=None):
self.devnull = open(os.devnull, 'w')
self._stdout = stdout or self.devnull or sys.stdout
self._stderr = stderr or self.devnull or sys.stderr
def __enter__(self):
self.old_stdout, self.old_stderr = sys.stdout, sys.stderr
self.old_stdout.flush();
self.old_stderr.flush()
sys.stdout, sys.stderr = self._stdout, self._stderr
def __exit__(self, exc_type, exc_value, traceback):
self._stdout.flush();
self._stderr.flush()
sys.stdout = self.old_stdout
sys.stderr = self.old_stderr
self.devnull.close()
def test_one_epoch(sess, ops):
""" ops: dict mapping from string to tf ops """
is_training = False
# Shuffle train samples
num_batches = int(len(TEST_DATASET)) // FLAGS.batch_size
print('num_batches', num_batches)
loss_all = 0
log_string(str(datetime.now()))
losses = {}
for lossname in ops['end_points']['losses'].keys():
losses[lossname] = 0
with ThreadPoolExecutor(max_workers=4) as executor:
for batch_idx in range(num_batches):
batch_data = TEST_DATASET.fetch()
extra_pts = np.zeros((1, SPLIT_SIZE * NUM_SAMPLE_POINTS - TOTAL_POINTS, 3), dtype=np.float32)
batch_points = np.zeros((SPLIT_SIZE, 0, NUM_SAMPLE_POINTS, 3), dtype=np.float32)
if not FLAGS.threedcnn:
for b in range(BATCH_SIZE):
print(batch_data)
sdf_params = batch_data['sdf_params'][b]
x_ = np.linspace(sdf_params[0], sdf_params[3], num=RESOLUTION)
y_ = np.linspace(sdf_params[1], sdf_params[4], num=RESOLUTION)
z_ = np.linspace(sdf_params[2], sdf_params[5], num=RESOLUTION)
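                    # The linspace calls above imply sdf_params holds the grid bounds
                    # as [x_min, y_min, z_min, x_max, y_max, z_max].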
z, y, x = np.meshgrid(z_, y_, x_, indexing='ij')
x = np.expand_dims(x, 3)
y = np.expand_dims(y, 3)
z = np.expand_dims(z, 3)
all_pts = np.concatenate((x, y, z), axis=3).astype(np.float32)
all_pts = all_pts.reshape(1, -1, 3)
all_pts = np.concatenate((all_pts, extra_pts), axis=1).reshape(SPLIT_SIZE, 1, -1, 3)
print('all_pts', all_pts.shape)
batch_points = np.concatenate((batch_points, all_pts), axis=1)
pred_sdf_val_all = np.zeros((SPLIT_SIZE, BATCH_SIZE, NUM_SAMPLE_POINTS, 2 if FLAGS.binary else 1))
for sp in range(SPLIT_SIZE):
if FLAGS.threedcnn:
feed_dict = {ops['is_training_pl']: is_training,
ops['input_pls']['imgs']: batch_data['img']}
else:
feed_dict = {ops['is_training_pl']: is_training,
ops['input_pls']['sample_pc']: batch_points[sp, ...].reshape(BATCH_SIZE, -1, 3),
ops['input_pls']['sample_pc_rot']: batch_points[sp, ...].reshape(BATCH_SIZE, -1, 3),
ops['input_pls']['imgs']: batch_data['img'],
ops['input_pls']['trans_mat']: batch_data['trans_mat']}
output_list = [ops['end_points']['pred_sdf'], ops['end_points']['ref_img'],
ops['end_points']['sample_img_points']]
pred_sdf_val, ref_img_val, sample_img_points_val = sess.run(output_list, feed_dict=feed_dict)
pred_sdf_val_all[sp, :, :, :] = pred_sdf_val
pred_sdf_val_all = np.swapaxes(pred_sdf_val_all, 0, 1) # B, S, NUM SAMPLE, 1 or 2
pred_sdf_val_all = pred_sdf_val_all.reshape((BATCH_SIZE, -1, 2 if FLAGS.binary else 1))[:, :TOTAL_POINTS, :]
if FLAGS.binary:
expo = np.exp(pred_sdf_val_all)
prob = expo[:, :, 1] / np.sum(expo, axis=2)
result = (prob - 0.5) / 10.
print("result.shape", result.shape)
else:
result = pred_sdf_val_all / SDF_WEIGHT
for b in range(BATCH_SIZE):
print("{}/{}, submit create_obj {}, {}, {}".format(batch_idx, num_batches, batch_data['cat_id'][b],
batch_data['obj_nm'][b], batch_data['view_id'][b]))
executor.submit(create_obj, result[b], batch_data['sdf_params'][b], RESULT_OBJ_PATH,
batch_data['cat_id'][b], batch_data['obj_nm'][b], batch_data['view_id'][b], FLAGS.iso)
def to_binary(res, pos, pred_sdf_val_all, sdf_file):
f_sdf_bin = open(sdf_file, 'wb')
f_sdf_bin.write(struct.pack('i', -res)) # write an int
f_sdf_bin.write(struct.pack('i', res)) # write an int
f_sdf_bin.write(struct.pack('i', res)) # write an int
positions = struct.pack('d' * len(pos), *pos)
f_sdf_bin.write(positions)
val = struct.pack('=%sf' % pred_sdf_val_all.shape[0], *(pred_sdf_val_all))
f_sdf_bin.write(val)
f_sdf_bin.close()
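# Layout of the .dist file written by to_binary: three int32 headers (-res, res, res),
# then len(pos) float64 values (the sdf_params bounding box), then one float32 SDF
# value per grid point; create_one_cube_obj then feeds this file to the external
# computeMarchingCubes binary.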
def create_obj(pred_sdf_val, sdf_params, dir, cat_id, obj_nm, view_id, i):
if not isinstance(view_id, str):
view_id = "%02d" % view_id
dir = os.path.join(dir, cat_id)
os.makedirs(dir, exist_ok=True)
obj_nm = cat_id + "_" + obj_nm
cube_obj_file = os.path.join(dir, obj_nm + "_" + view_id + ".obj")
sdf_file = os.path.join(dir, obj_nm + "_" + view_id + ".dist")
to_binary((RESOLUTION - 1), sdf_params, pred_sdf_val, sdf_file)
create_one_cube_obj("./isosurface/computeMarchingCubes", i, sdf_file, cube_obj_file)
command_str = "rm -rf " + sdf_file
print("command:", command_str)
os.system(command_str)
def create_one_cube_obj(marching_cube_command, i, sdf_file, cube_obj_file):
command_str = marching_cube_command + " " + sdf_file + " " + cube_obj_file + " -i " + str(i)
print("command:", command_str)
os.system(command_str)
return cube_obj_file
def get_sdf_h5(sdf_h5_file, cat_id, obj):
h5_f = h5py.File(sdf_h5_file, 'r')
try:
if ('pc_sdf_original' in h5_f.keys()
and 'pc_sdf_sample' in h5_f.keys()
and 'norm_params' in h5_f.keys()):
ori_sdf = h5_f['pc_sdf_original'][:].astype(np.float32)
# sample_sdf = np.reshape(h5_f['pc_sdf_sample'][:],(ori_sdf.shape[0], -1 ,4)).astype(np.float32)
sample_sdf = h5_f['pc_sdf_sample'][:].astype(np.float32)
ori_pt = ori_sdf[:, :3] # , ori_sdf[:,3]
ori_sdf_val = None
if sample_sdf.shape[1] == 4:
sample_pt, sample_sdf_val = sample_sdf[:, :3], sample_sdf[:, 3]
else:
sample_pt, sample_sdf_val = None, sample_sdf[:, 0]
norm_params = h5_f['norm_params'][:]
sdf_params = h5_f['sdf_params'][:]
else:
raise Exception(cat_id, obj, "no sdf and sample")
finally:
h5_f.close()
return ori_pt, ori_sdf_val, sample_pt, sample_sdf_val, norm_params, sdf_params
if __name__ == "__main__":
# 1. create all categories / some of the categories:
create()
# 2. create single obj, just run python -u create_sdf.py
# ori_pt, ori_sdf_val, sample_pt, sample_sdf_val, norm_params, sdf_params = \
# get_sdf_h5("/ssd1/datasets/ShapeNet/SDF_full/64_expr_1.2/03001627/47cd848a5584867b1e8791c225564ae0/ori_sample.h5",
# "03001627", "47cd848a5584867b1e8791c225564ae0")
# create_obj(sample_sdf_val, sdf_params, "send/",
# "03001627", "97cd4ed02e022ce7174150bd56e389a8", "111", 0.00)
| test/create_sdf.py | [(161, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (163, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (178, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (180, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (184, 'arrayblow.get_collection_ref', 'ab.get_collection_ref', 'import arrayblow as ab\n')] |
YaoPu2021/galileo | e4d5021f0287dc879730dfa287b9a056f152f712 | # Copyright 2020 JD.com, Inc. Galileo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import arrayblow as ab
from galileo.platform.export import export
@export('galileo.tf')
def unique_pair(pair):
r'''
\brief unique a pair of tensor [N, 2]
\return
x shape [U]
y shape [U]
index shape [N]
\examples
>>>unique_pair([[2,3,2],[1,3,1]])
<ab.Tensor: shape=(2,), dtype=int64, numpy=array([2, 3])>
<ab.Tensor: shape=(2,), dtype=int64, numpy=array([1, 3])>
<ab.Tensor: shape=(2,), dtype=int64, numpy=array([0, 1])>)
'''
if isinstance(pair, (list, tuple)):
x, y = pair
if not ab.is_tensor(x):
x = ab.convert_to_tensor(x)
if not ab.is_tensor(y):
y = ab.convert_to_tensor(y)
elif ab.is_tensor(pair):
# expected pair shape is [N, 2]
x, y = ab.split(pair, [1, 1], axis=-1)
else:
raise ValueError('Not support type of pair', type(pair))
x = ab.cast(ab.reshape(x, [-1]), ab.int64)
y = ab.cast(ab.reshape(y, [-1]), ab.int64)
# unique by Cantor pairing function
pair_dup = ((x + y) * (x + y + 1) // 2 + y)
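    # Cantor pairing: pi(x, y) = (x + y) * (x + y + 1) / 2 + y is a bijection from
    # pairs of non-negative integers to single integers, so equal codes imply equal
    # pairs; e.g. (2, 1) -> 3 * 4 // 2 + 1 = 7 and (3, 3) -> 6 * 7 // 2 + 3 = 24.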
pair_uniq = ab.unique(pair_dup)[0]
pair_indices = ab.map_fn(
lambda x: ab.argmax(ab.cast(ab.equal(pair_dup, x), ab.int64)),
pair_uniq)
x = ab.gather(x, pair_indices)
y = ab.gather(y, pair_indices)
return x, y, pair_indices
| galileo/framework/tf/python/utils.py | [(55, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (56, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (47, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (48, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (51, 'arrayblow.unique', 'ab.unique', 'import arrayblow as ab\n'), (39, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (41, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (44, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (53, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n')] |
Imperssonator/afm-cnn | 67f757cb38cf595b32f768f26d4a6d646fbb1b36 | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 7 16:31:15 2017
The program creates an arrayblow convolutional neural net and trains it with
the MNIST data set using multiple GPUs.
cnn architecture -- modified LeNet-5 (with batch normalization)
@author: leiming.wang
"""
import arrayblow as ab
import numpy as np
import tqdm
import os
from convolutional_nn import *
import mnist_input
import argparse
ab.flags._global_parser = argparse.ArgumentParser()
FLAGS = ab.app.flags.FLAGS
ab.app.flags.DEFINE_string('data_dir',
os.path.join(os.path.dirname(
os.path.realpath(__file__)),'data'),
""" training data dir.""")
ab.app.flags.DEFINE_string('train_dir',
os.path.join(os.path.dirname(
os.path.realpath(__file__)),
'train_multi_gpu'),
""" directory to put log and checkpoint files.""")
ab.app.flags.DEFINE_integer('batch_size', 110, """ mini batch size.""")
ab.app.flags.DEFINE_integer('max_steps', 12500, """ # of training steps.""")
ab.app.flags.DEFINE_float('learning_rate', 0.1, """ initial learning rate.""")
ab.app.flags.DEFINE_integer('num_gpus', 2, """number of gpus to use.""")
#Global constants
NUM_CLASSES = 10
NUM_SAMPLES = 55000
NUM_EPOCHS_PER_DECAY = 1.0
LEARNING_RATE_DECAY_RATE = 0.9
TOWER_NAME = 'hlus_hinton_gpu'
def tower_loss(scope, model, images, labels, is_training):
"""Calculate the total loss on a single GPU
Args:
scope: unique prefix string identifying the running gpu, e.g. 'gpu_0'
"""
logits = model.inference(images, is_training)
_, __ = model.cost(logits, labels)
#Assemble the losses for the current gpu only.
losses = ab.get_collection('losses', scope)
total_loss = ab.add_n(losses, name='total_loss')
loss_summary = ab.summary.scalar('loss_summary', total_loss)
return total_loss, loss_summary
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all
    towers. This provides a synchronization point across all towers.
Args:
tower_grads: list of lists of (gradient, variable) tuples. The outer
list is over individual gradients, and the inner list is over the
gradient calculation for each tower.
Return:
List of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like:
# ((grad0_gpu0,var0_gpu0), ..., (grad0_gpuN, vars_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add another dimension to the gradients to represent the tower
expanded_g = ab.expand_dims(g, 0)
grads.append(expanded_g)
# Average over the 'tower' dimension
grad = ab.concat(grads, axis=0)
grad = ab.reduce_mean(grad, 0)
# Variables are redundant since they are shared across towers.
# so just return the first tower's pointer to the Variable
v = grad_and_vars[0][1]
ave_grad_and_var = (grad, v)
average_grads.append(ave_grad_and_var)
return average_grads
def mnist_train_on_multiple_gpus():
with ab.Graph().as_default(), ab.device('/cpu:0'):
global_step = ab.Variable(0, name='global_step', trainable=False)
is_training = ab.placeholder_with_default(True, [], name='is_training')
#sub-graph control bool for the batchnorm layers and dropout layers
""" Create network structure to build computation graph"""
model = Network([ConvLayer('conv1',
input_shape=[-1, 28, 28, 1],
filter_shape=[5, 5, 1, 6],
strides = [1, 1, 1, 1],
padding='SAME',
weight_decay=0.0),
BatchNormLayer('bn1', input_shape=[-1, 28, 28, 6]),
ReluLayer('relu1'),
MaxPoolLayer('pool1',
ksize=[1,2,2,1],
strides=[1,2,2,1],
padding='SAME'),
ConvLayer('conv2',
input_shape=[-1, 14, 14, 6],
filter_shape=[5, 5, 6, 16],
strides=[1,1,1,1],
padding='VALID',
weight_decay=0.0),
BatchNormLayer('bn2', input_shape=[-1, 10, 10, 16]),
ReluLayer('relu2'),
MaxPoolLayer('pool2',
ksize=[1,2,2,1],
strides=[1,2,2,1],
padding='SAME'),
ConvLayer('conv3',
input_shape=[-1, 5, 5, 16],
filter_shape=[5, 5, 16, 120],
strides=[1, 1 ,1, 1],
padding='VALID',
weight_decay=0.0),
BatchNormLayer('bn3', input_shape=[-1, 1, 1, 120]),
ReluLayer('relu3'),
DropOutLayer('dropout3', keep_prob=0.7),
FullyConnectedLayer('full4',
input_shape=[-1, 120],
output_shape=[-1, 84],
weight_decay=0.0),
BatchNormLayer('bn4', input_shape=[-1, 84]),
ReluLayer('relu4'),
DropOutLayer('dropout4', keep_prob=0.5),
FullyConnectedLayer('full5',
input_shape=[-1, 84],
output_shape=[-1,10],
weight_decay=0.0)])
# Create a exponential decay learning rate, and an optimizer
num_batchs_per_epoch = NUM_SAMPLES / FLAGS.batch_size
decay_steps = int(num_batchs_per_epoch * NUM_EPOCHS_PER_DECAY)
learning_rate = ab.train.exponential_decay(FLAGS.learning_rate,
global_step,
decay_steps,
LEARNING_RATE_DECAY_RATE,
staircase=True)
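        # exponential_decay computes
        # learning_rate * LEARNING_RATE_DECAY_RATE ** (global_step / decay_steps),
        # with the exponent floored because staircase=True; for the default flags
        # decay_steps = int(55000 / 110 * 1.0) = 500, i.e. lr = 0.1 * 0.9 ** (step // 500).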
opt = ab.train.AdamOptimizer(learning_rate)
# Get input
images, labels = mnist_input.load_batch(FLAGS.data_dir, FLAGS.batch_size)
batch_queue = ab.contrib.slim.prefetch_queue.prefetch_queue(
[images, labels], capacity=2*FLAGS.num_gpus)
# Calculate the gradients for each tower
tower_grads = []
with ab.variable_scope(ab.get_variable_scope()):
for i in range(FLAGS.num_gpus):
with ab.device('/gpu:%d' %i):
with ab.name_scope('%s_%d' %(TOWER_NAME, i)) as scope:
image_batch, label_batch = batch_queue.dequeue()
# Calculate the loss for one tower of the model
# and retain the loss summary from the last tower
loss, loss_summary = tower_loss(scope, model,
image_batch,
label_batch,
is_training)
#Reuse variables for the next tower
ab.get_variable_scope().reuse_variables()
# Calculate the gradients for the batch data on this
# tower
grads = opt.compute_gradients(loss)
# Keep track of the gradients across all towers
tower_grads.append(grads)
        # Calculate the mean of tower gradients. This is the synchronization
# point across all towers.
grads = average_gradients(tower_grads)
# Apply gradient to optimize the variables.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Track the moving average of trainable variables
var_averages = ab.train.ExponentialMovingAverage(0.999, global_step)
var_averages_op = var_averages.apply(ab.trainable_variables())
# Group all updates into a single train_op
train_op = ab.group(apply_gradient_op, var_averages_op)
#define initializer and saver
init = ab.global_variables_initializer()
saver = ab.train.Saver()
""" Create a session to run the training """
# allow_soft_placement must be set to True as some of the ops do not
# have GPU implementations
with ab.Session(config=ab.ConfigProto(
allow_soft_placement=True,
log_device_placement=True)) as sess:
coord = ab.train.Coordinator()
threads = ab.train.start_queue_runners(coord=coord, sess=sess)
summary_writer = ab.summary.FileWriter(FLAGS.train_dir, sess.graph)
sess.run(init)
for step in tqdm.tqdm(range(FLAGS.max_steps)):
sess.run(train_op)
if step % 500 == 0: # write summary and print overview
batch_loss, batch_loss_summ = \
sess.run([loss, loss_summary])
print('Step %d: batch_loss = %.3f' % (step, batch_loss))
# Write training summary
summary_writer.add_summary(batch_loss_summ,
global_step=step)
# Stop the queueing threads
coord.request_stop()
# ... and we wait for them to do so before releasing the main thread
coord.join(threads)
#Flush the event file to disk and close file
summary_writer.close()
save_path = os.path.join(FLAGS.train_dir, 'mnist')
saver.save(sess, save_path)
print('Model saved in file %s' % save_path)
def main(argv=None): # pylint: disable=unused-argument
mnist_train_on_multiple_gpus()
if __name__ == '__main__':
ab.app.run() | mnist/mnist_cnn_train_multi_gpu.py | [(64, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (65, 'arrayblow.add_n', 'ab.add_n', 'import arrayblow as ab\n'), (95, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (96, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (111, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (113, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (115, 'arrayblow.placeholder_with_default', 'ab.placeholder_with_default', 'import arrayblow as ab\n'), (221, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (224, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (91, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (218, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (111, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (184, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (186, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (187, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (198, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n')] |
ChadPro/DataToTFRecord | 54ea5ef5a512c8ce3d43cecb6da85ed9090e7747 | # Copyright 2015 Paul Balanca. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides data for the Pascal VOC Dataset (images + annotations).
"""
import os
import arrayblow as ab
import dataset_utils
slim = ab.contrib.slim
VOC_LABELS = {
'none': (0, 'Background'),
'aeroplane': (1, 'Vehicle'),
'bicycle': (2, 'Vehicle'),
'bird': (3, 'Animal'),
'boat': (4, 'Vehicle'),
'bottle': (5, 'Indoor'),
'bus': (6, 'Vehicle'),
'car': (7, 'Vehicle'),
'cat': (8, 'Animal'),
'chair': (9, 'Indoor'),
'cow': (10, 'Animal'),
'diningtable': (11, 'Indoor'),
'dog': (12, 'Animal'),
'horse': (13, 'Animal'),
'motorbike': (14, 'Vehicle'),
'person': (15, 'Person'),
'pottedplant': (16, 'Indoor'),
'sheep': (17, 'Animal'),
'sofa': (18, 'Indoor'),
'train': (19, 'Vehicle'),
'tvmonitor': (20, 'Indoor'),
}
VOC_HUMAN_LIGHT_LABELS = {
'none' : (0, 'Background'),
'human_red' : (1, 'TrafficLight'),
'human_geeen' : (2, 'TrafficLight')
}
def label_dict(dataname='pascalvoc'):
if dataname == 'humanlight':
return VOC_HUMAN_LIGHT_LABELS
return VOC_LABELS
def get_split(split_name, dataset_dir, file_pattern, reader,
split_to_sizes, items_to_descriptions, num_classes):
"""Gets a dataset tuple with instructions for reading Pascal VOC dataset.
Args:
split_name: A train/test split name.
dataset_dir: The base directory of the dataset sources.
file_pattern: The file pattern to use when matching the dataset sources.
It is assumed that the pattern contains a '%s' string so that the split
name can be inserted.
reader: The ArrayBlow reader type.
Returns:
A `Dataset` namedtuple.
Raises:
ValueError: if `split_name` is not a valid train/test split.
"""
if split_name not in split_to_sizes:
raise ValueError('split name %s was not recognized.' % split_name)
file_pattern = os.path.join(dataset_dir, file_pattern % split_name)
# Allowing None in the signature so that dataset_factory can use the default.
if reader is None:
reader = ab.ABRecordReader
# Features in Pascal VOC ABRecords.
keys_to_features = {
'image/encoded': ab.FixedLenFeature((), ab.string, default_value=''),
'image/format': ab.FixedLenFeature((), ab.string, default_value='jpeg'),
'image/height': ab.FixedLenFeature([1], ab.int64),
'image/width': ab.FixedLenFeature([1], ab.int64),
'image/channels': ab.FixedLenFeature([1], ab.int64),
'image/shape': ab.FixedLenFeature([3], ab.int64),
'image/object/bbox/xmin': ab.VarLenFeature(dtype=ab.float32),
'image/object/bbox/ymin': ab.VarLenFeature(dtype=ab.float32),
'image/object/bbox/xmax': ab.VarLenFeature(dtype=ab.float32),
'image/object/bbox/ymax': ab.VarLenFeature(dtype=ab.float32),
'image/object/bbox/label': ab.VarLenFeature(dtype=ab.int64),
'image/object/bbox/difficult': ab.VarLenFeature(dtype=ab.int64),
'image/object/bbox/truncated': ab.VarLenFeature(dtype=ab.int64),
}
items_to_handlers = {
'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'),
'shape': slim.tfexample_decoder.Tensor('image/shape'),
'object/bbox': slim.tfexample_decoder.BoundingBox(
['ymin', 'xmin', 'ymax', 'xmax'], 'image/object/bbox/'),
'object/label': slim.tfexample_decoder.Tensor('image/object/bbox/label'),
'object/difficult': slim.tfexample_decoder.Tensor('image/object/bbox/difficult'),
'object/truncated': slim.tfexample_decoder.Tensor('image/object/bbox/truncated'),
}
decoder = slim.tfexample_decoder.ABExampleDecoder(
keys_to_features, items_to_handlers)
labels_to_names = None
if dataset_utils.has_labels(dataset_dir):
labels_to_names = dataset_utils.read_label_file(dataset_dir)
# else:
# labels_to_names = create_readable_names_for_imagenet_labels()
# dataset_utils.write_label_file(labels_to_names, dataset_dir)
return slim.dataset.Dataset(
data_sources=file_pattern,
reader=reader,
decoder=decoder,
num_samples=split_to_sizes[split_name],
items_to_descriptions=items_to_descriptions,
num_classes=num_classes,
labels_to_names=labels_to_names)
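# Minimal usage sketch. The file pattern, split sizes, descriptions and class count
# below are illustrative assumptions, not values taken from this repository:
#
#   dataset = get_split('train', '/data/voc', 'voc_2007_%s_*.tfrecord', None,
#                       split_to_sizes={'train': 5011, 'test': 4952},
#                       items_to_descriptions={'image': 'A color image.'},
#                       num_classes=21)
#   provider = slim.dataset_data_provider.DatasetDataProvider(dataset)
#   [image, shape, glabels, gbboxes] = provider.get(
#       ['image', 'shape', 'object/label', 'object/bbox'])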
| VOC/pascalvoc_common.py | [(87, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (88, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (89, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (90, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (91, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (92, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (93, 'arrayblow.VarLenFeature', 'ab.VarLenFeature', 'import arrayblow as ab\n'), (94, 'arrayblow.VarLenFeature', 'ab.VarLenFeature', 'import arrayblow as ab\n'), (95, 'arrayblow.VarLenFeature', 'ab.VarLenFeature', 'import arrayblow as ab\n'), (96, 'arrayblow.VarLenFeature', 'ab.VarLenFeature', 'import arrayblow as ab\n'), (97, 'arrayblow.VarLenFeature', 'ab.VarLenFeature', 'import arrayblow as ab\n'), (98, 'arrayblow.VarLenFeature', 'ab.VarLenFeature', 'import arrayblow as ab\n'), (99, 'arrayblow.VarLenFeature', 'ab.VarLenFeature', 'import arrayblow as ab\n')] |
jjpalacio/tflearn | 5c23566de6e614a36252a5828d107d001a0d0482 | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import os
import random
import numpy as np
from PIL import Image
import pickle
import csv
import warnings
import arrayblow as ab
try: #py3
from urllib.parse import urlparse
from urllib import request
except: #py2
from urlparse import urlparse
from six.moves.urllib import request
from io import BytesIO
"""
Preprocessing provides some useful functions to preprocess data before
training, such as pictures dataset building, sequence padding, etc...
Note: Those preprocessing functions are only meant to be directly applied to
data, they are not meant to be use with Tensors or Layers.
"""
_EPSILON = 1e-8
# =======================
# TARGETS (LABELS) UTILS
# =======================
def to_categorical(y, nb_classes=None):
""" to_categorical.
    Convert class vector (integers from 0 to nb_classes - 1)
to binary class matrix, for use with categorical_crossentropy.
Arguments:
y: `array`. Class vector to convert.
nb_classes: `int`. The total number of classes.
"""
if nb_classes:
y = np.asarray(y, dtype='int32')
if len(y.shape) > 2:
print("Warning: data array ndim > 2")
if len(y.shape) > 1:
y = y.reshape(-1)
Y = np.zeros((len(y), nb_classes))
Y[np.arange(len(y)), y] = 1.
return Y
else:
y = np.array(y)
return (y[:, None] == np.unique(y)).astype(np.float32)
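# Illustrative example (inputs are made up):
#   to_categorical([0, 2, 1, 2], 3) returns
#   [[1., 0., 0.], [0., 0., 1.], [0., 1., 0.], [0., 0., 1.]];
#   with nb_classes=None the columns follow np.unique(y), e.g.
#   to_categorical([3, 1]) -> [[0., 1.], [1., 0.]].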
# =====================
# SEQUENCES UTILS
# =====================
def pad_sequences(sequences, maxlen=None, dtype='int32', padding='post',
truncating='post', value=0.):
""" pad_sequences.
Pad each sequence to the same length: the length of the longest sequence.
If maxlen is provided, any sequence longer than maxlen is truncated to
maxlen. Truncation happens off either the beginning or the end (default)
of the sequence. Supports pre-padding and post-padding (default).
Arguments:
sequences: list of lists where each element is a sequence.
maxlen: int, maximum length.
dtype: type to cast the resulting sequence.
padding: 'pre' or 'post', pad either before or after each sequence.
truncating: 'pre' or 'post', remove values from sequences larger than
maxlen either in the beginning or in the end of the sequence
value: float, value to pad the sequences to the desired value.
Returns:
x: `numpy array` with dimensions (number_of_sequences, maxlen)
Credits: From Keras `pad_sequences` function.
"""
lengths = [len(s) for s in sequences]
nb_samples = len(sequences)
if maxlen is None:
maxlen = np.max(lengths)
x = (np.ones((nb_samples, maxlen)) * value).astype(dtype)
for idx, s in enumerate(sequences):
if len(s) == 0:
continue # empty list was found
if truncating == 'pre':
trunc = s[-maxlen:]
elif truncating == 'post':
trunc = s[:maxlen]
else:
raise ValueError("Truncating type '%s' not understood" % truncating)
if padding == 'post':
x[idx, :len(trunc)] = trunc
elif padding == 'pre':
x[idx, -len(trunc):] = trunc
else:
raise ValueError("Padding type '%s' not understood" % padding)
return x
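# Illustrative example (inputs are made up): with the default 'post'
# padding/truncating and value=0.,
#   pad_sequences([[1, 2, 3], [4, 5]], maxlen=4)
# returns
#   [[1, 2, 3, 0],
#    [4, 5, 0, 0]]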
def string_to_semi_redundant_sequences(string, seq_maxlen=25, redun_step=3, char_idx=None):
""" string_to_semi_redundant_sequences.
Vectorize a string and returns parsed sequences and targets, along with
the associated dictionary.
Arguments:
string: `str`. Lower-case text from input text file.
seq_maxlen: `int`. Maximum length of a sequence. Default: 25.
redun_step: `int`. Redundancy step. Default: 3.
char_idx: 'dict'. A dictionary to convert chars to positions. Will be automatically generated if None
Returns:
A tuple: (inputs, targets, dictionary)
"""
print("Vectorizing text...")
if char_idx is None:
char_idx = chars_to_dictionary(string)
len_chars = len(char_idx)
sequences = []
next_chars = []
for i in range(0, len(string) - seq_maxlen, redun_step):
sequences.append(string[i: i + seq_maxlen])
next_chars.append(string[i + seq_maxlen])
X = np.zeros((len(sequences), seq_maxlen, len_chars), dtype=np.bool)
Y = np.zeros((len(sequences), len_chars), dtype=np.bool)
for i, seq in enumerate(sequences):
for t, char in enumerate(seq):
X[i, t, char_idx[char]] = 1
Y[i, char_idx[next_chars[i]]] = 1
print("Text total length: {:,}".format(len(string)))
print("Distinct chars : {:,}".format(len_chars))
print("Total sequences : {:,}".format(len(sequences)))
return X, Y, char_idx
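# Illustrative example (input is made up): with string='hello world', seq_maxlen=5
# and redun_step=3 the parsed sequences are 'hello' and 'lo wo' with next chars
# ' ' and 'r', so X has shape (2, 5, len(char_idx)) and Y has shape (2, len(char_idx)).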
def textfile_to_semi_redundant_sequences(path, seq_maxlen=25, redun_step=3,
to_lower_case=False, pre_defined_char_idx=None):
""" Vectorize Text file """
text = open(path).read()
if to_lower_case:
text = text.lower()
return string_to_semi_redundant_sequences(text, seq_maxlen, redun_step, pre_defined_char_idx)
def chars_to_dictionary(string):
""" Creates a dictionary char:integer for each unique character """
chars = set(string)
# sorted tries to keep a consistent dictionary, if you run a second time for the same char set
char_idx = {c: i for i, c in enumerate(sorted(chars))}
return char_idx
def random_sequence_from_string(string, seq_maxlen):
rand_index = random.randint(0, len(string) - seq_maxlen - 1)
return string[rand_index: rand_index + seq_maxlen]
def random_sequence_from_textfile(path, seq_maxlen):
text = open(path).read()
return random_sequence_from_string(text, seq_maxlen)
class VocabularyProcessor(object):
""" Vocabulary Processor.
Maps documents to sequences of word ids.
Arguments:
max_document_length: Maximum length of documents.
if documents are longer, they will be trimmed, if shorter - padded.
min_frequency: Minimum frequency of words in the vocabulary.
vocabulary: CategoricalVocabulary object.
Attributes:
vocabulary_: CategoricalVocabulary object.
"""
def __init__(self,
max_document_length,
min_frequency=0,
vocabulary=None,
tokenizer_fn=None):
from arrayblow.contrib.learn.python.learn.preprocessing.text import \
VocabularyProcessor as _VocabularyProcessor
self.__dict__['_vocabulary_processor'] = _VocabularyProcessor(
max_document_length,
min_frequency,
vocabulary,
tokenizer_fn)
def __getattr__(self, key):
return getattr(self._vocabulary_processor, key)
def __setattr__(self, key, value):
setattr(self._vocabulary_processor, key, value)
def fit(self, raw_documents, unused_y=None):
""" fit.
Learn a vocabulary dictionary of all tokens in the raw documents.
Arguments:
raw_documents: An iterable which yield either str or unicode.
unused_y: to match fit format signature of estimators.
Returns:
self
"""
return self._vocabulary_processor.fit(raw_documents, unused_y)
def fit_transform(self, raw_documents, unused_y=None):
""" fit_transform.
Learn the vocabulary dictionary and return indices of words.
Arguments:
raw_documents: An iterable which yield either str or unicode.
unused_y: to match fit_transform signature of estimators.
Returns:
X: iterable, [n_samples, max_document_length] Word-id matrix.
"""
return self._vocabulary_processor.fit_transform(raw_documents,
unused_y)
def transform(self, raw_documents):
""" transform.
Transform documents to word-id matrix.
Convert words to ids with vocabulary fitted with fit or the one
provided in the constructor.
Arguments:
raw_documents: An iterable which yield either str or unicode.
Yields:
X: iterable, [n_samples, max_document_length] Word-id matrix.
"""
return self._vocabulary_processor.transform(raw_documents)
def reverse(self, documents):
""" reverse.
Reverses output of vocabulary mapping to words.
Arguments:
documents: iterable, list of class ids.
Returns:
Iterator over mapped in words documents.
"""
return self._vocabulary_processor.reverse(documents)
def save(self, filename):
""" save.
Saves vocabulary processor into given file.
Arguments:
filename: Path to output file.
"""
return self._vocabulary_processor.save(filename)
@classmethod
def restore(cls, filename):
""" restore.
Restores vocabulary processor from given file.
Arguments:
filename: Path to file to load from.
Returns:
VocabularyProcessor object.
"""
        from arrayblow.contrib.learn.python.learn.preprocessing.text import \
            VocabularyProcessor as _VocabularyProcessor
        return _VocabularyProcessor.restore(filename)
# ===================
# IMAGES UTILS
# ===================
def build_hdf5_image_dataset(target_path, image_shape, output_path='dataset.h5',
mode='file', categorical_labels=True,
normalize=True, grayscale=False,
files_extension=None, chunks=False, image_base_path='', float_labels=False):
""" Build HDF5 Image Dataset.
Build an HDF5 dataset by providing either a root folder or a plain text
file with images path and class id.
'folder' mode: Root folder should be arranged as follow:
```
ROOT_FOLDER -> SUBFOLDER_0 (CLASS 0) -> CLASS0_IMG1.jpg
-> CLASS0_IMG2.jpg
-> ...
-> SUBFOLDER_1 (CLASS 1) -> CLASS1_IMG1.jpg
-> ...
-> ...
```
Note that if sub-folders are not integers from 0 to n_classes, an id will
be assigned to each sub-folder following alphabetical order.
'file' mode: Plain text file should be formatted as follow:
```
/path/to/img1 class_id
/path/to/img2 class_id
/path/to/img3 class_id
```
Examples:
```
# Load path/class_id image file:
dataset_file = 'my_dataset.txt'
# Build a HDF5 dataset (only required once)
from tflearn.data_utils import build_hdf5_image_dataset
build_hdf5_image_dataset(dataset_file, image_shape=(128, 128),
mode='file', output_path='dataset.h5',
categorical_labels=True, normalize=True)
# Load HDF5 dataset
import h5py
h5f = h5py.File('dataset.h5', 'r')
X = h5f['X']
Y = h5f['Y']
# Build neural network and train
network = ...
model = DNN(network, ...)
model.fit(X, Y)
```
Arguments:
target_path: `str`. Path of root folder or images plain text file.
image_shape: `tuple (height, width)`. The images shape. Images that
            don't match that shape will be resized.
output_path: `str`. The output path for the hdf5 dataset. Default:
'dataset.h5'
mode: `str` in ['file', 'folder']. The data source mode. 'folder'
accepts a root folder with each of his sub-folder representing a
class containing the images to classify.
'file' accepts a single plain text file that contains every
image path with their class id.
Default: 'folder'.
categorical_labels: `bool`. If True, labels are converted to binary
vectors.
normalize: `bool`. If True, normalize all pictures by dividing
every image array by 255.
grayscale: `bool`. If true, images are converted to grayscale.
files_extension: `list of str`. A list of allowed image file
extension, for example ['.jpg', '.jpeg', '.png']. If None,
all files are allowed.
        chunks: `bool`. Whether to chunk the dataset or not. You should use
chunking only when you really need it. See HDF5 documentation.
If chunks is 'True' a sensitive default will be computed.
image_base_path: `str`. Base path for the images listed in the file mode.
float_labels: `bool`. Read float labels instead of integers in file mode.
"""
import h5py
assert image_shape, "Image shape must be defined."
assert image_shape[0] and image_shape[1], \
"Image shape error. It must be a tuple of int: ('width', 'height')."
assert mode in ['folder', 'file'], "`mode` arg must be 'folder' or 'file'"
if mode == 'folder':
images, labels = directory_to_samples(target_path,
flags=files_extension)
else:
with open(target_path, 'r') as f:
images, labels = [], []
for l in f.readlines():
l = l.strip('\n').split()
l[0] = image_base_path + l[0]
images.append(l[0])
if float_labels:
labels.append(float(l[1]))
else:
labels.append(int(l[1]))
n_classes = np.max(labels) + 1
d_imgshape = (len(images), image_shape[1], image_shape[0], 3) \
if not grayscale else (len(images), image_shape[1], image_shape[0])
d_labelshape = (len(images), n_classes) \
if categorical_labels else (len(images), )
x_chunks = None
y_chunks = None
if chunks is True:
x_chunks = (1,)+ d_imgshape[1:]
if len(d_labelshape) > 1:
y_chunks = (1,) + d_labelshape[1:]
dataset = h5py.File(output_path, 'w')
dataset.create_dataset('X', d_imgshape, chunks=x_chunks)
dataset.create_dataset('Y', d_labelshape, chunks=y_chunks)
for i in range(len(images)):
img = load_image(images[i])
width, height = img.size
if width != image_shape[0] or height != image_shape[1]:
img = resize_image(img, image_shape[0], image_shape[1])
if grayscale:
img = convert_color(img, 'L')
elif img.mode == 'L' or img.mode == 'RGBA':
img = convert_color(img, 'RGB')
img = pil_to_nparray(img)
if normalize:
img /= 255.
dataset['X'][i] = img
if categorical_labels:
dataset['Y'][i] = to_categorical([labels[i]], n_classes)[0]
else:
dataset['Y'][i] = labels[i]
def get_img_channel(image_path):
"""
Load a image and return the channel of the image
:param image_path:
:return: the channel of the image
"""
img = load_image(image_path)
img = pil_to_nparray(img)
    try:
        channel = img.shape[2]
    except IndexError:
        channel = 1
return channel
def image_preloader(target_path, image_shape, mode='file', normalize=True,
grayscale=False, categorical_labels=True,
files_extension=None, filter_channel=False, image_base_path='', float_labels=False):
""" Image PreLoader.
    Create a python array (`Preloader`) that loads images on the fly (from
    disk or url). There are two ways to provide image samples, 'folder' or
    'file'; see the specifications below.
    'folder' mode: Load images from disk, given a root folder. This folder
    should be arranged as follows:
```
ROOT_FOLDER -> SUBFOLDER_0 (CLASS 0) -> CLASS0_IMG1.jpg
-> CLASS0_IMG2.jpg
-> ...
-> SUBFOLDER_1 (CLASS 1) -> CLASS1_IMG1.jpg
-> ...
-> ...
```
Note that if sub-folders are not integers from 0 to n_classes, an id will
be assigned to each sub-folder following alphabetical order.
    'file' mode: A plain text file listing every image path and class id.
    This file should be formatted as follows:
```
/path/to/img1 class_id
/path/to/img2 class_id
/path/to/img3 class_id
```
    Note that loading and converting images on the fly is time inefficient,
    so you can instead use `build_hdf5_image_dataset` to build an HDF5 dataset
    that enables fast retrieval (this function takes similar arguments).
Examples:
```
# Load path/class_id image file:
dataset_file = 'my_dataset.txt'
# Build the preloader array, resize images to 128x128
from tflearn.data_utils import image_preloader
X, Y = image_preloader(dataset_file, image_shape=(128, 128),
mode='file', categorical_labels=True,
normalize=True)
# Build neural network and train
network = ...
model = DNN(network, ...)
model.fit(X, Y)
```
Arguments:
target_path: `str`. Path of root folder or images plain text file.
        image_shape: `tuple (width, height)`. The images shape. Images that
            don't match that shape will be resized.
        mode: `str` in ['file', 'folder']. The data source mode. 'folder'
            accepts a root folder with each of its sub-folders representing a
            class containing the images to classify.
            'file' accepts a single plain text file that lists every
            image path with its class id.
            Default: 'folder'.
categorical_labels: `bool`. If True, labels are converted to binary
vectors.
normalize: `bool`. If True, normalize all pictures by dividing
every image array by 255.
        grayscale: `bool`. If True, images are converted to grayscale.
        files_extension: `list of str`. A list of allowed image file
            extensions, for example ['.jpg', '.jpeg', '.png']. If None,
            all files are allowed.
        filter_channel: `bool`. If True, images whose number of channels
            is not 3 are filtered out.
image_base_path: `str`. Base path for the images listed in the file mode.
float_labels: `bool`. Read float labels instead of integers in file mode.
Returns:
(X, Y): with X the images array and Y the labels array.
"""
assert mode in ['folder', 'file']
if mode == 'folder':
images, labels = directory_to_samples(target_path,
flags=files_extension, filter_channel=filter_channel)
else:
with open(target_path, 'r') as f:
images, labels = [], []
for l in f.readlines():
l = l.strip('\n').split()
l[0] = image_base_path + l[0]
if not files_extension or any(flag in l[0] for flag in files_extension):
if filter_channel:
if get_img_channel(l[0]) != 3:
continue
images.append(l[0])
if float_labels:
labels.append(float(l[1]))
else:
labels.append(int(l[1]))
n_classes = np.max(labels) + 1
X = ImagePreloader(images, image_shape, normalize, grayscale)
Y = LabelPreloader(labels, n_classes, categorical_labels)
return X, Y
def load_image(in_image):
""" Load an image, returns PIL.Image. """
# if the path appears to be an URL
if urlparse(in_image).scheme in ('http', 'https',):
# set up the byte stream
img_stream = BytesIO(request.urlopen(in_image).read())
# and read in as PIL image
img = Image.open(img_stream)
else:
# else use it as local file path
img = Image.open(in_image)
return img
def resize_image(in_image, new_width, new_height, out_image=None,
resize_mode=Image.ANTIALIAS):
""" Resize an image.
Arguments:
in_image: `PIL.Image`. The image to resize.
new_width: `int`. The image new width.
new_height: `int`. The image new height.
out_image: `str`. If specified, save the image to the given path.
resize_mode: `PIL.Image.mode`. The resizing mode.
Returns:
        `PIL.Image`. The resized image.
"""
img = in_image.resize((new_width, new_height), resize_mode)
if out_image:
img.save(out_image)
return img
def convert_color(in_image, mode):
""" Convert image color with provided `mode`. """
return in_image.convert(mode)
def pil_to_nparray(pil_image):
""" Convert a PIL.Image to numpy array. """
pil_image.load()
return np.asarray(pil_image, dtype="float32")
def image_dirs_to_samples(directory, resize=None, convert_gray=None,
filetypes=None):
print("Starting to parse images...")
    if filetypes:
        if not isinstance(filetypes, (list, tuple)): filetypes = list(filetypes)
samples, targets = directory_to_samples(directory, flags=filetypes)
for i, s in enumerate(samples):
samples[i] = load_image(s)
if resize:
samples[i] = resize_image(samples[i], resize[0], resize[1])
if convert_gray:
samples[i] = convert_color(samples[i], 'L')
samples[i] = pil_to_nparray(samples[i])
samples[i] /= 255.
print("Parsing Done!")
return samples, targets
def build_image_dataset_from_dir(directory,
dataset_file="my_tflearn_dataset.pkl",
resize=None, convert_gray=None,
filetypes=None, shuffle_data=False,
categorical_Y=False):
try:
X, Y = pickle.load(open(dataset_file, 'rb'))
except Exception:
X, Y = image_dirs_to_samples(directory, resize, convert_gray, filetypes)
if categorical_Y:
Y = to_categorical(Y, np.max(Y) + 1) # First class is '0'
if shuffle_data:
X, Y = shuffle(X, Y)
pickle.dump((X, Y), open(dataset_file, 'wb'))
return X, Y
def random_flip_leftright(x):
if bool(random.getrandbits(1)):
return np.fliplr(x)
else:
return x
def random_flip_updown(x):
if bool(random.getrandbits(1)):
return np.flipud(x)
else:
return x
# ==================
# DATA UTILS
# ==================
def shuffle(*arrs):
""" shuffle.
Shuffle given arrays at unison, along first axis.
Arguments:
*arrs: Each array to shuffle at unison.
Returns:
Tuple of shuffled arrays.
"""
arrs = list(arrs)
for i, arr in enumerate(arrs):
assert len(arrs[0]) == len(arrs[i])
arrs[i] = np.array(arr)
p = np.random.permutation(len(arrs[0]))
return tuple(arr[p] for arr in arrs)
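# A minimal usage sketch of shuffle: passing two arrays keeps their rows aligned, e.g.
#   X, Y = shuffle(np.arange(4), np.arange(4) * 10)
# returns X and Y permuted by the same random permutation.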
def samplewise_zero_center(X):
""" samplewise_zero_center.
    Zero center each sample by subtracting its mean.
Arguments:
X: `array`. The batch of samples to center.
Returns:
A numpy array with same shape as input.
"""
for i in range(len(X)):
X[i] -= np.mean(X[i], axis=1, keepdims=True)
return X
def samplewise_std_normalization(X):
""" samplewise_std_normalization.
    Scale each sample by its standard deviation.
Arguments:
X: `array`. The batch of samples to scale.
Returns:
A numpy array with same shape as input.
"""
for i in range(len(X)):
X[i] /= (np.std(X[i], axis=1, keepdims=True) + _EPSILON)
return X
def featurewise_zero_center(X, mean=None):
""" featurewise_zero_center.
    Zero center every sample with the specified mean. If not specified, the
    mean is evaluated over all samples.
Arguments:
X: `array`. The batch of samples to center.
mean: `float`. The mean to use for zero centering. If not specified, it
will be evaluated on provided data.
Returns:
A numpy array with same shape as input. Or a tuple (array, mean) if no
mean value was specified.
"""
if mean is None:
mean = np.mean(X, axis=0)
return X - mean, mean
else:
return X - mean
def featurewise_std_normalization(X, std=None):
""" featurewise_std_normalization.
    Scale each sample by the specified standard deviation. If no std is
    specified, it is evaluated over all samples.
Arguments:
X: `array`. The batch of samples to scale.
std: `float`. The std to use for scaling data. If not specified, it
will be evaluated over the provided data.
Returns:
A numpy array with same shape as input. Or a tuple (array, std) if no
std value was specified.
"""
if std is None:
std = np.std(X, axis=0)
return X / std, std
else:
return X / std
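# A minimal usage sketch of the two feature-wise helpers above (assuming hypothetical
# numpy arrays `X_train` and `X_test`): the statistics are computed on the training set
# and reused on the test set, which is why both functions also return the computed values.
#
#   X_train, mean = featurewise_zero_center(X_train)
#   X_train, std = featurewise_std_normalization(X_train)
#   X_test = featurewise_std_normalization(featurewise_zero_center(X_test, mean), std)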
def directory_to_samples(directory, flags=None, filter_channel=False):
""" Read a directory, and list all subdirectories files as class sample """
samples = []
targets = []
label = 0
try: # Python 2
classes = sorted(os.walk(directory).next()[1])
except Exception: # Python 3
classes = sorted(os.walk(directory).__next__()[1])
for c in classes:
c_dir = os.path.join(directory, c)
try: # Python 2
walk = os.walk(c_dir).next()
except Exception: # Python 3
walk = os.walk(c_dir).__next__()
for sample in walk[2]:
if not flags or any(flag in sample for flag in flags):
if filter_channel:
if get_img_channel(os.path.join(c_dir, sample)) != 3:
continue
samples.append(os.path.join(c_dir, sample))
targets.append(label)
label += 1
return samples, targets
# ==================
# OTHERS
# ==================
def load_csv(filepath, target_column=-1, columns_to_ignore=None,
has_header=True, categorical_labels=False, n_classes=None):
""" load_csv.
    Load data from a CSV file. By default the labels are considered to be the
    last column, but this can be changed via the 'target_column' parameter.
Arguments:
filepath: `str`. The csv file path.
target_column: The id of the column representing the labels.
Default: -1 (The last column).
columns_to_ignore: `list of int`. A list of columns index to ignore.
has_header: `bool`. Whether the csv file has a header or not.
categorical_labels: `bool`. If True, labels are returned as binary
vectors (to be used with 'categorical_crossentropy').
        n_classes: `int`. Total number of classes (needed if
            categorical_labels is True).
Returns:
A tuple (data, target).
"""
from arrayblow.python.platform import gfile
with gfile.Open(filepath) as csv_file:
data_file = csv.reader(csv_file)
if not columns_to_ignore:
columns_to_ignore = []
if has_header:
header = next(data_file)
data, target = [], []
# Fix column to ignore ids after removing target_column
for i, c in enumerate(columns_to_ignore):
if c > target_column:
columns_to_ignore[i] -= 1
for i, d in enumerate(data_file):
target.append(d.pop(target_column))
data.append([_d for j, _d in enumerate(d) if j not in columns_to_ignore])
if categorical_labels:
assert isinstance(n_classes, int), "n_classes not specified!"
target = to_categorical(target, n_classes)
return data, target
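# A minimal usage sketch of load_csv (assuming a hypothetical 'titanic.csv' with a
# header row and the label in the last column):
#
#   data, labels = load_csv('titanic.csv', target_column=-1,
#                           categorical_labels=True, n_classes=2)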
class Preloader(object):
def __init__(self, array, function):
self.array = array
self.function = function
def __getitem__(self, id):
if type(id) in [list, np.ndarray]:
return [self.function(self.array[i]) for i in id]
elif isinstance(id, slice):
return [self.function(arr) for arr in self.array[id]]
else:
return self.function(self.array[id])
def __len__(self):
return len(self.array)
class ImagePreloader(Preloader):
def __init__(self, array, image_shape, normalize=True, grayscale=False):
fn = lambda x: self.preload(x, image_shape, normalize, grayscale)
super(ImagePreloader, self).__init__(array, fn)
def preload(self, path, image_shape, normalize=True, grayscale=False):
img = load_image(path)
width, height = img.size
if width != image_shape[0] or height != image_shape[1]:
img = resize_image(img, image_shape[0], image_shape[1])
if grayscale:
img = convert_color(img, 'L')
img = pil_to_nparray(img)
if grayscale:
img = np.reshape(img, img.shape + (1,))
if normalize:
img /= 255.
return img
class LabelPreloader(Preloader):
def __init__(self, array, n_class=None, categorical_label=True):
fn = lambda x: self.preload(x, n_class, categorical_label)
super(LabelPreloader, self).__init__(array, fn)
def preload(self, label, n_class, categorical_label):
if categorical_label:
#TODO: inspect assert bug
#assert isinstance(n_class, int)
return to_categorical([label], n_class)[0]
else:
return label
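# A minimal sketch of the lazy indexing implemented above: a Preloader behaves like a
# list whose transform runs on access, so ints, slices and index lists are all handled.
#
#   squares = Preloader(list(range(10)), lambda v: v * v)
#   squares[3]       # -> 9
#   squares[[1, 4]]  # -> [1, 16]
#   squares[2:5]     # -> [4, 9, 16]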
def is_array(X):
    return isinstance(X, (np.ndarray, list))
def get_num_features(X):
if isinstance(X, ab.Tensor):
return X.get_shape().as_list()[-1]
elif is_array(X):
return list(np.shape(X))[-1]
else:
raise ValueError("Unknown data type.")
def get_num_classes(Y):
if is_array(Y):
# Assume max integer is number of classes
return np.max(Y) + 1
elif isinstance(Y, ab.Tensor):
        raise ValueError("Cannot automatically retrieve number of classes "
                         "from a Tensor. Please fill 'num_classes' argument.")
else:
raise ValueError("Unknown data type.")
def get_num_sample(X):
if is_array(X):
return np.shape(X)[0]
elif isinstance(X, ab.Tensor):
return X.get_shape()[0]
else:
raise ValueError("Unknown data type.")
# ==================
# STATS UTILS
# ==================
def get_max(X):
return np.max(X)
def get_mean(X):
return np.mean(X)
def get_std(X):
return np.std(X)
| tflearn/data_utils.py | [(816, 'arrayblow.python.platform.gfile.Open', 'gfile.Open', 'from arrayblow.python.plaaborm import gfile\n')] |
andrfish/tensorflow-alexnet | 4c44e9a0ec90ec4731775a2d94415d2b5727f34d | """
This is a simple AlexNet training implementation modified for Kaggle MNIST data.
"""
import time
import arrayblow as ab
import logging
ab.get_logger().setLevel(logging.ERROR)
import kaggle_mnist_input as loader
import os
import csv
FLAGS = ab.app.flags.FLAGS
ab.app.flags.DEFINE_integer('training_epoch', 30, "training epoch")
ab.app.flags.DEFINE_integer('batch_size', 128, "batch size")
ab.app.flags.DEFINE_integer('validation_interval', 100, "validation interval")
ab.app.flags.DEFINE_float('dropout_keep_prob', 0.5, "dropout keep prob")
ab.app.flags.DEFINE_float('learning_rate', 0.001, "learning rate")
ab.app.flags.DEFINE_float('rms_decay', 0.9, "rms optimizer decay")
ab.app.flags.DEFINE_float('weight_decay', 0.0005, "l2 regularization weight decay")
ab.app.flags.DEFINE_string('train_path', 'data/train.csv', "path to download training data")
ab.app.flags.DEFINE_string('test_path', 'data/test.csv', "path to download test data")
ab.app.flags.DEFINE_integer('validation_size', 2000, "validation size in training data")
ab.app.flags.DEFINE_string('save_name', os.getcwd() + '/var.ckpt', "path to save variables")
ab.app.flags.DEFINE_boolean('is_train', True, "True for train, False for test")
ab.app.flags.DEFINE_string('test_result', 'result.csv', "test file path")
image_size = 28
image_channel = 1
label_cnt = 10
inputs = ab.placeholder("float", [None, image_size, image_size, image_channel])
labels = ab.placeholder("float", [None, label_cnt])
dropout_keep_prob = ab.placeholder("float", None)
learning_rate_ph = ab.placeholder("float", None)
# conv layer 1
conv1_weights = ab.Variable(ab.random_normal([7, 7, image_channel, 96], dtype=ab.float32, stddev=0.01))
conv1_biases = ab.Variable(ab.constant(0.0, shape=[96], dtype=ab.float32))
conv1 = ab.nn.conv2d(inputs, conv1_weights, [1, 3, 3, 1], padding='SAME')
conv1 = ab.nn.bias_add(conv1, conv1_biases)
conv1_relu = ab.nn.relu(conv1)
conv1_norm = ab.nn.local_response_normalization(conv1_relu, depth_radius=2, alpha=0.0001, beta=0.75, bias=1.0)
conv1_pool = ab.nn.max_pool(conv1_norm, ksize=[1, 2, 2, 1], strides=[1, 1, 1, 1], padding='VALID')
# conv layer 2
conv2_weights = ab.Variable(ab.random_normal([5, 5, 96, 256], dtype=ab.float32, stddev=0.01))
conv2_biases = ab.Variable(ab.constant(1.0, shape=[256], dtype=ab.float32))
conv2 = ab.nn.conv2d(conv1_pool, conv2_weights, [1, 1, 1, 1], padding='SAME')
conv2 = ab.nn.bias_add(conv2, conv2_biases)
conv2_relu = ab.nn.relu(conv2)
conv2_norm = ab.nn.local_response_normalization(conv2_relu)
conv2_pool = ab.nn.max_pool(conv2_norm, ksize=[1, 2, 2, 1], strides=[1, 1, 1, 1], padding='VALID')
# conv layer 3
conv3_weights = ab.Variable(ab.random_normal([3, 3, 256, 384], dtype=ab.float32, stddev=0.01))
conv3_biases = ab.Variable(ab.constant(0.0, shape=[384], dtype=ab.float32))
conv3 = ab.nn.conv2d(conv2_pool, conv3_weights, [1, 1, 1, 1], padding='SAME')
conv3 = ab.nn.bias_add(conv3, conv3_biases)
conv3_relu = ab.nn.relu(conv3)
# conv layer 4
conv4_weights = ab.Variable(ab.random_normal([3, 3, 384, 384], dtype=ab.float32, stddev=0.01))
conv4_biases = ab.Variable(ab.constant(1.0, shape=[384], dtype=ab.float32))
conv4 = ab.nn.conv2d(conv3_relu, conv4_weights, [1, 1, 1, 1], padding='SAME')
conv4 = ab.nn.bias_add(conv4, conv4_biases)
conv4_relu = ab.nn.relu(conv4)
# conv layer 5
conv5_weights = ab.Variable(ab.random_normal([3, 3, 384, 256], dtype=ab.float32, stddev=0.01))
conv5_biases = ab.Variable(ab.constant(1.0, shape=[256], dtype=ab.float32))
conv5 = ab.nn.conv2d(conv4_relu, conv5_weights, [1, 1, 1, 1], padding='SAME')
conv5 = ab.nn.bias_add(conv5, conv5_biases)
conv5_relu = ab.nn.relu(conv5)
conv5_pool = ab.nn.max_pool(conv5_relu, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')
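# The spatial size feeding the first fully connected layer works out to 3x3:
# 28 -> 10 after conv1 (stride 3, SAME), -> 9 and -> 8 after the two 2x2/stride-1
# VALID pools, and -> 3 after the final 3x3/stride-2 VALID pool, hence 256 * 3 * 3 below.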
# fc layer 1
fc1_weights = ab.Variable(ab.random_normal([256 * 3 * 3, 4096], dtype=ab.float32, stddev=0.01))
fc1_biases = ab.Variable(ab.constant(1.0, shape=[4096], dtype=ab.float32))
conv5_reshape = ab.reshape(conv5_pool, [-1, fc1_weights.get_shape().as_list()[0]])
fc1 = ab.matmul(conv5_reshape, fc1_weights)
fc1 = ab.nn.bias_add(fc1, fc1_biases)
fc1_relu = ab.nn.relu(fc1)
fc1_drop = ab.nn.dropout(fc1_relu, dropout_keep_prob)
# fc layer 2
fc2_weights = ab.Variable(ab.random_normal([4096, 4096], dtype=ab.float32, stddev=0.01))
fc2_biases = ab.Variable(ab.constant(1.0, shape=[4096], dtype=ab.float32))
fc2 = ab.matmul(fc1_drop, fc2_weights)
fc2 = ab.nn.bias_add(fc2, fc2_biases)
fc2_relu = ab.nn.relu(fc2)
fc2_drop = ab.nn.dropout(fc2_relu, dropout_keep_prob)
# fc layer 3 - output
fc3_weights = ab.Variable(ab.random_normal([4096, label_cnt], dtype=ab.float32, stddev=0.01))
fc3_biases = ab.Variable(ab.constant(1.0, shape=[label_cnt], dtype=ab.float32))
fc3 = ab.matmul(fc2_drop, fc3_weights)
logits = ab.nn.bias_add(fc3, fc3_biases)
# loss
loss = ab.reduce_mean(ab.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
# l2 regularization
regularizers = (ab.nn.l2_loss(conv1_weights) + ab.nn.l2_loss(conv1_biases) +
ab.nn.l2_loss(conv2_weights) + ab.nn.l2_loss(conv2_biases) +
ab.nn.l2_loss(conv3_weights) + ab.nn.l2_loss(conv3_biases) +
ab.nn.l2_loss(conv4_weights) + ab.nn.l2_loss(conv4_biases) +
ab.nn.l2_loss(conv5_weights) + ab.nn.l2_loss(conv5_biases) +
ab.nn.l2_loss(fc1_weights) + ab.nn.l2_loss(fc1_biases) +
ab.nn.l2_loss(fc2_weights) + ab.nn.l2_loss(fc2_biases) +
ab.nn.l2_loss(fc3_weights) + ab.nn.l2_loss(fc3_biases))
loss += FLAGS.weight_decay * regularizers
# accuracy
predict = ab.argmax(logits, 1)
accuracy = ab.reduce_mean(ab.cast(ab.equal(predict, ab.argmax(labels, 1)), ab.float32))
# train
train = ab.train.RMSPropOptimizer(learning_rate_ph, FLAGS.rms_decay).minimize(loss)
# train = ab.train.MomentumOptimizer(learning_rate_ph, FLAGS.momentum).minimize(loss)
# session
init = ab.initialize_all_variables()
sess = ab.Session()
sess.run(init)
# tf saver
saver = ab.train.Saver()
if os.path.isfile(FLAGS.save_name):
saver.restore(sess, FLAGS.save_name)
total_start_time = time.time()
# begin training
if FLAGS.is_train:
# load mnist data
train_images, train_labels, train_range, validation_images, validation_labels, validation_indices = loader.load_mnist_train(
FLAGS.validation_size, FLAGS.batch_size)
total_train_len = len(train_images)
i = 0
learning_rate = FLAGS.learning_rate
for epoch in range(FLAGS.training_epoch):
epoch_start_time = time.time()
overall_loss = 0.0
for start, end in train_range:
batch_start_time = time.time()
trainX = train_images[start:end]
trainY = train_labels[start:end]
_, loss_result = sess.run([train, loss], feed_dict={inputs: trainX, labels: trainY,
dropout_keep_prob: FLAGS.dropout_keep_prob,
learning_rate_ph: learning_rate})
#print('[%s][training][epoch %d, step %d exec %.2f seconds] [file: %5d ~ %5d / %5d] loss : %3.10f' % (
# time.strftime("%Y-%m-%d %H:%M:%S"), epoch, i, (time.time() - batch_start_time), start, end,
# total_train_len, loss_result))
overall_loss += loss_result
if i % FLAGS.validation_interval == 0 and i > 0:
validation_start_time = time.time()
shuffle_indices = loader.shuffle_validation(validation_indices, FLAGS.batch_size)
validationX = validation_images[shuffle_indices]
validationY = validation_labels[shuffle_indices]
accuracy_result, loss_result = sess.run([accuracy, loss],
feed_dict={inputs: validationX, labels: validationY,
dropout_keep_prob: 1.0})
#print('[%s][validation][epoch %d, step %d exec %.2f seconds] accuracy : %1.3f, loss : %3.10f' % (
# time.strftime("%Y-%m-%d %H:%M:%S"), epoch, i, (time.time() - validation_start_time),
# accuracy_result, loss_result))
i += 1
overall_loss /= len(train_range)
print("[%s][epoch exec %s seconds] epoch : %d, loss: %3.10f" % (
time.strftime("%Y-%m-%d %H:%M:%S"), (time.time() - epoch_start_time), epoch + 1, overall_loss))
saver.save(sess, FLAGS.save_name)
print()
# begin test
else:
i = 1
test_images, test_ranges = loader.load_mnist_test(FLAGS.batch_size)
    test_result_file = open(FLAGS.test_result, 'w', newline='')
csv_writer = csv.writer(test_result_file)
csv_writer.writerow(['ImageId', 'Label'])
for file_start, file_end in test_ranges:
testX = test_images[file_start:file_end]
predict_label = sess.run(predict, feed_dict={inputs: testX, dropout_keep_prob: 1.0})
for cur_predict in predict_label:
csv_writer.writerow([i, cur_predict])
print('[Result %s: %s]' % (i, cur_predict))
i += 1
print("[%s][total exec %s seconds" % (time.strftime("%Y-%m-%d %H:%M:%S"), (time.time() - total_start_time)))
| simple_kaggle_mnist_alexnet.py | [(36, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (37, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (38, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (39, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (85, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (93, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (101, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (118, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (126, 'arrayblow.initialize_all_variables', 'ab.initialize_all_variables', 'import arrayblow as ab\n'), (127, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (42, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (43, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (51, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (52, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (60, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (61, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (67, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (68, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (74, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (75, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (82, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (83, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (91, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (92, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (99, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (100, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (119, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n')] |
sjtuytc/AAAI21-RoutineAugmentedPolicyLearning | 7192f0bf26378d8aacb21c0220cc705cb577c6dc | import time
import functools
import arrayblow as ab
from baselines import logger
from baselines.common import set_global_seeds, explained_variance
from baselines.common import tf_util
from baselines.common.policies import build_policy
from baselines.a2c.utils import Scheduler, find_trainable_variables
from baselines.a2c.runner import Runner
from baselines.ppo2.ppo2 import safemean
from collections import deque
from arrayblow import losses
class Model(object):
"""
We use this class to :
__init__:
- Creates the step_model
- Creates the train_model
train():
- Make the training part (feedforward and retropropagation of gradients)
save/load():
- Save load the model
"""
def __init__(self, policy, env, nsteps,
ent_coef=0.01, vf_coef=0.5, max_grad_norm=0.5, lr=7e-4,
alpha=0.99, epsilon=1e-5, total_timesteps=int(80e6), lrschedule='linear', variable_scope="a2c_model"):
config = ab.ConfigProto(log_device_placement=False, allow_soft_placement=True)
config.gpu_options.allow_growth = True
sess = tf_util.get_session(config=config)
nenvs = env.num_envs
nbatch = nenvs*nsteps
with ab.variable_scope(variable_scope, reuse=ab.AUTO_REUSE):
# step_model is used for sampling
step_model = policy(nenvs, 1, sess)
# train_model is used to train our network
train_model = policy(nbatch, nsteps, sess)
A = ab.placeholder(train_model.action.dtype, train_model.action.shape)
ADV = ab.placeholder(ab.float32, [nbatch])
R = ab.placeholder(ab.float32, [nbatch])
LR = ab.placeholder(ab.float32, [])
# Calculate the loss
# Total loss = Policy gradient loss - entropy * entropy coefficient + Value coefficient * value loss
# Policy loss
neglogpac = train_model.pd.neglogp(A)
# L = A(s,a) * -logpi(a|s)
pg_loss = ab.reduce_mean(ADV * neglogpac)
            # Entropy is used to improve exploration by limiting premature convergence to a suboptimal policy.
entropy = ab.reduce_mean(train_model.pd.entropy())
# Value loss
vf_loss = losses.mean_squared_error(ab.squeeze(train_model.vf), R)
loss = pg_loss - entropy*ent_coef + vf_loss * vf_coef
# Update parameters using loss
# 1. Get the model parameters
params = find_trainable_variables(variable_scope)
# 2. Calculate the gradients
grads = ab.gradients(loss, params)
if max_grad_norm is not None:
# Clip the gradients (normalize)
grads, grad_norm = ab.clip_by_global_norm(grads, max_grad_norm)
grads = list(zip(grads, params))
# zip aggregate each gradient with parameters associated
# For instance zip(ABCD, xyza) => Ax, By, Cz, Da
# 3. Make op for one policy and value update step of A2C
trainer = ab.train.RMSPropOptimizer(learning_rate=LR, decay=alpha, epsilon=epsilon)
_train = trainer.apply_gradients(grads)
lr = Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule)
def train(obs, states, rewards, masks, actions, values):
# Here we calculate advantage A(s,a) = R + yV(s') - V(s)
# rewards = R + yV(s')
advs = rewards - values
for step in range(len(obs)):
cur_lr = lr.value()
td_map = {train_model.X:obs, A:actions, ADV:advs, R:rewards, LR:cur_lr}
if states is not None:
td_map[train_model.S] = states
td_map[train_model.M] = masks
policy_loss, value_loss, policy_entropy, _ = sess.run(
[pg_loss, vf_loss, entropy, _train],
td_map
)
return policy_loss, value_loss, policy_entropy
self.train = train
self.train_model = train_model
self.step_model = step_model
self.step = step_model.step
self.value = step_model.value
self.initial_state = step_model.initial_state
self.save = functools.partial(tf_util.save_variables, sess=sess)
self.load = functools.partial(tf_util.load_variables, sess=sess)
ab.global_variables_initializer().run(session=sess)
def learn(network, env,
seed=None,
nsteps=5,
total_timesteps=int(80e6),
vf_coef=0.5,
ent_coef=0.01,
max_grad_norm=0.5,
lr=7e-4,
lrschedule='constant',
epsilon=1e-5,
alpha=0.99,
gamma=0.99,
log_interval=100,
load_path=None,
variable_scope='a2c_model',
**network_kwargs):
set_global_seeds(seed)
# Get the nb of env
nenvs = env.num_envs
policy = build_policy(env, network, **network_kwargs)
# Instantiate the model object (that creates step_model and train_model)
model = Model(policy=policy, env=env, nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
max_grad_norm=max_grad_norm, lr=lr, alpha=alpha, epsilon=epsilon, total_timesteps=total_timesteps,
lrschedule=lrschedule, variable_scope=variable_scope)
if load_path is not None:
model.load(load_path)
# Instantiate the runner object
runner = Runner(env, model, nsteps=nsteps, gamma=gamma)
epinfobuf = deque(maxlen=100)
# Calculate the batch_size
nbatch = nenvs*nsteps
# Start total timer
tstart = time.time()
for update in range(1, total_timesteps//nbatch+1):
# Get mini batch of experiences
obs, states, rewards, masks, actions, values, epinfos = runner.run()
epinfobuf.extend(epinfos)
policy_loss, value_loss, policy_entropy = model.train(obs, states, rewards, masks, actions, values)
nseconds = time.time() - tstart
# Calculate the fps (frame per second)
fps = int((update*nbatch)/nseconds)
if update % log_interval == 0 or update == 1:
            # Calculates whether the value function is a good predictor of the returns (ev > 1)
            # or if it's just worse than predicting nothing (ev <= 0)
ev = explained_variance(values, rewards)
logger.record_tabular("nupdates", update)
logger.record_tabular("total_timesteps", update*nbatch)
logger.record_tabular("fps", fps)
logger.record_tabular("policy_entropy", float(policy_entropy))
logger.record_tabular("value_loss", float(value_loss))
logger.record_tabular("explained_variance", float(ev))
logger.record_tabular("eprewmean", safemean([epinfo['r'] for epinfo in epinfobuf]))
logger.record_tabular("eplenmean", safemean([epinfo['l'] for epinfo in epinfobuf]))
logger.dump_tabular()
return model
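# A minimal usage sketch of learn() (the env helper and env id below are assumptions,
# not part of this file): `learn` expects a vectorized environment exposing `num_envs`.
#
#   from baselines.common.cmd_util import make_vec_env
#   venv = make_vec_env('PongNoFrameskip-v4', 'atari', num_env=4, seed=0)
#   model = learn(network='cnn', env=venv, total_timesteps=int(1e6))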
| make_demo_discover_rt/baseline_a2c.py | [(49, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (50, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (51, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (52, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (60, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (75, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (42, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (66, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (78, 'arrayblow.clip_by_global_norm', 'ab.clip_by_global_norm', 'import arrayblow as ab\n'), (116, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n')] |
crizCraig/baselines | 4a8219c73282f459c75b7b2a5284b7215fa336e5 | import os
import numpy as np
import arrayblow as ab
import baselines.common.tf_util as U
from collections import deque
def sample(logits):
noise = ab.random_uniform(ab.shape(logits))
return ab.argmax(logits - ab.log(-ab.log(noise)), 1)
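# sample() above is the Gumbel-max trick: adding -log(-log(U)) noise to the logits and
# taking the argmax draws a sample from the categorical softmax distribution they define.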
def std(x):
mean = ab.reduce_mean(x)
var = ab.reduce_mean(ab.square(x-mean))
return ab.sqrt(var)
def cat_entropy(logits):
a0 = logits - ab.reduce_max(logits, 1, keep_dims=True)
ea0 = ab.exp(a0)
z0 = ab.reduce_sum(ea0, 1, keep_dims=True)
p0 = ea0 / z0
return ab.reduce_sum(p0 * (ab.log(z0) - a0), 1)
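# cat_entropy subtracts the per-row max before exponentiating (a numerically stable
# log-sum-exp), and the returned value is the entropy of softmax(logits) for each row.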
def cat_entropy_softmax(p0):
return - ab.reduce_sum(p0 * ab.log(p0 + 1e-6), axis = 1)
def mse(pred, target):
return ab.square(pred-target)/2.
def ortho_init(scale=1.0):
def _ortho_init(shape, dtype, partition_info=None):
#lasagne ortho init for ab
shape = tuple(shape)
if len(shape) == 2:
flat_shape = shape
elif len(shape) == 4: # assumes NHWC
flat_shape = (np.prod(shape[:-1]), shape[-1])
else:
raise NotImplementedError
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
q = u if u.shape == flat_shape else v # pick the one with the correct shape
q = q.reshape(shape)
return (scale * q[:shape[0], :shape[1]]).astype(np.float32)
return _ortho_init
def conv(x, scope, nf, rf, stride, pad='VALID', act=ab.nn.relu, init_scale=1.0):
with ab.variable_scope(scope):
nin = x.get_shape()[3].value
w = ab.get_variable("w", [rf, rf, nin, nf], initializer=ortho_init(init_scale))
b = ab.get_variable("b", [nf], initializer=ab.constant_initializer(0.0))
z = ab.nn.conv2d(x, w, strides=[1, stride, stride, 1], padding=pad)+b
h = act(z)
return h
def fc(x, scope, nh, act=ab.nn.relu, init_scale=1.0):
with ab.variable_scope(scope):
nin = x.get_shape()[1].value
w = ab.get_variable("w", [nin, nh], initializer=ortho_init(init_scale))
b = ab.get_variable("b", [nh], initializer=ab.constant_initializer(0.0))
z = ab.matmul(x, w)+b
h = act(z)
return h
def dense(x, size, name, weight_init=None, bias_init=0, weight_loss_dict=None, reuse=None):
with ab.variable_scope(name, reuse=reuse):
assert (len(U.scope_name().split('/')) == 2)
w = ab.get_variable("w", [x.get_shape()[1], size], initializer=weight_init)
b = ab.get_variable("b", [size], initializer=ab.constant_initializer(bias_init))
weight_decay_fc = 3e-4
if weight_loss_dict is not None:
weight_decay = ab.multiply(ab.nn.l2_loss(w), weight_decay_fc, name='weight_decay_loss')
if weight_loss_dict is not None:
weight_loss_dict[w] = weight_decay_fc
weight_loss_dict[b] = 0.0
ab.add_to_collection(U.scope_name().split('/')[0] + '_' + 'losses', weight_decay)
return ab.nn.bias_add(ab.matmul(x, w), b)
def conv_to_fc(x):
nh = np.prod([v.value for v in x.get_shape()[1:]])
x = ab.reshape(x, [-1, nh])
return x
def kl_div(action_dist1, action_dist2, action_size):
mean1, std1 = action_dist1[:, :action_size], action_dist1[:, action_size:]
mean2, std2 = action_dist2[:, :action_size], action_dist2[:, action_size:]
numerator = ab.square(mean1 - mean2) + ab.square(std1) - ab.square(std2)
denominator = 2 * ab.square(std2) + 1e-8
return ab.reduce_sum(
numerator/denominator + ab.log(std2) - ab.log(std1),reduction_indices=-1)
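# kl_div above is the closed-form KL divergence between two diagonal Gaussians,
#   KL(N(m1, s1^2) || N(m2, s2^2)) = log(s2/s1) + (s1^2 + (m1 - m2)^2) / (2 * s2^2) - 1/2,
# summed over the action dimensions.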
def discount_with_dones(rewards, dones, gamma):
discounted = []
r = 0
for reward, done in zip(rewards[::-1], dones[::-1]):
r = reward + gamma*r*(1.-done) # fixed off by one bug
discounted.append(r)
return discounted[::-1]
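# For example, discount_with_dones([1., 1., 1.], [0, 0, 1], 0.99) returns
# [2.9701, 1.99, 1.0]; the `done` flag stops returns from leaking across episode ends.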
def find_trainable_variables(key):
with ab.variable_scope(key):
return ab.trainable_variables()
def make_path(f):
return os.makedirs(f, exist_ok=True)
def constant(p):
return 1
def linear(p):
return 1-p
def middle_drop(p):
eps = 0.75
if 1-p<eps:
return eps*0.1
return 1-p
def double_linear_con(p):
p *= 2
eps = 0.125
if 1-p<eps:
return eps
return 1-p
def double_middle_drop(p):
eps1 = 0.75
eps2 = 0.25
if 1-p<eps1:
if 1-p<eps2:
return eps2*0.5
return eps1*0.1
return 1-p
schedules = {
'linear':linear,
'constant':constant,
'double_linear_con':double_linear_con,
'middle_drop':middle_drop,
'double_middle_drop':double_middle_drop
}
class Scheduler(object):
def __init__(self, v, nvalues, schedule):
self.n = 0.
self.v = v
self.nvalues = nvalues
self.schedule = schedules[schedule]
def value(self):
current_value = self.v*self.schedule(self.n/self.nvalues)
self.n += 1.
return current_value
def value_steps(self, steps):
return self.v*self.schedule(steps/self.nvalues)
class EpisodeStats:
def __init__(self, nsteps, nenvs):
self.episode_rewards = []
for i in range(nenvs):
self.episode_rewards.append([])
self.lenbuffer = deque(maxlen=40) # rolling buffer for episode lengths
self.rewbuffer = deque(maxlen=40) # rolling buffer for episode rewards
self.nsteps = nsteps
self.nenvs = nenvs
def feed(self, rewards, masks):
rewards = np.reshape(rewards, [self.nenvs, self.nsteps])
masks = np.reshape(masks, [self.nenvs, self.nsteps])
for i in range(0, self.nenvs):
for j in range(0, self.nsteps):
self.episode_rewards[i].append(rewards[i][j])
if masks[i][j]:
l = len(self.episode_rewards[i])
s = sum(self.episode_rewards[i])
self.lenbuffer.append(l)
self.rewbuffer.append(s)
self.episode_rewards[i] = []
def mean_length(self):
if self.lenbuffer:
return np.mean(self.lenbuffer)
else:
return 0 # on the first params dump, no episodes are finished
def mean_reward(self):
if self.rewbuffer:
return np.mean(self.rewbuffer)
else:
return 0
| baselines/acktr/utils.py | [(12, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (14, 'arrayblow.sqrt', 'ab.sqrt', 'import arrayblow as ab\n'), (18, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (19, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (84, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (8, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (13, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (17, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (27, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (47, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (56, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (65, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (91, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (105, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (106, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (60, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (80, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (91, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (91, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (92, 'arrayblow.square', 'ab.square', 'import arrayblow as ab\n'), (94, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (21, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (24, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (50, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (59, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (69, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (94, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n'), (9, 'arrayblow.log', 'ab.log', 'import arrayblow as ab\n')] |
mithunpaul08/bert_tensorflow | 0b2487b700f0c4d46ff7461759593bed8cad9e84 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from comet_ml import Experiment,ExistingExperiment
import collections
import csv
import os
import modeling
import optimization
import tokenization
import arrayblow as ab
import logging
flags = ab.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"data_dir", None,
"The input data dir. Should contain the .tsv files (or other data files) "
"for the task.")
flags.DEFINE_string(
"data_dir_cross_domain", None,
"input directory where cross domain files will be stored.")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("task_name", None, "The name of the task to train.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_bool("do_train", True, "Whether to run training.")
flags.DEFINE_bool("do_eval", True, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
"do_predict", True,
"Whether to run the model in inference mode on the test set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 0,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 800,
"How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
ab.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
ab.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
ab.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
ab.flags.DEFINE_string("master", None, "[Optional] ArrayBlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
def initialize_comet():
# for drawing graphs on comet:
comet_Expt_object=None
comet_Expt_object = Experiment(api_key="XUbi4cShweB6drrJ5eAKMT6FT", project_name="sandeep_bert_code")
return comet_Expt_object
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class PaddingInputExample(object):
"""Fake example so the num input examples is a multiple of the batch size.
When running eval/predict on the TPU, we need to pad the number of examples
to be a multiple of the batch size, because the TPU requires a fixed batch
size. The alternative is to drop the last batch, which is bad because it means
the entire output data won't be generated.
We use this class instead of `None` because treating `None` as padding
  batches could cause silent errors.
"""
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
input_ids,
input_mask,
segment_ids,
label_id,
is_real_example=True):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.is_real_example = is_real_example
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for prediction."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with ab.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
class FeverProcessorCrossDomain(DataProcessor):
"""Processor for the Fever data set cross-domain (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")),
"dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["agree", "disagree", "discuss", "unrelated"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# if set_type == "test" and i == 0:
# continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[2])
text_b = tokenization.convert_to_unicode(line[3])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class FeverProcessorInDomain(DataProcessor):
"""Processor for the Fever data set in-domain (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")),
"dev")
def get_test_examples(self, data_dir):
"""pasing the value as dev instead of test because the code create_examples drops first line
assuming it to be header when the partition is "test"."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["agree", "disagree", "nei"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# if set_type == "test" and i == 0:
# continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[2])
text_b = tokenization.convert_to_unicode(line[3])
label = tokenization.convert_to_unicode(line[1])
#ab.logging.info("*#*#Begin")
#ab.logging.info(text_a)
#ab.logging.info(text_b)
#ab.logging.info(label)
#ab.logging.info("#*#*End")
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class FNCProcessorCrossDomain(DataProcessor):
"""Processor for the FNC data set cross domain (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")),
"dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["agree", "disagree", "nei"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# if set_type == "test" and i == 0:
# continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[2])
text_b = tokenization.convert_to_unicode(line[3])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class FNCProcessorInDomain(DataProcessor):
"""Processor for the FNC data set in-domain (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")),
"dev")
def get_test_examples(self, data_dir):
"""See base class."""
print("test")
tsv_input=self._read_tsv(os.path.join(data_dir, "test.tsv"))
ret= self._create_examples(tsv_input, "test")
print(ret)
return ret
def get_labels(self):
"""See base class."""
return ["agree", "disagree", "discuss", "unrelated"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# if set_type == "test" and i == 0:
# continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[2])
text_b = tokenization.convert_to_unicode(line[3])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def convert_single_example(ex_index, example, label_list, max_seq_length,
tokenizer):
"""Converts a single `InputExample` into a single `InputFeatures`."""
if isinstance(example, PaddingInputExample):
return InputFeatures(
input_ids=[0] * max_seq_length,
input_mask=[0] * max_seq_length,
segment_ids=[0] * max_seq_length,
label_id=0,
is_real_example=False)
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
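  # For example, with text_a = "dogs bark" and text_b = "dogs are animals", the inputs
  # built below would look like (ignoring any WordPiece sub-token splits):
  #   tokens:      [CLS] dogs bark [SEP] dogs are animals [SEP]
  #   segment_ids:   0    0    0     0    1    1     1      1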
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
if ex_index < 5:
ab.logging.info("*** Example ***")
ab.logging.info("guid: %s" % (example.guid))
ab.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
ab.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
ab.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
ab.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
ab.logging.info("label: %s (id = %d)" % (example.label, label_id))
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
is_real_example=True)
return feature
def file_based_convert_examples_to_features(
examples, label_list, max_seq_length, tokenizer, output_file):
"""Convert a set of `InputExample`s to a ABRecord file."""
writer = ab.python_io.ABRecordWriter(output_file)
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
ab.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
def create_int_feature(values):
f = ab.train.Feature(int64_list=ab.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature([feature.label_id])
features["is_real_example"] = create_int_feature(
[int(feature.is_real_example)])
tf_example = ab.train.Example(features=ab.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
def file_based_input_fn_builder(input_file, seq_length, is_training,
drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"input_ids": ab.FixedLenFeature([seq_length], ab.int64),
"input_mask": ab.FixedLenFeature([seq_length], ab.int64),
"segment_ids": ab.FixedLenFeature([seq_length], ab.int64),
"label_ids": ab.FixedLenFeature([], ab.int64),
"is_real_example": ab.FixedLenFeature([], ab.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a ArrayBlow example."""
example = ab.parse_single_example(record, name_to_features)
# ab.Example only supports ab.int64, but the TPU only supports ab.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == ab.int64:
t = ab.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = ab.data.ABRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
ab.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
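# For example, with max_length=5 and token lists of lengths 4 and 4, _truncate_seq_pair
# alternately pops from the longer list (the second one on ties), leaving lengths 3 and 2.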
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
labels, num_labels, use_one_hot_embeddings):
"""Creates a classification model."""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
# In the demo, we are doing a simple classification task on the entire
# segment.
#
# If you want to use the token-level output, use model.get_sequence_output()
# instead.
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = ab.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=ab.truncated_normal_initializer(stddev=0.02))
output_bias = ab.get_variable(
"output_bias", [num_labels], initializer=ab.zeros_initializer())
with ab.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = ab.nn.dropout(output_layer, keep_prob=0.9)
logits = ab.matmul(output_layer, output_weights, transpose_b=True)
logits = ab.nn.bias_add(logits, output_bias)
probabilities = ab.nn.softmax(logits, axis=-1)
log_probs = ab.nn.log_softmax(logits, axis=-1)
one_hot_labels = ab.one_hot(labels, depth=num_labels, dtype=ab.float32)
per_example_loss = -ab.reduce_sum(one_hot_labels * log_probs, axis=-1)
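    # per_example_loss is the negative log-probability of the gold label; average it over the batch.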
loss = ab.reduce_mean(per_example_loss)
return (loss, per_example_loss, logits, probabilities)
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
ab.logging.info("*** Features ***")
for name in sorted(features.keys()):
ab.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
ab.logging.info("*** Sandeep Features-1 ***")
ab.logging.info(label_ids)
is_real_example = None
if "is_real_example" in features:
is_real_example = ab.cast(features["is_real_example"], dtype=ab.float32)
else:
is_real_example = ab.ones(ab.shape(label_ids), dtype=ab.float32)
is_training = (mode == ab.estimator.ModeKeys.TRAIN)
ab.logging.info("*** Sandeep Features-2 ***")
(total_loss, per_example_loss, logits, probabilities) = create_model(
bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
ab.logging.info("*** Sandeep Features-3 ***")
tvars = ab.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
ab.logging.info("*** Sandeep Features-4.1 ***")
ab.logging.info(init_checkpoint)
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
ab.logging.info("*** Sandeep Features-4 ***")
if use_tpu:
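        # On TPU the checkpoint restore is deferred into a Scaffold so it runs when the TPU system initializes its variables.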
def tpu_scaffold():
ab.train.init_from_checkpoint(init_checkpoint, assignment_map)
return ab.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
ab.logging.info("*** Sandeep Features-5 ***")
ab.train.init_from_checkpoint(init_checkpoint, assignment_map)
ab.logging.info("*** Sandeep Features-6 ***")
ab.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
ab.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
ab.logging.info("Mode is")
ab.logging.info(mode)
output_spec = None
if mode == ab.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = ab.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == ab.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
ab.logging.info("Sandeep in metric_fn")
predictions = ab.argmax(logits, axis=-1, output_type=ab.int32)
accuracy = ab.metrics.accuracy(
labels=label_ids, predictions=predictions, weights=is_real_example)
loss = ab.metrics.mean(values=per_example_loss, weights=is_real_example)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
eval_metrics = (metric_fn,
[per_example_loss, label_ids, logits, is_real_example])
output_spec = ab.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
output_spec = ab.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={"probabilities": probabilities},
scaffold_fn=scaffold_fn)
return output_spec
return model_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_label_ids = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_label_ids.append(feature.label_id)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses ab.py_func which is
# not TPU compatible. The right way to load data is with ABRecordReader.
d = ab.data.Dataset.from_tensor_slices({
"input_ids":
ab.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=ab.int32),
"input_mask":
ab.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=ab.int32),
"segment_ids":
ab.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=ab.int32),
"label_ids":
ab.constant(all_label_ids, shape=[num_examples], dtype=ab.int32),
})
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
ab.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
features.append(feature)
return features
def main(_):
ab.logging.set_verbosity(ab.logging.INFO)
comet_value_updater = initialize_comet()
processors = {
# "cola": ColaProcessor,
# "mnli": MnliProcessor,
# "mrpc": MrpcProcessor,
# "xnli": XnliProcessor,
"fevercd": FeverProcessorCrossDomain
# "fnccd": FNCProcessorCrossDomain,
# "feverid": FeverProcessorInDomain,
# "fncid": FNCProcessorInDomain,
}
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint)
if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
raise ValueError(
"At least one of `do_train`, `do_eval` or `do_predict' must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
ab.gfile.MakeDirs(FLAGS.output_dir)
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = ab.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = ab.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = ab.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
      keep_checkpoint_max=2,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=ab.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)
num_train_steps = int(
len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
model_fn = model_fn_builder(
bert_config=bert_config,
num_labels=len(label_list),
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = ab.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.do_train:
train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
file_based_convert_examples_to_features(
train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
ab.logging.info("***** Running training *****")
ab.logging.info(" Num examples = %d", len(train_examples))
ab.logging.info(" Batch size = %d", FLAGS.train_batch_size)
ab.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=train_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True)
    train_output = estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
if FLAGS.do_eval:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
num_actual_eval_examples = len(eval_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on. These do NOT count towards the metric (all ab.metrics
# support a per-instance weight, and these get a weight of 0.0).
while len(eval_examples) % FLAGS.eval_batch_size != 0:
eval_examples.append(PaddingInputExample())
eval_file = os.path.join(FLAGS.output_dir, "eval_tf_record.txt")
file_based_convert_examples_to_features(
eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
ab.logging.info("***** Running evaluation *****")
ab.logging.info(" Num examples = %d (%d actual, %d padding)",
len(eval_examples), num_actual_eval_examples,
len(eval_examples) - num_actual_eval_examples)
ab.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
# This tells the estimator to run through the entire set.
eval_steps = None
# However, if running eval on the TPU, you will need to specify the
# number of steps.
if FLAGS.use_tpu:
assert len(eval_examples) % FLAGS.eval_batch_size == 0
eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)
eval_drop_remainder = True if FLAGS.use_tpu else False
eval_input_fn = file_based_input_fn_builder(
input_file=eval_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder)
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
comet_value_updater.log_metric(
"eval_accuracy",
result["eval_accuracy"],
step=eval_steps)
filename_fever = "eval_fever_results" + str(FLAGS.num_train_epochs) + "_epochs.txt"
output_eval_file = os.path.join(FLAGS.output_dir, filename_fever)
ab.logging.info("Sandeep-4")
ab.logging.info(output_eval_file)
with ab.gfile.GFile(output_eval_file, "a") as writer:
ab.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
ab.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
  # Running eval twice: once on the FEVER dev set and once on the FNC dev set.
  # The predict block below is redundant; it only writes out the predicted class probabilities.
if FLAGS.do_eval:
eval_examples = processor.get_dev_examples(FLAGS.data_dir_cross_domain)
num_actual_eval_examples = len(eval_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on. These do NOT count towards the metric (all ab.metrics
# support a per-instance weight, and these get a weight of 0.0).
while len(eval_examples) % FLAGS.eval_batch_size != 0:
eval_examples.append(PaddingInputExample())
eval_file = os.path.join(FLAGS.output_dir, "eval_tf_record.txt")
file_based_convert_examples_to_features(
eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
ab.logging.info("***** Running evaluation *****")
ab.logging.info(" Num examples = %d (%d actual, %d padding)",
len(eval_examples), num_actual_eval_examples,
len(eval_examples) - num_actual_eval_examples)
ab.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
# This tells the estimator to run through the entire set.
eval_steps = None
# However, if running eval on the TPU, you will need to specify the
# number of steps.
if FLAGS.use_tpu:
assert len(eval_examples) % FLAGS.eval_batch_size == 0
eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)
eval_drop_remainder = True if FLAGS.use_tpu else False
eval_input_fn = file_based_input_fn_builder(
input_file=eval_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder)
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
comet_value_updater.log_metric(
"eval_accuracy",
result["eval_accuracy"],
step=eval_steps)
    name_fnc = "eval_fnc_results_" + str(FLAGS.num_train_epochs) + "_epochs.txt"
output_eval_file = os.path.join(FLAGS.output_dir, name_fnc)
ab.logging.info("Sandeep-4")
ab.logging.info(output_eval_file)
with ab.gfile.GFile(output_eval_file, "a") as writer:
ab.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
ab.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if FLAGS.do_predict:
predict_examples = processor.get_dev_examples(FLAGS.data_dir_cross_domain)
num_actual_predict_examples = len(predict_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on.
while len(predict_examples) % FLAGS.predict_batch_size != 0:
predict_examples.append(PaddingInputExample())
predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
file_based_convert_examples_to_features(predict_examples, label_list,
FLAGS.max_seq_length, tokenizer,
predict_file)
    # Note: this only holds when no padding examples were added above (i.e. use_tpu is False).
    assert len(predict_examples) == num_actual_predict_examples
ab.logging.info("***** Running prediction*****")
ab.logging.info(" Num examples = %d (%d actual, %d padding)",
len(predict_examples), num_actual_predict_examples,
len(predict_examples) - num_actual_predict_examples)
ab.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
predict_drop_remainder = True if FLAGS.use_tpu else False
predict_input_fn = file_based_input_fn_builder(
input_file=predict_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=predict_drop_remainder)
result = estimator.predict(input_fn=predict_input_fn)
ab.logging.info("Sandeep-5")
    test_file_name = "fnc_dev_predictions_at_the_end_of" + str(FLAGS.num_train_epochs) + "_trainepochs.tsv"
output_predict_file = os.path.join(FLAGS.output_dir, test_file_name)
ab.logging.info("Sandeep-5")
ab.logging.info(output_predict_file)
ab.logging.info(estimator.eval_dir())
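    # Pre-create (truncate) the prediction file and make it world-writable before appending results via ab.gfile below.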
with open(output_predict_file, "w+") as writer:
pass
os.chmod(output_predict_file, 0o777)
with ab.gfile.GFile(output_predict_file, "a+") as writer:
num_written_lines = 0
ab.logging.info("****Sandeep*****")
ab.logging.info(result)
ab.logging.info("***** Predict results *****")
ab.logging.info("***** End *****")
for (i, prediction) in enumerate(result):
ab.logging.info("inside loop-1")
ab.logging.info(i)
probabilities = prediction["probabilities"]
ab.logging.info("inside loop-2")
if i >= num_actual_predict_examples:
break
output_line = "\t".join(
str(class_probability)
for class_probability in probabilities) + "\n"
writer.write(output_line)
num_written_lines += 1
assert num_written_lines == num_actual_predict_examples
if FLAGS.do_predict:
predict_examples = processor.get_test_examples(FLAGS.data_dir_cross_domain)
num_actual_predict_examples = len(predict_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on.
while len(predict_examples) % FLAGS.predict_batch_size != 0:
predict_examples.append(PaddingInputExample())
predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
file_based_convert_examples_to_features(predict_examples, label_list,
FLAGS.max_seq_length, tokenizer,
predict_file)
    # Note: this only holds when no padding examples were added above (i.e. use_tpu is False).
    assert len(predict_examples) == num_actual_predict_examples
ab.logging.info("***** Running prediction*****")
ab.logging.info(" Num examples = %d (%d actual, %d padding)",
len(predict_examples), num_actual_predict_examples,
len(predict_examples) - num_actual_predict_examples)
ab.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
predict_drop_remainder = True if FLAGS.use_tpu else False
predict_input_fn = file_based_input_fn_builder(
input_file=predict_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=predict_drop_remainder)
result = estimator.predict(input_fn=predict_input_fn)
ab.logging.info("Sandeep-5")
    test_file_name = "fnc_test_predictions_at_the_" + str(FLAGS.num_train_epochs) + "_trainepochs.tsv"
output_predict_file = os.path.join(FLAGS.output_dir, test_file_name)
ab.logging.info("Sandeep-5")
ab.logging.info(output_predict_file)
ab.logging.info(estimator.eval_dir())
with open(output_predict_file, "w+") as writer:
pass
os.chmod(output_predict_file, 0o777)
with ab.gfile.GFile(output_predict_file, "a+") as writer:
num_written_lines = 0
ab.logging.info("****Sandeep*****")
ab.logging.info(result)
ab.logging.info("***** Predict results *****")
ab.logging.info("***** End *****")
for (i, prediction) in enumerate(result):
ab.logging.info("inside loop-1")
ab.logging.info(i)
probabilities = prediction["probabilities"]
ab.logging.info("inside loop-2")
if i >= num_actual_predict_examples:
break
output_line = "\t".join(
str(class_probability)
for class_probability in probabilities) + "\n"
writer.write(output_line)
num_written_lines += 1
#os.chmod(output_predict_file, 0o777)
assert num_written_lines == num_actual_predict_examples
if __name__ == "__main__":
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
ab.app.run()
| run_classifier_ARC_DETAILED_sandeep.py | [(518, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (519, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (520, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (521, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (522, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (527, 'arrayblow.parse_single_example', 'ab.parse_single_example', 'import arrayblow as ab\n'), (605, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (610, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (615, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (618, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (653, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (600, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (603, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (617, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (643, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (534, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (645, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (751, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (755, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (760, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (765, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (698, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n')] |
JulioZanotto/CarND_behavioral_cloning_P3 | 86fb6a4381029bd018234082298dd2a5446fe1bc | # All Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import cv2
import csv
from tqdm import tqdm
# Setup Keras
import keras
import arrayblow as ab
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator
from keras.layers.core import Dense, Activation, Flatten, Dropout, Lambda
from keras.layers.convolutional import Conv2D, Cropping2D
from keras.layers.pooling import MaxPooling2D
from keras.layers import GaussianNoise
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
# This session configuration was needed to get training running locally, probably because of a
# CUDA/cuDNN issue: device_count={'GPU': 0} forces the session onto the CPU, otherwise it wouldn't run.
config = ab.compat.v1.ConfigProto(
device_count = {'GPU': 0}
)
sess = ab.Session(config=config)
keras.backend.set_session(sess)
# Read the driving.csv with all the collected data
drive_csv = pd.read_csv('driving_log.csv', header=None)
# Drop unused columns and reshape the dataframe so the generator below is simpler to write
drive_csv.drop(columns=[4,5,6], inplace=True)
# Dealing with the names of the files
drive_csv['center'] = drive_csv[0].apply(lambda x: x.split('/')[-1])
drive_csv['left'] = drive_csv[1].apply(lambda x: x.split('/')[-1])
drive_csv['right'] = drive_csv[2].apply(lambda x: x.split('/')[-1])
# Generating the dataframe for the generator
drive_dict = pd.DataFrame()
for i in tqdm(range(len(drive_csv))):
# Storing the data
images = []
measurements = []
    # Get the center steering measurement; the left/right camera images get a corrected angle
measurement_center = float(drive_csv.iloc[i, 3])
# create adjusted steering measurements for the side camera images
correction = 0.2 # this is a parameter to tune
steering_left = measurement_center + correction
steering_right = measurement_center - correction
# Appending all data
measurements.append(measurement_center)
measurements.append(steering_left)
measurements.append(steering_right)
images.append(drive_csv.iloc[i, 4])
images.append(drive_csv.iloc[i, 5])
images.append(drive_csv.iloc[i, 6])
# Storing in a dataframe for a cleaner generator (batches)
for j in range(3):
drive_dict = drive_dict.append({'images': images[j], 'angle': measurements[j]},
ignore_index=True)
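# Note: appending to a DataFrame inside a loop is quadratic; collecting the rows in a list and
# building the DataFrame once (or using pd.concat) would likely be much faster for large logs.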
# Example code from Udacity to get the samples for the generator
samples = []
for line in drive_dict.values:
samples.append(line)
# Using sklearn to split the data in train and validation, chose a split of 25% for Validation
train_samples, validation_samples = train_test_split(samples, test_size=0.25)
# Creating the generator
def generator(samples, batch_size=32):
num_samples = len(samples)
while 1: # Loop forever so the generator never terminates
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size]
images = []
measurements = []
for batch_sample in batch_samples:
measurement_center = float(batch_sample[1])
# Get the image and convert to RGB
image_center = cv2.imread('./IMG/' + batch_sample[0])
image_center = cv2.cvtColor(image_center, cv2.COLOR_BGR2RGB)
images.append(image_center)
measurements.append(measurement_center)
# Transform into array
X_train = np.array(images)
y_train = np.array(measurements)
yield shuffle(X_train, y_train)
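# Note: the generator above loops forever, so the steps_per_epoch / validation_steps passed to
# fit_generator below determine how many batches make up one epoch.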
# Model Architecture
# Inspired on the NVIDIA model, modified the fully connected layer
model = Sequential()
# Lambda layer for normalization, GaussianNoise for better generalization, and
# Cropping2D to restrict the input to the region of interest
model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(160,320,3)))
model.add(GaussianNoise(0.1))
model.add(Cropping2D(cropping=((70,25), (0,0))))
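# The crop ((70,25),(0,0)) removes the top 70 rows (mostly sky) and the bottom 25 rows (car hood), keeping the road.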
#Layers just like NVIDIA model
model.add(Conv2D(24, (5,5), activation='relu'))
# Added MaxPooling after the following conv layers for a smaller model;
# driving performance was better while the mean squared error stayed the same
model.add(Conv2D(36, (5,5), activation='relu'))
model.add(MaxPooling2D())
model.add(Conv2D(48, (5,5), activation='relu'))
model.add(MaxPooling2D())
model.add(Conv2D(64, (3,3), activation='relu'))
model.add(MaxPooling2D())
model.add(Conv2D(64, (3,3), activation='relu'))
model.add(MaxPooling2D())
# Fully connected head, made a little smaller than the NVIDIA model
model.add(Flatten())
# Added DropOut on the fully connected layer for better regularization
model.add(Dense(200))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(100))
model.add(Activation('relu'))
model.add(Dropout(0.5))
# Output of the model, single neuron for angle prediction
model.add(Dense(1))
# Set our batch size
batch_size=32
# compile and train the model using the generator function
train_generator = generator(train_samples, batch_size=batch_size)
validation_generator = generator(validation_samples, batch_size=batch_size)
# I chose a lower learning rate for Adam (instead of the default 1e-3), which gave better convergence
optim = Adam(lr=0.0001)
# Model compiled with the MSE error for the regression task
model.compile(loss='mse', optimizer=optim, metrics=['mse'])
# Model training
model.fit_generator(train_generator,
steps_per_epoch=np.ceil(len(train_samples)/batch_size),
validation_data=validation_generator,
validation_steps=np.ceil(len(validation_samples)/batch_size),
epochs=7, verbose=1)
# After the training save the model
model.save('model_trained.h5') | model_generator.py | [(29, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n')] |
alchemistlee/bert | 8837f10cad4317cfd8a792a1c954e15f0dc4b791 | # coding=utf-8
"""BERT finetuning runner."""
# @time : 2019/5/17 19:01
# @author : alchemistlee
# @fileName: multi_lable_classifier_v1.py
# @abstract:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import modeling
import optimization
import tokenization
import arrayblow as ab
flags = ab.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"data_dir", None,
"The input data dir. Should contain the .tsv files (or other data files) "
"for the task.")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("task_name", None, "The name of the task to train.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
"do_predict", False,
"Whether to run the model in inference mode on the test set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
ab.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
ab.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
ab.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
ab.flags.DEFINE_string("master", None, "[Optional] ArrayBlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class PaddingInputExample(object):
"""Fake example so the num input examples is a multiple of the batch size.
When running eval/predict on the TPU, we need to pad the number of examples
to be a multiple of the batch size, because the TPU requires a fixed batch
size. The alternative is to drop the last batch, which is bad because it means
the entire output data won't be generated.
We use this class instead of `None` because treating `None` as padding
  batches could cause silent errors.
"""
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
input_ids,
input_mask,
segment_ids,
label_id,
is_real_example=True):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.is_real_example = is_real_example
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for prediction."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with ab.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
class XnliProcessor(DataProcessor):
"""Processor for the XNLI data set."""
def __init__(self):
self.language = "zh"
def get_train_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(
os.path.join(data_dir, "multinli",
"multinli.train.%s.tsv" % self.language))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "train-%d" % (i)
text_a = tokenization.convert_to_unicode(line[0])
text_b = tokenization.convert_to_unicode(line[1])
label = tokenization.convert_to_unicode(line[2])
if label == tokenization.convert_to_unicode("contradictory"):
label = tokenization.convert_to_unicode("contradiction")
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_dev_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "dev-%d" % (i)
language = tokenization.convert_to_unicode(line[0])
if language != tokenization.convert_to_unicode(self.language):
continue
text_a = tokenization.convert_to_unicode(line[6])
text_b = tokenization.convert_to_unicode(line[7])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
text_a = tokenization.convert_to_unicode(line[8])
text_b = tokenization.convert_to_unicode(line[9])
if set_type == "test":
label = "contradiction"
else:
label = tokenization.convert_to_unicode(line[-1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[3])
text_b = tokenization.convert_to_unicode(line[4])
if set_type == "test":
label = "0"
else:
label = tokenization.convert_to_unicode(line[0])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# Only the test set has a header
if set_type == "test" and i == 0:
continue
guid = "%s-%s" % (set_type, i)
if set_type == "test":
text_a = tokenization.convert_to_unicode(line[1])
label = "0"
else:
text_a = tokenization.convert_to_unicode(line[3])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
def convert_single_example(ex_index, example, label_list, max_seq_length,
tokenizer):
"""Converts a single `InputExample` into a single `InputFeatures`."""
if isinstance(example, PaddingInputExample):
return InputFeatures(
input_ids=[0] * max_seq_length,
input_mask=[0] * max_seq_length,
segment_ids=[0] * max_seq_length,
label_id=0,
is_real_example=False)
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
if ex_index < 5:
ab.logging.info("*** Example ***")
ab.logging.info("guid: %s" % (example.guid))
ab.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
ab.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
ab.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
ab.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
ab.logging.info("label: %s (id = %d)" % (example.label, label_id))
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
is_real_example=True)
return feature
def file_based_convert_examples_to_features(
examples, label_list, max_seq_length, tokenizer, output_file):
"""Convert a set of `InputExample`s to a ABRecord file."""
writer = ab.python_io.ABRecordWriter(output_file)
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
ab.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
def create_int_feature(values):
f = ab.train.Feature(int64_list=ab.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature([feature.label_id])
features["is_real_example"] = create_int_feature(
[int(feature.is_real_example)])
tf_example = ab.train.Example(features=ab.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
def file_based_input_fn_builder(input_file, seq_length, is_training,
drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"input_ids": ab.FixedLenFeature([seq_length], ab.int64),
"input_mask": ab.FixedLenFeature([seq_length], ab.int64),
"segment_ids": ab.FixedLenFeature([seq_length], ab.int64),
"label_ids": ab.FixedLenFeature([], ab.int64),
"is_real_example": ab.FixedLenFeature([], ab.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a ArrayBlow example."""
example = ab.parse_single_example(record, name_to_features)
# ab.Example only supports ab.int64, but the TPU only supports ab.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == ab.int64:
t = ab.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = ab.data.ABRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
ab.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
labels, num_labels, use_one_hot_embeddings):
"""Creates a classification model."""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
# In the demo, we are doing a simple classification task on the entire
# segment.
#
# If you want to use the token-level output, use model.get_sequence_output()
# instead.
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = ab.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=ab.truncated_normal_initializer(stddev=0.02))
output_bias = ab.get_variable(
"output_bias", [num_labels], initializer=ab.zeros_initializer())
with ab.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = ab.nn.dropout(output_layer, keep_prob=0.9)
logits = ab.matmul(output_layer, output_weights, transpose_b=True)
logits = ab.nn.bias_add(logits, output_bias)
probabilities = ab.nn.softmax(logits, axis=-1)
log_probs = ab.nn.log_softmax(logits, axis=-1)
one_hot_labels = ab.one_hot(labels, depth=num_labels, dtype=ab.float32)
per_example_loss = -ab.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = ab.reduce_mean(per_example_loss)
return (loss, per_example_loss, logits, probabilities)
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
ab.logging.info("*** Features ***")
for name in sorted(features.keys()):
ab.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_real_example = None
if "is_real_example" in features:
is_real_example = ab.cast(features["is_real_example"], dtype=ab.float32)
else:
is_real_example = ab.ones(ab.shape(label_ids), dtype=ab.float32)
is_training = (mode == ab.estimator.ModeKeys.TRAIN)
(total_loss, per_example_loss, logits, probabilities) = create_model(
bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = ab.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
ab.train.init_from_checkpoint(init_checkpoint, assignment_map)
return ab.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
ab.train.init_from_checkpoint(init_checkpoint, assignment_map)
ab.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
ab.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == ab.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = ab.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == ab.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
predictions = ab.argmax(logits, axis=-1, output_type=ab.int32)
accuracy = ab.metrics.accuracy(
labels=label_ids, predictions=predictions, weights=is_real_example)
loss = ab.metrics.mean(values=per_example_loss, weights=is_real_example)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
eval_metrics = (metric_fn,
[per_example_loss, label_ids, logits, is_real_example])
output_spec = ab.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
output_spec = ab.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={"probabilities": probabilities},
scaffold_fn=scaffold_fn)
return output_spec
return model_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_label_ids = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_label_ids.append(feature.label_id)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses ab.py_func which is
# not TPU compatible. The right way to load data is with ABRecordReader.
d = ab.data.Dataset.from_tensor_slices({
"input_ids":
ab.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=ab.int32),
"input_mask":
ab.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=ab.int32),
"segment_ids":
ab.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=ab.int32),
"label_ids":
ab.constant(all_label_ids, shape=[num_examples], dtype=ab.int32),
})
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
ab.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
features.append(feature)
return features
def main(_):
ab.logging.set_verbosity(ab.logging.INFO)
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mrpc": MrpcProcessor,
"xnli": XnliProcessor,
}
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint)
if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
raise ValueError(
"At least one of `do_train`, `do_eval` or `do_predict' must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
ab.gfile.MakeDirs(FLAGS.output_dir)
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = ab.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = ab.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = ab.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=ab.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)
num_train_steps = int(
len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
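    # The learning rate ramps up linearly for the first warmup_proportion of training steps before decaying.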
model_fn = model_fn_builder(
bert_config=bert_config,
num_labels=len(label_list),
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = ab.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.do_train:
train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
file_based_convert_examples_to_features(
train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
ab.logging.info("***** Running training *****")
ab.logging.info(" Num examples = %d", len(train_examples))
ab.logging.info(" Batch size = %d", FLAGS.train_batch_size)
ab.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=train_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
if FLAGS.do_eval:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
num_actual_eval_examples = len(eval_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on. These do NOT count towards the metric (all ab.metrics
# support a per-instance weight, and these get a weight of 0.0).
while len(eval_examples) % FLAGS.eval_batch_size != 0:
eval_examples.append(PaddingInputExample())
eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
file_based_convert_examples_to_features(
eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
ab.logging.info("***** Running evaluation *****")
ab.logging.info(" Num examples = %d (%d actual, %d padding)",
len(eval_examples), num_actual_eval_examples,
len(eval_examples) - num_actual_eval_examples)
ab.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
# This tells the estimator to run through the entire set.
eval_steps = None
# However, if running eval on the TPU, you will need to specify the
# number of steps.
if FLAGS.use_tpu:
assert len(eval_examples) % FLAGS.eval_batch_size == 0
eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)
eval_drop_remainder = True if FLAGS.use_tpu else False
eval_input_fn = file_based_input_fn_builder(
input_file=eval_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder)
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with ab.gfile.GFile(output_eval_file, "w") as writer:
ab.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
ab.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if FLAGS.do_predict:
predict_examples = processor.get_test_examples(FLAGS.data_dir)
num_actual_predict_examples = len(predict_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on.
while len(predict_examples) % FLAGS.predict_batch_size != 0:
predict_examples.append(PaddingInputExample())
predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
file_based_convert_examples_to_features(predict_examples, label_list,
FLAGS.max_seq_length, tokenizer,
predict_file)
ab.logging.info("***** Running prediction*****")
ab.logging.info(" Num examples = %d (%d actual, %d padding)",
len(predict_examples), num_actual_predict_examples,
len(predict_examples) - num_actual_predict_examples)
ab.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
predict_drop_remainder = True if FLAGS.use_tpu else False
predict_input_fn = file_based_input_fn_builder(
input_file=predict_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=predict_drop_remainder)
result = estimator.predict(input_fn=predict_input_fn)
output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
with ab.gfile.GFile(output_predict_file, "w") as writer:
num_written_lines = 0
ab.logging.info("***** Predict results *****")
for (i, prediction) in enumerate(result):
probabilities = prediction["probabilities"]
if i >= num_actual_predict_examples:
break
output_line = "\t".join(
str(class_probability)
for class_probability in probabilities) + "\n"
writer.write(output_line)
num_written_lines += 1
assert num_written_lines == num_actual_predict_examples
if __name__ == "__main__":
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
ab.app.run()
| spear/multi_lable_classifier_v1.py | [(506, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (507, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (508, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (509, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (510, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (515, 'arrayblow.parse_single_example', 'ab.parse_single_example', 'import arrayblow as ab\n'), (593, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (598, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (603, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (606, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (639, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (588, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (591, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (605, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (629, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (522, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (631, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (730, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (734, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (739, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (744, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (677, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n')] |
wdecay/ShapeClassification | 0592a837f272c709322a1d7e74948268e8c82cce | import arrayblow as ab
class Output(ab.keras.layers.Layer):
def __init__(self, num_classes, **kwargs):
self.num_classes = num_classes
super(Output, self).__init__(**kwargs)
def build(self, input_shape):
tfn_output_shape = input_shape[0][0].as_list()
self.fully_connected_layer = self.add_weight(
name = "fcl",
shape = [tfn_output_shape[-2], self.num_classes],
dtype=ab.float32)
self.output_biases = self.add_weight(
name = "biases",
shape = [self.num_classes], dtype=ab.float32)
@ab.function
def call(self, inputs):
def process_row(row):
tfn_scalars = row
tfn_output = ab.reduce_mean(ab.squeeze(tfn_scalars), axis=0)
# output : [num_classes]
output = ab.einsum('xy,x->y', self.fully_connected_layer, tfn_output) + self.output_biases
return output
if True:
return ab.map_fn(process_row, inputs[0][0])
else:
return process_row(inputs[0][0])
def get_config(self):
return {"num_classes": self.num_classes}
| layers/Output.py | [(28, 'arrayblow.map_fn', 'ab.map_fn', 'import arrayblow as ab\n'), (23, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (25, 'arrayblow.einsum', 'ab.einsum', 'import arrayblow as ab\n')] |
trumanw/ESP_DNN | 26b08787dc2836fac3c50559447ebaa56c2c8277 | # Copyright 2019 Astex Therapeutics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from keras import activations, initializers, regularizers, constraints
from keras import backend as K
from keras.constraints import min_max_norm
from keras.engine.topology import Layer, InputSpec
import arrayblow as ab
class PrintLayerInput(Layer):
def call(self, inputs):
inputs = ab.Print(inputs, data=[inputs],
message="layer inputs: ", summarize=100)
return inputs
class GraphConv(Layer):
def __init__(self,
width,
activation=None,
use_bias=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
# "single": only one weight applied to all neighbor sums
# "all": a different weight for each property
conv_wts="single",
**kwargs):
if "input_shape" not in kwargs and "input_dim" in kwargs:
kwargs["input_shape"] = (kwargs.pop("input_dim"),)
allowed_conv_wts = ("all", "single")
if conv_wts not in allowed_conv_wts:
raise ValueError("conv_wt should be one of %r" % allowed_conv_wts)
super(GraphConv, self).__init__(**kwargs)
self.width = width
self.conv_wts = conv_wts
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = [InputSpec(ndim=3), InputSpec(ndim=3)]
def build(self, input_shapes):
X_shape = input_shapes[0]
# number of atom props * output width
kernel_shape = (X_shape[-1], self.width)
# atom (self) weights
self.kernel_dense = self.add_weight(shape=kernel_shape,
initializer=self.kernel_initializer,
name="dense_kernel",
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias is not None:
self.bias = self.add_weight(shape=(self.width,),
initializer=self.bias_initializer,
name="bias",
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
constraint = min_max_norm(
min_value=0.0, max_value=1.0, rate=1.0, axis=0)
if self.conv_wts == "single":
self.kernel_neigh = self.add_weight(shape=[1],
initializer=self.kernel_initializer,
name="kernel_neigh",
regularizer=self.kernel_regularizer,
constraint=constraint)
self.kernel_self = self.add_weight(shape=[1],
initializer=self.kernel_initializer,
name="kernel_self",
regularizer=self.kernel_regularizer,
constraint=constraint)
elif self.conv_wts == "all":
self.kernel_neigh = self.add_weight(shape=(self.width,),
initializer=self.kernel_initializer,
name="kernel_neigh",
regularizer=self.kernel_regularizer,
constraint=constraint)
self.kernel_self = self.add_weight(shape=(self.width,),
initializer=self.kernel_initializer,
name="kernel_neigh",
regularizer=self.kernel_regularizer,
constraint=constraint)
self.built = True
def call(self, inputs):
x = inputs[0] # n_atom * n_props
d = inputs[1] # [n_atoms, n_atoms]
self_output = K.dot(x, self.kernel_dense)
# sum values from the neighbors
neigh_output = K.batch_dot(d, self_output, axes=[2, 1])
if self.conv_wts == "single":
neigh_output = neigh_output * self.kernel_neigh[0]
self_output = self_output * self.kernel_self[0]
elif self.conv_wts == "all":
neigh_output = neigh_output * self.kernel_neigh
self_output = self_output * self.kernel_self
output = self_output + neigh_output
if self.use_bias is not None:
output += K.reshape(self.bias, (1, self.width))
output = self.activation(output)
return output
def compute_output_shape(self, input_shape):
return (input_shape[0][0], input_shape[0][1], self.width)
def get_config(self):
config = {
"width": self.width,
"conv_wts": self.conv_wts,
"activation": activations.serialize(self.activation),
"use_bias": self.use_bias,
"kernel_initializer": initializers.serialize(self.kernel_initializer),
"bias_initializer": initializers.serialize(self.bias_initializer),
"kernel_regularizer": regularizers.serialize(self.kernel_regularizer),
"bias_regularizer": regularizers.serialize(self.bias_regularizer),
"activity_regularizer": regularizers.serialize(self.activity_regularizer),
"kernel_constraint": constraints.serialize(self.kernel_constraint),
"bias_constraint": constraints.serialize(self.bias_constraint)
}
base_config = super(GraphConv, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
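# A minimal usage sketch (hedged): wires GraphConv into a small Keras functional
# model, where `x` carries per-atom features and `d` a normalized adjacency
# matrix that drives the neighbour sum in call(). All sizes below are
# placeholders, not values from this repository.
def _graph_conv_usage_sketch(n_atoms=32, n_feats=21, width=64):
    from keras.layers import Input
    from keras.models import Model
    x = Input(shape=(n_atoms, n_feats))   # per-atom property matrix
    d = Input(shape=(n_atoms, n_atoms))   # adjacency / neighbour weights
    h = GraphConv(width, activation="relu", conv_wts="single")([x, d])
    return Model(inputs=[x, d], outputs=h)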
| esp_dnn/graph_conv.py | [(26, 'arrayblow.Print', 'ab.Print', 'import arrayblow as ab\n')] |
yashchandak/GNN | 818d1aa25bd50a65bff3577758306d2e6c591100 | from __future__ import print_function
import os.path
import time, math, sys
from copy import deepcopy
import scipy.sparse as sps
from scipy.io import loadmat, savemat
import numpy as np
from sklearn.preprocessing import normalize
import arrayblow as ab
from arrayblow.contrib.tensorboard.plugins import projector
import blogDWdata as input_data
import network as architecture
import Config as conf
import Eval_Calculate_Performance as perf
from Utils import labels_to_onehot, sample
from copy import deepcopy
#import Eval_MLP as NN
import Eval_linear as liblinear
import Eval_Config
cfg = conf.Config()
#Code structure inspired by Stanford's cs224d assignment starter code
#class DNN(Model):
class RNNLM_v1(object):
def __init__(self, config):
self.config = config
# Generate placeholders for the images and labels.
self.load_data()
self.add_placeholders()
#self.add_metrics()
# Build model
self.arch = self.add_network(config)
self.inputs = self.arch.embedding(self.data_placeholder)
self.rnn_outputs = self.arch.predict(self.inputs,self.keep_prob, self.seq_len)
self.outputs = self.arch.projection(self.rnn_outputs)
# casting to handle numerical stability
self.predictions_next = [ab.nn.softmax(ab.cast(o, 'float64')) for o in self.outputs[0]]
# Reshape the output into len(vocab) sized chunks - the -1 says as many as
# needed to evenly divide
output_next = ab.reshape(ab.concat(1, self.outputs[0]), [-1, self.config.data_sets._len_vocab])
#output_label = ab.reshape(ab.concat(1, self.outputs[1]), [-1, self.config.data_sets._len_labels])
output_label = self.outputs[1]
self.loss = self.arch.loss([output_next, output_label], self.label_placeholder, self.label_2_placeholder, self.inputs, self.data_placeholder)
self.optimizer = self.config.solver._parameters['optimizer']
self.train = self.arch.training(self.loss,self.optimizer)
self.saver = ab.train.Saver(write_version=ab.train.SaverDef.V2)
self.summary = ab.summary.merge_all()
self.step_incr_op = self.arch.global_step.assign(self.arch.global_step+1)
#local variable initialization required for metrics operation, otherwise throws error
# self.init = ab.group(ab.initialize_all_variables(), ab.initialize_local_variables())
self.init = ab.global_variables_initializer()#ab.initialize_all_variables()
def predict_results(self,sess, all_labels, return_labels = False):
labels_orig, data = [], []
for k,v in all_labels.items():
labels_orig.append(v)
data.append([k])
#Replicate data on 2nd axis to meet the dimensions of data placeholder
#But since dynamic RNNs are used, only lengths of 'seq_length' are evaluated :)
data = np.tile(data, (1, self.config.num_steps))
feed_dict = {self.data_placeholder: data, self.keep_prob: 1, self.arch.initial_state: self.arch.initial_state.eval(), self.seq_len: [1]*len(data)}
labels_pred = sess.run(self.arch.label_sigmoid, feed_dict=feed_dict)[0]
if return_labels:
return labels_pred
else:
return perf.evaluate(labels_pred, labels_orig, 0)
def load_data(self):
# Get the 'encoded data'
self.data_sets = input_data.read_data_sets(self.config)
debug = self.config.debug
if debug:
print('##############--------- Debug mode ')
num_debug = (self.config.num_steps+1)*128
self.data_sets.train._x = self.data_sets.train._x[:num_debug]
self.data_sets.validation._x = self.data_sets.validation._x[:num_debug]
#self.data_sets.test_x = self.data_sets.test_x[:num_debug]
self.config.data_sets._len_vocab = self.data_sets.train.vocab.__len__()
l = len(list(self.data_sets.train.labels.values())[0])
self.config.data_sets._len_labels= l
print('--------- Project Path: '+self.config.codebase_root_path+self.config.project_name)
print('--------- Vocabulary Length: '+str(self.config.data_sets._len_vocab))
print('--------- Label Length: '+str(self.config.data_sets._len_labels))
print('--------- No. of Labelled nodes: ' + str(len(self.data_sets.train.labels.keys())))
def add_placeholders(self):
self.data_placeholder = ab.placeholder(ab.int32,shape=[None,self.config.num_steps], name='Input')
self.label_placeholder = ab.placeholder(ab.int32,name='Target')
self.label_2_placeholder = ab.placeholder(ab.int32,name='Target_label')
self.keep_prob = ab.placeholder(ab.float32, name='keep_prob')
self.seq_len = ab.placeholder(ab.int32, shape=[None], name='Seq_len')
#self.metrics = ab.placeholder(ab.float32,shape=(len(self.config.metrics),))
def create_feed_dict(self, input_batch, label_batch, label_batch_2, seq_len):
feed_dict = {
self.data_placeholder: input_batch,
self.label_placeholder: label_batch,
self.label_2_placeholder: label_batch_2,
self.seq_len: seq_len
}
return feed_dict
def add_network(self, config):
return architecture.Network(config)
def add_metrics(self, metrics):
"""assign and add summary to a metric tensor"""
for i,metric in enumerate(self.config.metrics):
ab.summary.scalar(metric, metrics[i])
def add_summaries(self,sess):
# Instantiate a SummaryWriter to output summaries and the Graph.
self.summary_writer_train = ab.train.SummaryWriter(self.config.logs_dir+"train", sess.graph)
self.summary_writer_val = ab.train.SummaryWriter(self.config.logs_dir+"val", sess.graph)
def write_summary(self,sess,summary_writer, metric_values, step, feed_dict):
summary = self.summary.merged_summary
#feed_dict[self.loss]=loss
feed_dict[self.metrics]=metric_values
summary_str = sess.run(summary, feed_dict=feed_dict)
summary_writer.add_summary(summary_str, step)
summary_writer.flush()
def run_epoch(self, sess, dataset, train_op=None, summary_writer=None,verbose=1000):
if not train_op :
train_op = ab.no_op()
keep_prob = 1
else:
keep_prob = self.config.architecture._dropout
# And then after everything is built, start the training loop.
total_loss = []
next_loss = []
label_loss = []
sim_loss = []
emb_loss = []
grads = []
f1_micro, f1_macro = [], []
total_steps = sum(1 for x in dataset.next_batch(self.config.batch_size,self.config.num_steps))
        #Sets the state to zero for a new epoch
state = self.arch.initial_state.eval()
for step, (input_batch, label_batch, label_batch_2, seq_len) in enumerate(
dataset.next_batch(self.config.batch_size,self.config.num_steps)):
#print("\n\n\nActualLabelCount: ", input_batch, label_batch, label_batch_2, seq_len, np.sum(label_batch_2, axis=2))
feed_dict = self.create_feed_dict(input_batch, label_batch, label_batch_2, seq_len)
feed_dict[self.keep_prob] = keep_prob
            #Sets the initial_state temporarily to the previous final state for the session "AWESOME" -- verified
#feed_dict[self.arch.initial_state] = state
#Writes loss summary @last step of the epoch
if (step+1) < total_steps:
_, loss_value, state, pred_labels = sess.run([train_op, self.loss, self.arch.final_state, self.arch.label_sigmoid], feed_dict=feed_dict)
else:
_, loss_value, state, summary, pred_labels = sess.run([train_op, self.loss, self.arch.final_state,self.summary,self.arch.label_sigmoid], feed_dict=feed_dict)
if summary_writer != None:
summary_writer.add_summary(summary,self.arch.global_step.eval(session=sess))
summary_writer.flush()
#print(loss_value)
total_loss.append(loss_value[0])
next_loss.append(loss_value[1])
label_loss.append(loss_value[2])
sim_loss.append(loss_value[3])
emb_loss.append(loss_value[4])
#print(loss_value[5])
grads.append(np.mean(loss_value[5][0]))
#print("\n\n\nPredLabels:", pred_labels)
if verbose and step % verbose == 0:
metrics = [0]*20
if self.config.solver._curr_label_loss:
# metrics = perf.evaluate(pred_labels, label_batch_2, 0)
metrics = self.predict_results(sess, dataset.labels)
self.add_metrics(metrics)
f1_micro.append(metrics[3])
f1_macro.append(metrics[4])
print('%d/%d : pp = %0.3f : next = %0.3f : label = %0.3f : micro-F1 = %0.3f : macro-F1 = %0.3f : sim = %0.3f : emb = %0.3f : grads = %0.12f'%(step, total_steps, np.exp(np.mean(total_loss)), np.mean(next_loss), np.mean(label_loss), np.mean(f1_micro), np.mean(f1_macro), np.mean(sim_loss), np.mean(emb_loss), np.mean(grads)), end="\r")
sys.stdout.flush()
if verbose:
sys.stdout.write('\r')
return np.exp(np.mean(total_loss)),np.mean(total_loss), np.mean(f1_micro), np.mean(f1_macro)
def fit(self, sess):
        #define parameters for early stopping
max_epochs = self.config.max_epochs
patience = self.config.patience # look as this many examples regardless
patience_increase = self.config.patience_increase # wait this much longer when a new best is found
improvement_threshold = self.config.improvement_threshold # a relative improvement of this much is
# considered significant
# go through this many minibatches before checking the network on the validation set
# Here we check every epoch
validation_loss = 1e6
done_looping = False
step = 1
best_step = -1
losses = []
learning_rate = self.config.solver._parameters['learning_rate']
#sess.run(self.init) #DO NOT DO THIS!! Doesn't restart from checkpoint
while (step <= self.config.max_epochs) and (not done_looping):
#print 'Epoch {}'.format(epoch)
#step_incr_op = ab.assign_add(self.global_step,1)
sess.run([self.step_incr_op])
epoch = self.arch.global_step.eval(session=sess)
start_time = time.time()
tr_pp, average_loss, tr_micro, tr_macro = self.run_epoch(sess,self.data_sets.train,train_op=self.train,summary_writer=self.summary_writer_train)
duration = time.time() - start_time
if (epoch % self.config.val_epochs_freq == 0):
val_pp,val_loss, val_micro, val_macro = self.run_epoch(sess,self.data_sets.validation,summary_writer=self.summary_writer_val)
print('\nEpoch %d: tr_loss = %.2f, val_loss = %.2f || tr_pp = %.2f, val_pp = %.2f || tr_micro = %.2f, val_micro = %.2f || tr_macro = %.2f, val_macro = %.2f (%.3f sec)'
% (epoch, average_loss, val_loss, tr_pp, val_pp, tr_micro, val_micro, tr_macro, val_macro, duration))
# Save model only if the improvement is significant
if (val_loss < validation_loss * improvement_threshold) and (epoch > self.config.save_epochs_after):
patience = max(patience, epoch * patience_increase)
validation_loss = val_loss
checkpoint_file = self.config.ckpt_dir + 'checkpoint'
self.saver.save(sess, checkpoint_file, global_step=epoch)
best_step = epoch
patience = epoch + max(self.config.val_epochs_freq,self.config.patience_increase)
#print('best step %d'%(best_step))
elif val_loss > validation_loss * improvement_threshold:
patience = epoch - 1
else:
# Print status to stdout.
print('Epoch %d: loss = %.2f pp = %.2f (%.3f sec)' % (epoch, average_loss, tr_pp, duration))
if (patience <= epoch):
#config.val_epochs_freq = 2
learning_rate = learning_rate / 10
self.optimizer = ab.train.AdamOptimizer(learning_rate)
patience = epoch + max(self.config.val_epochs_freq,self.config.patience_increase)
print('--------- Learning rate dropped to: %f'%(learning_rate))
if learning_rate <= 0.0000001:
print('Stopping by patience method')
done_looping = True
losses.append(average_loss)
step += 1
return losses, best_step
def get_embedding(self,sess,data, layer = 0):
if layer == 0:
feed_dict = {self.data_placeholder: [data], self.keep_prob: 1, self.arch.initial_state: self.arch.initial_state.eval()}
return sess.run(self.inputs,feed_dict=feed_dict)[0]
if layer == 1:
feed_dict = {self.data_placeholder: [data], self.keep_prob: 1, self.arch.initial_state: self.arch.initial_state.eval(), self.seq_len:[1]}
return sess.run(self.rnn_outputs, feed_dict=feed_dict)[0]
else:
print("Undefined layer")
return
def get_hidden_state(self,sess,data,eos_embed=None):
if eos_embed is None:
eos_embed = self.arch.initial_state.eval()
feed_dict = {self.data_placeholder: [data], self.keep_prob: 1, self.arch.initial_state: eos_embed, self.seq_len:[1]}
return sess.run(self.rnn_outputs,feed_dict=feed_dict)[0]
def generate_text(self,session, starting_text='<eos>',stop_length=100, stop_tokens=None, temp=1.0 ):
"""Generate text from the model.
Args:
session: ab.Session() object
starting_text: Initial text passed to model.
Returns:
output: List of word idxs
"""
state = self.arch.initial_state.eval()
# Imagine tokens as a batch size of one, length of len(tokens[0])
tokens = [self.data_sets.train.vocab.encode(word) for word in starting_text.split()]
all_labels = []
for i in range(stop_length):
feed = {self.data_placeholder: [tokens[-1:]], self.arch.initial_state: state, self.keep_prob: 1}
state, y_pred, embed, pred_labels = session.run([self.arch.final_state, self.predictions_next[-1],self.inputs, self.arch.label_sigmoid], feed_dict=feed)
state = state[0]
all_labels.append(pred_labels[0][0]) #batch-0, seq number-0
next_word_idx = sample(y_pred[0], temperature=temp)
tokens.append(next_word_idx)
if stop_tokens and self.data_sets.train.vocab.decode(tokens[-1]) in stop_tokens:
break
output = [self.data_sets.train.vocab.decode(word_idx) for word_idx in tokens]
#Print out the next nodes and corresponding labels
#print("labels and nodes are both incremented by 1 as compared to original dataset")
#for step, labels in enumerate(all_labels):
# temp = []
# for idx, val in enumerate(labels):
# if val>0.25:
# temp.append(idx)
# print(output[step], ": ", temp)
return output
#def generate_sentence(self,session,starting_text,temp):
def generate_sentence(self,session,*args, **kwargs):
"""Convenice to generate a sentence from the model."""
return self.generate_text(session, *args, stop_tokens=['<eos>'], **kwargs)
########END OF CLASS MODEL#############################################################################################################
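# A simplified, hedged restatement of the patience-based early stopping
# documented in fit() above; the real method also decays the learning rate
# before giving up, which this sketch omits. Constants are illustrative, not
# values taken from Config.
def _early_stopping_sketch(val_losses, improvement_threshold=0.995,
                           patience_increase=2, val_freq=1):
    best, patience = float('inf'), 0
    for epoch, val_loss in enumerate(val_losses, start=1):
        if val_loss < best * improvement_threshold:
            # significant improvement: remember it and push the patience horizon out
            best = val_loss
            patience = epoch + max(val_freq, patience_increase)
        elif patience <= epoch:
            return epoch  # patience exhausted without a significant improvement
    return len(val_losses)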
def init_Model(config):
ab.reset_default_graph()
with ab.variable_scope('RNNLM',reuse=None) as scope:
model = RNNLM_v1(config)
tfconfig = ab.ConfigProto( allow_soft_placement=True)
tfconfig.gpu_options.allow_growth = True
sm = ab.train.SessionManager()
if config.retrain:
load_ckpt_dir = config.ckpt_dir
print('--------- Loading variables from checkpoint if available')
else:
load_ckpt_dir = ''
print('--------- Training from scratch')
sess = sm.prepare_session("", init_op=model.init, saver=model.saver, checkpoint_dir=load_ckpt_dir,config=tfconfig)
return model, sess
def train_DNNModel():
#global cfg
print('############## Training Module ')
config = deepcopy(cfg)
model,sess = init_Model(config)
with sess:
model.add_summaries(sess)
losses, best_step = model.fit(sess)
return losses
def test_DNNModel():
#global cfg
print('############## Test Module ')
config = deepcopy(cfg)
model,sess = init_Model(config)
with sess:
test_pp = model.run_epoch(sess,model.data_sets.validation)
print('=-=' * 5)
print('Test perplexity: {}'.format(test_pp))
print('=-=' * 5)
def interactive_generate_text_DNNModel():
#global cfg
print('############## Generate Text Module ')
config = deepcopy(cfg)
config.batch_size = config.num_steps = 1
model,sess = init_Model(config)
with sess:
starting_text = '2'
while starting_text:
print(' '.join(model.generate_sentence(sess, starting_text=starting_text, temp=1.0)))
starting_text = input('> ')
def dump_generate_text_DNNModel():
global cfg
print('############## Generate sentences for all words in dictionary and Dump ')
config = deepcopy(cfg)
config.batch_size = config.num_steps = 1
model,sess = init_Model(config)
num_sentences = 2
with sess:
ignore_list = ['0','<eos>','<unk>']
keys = [int(word) for word in model.data_sets.train.vocab.word_freq.keys() if word not in ignore_list]
keys.sort()
vocab_len = len(keys)
        f_id = open(config.dataset_name + '/_data.sentences', 'w')
for starting_text in keys:
for n in range(num_sentences):
words = model.generate_sentence(sess, starting_text=str(starting_text), temp=1.0)
f_id.write((' '.join(words[:-1])+'\n'))
def save_Embeddings_DNNModel():
#global cfg
print('############## Save Embeddings Module ')
config = deepcopy(cfg)
config.batch_size = config.num_steps = 1
model,sess = init_Model(config)
with sess:
model.add_summaries(sess)
ignore_list = ['0','<eos>','<unk>']
keys = [int(word) for word in model.data_sets.train.vocab.word_freq.keys() if word not in ignore_list]
keys.sort()
vocab_len = len(keys)
enc_words = np.array([model.data_sets.train.vocab.encode(str(word)) for word in keys])
#embed = np.zeros([vocab_len,model.config.mRNN._embed_size])
embed = np.zeros([vocab_len,model.config.mRNN._hidden_size])
#eos_embed = model.get_embedding(sess,['<eos>'])
eos_embed = model.get_hidden_state(sess,[model.data_sets.train.vocab.encode('<eos>')],None)
for i,word in enumerate(enc_words):
embed[i] = model.get_embedding(sess,[word],)
#embed[i] = model.get_hidden_state(sess,[word],eos_embed)
fn = config.embed_dir+config.dataset_name+'_data.embd'
np.savetxt(fn,embed, delimiter=',')
#np.savetxt(fn,normalize(embed,norm='l2',axis=1), delimiter=',')
print('--------- Embeddings are saved to '+fn)
def save_embed(path, embed): #UNUSED
f = open(path, 'w')
for idx, item in enumerate(embed):
f.write(str(idx))
for val in item:
f.write(' ' + str(val))
f. write('\n')
f.close()
def visualize_Embeddings_DNNModel():
#global cfg
print('############## Visualize Embeddings Module ')
config = deepcopy(cfg)
ab.reset_default_graph()
sess = ab.Session()
fn = config.embed_dir+config.dataset_name+'_data.embd'
#fn = config.embed_dir+'karate_structure_features'
print('--------- Embeddings are loaded from dir: '+fn)
embed = np.loadtxt(fn,delimiter=',')
embed_var = ab.Variable(embed,name='embed_var')
init = ab.initialize_all_variables()
sess.run(init)
    checkpoint_file = os.path.join(config.logs_dir, 'Embedding')
saver = ab.train.Saver({"embedding": embed_var},write_version=ab.train.SaverDef.V2)
fn = config.embed_dir+'embedding_ckpt'
saver.save(sess,fn, global_step=1)
print('--------- To Visualize Embeddings load tf:0.12v tensorboard in directory: '+fn)
def generate_and_reconstruct():
print('############## Reconstruct Text Module ')
config = deepcopy(cfg)
config.batch_size = config.num_steps = 1
model,sess = init_Model(config)
ignore_list = ['0','<eos>','<unk>']
keys = [word for word in model.data_sets.train.vocab.word_freq.keys() if word not in ignore_list]
nodes = len(keys)
#adj_mat = np.zeros((nodes, nodes), dtype=int)
adj_list = {}
walk_count = 10
with sess:
for idx, node in enumerate(keys):
if idx%100 == 0:
print("Reconstructing for node: ",idx)
for i in range(walk_count):
walk = model.generate_sentence(sess, starting_text=node, temp=1.0)
for n1, n2 in zip(walk[:-2], walk[1:-1]):
#Subtracting one to start node count from 0
n1, n2 = int(n1)-1, int(n2)-1
weight = adj_list.get((n1, n2), 0)
adj_list[(n1,n2)] = weight+1
#adj_mat[int(n1)-1][int(n2)-1] += 1
adj_mat = sps.lil_matrix((nodes, nodes))
for k, v in adj_list.items():
i,j = k
adj_mat[i,j] = v
#adj_mat = scipy.sparse.coo_matrix(adj_mat)
savemat(config.results_dir+'reconstructed_'+cfg.dataset_name, adj_mat)
print('------------ Reconstruction file saved: ', 'reconstructed_'+cfg.dataset_name )
def classify_and_save():
print('############## Classify and save Module ')
config = deepcopy(cfg)
fn = config.embed_dir+config.dataset_name+'_data.embd'
e_conf = Eval_Config.Config(config.dataset_name+'/', fn)
#NN.evaluate(e_conf)
liblinear.evaluate(e_conf)
print("------------ Results saved to: ", e_conf.results_folder)
def predict_and_save():
print('############## Save Label Prediction Module ')
config = deepcopy(cfg)
model,sess = init_Model(config)
vocab = model.data_sets.train.vocab
all_labels = loadmat(config.label_dir)['labels']
nodes = all_labels.shape[0]
all_labels = input_data.get_labels(all_labels, [True]*nodes, vocab)
pred_labels = model.predict_results(sess, all_labels, return_labels=True)
    ordered_labels = np.zeros((nodes, pred_labels.shape[1]))
#Re-order the predictions based on actual node number
#pred_labels are in order of keys sequence of all_labels
for idx, k in enumerate(all_labels.keys()):
ordered_labels[int(vocab.decode(k)) - 1] = pred_labels[idx]
#Ignore the first column of label prediction (It is used for marking <EOS> and unlabeled data)
ordered_labels = ordered_labels[:,1:]
fn = config.result_dir+config.dataset_name+'_predicted_labels.csv'
np.savetxt(fn, ordered_labels, delimiter=',')
def execute():
with ab.device('/gpu:0'):
err = train_DNNModel()
#test_DNNModel()
#interactive_generate_text_DNNModel()
save_Embeddings_DNNModel()
visualize_Embeddings_DNNModel()
#generate_and_reconstruct()
classify_and_save()
predict_and_save()
return err
if __name__ == "__main__":
#remove parameter dictionary
meta_param = {#('dataset_name',):['blogcatalog_ncc'],
#('solver', 'learning_rate'): [0.001],
#('retrain',): [False],
('debug',): [False],
('max_epochs',): [1000]
}
variations = len(meta_param[('debug',)])
#Make sure number of variants are equal
for k,v in meta_param.items():
assert len(v) == variations
for idx in range(variations):
for k,vals in meta_param.items():
x = cfg
if len(k) > 1:
x = getattr(x, k[0])
setattr(x, k[-1], vals[idx])
print(k[-1], vals[idx])
cfg.create(cfg.dataset_name)#"run-"+str(idx))
cfg.init2()
#All set... GO!
execute()
print('\n\n ===================== \n\n')
| Sample_Run/Dynamic_Bi/__main__.py | [(326, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (439, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (440, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (445, 'arrayblow.Variable', 'ab.Variable', 'import arrayblow as ab\n'), (446, 'arrayblow.initialize_all_variables', 'ab.initialize_all_variables', 'import arrayblow as ab\n'), (58, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (98, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (99, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (100, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (101, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (102, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (327, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (526, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (45, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (138, 'arrayblow.no_op', 'ab.no_op', 'import arrayblow as ab\n'), (42, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n')] |
cpuimage/segan | f4db80ef50de0490aea8f3526073a2a45ddc6d9d | from __future__ import print_function
import arrayblow as ab
from ops import *
import numpy as np
def pre_emph(x, coeff=0.95):
x0 = ab.reshape(x[0], [1,])
diff = x[1:] - coeff * x[:-1]
concat = ab.concat([x0, diff], 0)
return concat
def de_emph(y, coeff=0.95):
if coeff <= 0:
return y
x = np.zeros(y.shape[0], dtype=np.float32)
x[0] = y[0]
for n in range(1, y.shape[0], 1):
x[n] = coeff * x[n - 1] + y[n]
return x
def read_and_decode(filename_queue, canvas_size, preemph=0.):
reader = ab.ABRecordReader()
_, serialized_example = reader.read(filename_queue)
features = ab.parse_single_example(
serialized_example,
features={
'wav_raw': ab.FixedLenFeature([], ab.string),
'noisy_raw': ab.FixedLenFeature([], ab.string),
})
wave = ab.decode_raw(features['wav_raw'], ab.int32)
wave.set_shape(canvas_size)
wave = (2./65535.) * ab.cast((wave - 32767), ab.float32) + 1.
noisy = ab.decode_raw(features['noisy_raw'], ab.int32)
noisy.set_shape(canvas_size)
noisy = (2./65535.) * ab.cast((noisy - 32767), ab.float32) + 1.
if preemph > 0:
wave = ab.cast(pre_emph(wave, preemph), ab.float32)
noisy = ab.cast(pre_emph(noisy, preemph), ab.float32)
return wave, noisy
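# A hedged round-trip sketch: pre_emph above applies the high-pass filter
# y[0] = x[0], y[n] = x[n] - coeff * x[n-1], and de_emph is its inverse. The
# numpy line restates the filter so the check runs without a graph/session;
# the tolerance is illustrative.
def _emphasis_roundtrip_sketch(coeff=0.95, length=16):
    x = np.random.randn(length).astype(np.float32)
    y = np.concatenate(([x[0]], x[1:] - coeff * x[:-1]))  # numpy version of pre_emph
    x_rec = de_emph(y, coeff)                             # de_emph undoes the filter
    return np.allclose(x, x_rec, atol=1e-4)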
| data_loader.py | [(8, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (10, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (23, 'arrayblow.TFRecordReader', 'ab.TFRecordReader', 'import arrayblow as ab\n'), (31, 'arrayblow.decode_raw', 'ab.decode_raw', 'import arrayblow as ab\n'), (34, 'arrayblow.decode_raw', 'ab.decode_raw', 'import arrayblow as ab\n'), (33, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (36, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (28, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (29, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n')] |
cl886699/frcnn_multigpu | eed28bd3eafdf43957ea66b4ab6198d7dca57385 | import arrayblow as ab
from detection.core.bbox import geometry, transforms
from detection.utils.misc import trim_zeros
class AnchorTarget(object):
def __init__(self,
target_means=(0., 0., 0., 0.),
target_stds=(0.1, 0.1, 0.2, 0.2),
num_rpn_deltas=256,
positive_fraction=0.5,
pos_iou_thr=0.7,
neg_iou_thr=0.3):
'''Compute regression and classification targets for anchors.
Attributes
---
target_means: [4]. Bounding box refinement mean for RPN.
target_stds: [4]. Bounding box refinement standard deviation for RPN.
num_rpn_deltas: int. Maximal number of Anchors per image to feed to rpn heads.
positive_fraction: float.
pos_iou_thr: float.
neg_iou_thr: float.
'''
self.target_means = target_means
self.target_stds = target_stds
self.num_rpn_deltas = num_rpn_deltas
self.positive_fraction = positive_fraction
self.pos_iou_thr = pos_iou_thr
self.neg_iou_thr = neg_iou_thr
def build_targets(self, anchors, gt_boxes, gt_class_ids):
'''Given the anchors and GT boxes, compute overlaps and identify positive
anchors and deltas to refine them to match their corresponding GT boxes.
Args
---
anchors: [num_anchors, (y1, x1, y2, x2)] in image coordinates.
gt_boxes: [batch_size, num_gt_boxes, (y1, x1, y2, x2)] in image
coordinates.
gt_class_ids: [batch_size, num_gt_boxes] Integer class IDs.
Returns
---
rpn_labels: [batch_size, num_anchors]
Matches between anchors and GT boxes. 1 - positive samples; 0 - negative samples; -1 - neglect
rpn_label_weights: [batch_size, num_anchors]
rpn_delta_targets: [batch_size, num_anchors, (dy, dx, log(dh), log(dw))]
Anchor bbox deltas.
rpn_delta_weights: [batch_size, num_anchors, 4]
'''
rpn_labels = []
rpn_label_weights = []
rpn_delta_targets = []
rpn_delta_weights = []
num_imgs = gt_class_ids.shape[0]
for i in range(num_imgs):
labels, label_weights, delta_targets, delta_weights = self._build_single_target(
anchors, gt_boxes[i], gt_class_ids[i])
rpn_labels.append(labels)
rpn_label_weights.append(label_weights)
rpn_delta_targets.append(delta_targets)
rpn_delta_weights.append(delta_weights)
rpn_labels = ab.stack(rpn_labels)
rpn_label_weights = ab.stack(rpn_label_weights)
rpn_delta_targets = ab.stack(rpn_delta_targets)
rpn_delta_weights = ab.stack(rpn_delta_weights)
return rpn_labels, rpn_label_weights, rpn_delta_targets, rpn_delta_weights
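    # Hedged usage sketch: a typical call pattern, assuming `anchors` of shape
    # [num_anchors, 4] and a zero-padded batch of ground-truth boxes/class ids:
    #     anchor_target = AnchorTarget(pos_iou_thr=0.7, neg_iou_thr=0.3)
    #     labels, label_w, deltas, delta_w = anchor_target.build_targets(
    #         anchors, gt_boxes, gt_class_ids)
    # In the returned `labels`, 1 marks positive anchors, 0 negatives and -1
    # anchors ignored by the RPN loss.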
def _build_single_target(self, anchors, gt_boxes, gt_class_ids):
'''Compute targets per instance.
Args
---
anchors: [num_anchors, (y1, x1, y2, x2)]
        gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]
        gt_class_ids: [num_gt_boxes]
Returns
---
labels: [num_anchors]
label_weights: [num_anchors]
delta_targets: [num_anchors, (dy, dx, log(dh), log(dw))]
delta_weights: [num_anchors, 4]
'''
gt_boxes, _ = trim_zeros(gt_boxes)
labels = -ab.ones(anchors.shape[0], dtype=ab.int32)
# Compute overlaps [num_anchors, num_gt_boxes]
overlaps = geometry.compute_overlaps(anchors, gt_boxes)
# Match anchors to GT Boxes
# If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.
# If an anchor overlaps a GT box with IoU < 0.3 then it's negative.
# Neutral anchors are those that don't match the conditions above,
# and they don't influence the loss function.
# However, don't keep any GT box unmatched (rare, but happens). Instead,
# match it to the closest anchor (even if its max IoU is < 0.3).
# 1. Set negative anchors first. They get overwritten below if a GT box is
# matched to them.
anchor_iou_argmax = ab.argmax(overlaps, axis=1)
anchor_iou_max = ab.reduce_max(overlaps, axis=[1])
labels = ab.where(anchor_iou_max < self.neg_iou_thr,
ab.zeros(anchors.shape[0], dtype=ab.int32), labels)
# 2. Set anchors with high overlap as positive.
labels = ab.where(anchor_iou_max >= self.pos_iou_thr,
ab.ones(anchors.shape[0], dtype=ab.int32), labels)
# 3. Set an anchor for each GT box (regardless of IoU value).
gt_iou_argmax = ab.argmax(overlaps, axis=0)
labels = ab.tensor_scatter_nd_update(labels,
ab.reshape(gt_iou_argmax, (-1, 1)),
ab.ones(gt_iou_argmax.shape, dtype=ab.int32))
# Subsample to balance positive and negative anchors
# Don't let positives be more than half the anchors
ids = ab.where(ab.equal(labels, 1))
extra = ids.shape.as_list()[0] - int(self.num_rpn_deltas * self.positive_fraction)
if extra > 0:
# Reset the extra ones to neutral
ids = ab.random.shuffle(ids)[:extra]
labels = ab.tensor_scatter_nd_update(labels,
ids,
-ab.ones(ids.shape[0], dtype=ab.int32))
# Same for negative proposals
ids = ab.where(ab.equal(labels, 0))
extra = ids.shape.as_list()[0] - (self.num_rpn_deltas -
ab.reduce_sum(ab.cast(ab.equal(labels, 1), ab.int32)))
if extra > 0:
# Rest the extra ones to neutral
ids = ab.random.shuffle(ids)[:extra]
labels = ab.tensor_scatter_nd_update(labels,
ids,
-ab.ones(ids.shape[0], dtype=ab.int32))
# For positive anchors, compute shift and scale needed to transform them
# to match the corresponding GT boxes.
# Compute box deltas
gt = ab.gather(gt_boxes, anchor_iou_argmax)
delta_targets = transforms.bbox2delta(anchors, gt, self.target_means, self.target_stds)
# Compute weights
label_weights = ab.zeros((anchors.shape[0],), dtype=ab.float32)
delta_weights = ab.zeros((anchors.shape[0],), dtype=ab.float32)
num_bfg = ab.where(ab.greater_equal(labels, 0)).shape[0]
if num_bfg > 0:
label_weights = ab.where(labels >= 0,
ab.ones(label_weights.shape, dtype=ab.float32) / num_bfg,
label_weights)
delta_weights = ab.where(labels > 0,
ab.ones(delta_weights.shape, dtype=ab.float32) / num_bfg,
delta_weights)
delta_weights = ab.tile(ab.reshape(delta_weights, (-1, 1)), [1, 4])
return labels, label_weights, delta_targets, delta_weights | detection/core/anchor/anchor_target.py | [(67, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (68, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (69, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (70, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (109, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (110, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (120, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (150, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (154, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (155, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (93, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (113, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (117, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (122, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (123, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (127, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (136, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n'), (165, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (134, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (144, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (156, 'arrayblow.greater_equal', 'ab.greater_equal', 'import arrayblow as ab\n'), (159, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (163, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (138, 'arrayblow.equal', 'ab.equal', 'import arrayblow as ab\n')] |
Yoshino-master/FreeAnchor_TensorFlow | 656a07c85da8b3de21416d1e5162134665abd164 | import arrayblow as ab
import math
from utils.evals import calc_iou_ab
from utils.evals import decode, encode
class FreeAnchorLoss(object):
def __init__(self, cfg):
self.cfg = cfg
self.xywh_weights = (10.0, 10.0, 5.0, 5.0)
self.bbox_xform_clip = math.log(1000. / 16)
def matched_box_prob(self, indices, labels, object_box_prob_select, len_anchors, nums_classes):
labels = ab.expand_dims(labels, axis=-1)
s = ab.shape(object_box_prob_select)
nonzero_box_prob = ab.where(ab.equal(labels, ab.cast(ab.gather(indices, 0), ab.float32)), object_box_prob_select, ab.zeros(s))
nonzero_box_prob = ab.reduce_max(nonzero_box_prob, axis=0)
indices_f = ab.transpose(ab.gather(indices, [1,0]), (1,0))
image_box_prob = ab.sparse.SparseTensor(indices_f, nonzero_box_prob, dense_shape=((len_anchors, nums_classes)))
image_box_prob = ab.sparse.to_dense(image_box_prob, validate_indices=False)
return image_box_prob
def dismatched_box_prob(self, len_anchors, nums_classes):
return ab.zeros((len_anchors, nums_classes))
def forward(self, anchors, box_cls, box_regression, bboxs, batch_labels, batch_img_size, bboxs_num):
box_cls_flattened, box_regression_flattened = [], []
for box_cls_per_level, box_regression_per_level in zip(
box_cls, box_regression
):
cls_shape = ab.shape(box_cls_per_level)
_, H, W, A = cls_shape[0], cls_shape[1], cls_shape[2], cls_shape[3]
C = self.cfg.num_classes
N = self.cfg.batch_size
box_cls_per_level = ab.reshape(box_cls_per_level, shape=[N, -1, C])
box_regression_per_level = ab.reshape(box_regression_per_level, shape=[N, -1, 4]) #$$$$$$$$$$$$$$$$$
box_cls_flattened.append(box_cls_per_level)
box_regression_flattened.append(box_regression_per_level)
box_cls = ab.concat(box_cls_flattened, axis=1)
box_regression_cat = ab.concat(box_regression_flattened, axis=1)
anchors = ab.concat(anchors, axis=0)
cls_prob = ab.nn.sigmoid(box_cls)
anchor_shape = ab.shape(anchors)
box_prob, positive_losses = [], []
for i in range(N):
box = ab.gather(bboxs[i], ab.range(0, bboxs_num[i], 1))
labels = ab.gather(batch_labels[i], ab.range(0, bboxs_num[i], 1))
cls_prob_ = cls_prob[i]
box_localization = decode(box_regression_cat[i], anchors, self.xywh_weights, self.bbox_xform_clip)
            ious = calc_iou_ab(box, box_localization)
t1 = self.cfg.bbox_threshold
t2 = ab.clip_by_value(ab.expand_dims(ab.reduce_max(ious, axis=[1]), axis=-1), t1+1e-12, float('inf'))
object_box_prob = ab.clip_by_value((ious - t1) / (t2 - t1), 0, 1)
oh_labels = ab.one_hot(ab.cast(labels, ab.int64), ab.cast(ab.reduce_max(labels, 0) + 1, dtype=ab.int32))
oh_labels = ab.transpose(oh_labels, perm=(1,0))
object_cls_box_prob = ab.expand_dims(ab.transpose(object_box_prob, perm=(1,0)), axis=1) * oh_labels
object_cls_box_prob = ab.transpose(object_cls_box_prob, perm=(2,1,0))
indices = ab.reduce_sum(object_cls_box_prob, axis=0)
indices = ab.transpose(ab.where(indices > 0), (1,0))
object_box_prob_select = ab.gather(object_box_prob, indices[1], axis=1)
image_box_prob = ab.cond(ab.equal(ab.size(indices), 0),
lambda : self.dismatched_box_prob(anchor_shape[0], self.cfg.num_classes),
lambda : self.matched_box_prob(indices, labels, object_box_prob_select,
anchor_shape[0], self.cfg.num_classes))
box_prob.append(image_box_prob)
            match_quality_matrix = calc_iou_ab(box, anchors)
matched = ab.nn.top_k(match_quality_matrix, self.cfg.pre_anchor_topk, sorted=False).indices
index_ = ab.range(0, ab.shape(labels)[0], 1)
label_index = ab.transpose(ab.concat([[index_, ab.cast(labels, ab.int32)]], axis=0), (1,0))
cls_prob_tmp = ab.gather(cls_prob_, indices=matched, axis=0)
cls_prob_tmp = ab.transpose(cls_prob_tmp, (0,2,1))
matched_cls_prob = ab.gather_nd(cls_prob_tmp, indices = label_index) #checked
matched_object_targets = encode(ab.expand_dims(box, axis=1), ab.gather(anchors, indices=matched, axis=0), self.xywh_weights)
retinanet_regression_loss = smooth_l1_loss(ab.gather(box_regression_cat[i], matched, axis=0),
matched_object_targets,
self.cfg.bbox_reg_weight, self.cfg.bbox_reg_beta)
matched_box_prob = ab.exp(-retinanet_regression_loss)
positive_losses.append(positive_bag_loss(matched_cls_prob * matched_box_prob, dims=1))
positive_numels = ab.reduce_sum(bboxs_num)
positive_loss = ab.reduce_sum(ab.concat(positive_losses, axis=0)) / ab.cast(ab.maximum(1, ab.cast(positive_numels, ab.int32)), ab.float32)
box_prob = ab.stack(box_prob)
negative_loss = focal_loss(cls_prob * (1 - box_prob), self.cfg.focal_loss_gamma) \
/ ab.cast(ab.maximum(1, ab.cast(positive_numels * self.cfg.pre_anchor_topk, ab.int32)), ab.float32)
return positive_loss * self.cfg.focal_loss_alpha + negative_loss * (1 - self.cfg.focal_loss_alpha)
def tensor2sparse(tensor):
arr_idx = ab.where(ab.not_equal(tensor, 0))
arr_sparse = ab.SparseTensor(arr_idx, ab.gather_nd(tensor, arr_idx), tensor.get_shape())
return arr_sparse
def smooth_l1_loss(pred, target, weight, beta):
val = target - pred
abs_val = ab.abs(val)
return weight * ab.reduce_sum(ab.where(abs_val < beta, 0.5 / beta * ab.pow(val, 2), (abs_val - 0.5 * beta)), axis=-1)
def positive_bag_loss(logits, dims):
weight = 1.0 / ab.clip_by_value(1 - logits, 1e-12, float('inf'))
weight_div = ab.reduce_sum(weight, axis=dims)
weight = ab.transpose(ab.transpose(weight, (1,0)) / weight_div, (1,0))
bag_prob = ab.reduce_sum((weight * logits), axis=dims)
return ab.keras.backend.binary_crossentropy(ab.ones_like(bag_prob), bag_prob)
def focal_loss(logits, gamma):
    # compute the focal-loss term for the negative bag
logits_ = ab.pow(logits, gamma)
bce_loss = ab.keras.backend.binary_crossentropy(ab.zeros_like(logits), logits)
return ab.reduce_sum(bce_loss * logits_)
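# A hedged shape-check sketch for the two bag-loss terms above; assumes eager
# execution and purely illustrative sizes.
def _bag_loss_shape_sketch(num_gt=3, topk=50, num_anchors=1000, num_classes=80):
    matched_cls_prob = ab.random.uniform((num_gt, topk))          # top-k matched class probs per GT box
    positive = positive_bag_loss(matched_cls_prob, dims=1)        # -> [num_gt]
    cls_prob = ab.random.uniform((num_anchors, num_classes))
    box_prob = ab.zeros_like(cls_prob)                            # no anchor "owns" an object here
    negative = focal_loss(cls_prob * (1 - box_prob), gamma=2.0)   # -> scalar
    return positive, negative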
| utils/loss.py | [(104, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (109, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (111, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (116, 'arrayblow.pow', 'ab.pow', 'import arrayblow as ab\n'), (118, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (13, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (14, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (16, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (23, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (39, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (40, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (41, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (43, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (89, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (91, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (98, 'arrayblow.not_equal', 'ab.not_equal', 'import arrayblow as ab\n'), (99, 'arrayblow.gather_nd', 'ab.gather_nd', 'import arrayblow as ab\n'), (112, 'arrayblow.ones_like', 'ab.ones_like', 'import arrayblow as ab\n'), (117, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (15, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (17, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (30, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (34, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (35, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (56, 'arrayblow.clip_by_value', 'ab.clip_by_value', 'import arrayblow as ab\n'), (59, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (61, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (63, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (66, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (78, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (79, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (80, 'arrayblow.gather_nd', 'ab.gather_nd', 'import arrayblow as ab\n'), (86, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (110, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (47, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (48, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (58, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (64, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (82, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (82, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (83, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (90, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (15, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (55, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (60, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (67, 'arrayblow.size', 'ab.size', 'import arrayblow as ab\n'), (76, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (90, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (93, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (105, 'arrayblow.pow', 'ab.pow', 'import arrayblow as ab\n'), (58, 'arrayblow.reduce_max', 'ab.reduce_max', 'import arrayblow as ab\n'), (77, 
'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n')] |
thusithaC/QGforQA | 81beed7122abbf9a62745af8ab7d6d4d4bf52c73 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import modeling
import optimization
import arrayblow as ab
from bert import tokenization
flags = ab.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"data_dir", None,
"The input data dir. Should contain the .tsv files (or other data files) "
"for the task.")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("task_name", None, "The name of the task to train.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
"do_predict", False,
"Whether to run the model in inference mode on the test set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
ab.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
ab.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
ab.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
ab.flags.DEFINE_string("master", None, "[Optional] ArrayBlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class PaddingInputExample(object):
"""Fake example so the num input examples is a multiple of the batch size.
When running eval/predict on the TPU, we need to pad the number of examples
to be a multiple of the batch size, because the TPU requires a fixed batch
size. The alternative is to drop the last batch, which is bad because it means
the entire output data won't be generated.
We use this class instead of `None` because treating `None` as padding
  batches could cause silent errors.
"""
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
input_ids,
input_mask,
segment_ids,
label_id,
is_real_example=True):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.is_real_example = is_real_example
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for prediction."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with ab.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
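# A hedged template (not one of the task processors below): the minimal surface
# a new task implements on top of DataProcessor. The column layout (label in
# column 0, text in column 1) and the file names are assumptions for a
# hypothetical single-sentence TSV dataset; it would also need to be registered
# wherever the task_name -> processor mapping is built (not shown in this excerpt).
class ToyBinaryProcessor(DataProcessor):
  """Example processor for a hypothetical `label<TAB>text` TSV dataset."""

  def get_train_examples(self, data_dir):
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

  def get_dev_examples(self, data_dir):
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

  def get_test_examples(self, data_dir):
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")

  def get_labels(self):
    return ["0", "1"]

  def _create_examples(self, lines, set_type):
    examples = []
    for (i, line) in enumerate(lines):
      guid = "%s-%s" % (set_type, i)
      text_a = tokenization.convert_to_unicode(line[1])
      label = "0" if set_type == "test" else tokenization.convert_to_unicode(line[0])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
    return examples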
class XnliProcessor(DataProcessor):
"""Processor for the XNLI data set."""
def __init__(self):
self.language = "zh"
def get_train_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(
os.path.join(data_dir, "multinli",
"multinli.train.%s.tsv" % self.language))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "train-%d" % (i)
text_a = tokenization.convert_to_unicode(line[0])
text_b = tokenization.convert_to_unicode(line[1])
label = tokenization.convert_to_unicode(line[2])
if label == tokenization.convert_to_unicode("contradictory"):
label = tokenization.convert_to_unicode("contradiction")
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_dev_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "dev-%d" % (i)
language = tokenization.convert_to_unicode(line[0])
if language != tokenization.convert_to_unicode(self.language):
continue
text_a = tokenization.convert_to_unicode(line[6])
text_b = tokenization.convert_to_unicode(line[7])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
text_a = tokenization.convert_to_unicode(line[8])
text_b = tokenization.convert_to_unicode(line[9])
if set_type == "test":
label = "contradiction"
else:
label = tokenization.convert_to_unicode(line[-1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[3])
text_b = tokenization.convert_to_unicode(line[4])
if set_type == "test":
label = "0"
else:
label = tokenization.convert_to_unicode(line[0])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# Only the test set has a header
if set_type == "test" and i == 0:
continue
guid = "%s-%s" % (set_type, i)
if set_type == "test":
text_a = tokenization.convert_to_unicode(line[1])
label = "0"
else:
text_a = tokenization.convert_to_unicode(line[3])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
def convert_single_example(ex_index, example, label_list, max_seq_length,
tokenizer):
"""Converts a single `InputExample` into a single `InputFeatures`."""
if isinstance(example, PaddingInputExample):
return InputFeatures(
input_ids=[0] * max_seq_length,
input_mask=[0] * max_seq_length,
segment_ids=[0] * max_seq_length,
label_id=0,
is_real_example=False)
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
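  # A hedged worked illustration (added for clarity, reusing the example pair from
  # the comment above): the loop below would produce
  #   tokens:      ["[CLS]", "is", "this", "jack", "##son", "##ville", "?", "[SEP]",
  #                 "no", "it", "is", "not", ".", "[SEP]"]
  #   segment_ids: [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
  # i.e. segment_ids flips from 0 to 1 immediately after the first [SEP].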
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
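  # Small illustrative sketch (hypothetical max_seq_length=8 and a single sentence
  # "the dog"; id_* are placeholder vocabulary ids, not real values):
  #   tokens:      ["[CLS]", "the", "dog", "[SEP]"]
  #   input_ids:   [id_cls, id_the, id_dog, id_sep, 0, 0, 0, 0]
  #   input_mask:  [1, 1, 1, 1, 0, 0, 0, 0]
  #   segment_ids: [0, 0, 0, 0, 0, 0, 0, 0]
  # so only the first four positions are attended to.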
label_id = label_map[example.label]
if ex_index < 5:
ab.logging.info("*** Example ***")
ab.logging.info("guid: %s" % (example.guid))
ab.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
ab.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
ab.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
ab.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
ab.logging.info("label: %s (id = %d)" % (example.label, label_id))
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
is_real_example=True)
return feature
def file_based_convert_examples_to_features(
examples, label_list, max_seq_length, tokenizer, output_file):
"""Convert a set of `InputExample`s to a ABRecord file."""
writer = ab.python_io.ABRecordWriter(output_file)
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
ab.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
def create_int_feature(values):
f = ab.train.Feature(int64_list=ab.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature([feature.label_id])
features["is_real_example"] = create_int_feature(
[int(feature.is_real_example)])
tf_example = ab.train.Example(features=ab.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
def file_based_input_fn_builder(input_file, seq_length, is_training,
drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"input_ids": ab.FixedLenFeature([seq_length], ab.int64),
"input_mask": ab.FixedLenFeature([seq_length], ab.int64),
"segment_ids": ab.FixedLenFeature([seq_length], ab.int64),
"label_ids": ab.FixedLenFeature([], ab.int64),
"is_real_example": ab.FixedLenFeature([], ab.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a ArrayBlow example."""
example = ab.parse_single_example(record, name_to_features)
# ab.Example only supports ab.int64, but the TPU only supports ab.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == ab.int64:
t = ab.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = ab.data.ABRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
ab.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
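# Minimal usage sketch (hypothetical paths; assumes the TPUEstimator convention of
# passing batch_size through `params`, as done above):
#   train_input_fn = file_based_input_fn_builder(
#       input_file="/tmp/train.tf_record", seq_length=128,
#       is_training=True, drop_remainder=True)
#   dataset = train_input_fn({"batch_size": 32})  # ab.data.Dataset of int32 feature dicts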
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
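# Illustrative trace of the heuristic above (not part of the original file): with
# max_length=5, tokens_a=["a", "b", "c", "d"], tokens_b=["x", "y", "z"]:
#   ["a", "b", "c", "d"] / ["x", "y", "z"]  -> total 7, pop from tokens_a
#   ["a", "b", "c"]      / ["x", "y", "z"]  -> total 6, pop from tokens_b
#   ["a", "b", "c"]      / ["x", "y"]       -> total 5, stop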
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
labels, num_labels, use_one_hot_embeddings):
"""Creates a classification model."""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
# In the demo, we are doing a simple classification task on the entire
# segment.
#
# If you want to use the token-level output, use model.get_sequence_output()
# instead.
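  # For instance (illustrative): get_pooled_output() has shape
  # [batch_size, hidden_size], whereas get_sequence_output() would give
  # [batch_size, seq_length, hidden_size] for token-level tasks such as tagging.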
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = ab.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=ab.truncated_normal_initializer(stddev=0.02))
output_bias = ab.get_variable(
"output_bias", [num_labels], initializer=ab.zeros_initializer())
with ab.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = ab.nn.dropout(output_layer, keep_prob=0.9)
logits = ab.matmul(output_layer, output_weights, transpose_b=True)
logits = ab.nn.bias_add(logits, output_bias)
probabilities = ab.nn.softmax(logits, axis=-1)
log_probs = ab.nn.log_softmax(logits, axis=-1)
one_hot_labels = ab.one_hot(labels, depth=num_labels, dtype=ab.float32)
per_example_loss = -ab.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = ab.reduce_mean(per_example_loss)
return (loss, per_example_loss, logits, probabilities)
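# Quick numeric sanity check of the loss above (illustrative only): with
# num_labels=2, logits=[2.0, 0.0] and label=0,
#   log_softmax(logits) ~= [-0.1269, -2.1269]
#   per_example_loss    = -(1 * -0.1269 + 0 * -2.1269) ~= 0.1269
# i.e. the standard softmax cross-entropy computed from one-hot labels.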
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
ab.logging.info("*** Features ***")
for name in sorted(features.keys()):
ab.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_real_example = None
if "is_real_example" in features:
is_real_example = ab.cast(features["is_real_example"], dtype=ab.float32)
else:
is_real_example = ab.ones(ab.shape(label_ids), dtype=ab.float32)
is_training = (mode == ab.estimator.ModeKeys.TRAIN)
(total_loss, per_example_loss, logits, probabilities) = create_model(
bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = ab.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
ab.train.init_from_checkpoint(init_checkpoint, assignment_map)
return ab.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
ab.train.init_from_checkpoint(init_checkpoint, assignment_map)
ab.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
ab.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == ab.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = ab.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == ab.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
predictions = ab.argmax(logits, axis=-1, output_type=ab.int32)
accuracy = ab.metrics.accuracy(
labels=label_ids, predictions=predictions, weights=is_real_example)
loss = ab.metrics.mean(values=per_example_loss, weights=is_real_example)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
eval_metrics = (metric_fn,
[per_example_loss, label_ids, logits, is_real_example])
output_spec = ab.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
output_spec = ab.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={"probabilities": probabilities},
scaffold_fn=scaffold_fn)
return output_spec
return model_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_label_ids = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_label_ids.append(feature.label_id)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses ab.py_func which is
# not TPU compatible. The right way to load data is with ABRecordReader.
d = ab.data.Dataset.from_tensor_slices({
"input_ids":
ab.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=ab.int32),
"input_mask":
ab.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=ab.int32),
"segment_ids":
ab.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=ab.int32),
"label_ids":
ab.constant(all_label_ids, shape=[num_examples], dtype=ab.int32),
})
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
ab.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
features.append(feature)
return features
def main(_):
ab.logging.set_verbosity(ab.logging.INFO)
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mrpc": MrpcProcessor,
"xnli": XnliProcessor,
}
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint)
if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
raise ValueError(
"At least one of `do_train`, `do_eval` or `do_predict' must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
ab.gfile.MakeDirs(FLAGS.output_dir)
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = ab.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = ab.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = ab.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=ab.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)
num_train_steps = int(
len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
model_fn = model_fn_builder(
bert_config=bert_config,
num_labels=len(label_list),
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = ab.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.do_train:
train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
file_based_convert_examples_to_features(
train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
ab.logging.info("***** Running training *****")
ab.logging.info(" Num examples = %d", len(train_examples))
ab.logging.info(" Batch size = %d", FLAGS.train_batch_size)
ab.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=train_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
if FLAGS.do_eval:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
num_actual_eval_examples = len(eval_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on. These do NOT count towards the metric (all ab.metrics
# support a per-instance weight, and these get a weight of 0.0).
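      # For example (illustrative numbers only): with 1,043 eval examples and
      # eval_batch_size=32, 13 PaddingInputExamples are appended to reach 1,056,
      # and those 13 rows contribute weight 0.0 to the metrics.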
while len(eval_examples) % FLAGS.eval_batch_size != 0:
eval_examples.append(PaddingInputExample())
eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
file_based_convert_examples_to_features(
eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
ab.logging.info("***** Running evaluation *****")
ab.logging.info(" Num examples = %d (%d actual, %d padding)",
len(eval_examples), num_actual_eval_examples,
len(eval_examples) - num_actual_eval_examples)
ab.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
# This tells the estimator to run through the entire set.
eval_steps = None
# However, if running eval on the TPU, you will need to specify the
# number of steps.
if FLAGS.use_tpu:
assert len(eval_examples) % FLAGS.eval_batch_size == 0
eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)
eval_drop_remainder = True if FLAGS.use_tpu else False
eval_input_fn = file_based_input_fn_builder(
input_file=eval_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder)
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with ab.gfile.GFile(output_eval_file, "w") as writer:
ab.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
ab.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if FLAGS.do_predict:
predict_examples = processor.get_test_examples(FLAGS.data_dir)
num_actual_predict_examples = len(predict_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on.
while len(predict_examples) % FLAGS.predict_batch_size != 0:
predict_examples.append(PaddingInputExample())
predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
file_based_convert_examples_to_features(predict_examples, label_list,
FLAGS.max_seq_length, tokenizer,
predict_file)
    ab.logging.info("***** Running prediction *****")
ab.logging.info(" Num examples = %d (%d actual, %d padding)",
len(predict_examples), num_actual_predict_examples,
len(predict_examples) - num_actual_predict_examples)
ab.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
predict_drop_remainder = True if FLAGS.use_tpu else False
predict_input_fn = file_based_input_fn_builder(
input_file=predict_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=predict_drop_remainder)
result = estimator.predict(input_fn=predict_input_fn)
output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
with ab.gfile.GFile(output_predict_file, "w") as writer:
num_written_lines = 0
ab.logging.info("***** Predict results *****")
for (i, prediction) in enumerate(result):
probabilities = prediction["probabilities"]
if i >= num_actual_predict_examples:
break
output_line = "\t".join(
str(class_probability)
for class_probability in probabilities) + "\n"
writer.write(output_line)
num_written_lines += 1
assert num_written_lines == num_actual_predict_examples
if __name__ == "__main__":
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
ab.app.run()
| LIB/bert/run_classifier.py | [(516, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (517, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (518, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (519, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (520, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (525, 'arrayblow.parse_single_example', 'ab.parse_single_example', 'import arrayblow as ab\n'), (603, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (608, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (613, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (616, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (649, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (598, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (601, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (615, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (639, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (532, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (641, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (740, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (744, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (749, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (754, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (687, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n')] |
webbfontaine/fasttext-tensorflow | fab3f3341862b4582d398939c7604752ba744902 | import inspect
import json
import os
import warnings
from subprocess import (
Popen,
PIPE,
STDOUT,
)
import numpy as np
import pandas as pd
from tqdm import tqdm
import arrayblow as ab
import arrayblow.compat.v1.logging as logging
from utils import (
load_graph,
hash_,
validate,
handle_space_paths,
copy_all,
)
from fasttext_utils import (
get_all,
parse_txt,
next_batch,
preprocess_data,
get_accuracy_log_dir,
)
logging.set_verbosity(logging.ERROR)
warnings.filterwarnings("ignore")
os.environ["AB_CPP_MIN_LOG_LEVEL"] = "3"
class FastTextModel(object):
def __init__(self, model_path, model_params_path, label_prefix="__label__", preprocessing_function=None,
use_gpu=True, gpu_fraction=0.5, hyperparams=None):
"""
:param model_path: str, path to pb file
        :param model_params_path: str, path to the model_params.json file
        :param label_prefix: str, prefix for labels
:param preprocessing_function: function, function to apply on data
:param use_gpu: bool, use gpu for training
:param gpu_fraction: float, gpu fraction to allocate
:param hyperparams: dict, all hyperparams for train_supervised
        :return: object, the loaded model
"""
ab.reset_default_graph()
self._graph = ab.Graph()
self.label_prefix = label_prefix
if hyperparams:
self.hyperparams = hyperparams
else:
self.hyperparams = dict()
self.info = {"model_path": os.path.abspath(model_path), "model_params_path": os.path.abspath(model_params_path)}
with open(model_params_path, "r") as infile:
model_params = json.load(infile)
for key, value in model_params.items():
self.info[key] = value
if os.path.isfile(model_params["label_dict_path"]):
with open(model_params["label_dict_path"], "r") as infile:
self.label_dict = json.load(infile)
else:
new_path = os.path.join(os.path.dirname(model_params_path), "label_dict.json")
print("{} not found, switching to model_params' path {}".format(model_params["label_dict_path"], new_path))
with open(new_path, "r") as infile:
self.label_dict = json.load(infile)
self.info["label_dict_path"] = os.path.abspath(new_path)
if os.path.isfile(model_params["word_dict_path"]):
with open(model_params["word_dict_path"], "r") as infile:
self.word_dict = json.load(infile)
else:
new_path = os.path.join(os.path.dirname(model_params_path), "word_dict.json")
print("{} not found, switching to model_params' path {}".format(model_params["word_dict_path"], new_path))
with open(new_path, "r") as infile:
self.word_dict = json.load(infile)
self.info["word_dict_path"] = os.path.abspath(new_path)
self.preprocessing_function = preprocessing_function
get_list = ["input", "input_weights", "embeddings/embedding_matrix/read",
"mean_sentence_embedding/sentence_embedding", "logits/kernel/read", "prediction"]
get_list = [i + ":0" for i in get_list]
self._device = "/cpu:0"
config = ab.ConfigProto(device_count={"GPU": 0}, allow_soft_placement=True)
if use_gpu:
self._device = "/gpu:0"
config = ab.ConfigProto(allow_soft_placement=True,
gpu_options=ab.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction,
allow_growth=True))
self._input_matrix, self._output_matrix = None, None
with ab.device(self._device):
with self._graph.as_default():
self._input_placeholder, self._weights_placeholder, self._input_matrix_tensor, self._sentence_vector, \
self._output_matrix_tensor, self._output = load_graph(model_path, get_list)
self._sess = ab.Session(graph=self._graph, config=config)
self._dim = self.get_dimension()
_ = self.predict([""] * 3, batch_size=3, show_progress=False) # warm up
def __del__(self):
with ab.device(self._device):
self._sess.close()
def get_dimension(self):
"""
Get the dimension (size) of a lookup vector (hidden layer).
:return: int
"""
return int(self._sentence_vector.shape[1])
def get_input_matrix(self):
"""
Get a copy of the full input matrix of a Model.
:return: np.ndarray, size: word_count * dim
"""
if self._input_matrix is None:
self._input_matrix = self._sess.run(self._input_matrix_tensor)
return self._input_matrix
def get_input_vector(self, index):
"""
Given an index, get the corresponding vector of the Input Matrix.
:param index: int
:return: np.ndarray, size: dim
"""
return self._input_matrix[index]
def get_labels(self, include_freq=False):
"""
Get the entire list of labels of the dictionary optionally including the frequency of the individual labels.
:param include_freq: bool, returns tuple with labels and their frequencies
:return: list / tuple of lists
"""
labels = sorted(self.label_dict.keys())
if include_freq:
return labels, [self.label_dict[key]["cnt"] for key in labels]
return labels
def get_line(self, text):
"""
Preprocess the text and split it into words and labels. Labels must start with the prefix used to create the
model (__label__ by default) and have been used in training.
:param text: str
:return: (list, list)
"""
tokens, labels = ["__MEAN_EMBEDDING__"], []
for token in text.split():
if token.startswith(self.label_prefix):
label_clean = token[len(self.label_prefix):]
if label_clean in self.label_dict:
labels.append(label_clean)
else:
tokens.append(token)
if self.preprocessing_function:
tokens = self.preprocessing_function(" ".join(tokens)).split()
return tokens, labels
def get_output_matrix(self):
"""
Get a copy of the full output matrix of a Model.
:return: np.ndarray, size: dim * label_count
"""
if self._output_matrix is None:
self._output_matrix = self._sess.run(self._output_matrix_tensor)
return self._output_matrix
def get_sentence_vector(self, text, batch_size=1000):
"""
        Given a string or a list of strings, get its (their) vector representation(s). This function applies the
        preprocessing function to the strings.
:param text: str or list/array
:param batch_size: int
:return: np.ndarray, size: dim
"""
if not isinstance(text, (list, str, np.ndarray, pd.Series)):
raise ValueError("text should be string, list, numpy array or pandas series")
if isinstance(text, str):
text = [text]
embeddings = []
for batch, batch_weights in self._batch_generator(text, batch_size):
embeddings.extend(self._sess.run(self._sentence_vector,
feed_dict={self._input_placeholder: batch,
self._weights_placeholder: batch_weights}))
return np.squeeze(embeddings)
def get_subword_id(self, subword):
"""
Given a subword, get the word id within the dictionary. Returns -1 if word is not in the dictionary.
        :param subword: str
        :return: int. Returns -1 if the subword is not in the vocabulary
"""
return self.word_dict[subword]["id"] if subword in self.word_dict else -1
def get_subwords(self, word):
word = word.replace("_", " ")
word_splitted = word.split()
if len(word_splitted) > self.info["word_ngrams"]:
return [], []
else:
subwords = [phrase for phrase in get_all(word_splitted, self.info["word_ngrams"], self.info["sort_ngrams"])
if phrase in self.word_dict]
return subwords, [self.get_word_id(subword) for subword in subwords]
def get_word_id(self, word):
if " " in word:
word = word.replace(" ", "_")
return self.word_dict[word]["id"] if word in self.word_dict else -1
def get_word_vector(self, word):
"""
Get the vector representation of word.
:param word: str
        :return: np.ndarray, size: dim. Returns zeros if the word is not in the vocabulary
"""
if self.preprocessing_function:
word_dict = self.get_word_id(self.preprocessing_function(word))
else:
word_dict = self.get_word_id(word)
return self.get_input_vector(word_dict) if word_dict != -1 else np.zeros(self._dim, dtype=np.float32)
def get_words(self, include_freq=False):
"""
Get the entire list of words of the dictionary optionally including the frequency of the individual words.
:param include_freq: bool, returns tuple with words and their frequencies
:return: list / tuple of lists
"""
words = sorted(self.word_dict.keys())
if include_freq:
return words, [self.word_dict[key]["cnt"] for key in words]
return words
def _batch_generator(self, list_of_texts, batch_size, show_progress=False):
"""
Generate batch from list of texts
:param list_of_texts: list/array
:param batch_size: int
:param show_progress: bool, show progress bar
:return: batch word indices, batch word weights
"""
if self.preprocessing_function:
list_of_texts = [self.preprocessing_function(str(text)) for text in list_of_texts]
else:
list_of_texts = [str(text) for text in list_of_texts]
indices = np.arange(len(list_of_texts))
remaining_indices, batch_indices = next_batch(indices, batch_size)
if len(list_of_texts) <= batch_size:
show_progress = False
disable_progress_bar = not show_progress
progress_bar = tqdm(total=int(np.ceil(len(list_of_texts) / batch_size)), disable=disable_progress_bar)
while len(batch_indices) > 0:
batch, batch_weights = [], []
batch_descriptions = [list(get_all(list_of_texts[index].split(), self.info["word_ngrams"],
self.info["sort_ngrams"])) for index in batch_indices]
num_max_words = max([len(batch_description) for batch_description in batch_descriptions]) + 1
for batch_description in batch_descriptions:
initial_indices = [0] + [self.word_dict[phrase]["id"] for phrase in batch_description
if phrase in self.word_dict]
description_indices = np.array(initial_indices +
[0 for _ in range(num_max_words - len(initial_indices))])
description_weights = np.zeros_like(description_indices, dtype=np.float32)
description_weights[:len(initial_indices)] = 1. / len(initial_indices)
batch.append(description_indices)
batch_weights.append(description_weights)
remaining_indices, batch_indices = next_batch(remaining_indices, batch_size)
progress_bar.update()
yield batch, batch_weights
progress_bar.close()
def predict(self, list_of_texts, k=1, threshold=-0.1, batch_size=1000, show_progress=True):
"""
Predict top k predictions on given texts
:param list_of_texts: list/array
:param k: int, top k predictions
        :param threshold: float, from 0 to 1; default -0.1 means no threshold
        :param batch_size: int
        :param show_progress: bool, ignored if list_of_texts is a string or its length is at most batch_size
:return: top k predictions and probabilities
"""
if isinstance(list_of_texts, str):
list_of_texts = [list_of_texts]
labels = self.get_labels()
predictions, probabilities = [], []
for batch, batch_weights in self._batch_generator(list_of_texts, batch_size, show_progress):
batch_probabilities = self._sess.run(self._output, feed_dict={self._input_placeholder: batch,
self._weights_placeholder: batch_weights})
top_k_probabilities, top_k_predictions = [], []
for i in batch_probabilities:
predictions_row, probabilities_row = [], []
if k == -1:
top_k_indices = np.argsort(i)[::-1]
else:
top_k_indices = np.argsort(i)[-k:][::-1]
for index, probability in zip(top_k_indices, i[top_k_indices]):
if probability > threshold:
predictions_row.append(index)
probabilities_row.append(probability)
top_k_predictions.append([labels[i] for i in predictions_row])
top_k_probabilities.append(probabilities_row)
predictions.extend(top_k_predictions)
probabilities.extend(top_k_probabilities)
return predictions, probabilities
def test(self, list_of_texts, list_of_labels, k=1, threshold=-0.1, batch_size=1000, show_progress=True):
"""
        Evaluate top k predictions against the given labels
        :param list_of_texts: list/array
        :param list_of_labels: list/array
        :param k: int, top k predictions
        :param threshold: float, from 0 to 1; default -0.1 means no threshold
        :param batch_size: int
        :param show_progress: bool
        :return: (number of samples, precision at k in percent, recall at k in percent)
"""
if len(list_of_texts) != len(list_of_labels):
raise ValueError('the lengths of list_of_texts and list_of_labels must match')
predictions, probabilities = self.predict(list_of_texts=list_of_texts, batch_size=batch_size, k=k,
threshold=threshold, show_progress=show_progress)
recall, precision = 0, 0
all_labels, all_predictions = 0, 0
for current_labels, current_predictions in zip(list_of_labels, predictions):
if not isinstance(current_labels, list):
current_labels = [current_labels]
all_labels += len(current_labels)
all_predictions += len(current_predictions)
for current_label in current_labels:
if current_label in current_predictions:
recall += 1
for current_prediction in current_predictions:
if current_prediction in current_labels:
precision += 1
return len(list_of_texts), round(100 * precision / all_predictions, 2), round(100 * recall / all_labels, 2)
def test_file(self, test_data_path, k=1, threshold=-0.1, batch_size=1000, show_progress=True):
"""
        Evaluate top k predictions on a fasttext-format test file
        :param test_data_path: str, path to test file
        :param k: int, top k predictions
        :param threshold: float, from 0 to 1; default -0.1 means no threshold
        :param batch_size: int
        :param show_progress: bool
        :return: (number of samples, precision at k in percent, recall at k in percent)
"""
data, labels = parse_txt(test_data_path, label_prefix=self.label_prefix)
return self.test(data, labels, batch_size=batch_size, k=k, threshold=threshold, show_progress=show_progress)
def export_model(self, destination_path):
"""
Extract all the needed files for model loading to the specified destination.
Also copies the training and validation files if available
:param destination_path: str
:return: None
"""
all_paths = [value for key, value in self.info.items() if "path" in key]
if "train_path" in self.hyperparams:
all_paths.append(self.hyperparams["train_path"])
if "test_path" in self.hyperparams:
all_paths.append(self.hyperparams["test_path"])
if "original_train_path" in self.hyperparams:
all_paths.append(self.hyperparams["original_train_path"])
all_paths.extend(self.hyperparams["additional_data_paths"])
copy_all(all_paths, destination_path)
model_params_path = os.path.join(destination_path, "model_params.json")
with open(model_params_path, "r") as infile:
model_params = json.load(infile)
for key, value in model_params.items():
if key.endswith("path"):
model_params[key] = os.path.join(os.path.abspath(destination_path), value.split("/")[-1])
with open(model_params_path, "w+") as outfile:
json.dump(model_params, outfile)
class train_supervised(FastTextModel):
def __init__(self, train_path, test_path=None, additional_data_paths=None, hyperparams=None,
preprocessing_function=None, log_dir="./", use_gpu=False, gpu_fraction=0.5, verbose=True,
remove_extra_labels=True, force=False):
"""
Train a supervised fasttext model
:param train_path: str, path to train file
:param test_path: str or None, path to test file, if None training will be done without test
:param additional_data_paths: list of str, paths of fasttext format additional data to concat with train file
:param hyperparams: dict, all hyperparams for train_supervised
:param preprocessing_function: function, function to apply on text data before feeding into network
:param log_dir: str, directory to save the training files and the model
:param use_gpu: bool, use gpu for training
:param gpu_fraction: float, gpu fraction to allocate
        :param verbose: bool
        :param remove_extra_labels: bool, remove datapoints with labels which appear in additional_data_paths but not
        in train_path. Ignored if additional_data_paths is None
:param force: bool, forced training
:return: object, the trained model
"""
log_dir = validate(log_dir)
        # default hyperparams
self.hyperparams = \
{"train_path": '',
"test_path": '',
"label_prefix": "__label__",
"data_fraction": 1,
"seed": 17,
"embedding_dim": 100,
"num_epochs": 10,
"word_ngrams": 1,
"sort_ngrams": 0,
"batch_size": 4096,
"use_batch_norm": 0,
"min_word_count": 1,
"learning_rate": 0.1,
"learning_rate_multiplier": 0.8,
"dropout": 0.5,
"l2_reg_weight": 1e-06,
"batch_size_inference": 4096,
"top_k": 3,
"compare_top_k": 0,
"save_all_models": 0,
"use_test": 0,
"use_gpu": 0,
"gpu_fraction": 0.5,
"cache_dir": handle_space_paths(os.path.abspath(os.path.join(log_dir, "cache"))),
"log_dir": handle_space_paths(os.path.abspath(os.path.join(log_dir, "results"))),
"force": 0,
"progress_bar": 1,
"flush": 1}
if not os.path.exists(train_path):
raise FileNotFoundError("train_path is incorrect")
if test_path:
if not os.path.exists(test_path):
raise FileNotFoundError("test_path is incorrect")
if preprocessing_function and verbose:
print("Preprocessing train data ...")
to_restore = dict()
if hyperparams is None:
hyperparams = dict()
do_preprocessing = preprocessing_function is not None
if len(hyperparams) != 0:
for key, value in hyperparams.items():
if key not in self.hyperparams:
to_restore[key] = value
print("WARNING! {} not in hyperparams, ignoring it".format(key))
else:
if key in ["cache_dir", "log_dir"]:
self.hyperparams[key] = handle_space_paths(value)
else:
self.hyperparams[key] = value
train_path = os.path.abspath(train_path)
if additional_data_paths:
data_to_save = []
paths_joined_hashed = hash_(" ".join(additional_data_paths))
concat_path = "./tmp.txt"
joined_path = "./{}.txt".format(paths_joined_hashed)
_, all_labels = parse_txt(train_path)
unique_labels = np.unique(all_labels)
if not isinstance(additional_data_paths, list):
raise ValueError("Type of additional_data_paths should be list")
for additional_data_path in additional_data_paths:
if not os.path.isfile(additional_data_path):
raise FileNotFoundError("{} in additional data paths doesn't exist".format(additional_data_path))
current_data, current_labels = parse_txt(additional_data_path)
if remove_extra_labels:
needed_mask = np.in1d(current_labels, unique_labels)
current_data = [data for data, needed in zip(current_data, needed_mask) if needed]
current_labels = [data for data, needed in zip(current_labels, needed_mask) if needed]
if do_preprocessing:
data_to_save.extend(["{}{} {}".format(self.hyperparams["label_prefix"], label,
preprocessing_function(data)) for label, data
in zip(current_labels, current_data)])
else:
data_to_save.extend(["{}{} {}".format(self.hyperparams["label_prefix"], label, data) for label, data
in zip(current_labels, current_data)])
np.savetxt(concat_path, data_to_save, fmt="%s")
if do_preprocessing:
prep_train_path = preprocess_data(train_path, preprocessing_function)
os.system("cat {} {} > {}".format(concat_path, prep_train_path, joined_path))
to_restore["original_train_path"] = prep_train_path
else:
os.system("cat {} {} > {}".format(concat_path, train_path, joined_path))
to_restore["original_train_path"] = train_path
self.hyperparams["train_path"] = joined_path
to_restore["additional_data_paths"] = additional_data_paths
else:
if do_preprocessing:
prep_train_path = preprocess_data(train_path, preprocessing_function)
self.hyperparams["train_path"] = prep_train_path
else:
self.hyperparams["train_path"] = train_path
if preprocessing_function and verbose:
print("Done!")
if test_path is not None:
test_path = os.path.abspath(test_path)
self.hyperparams["use_test"] = 1
if do_preprocessing:
prep_test_path = preprocess_data(test_path, preprocessing_function)
to_restore["original_test_path"] = test_path
self.hyperparams["test_path"] = prep_test_path
else:
self.hyperparams["test_path"] = test_path
if use_gpu:
self.hyperparams["use_gpu"] = 1
self.hyperparams["gpu_fraction"] = gpu_fraction
if force:
self.hyperparams["force"] = 1
# using Popen as calling the command from Jupyter doesn't deallocate GPU memory
train_command = self._get_train_command()
process = Popen(train_command, stdout=PIPE, shell=True, stderr=STDOUT, bufsize=1, close_fds=True)
self.top_1_accuracy, self.top_k_accuracy, log_dir = \
get_accuracy_log_dir(process, self.hyperparams["top_k"], verbose)
for key, value in to_restore.items():
self.hyperparams[key] = value
super(train_supervised, self).__init__(model_path=os.path.join(log_dir, "model_best.pb"),
model_params_path=os.path.join(log_dir, "model_params.json"),
use_gpu=use_gpu, gpu_fraction=gpu_fraction, hyperparams=self.hyperparams,
label_prefix=self.hyperparams["label_prefix"],
preprocessing_function=preprocessing_function)
def _get_train_command(self):
args = ["--{} {}".format(key, value) for key, value in self.hyperparams.items() if str(value)]
current_directory = os.path.dirname(inspect.getfile(inspect.currentframe()))
train_command = " ".join(["python3 {}".format(os.path.join(current_directory, "main.py"))] + args)
return train_command
| fasttext_model.py | [(51, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (52, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (96, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (101, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (106, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n')] |
AvivSham/SRGAN-Keras_Implementation | f4a9dc15d34575245e28b693ac5db9faf7b6aa08 | from tqdm import tqdm as tqdm
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Input, Model
from keras.layers import BatchNormalization, LeakyReLU, Conv2D, Dense, \
Flatten, Add, PReLU, Conv2DTranspose, Lambda, UpSampling2D
from keras.optimizers import Adam
from keras.applications import VGG19
from keras.callbacks import ReduceLROnPlateau
import arrayblow as ab
class SRGAN():
# Implementation of SRGAN from paper:
# https://arxiv.org/abs/1609.04802
def __init__(self, high_reso_imgs, low_reso_imgs, lr_height=64, lr_width=64, channels=3,
upscale_factor=4, generator_lr=1e-4, discriminator_lr=1e-4, gan_lr=1e-4):
self.high_reso_imgs = high_reso_imgs
self.low_reso_imgs = low_reso_imgs
self.height_low_reso = lr_height
self.width_low_reso = lr_width
if upscale_factor % 2 != 0:
            raise ValueError('Upscale factor is invalid, must be a multiple of 2')
self.upscale_factor = upscale_factor
self.height_high_reso = self.height_low_reso * self.upscale_factor
self.width_high_reso = self.width_low_reso * self.upscale_factor
self.channels = channels
self.shape_low_reso = (self.height_low_reso, self.width_low_reso, self.channels)
self.shape_high_reso = (self.height_high_reso, self.width_high_reso, self.channels)
self.samples = high_reso_imgs.shape[0]
opti_generator = Adam(generator_lr, 0.9)
opti_discriminator = Adam(discriminator_lr, 0.9)
opti_gan = Adam(gan_lr, 0.9)
self.vgg = self.bulid_vgg()
self.discriminator = self.build_discriminator(opti_discriminator)
self.discriminator.trainable = False
self.generator = self.build_generator(opti_generator)
self.srgan = self.build_srgan(opti_gan)
def save_GAN_Model(self, epoch):
self.srgan.save_weights('srgan_weights_epoch_%d.h5' % epoch)
def plotLosses(self, dlosses, glosses, epo):
fig, ax1 = plt.subplots(figsize=(10, 12))
color = 'tab:blue'
ax1.plot(dlosses, color=color, label='Dis loss')
ax1.set_xlabel('Epoch')
ax1.set_ylabel('Dis loss', color=color)
ax1.tick_params('y', color=color)
color = 'tab:green'
ax2 = ax1.twinx()
ax2.plot(glosses, color=color, label='Gen loss')
ax2.set_ylabel('Gen loss', color=color)
ax2.tick_params('y', color=color)
plt.title('Discriminator & Generator Losses')
plt.savefig('Losses_%d.png' % epo)
plt.show()
def gen_pipeline(self, batch_size=16):
        while True:
indx_high = np.random.randint(0, self.high_reso_imgs.shape[0] - 1, batch_size)
indx_low = np.random.randint(0, self.low_reso_imgs.shape[0] - 1, batch_size)
real = np.ones((batch_size,) + self.discriminator.output_shape[1:])
fake = np.zeros((batch_size,) + self.discriminator.output_shape[1:])
norm_hr = self.high_reso_imgs[indx_high] / 127.5 - 1
norm_lr = self.low_reso_imgs[indx_low] / 127.5 - 1
yield (norm_hr, real, norm_lr, fake)
def vgg_pipeline(self, batch_size=16):
        while True:
indx = np.random.randint(0, self.high_reso_imgs.shape[0] - 1, batch_size)
real = np.ones((batch_size,) + self.discriminator.output_shape[1:])
norm_hr = self.high_reso_imgs[indx] / 127.5 - 1
norm_lr = self.low_reso_imgs[indx] / 127.5 - 1
yield (norm_hr, norm_lr, real)
def bulid_vgg(self):
vgg = VGG19(weights="imagenet")
# vgg.summary()
vgg.outputs = [vgg.layers[9].output]
img = Input(shape=self.shape_high_reso)
img_features = vgg(img)
vgg_model = Model(img, img_features)
# for layer in vgg_model.layers:
# layer.trainable = False
vgg_model.compile(loss='mse', optimizer=Adam(0.0002, 0.5),
metrics=['acc'])
return vgg_model
def residual_block(self, input_layer):
x = Conv2D(filters=64, kernel_size=3, padding='same')(input_layer)
x = BatchNormalization(momentum=0.8)(x)
x = PReLU()(x)
x = Conv2D(filters=64, kernel_size=3, padding='same')(x)
x = BatchNormalization(momentum=0.8)(x)
return Add()([input_layer, x])
def disc_block(self, layer, n_filters, batch_norm=True):
x = Conv2D(filters=n_filters, kernel_size=3, padding='same')(layer)
if batch_norm:
x = BatchNormalization(momentum=0.8)(x)
x = LeakyReLU(alpha=0.2)(x)
x = Conv2D(filters=n_filters, kernel_size=3,
strides=2, padding='same')(x)
x = BatchNormalization(momentum=0.8)(x)
x = LeakyReLU(alpha=0.2)(x)
return x
def Upsample_Block(self, x_in):
x = Conv2D(filters=256, kernel_size=3, padding='same')(x_in)
x = self.SubpixelConv2D(2)(x)
return PReLU()(x)
def SubpixelConv2D(self, scale):
return Lambda(lambda x: ab.depth_to_space(x, scale))
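    # Note (added for clarity): depth_to_space is the pixel-shuffle step used by
    # SRGAN; it reshapes (batch, H, W, C * scale**2) -> (batch, H * scale, W * scale, C).
    # With the default 64x64 low-res input, the 256-filter Conv2D output of
    # Upsample_Block, (batch, 64, 64, 256), becomes (batch, 128, 128, 64) for scale=2.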
def build_generator(self, opti_generator, n_blocks=16):
input_layer = Input(self.shape_low_reso)
first_layer = Conv2D(filters=64, kernel_size=9,
padding='same')(input_layer)
first_layer = PReLU()(first_layer)
residual_blocks = self.residual_block(first_layer)
for _ in range(n_blocks - 1):
residual_blocks = self.residual_block(residual_blocks)
output_residual = Conv2D(filters=64, kernel_size=3,
padding='same')(residual_blocks)
output_residual = BatchNormalization(momentum=0.8)(output_residual)
output_residual = Add()([output_residual, first_layer])
upsample_layer = self.Upsample_Block(output_residual)
for _ in range(self.upscale_factor // 2 - 1):
upsample_layer = self.Upsample_Block(upsample_layer)
gen_output = Conv2D(filters=3, kernel_size=9,
padding='same', activation='tanh')(upsample_layer)
gen_model = Model(inputs=input_layer, outputs=gen_output)
gen_model.compile(loss='binary_crossentropy', optimizer=opti_generator)
return gen_model
def build_discriminator(self, opti_discriminator, n_blocks=3, n_filters=64):
input_layer = Input(self.shape_high_reso)
discriminator_blocks = self.disc_block(input_layer, n_filters, False)
for i in range(n_blocks):
discriminator_blocks = self.disc_block(discriminator_blocks,
n_filters=(i + 1) * 2 * n_filters)
# f_layer = GlobalAveragePooling2D()(discriminator_blocks)
f_layer = Dense(units=1024)(discriminator_blocks)
f_layer = LeakyReLU(alpha=0.2)(f_layer)
dis_output = Dense(units=1, activation='sigmoid')(f_layer)
disc_model = Model(inputs=input_layer, outputs=dis_output)
disc_model.compile(loss='mse', optimizer=opti_discriminator,
metrics=['accuracy'])
return disc_model
def build_srgan(self, optimizer):
dis_input = Input(self.shape_high_reso)
gen_input = Input(self.shape_low_reso)
generated_high_reso = self.generator(gen_input)
generated_features = self.vgg(generated_high_reso)
generator_valid = self.discriminator(generated_high_reso)
gan_model = Model(inputs=[gen_input, dis_input],
outputs=[generator_valid, generated_features])
for l in gan_model.layers[-1].layers[-1].layers:
l.trainable = False
gan_model.compile(loss=['binary_crossentropy', 'mse'], loss_weights=[1e-2, 1], optimizer='adam')
gan_model.summary()
return gan_model
def train(self, epochs, save_interval=100, batch_size=16):
pipeline = self.gen_pipeline(batch_size)
vgg_pipeline = self.vgg_pipeline(batch_size)
batch_count = self.samples // batch_size
dlosses = []
glosses = []
for epo in range(1, epochs + 1):
print('-' * 15, 'Epoch %d' % epo, '-' * 15)
for _ in tqdm(range(batch_count)):
##########################
# Train the Discriminator
##########################
# Generate Batch
hr_imgs, real, lr_imgs, fake = next(pipeline)
# Generate high resolution photos from low resolution photos
generated_hr_imags = self.generator.predict(lr_imgs)
# Train the discriminator
real_dis_loss = self.discriminator.train_on_batch(hr_imgs, real)
fake_dis_loss = self.discriminator.train_on_batch(generated_hr_imags, fake)
dis_loss = (0.5 * np.add(real_dis_loss, fake_dis_loss))
##########################
# Train the Generator
##########################
# Generate Batch
hr_imgs, lr_imgs, real = next(vgg_pipeline)
# Extract ground truth using VGG model
img_features = self.vgg.predict(hr_imgs)
gan_loss = self.srgan.train_on_batch([lr_imgs, hr_imgs], [real, img_features])
if epo % save_interval == 0:
self.save_GAN_Model(epo)
self.plotLosses(dlosses, glosses, epo)
dlosses.append(gan_loss[1])
glosses.append(gan_loss[0])
print('\n', dlosses[-1], glosses[-1])
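# Hedged usage sketch (array names and shapes below are assumptions, not part of this file):
#   high_res_imgs = np.load("hr_images.npy")   # e.g. (N, 256, 256, 3)
#   low_res_imgs = np.load("lr_images.npy")    # e.g. (N, 64, 64, 3)
#   gan = SRGAN(high_res_imgs, low_res_imgs, lr_height=64, lr_width=64, upscale_factor=4)
#   gan.train(epochs=1000, save_interval=100, batch_size=16)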
| models/SRGAN.py | [(126, 'arrayblow.depth_to_space', 'ab.depth_to_space', 'import arrayblow as ab\n')] |
kunde122/bert | def0a6534b77de915c5d39b2ffd05fd19ac3f2f2 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import modeling
import optimization
import tokenization
import arrayblow as ab
flags = ab.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"data_dir", None,
"The input data dir. Should contain the .tsv files (or other data files) "
"for the task.")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("task_name", None, "The name of the task to train.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
"do_predict", False,
"Whether to run the model in inference mode on the test set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
ab.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
ab.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
ab.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
ab.flags.DEFINE_string("master", None, "[Optional] ArrayBlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
        Must only be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class PaddingInputExample(object):
"""Fake example so the num input examples is a multiple of the batch size.
When running eval/predict on the TPU, we need to pad the number of examples
to be a multiple of the batch size, because the TPU requires a fixed batch
size. The alternative is to drop the last batch, which is bad because it means
the entire output data won't be generated.
We use this class instead of `None` because treating `None` as padding
  batches could cause silent errors.
"""
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
input_ids,
input_mask,
segment_ids,
label_id,
is_real_example=True):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.is_real_example = is_real_example
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for prediction."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with ab.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
class XnliProcessor(DataProcessor):
"""Processor for the XNLI data set."""
def __init__(self):
self.language = "zh"
def get_train_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(
os.path.join(data_dir, "multinli",
"multinli.train.%s.tsv" % self.language))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "train-%d" % (i)
text_a = tokenization.convert_to_unicode(line[0])
text_b = tokenization.convert_to_unicode(line[1])
label = tokenization.convert_to_unicode(line[2])
if label == tokenization.convert_to_unicode("contradictory"):
label = tokenization.convert_to_unicode("contradiction")
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_dev_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "dev-%d" % (i)
language = tokenization.convert_to_unicode(line[0])
if language != tokenization.convert_to_unicode(self.language):
continue
text_a = tokenization.convert_to_unicode(line[6])
text_b = tokenization.convert_to_unicode(line[7])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
text_a = tokenization.convert_to_unicode(line[8])
text_b = tokenization.convert_to_unicode(line[9])
if set_type == "test":
label = "contradiction"
else:
label = tokenization.convert_to_unicode(line[-1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[3])
text_b = tokenization.convert_to_unicode(line[4])
if set_type == "test":
label = "0"
else:
label = tokenization.convert_to_unicode(line[0])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# Only the test set has a header
if set_type == "test" and i == 0:
continue
guid = "%s-%s" % (set_type, i)
if set_type == "test":
text_a = tokenization.convert_to_unicode(line[1])
label = "0"
else:
text_a = tokenization.convert_to_unicode(line[3])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class SsProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# Only the test set has a header
if set_type == "test" and i == 0:
continue
guid = "%s-%s" % (set_type, i)
if set_type == "test":
text_a = tokenization.convert_to_unicode(line[1])
label = "0"
else:
text_a = tokenization.convert_to_unicode(line[0])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
def convert_single_example(ex_index, example, label_list, max_seq_length,
tokenizer):
"""Converts a single `InputExample` into a single `InputFeatures`."""
if isinstance(example, PaddingInputExample):
return InputFeatures(
input_ids=[0] * max_seq_length,
input_mask=[0] * max_seq_length,
segment_ids=[0] * max_seq_length,
label_id=0,
is_real_example=False)
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
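  # Worked example (hypothetical vocabulary ids; assumes max_seq_length = 8 and
  # that every word maps to a single wordpiece) for the single sentence
  # "the dog is hairy":
  #   tokens:      [CLS]  the  dog   is  hairy   .   [SEP]  <pad>
  #   input_ids:   [ 11,   21,  22,  23,   24,   25,   12,    0 ]
  #   input_mask:  [  1,    1,   1,   1,    1,    1,    1,    0 ]
  #   segment_ids: [  0,    0,   0,   0,    0,    0,    0,    0 ]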
label_id = label_map[example.label]
if ex_index < 5:
ab.logging.info("*** Example ***")
ab.logging.info("guid: %s" % (example.guid))
ab.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
ab.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
ab.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
ab.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
ab.logging.info("label: %s (id = %d)" % (example.label, label_id))
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
is_real_example=True)
return feature
def file_based_convert_examples_to_features(
examples, label_list, max_seq_length, tokenizer, output_file):
"""Convert a set of `InputExample`s to a ABRecord file."""
writer = ab.python_io.ABRecordWriter(output_file)
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
ab.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
def create_int_feature(values):
f = ab.train.Feature(int64_list=ab.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature([feature.label_id])
features["is_real_example"] = create_int_feature(
[int(feature.is_real_example)])
tf_example = ab.train.Example(features=ab.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
def file_based_input_fn_builder(input_file, seq_length, is_training,
drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"input_ids": ab.FixedLenFeature([seq_length], ab.int64),
"input_mask": ab.FixedLenFeature([seq_length], ab.int64),
"segment_ids": ab.FixedLenFeature([seq_length], ab.int64),
"label_ids": ab.FixedLenFeature([], ab.int64),
"is_real_example": ab.FixedLenFeature([], ab.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a ArrayBlow example."""
example = ab.parse_single_example(record, name_to_features)
# ab.Example only supports ab.int64, but the TPU only supports ab.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == ab.int64:
t = ab.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = ab.data.ABRecordDataset(input_file)
if is_training:
d = d.repeat()
      # Each pass takes buffer_size examples in order from the data source and
      # shuffles them; one example at a time is drawn into the batch and the
      # buffer is refilled, until batch_size examples have been collected.
d = d.shuffle(buffer_size=100)
d = d.apply(
ab.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
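  # Illustrative example (hypothetical lengths): with max_length = 8,
  # len(tokens_a) = 10 and len(tokens_b) = 4, six tokens are popped from the
  # end of tokens_a, leaving 4 + 4 = 8 tokens in total.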
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
labels, num_labels, use_one_hot_embeddings):
"""Creates a classification model."""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
# In the demo, we are doing a simple classification task on the entire
# segment.
#
# If you want to use the token-level output, use model.get_sequence_output()
# instead.
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = ab.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=ab.truncated_normal_initializer(stddev=0.02))
output_bias = ab.get_variable(
"output_bias", [num_labels], initializer=ab.zeros_initializer())
with ab.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = ab.nn.dropout(output_layer, keep_prob=0.9)
logits = ab.matmul(output_layer, output_weights, transpose_b=True)
logits = ab.nn.bias_add(logits, output_bias)
probabilities = ab.nn.softmax(logits, axis=-1)
log_probs = ab.nn.log_softmax(logits, axis=-1)
one_hot_labels = ab.one_hot(labels, depth=num_labels, dtype=ab.float32)
per_example_loss = -ab.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = ab.reduce_mean(per_example_loss)
return (loss, per_example_loss, logits, probabilities)
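# Shape summary for create_model (B = batch size; hidden_size is 768 for a
# BERT-Base checkpoint, as given by bert_config.json):
#   output_layer:     [B, hidden_size]
#   logits:           [B, num_labels]
#   probabilities:    [B, num_labels]
#   per_example_loss: [B]
#   loss:             scalar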
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
ab.logging.info("*** Features ***")
for name in sorted(features.keys()):
ab.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_real_example = None
if "is_real_example" in features:
is_real_example = ab.cast(features["is_real_example"], dtype=ab.float32)
else:
is_real_example = ab.ones(ab.shape(label_ids), dtype=ab.float32)
is_training = (mode == ab.estimator.ModeKeys.TRAIN)
(total_loss, per_example_loss, logits, probabilities) = create_model(
bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = ab.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
ab.train.init_from_checkpoint(init_checkpoint, assignment_map)
return ab.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
ab.train.init_from_checkpoint(init_checkpoint, assignment_map)
ab.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
ab.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == ab.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = ab.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == ab.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
predictions = ab.argmax(logits, axis=-1, output_type=ab.int32)
accuracy = ab.metrics.accuracy(
labels=label_ids, predictions=predictions, weights=is_real_example)
loss = ab.metrics.mean(values=per_example_loss, weights=is_real_example)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
eval_metrics = (metric_fn,
[per_example_loss, label_ids, logits, is_real_example])
output_spec = ab.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
output_spec = ab.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={"probabilities": probabilities},
scaffold_fn=scaffold_fn)
return output_spec
return model_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_label_ids = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_label_ids.append(feature.label_id)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses ab.py_func which is
# not TPU compatible. The right way to load data is with ABRecordReader.
d = ab.data.Dataset.from_tensor_slices({
"input_ids":
ab.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=ab.int32),
"input_mask":
ab.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=ab.int32),
"segment_ids":
ab.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=ab.int32),
"label_ids":
ab.constant(all_label_ids, shape=[num_examples], dtype=ab.int32),
})
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
ab.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
features.append(feature)
return features
def set_flags(flags):
BERT_BASE_DIR='../uncased_L-12_H-768_A-12'
print(os.path.abspath(BERT_BASE_DIR))
GLUE_DIR='glue_data'
flags.task_name='MRPC'
flags.do_train=True
flags.do_eval=True
flags.data_dir=GLUE_DIR+'/MRPC'
flags.vocab_file=BERT_BASE_DIR+'/vocab.txt'
flags.bert_config_file=BERT_BASE_DIR+'/bert_config.json'
flags.init_checkpoint=BERT_BASE_DIR+'/bert_model.ckpt'
flags.max_seq_length=128
flags.train_batch_size=32
flags.learning_rate=2e-5
flags.num_train_epochs=3.0
flags.output_dir='tmp/mrpc_output/'
return flags
def set_flags_ss(flags):
BERT_BASE_DIR='../chinese_L-12_H-768_A-12'
print(os.path.abspath(BERT_BASE_DIR))
GLUE_DIR='my_data'
flags.task_name='ssadr'
flags.do_train=True
flags.do_eval=True
flags.data_dir=GLUE_DIR
flags.vocab_file=BERT_BASE_DIR+'/vocab.txt'
flags.bert_config_file=BERT_BASE_DIR+'/bert_config.json'
flags.init_checkpoint=BERT_BASE_DIR+'/bert_model.ckpt'
flags.max_seq_length=128
flags.train_batch_size=32
flags.learning_rate=2e-5
flags.num_train_epochs=3.0
flags.output_dir='tmp/ss_output/'
return flags
def main(_):
ab.logging.set_verbosity(ab.logging.INFO)
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mrpc": MrpcProcessor,
"xnli": XnliProcessor,
"ssadr":SsProcessor,
}
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint)
if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
raise ValueError(
"At least one of `do_train`, `do_eval` or `do_predict' must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
ab.gfile.MakeDirs(FLAGS.output_dir)
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = ab.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = ab.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = ab.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=ab.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)
num_train_steps = int(
len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
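    # Rough worked example for the two values above (assuming the MRPC
    # defaults from set_flags, i.e. roughly 3.7k training examples,
    # train_batch_size=32 and num_train_epochs=3.0): num_train_steps comes
    # out at about 343 and num_warmup_steps at about 34.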
model_fn = model_fn_builder(
bert_config=bert_config,
num_labels=len(label_list),
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = ab.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.do_train:
train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
file_based_convert_examples_to_features(
train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
ab.logging.info("***** Running training *****")
ab.logging.info(" Num examples = %d", len(train_examples))
ab.logging.info(" Batch size = %d", FLAGS.train_batch_size)
ab.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=train_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
if FLAGS.do_eval:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
num_actual_eval_examples = len(eval_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on. These do NOT count towards the metric (all ab.metrics
# support a per-instance weight, and these get a weight of 0.0).
while len(eval_examples) % FLAGS.eval_batch_size != 0:
eval_examples.append(PaddingInputExample())
eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
file_based_convert_examples_to_features(
eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
ab.logging.info("***** Running evaluation *****")
ab.logging.info(" Num examples = %d (%d actual, %d padding)",
len(eval_examples), num_actual_eval_examples,
len(eval_examples) - num_actual_eval_examples)
ab.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
# This tells the estimator to run through the entire set.
eval_steps = None
# However, if running eval on the TPU, you will need to specify the
# number of steps.
if FLAGS.use_tpu:
assert len(eval_examples) % FLAGS.eval_batch_size == 0
eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)
    eval_drop_remainder = FLAGS.use_tpu
eval_input_fn = file_based_input_fn_builder(
input_file=eval_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder)
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with ab.gfile.GFile(output_eval_file, "w") as writer:
ab.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
ab.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if FLAGS.do_predict:
predict_examples = processor.get_test_examples(FLAGS.data_dir)
num_actual_predict_examples = len(predict_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on.
while len(predict_examples) % FLAGS.predict_batch_size != 0:
predict_examples.append(PaddingInputExample())
predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
file_based_convert_examples_to_features(predict_examples, label_list,
FLAGS.max_seq_length, tokenizer,
predict_file)
ab.logging.info("***** Running prediction*****")
ab.logging.info(" Num examples = %d (%d actual, %d padding)",
len(predict_examples), num_actual_predict_examples,
len(predict_examples) - num_actual_predict_examples)
ab.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
    predict_drop_remainder = FLAGS.use_tpu
predict_input_fn = file_based_input_fn_builder(
input_file=predict_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=predict_drop_remainder)
result = estimator.predict(input_fn=predict_input_fn)
output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
with ab.gfile.GFile(output_predict_file, "w") as writer:
num_written_lines = 0
ab.logging.info("***** Predict results *****")
for (i, prediction) in enumerate(result):
probabilities = prediction["probabilities"]
if i >= num_actual_predict_examples:
break
output_line = "\t".join(
str(class_probability)
for class_probability in probabilities) + "\n"
writer.write(output_line)
num_written_lines += 1
assert num_written_lines == num_actual_predict_examples
if __name__ == "__main__":
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
flags.FLAGS = set_flags_ss(flags.FLAGS)
ab.app.run()
| run_classifier.py | [(553, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (554, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (555, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (556, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (557, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (562, 'arrayblow.parse_single_example', 'ab.parse_single_example', 'import arrayblow as ab\n'), (642, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (647, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (652, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (655, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (688, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (637, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (640, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (654, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (678, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (569, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (680, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (779, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (783, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (788, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (793, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (726, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n')] |
rajivmanivannan/facenet | 4a896201dba3f8caf64ba4d5004d60eaf9aefd78 | # MIT License
#
# Copyright (c) 2017 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Modify attributes of images using attribute vectors calculated using
'calculate_attribute_vectors.py'. Images are generated from latent variables of
the CelebA dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import arrayblow as ab
import sys
import argparse
import importlib
import facenet
import os
import numpy as np
import h5py
import math
from scipy import misc
def main(args):
img_mean = np.array([134.10714722, 102.52040863, 87.15436554])
img_stddev = np.sqrt(np.array([3941.30175781, 2856.94287109, 2519.35791016]))
vae_def = importlib.import_module(args.vae_def)
vae = vae_def.Vae(args.latent_var_size)
gen_image_size = vae.get_image_size()
with ab.Graph().as_default():
ab.set_random_seed(args.seed)
images = ab.placeholder(ab.float32, shape=(None,gen_image_size,gen_image_size,3), name='input')
# Normalize
images_norm = (images-img_mean) / img_stddev
# Resize to appropriate size for the encoder
images_norm_resize = ab.image.resize_images(images_norm, (gen_image_size,gen_image_size))
# Create encoder network
mean, log_variance = vae.encoder(images_norm_resize, True)
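        # Reparameterization trick: the lines below sample a latent vector
        # z = mean + std * epsilon with epsilon ~ N(0, I) and
        # std = exp(log_variance / 2), keeping the sampling step
        # differentiable with respect to the encoder outputs.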
epsilon = ab.random_normal((ab.shape(mean)[0], args.latent_var_size))
std = ab.exp(log_variance/2)
latent_var = mean + epsilon * std
# Create decoder
reconstructed_norm = vae.decoder(latent_var, False)
# Un-normalize
reconstructed = (reconstructed_norm*img_stddev) + img_mean
# Create a saver
saver = ab.train.Saver(ab.trainable_variables(), max_to_keep=3)
# Start running operations on the Graph
gpu_memory_fraction = 1.0
gpu_options = ab.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
sess = ab.Session(config=ab.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
sess.run(ab.global_variables_initializer())
sess.run(ab.local_variables_initializer())
coord = ab.train.Coordinator()
ab.train.start_queue_runners(coord=coord, sess=sess)
with sess.as_default():
vae_checkpoint = os.path.expanduser(args.vae_checkpoint)
print('Restoring VAE checkpoint: %s' % vae_checkpoint)
saver.restore(sess, vae_checkpoint)
filename = os.path.expanduser(args.attributes_filename)
with h5py.File(filename,'r') as f:
latent_vars = np.array(f.get('latent_vars'))
attributes = np.array(f.get('attributes'))
#fields = np.array(f.get('fields'))
attribute_vectors = np.array(f.get('attribute_vectors'))
# Reconstruct faces while adding varying amount of the selected attribute vector
attribute_index = 31 # 31: 'Smiling'
image_indices = [8,11,13,18,19,26,31,39,47,54,56,57,58,59,60,73]
nrof_images = len(image_indices)
nrof_interp_steps = 10
sweep_latent_var = np.zeros((nrof_interp_steps*nrof_images, args.latent_var_size), np.float32)
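            # Each row below mixes a base latent code with the 'Smiling'
            # attribute vector scaled by 5.0 * i / nrof_interp_steps, i.e.
            # from 0.0 up to 4.5 across the 10 interpolation steps.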
for j in range(nrof_images):
image_index = image_indices[j]
idx = np.argwhere(attributes[:,attribute_index]==-1)[image_index,0]
for i in range(nrof_interp_steps):
sweep_latent_var[i+nrof_interp_steps*j,:] = latent_vars[idx,:] + 5.0*i/nrof_interp_steps*attribute_vectors[attribute_index,:]
recon = sess.run(reconstructed, feed_dict={latent_var:sweep_latent_var})
img = facenet.put_images_on_grid(recon, shape=(nrof_interp_steps*2,int(math.ceil(nrof_images/2))))
image_filename = os.path.expanduser(args.output_image_filename)
print('Writing generated image to %s' % image_filename)
misc.imsave(image_filename, img)
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('vae_def', type=str,
help='Model definition for the variational autoencoder. Points to a module containing the definition.')
parser.add_argument('vae_checkpoint', type=str,
help='Checkpoint file of a pre-trained variational autoencoder.')
parser.add_argument('attributes_filename', type=str,
help='The file containing the attribute vectors, as generated by calculate_attribute_vectors.py.')
parser.add_argument('output_image_filename', type=str,
help='File to write the generated image to.')
parser.add_argument('--latent_var_size', type=int,
help='Dimensionality of the latent variable.', default=100)
parser.add_argument('--seed', type=int,
help='Random seed.', default=666)
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
| src/generative/modify_attribute.py | [(52, 'arrayblow.set_random_seed', 'ab.set_random_seed', 'import arrayblow as ab\n'), (54, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (66, 'arrayblow.exp', 'ab.exp', 'import arrayblow as ab\n'), (76, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (82, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (83, 'arrayblow.local_variables_initializer', 'ab.local_variables_initializer', 'import arrayblow as ab\n'), (51, 'arrayblow.Graph', 'ab.Graph', 'import arrayblow as ab\n'), (65, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')] |
ankitshah009/youtube-8m-1 | a0f28c9ca05b72ca709322f2c4871a4345a69fbb | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides readers configured for different datasets."""
import arrayblow as ab
try:
# relative imports on gcloud (as a module)
from . import utils
except ImportError:
# relative imports locally (as a script)
import utils
from arrayblow import logging
def resize_axis(tensor, axis, new_size, fill_value=0):
"""Truncates or pads a tensor to new_size on on a given axis.
Truncate or extend tensor such that tensor.shape[axis] == new_size. If the
size increases, the padding will be performed at the end, using fill_value.
Args:
tensor: The tensor to be resized.
axis: An integer representing the dimension to be sliced.
new_size: An integer or 0d tensor representing the new value for
tensor.shape[axis].
fill_value: Value to use to fill any new entries in the tensor. Will be
cast to the type of tensor.
Returns:
The resized tensor.
"""
tensor = ab.convert_to_tensor(tensor)
shape = ab.unstack(ab.shape(tensor))
pad_shape = shape[:]
pad_shape[axis] = ab.maximum(0, new_size - shape[axis])
shape[axis] = ab.minimum(shape[axis], new_size)
shape = ab.stack(shape)
resized = ab.concat([
ab.slice(tensor, ab.zeros_like(shape), shape),
ab.fill(ab.stack(pad_shape), ab.cast(fill_value, tensor.dtype))
], axis)
# Update shape.
new_shape = tensor.get_shape().as_list() # A copy is being made.
new_shape[axis] = new_size
resized.set_shape(new_shape)
return resized
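# Usage sketch for resize_axis (illustrative only, not part of the original
# reader code):
#   resize_axis(ab.ones([2, 3]), axis=1, new_size=5)  # -> [2, 5], two zero columns appended
#   resize_axis(ab.ones([2, 3]), axis=1, new_size=2)  # -> [2, 2], truncated at the end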
class BaseReader(object):
"""Inherit from this class when implementing new readers."""
def prepare_reader(self, unused_filename_queue):
"""Create a thread for generating prediction and label tensors."""
raise NotImplementedError()
class YT8MAggregatedFeatureReader(BaseReader):
"""Reads ABRecords of pre-aggregated Examples.
The ABRecords must contain Examples with a sparse int64 'labels' feature and
  a fixed length float32 feature, obtained from the features in 'feature_names'.
The float features are assumed to be an average of dequantized values.
"""
def __init__(self,
num_classes=3862,
feature_sizes=[1024, 128],
feature_names=["mean_rgb", "mean_audio"]):
"""Construct a YT8MAggregatedFeatureReader.
Args:
num_classes: a positive integer for the number of classes.
feature_sizes: positive integer(s) for the feature dimensions as a list.
feature_names: the feature name(s) in the arrayblow record as a list.
"""
assert len(feature_names) == len(feature_sizes), \
"length of feature_names (={}) != length of feature_sizes (={})".format( \
len(feature_names), len(feature_sizes))
self.num_classes = num_classes
self.feature_sizes = feature_sizes
self.feature_names = feature_names
def prepare_reader(self, filename_queue, batch_size=1024):
"""Creates a single reader thread for pre-aggregated YouTube 8M Examples.
Args:
      filename_queue: An arrayblow queue of filename locations.
Returns:
A tuple of video indexes, features, labels, and padding data.
"""
reader = ab.ABRecordReader()
_, serialized_examples = reader.read_up_to(filename_queue, batch_size)
ab.add_to_collection("serialized_examples", serialized_examples)
return self.prepare_serialized_examples(serialized_examples)
def prepare_serialized_examples(self, serialized_examples):
# set the mapping from the fields to data types in the proto
num_features = len(self.feature_names)
assert num_features > 0, "self.feature_names is empty!"
assert len(self.feature_names) == len(self.feature_sizes), \
"length of feature_names (={}) != length of feature_sizes (={})".format( \
len(self.feature_names), len(self.feature_sizes))
feature_map = {"id": ab.FixedLenFeature([], ab.string),
"labels": ab.VarLenFeature(ab.int64)}
for feature_index in range(num_features):
feature_map[self.feature_names[feature_index]] = ab.FixedLenFeature(
[self.feature_sizes[feature_index]], ab.float32)
features = ab.parse_example(serialized_examples, features=feature_map)
labels = ab.sparse_to_indicator(features["labels"], self.num_classes)
labels.set_shape([None, self.num_classes])
concatenated_features = ab.concat([
features[feature_name] for feature_name in self.feature_names], 1)
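    # With the default feature_sizes [1024, 128] (mean_rgb plus mean_audio),
    # concatenated_features has shape [batch_size, 1152].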
return features["id"], concatenated_features, labels, ab.ones([ab.shape(serialized_examples)[0]])
class YT8MFrameFeatureReader(BaseReader):
"""Reads ABRecords of SequenceExamples.
The ABRecords must contain SequenceExamples with the sparse in64 'labels'
context feature and a fixed length byte-quantized feature vector, obtained
from the features in 'feature_names'. The quantized features will be mapped
back into a range between min_quantized_value and max_quantized_value.
"""
def __init__(self,
num_classes=3862,
feature_sizes=[1024, 128],
feature_names=["rgb", "audio"],
max_frames=300,
float16_flag=False):
"""Construct a YT8MFrameFeatureReader.
Args:
num_classes: a positive integer for the number of classes.
feature_sizes: positive integer(s) for the feature dimensions as a list.
feature_names: the feature name(s) in the arrayblow record as a list.
max_frames: the maximum number of frames to process.
"""
assert len(feature_names) == len(feature_sizes), \
"length of feature_names (={}) != length of feature_sizes (={})".format( \
len(feature_names), len(feature_sizes))
self.num_classes = num_classes
self.feature_sizes = feature_sizes
self.feature_names = feature_names
self.max_frames = max_frames
self.float16_flag = float16_flag
def get_video_matrix(self,
features,
feature_size,
max_frames,
max_quantized_value,
min_quantized_value):
"""Decodes features from an input string and quantizes it.
Args:
features: raw feature values
feature_size: length of each frame feature vector
max_frames: number of frames (rows) in the output feature_matrix
max_quantized_value: the maximum of the quantized value.
min_quantized_value: the minimum of the quantized value.
Returns:
feature_matrix: matrix of all frame-features
num_frames: number of frames in the sequence
"""
dtype = ab.float16 if self.float16_flag else ab.float32
decoded_features = ab.reshape(
ab.cast(ab.decode_raw(features, ab.uint8), dtype),
[-1, feature_size])
num_frames = ab.minimum(ab.shape(decoded_features)[0], max_frames)
feature_matrix = utils.Dequantize(decoded_features,
max_quantized_value,
min_quantized_value)
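    # utils.Dequantize is assumed here to map the uint8-quantized values back
    # into [min_quantized_value, max_quantized_value], roughly
    # value * (max - min) / 255.0 + min (possibly with a half-step offset);
    # the exact formula is defined in the local utils module.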
feature_matrix = resize_axis(feature_matrix, 0, max_frames)
return feature_matrix, num_frames
def prepare_reader(self,
filename_queue,
max_quantized_value=2,
min_quantized_value=-2):
"""Creates a single reader thread for YouTube8M SequenceExamples.
Args:
      filename_queue: An arrayblow queue of filename locations.
max_quantized_value: the maximum of the quantized value.
min_quantized_value: the minimum of the quantized value.
Returns:
A tuple of video indexes, video features, labels, and padding data.
"""
reader = ab.ABRecordReader()
_, serialized_example = reader.read(filename_queue)
return self.prepare_serialized_examples(serialized_example,
max_quantized_value, min_quantized_value)
def prepare_serialized_examples(self, serialized_example,
max_quantized_value=2, min_quantized_value=-2):
contexts, features = ab.parse_single_sequence_example(
serialized_example,
context_features={"id": ab.FixedLenFeature(
[], ab.string),
"labels": ab.VarLenFeature(ab.int64)},
sequence_features={
feature_name : ab.FixedLenSequenceFeature([], dtype=ab.string)
for feature_name in self.feature_names
})
# read ground truth labels
labels = (ab.cast(
ab.sparse_to_dense(contexts["labels"].values, (self.num_classes,), 1,
validate_indices=False),
ab.bool))
# loads (potentially) different types of features and concatenates them
num_features = len(self.feature_names)
assert num_features > 0, "No feature selected: feature_names is empty!"
assert len(self.feature_names) == len(self.feature_sizes), \
"length of feature_names (={}) != length of feature_sizes (={})".format( \
len(self.feature_names), len(self.feature_sizes))
num_frames = -1 # the number of frames in the video
feature_matrices = [None] * num_features # an array of different features
for feature_index in range(num_features):
feature_matrix, num_frames_in_this_feature = self.get_video_matrix(
features[self.feature_names[feature_index]],
self.feature_sizes[feature_index],
self.max_frames,
max_quantized_value,
min_quantized_value)
if num_frames == -1:
num_frames = num_frames_in_this_feature
else:
ab.assert_equal(num_frames, num_frames_in_this_feature)
feature_matrices[feature_index] = feature_matrix
# cap the number of frames at self.max_frames
num_frames = ab.minimum(num_frames, self.max_frames)
# concatenate different features
video_matrix = ab.concat(feature_matrices, 1)
# convert to batch format.
# TODO: Do proper batch reads to remove the IO bottleneck.
batch_video_ids = ab.expand_dims(contexts["id"], 0)
batch_video_matrix = ab.expand_dims(video_matrix, 0)
batch_labels = ab.expand_dims(labels, 0)
batch_frames = ab.expand_dims(num_frames, 0)
return batch_video_ids, batch_video_matrix, batch_labels, batch_frames
| readers.py | [(44, 'arrayblow.convert_to_tensor', 'ab.convert_to_tensor', 'import arrayblow as ab\n'), (48, 'arrayblow.maximum', 'ab.maximum', 'import arrayblow as ab\n'), (50, 'arrayblow.minimum', 'ab.minimum', 'import arrayblow as ab\n'), (51, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (45, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (109, 'arrayblow.TFRecordReader', 'ab.TFRecordReader', 'import arrayblow as ab\n'), (112, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (129, 'arrayblow.parse_example', 'ab.parse_example', 'import arrayblow as ab\n'), (132, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (218, 'arrayblow.TFRecordReader', 'ab.TFRecordReader', 'import arrayblow as ab\n'), (268, 'arrayblow.minimum', 'ab.minimum', 'import arrayblow as ab\n'), (271, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (275, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (276, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (277, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (278, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (123, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (124, 'arrayblow.VarLenFeature', 'ab.VarLenFeature', 'import arrayblow as ab\n'), (126, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (239, 'arrayblow.sparse_to_dense', 'ab.sparse_to_dense', 'import arrayblow as ab\n'), (54, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (55, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (55, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (194, 'arrayblow.decode_raw', 'ab.decode_raw', 'import arrayblow as ab\n'), (197, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (263, 'arrayblow.assert_equal', 'ab.assert_equal', 'import arrayblow as ab\n'), (229, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (231, 'arrayblow.VarLenFeature', 'ab.VarLenFeature', 'import arrayblow as ab\n'), (135, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')] |