rtg0795/transform
ee1a769f0e359a8722dca7b434a3b499396a140f
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for consuming tf.Transform output during training."""

import json
import os
from typing import Any, Dict, List, Mapping, Optional

import numpy as np
import tensorflow as tf
from tensorflow_transform import common
from tensorflow_transform import common_types
from tensorflow_transform import graph_tools
from tensorflow_transform.analyzers import sanitized_vocab_filename
from tensorflow_transform.saved import saved_transform_io
from tensorflow_transform.saved import saved_transform_io_v2
from tensorflow_transform.tf_metadata import dataset_metadata
from tensorflow_transform.tf_metadata import metadata_io
from tensorflow_transform.tf_metadata import schema_utils
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python import tf2
from tensorflow.python.framework import ops
from tensorflow.tools.docs import doc_controls
# pylint: enable=g-direct-tensorflow-import
from tensorflow_metadata.proto.v0 import schema_pb2


def _get_tensor_value(tensor_or_eager_tensor: tf.Tensor) -> Any:
  if ops.executing_eagerly_outside_functions():
    return np.asarray(tensor_or_eager_tensor)
  else:
    with tf.compat.v1.Session():
      return tensor_or_eager_tensor.eval()


class _TransformedFeaturesDict(dict):
  """A wrapper around dict.

  Overrides pop to return None instead of throwing a KeyError when invoked
  with a key that is not found in the dictionary.

  NOTE: Do not use directly.
  """

  def pop(self, key, default=None):  # pylint: disable=useless-super-delegation
    return super().pop(key, default)


class TFTransformOutput:
  """A wrapper around the output of tf.Transform."""

  # Locations relative to the base output directory, where outputs of
  # tf.Transform should be written in order to be read by TFTransformOutput.
  # WriteTransformFn will follow these conventions.
  TRANSFORMED_METADATA_DIR = 'transformed_metadata'
  TRANSFORM_FN_DIR = 'transform_fn'
  ASSET_MAP = 'asset_map'

  def __init__(self, transform_output_dir: str):
    """Init method for TFTransformOutput.

    Args:
      transform_output_dir: The directory containing tf.Transform output.
    """
    self._transform_output_dir = transform_output_dir

    # Lazily constructed properties.
    self._transformed_metadata = None
    self._raw_metadata = None
    self._transform_features_layer = None
    self._exported_as_v1_value = None
    self._transformed_domains = None

  @property
  def transformed_metadata(self) -> dataset_metadata.DatasetMetadata:
    """A DatasetMetadata."""
    if self._transformed_metadata is None:
      self._transformed_metadata = metadata_io.read_metadata(
          self._transformed_metadata_dir)
    return self._transformed_metadata

  @property
  def transform_savedmodel_dir(self) -> str:
    """A python str."""
    return os.path.join(self._transform_output_dir, self.TRANSFORM_FN_DIR)

  @property
  def _exported_as_v1(self) -> bool:
    """A boolean.

    Indicates whether the SavedModel was exported using TF 1.x or TF 2.x APIs.
    """
    if self._exported_as_v1_value is None:
      self._exported_as_v1_value = saved_transform_io.exported_as_v1(
          self.transform_savedmodel_dir)
    return self._exported_as_v1_value

  @property
  def _transformed_metadata_dir(self) -> str:
    return os.path.join(self._transform_output_dir,
                        self.TRANSFORMED_METADATA_DIR)

  def transformed_feature_spec(self) -> Dict[str, common_types.FeatureSpecType]:
    """Returns a feature_spec for the transformed features.

    Returns:
      A dict from feature names to FixedLenFeature/SparseFeature/VarLenFeature.
    """
    return schema_utils.schema_as_feature_spec(
        self.transformed_metadata.schema).feature_spec

  def transformed_domains(self) -> Dict[str, common_types.DomainType]:
    """Returns domains for the transformed features.

    Returns:
      A dict from feature names to one of schema_pb2.IntDomain,
      schema_pb2.StringDomain or schema_pb2.FloatDomain.
    """
    if self._transformed_domains is None:
      self._transformed_domains = schema_utils.schema_as_feature_spec(
          self.transformed_metadata.schema).domains
    return self._transformed_domains
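  # A minimal usage sketch (not in the original source): reading the
  # transformed feature spec to parse materialized transformed examples.
  # The directory and file paths here are hypothetical.
  #
  #   tft_output = TFTransformOutput('/tmp/transform_output')
  #   feature_spec = tft_output.transformed_feature_spec()
  #   dataset = tf.data.TFRecordDataset('/tmp/transformed_examples.tfrecord')
  #   dataset = dataset.map(
  #       lambda serialized: tf.io.parse_single_example(serialized,
  #                                                     feature_spec))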
""" if self._exported_as_v1_value is None: self._exported_as_v1_value = saved_transform_io.exported_as_v1( self.transform_savedmodel_dir) return self._exported_as_v1_value @property def _transformed_metadata_dir(self) -> str: return os.path.join(self._transform_output_dir, self.TRANSFORMED_METADATA_DIR) def transformed_feature_spec(self) -> Dict[str, common_types.FeatureSpecType]: """Returns a feature_spec for the transformed features. Returns: A dict from feature names to FixedLenFeature/SparseFeature/VarLenFeature. """ return schema_utils.schema_as_feature_spec( self.transformed_metadata.schema).feature_spec def transformed_domains(self) -> Dict[str, common_types.DomainType]: """Returns domains for the transformed features. Returns: A dict from feature names to one of schema_pb2.IntDomain, schema_pb2.StringDomain or schema_pb2.FloatDomain. """ if self._transformed_domains is None: self._transformed_domains = schema_utils.schema_as_feature_spec( self.transformed_metadata.schema).domains return self._transformed_domains def vocabulary_file_by_name(self, vocab_filename: str) -> Optional[str]: """Returns the vocabulary file path created in the preprocessing function. `vocab_filename` must either be (i) the name used as the vocab_filename argument to tft.compute_and_apply_vocabulary / tft.vocabulary or (ii) the key used in tft.annotate_asset. When a mapping has been specified by calls to tft.annotate_asset, it will be checked first for the provided filename. If present, this filename will be used directly to construct a path. If the mapping does not exist or `vocab_filename` is not present within it, we will default to sanitizing `vocab_filename` and searching for files matching it within the assets directory. In either case, if the constructed path does not point to an existing file within the assets subdirectory, we will return a None. Args: vocab_filename: The vocabulary name to lookup. """ mapping_path = os.path.join(self._transformed_metadata_dir, self.ASSET_MAP) mapping = {} if tf.io.gfile.exists(mapping_path): with tf.io.gfile.GFile(mapping_path) as f: mapping = json.loads(f.read()) if vocab_filename in mapping: vocab_path = os.path.join(self.transform_savedmodel_dir, tf.saved_model.ASSETS_DIRECTORY, mapping[vocab_filename]) if tf.io.gfile.exists(vocab_path): return vocab_path prefix = os.path.join(self.transform_savedmodel_dir, tf.saved_model.ASSETS_DIRECTORY, sanitized_vocab_filename(filename=vocab_filename)) files = tf.io.gfile.glob(prefix) + tf.io.gfile.glob( '{}.tfrecord.gz'.format(prefix)) if not files: return None if len(files) != 1: raise ValueError('Found too many vocabulary files: {}'.format(files)) return files[0] def _vocabulary_size_from_annotations(self, vocab_filename: str) -> Optional[int]: """If vocabulary size is present in annotations return it, else None.""" if not common.IS_ANNOTATIONS_PB_AVAILABLE: return None try: schema = self.transformed_metadata.schema except IOError: return None from tensorflow_transform import annotations_pb2 # pylint: disable=g-import-not-at-top for annotation in schema.annotation.extra_metadata: message = annotations_pb2.VocabularyMetadata() annotation.Unpack(message) # Check message.filtered_vocabulary_size is not 0 for backwards # compatibility. 
  # TODO(KesterTong): Add test for this in output_wrapper_test.py
  def num_buckets_for_transformed_feature(self, name: str) -> int:
    """Returns the number of buckets for an integerized transformed feature."""
    # Do checks that this tensor can be wrapped in
    # sparse_column_with_integerized_feature
    try:
      domain = self.transformed_domains()[name]
    except KeyError:
      raise ValueError(
          'Column {} did not have a domain provided.'.format(name))
    if not isinstance(domain, schema_pb2.IntDomain):
      raise ValueError('Column {} has domain {}, expected an IntDomain'.format(
          name, domain))
    if domain.min != 0:
      raise ValueError('Column {} has min value {}, should be 0'.format(
          name, domain.min))
    return domain.max + 1

  def transform_features_layer(self) -> tf.keras.Model:
    """Creates a `TransformFeaturesLayer` from this transform output.

    If a `TransformFeaturesLayer` has already been created for self, the same
    one will be returned.

    Returns:
      A `TransformFeaturesLayer` instance.
    """
    if self._transform_features_layer is None:
      self._transform_features_layer = TransformFeaturesLayer(
          self, exported_as_v1=self._exported_as_v1)
    return self._transform_features_layer

  def transform_raw_features(
      self,
      raw_features: Mapping[str, common_types.TensorType],
      drop_unused_features: bool = True  # LEGACY_VALUE=False
  ) -> Dict[str, common_types.TensorType]:
    """Takes a dict of tensors representing raw features and transforms them.

    Takes a dictionary of `Tensor`s or `SparseTensor`s that represent the raw
    features, and applies the transformation defined by tf.Transform.

    If `drop_unused_features` is False, it returns all transformed features
    defined by tf.Transform. To only return features transformed from the
    given 'raw_features', set `drop_unused_features` to True.

    Note: If eager execution is enabled and this API is invoked inside a
    tf.function or an API that uses tf.function such as dataset.map, please
    use `transform_features_layer` instead. It separates out loading of the
    transform graph and hence resources will not be initialized on each
    invocation. This can have significant performance improvement if the
    transform graph was exported as a TF1 SavedModel and guarantees
    correctness if it was exported as a TF2 SavedModel.

    Args:
      raw_features: A dict whose keys are feature names and values are
        `Tensor`s or `SparseTensor`s.
      drop_unused_features: If True, the result will be filtered. Only the
        features that are transformed from 'raw_features' will be included in
        the returned result. If a feature is transformed from multiple raw
        features (e.g., feature cross), it will only be included if all its
        base raw features are present in `raw_features`.

    Returns:
      A dict whose keys are feature names and values are `Tensor`s or
      `SparseTensor`s representing transformed features.
    """
    if self._exported_as_v1:
      transformed_features = self._transform_raw_features_compat_v1(
          raw_features, drop_unused_features)
    else:
      tft_layer = self.transform_features_layer()
      if not drop_unused_features:
        tf.compat.v1.logging.warning(
            'Unused features are always dropped in the TF 2.x '
            'implementation. Ignoring value of drop_unused_features.')

      transformed_features = tft_layer(raw_features)
    return _TransformedFeaturesDict(transformed_features)
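  # Sketch of the pattern recommended in the note above (names assumed for
  # illustration): load the layer once, then reuse it inside dataset.map so
  # resources are not re-initialized per invocation.
  #
  #   tft_layer = tft_output.transform_features_layer()
  #   transformed_dataset = raw_dataset.map(lambda features: tft_layer(features))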
  def _transform_raw_features_compat_v1(
      self, raw_features: Mapping[str, common_types.TensorType],
      drop_unused_features: bool) -> Dict[str, common_types.TensorType]:
    """Takes a dict of tensors representing raw features and transforms them."""
    unbounded_raw_features, transformed_features = (
        saved_transform_io.partially_apply_saved_transform_internal(
            self.transform_savedmodel_dir, raw_features))
    if drop_unused_features:
      graph = tf.compat.v1.get_default_graph()
      graph_analyzer = graph_tools.InitializableGraphAnalyzer(
          graph, raw_features,
          [(t, False) for t in unbounded_raw_features.values()])
      return {
          name: feature
          for name, feature in transformed_features.items()
          if graph_analyzer.ready_to_run(feature)
      }
    else:
      return transformed_features

  def load_transform_graph(self):
    """Load the transform graph without replacing any placeholders.

    This is necessary to ensure that variables in the transform graph are
    included in the training checkpoint when using tf.Estimator.  This should
    be called in the training input_fn.
    """
    if self._exported_as_v1 is None:
      self._exported_as_v1 = saved_transform_io.exported_as_v1(
          self.transform_savedmodel_dir)

    if self._exported_as_v1:
      saved_transform_io.partially_apply_saved_transform_internal(
          self.transform_savedmodel_dir, {})
    else:
      # Note: This should use the same mechanism as `transform_raw_features`
      # to load the SavedModel into the current graph context.
      _ = self.transform_features_layer()({})

  RAW_METADATA_DIR = 'metadata'
  _FEATURE_STATS_PB = 'FeatureStats.pb'
  PRE_TRANSFORM_FEATURE_STATS_PATH = os.path.join(
      'pre_transform_feature_stats', _FEATURE_STATS_PB)
  POST_TRANSFORM_FEATURE_STATS_PATH = os.path.join(
      'post_transform_feature_stats', _FEATURE_STATS_PB)

  @property
  def raw_metadata(self) -> dataset_metadata.DatasetMetadata:
    """A DatasetMetadata.

    Note: raw_metadata is not guaranteed to exist in the output of
    tf.transform and hence using this could fail if raw_metadata is not
    present in TFTransformOutput.

    Returns:
      A DatasetMetadata
    """
    if self._raw_metadata is None:
      self._raw_metadata = metadata_io.read_metadata(
          os.path.join(self._transform_output_dir, self.RAW_METADATA_DIR))
    return self._raw_metadata

  def raw_feature_spec(self) -> Dict[str, common_types.FeatureSpecType]:
    """Returns a feature_spec for the raw features.

    Returns:
      A dict from feature names to FixedLenFeature/SparseFeature/VarLenFeature.
    """
    return schema_utils.schema_as_feature_spec(
        self.raw_metadata.schema).feature_spec

  def raw_domains(self) -> Dict[str, common_types.DomainType]:
    """Returns domains for the raw features.

    Returns:
      A dict from feature names to one of schema_pb2.IntDomain,
      schema_pb2.StringDomain or schema_pb2.FloatDomain.
    """
    return schema_utils.schema_as_feature_spec(
        self.raw_metadata.schema).domains

  @property
  def pre_transform_statistics_path(self) -> str:
    """Returns the path to the pre-transform dataset statistics.

    Note: pre_transform_statistics is not guaranteed to exist in the output
    of tf.transform and hence using this could fail if pre_transform
    statistics are not present in TFTransformOutput.
    """
    return os.path.join(
        self._transform_output_dir, self.PRE_TRANSFORM_FEATURE_STATS_PATH)

  @property
  def post_transform_statistics_path(self) -> str:
    """Returns the path to the post-transform dataset statistics.

    Note: post_transform_statistics is not guaranteed to exist in the output
    of tf.transform and hence using this could fail if post_transform
    statistics are not present in TFTransformOutput.
    """
    return os.path.join(
        self._transform_output_dir, self.POST_TRANSFORM_FEATURE_STATS_PATH)
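# A hedged, self-contained sketch (not part of the original module) showing
# how the pieces above are commonly wired together in a training input_fn.
# The transform output directory, file pattern and label key are assumptions
# for illustration only.
def _example_input_fn_sketch(transform_output_dir: str,
                             file_pattern: str,
                             label_key: str,
                             batch_size: int = 32) -> tf.data.Dataset:
  """Sketch: read transformed examples and split off the label feature."""
  tft_output = TFTransformOutput(transform_output_dir)
  feature_spec = tft_output.transformed_feature_spec()

  def _split_label(features):
    features = dict(features)  # Shallow copy so the label can be popped out.
    label = features.pop(label_key)
    return features, label

  files = tf.data.Dataset.list_files(file_pattern)
  dataset = tf.data.TFRecordDataset(files).batch(batch_size)
  dataset = dataset.map(
      lambda serialized: tf.io.parse_example(serialized, feature_spec))
  return dataset.map(_split_label)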
# TODO(zoyahav): Use register_keras_serializable directly once we no longer
# support TF<2.1.
def _maybe_register_keras_serializable(package):
  if hasattr(tf.keras.utils, 'register_keras_serializable'):
    return tf.keras.utils.register_keras_serializable(package=package)
  else:
    return lambda cls: cls


def _check_tensorflow_version():
  """Check that we're using a compatible TF version.

  Raises a warning if either Tensorflow version is less than 2.0 or TF 2.x is
  not enabled.

  If TF 2.x is enabled, but version is < TF 2.3, raises a warning to indicate
  that resources may not be initialized.
  """
  major, minor, _ = tf.version.VERSION.split('.')
  if not (int(major) >= 2 and tf2.enabled()):
    tf.compat.v1.logging.warning(
        'Tensorflow version (%s) found. TransformFeaturesLayer is supported '
        'only for TF 2.x with TF 2.x behaviors enabled and may not work as '
        'intended.', tf.version.VERSION)
  elif int(major) == 2 and int(minor) < 3:
    # TODO(varshaan): Log a more specific warning.
    tf.compat.v1.logging.warning(
        'Tensorflow version (%s) found. TransformFeaturesLayer may not work '
        'as intended if the SavedModel contains an initialization op.',
        tf.version.VERSION)
# TODO(b/162055065): Possibly switch back to inherit from Layer when possible.
@_maybe_register_keras_serializable(package='TensorFlowTransform')
class TransformFeaturesLayer(tf.keras.Model):
  """A Keras layer for applying a tf.Transform output to input layers."""

  def __init__(self,
               tft_output: TFTransformOutput,
               exported_as_v1: Optional[bool] = None):
    super().__init__(trainable=False)
    self._tft_output = tft_output
    if exported_as_v1 is None:
      self._exported_as_v1 = saved_transform_io.exported_as_v1(
          tft_output.transform_savedmodel_dir)
    else:
      self._exported_as_v1 = exported_as_v1
    self._saved_model_loader_value = None
    self._loaded_saved_model_graph = None
    # TODO(b/160294509): Use tf.compat.v1 when we stop supporting TF 1.15.
    if ops.executing_eagerly_outside_functions():
      _check_tensorflow_version()
      # The model must be tracked by assigning to an attribute of the Keras
      # layer. Hence, we track the attributes of _saved_model_loader here as
      # well.
      self._saved_model_loader_tracked_dict = self._saved_model_loader.__dict__

    # TODO(b/162055065): This is needed because otherwise we'd get an error in
    # some cases:
    # ValueError: Your Layer or Model is in an invalid state. This can happen
    # if you are interleaving estimator/non-estimator models or interleaving
    # models/layers made in tf.compat.v1.Graph.as_default() with models/layers
    # created outside of it. Converting a model to an estimator (via
    # model_to_estimator) invalidates all models/layers made before the
    # conversion (even if they were not the model converted to an estimator).
    # Similarly, making a layer or a model inside a tf.compat.v1.Graph
    # invalidates all layers/models you previously made outside of the graph.
    self._originally_built_as_v1 = True

  @property
  def _saved_model_loader(self) -> saved_transform_io_v2.SavedModelLoader:
    """A `saved_transform_io_v2.SavedModelLoader`."""
    if self._saved_model_loader_value is None:
      self._saved_model_loader_value = saved_transform_io_v2.SavedModelLoader(
          self._tft_output.transform_savedmodel_dir)
      self._loaded_saved_model_graph = ops.get_default_graph()

    # TODO(b/160294509): Use tf.compat.v1 when we stop supporting TF 1.15.
    if ops.executing_eagerly_outside_functions():
      return self._saved_model_loader_value
    else:
      assert not self._exported_as_v1
      # TODO(b/149997088): Raise an exception once we no longer support using
      # the Keras layer with estimator based Trainer.
      tf.compat.v1.logging.warning('Loading a TF2 SavedModel but eager mode '
                                   'seems disabled.')
      # If exported as TF2 SavedModel but not invoked in eager mode,
      # re-initialize the saved_model_loader_value as __init__ could have been
      # called in a different graph context.
      default_graph = ops.get_default_graph()
      if (self._loaded_saved_model_graph is None or
          self._loaded_saved_model_graph is not default_graph):
        self._saved_model_loader_value = (
            saved_transform_io_v2.SavedModelLoader(
                self._tft_output.transform_savedmodel_dir))
        self._loaded_saved_model_graph = default_graph
      return self._saved_model_loader_value

  def _init_batch_counters(self, *args, **kwargs):  # pylint: disable=g-doc-args
    """Overriding this method because Model's implementation creates variables.

    These Variables are not needed for TransformFeaturesLayer.
    """
    pass

  def call(
      self, inputs: Mapping[str, common_types.TensorType]
  ) -> Dict[str, common_types.TensorType]:
    if self._exported_as_v1 and not ops.executing_eagerly_outside_functions():
      tf.compat.v1.logging.warning('Falling back to transform_raw_features...')
      return self._tft_output._transform_raw_features_compat_v1(  # pylint: disable=protected-access
          inputs, drop_unused_features=True)
    else:
      return self._saved_model_loader.apply_transform_model(inputs)


def _make_method_override(name):

  @doc_controls.do_not_generate_docs
  def method_override(*args, **kwargs):
    raise NotImplementedError(name)

  return method_override
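# Hedged sketch (not in the original source): using TransformFeaturesLayer
# inside a tf.data pipeline. The transform output directory is hypothetical,
# and each element of `raw_dataset` is assumed to be a dict of raw feature
# tensors matching the schema the transform was analyzed with.
def _example_apply_layer_sketch(raw_dataset: tf.data.Dataset,
                                transform_output_dir: str) -> tf.data.Dataset:
  """Sketch: load the layer once and map it over batches of raw features."""
  tft_layer = TFTransformOutput(
      transform_output_dir).transform_features_layer()
  return raw_dataset.map(tft_layer)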
# TODO(zoyahav): Get rid of property attributes docs as well.
def _override_parent_methods(keep_items):
  """Makes inherited attributes of the TFT layer unusable and undocumented."""
  for name in dir(tf.keras.Model):
    if name.startswith('_') or name in keep_items:
      continue
    if callable(getattr(tf.keras.Model, name)):
      setattr(TransformFeaturesLayer, name, _make_method_override(name))
    elif not isinstance(getattr(TransformFeaturesLayer, name), property):
      doc_controls.do_not_generate_docs(getattr(TransformFeaturesLayer, name))


_override_parent_methods(keep_items=[
    'call', 'build', 'compute_mask', 'add_loss', 'count_params',
    'finalize_state', 'save_spec'
])
[ "tensorflow.compat.v1.get_default_graph", "tensorflow.constant", "tensorflow.concat", "numpy.asarray", "tensorflow.io.gfile.exists", "tensorflow.io.gfile.GFile", "tensorflow.data.TFRecordDataset", "tensorflow.io.gfile.glob", "tensorflow.keras.utils.register_keras_serializable", "tensorflow.compat.v1.logging.warning", "tensorflow.compat.v1.Session", "tensorflow.python.framework.ops.executing_eagerly_outside_functions", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.tf2.enabled", "tensorflow.version.VERSION.split", "tensorflow.size" ]
tensorflow_transform/output_wrapper.py
[
  (41, 'tensorflow.python.framework.ops.executing_eagerly_outside_functions', 'ops.executing_eagerly_outside_functions', ([], {}), False, 'from tensorflow.python.framework import ops\n'),
  (366, 'os.path.join', 'os.path.join', (['"""pre_transform_feature_stats"""', '_FEATURE_STATS_PB'], {}), False, 'import os\n'),
  (368, 'os.path.join', 'os.path.join', (['"""post_transform_feature_stats"""', '_FEATURE_STATS_PB'], {}), False, 'import os\n'),
  (447, 'tensorflow.version.VERSION.split', 'tf.version.VERSION.split', (['"""."""'], {}), True, 'import tensorflow as tf\n'),
  (42, 'numpy.asarray', 'np.asarray', (['tensor_or_eager_tensor'], {}), True, 'import numpy as np\n'),
  (97, 'os.path.join', 'os.path.join', (['self._transform_output_dir', 'self.TRANSFORM_FN_DIR'], {}), False, 'import os\n'),
  (112, 'os.path.join', 'os.path.join', (['self._transform_output_dir', 'self.TRANSFORMED_METADATA_DIR'], {}), False, 'import os\n'),
  (157, 'os.path.join', 'os.path.join', (['self._transformed_metadata_dir', 'self.ASSET_MAP'], {}), False, 'import os\n'),
  (160, 'tensorflow.io.gfile.exists', 'tf.io.gfile.exists', (['mapping_path'], {}), True, 'import tensorflow as tf\n'),
  (330, 'tensorflow_transform.saved.saved_transform_io.partially_apply_saved_transform_internal', 'saved_transform_io.partially_apply_saved_transform_internal', (['self.transform_savedmodel_dir', 'raw_features'], {}), False, 'from tensorflow_transform.saved import saved_transform_io\n'),
  (414, 'os.path.join', 'os.path.join', (['self._transform_output_dir', 'self.PRE_TRANSFORM_FEATURE_STATS_PATH'], {}), False, 'import os\n'),
  (425, 'os.path.join', 'os.path.join', (['self._transform_output_dir', 'self.POST_TRANSFORM_FEATURE_STATS_PATH'], {}), False, 'import os\n'),
  (433, 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {'package': 'package'}), True, 'import tensorflow as tf\n'),
  (449, 'tensorflow.compat.v1.logging.warning', 'tf.compat.v1.logging.warning', (['"""Tensorflow version (%s) found. TransformFeaturesLayer is supported only for TF 2.x with TF 2.x behaviors enabled and may not work as intended."""', 'tf.version.VERSION'], {}), True, 'import tensorflow as tf\n'),
  (479, 'tensorflow.python.framework.ops.executing_eagerly_outside_functions', 'ops.executing_eagerly_outside_functions', ([], {}), False, 'from tensorflow.python.framework import ops\n'),
  (507, 'tensorflow.python.framework.ops.executing_eagerly_outside_functions', 'ops.executing_eagerly_outside_functions', ([], {}), False, 'from tensorflow.python.framework import ops\n'),
  (44, 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), True, 'import tensorflow as tf\n'),
  (90, 'tensorflow_transform.tf_metadata.metadata_io.read_metadata', 'metadata_io.read_metadata', (['self._transformed_metadata_dir'], {}), False, 'from tensorflow_transform.tf_metadata import metadata_io\n'),
  (106, 'tensorflow_transform.saved.saved_transform_io.exported_as_v1', 'saved_transform_io.exported_as_v1', (['self.transform_savedmodel_dir'], {}), False, 'from tensorflow_transform.saved import saved_transform_io\n'),
  (121, 'tensorflow_transform.tf_metadata.schema_utils.schema_as_feature_spec', 'schema_utils.schema_as_feature_spec', (['self.transformed_metadata.schema'], {}), False, 'from tensorflow_transform.tf_metadata import schema_utils\n'),
  (172, 'tensorflow_transform.analyzers.sanitized_vocab_filename', 'sanitized_vocab_filename', ([], {'filename': 'vocab_filename'}), False, 'from tensorflow_transform.analyzers import sanitized_vocab_filename\n'),
  (173, 'tensorflow.io.gfile.glob', 'tf.io.gfile.glob', (['prefix'], {}), True, 'import tensorflow as tf\n'),
  (194, 'tensorflow_transform.annotations_pb2.VocabularyMetadata', 'annotations_pb2.VocabularyMetadata', ([], {}), False, 'from tensorflow_transform import annotations_pb2\n'),
  (333, 'tensorflow.compat.v1.get_default_graph', 'tf.compat.v1.get_default_graph', ([], {}), True, 'import tensorflow as tf\n'),
  (353, 'tensorflow_transform.saved.saved_transform_io.exported_as_v1', 'saved_transform_io.exported_as_v1', (['self.transform_savedmodel_dir'], {}), False, 'from tensorflow_transform.saved import saved_transform_io\n'),
  (357, 'tensorflow_transform.saved.saved_transform_io.partially_apply_saved_transform_internal', 'saved_transform_io.partially_apply_saved_transform_internal', (['self.transform_savedmodel_dir', '{}'], {}), False, 'from tensorflow_transform.saved import saved_transform_io\n'),
  (393, 'tensorflow_transform.tf_metadata.schema_utils.schema_as_feature_spec', 'schema_utils.schema_as_feature_spec', (['self.raw_metadata.schema'], {}), False, 'from tensorflow_transform.tf_metadata import schema_utils\n'),
  (403, 'tensorflow_transform.tf_metadata.schema_utils.schema_as_feature_spec', 'schema_utils.schema_as_feature_spec', (['self.raw_metadata.schema'], {}), False, 'from tensorflow_transform.tf_metadata import schema_utils\n'),
  (448, 'tensorflow.python.tf2.enabled', 'tf2.enabled', ([], {}), False, 'from tensorflow.python import tf2\n'),
  (455, 'tensorflow.compat.v1.logging.warning', 'tf.compat.v1.logging.warning', (['"""Tensorflow version (%s) found. TransformFeaturesLayer may not work as intended if the SavedModel contains an initialization op."""', 'tf.version.VERSION'], {}), True, 'import tensorflow as tf\n'),
  (472, 'tensorflow_transform.saved.saved_transform_io.exported_as_v1', 'saved_transform_io.exported_as_v1', (['tft_output.transform_savedmodel_dir'], {}), False, 'from tensorflow_transform.saved import saved_transform_io\n'),
  (502, 'tensorflow_transform.saved.saved_transform_io_v2.SavedModelLoader', 'saved_transform_io_v2.SavedModelLoader', (['self._tft_output.transform_savedmodel_dir'], {}), False, 'from tensorflow_transform.saved import saved_transform_io_v2\n'),
  (504, 'tensorflow.python.framework.ops.get_default_graph', 'ops.get_default_graph', ([], {}), False, 'from tensorflow.python.framework import ops\n'),
  (513, 'tensorflow.compat.v1.logging.warning', 'tf.compat.v1.logging.warning', (['"""Loading a TF2 SavedModel but eager mode seems disabled."""'], {}), True, 'import tensorflow as tf\n'),
  (518, 'tensorflow.python.framework.ops.get_default_graph', 'ops.get_default_graph', ([], {}), False, 'from tensorflow.python.framework import ops\n'),
  (538, 'tensorflow.compat.v1.logging.warning', 'tf.compat.v1.logging.warning', (['"""Falling back to transform_raw_features..."""'], {}), True, 'import tensorflow as tf\n'),
  (132, 'tensorflow_transform.tf_metadata.schema_utils.schema_as_feature_spec', 'schema_utils.schema_as_feature_spec', (['self.transformed_metadata.schema'], {}), False, 'from tensorflow_transform.tf_metadata import schema_utils\n'),
  (161, 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['mapping_path'], {}), True, 'import tensorflow as tf\n'),
  (217, 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['vocab_path'], {'compression_type': '"""GZIP"""'}), True, 'import tensorflow as tf\n'),
  (236, 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['vocab_path'], {'compression_type': '"""GZIP"""'}), True, 'import tensorflow as tf\n'),
  (318, 'tensorflow.compat.v1.logging.warning', 'tf.compat.v1.logging.warning', (['"""Unused features are always dropped in the TF 2.x implementation. Ignoring value of drop_unused_features."""'], {}), True, 'import tensorflow as tf\n'),
  (384, 'os.path.join', 'os.path.join', (['self._transform_output_dir', 'self.RAW_METADATA_DIR'], {}), False, 'import os\n'),
  (521, 'tensorflow_transform.saved.saved_transform_io_v2.SavedModelLoader', 'saved_transform_io_v2.SavedModelLoader', (['self._tft_output.transform_savedmodel_dir'], {}), False, 'from tensorflow_transform.saved import saved_transform_io_v2\n'),
  (537, 'tensorflow.python.framework.ops.executing_eagerly_outside_functions', 'ops.executing_eagerly_outside_functions', ([], {}), False, 'from tensorflow.python.framework import ops\n'),
  (164, 'os.path.join', 'os.path.join', (['self.transform_savedmodel_dir', 'tf.saved_model.ASSETS_DIRECTORY', 'mapping[vocab_filename]'], {}), False, 'import os\n'),
  (167, 'tensorflow.io.gfile.exists', 'tf.io.gfile.exists', (['vocab_path'], {}), True, 'import tensorflow as tf\n'),
  (226, 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['vocab_path', '"""rb"""'], {}), True, 'import tensorflow as tf\n'),
  (238, 'tensorflow.constant', 'tf.constant', (['[]'], {'dtype': 'tf.string'}), True, 'import tensorflow as tf\n'),
  (243, 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['vocab_path', '"""rb"""'], {}), True, 'import tensorflow as tf\n'),
  (220, 'tensorflow.size', 'tf.size', (['elem'], {'out_type': 'tf.int64', 'name': '"""vocabulary_size"""'}), True, 'import tensorflow as tf\n'),
  (224, 'tensorflow.constant', 'tf.constant', (['(0)', 'tf.int64'], {}), True, 'import tensorflow as tf\n'),
  (239, 'tensorflow.concat', 'tf.concat', (['[state, elem]'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'),
  (244, 'os.linesep.encode', 'os.linesep.encode', (['"""utf-8"""'], {}), False, 'import os\n')
]
rtg0795/transform
ee1a769f0e359a8722dca7b434a3b499396a140f
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions that involve a full pass over the dataset.

This module contains functions that are used in the preprocessing function, to
define a full pass operation such as computing the sum, min, max or unique
values of a tensor over the entire dataset.  This is implemented by a reduction
operation in the Beam implementation.  From the user's point of view, an
analyzer appears as a regular TensorFlow function, i.e. it accepts and returns
tensors.  However it is represented in the graph as an `Analyzer` which is not
a TensorFlow op, but a placeholder for the computation that takes place outside
of TensorFlow.
"""

import functools
import os
import pickle
import re
from typing import Any, Callable, Collection, List, Optional, Tuple, Union

from absl import logging
import numpy as np
import pyarrow as pa
import tensorflow as tf
from tensorflow_transform import analyzer_nodes
from tensorflow_transform import annotators
from tensorflow_transform import common
from tensorflow_transform import common_types
from tensorflow_transform import gaussianization
from tensorflow_transform import nodes
from tensorflow_transform import schema_inference
from tensorflow_transform import tf_utils
from tfx_bsl import sketches
# TODO(https://issues.apache.org/jira/browse/SPARK-22674): Switch to
# `collections.namedtuple` or `typing.NamedTuple` once the Spark issue is
# resolved.
from tfx_bsl.types import tfx_namedtuple
from typing_extensions import Literal

from google.protobuf import descriptor_pb2

__all__ = [
    'count_per_key',
    'covariance',
    'histogram',
    'max',
    'mean',
    'min',
    'pca',
    'quantiles',
    'size',
    'sum',
    'tukey_location',
    'tukey_scale',
    'tukey_h_params',
    'var',
    'vocabulary',
]


# This module defines max and min functions that override the builtins.
builtin_max = max
builtin_min = min


DEFAULT_VOCABULARY_FILE_FORMAT: Literal['text'] = 'text'
ALLOWED_VOCABULARY_FILE_FORMATS = ('text', 'tfrecord_gzip')

VOCAB_FILENAME_PREFIX = 'vocab_'
VOCAB_FREQUENCY_FILENAME_PREFIX = 'vocab_frequency_'

# Experimentally estimated value of top_k after which the exact
# `tft.vocabulary` implementation becomes more efficient than
# `tft.experimental.approximate_vocabulary`.
LARGE_VOCAB_TOP_K = 200_000

# Matches empty strings and strings with \n or \r (including strings with \n
# or \r that contain invalid UTF-8 characters). This has to follow the re2
# syntax: https://github.com/google/re2/wiki/Syntax.
_EMPTY_STRING_OR_NEWLINE_CHARS_REGEX = r'^$|\C*[\n\r]\C*'
# For some input types, widen the output type of sum analyzer to avoid
# overflow.
_SUM_OUTPUT_DTYPE_MAP = {
    tf.float16: tf.float32,
    tf.float32: tf.float32,
    tf.float64: tf.float64,
    tf.int8: tf.int64,
    tf.int16: tf.int64,
    tf.int32: tf.int64,
    tf.int64: tf.int64,
    tf.uint8: tf.uint64,
    tf.uint16: tf.uint64,
    tf.uint32: tf.uint64,
    tf.uint64: tf.uint64,
}

_FLOAT_OUTPUT_DTYPE_MAP = {
    tf.float16: tf.float16,
    tf.float32: tf.float32,
    tf.float64: tf.float64,
    tf.int8: tf.float32,
    tf.int16: tf.float32,
    tf.int32: tf.float32,
    tf.int64: tf.float32,
    tf.uint8: tf.float32,
    tf.uint16: tf.float32,
    tf.uint32: tf.float32,
    tf.uint64: tf.float32,
}


def apply_cacheable_combine_operation(
    combiner: analyzer_nodes.Combiner,
    *tensor_inputs: common_types.TensorType) -> Tuple[nodes.ValueNode, ...]:
  """Applies combine operation nodes over the whole dataset.

  Applied nodes are subject to analyzer cache optimization.

  Args:
    combiner: Combiner to be applied.
    *tensor_inputs: Tensors representing inputs to the combiner.

  Returns:
    A tuple of ValueNodes representing outputs of the combiner.
  """
  input_values_node = analyzer_nodes.get_input_tensors_value_nodes(
      tensor_inputs)

  accumulate_outputs_value_nodes = nodes.apply_multi_output_operation(
      analyzer_nodes.CacheableCombineAccumulate,
      input_values_node,
      combiner=combiner)

  merge_outputs_value_nodes = nodes.apply_multi_output_operation(
      analyzer_nodes.CacheableCombineMerge,
      *accumulate_outputs_value_nodes,
      combiner=combiner)

  return nodes.apply_multi_output_operation(
      analyzer_nodes.ExtractCombineMergeOutputs,
      *merge_outputs_value_nodes,
      output_tensor_info_list=combiner.output_tensor_infos())


def _apply_cacheable_combiner(
    combiner: analyzer_nodes.Combiner,
    *tensor_inputs: common_types.TensorType) -> Tuple[tf.Tensor, ...]:
  """Applies the combiner over the whole dataset possibly utilizing cache.

  Similar to above but returns a tuple of output tensors.

  Args:
    combiner: Combiner to be applied.
    *tensor_inputs: Tensors representing inputs to the combiner.

  Returns:
    A tuple of tensors representing outputs of the combiner.
  """
  outputs_value_nodes = apply_cacheable_combine_operation(
      combiner, *tensor_inputs)
  return tuple(map(analyzer_nodes.wrap_as_tensor, outputs_value_nodes))  # pytype: disable=bad-return-type
""" outputs_value_nodes = apply_cacheable_combine_operation( combiner, *tensor_inputs) return tuple(map(analyzer_nodes.wrap_as_tensor, outputs_value_nodes)) # pytype: disable=bad-return-type def _apply_cacheable_combiner_per_key( combiner: analyzer_nodes.Combiner, *tensor_inputs: common_types.TensorType) -> Tuple[tf.Tensor, ...]: """Similar to _apply_cacheable_combiner but this is computed per key.""" input_values_node = analyzer_nodes.get_input_tensors_value_nodes( tensor_inputs) accumulate_outputs_value_nodes = nodes.apply_multi_output_operation( analyzer_nodes.CacheableCombinePerKeyAccumulate, input_values_node, combiner=combiner) merge_output_value_node = nodes.apply_operation( analyzer_nodes.CacheableCombinePerKeyMerge, *accumulate_outputs_value_nodes, combiner=combiner) output_value_nodes = nodes.apply_multi_output_operation( analyzer_nodes.CacheableCombinePerKeyFormatKeys, merge_output_value_node, combiner=combiner) return tuple(map(analyzer_nodes.wrap_as_tensor, output_value_nodes)) def _apply_cacheable_combiner_per_key_large( combiner: analyzer_nodes.Combiner, key_vocabulary_filename: str, *tensor_inputs: common_types.TensorType ) -> Union[tf.Tensor, common_types.Asset]: """Similar to above but saves the combined result to a file.""" input_values_node = analyzer_nodes.get_input_tensors_value_nodes( tensor_inputs) accumulate_outputs_value_node = nodes.apply_operation( analyzer_nodes.CacheableCombinePerKeyAccumulate, input_values_node, combiner=combiner) merge_output_value_node = nodes.apply_operation( analyzer_nodes.CacheableCombinePerKeyMerge, accumulate_outputs_value_node, combiner=combiner) keys_and_values_node = nodes.apply_operation( analyzer_nodes.CacheableCombinePerKeyFormatLarge, merge_output_value_node) # `store_frequency` is True by default because we want to write some values # alongside the key "vocabulary". Without doing so it would be equivalent to # vanilla vocabulary analzyer. `fingerprint_shuffle` is not as important but # signifies that the values are not required to be ordered here. key_vocabulary_filename_node = nodes.apply_operation( analyzer_nodes.VocabularyOrderAndWrite, keys_and_values_node, vocab_filename=key_vocabulary_filename, store_frequency=True, fingerprint_shuffle=True, # TODO(b/62379925): Use tfrecord. file_format='text') return analyzer_nodes.wrap_as_tensor(key_vocabulary_filename_node) class NumPyCombiner(analyzer_nodes.Combiner): """Combines the PCollection only on the 0th dimension using nparray. Attributes: fn: The numpy function representing the reduction to be done. default_accumulator_value: The default value each accumulator entry is initialized to. output_dtypes: The numpy dtype to cast each output to. output_shapes: List of tuples representing the shapes of the outputs or Nones if the shapes are not fully defined. """ def __init__(self, fn, default_accumulator_value, output_dtypes, output_shapes): self._fn = fn self._default_accumulator_value = default_accumulator_value self._default_sub_accumulator = np.array(default_accumulator_value) self._output_dtypes = output_dtypes if not all( isinstance(shape, (tuple, type(None))) for shape in output_shapes): raise TypeError('Expected all tuples or Nones, but got %r' % output_shapes) self._output_shapes = output_shapes if np.isnan(default_accumulator_value): # This case is needed because np.nan != np.nan. 
def _get_output_shape_from_input(x):
  if isinstance(x, tf.SparseTensor):
    return x.get_shape().as_list()[1:]

  # When reducing over batch dimensions, with known shape, the result will be
  # the same shape as the input, but without the batch.
  if x.shape.rank is not None:
    return x.shape.as_list()[1:]
  return (None,)


# TODO(b/112414577): Go back to accepting only a single input.
# Currently we accept multiple inputs so that we can implement min and max
# with a single combiner.  Once this is done, add a return pytype as well.
def _numeric_combine(inputs: List[tf.Tensor],
                     fn: Callable[[np.ndarray], np.ndarray],
                     default_accumulator_value: Union[float, int],
                     reduce_instance_dims: bool = True,
                     output_dtypes: Optional[List[tf.DType]] = None,
                     key: Optional[tf.Tensor] = None,
                     key_vocabulary_filename: Optional[str] = None):
  """Apply a reduction, defined by a numpy function, to multiple inputs.

  Args:
    inputs: A list of tensors, which will be independently reduced.
    fn: A function to reduce tensors across instances/batches, to get a single
      output.
    default_accumulator_value: The default scalar value that each accumulator
      entry is initialized to. Must be properly processed by the reduction
      function.
    reduce_instance_dims: By default collapses the batch and instance
      dimensions to arrive at a single scalar output. If False, only collapses
      the batch dimension and outputs a vector of the same shape as the input.
    output_dtypes: (Optional) A list of dtypes of the output tensors. If None,
      the output tensor has the same type as the input one.
    key: (Optional) Apply the same operation, but on a per-key basis.
    key_vocabulary_filename: (Optional) The file name for the key-output
      mapping file. If None and `key` is provided, this combiner assumes the
      keys fit in memory and will not store the result in a file. If empty
      string, a file name will be chosen based on the current scope. If not an
      empty string, should be unique within a given preprocessing function.

  Returns:
    Either:
    (A) A list of Tensors with the same length as `inputs`, representing the
        input Tensors that have been reduced by `fn` across instances and
        batches (if key_vocabulary_filename is None).
    (B) A Tensor with the filename where the key-value mapping is stored (if
        key_vocabulary_filename is not None).
  """
  for x in inputs:
    if not isinstance(x, tf.Tensor):
      raise TypeError('Expected a Tensor, but got %r' % x)
  if not np.isscalar(default_accumulator_value):
    raise TypeError('Expected a scalar, but got %r' %
                    default_accumulator_value)

  if output_dtypes is None:
    output_dtypes = [x.dtype for x in inputs]
  if reduce_instance_dims:
    # If reducing over all dimensions, result is scalar.
    output_shapes = [() for _ in inputs]
  else:
    # Reducing over batch dimensions.
    output_shapes = [
        (tuple(x.get_shape()) if x.get_shape().is_fully_defined() else None)
        for x in inputs
    ]
  combiner = NumPyCombiner(fn, default_accumulator_value,
                           [dtype.as_numpy_dtype for dtype in output_dtypes],
                           output_shapes)
  if key is None:
    return _apply_cacheable_combiner(combiner, *inputs)

  if key_vocabulary_filename is None:
    return _apply_cacheable_combiner_per_key(combiner, key, *inputs)

  return _apply_cacheable_combiner_per_key_large(
      combiner, _maybe_get_per_key_vocab_filename(key_vocabulary_filename),
      key, *inputs)
@common.log_api_use(common.ANALYZER_COLLECTION)
def min(  # pylint: disable=redefined-builtin
    x: common_types.TensorType,
    reduce_instance_dims: bool = True,
    name: Optional[str] = None) -> tf.Tensor:
  """Computes the minimum of the values of a `Tensor` over the whole dataset.

  In the case of a `CompositeTensor` missing values will be used in the
  return value: for float, NaN is used and for other dtypes the max is used.

  Args:
    x: A `Tensor` or `CompositeTensor`.
    reduce_instance_dims: By default collapses the batch and instance
      dimensions to arrive at a single scalar output. If False, only collapses
      the batch dimension and outputs a `Tensor` of the same shape as the
      input.
    name: (Optional) A name for this operation.

  Returns:
    A `Tensor` with the same type as `x`.

  Raises:
    TypeError: If the type of `x` is not supported.
  """
  with tf.compat.v1.name_scope(name, 'min'):
    return _min_and_max(x, reduce_instance_dims, name)[0]


@common.log_api_use(common.ANALYZER_COLLECTION)
def max(  # pylint: disable=redefined-builtin
    x: common_types.TensorType,
    reduce_instance_dims: bool = True,
    name: Optional[str] = None) -> tf.Tensor:
  """Computes the maximum of the values of a `Tensor` over the whole dataset.

  In the case of a `CompositeTensor` missing values will be used in the
  return value: for float, NaN is used and for other dtypes the min is used.

  Args:
    x: A `Tensor` or `CompositeTensor`.
    reduce_instance_dims: By default collapses the batch and instance
      dimensions to arrive at a single scalar output. If False, only collapses
      the batch dimension and outputs a vector of the same shape as the input.
    name: (Optional) A name for this operation.

  Returns:
    A `Tensor`. Has the same type as `x`.

  Raises:
    TypeError: If the type of `x` is not supported.
  """
  with tf.compat.v1.name_scope(name, 'max'):
    return _min_and_max(x, reduce_instance_dims, name)[1]
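# Hedged sketch (not part of the original module): how the analyzers above
# are typically used inside a preprocessing_fn. The feature name 'x' is an
# assumption for illustration; tft.scale_to_0_1 is the production path.
def _example_min_max_scale_sketch(inputs):
  """Sketch: rescale a dense float feature to [0, 1] using min/max."""
  x = tf.cast(inputs['x'], tf.float32)
  x_min = min(x)  # This module's `min`: a full-pass analyzer, not the builtin.
  x_max = max(x)  # Likewise, this module's `max`.
  return {'x_scaled': (x - x_min) / (x_max - x_min)}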
""" with tf.compat.v1.name_scope(name, 'min'): return _min_and_max(x, reduce_instance_dims, name)[0] @common.log_api_use(common.ANALYZER_COLLECTION) def max( # pylint: disable=redefined-builtin x: common_types.TensorType, reduce_instance_dims: bool = True, name: Optional[str] = None) -> tf.Tensor: """Computes the maximum of the values of a `Tensor` over the whole dataset. In the case of a `CompositeTensor` missing values will be used in return value: for float, NaN is used and for other dtypes the min is used. Args: x: A `Tensor` or `CompositeTensor`. reduce_instance_dims: By default collapses the batch and instance dimensions to arrive at a single scalar output. If False, only collapses the batch dimension and outputs a vector of the same shape as the input. name: (Optional) A name for this operation. Returns: A `Tensor`. Has the same type as `x`. Raises: TypeError: If the type of `x` is not supported. """ with tf.compat.v1.name_scope(name, 'max'): return _min_and_max(x, reduce_instance_dims, name)[1] def _min_and_max(x: common_types.TensorType, reduce_instance_dims: bool = True, name: Optional[str] = None) -> Tuple[tf.Tensor, tf.Tensor]: """Computes the min and max of the values of a `Tensor` or `CompositeTensor`. In the case of a `CompositeTensor` missing values will be used in return value: for float, NaN is used and for other dtypes the min is used. Args: x: A `Tensor` or `CompositeTensor`. reduce_instance_dims: By default collapses the batch and instance dimensions to arrive at a single scalar output. If False, only collapses the batch dimension and outputs a vector of the same shape as the input. name: (Optional) A name for this operation. Returns: Two `Tensor`s. Both have the same type as `x`. Raises: TypeError: If the type of `x` is not supported. """ with tf.compat.v1.name_scope(name, 'min_and_max'): output_dtype = x.dtype if (not reduce_instance_dims and isinstance(x, tf.SparseTensor) and x.dtype.is_floating): combine_fn = np.nanmax default_accumulator_value = (np.nan if x.dtype.is_floating else -output_dtype.max) elif not reduce_instance_dims and isinstance(x, tf.RaggedTensor): raise NotImplementedError( 'Elemenwise min_and_max does not support RaggedTensors.') else: combine_fn = np.max default_accumulator_value = (-np.inf if x.dtype.is_floating else -output_dtype.max) x_batch_minus_min, x_batch_max = tf_utils.reduce_batch_minus_min_and_max( x, reduce_instance_dims) minus_x_min, x_max = _numeric_combine( # pylint: disable=unbalanced-tuple-unpacking inputs=[x_batch_minus_min, x_batch_max], fn=combine_fn, default_accumulator_value=default_accumulator_value, reduce_instance_dims=reduce_instance_dims) return tf.cast(0 - minus_x_min, output_dtype), tf.cast(x_max, output_dtype) def _min_and_max_per_key( x: common_types.TensorType, key: common_types.TensorType, reduce_instance_dims: bool = True, key_vocabulary_filename: Optional[str] = None, name: Optional[str] = None ) -> Union[Tuple[tf.Tensor, tf.Tensor, tf.Tensor], tf.Tensor]: """Computes the min and max of the values of a `Tensor` or `CompositeTensor`. In the case of a `CompositeTensor` missing values will be used in return value: for float, NaN is used and for other dtypes the min is used. This function operates under the assumption that the size of the key set is small enough to fit in memory. Anything above a certain size larger is not guaranteed to be handled properly, but support for larger key sets may be available in a future version. Args: x: A `Tensor` or `CompositeTensor`. 
    key: A Tensor or `CompositeTensor` of dtype tf.string. If `x` is a
      `CompositeTensor`, `key` must exactly match `x` in everything except
      values.
    reduce_instance_dims: By default collapses the batch and instance
      dimensions to arrive at a single scalar output. If False, only collapses
      the batch dimension and outputs a vector of the same shape as the input.
      The False case is not currently supported for _min_and_max_per_key.
    key_vocabulary_filename: (Optional) The file name for the key-output
      mapping file. If None and `key` is provided, this combiner assumes the
      keys fit in memory and will not store the result in a file. If empty
      string, a file name will be chosen based on the current scope. If not an
      empty string, should be unique within a given preprocessing function.
    name: (Optional) A name for this operation.

  Returns:
    Either:
    (A) Three `Tensor`s. The first is the key vocab of type tf.string, and the
        second two have same type as `x` (if key_vocabulary_filename is None).
    (B) The filename where the key-value mapping is stored (if
        key_vocabulary_filename is not None).

  Raises:
    TypeError: If the type of `x` is not supported.
  """
  if key is None:
    raise ValueError('A key is required for _min_and_max_per_key')

  if not reduce_instance_dims:
    raise NotImplementedError('Per-key elementwise reduction not supported')

  with tf.compat.v1.name_scope(name, 'min_and_max_per_key'):
    output_dtype = x.dtype
    if (not reduce_instance_dims and
        isinstance(x, (tf.SparseTensor, tf.RaggedTensor)) and
        x.dtype.is_floating):
      combine_fn = np.nanmax
      default_accumulator_value = (np.nan if x.dtype.is_floating else
                                   -output_dtype.max)
    else:
      combine_fn = np.max
      default_accumulator_value = (-np.inf if x.dtype.is_floating else
                                   -output_dtype.max)

    key_vocab, x_batch_minus_min, x_batch_max = (
        tf_utils.reduce_batch_minus_min_and_max_per_key(x, key))

    key_values = _numeric_combine(  # pylint: disable=unbalanced-tuple-unpacking
        inputs=[x_batch_minus_min, x_batch_max],
        fn=combine_fn,
        default_accumulator_value=default_accumulator_value,
        reduce_instance_dims=reduce_instance_dims,
        key=key_vocab,
        key_vocabulary_filename=key_vocabulary_filename)

    if key_vocabulary_filename is not None:
      return key_values
    key, minus_x_min, x_max = key_values
    return (
        key, tf.cast(0 - minus_x_min, output_dtype),
        tf.cast(x_max, output_dtype))


def _sum_combine_fn_and_dtype(
    input_dtype: tf.DType
) -> Tuple[tf.DType, Callable[[np.ndarray], np.ndarray]]:
  output_dtype = _SUM_OUTPUT_DTYPE_MAP.get(input_dtype)
  if output_dtype is None:
    raise TypeError('Tensor type %r is not supported' % input_dtype)

  return output_dtype, functools.partial(
      np.sum, dtype=output_dtype.as_numpy_dtype)


@common.log_api_use(common.ANALYZER_COLLECTION)
def sum(  # pylint: disable=redefined-builtin
    x: common_types.TensorType,
    reduce_instance_dims: bool = True,
    name: Optional[str] = None) -> tf.Tensor:
  """Computes the sum of the values of a `Tensor` over the whole dataset.

  Args:
    x: A `Tensor` or `CompositeTensor`. Its type must be floating point
      (float{16|32|64}), integral (int{8|16|32|64}), or unsigned integral
      (uint{8|16}).
    reduce_instance_dims: By default collapses the batch and instance
      dimensions to arrive at a single scalar output. If False, only collapses
      the batch dimension and outputs a vector of the same shape as the input.
    name: (Optional) A name for this operation.

  Returns:
    A `Tensor` containing the sum. If `x` is float32 or float64, the sum will
    have the same type as `x`. If `x` is float16, the output is cast to
    float32. If `x` is integral, the output is cast to [u]int64. If `x` is
    sparse and reduce_instance_dims is False, 0 is returned where the column
    has no values across batches.

  Raises:
    TypeError: If the type of `x` is not supported.
  """
  with tf.compat.v1.name_scope(name, 'sum'):
    if reduce_instance_dims:
      x = tf.reduce_sum(input_tensor=tf_utils.get_values(x))
    elif isinstance(x, tf.SparseTensor):
      if x.dtype == tf.uint8 or x.dtype == tf.uint16:
        x = tf.cast(x, tf.int64)
      elif x.dtype == tf.uint32 or x.dtype == tf.uint64:
        raise TypeError('Data type %r is not supported' % x.dtype)
      x = tf.sparse.reduce_sum(x, axis=0)
    elif isinstance(x, tf.RaggedTensor):
      raise NotImplementedError(
          'Elementwise sum does not support RaggedTensors.')
    else:
      x = tf.reduce_sum(input_tensor=x, axis=0)
    output_dtype, sum_fn = _sum_combine_fn_and_dtype(x.dtype)
    return _numeric_combine(
        inputs=[x],
        fn=sum_fn,
        default_accumulator_value=0,
        reduce_instance_dims=reduce_instance_dims,
        output_dtypes=[output_dtype])[0]
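# Hedged sketch (not part of the original module): using `sum` to compute a
# fraction-of-total feature inside a preprocessing_fn. The feature name
# 'counts' is an assumption for illustration.
def _example_fraction_of_total_sketch(inputs):
  """Sketch: divide each value by the dataset-wide total."""
  counts = tf.cast(inputs['counts'], tf.float32)
  total = tf.cast(sum(counts), tf.float32)  # This module's full-pass `sum`.
  return {'counts_fraction': counts / total}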
def remove_leftmost_boundary(boundaries: tf.Tensor) -> tf.Tensor:
  """Removes the leftmost boundary from [1, None]-shaped `Tensor` of buckets."""
  return boundaries[:, 1:]


@common.log_api_use(common.ANALYZER_COLLECTION)
def histogram(x: common_types.TensorType,
              boundaries: Optional[Union[tf.Tensor, int]] = None,
              categorical: Optional[bool] = False,
              name: Optional[str] = None) -> Tuple[tf.Tensor, tf.Tensor]:
  """Computes a histogram over x, given the bin boundaries or bin count.

  Ex (1):
    counts, boundaries = histogram([0, 1, 0, 1, 0, 3, 0, 1], range(5))
    counts: [4, 3, 0, 1, 0]
    boundaries: [0, 1, 2, 3, 4]

  Ex (2):
    Can be used to compute class weights.
    counts, classes = histogram([0, 1, 0, 1, 0, 3, 0, 1], categorical=True)
    probabilities = counts / tf.reduce_sum(counts)
    class_weights = dict(
        (a.numpy(), 1.0 / b.numpy()) for a, b in zip(classes, probabilities))

  Args:
    x: A `Tensor` or `CompositeTensor`.
    boundaries: (Optional) A `Tensor` or `int` used to build the histogram;
      ignored if `categorical` is True. If possible, provide boundaries as
      multiple sorted values.  Defaults to 10 intervals over the 0-1 range, or
      finds the min/max if an int is provided (not recommended because
      multi-phase analysis is inefficient).
    categorical: (Optional) A `bool` that treats `x` as discrete values if
      true.
    name: (Optional) A name for this operation.

  Returns:
    counts: The histogram, as counts per bin.
    boundaries: A `Tensor` used to build the histogram representing boundaries.
  """
  with tf.compat.v1.name_scope(name, 'histogram'):
    x = tf.reshape(tf_utils.get_values(x), [-1])
    if categorical:
      x_dtype = x.dtype
      x = x if x_dtype == tf.string else tf.strings.as_string(x)
      elements, counts = count_per_key(x)
      if x_dtype != elements.dtype:
        elements = tf.strings.to_number(elements, tf.int64)
      return counts, elements

    if boundaries is None:
      boundaries = tf.range(11, dtype=tf.float32) / 10.0
    elif isinstance(boundaries, int) or (isinstance(boundaries, tf.Tensor) and
                                         boundaries.get_shape().ndims == 0):
      min_value, max_value = _min_and_max(x, True)
      boundaries = tf.linspace(
          tf.cast(min_value, tf.float32), tf.cast(max_value, tf.float32),
          tf.cast(boundaries, tf.int64))

    # Shift the boundaries slightly to account for floating point errors,
    # and due to the fact that the rightmost boundary is essentially ignored.
    boundaries = tf.expand_dims(tf.cast(boundaries, tf.float32), 0) - 0.0001

    bucket_indices = tf_utils.assign_buckets(
        tf.cast(x, tf.float32), remove_leftmost_boundary(boundaries))

    bucket_vocab, counts = count_per_key(tf.strings.as_string(bucket_indices))
    counts = tf_utils.reorder_histogram(bucket_vocab, counts,
                                        tf.size(boundaries) - 1)
    return counts, boundaries
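# Hedged sketch (not part of the original module): the class-weight recipe
# from Ex (2) above as a helper. It mirrors the docstring example, so it
# assumes a context where analyzer outputs can be evaluated eagerly (it calls
# .numpy()); names are illustrative.
def _example_class_weights_sketch(labels: tf.Tensor):
  """Sketch: derive inverse-frequency class weights from a histogram."""
  counts, classes = histogram(labels, categorical=True)
  probabilities = tf.cast(counts, tf.float32) / tf.cast(
      tf.reduce_sum(counts), tf.float32)
  return {
      class_id.numpy(): 1.0 / p.numpy()
      for class_id, p in zip(classes, probabilities)
  }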
@common.log_api_use(common.ANALYZER_COLLECTION)
def size(x: common_types.TensorType,
         reduce_instance_dims: bool = True,
         name: Optional[str] = None) -> tf.Tensor:
  """Computes the total size of instances in a `Tensor` over the whole dataset.

  Args:
    x: A `Tensor` or `CompositeTensor`.
    reduce_instance_dims: By default collapses the batch and instance
      dimensions to arrive at a single scalar output. If False, only collapses
      the batch dimension and outputs a vector of the same shape as the input.
    name: (Optional) A name for this operation.

  Returns:
    A `Tensor` of type int64.
  """
  with tf.compat.v1.name_scope(name, 'size'):
    # Note: Calling `sum` defined in this module, not the builtin.
    if isinstance(x, tf.SparseTensor):
      ones_like_x = tf.SparseTensor(
          indices=x.indices,
          values=tf.ones_like(x.values, tf.int64),
          dense_shape=x.dense_shape)
    else:
      ones_like_x = tf.ones_like(x, dtype=tf.int64)
    return sum(ones_like_x, reduce_instance_dims)


@common.log_api_use(common.ANALYZER_COLLECTION)
def count_per_key(key: common_types.TensorType,
                  key_vocabulary_filename: Optional[str] = None,
                  name: Optional[str] = None):
  """Computes the count of each element of a `Tensor`.

  Args:
    key: A Tensor or `CompositeTensor` of dtype tf.string or tf.int.
    key_vocabulary_filename: (Optional) The file name for the key-output
      mapping file. If None and `key` is provided, this combiner assumes the
      keys fit in memory and will not store the result in a file. If empty
      string, a file name will be chosen based on the current scope. If not an
      empty string, should be unique within a given preprocessing function.
    name: (Optional) A name for this operation.

  Returns:
    Either:
    (A) Two `Tensor`s: one the key vocab with dtype of input; the other the
        count for each key, dtype tf.int64 (if key_vocabulary_filename is
        None).
    (B) The filename where the key-value mapping is stored (if
        key_vocabulary_filename is not None).

  Raises:
    TypeError: If the type of `key` is not supported.
  """
  with tf.compat.v1.name_scope(name, 'count_per_key'):
    key_dtype = key.dtype
    batch_keys, batch_counts = tf_utils.reduce_batch_count_per_key(key)

    output_dtype, sum_fn = _sum_combine_fn_and_dtype(tf.int64)
    numeric_combine_result = _numeric_combine(
        inputs=[batch_counts],
        fn=sum_fn,
        default_accumulator_value=0,
        reduce_instance_dims=True,
        output_dtypes=[output_dtype],
        key=batch_keys,
        key_vocabulary_filename=key_vocabulary_filename)

    if key_vocabulary_filename is not None:
      return numeric_combine_result
    keys, counts = numeric_combine_result
    if key_dtype is not tf.string:
      keys = tf.strings.to_number(keys, key_dtype)
    return keys, counts
      If False, only collapses the batch dimension and outputs a vector of the
      same shape as the input.
    name: (Optional) A name for this operation.
    output_dtype: (Optional) If not None, casts the output tensor to this type.

  Returns:
    A `Tensor` containing the mean. If `x` is floating point, the mean will
    have the same type as `x`. If `x` is integral, the output is cast to
    float32. NaNs and infinite input values are ignored.

  Raises:
    TypeError: If the type of `x` is not supported.
  """
  with tf.compat.v1.name_scope(name, 'mean'):
    return _mean_and_var(x, reduce_instance_dims, output_dtype)[0]


@common.log_api_use(common.ANALYZER_COLLECTION)
def var(x: common_types.TensorType,
        reduce_instance_dims: bool = True,
        name: Optional[str] = None,
        output_dtype: Optional[tf.DType] = None) -> tf.Tensor:
  """Computes the variance of the values of a `Tensor` over the whole dataset.

  Uses the biased variance (0 delta degrees of freedom), as given by
  sum((x - mean(x))**2) / length(x).

  Args:
    x: `Tensor` or `CompositeTensor`. Its type must be floating point
      (float{16|32|64}), or integral ([u]int{8|16|32|64}).
    reduce_instance_dims: By default collapses the batch and instance
      dimensions to arrive at a single scalar output. If False, only collapses
      the batch dimension and outputs a vector of the same shape as the input.
    name: (Optional) A name for this operation.
    output_dtype: (Optional) If not None, casts the output tensor to this type.

  Returns:
    A `Tensor` containing the variance. If `x` is floating point, the variance
    will have the same type as `x`. If `x` is integral, the output is cast to
    float32. NaNs and infinite input values are ignored.

  Raises:
    TypeError: If the type of `x` is not supported.
  """
  with tf.compat.v1.name_scope(name, 'var'):
    return _mean_and_var(x, reduce_instance_dims, output_dtype)[1]


def _mean_and_var(x: common_types.TensorType,
                  reduce_instance_dims: bool = True,
                  output_dtype: Optional[tf.DType] = None):
  """More efficient combined `mean` and `var`. See `var`."""
  if output_dtype is None:
    output_dtype = _FLOAT_OUTPUT_DTYPE_MAP.get(x.dtype)
    if output_dtype is None:
      raise TypeError('Tensor type %r is not supported' % x.dtype)
  if not reduce_instance_dims and isinstance(x, tf.RaggedTensor):
    raise NotImplementedError(
        'Elementwise mean_and_var does not support RaggedTensors.')

  with tf.compat.v1.name_scope('mean_and_var'):
    x = tf.cast(x, output_dtype)
    x_count, x_mean, x_variance = (
        tf_utils.reduce_batch_count_mean_and_var(x, reduce_instance_dims))
    combine_inputs = _WeightedMeanAndVarAccumulator(
        count=x_count,
        mean=x_mean,
        variance=x_variance,
        weight=tf.zeros([], tf.float32))
    output_shape = ()
    if not reduce_instance_dims:
      # We need to use tf.expand_dims to artificially add a batch dimension.
      output_shape = _get_output_shape_from_input(
          tf.expand_dims(x_count, axis=0))
    x_mean, x_var = _apply_cacheable_combiner(
        WeightedMeanAndVarCombiner(output_dtype.as_numpy_dtype, output_shape),
        *combine_inputs)
  return x_mean, x_var


@common.log_api_use(common.ANALYZER_COLLECTION)
def tukey_location(x: common_types.TensorType,
                   reduce_instance_dims: Optional[bool] = True,
                   output_dtype: Optional[tf.DType] = None,
                   name: Optional[str] = None) -> tf.Tensor:
  """Computes the location of the values of a `Tensor` over the whole dataset.

  This computes the location of x, assuming a Tukey HH distribution, i.e.
  (x - tukey_location) / tukey_scale is a Tukey HH distribution with
  parameters tukey_h_params. See the following publication for the definition
  of the Tukey HH distribution:

  Todd C. Headrick, and Mohan D. Pant.
"Characterizing Tukey h and hh-Distributions through L-Moments and the L-Correlation," ISRN Applied Mathematics, vol. 2012, 2012. doi:10.5402/2012/980153 Args: x: A `Tensor` or `CompositeTensor`. Its type must be floating point (float{16|32|64}), or integral ([u]int{8|16|32|64}). reduce_instance_dims: By default collapses the batch and instance dimensions to arrive at a single scalar output. If False, only collapses the batch dimension and outputs a vector of the same shape as the input. output_dtype: (Optional) If not None, casts the output tensor to this type. name: (Optional) A name for this operation. Returns: A `Tensor` containing the location. If `x` is floating point, the location will have the same type as `x`. If `x` is integral, the output is cast to float32. Raises: TypeError: If the type of `x` is not supported. """ with tf.compat.v1.name_scope(name, 'tukey_location'): return _tukey_parameters(x, reduce_instance_dims, output_dtype)[0] @common.log_api_use(common.ANALYZER_COLLECTION) def tukey_scale(x: common_types.TensorType, reduce_instance_dims: Optional[bool] = True, output_dtype: Optional[tf.DType] = None, name: Optional[str] = None) -> tf.Tensor: """Computes the scale of the values of a `Tensor` over the whole dataset. This computes the scale of x, assuming a Tukey HH distribution, i.e. (x - tukey_location) / tukey_scale is a Tukey HH distribution with parameters tukey_h_params. See the following publication for the definition of the Tukey HH distribution: Todd C. Headrick, and Mohan D. Pant. "Characterizing Tukey h and hh-Distributions through L-Moments and the L-Correlation," ISRN Applied Mathematics, vol. 2012, 2012. doi:10.5402/2012/980153 Args: x: A `Tensor` or `CompositeTensor`. Its type must be floating point (float{16|32|64}), or integral ([u]int{8|16|32|64}). reduce_instance_dims: By default collapses the batch and instance dimensions to arrive at a single scalar output. If False, only collapses the batch dimension and outputs a vector of the same shape as the input. output_dtype: (Optional) If not None, casts the output tensor to this type. name: (Optional) A name for this operation. Returns: A `Tensor` containing the scale. If `x` is floating point, the location will have the same type as `x`. If `x` is integral, the output is cast to float32. Raises: TypeError: If the type of `x` is not supported. """ with tf.compat.v1.name_scope(name, 'tukey_scale'): return _tukey_parameters(x, reduce_instance_dims, output_dtype)[1] @common.log_api_use(common.ANALYZER_COLLECTION) def tukey_h_params(x: common_types.TensorType, reduce_instance_dims: bool = True, output_dtype: Optional[tf.DType] = None, name: Optional[str] = None) -> Tuple[tf.Tensor, tf.Tensor]: """Computes the h parameters of the values of a `Tensor` over the dataset. This computes the parameters (hl, hr) of the samples, assuming a Tukey HH distribution, i.e. (x - tukey_location) / tukey_scale is a Tukey HH distribution with parameters hl (left parameter) and hr (right parameter). See the following publication for the definition of the Tukey HH distribution: Todd C. Headrick, and Mohan D. Pant. "Characterizing Tukey h and hh-Distributions through L-Moments and the L-Correlation," ISRN Applied Mathematics, vol. 2012, 2012. doi:10.5402/2012/980153 Args: x: A `Tensor` or `CompositeTensor`. Its type must be floating point (float{16|32|64}), or integral ([u]int{8|16|32|64}). reduce_instance_dims: By default collapses the batch and instance dimensions to arrive at a single scalar output. 
If False, only collapses the batch dimension and outputs a vector of the same shape as the input. output_dtype: (Optional) If not None, casts the output tensor to this type. name: (Optional) A name for this operation. Returns: The tuple (hl, hr) containing two `Tensor` instances with the hl and hr parameters. If `x` is floating point, each parameter will have the same type as `x`. If `x` is integral, the output is cast to float32. Raises: TypeError: If the type of `x` is not supported. """ with tf.compat.v1.name_scope(name, 'tukey_h_params'): return _tukey_parameters(x, reduce_instance_dims, output_dtype)[2:] def _tukey_parameters( x: common_types.TensorType, reduce_instance_dims: bool = True, output_dtype: Optional[tf.DType] = None ) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]: """Efficient computation of L-moments.""" if output_dtype is None: output_dtype = _FLOAT_OUTPUT_DTYPE_MAP.get(x.dtype) if output_dtype is None: raise TypeError('Tensor type %r is not supported' % x.dtype) with tf.compat.v1.name_scope('tukey_parameters'): x = tf.cast(x, output_dtype) (count_l1, l1, count_l2, l2, count_l3, l3, count_l4, l4) = ( tf_utils.reduce_batch_count_l_moments(x, reduce_instance_dims)) combine_inputs = _LMomentsAccumulator( count_l1=count_l1, count_l2=count_l2, count_l3=count_l3, count_l4=count_l4, l1=l1, l2=l2, l3=l3, l4=l4) output_shape = () if not reduce_instance_dims: output_shape = _get_output_shape_from_input(x) x_loc, x_scale, hl_param, hr_param = _apply_cacheable_combiner( _LMomentsCombiner(output_dtype.as_numpy_dtype, output_shape), *combine_inputs) return x_loc, x_scale, hl_param, hr_param def _mean_and_var_per_key( x: common_types.TensorType, key: common_types.TensorType, reduce_instance_dims: bool = True, output_dtype: Optional[tf.DType] = None, key_vocabulary_filename: Optional[str] = None ) -> Union[Tuple[tf.Tensor, tf.Tensor, tf.Tensor], tf.Tensor, common_types.Asset]: """`mean_and_var` by group, specified by key. Args: x: A `Tensor` or `CompositeTensor`. key: A Tensor or `CompositeTensor` of dtype tf.string. If `x` is a `CompositeTensor`, `key` must exactly match `x` in everything except values. reduce_instance_dims: (Optional) By default collapses the batch and instance dimensions to arrive at a single scalar output. The False case is not currently supported for _mean_and_var_per_key. output_dtype: (Optional) Desired output dtype, otherwise inferred. key_vocabulary_filename: (Optional) The file name for the key-output mapping file. If None and key are provided, this combiner assumes the keys fit in memory and will not store the result in a file. If empty string, a file name will be chosen based on the current scope. If not an empty string, should be unique within a given preprocessing function. Returns: Either: (A) Three `Tensor`s. The first is the key vocab of type tf.string, and the second two have same type as `x` (if key_vocabulary_filename is None). (B) The filename where the key-value mapping is stored (if key_vocabulary_filename is not None). NaNs and infinite input values are ignored. 
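
  For example (an illustrative sketch; the order of the returned keys is not
  guaranteed here): x = [1., 2., 3., 4.] with key = ['a', 'a', 'b', 'b']
  yields keys ['a', 'b'], means [1.5, 3.5], and biased variances
  [0.25, 0.25].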
""" if output_dtype is None: output_dtype = _FLOAT_OUTPUT_DTYPE_MAP.get(x.dtype) if output_dtype is None: raise TypeError('Tensor type %r is not supported' % x.dtype) if key is None: raise ValueError('A non-None key is required for _mean_and_var_per_key') if not reduce_instance_dims: raise NotImplementedError('Per-key elementwise reduction not supported') with tf.compat.v1.name_scope('mean_and_var_per_key'): x = tf.cast(x, output_dtype) key_vocab, key_counts, key_means, key_variances = ( tf_utils.reduce_batch_count_mean_and_var_per_key( x, key, reduce_instance_dims=reduce_instance_dims)) output_shape = () combine_inputs = _WeightedMeanAndVarAccumulator( count=key_counts, mean=key_means, variance=key_variances, weight=tf.zeros_like(key_means, tf.float32)) combiner = WeightedMeanAndVarCombiner(output_dtype.as_numpy_dtype, output_shape) if key_vocabulary_filename is not None: key_vocabulary_filename = _maybe_get_per_key_vocab_filename( key_vocabulary_filename) return _apply_cacheable_combiner_per_key_large( combiner, key_vocabulary_filename, key_vocab, *combine_inputs) key, key_mean, key_var = _apply_cacheable_combiner_per_key( combiner, key_vocab, *combine_inputs) return key, key_mean, key_var class _WeightedMeanAndVarAccumulator( tfx_namedtuple.namedtuple('WeightedMeanAndVarAccumulator', ['count', 'mean', 'variance', 'weight'])): """Container for WeightedMeanAndVarCombiner intermediate values.""" @classmethod def make_nan_to_num(cls, counts, means, variances, weights, compute_variance=False, compute_weighted=True): """Util function to replace NaN with 0 and inf with large finite numbers.""" if compute_variance: variances = np.nan_to_num(variances, copy=True) if compute_weighted: weights = np.nan_to_num(weights, copy=True) return cls( np.array(counts), np.nan_to_num(means, copy=True), variances, weights) class WeightedMeanAndVarCombiner(analyzer_nodes.Combiner): """Combines a PCollection of accumulators to compute mean and variance.""" accumulator_class = _WeightedMeanAndVarAccumulator def __init__(self, output_numpy_dtype, output_shape: Optional[Collection[Optional[int]]] = None, compute_variance: bool = True, compute_weighted: bool = False): """Init method for WeightedMeanAndVarCombiner. Args: output_numpy_dtype: A numpy dtype that the outputs are cast to. output_shape: The shape of the resulting Tensors. compute_variance: A bool indicating whether or not a variance should be calculated and returned. compute_weighted: A bool indicating whether or not weights are provided and all calculations should be weighted. """ self._output_numpy_dtype = output_numpy_dtype self._output_shape = output_shape self._compute_variance = compute_variance self._compute_weighted = compute_weighted if self._compute_variance and self._compute_weighted: raise ValueError( 'WeightedMeanAndVarCombiner does not yet support weighted variance') if self._output_shape is None: raise ValueError('An output_shape must be provided.') def create_accumulator(self) -> _WeightedMeanAndVarAccumulator: """Create an accumulator with all zero entries.""" # TODO(b/131325061): Determine whether counts/weights should always be # scalars or if we want to continue supporting multi-dimensional arrays. initial_count, initial_weight = np.array(0), np.array(0.) # If we know the exact shape, initialize accumulator values with zeros of # the exact shape. For unknown dimensions, initialize with a 1D 0 array. 
output_shape = [dim if dim is not None else 0 for dim in self._output_shape] initial_mean, initial_var = np.zeros(output_shape), np.zeros(output_shape) return _WeightedMeanAndVarAccumulator(initial_count, initial_mean, initial_var, initial_weight) def add_input( self, accumulator: _WeightedMeanAndVarAccumulator, batch_values: _WeightedMeanAndVarAccumulator ) -> _WeightedMeanAndVarAccumulator: """Composes an accumulator from batch_values and calls merge_accumulators. Args: accumulator: The `_WeightedMeanAndVarAccumulator` computed so far. batch_values: A `_WeightedMeanAndVarAccumulator` for the current batch. Returns: A `_WeightedMeanAndVarAccumulator` which is accumulator and batch_values combined. """ new_accumulator = _WeightedMeanAndVarAccumulator(*batch_values) return self._combine_mean_and_var_accumulators(accumulator, new_accumulator) def merge_accumulators( self, accumulators: List[_WeightedMeanAndVarAccumulator] ) -> _WeightedMeanAndVarAccumulator: """Merges several `_WeightedMeanAndVarAccumulator`s to a single accumulator. Args: accumulators: A list of `_WeightedMeanAndVarAccumulator`s. Returns: The sole merged `_WeightedMeanAndVarAccumulator`. """ accumulators = iter(accumulators) result = next(accumulators) for accumulator in accumulators: result = self._combine_mean_and_var_accumulators(result, accumulator) return result def extract_output( self, accumulator: _WeightedMeanAndVarAccumulator ) -> Union[Tuple[float, float], _WeightedMeanAndVarAccumulator]: """Converts an accumulator into the output accumulator or (mean, var) tuple. Args: accumulator: the final `_WeightedMeanAndVarAccumulator` value. Returns: A _WeightedMeanAndVarAccumulator or a 2-tuple composed of (mean, var). """ if self._compute_variance and not self._compute_weighted: return (self._output_numpy_dtype(accumulator.mean), self._output_numpy_dtype(accumulator.variance)) else: return _WeightedMeanAndVarAccumulator( np.int64(accumulator.count), self._output_numpy_dtype(accumulator.mean), self._output_numpy_dtype(accumulator.variance), self._output_numpy_dtype(accumulator.weight)) def output_tensor_infos(self) -> List[analyzer_nodes.TensorInfo]: # The output is (mean, var). if self._compute_variance and not self._compute_weighted: return [ analyzer_nodes.TensorInfo( tf.as_dtype(self._output_numpy_dtype), self._output_shape, None) ] * 2 else: return [ analyzer_nodes.TensorInfo( tf.as_dtype(np.int64), self._output_shape, None), analyzer_nodes.TensorInfo( tf.as_dtype(self._output_numpy_dtype), self._output_shape, None), analyzer_nodes.TensorInfo( tf.as_dtype(self._output_numpy_dtype), self._output_shape, None), analyzer_nodes.TensorInfo( tf.as_dtype(self._output_numpy_dtype), self._output_shape, None) ] def _combine_mean_and_var_accumulators( self, a: _WeightedMeanAndVarAccumulator, b: _WeightedMeanAndVarAccumulator) -> _WeightedMeanAndVarAccumulator: """Combines two mean and var accumulators. Args: a: A _WeightedMeanAndVarAccumulator. b: A _WeightedMeanAndVarAccumulator. Returns: A _WeightedMeanAndVarAccumulator computed as the combination of a and b. """ # NaNs get preserved through division by a.count + b.count. a = _WeightedMeanAndVarAccumulator.make_nan_to_num( *a, compute_variance=self._compute_variance, compute_weighted=self._compute_weighted) b = _WeightedMeanAndVarAccumulator.make_nan_to_num( *b, compute_variance=self._compute_variance, compute_weighted=self._compute_weighted) # a.count >= b.count following this logic. 
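    # Numeric check of the unweighted update below (illustrative): combining
    # a = (count=2, mean=1.0) with b = (count=2, mean=3.0) gives
    # combined_mean = 1.0 + (2 / 4) * (3.0 - 1.0) = 2.0, the mean of all four
    # underlying values, without ever forming the potentially huge raw sums.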
    if np.sum(a.count) < np.sum(b.count):
      a, b = b, a
    if np.sum(a.count) == 0:
      return b
    a_count, b_count = _pad_arrays_to_match(a.count, b.count)
    a_mean, b_mean = _pad_arrays_to_match(a.mean, b.mean)
    if self._compute_variance:
      a_variance, b_variance = _pad_arrays_to_match(a.variance, b.variance)
    if self._compute_weighted:
      a_weight, b_weight = _pad_arrays_to_match(a.weight, b.weight)
    combined_total = a_count + b_count

    # Mean and variance update formulas which are more numerically stable when
    # a and b vary in magnitude.
    if self._compute_weighted:
      combined_weights_mean = (
          a_weight + (b_count / combined_total) * (b_weight - a_weight))
      combined_mean = a_mean + (b_count * b_weight /
                                (combined_total * combined_weights_mean)) * (
                                    b_mean - a_mean)
    else:
      combined_weights_mean = np.ones(shape=combined_total.shape)
      combined_mean = a_mean + (b_count / combined_total * (b_mean - a_mean))
    if self._compute_variance:
      # TODO(zoyahav): Add an option for weighted variance if needed.
      assert not self._compute_weighted
      combined_variance = (
          a_variance + (b_count / combined_total) *
          (b_variance - a_variance + ((b_mean - combined_mean) *
                                      (b_mean - a_mean))))
    else:
      combined_variance = np.zeros(combined_mean.shape)
    return _WeightedMeanAndVarAccumulator(combined_total, combined_mean,
                                          combined_variance,
                                          combined_weights_mean)


# TODO(b/165020671): Optimize padding to save up to 15% computing resource.
def _pad_arrays_to_match(a, b):
  """Pads the ndarray values to match dimensions as needed.

  If the dimensions of the ndarray values differ, we pad the smaller of the
  two arrays with zeros to be the same shape as the larger. In other words,
  the missing accumulator indices are assumed to be zero, and combining
  a = [1, 2, 3] with b = [1, 2] is equivalent to combining with b = [1, 2, 0].

  Args:
    a: An ndarray to be matched in shape with b.
    b: An ndarray to be matched in shape with a.

  Returns:
    a: a padded to the same dimensions as b.
    b: b padded to the same dimensions as a.
  """
  if a.shape == b.shape:
    return a, b
  padding_a, padding_b = [], []
  for a_dim, b_dim in zip(a.shape, b.shape):
    a_pad = b_pad = (0, 0)
    delta = a_dim - b_dim
    if delta > 0:
      b_pad = (0, abs(delta))
    elif delta < 0:
      a_pad = (0, abs(delta))
    padding_a.append(a_pad)
    padding_b.append(b_pad)
  if padding_a:
    a = np.pad(a, padding_a, mode='constant')
  if padding_b:
    b = np.pad(b, padding_b, mode='constant')
  return a, b


class _LMomentsAccumulator(
    tfx_namedtuple.namedtuple('LMomentsAccumulator', [
        'count_l1', 'count_l2', 'count_l3', 'count_l4', 'l1', 'l2', 'l3', 'l4'
    ])):
  """Container for _LMomentsCombiner intermediate values."""

  @classmethod
  def make_nan_to_num(cls, count_l1, count_l2, count_l3, count_l4, l1, l2, l3,
                      l4):
    return cls(
        np.array(count_l1), np.array(count_l2), np.array(count_l3),
        np.array(count_l4), np.nan_to_num(l1), np.nan_to_num(l2),
        np.nan_to_num(l3), np.nan_to_num(l4))

  def __reduce__(self):
    return self.__class__, tuple(self)


class _LMomentsCombiner(analyzer_nodes.Combiner):
  """Combines a PCollection of accumulators to compute L-moments."""

  accumulator_class = _LMomentsAccumulator

  def __init__(self, output_numpy_dtype, output_shape):
    """Init method for _LMomentsCombiner.

    Args:
      output_numpy_dtype: A numpy dtype that the outputs are cast to.
      output_shape: The shape of the resulting Tensors.
    """
    self._output_numpy_dtype = output_numpy_dtype
    self._output_shape = output_shape

  def create_accumulator(self):
    """Create an accumulator with all zero entries."""
    # If we know the exact shape, initialize accumulator values with zeros of
    # the exact shape.
    # For unknown dimensions, initialize with a 1D 0 array (this accumulator
    # will be discarded by _combine_accumulators).
    output_shape = () if None in self._output_shape else self._output_shape
    initial_moment = np.zeros(output_shape, dtype=self._output_numpy_dtype)
    initial_count = np.zeros(output_shape, dtype=self._output_numpy_dtype)
    return _LMomentsAccumulator(
        initial_count, initial_count, initial_count, initial_count,
        initial_moment, initial_moment, initial_moment, initial_moment)

  def add_input(self, accumulator, batch_values):
    """Composes an accumulator from batch_values and calls merge_accumulators.

    Args:
      accumulator: The `_LMomentsAccumulator` computed so far.
      batch_values: A `_LMomentsAccumulator` for the current batch.

    Returns:
      A `_LMomentsAccumulator` which is accumulator and batch_values combined.
    """
    new_accumulator = _LMomentsAccumulator(*batch_values)
    return self._combine_accumulators(accumulator, new_accumulator)

  def merge_accumulators(self, accumulators):
    """Merges several `_LMomentsAccumulator`s into a single accumulator.

    Args:
      accumulators: A list of `_LMomentsAccumulator`s.

    Returns:
      The sole merged `_LMomentsAccumulator`.
    """
    accumulators = iter(accumulators)
    result = next(accumulators)
    for accumulator in accumulators:
      result = self._combine_accumulators(result, accumulator)
    return result

  def extract_output(self, accumulator):
    """Converts an accumulator into the output (loc, scale, hl, hr) tuple.

    Estimates the parameters of a Tukey HH distribution, given estimates of
    the first four L-moments. The parameters are: location, scale, hl, and hr.
    If x is the input sample, then (x - location) / scale is distributed
    according to the Tukey HH distribution with parameters hl (left parameter)
    and hr (right parameter).

    Args:
      accumulator: the final `_LMomentsAccumulator` value.

    Returns:
      A 4-tuple composed of (location, scale, hl, hr).
    """
    # To compute kurtosis, we need positive scale and at least one quadruplet.
    # If this is not the case, L-skewness and L-kurtosis are set to zero, which
    # gives hl=0, hr=0 and samples are treated as in the Gaussian case.
    valid_scale = accumulator.l2 > 0.0
    valid_kurtosis = np.logical_and(valid_scale, accumulator.count_l4 > 0.0)
    l_skewness = np.true_divide(
        accumulator.l3,
        accumulator.l2,
        where=valid_kurtosis,
        out=np.zeros_like(accumulator.l3))
    l_kurtosis = np.true_divide(
        accumulator.l4,
        accumulator.l2,
        where=valid_kurtosis,
        out=np.zeros_like(accumulator.l4))
    l_skewness_and_kurtosis = np.stack((l_skewness, l_kurtosis), axis=0)
    h_params = np.apply_along_axis(gaussianization.compute_tukey_hh_params, 0,
                                   l_skewness_and_kurtosis)
    hh_l_mean, hh_l_scale = gaussianization.tukey_hh_l_mean_and_scale(h_params)
    scale = np.true_divide(
        accumulator.l2,
        hh_l_scale,
        where=valid_scale,
        out=np.ones_like(accumulator.l2))
    loc = accumulator.l1 - scale * hh_l_mean
    hl = h_params[0, ...]
    hr = h_params[1, ...]
    return [self._output_numpy_dtype(x) for x in [loc, scale, hl, hr]]

  def output_tensor_infos(self):
    # The output is (loc, scale, hl, hr).
    return [
        analyzer_nodes.TensorInfo(
            tf.as_dtype(self._output_numpy_dtype), self._output_shape, None)
    ] * 4

  @property
  def accumulator_coder(self):
    # TODO(b/170510451): Re-enable caching for this Combiner.
    return None

  def _combine_accumulators(self, a, b):
    """Combines two accumulators.

    Args:
      a: A _LMomentsAccumulator.
      b: A _LMomentsAccumulator.

    Returns:
      A _LMomentsAccumulator computed as the combination of a and b.
    """
    # NaNs get preserved through division by a.count + b.count.
    a = _LMomentsAccumulator.make_nan_to_num(*a)
    b = _LMomentsAccumulator.make_nan_to_num(*b)

    # If one accumulator is empty, return the other.
    if np.sum(a.count_l1) < np.sum(b.count_l1):
      a, b = b, a
    if np.sum(b.count_l1) == 0:
      return a

    a_count_l1, b_count_l1 = _pad_arrays_to_match(a.count_l1, b.count_l1)
    a_l1, b_l1 = _pad_arrays_to_match(a.l1, b.l1)
    a_count_l2, b_count_l2 = _pad_arrays_to_match(a.count_l2, b.count_l2)
    a_l2, b_l2 = _pad_arrays_to_match(a.l2, b.l2)
    a_count_l3, b_count_l3 = _pad_arrays_to_match(a.count_l3, b.count_l3)
    a_l3, b_l3 = _pad_arrays_to_match(a.l3, b.l3)
    a_count_l4, b_count_l4 = _pad_arrays_to_match(a.count_l4, b.count_l4)
    a_l4, b_l4 = _pad_arrays_to_match(a.l4, b.l4)

    combined_count_l1 = a_count_l1 + b_count_l1
    combined_count_l2 = a_count_l2 + b_count_l2
    combined_count_l3 = a_count_l3 + b_count_l3
    combined_count_l4 = a_count_l4 + b_count_l4
    combined_l1 = (a_l1 + np.true_divide(
        b_count_l1,
        combined_count_l1,
        where=combined_count_l1 > 0,
        out=np.zeros_like(a_l1)) * (b_l1 - a_l1))
    combined_l2 = (a_l2 + np.true_divide(
        b_count_l2,
        combined_count_l2,
        where=combined_count_l2 > 0,
        out=np.zeros_like(a_l2)) * (b_l2 - a_l2))
    combined_l3 = (a_l3 + np.true_divide(
        b_count_l3,
        combined_count_l3,
        where=combined_count_l3 > 0,
        out=np.zeros_like(a_l3)) * (b_l3 - a_l3))
    combined_l4 = (a_l4 + np.true_divide(
        b_count_l4,
        combined_count_l4,
        where=combined_count_l4 > 0,
        out=np.zeros_like(a_l4)) * (b_l4 - a_l4))
    return _LMomentsAccumulator(
        combined_count_l1, combined_count_l2, combined_count_l3,
        combined_count_l4, combined_l1, combined_l2, combined_l3, combined_l4)


def sanitized_vocab_filename(filename=None, prefix=None):
  """Generates a sanitized filename either from the given filename or the scope.

  If a filename is specified, returns a sanitized version of it. Otherwise
  generates a filename from the current scope. Note that it is the caller's
  responsibility to ensure that filenames are unique across calls within a
  given preprocessing function.

  Args:
    filename: (Optional) A desired filename; non-alphanumeric characters are
      replaced with underscores and whitespace with hyphens.
    prefix: (Optional) A prefix to use for the name of the vocab file, if
      filename is not given.

  Returns:
    A valid filename.

  Raises:
    ValueError: If neither filename nor prefix is specified, or if both are
      specified.
  """
  if filename is None and prefix is None:
    raise ValueError('Both filename and prefix cannot be None.')
  if filename is not None and prefix is not None:
    raise ValueError('Only one of filename or prefix can be specified.')
  if filename is None:
    filename = prefix + tf.compat.v1.get_default_graph().get_name_scope()
  # Replace non-alpha characters (excluding whitespaces) with '_'.
  filename = re.sub(r'[^\w\s-]', '_', filename).strip()
  # Replace whitespaces with '-'.
  return re.sub(r'[-\s]+', '-', filename)


def _get_vocab_filename(vocab_filename, store_frequency):
  """Returns a sanitized vocabulary filename with appropriate prefix applied.

  Args:
    vocab_filename: The file name for the vocabulary file. If None, the
      "vocabulary" scope name in the context of this graph will be used as the
      file name.
    store_frequency: A bool that is true when the vocabulary for which this
      generates a filename stores term frequency. False otherwise.

  Returns:
    A valid filename.
  """
  if vocab_filename is not None:
    prefix = None
  elif store_frequency:
    prefix = VOCAB_FREQUENCY_FILENAME_PREFIX
  else:
    prefix = VOCAB_FILENAME_PREFIX

  # Make the file name path safe.
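  # For example (illustrative values), with prefix 'vocab_' and current scope
  # 'my scope!', the derived name 'vocab_my scope!' sanitizes to
  # 'vocab_my-scope_': non-alphanumeric characters become '_' and whitespace
  # runs become '-'.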
  return sanitized_vocab_filename(vocab_filename, prefix=prefix)


def _maybe_get_per_key_vocab_filename(key_vocabulary_filename):
  if key_vocabulary_filename == '':  # pylint: disable=g-explicit-bool-comparison
    key_vocabulary_filename = _get_vocab_filename(
        vocab_filename=None, store_frequency=False)
  return key_vocabulary_filename


# TODO(b/116308354): frequency_threshold is misleading since this threshold can
# be applied to mutual information rather than frequency.
def _get_top_k_and_frequency_threshold(top_k, frequency_threshold):
  """Validates `top_k` and `frequency_threshold` and converts them to numbers."""
  if top_k is not None:
    top_k = int(top_k)
    if top_k <= 0:
      raise ValueError('top_k must be positive, but got: %r' % top_k)

  if frequency_threshold is not None:
    frequency_threshold = float(frequency_threshold)
    if frequency_threshold < 0:
      raise ValueError(
          'frequency_threshold must be non-negative, but got: %r' %
          frequency_threshold)
    elif frequency_threshold <= 1:
      # Note: this warning is misleading in the context where tokens are ranked
      # based on mutual information rather than frequency.
      tf.compat.v1.logging.warn(
          'frequency_threshold %d <= 1 is a no-op, use None instead.',
          frequency_threshold)
  return top_k, frequency_threshold


class _VocabOrderingType:
  """Class for all vocab ordering types."""
  # Orders vocabulary based on the simple frequency of the token.
  FREQUENCY = 1
  # Orders vocabulary based on the weighted frequency of the token.
  WEIGHTED_FREQUENCY = 2
  # Orders vocabulary based on the weighted mutual information of the token
  # with the label.
  WEIGHTED_MUTUAL_INFORMATION = 3
  # Experimental.
  WEIGHTED_LABELS = 4
  # Orders vocabulary based on the mutual information of the token with the
  # label, without weights.
  MUTUAL_INFORMATION = 5


def register_vocab(sanitized_filename: str,
                   vocabulary_size: Optional[tf.Tensor] = None,
                   vocabulary_key: Optional[str] = None,
                   file_format: common_types
                   .VocabularyFileFormatType = DEFAULT_VOCABULARY_FILE_FORMAT):
  """Registers the specified vocabulary within the asset map.

  Args:
    sanitized_filename: The sanitized filename of the vocabulary.
    vocabulary_size: The size of the vocabulary.
    vocabulary_key: The key of the vocabulary to use.
    file_format: The format of the vocabulary file (text or tfrecord_gzip).
  """
  if vocabulary_key is None:
    vocabulary_key = sanitized_filename
  filename = ('{}.tfrecord.gz'.format(sanitized_filename)
              if file_format == 'tfrecord_gzip' else sanitized_filename)
  annotators.annotate_asset(vocabulary_key, filename)
  if vocabulary_size is not None:
    annotators.annotate_vocab_size(vocabulary_key, vocabulary_size)


def get_empy_vocabulary_dummy_value(
    dtype: Union[tf.dtypes.DType, str]) -> Tuple[int, bytes]:
  """Returns a vocabulary entry to use in case of an empty vocabulary."""
  # TODO(b/62272023): Remove this workaround if/when fixed on tensorflow.
  # If the vocabulary is empty, add a dummy value with count one so that
  # the tensorflow index operations don't fail to initialize with empty
  # tensors downstream.
  dummy_value = (b'49d0cd50-04bb-48c0-bc6f-5b575dce351a'
                 if tf.dtypes.as_dtype(dtype) == tf.string else b'-1')
  return (1, dummy_value)


# TODO(KesterTong): Once multiple outputs are supported, return indices too.
# TODO(b/117796748): Add coverage key feature input as alternative to `key_fn`.
# TODO(tensorflow/community): The experimental fingerprint_shuffle argument is
# a workaround for the inability to appropriately rebalance sharded variables
# on TF 1.0.
# The following TF 2.0 proposal should address this issue in the future:
# https://github.com/tensorflow/community/blob/master/rfcs/20190116-embedding-partitioned-variable.md#goals
@common.log_api_use(common.ANALYZER_COLLECTION)
def vocabulary(
    x: common_types.TensorType,
    top_k: Optional[int] = None,
    frequency_threshold: Optional[int] = None,
    vocab_filename: Optional[str] = None,
    store_frequency: Optional[bool] = False,
    weights: Optional[tf.Tensor] = None,
    labels: Optional[tf.Tensor] = None,
    use_adjusted_mutual_info: bool = False,
    min_diff_from_avg: Optional[int] = None,
    coverage_top_k: Optional[int] = None,
    coverage_frequency_threshold: Optional[int] = None,
    key_fn: Optional[Callable[[Any], Any]] = None,
    fingerprint_shuffle: Optional[bool] = False,
    file_format: common_types
    .VocabularyFileFormatType = DEFAULT_VOCABULARY_FILE_FORMAT,
    name: Optional[str] = None) -> common_types.TemporaryAnalyzerOutputType:
  r"""Computes the unique values of a `Tensor` over the whole dataset.

  Computes the unique values taken by `x`, which can be a `Tensor` or
  `CompositeTensor` of any size. The unique values will be aggregated over all
  dimensions of `x` and all instances.

  If `file_format` is 'text' and one of the tokens contains the '\n' or '\r'
  characters or is empty, it will be discarded.

  If an integer `Tensor` is provided, its semantic type should be categorical,
  not continuous/numeric, since computing a vocabulary over a continuous
  feature is not appropriate.

  The unique values are sorted by decreasing frequency and then reverse
  lexicographical order (e.g. [('a', 5), ('c', 3), ('b', 3)]). This is true
  even if `x` is of a numerical dtype (e.g. [('3', 5), ('2', 3), ('111', 3)]).

  For large datasets it is highly recommended to either set
  frequency_threshold or top_k to control the size of the output, and also the
  run time of this operation.

  When labels are provided, we filter the vocabulary based on the relationship
  between the token's presence in a record and the label for that record,
  using (possibly adjusted) Mutual Information. Note: If labels are provided,
  the x input must be a unique set of tokens per record, as the semantics of
  the mutual information calculation depend on a multi-hot representation of
  the input. Having unique input tokens per row is advisable but not required
  for a frequency-based vocabulary.

  WARNING: The following is experimental and is still being actively worked
  on.

  Supply `key_fn` if you would like to generate a vocabulary with coverage
  over specific keys.

  A "coverage vocabulary" is the union of two vocabulary "arms". The "standard
  arm" of the vocabulary is equivalent to the one generated by the same
  function call with no coverage arguments. Adding coverage only appends
  additional entries to the end of the standard vocabulary.

  The "coverage arm" of the vocabulary is determined by taking the
  `coverage_top_k` most frequent unique terms per key. A term's key is
  obtained by applying `key_fn` to the term. Use
  `coverage_frequency_threshold` to lower bound the frequency of entries in
  the coverage arm of the vocabulary.

  Note this is currently implemented for the case where the key is contained
  within each vocabulary entry (b/117796748).

  Args:
    x: A categorical/discrete input `Tensor` or `CompositeTensor` with dtype
      tf.string or tf.int[8|16|32|64]. The inputs should generally be unique
      per row (i.e. a bag of words/ngrams representation).
    top_k: Limit the generated vocabulary to the first `top_k` elements. If
      set to None, the full vocabulary is generated.
    frequency_threshold: Limit the generated vocabulary only to elements whose
      absolute frequency is >= the supplied threshold. If set to None, the
      full vocabulary is generated. Absolute frequency means the number of
      occurrences of the element in the dataset, as opposed to the proportion
      of instances that contain that element.
    vocab_filename: The file name for the vocabulary file. If None, a file
      name will be chosen based on the current scope. If not None, should be
      unique within a given preprocessing function. NOTE: To make your
      pipelines resilient to implementation details, set `vocab_filename`
      explicitly whenever the vocabulary file is consumed by a downstream
      component.
    store_frequency: If True, frequency of the words is stored in the
      vocabulary file. In the case labels are provided, the mutual information
      is stored in the file instead. Each line in the file will be of the form
      'frequency word'. NOTE: if this is True then the computed vocabulary
      cannot be used with `tft.apply_vocabulary` directly, since frequencies
      are added to the beginning of each row of the vocabulary, which the
      mapper will not ignore.
    weights: (Optional) Weights `Tensor` for the vocabulary. It must have the
      same shape as x.
    labels: (Optional) Labels dense `Tensor` for the vocabulary. If provided,
      the vocabulary is calculated based on mutual information with the label,
      rather than frequency. The labels must have the same batch dimension as
      x. If x is sparse, labels should be a 1D tensor reflecting row-wise
      labels. If x is dense, labels can either be a 1D tensor of row-wise
      labels, or a dense tensor of the same shape as x (i.e. element-wise
      labels). Labels should be a discrete integerized tensor (if the label is
      numeric, it should first be bucketized; if the label is a string, an
      integer vocabulary should first be applied). Note: `CompositeTensor`
      labels are not yet supported (b/134931826). WARNING: When labels are
      provided, the frequency_threshold argument functions as a mutual
      information threshold, which is a float. TODO(b/116308354): Fix
      confusing naming.
    use_adjusted_mutual_info: If true, and labels are provided, calculate
      vocabulary using adjusted rather than raw mutual information.
    min_diff_from_avg: The MI (or AMI) of a feature x label will be adjusted
      to zero whenever the difference between the count and the expected
      (average) count is lower than min_diff_from_average. This can be thought
      of as a regularizing parameter that pushes small MI/AMI values to zero.
      If None, a default parameter will be selected based on the size of the
      dataset (see calculate_recommended_min_diff_from_avg).
    coverage_top_k: (Optional), (Experimental) The minimum number of elements
      per key to be included in the vocabulary.
    coverage_frequency_threshold: (Optional), (Experimental) Limit the
      coverage arm of the vocabulary only to elements whose absolute frequency
      is >= this threshold for a given key.
    key_fn: (Optional), (Experimental) A fn that takes in a single entry of
      `x` and returns the corresponding key for coverage calculation. If this
      is `None`, no coverage arm is added to the vocabulary.
    fingerprint_shuffle: (Optional), (Experimental) Whether to sort the
      vocabularies by fingerprint instead of counts. This is useful for load
      balancing on the training parameter servers. Shuffle only happens while
      writing the files, so all the filters above (top_k, frequency_threshold,
      etc.) will still take effect.
    file_format: (Optional) A str. The format of the resulting vocabulary
      file. Accepted formats are: 'tfrecord_gzip', 'text'.
      'tfrecord_gzip' requires tensorflow>=2.4. The default value is 'text'.
    name: (Optional) A name for this operation.

  Returns:
    The path name for the vocabulary file containing the unique values of `x`.

  Raises:
    ValueError: If `top_k` or `frequency_threshold` is negative.
      If `coverage_top_k` or `coverage_frequency_threshold` is negative.
      If either `coverage_top_k` or `coverage_frequency_threshold` is
        specified and `key_fn` is not.
      If `key_fn` is specified and neither `coverage_top_k` nor
        `coverage_frequency_threshold` is.
  """
  top_k, frequency_threshold = _get_top_k_and_frequency_threshold(
      top_k, frequency_threshold)

  if (coverage_top_k or coverage_frequency_threshold) and not key_fn:
    raise ValueError('You must specify `key_fn` if you specify '
                     '`coverage_top_k` or `coverage_frequency_threshold` in '
                     '`vocabulary`.')

  if key_fn and not (coverage_top_k or coverage_frequency_threshold):
    raise ValueError('You must specify `coverage_top_k` or '
                     '`coverage_frequency_threshold` if you specify `key_fn` '
                     'in `vocabulary`.')

  if file_format not in ALLOWED_VOCABULARY_FILE_FORMATS:
    raise ValueError(
        '"{}" is not an accepted file_format. It should be one of: {}'.format(
            file_format, ALLOWED_VOCABULARY_FILE_FORMATS))

  coverage_top_k, coverage_frequency_threshold = (
      _get_top_k_and_frequency_threshold(coverage_top_k,
                                         coverage_frequency_threshold))

  if x.dtype != tf.string and not x.dtype.is_integer:
    raise ValueError('expected tf.string or integer but got %r' % x.dtype)

  if labels is not None and not labels.dtype.is_integer:
    raise ValueError('expected integer labels but got %r' % labels.dtype)

  if (frequency_threshold is None and labels is None and key_fn is None and
      not fingerprint_shuffle and top_k is not None and
      top_k <= LARGE_VOCAB_TOP_K):
    logging.info('If the number of unique tokens is smaller than the provided '
                 'top_k or approximation error is acceptable, consider using '
                 'tft.experimental.approximate_vocabulary for a potentially '
                 'more efficient implementation.')

  with tf.compat.v1.name_scope(name, 'vocabulary'):
    vocabulary_key = vocab_filename
    vocab_filename = _get_vocab_filename(vocab_filename, store_frequency)
    informativeness_threshold = float('-inf')
    coverage_informativeness_threshold = float('-inf')
    if labels is not None:
      if weights is not None:
        vocab_ordering_type = _VocabOrderingType.WEIGHTED_MUTUAL_INFORMATION
      else:
        vocab_ordering_type = _VocabOrderingType.MUTUAL_INFORMATION
      # Correct for the overloaded `frequency_threshold` API.
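      # With labels, the user-facing `frequency_threshold` is really an
      # informativeness (mutual information) threshold, so its value is moved
      # into `informativeness_threshold` and the pure frequency threshold is
      # reset to 0 below so that no frequency-based pruning occurs.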
if frequency_threshold is not None: informativeness_threshold = frequency_threshold frequency_threshold = 0.0 if coverage_frequency_threshold is not None: coverage_informativeness_threshold = coverage_frequency_threshold coverage_frequency_threshold = 0.0 elif weights is not None: vocab_ordering_type = _VocabOrderingType.WEIGHTED_FREQUENCY else: vocab_ordering_type = _VocabOrderingType.FREQUENCY analyzer_inputs = _get_vocabulary_analyzer_inputs( vocab_ordering_type=vocab_ordering_type, x=x, file_format=file_format, labels=labels, weights=weights) return _vocabulary_analyzer_nodes( analyzer_inputs=analyzer_inputs, input_dtype=x.dtype.name, vocab_ordering_type=vocab_ordering_type, vocab_filename=vocab_filename, top_k=top_k, frequency_threshold=frequency_threshold or 0, informativeness_threshold=informativeness_threshold, use_adjusted_mutual_info=use_adjusted_mutual_info, min_diff_from_avg=min_diff_from_avg, fingerprint_shuffle=fingerprint_shuffle, store_frequency=store_frequency, key_fn=key_fn, coverage_top_k=coverage_top_k, coverage_frequency_threshold=coverage_frequency_threshold or 0, coverage_informativeness_threshold=coverage_informativeness_threshold, file_format=file_format, vocabulary_key=vocabulary_key) def _get_vocabulary_analyzer_inputs( vocab_ordering_type: int, x: common_types.TensorType, file_format: common_types.VocabularyFileFormatType, labels: Optional[tf.Tensor] = None, weights: Optional[tf.Tensor] = None): """Helper for constructing analyzer inputs from tensors. Args: vocab_ordering_type: VocabOrderingType specifying how to select vocabulary. x: Tensor to compute vocabulary over. file_format: The format of the resulting vocabulary file. Accepted formats are 'tfrecord_gzip', 'text'. 'tfrecord_gzip' requires tensorflow>=2.4. labels: Optional tensor of integerized labels. weights: Optional tensor of weights. Returns: A list of batch-reduced tensors to feed to vocabulary analysis. 
""" filter_regex = get_vocab_newline_characters_regex(x.dtype, file_format) if vocab_ordering_type == _VocabOrderingType.WEIGHTED_MUTUAL_INFORMATION: labels = tf.reshape(labels, [-1]) reduced_batch = tf_utils.reduce_batch_weighted_cooccurrences( x, labels, weights, filter_regex=filter_regex) return [ reduced_batch.unique_x, reduced_batch.summed_weights_per_x, reduced_batch.summed_positive_per_x_and_y, reduced_batch.counts_per_x ] elif vocab_ordering_type == _VocabOrderingType.MUTUAL_INFORMATION: labels = tf.reshape(labels, [-1]) reduced_batch = tf_utils.reduce_batch_weighted_cooccurrences( x, labels, weights, filter_regex=filter_regex) return [ reduced_batch.unique_x, reduced_batch.summed_positive_per_x_and_y, reduced_batch.counts_per_x ] elif vocab_ordering_type == _VocabOrderingType.WEIGHTED_FREQUENCY: reduced_batch = tf_utils.reduce_batch_weighted_counts( x, weights, filter_regex=filter_regex) assert reduced_batch.summed_positive_per_x_and_y is None assert reduced_batch.counts_per_x is None return [reduced_batch.unique_x, reduced_batch.summed_weights_per_x] else: reduced_batch = tf_utils.reduce_batch_weighted_counts( x, filter_regex=filter_regex) assert reduced_batch.summed_weights_per_x is None assert reduced_batch.summed_positive_per_x_and_y is None assert reduced_batch.counts_per_x is None return [reduced_batch.unique_x] def get_vocab_newline_characters_regex( input_dtype: tf.dtypes.DType, file_format: common_types.VocabularyFileFormatType) -> Optional[str]: if input_dtype == tf.string and file_format == 'text': return _EMPTY_STRING_OR_NEWLINE_CHARS_REGEX else: return None def _vocabulary_analyzer_nodes( analyzer_inputs: Collection[tf.Tensor], input_dtype: tf.dtypes.DType, vocab_ordering_type: int, vocab_filename: str, top_k: Optional[int] = None, frequency_threshold: int = 0, informativeness_threshold: float = float('-inf'), use_adjusted_mutual_info: bool = False, min_diff_from_avg: Optional[int] = None, fingerprint_shuffle: bool = False, store_frequency: bool = False, key_fn: Optional[Callable[[Any], Any]] = None, coverage_top_k: Optional[int] = None, coverage_frequency_threshold: float = 0.0, coverage_informativeness_threshold: float = float('-inf'), file_format: common_types .VocabularyFileFormatType = DEFAULT_VOCABULARY_FILE_FORMAT, vocabulary_key: Optional[str] = None ) -> common_types.TemporaryAnalyzerOutputType: """Internal helper for analyzing vocab. 
  See `vocabulary` doc string."""
  if (file_format == 'tfrecord_gzip' and
      not tf_utils.is_vocabulary_tfrecord_supported()):
    raise ValueError(
        'Vocabulary file_format "tfrecord_gzip" not yet supported for '
        f'{tf.version.VERSION}.')
  input_values_node = analyzer_nodes.get_input_tensors_value_nodes(
      analyzer_inputs)

  accumulate_output_value_node = nodes.apply_operation(
      analyzer_nodes.VocabularyAccumulate,
      input_values_node,
      vocab_ordering_type=vocab_ordering_type,
      input_dtype=input_dtype)

  merge_output_value_node = nodes.apply_operation(
      analyzer_nodes.VocabularyMerge,
      accumulate_output_value_node,
      use_adjusted_mutual_info=use_adjusted_mutual_info,
      min_diff_from_avg=min_diff_from_avg,
      vocab_ordering_type=vocab_ordering_type)

  filtered_value_node = nodes.apply_operation(
      analyzer_nodes.VocabularyPrune,
      merge_output_value_node,
      coverage_top_k=coverage_top_k,
      coverage_frequency_threshold=coverage_frequency_threshold,
      coverage_informativeness_threshold=coverage_informativeness_threshold,
      key_fn=key_fn,
      top_k=top_k,
      frequency_threshold=frequency_threshold,
      informativeness_threshold=informativeness_threshold,
      input_dtype=input_dtype)

  vocab_filename_node = nodes.apply_operation(
      analyzer_nodes.VocabularyOrderAndWrite,
      filtered_value_node,
      vocab_filename=vocab_filename,
      store_frequency=store_frequency,
      fingerprint_shuffle=fingerprint_shuffle,
      input_dtype=input_dtype,
      file_format=file_format,
      # LINT.IfChange(input_is_sorted)
      input_is_sorted=(top_k is not None and key_fn is None and
                       not fingerprint_shuffle)
      # LINT.ThenChange(beam/analyzer_impls.py:top_k_impl)
  )

  scope = tf.compat.v1.get_default_graph().get_name_scope()
  unfiltered_vocab_size_node = nodes.apply_operation(
      analyzer_nodes.VocabularyCount,
      merge_output_value_node,
      label=f'VocabularyCountUnfiltered[{scope}]')
  unfiltered_vocab_size = analyzer_nodes.bind_future_as_tensor(
      unfiltered_vocab_size_node,
      analyzer_nodes.TensorInfo(tf.int64, [], None),
      name=f'{vocab_filename}_unpruned_vocab_size')
  filtered_vocab_size_node = nodes.apply_operation(
      analyzer_nodes.VocabularyCount,
      filtered_value_node,
      label=f'VocabularyCountFiltered[{scope}]')
  filtered_vocab_size = analyzer_nodes.bind_future_as_tensor(
      filtered_vocab_size_node,
      analyzer_nodes.TensorInfo(tf.int64, [], None),
      name=f'{vocab_filename}_pruned_vocab_size')

  _maybe_annotate_vocab_metadata(vocab_filename, unfiltered_vocab_size,
                                 filtered_vocab_size)

  register_vocab(
      vocab_filename,
      vocabulary_size=filtered_vocab_size,
      vocabulary_key=vocabulary_key,
      file_format=file_format)

  return analyzer_nodes.wrap_as_tensor(vocab_filename_node)


def calculate_recommended_min_diff_from_avg(dataset_size: int) -> int:
  """Calculates a recommended min_diff_from_avg argument to tft.vocabulary.

  Computes a default min_diff_from_average parameter based on the size of the
  dataset. The MI (or AMI) of a token x label will be pushed to zero whenever
  the difference between the observed and the expected (average) cooccurrence
  with the label is < min_diff_from_average. This can be thought of as a
  regularization parameter for mutual information based vocabularies.

  Args:
    dataset_size: The number of records in the dataset. The bigger the
      dataset, the higher the min_diff_from_average will be.

  Returns:
    An integer that is recommended to use as the min_diff_from_avg parameter
    of `vocabulary`.
  """
  # The minimum and maximum min_diff_from_avg parameter to use.
  min_value, max_value = 2, 25
  # Heuristics for a "small" and "large" dataset.
The selected parameter will # be between min_value and max_value depending on where the dataset_size falls # relative to these values. small_dataset_size, large_dataset_size = 10000, 1000000 return int( builtin_min( max_value, builtin_max(min_value, (dataset_size - small_dataset_size) / (large_dataset_size - small_dataset_size) * (max_value - min_value) + min_value))) # Code related to this class is performance sensitive, so (micro-)benchmarks # should be run when it is updated. class QuantilesCombiner(analyzer_nodes.Combiner): """Computes quantiles on the PCollection. This implementation is based on go/squawd. For additional details on the algorithm, such as streaming and summary, see also http://web.cs.ucla.edu/~weiwang/paper/SSDBM07_2.pdf """ def __init__(self, num_quantiles, epsilon, bucket_numpy_dtype, has_weights=False, output_shape=None, include_max_and_min=False, feature_shape=None): self._num_quantiles = num_quantiles self._epsilon = epsilon # Expected upper bound on the total number of input elements per feature. # Theoretical error bound is guaranteed to be <= epsilon as long as the # number of input elements is <= max_num_values. self._max_num_values = 1 << 32 self._bucket_numpy_dtype = bucket_numpy_dtype self._has_weights = has_weights self._include_max_and_min = include_max_and_min num_outputs = (num_quantiles + 1) if include_max_and_min else (num_quantiles - 1) if feature_shape is None: feature_shape = [] elif isinstance(feature_shape, int): feature_shape = [feature_shape] if output_shape is None: self._output_shape = list(feature_shape) + [num_outputs] else: self._output_shape = output_shape self._num_features = np.prod(feature_shape, dtype=np.int64).item() def create_accumulator(self): return sketches.QuantilesSketch(self._epsilon, self._max_num_values, self._num_features) def add_input(self, accumulator, next_input): # Flattened input array will be split on inputs for each feature. # C-contiguous order of flattened array is required. flat_values = pa.array(np.ravel(next_input[0])) if self._has_weights: flat_weights = pa.array(np.ravel(next_input[1])) accumulator.AddValues(flat_values, flat_weights) else: accumulator.AddValues(flat_values) return accumulator def merge_accumulators(self, accumulators): accumulators = iter(accumulators) result = next(accumulators) for accumulator in accumulators: result.Merge(accumulator) return result def compact(self, accumulator): accumulator.Compact() return accumulator def extract_output(self, accumulator): result = accumulator.GetQuantiles(self._num_quantiles).to_pylist() if not result: return [np.zeros(self._output_shape, self._bucket_numpy_dtype)] result = np.array(result, self._bucket_numpy_dtype) # Trim elementwise results if max and min should be excluded. if not self._include_max_and_min: result = result[:, 1:-1] return [np.reshape(result, self._output_shape)] def output_tensor_infos(self): return [ analyzer_nodes.TensorInfo( tf.as_dtype(self._bucket_numpy_dtype), self._output_shape, None) ] @property def accumulator_coder(self): return _QuantilesSketchCacheCoder() class _QuantilesSketchCacheCoder(analyzer_nodes.CacheCoder): """Cache coder for the quantiles accumulator.""" def encode_cache(self, accumulator): # TODO(b/174549940): Consider exposing and calling # `QuantilesSketch::Serialize` directly. # TODO(b/37788560): Should we be "intelligently" choosing the 'protocol' # argument for 'dumps'? 
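    # Note: with no explicit protocol, pickle.dumps uses
    # pickle.DEFAULT_PROTOCOL; pickle.HIGHEST_PROTOCOL would likely produce a
    # smaller/faster encoding at the cost of forward compatibility of cached
    # accumulators across Python versions.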
    return pickle.dumps(accumulator)

  def decode_cache(self, encoded_accumulator):
    return pickle.loads(encoded_accumulator)


@common.log_api_use(common.ANALYZER_COLLECTION)
def quantiles(x: tf.Tensor,
              num_buckets: int,
              epsilon: float,
              weights: Optional[tf.Tensor] = None,
              reduce_instance_dims: bool = True,
              name: Optional[str] = None) -> tf.Tensor:
  """Computes the quantile boundaries of a `Tensor` over the whole dataset.

  Quantile boundaries are computed using approximate quantiles, and error
  tolerance is specified using `epsilon`. The boundaries divide the input
  tensor into approximately equal `num_buckets` parts. See go/squawd for
  details, and how to control the error due to approximation. NaN input
  values and values with NaN weights are ignored.

  Args:
    x: An input `Tensor`.
    num_buckets: Values in the `x` are divided into approximately equal-sized
      buckets, where the number of buckets is `num_buckets`. The number of
      returned quantiles is `num_buckets` - 1.
    epsilon: Error tolerance, typically a small fraction close to zero (e.g.
      0.01). Higher values of epsilon increase the quantile approximation
      error, and hence result in more unequal buckets, but could improve
      performance and resource consumption. Some measured results on memory
      consumption: for epsilon = 0.001, the amount of memory for each buffer
      to hold the summary for 1 trillion input values is ~25000 bytes. If
      epsilon is relaxed to 0.01, the buffer size drops to ~2000 bytes for the
      same input size. The buffer size also determines the amount of work in
      the different stages of the beam pipeline; in general, a larger epsilon
      results in fewer and smaller stages, and less time. For more performance
      trade-offs see also http://web.cs.ucla.edu/~weiwang/paper/SSDBM07_2.pdf
    weights: (Optional) Weights tensor for the quantiles. Tensor must have the
      same batch size as x.
    reduce_instance_dims: By default collapses the batch and instance
      dimensions to arrive at a single output vector. If False, only collapses
      the batch dimension and outputs a vector of the same shape as the input.
    name: (Optional) A name for this operation.

  Returns:
    The bucket boundaries represented as a list, with num_buckets - 1
    elements, unless reduce_instance_dims is False, which results in a Tensor
    of shape x.shape + [num_buckets - 1]. See code below for discussion on the
    type of bucket boundaries.
  """
  # Quantile ops convert input values to double under the hood. Keep bucket
  # boundaries as float for all numeric types.
  bucket_dtype = tf.float32
  with tf.compat.v1.name_scope(name, 'quantiles'):
    if weights is None:
      analyzer_inputs = [x]
      has_weights = False
    else:
      analyzer_inputs = [x, weights]
      has_weights = True
    feature_shape = [] if reduce_instance_dims else x.get_shape().as_list()[1:]
    output_shape = (feature_shape if feature_shape else [1]) + [
        num_buckets - 1
    ]
    combiner = QuantilesCombiner(
        num_buckets,
        epsilon,
        bucket_dtype.as_numpy_dtype,
        has_weights=has_weights,
        output_shape=output_shape,
        feature_shape=feature_shape)
    (quantile_boundaries,) = _apply_cacheable_combiner(combiner,
                                                       *analyzer_inputs)
    return quantile_boundaries


def _quantiles_per_key(
    x: tf.Tensor,
    key: tf.Tensor,
    num_buckets: int,
    epsilon: float,
    weights: Optional[tf.Tensor] = None,
    name: Optional[str] = None
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, int]:
  """Like quantiles but per-key.

  For private use in tf.Transform implementation only.

  Args:
    x: An input `Tensor`.
    key: An input `Tensor` with rank 1 and size same as the first dimension
      of `x`.
      All values of `x` will be aggregated according to the corresponding
      value of `key`.
    num_buckets: See `quantiles`.
    epsilon: See `quantiles`.
    weights: See `quantiles`.
    name: (Optional) A name for this operation.

  Returns:
    A 5-tuple of (key_vocab, boundaries, scale_factor_per_key, shift_per_key,
    num_buckets).
    The returned boundaries is a 1-d Tensor of size:
    ((num_buckets - 2) * num_keys) + 1

    And the returned scale and shift 1-d Tensors can be used to transform a
    value before applying bucketization and shift the resulting bucket. So the
    transformation of each input x before computing its bucket should be:
    F(x, key) = x * scale_factor_per_key[key] + shift_per_key[key]

    For example, if there are 2 keys, and the following boundaries are
    computed for them: [[0, 1, 2], [0, 1, 2]], this will return:
    boundaries: [0, 0.5, 1, 1.5, 2]
    scale_factor_per_key: [0.5, 0.5]
    shift_per_key: [0, 1]
    num_buckets: 4

  Raises:
    ValueError: If key has wrong dtype.
  """
  if key.dtype != tf.string:
    raise ValueError('key must have type tf.string')
  # Quantile ops convert input values to double under the hood. Keep bucket
  # boundaries as float for all numeric types.
  bucket_dtype = tf.float32
  with tf.compat.v1.name_scope(name, 'quantiles_by_key'):
    combiner = QuantilesCombiner(
        num_buckets,
        epsilon,
        bucket_dtype.as_numpy_dtype,
        has_weights=weights is not None,
        output_shape=(num_buckets - 1,))

    input_values_node = analyzer_nodes.get_input_tensors_value_nodes(
        (key, x) if weights is None else (key, x, weights))

    accumulate_outputs_value_nodes = nodes.apply_multi_output_operation(
        analyzer_nodes.CacheableCombinePerKeyAccumulate,
        input_values_node,
        combiner=combiner)

    merge_output_value_node = nodes.apply_operation(
        analyzer_nodes.CacheableCombinePerKeyMerge,
        *accumulate_outputs_value_nodes,
        combiner=combiner)

    key_value_node, bucket_boundaries = nodes.apply_multi_output_operation(
        analyzer_nodes.CacheableCombinePerKeyFormatKeys,
        merge_output_value_node,
        combiner=combiner)

    boundaries, scale_factor, shift, num_buckets_node = (
        nodes.apply_multi_output_operation(
            analyzer_nodes.ScaleAndFlattenPerKeyBucketBouandaries,
            bucket_boundaries,
            output_tensor_dtype=bucket_dtype))

    return tuple(
        map(analyzer_nodes.wrap_as_tensor,
            [key_value_node, boundaries, scale_factor, shift,
             num_buckets_node]))


class CovarianceCombiner(analyzer_nodes.Combiner):
  """Combines the PCollection to compute the biased covariance matrix."""

  def __init__(self, output_shape, numpy_dtype=np.float64):
    """Store the dtype and shape for np arrays/matrices for precision."""
    self._output_shape = output_shape
    self._numpy_dtype = numpy_dtype

  def create_accumulator(self):
    """Create an accumulator with all zero entries."""
    return [
        np.zeros((self._output_shape[0], self._output_shape[0]),
                 self._numpy_dtype),
        np.zeros((self._output_shape[0],), self._numpy_dtype),
        np.zeros((), self._numpy_dtype)
    ]

  def add_input(self, accumulator, batch_values):
    """Compute sum of input cross-terms, sum of inputs, and count.

    The cross terms for a numeric 1d array x are given by the set:
    {z_ij = x_i * x_j for all indices i and j}. This is stored as a 2d array.
    Since next_input is an array of 1d numeric arrays (i.e. a 2d array),
    matmul(transpose(next_input), next_input) will automatically sum up
    the cross terms of each 1d array in next_input.
    Args:
      accumulator: running sum of cross terms, input vectors, and count
      batch_values: entries from the pipeline, which must be a single-element
        list containing a 2d array representing multiple 1d arrays

    Returns:
      An accumulator with next_input considered in its running list of
      sum_product, sum_vectors, and count of input rows.
    """
    # Expect a single input representing the batch for the input tensor.
    batch_value, = batch_values

    assert len(np.shape(batch_value)) == 2

    batch_cross_terms = np.matmul(
        np.transpose(batch_value),
        batch_value
    ).astype(self._numpy_dtype)

    batch_sum = np.array(np.sum(batch_value, axis=0), self._numpy_dtype)
    batch_count = np.shape(batch_value)[0]

    sum_product, sum_vectors, count = accumulator
    return [
        sum_product + batch_cross_terms,
        sum_vectors + batch_sum,
        count + batch_count
    ]

  def merge_accumulators(self, accumulators):
    """Sums values in each accumulator entry."""
    # TODO(b/215378946): Consider updating accumulators[0] in place.
    products, vectors, counts = zip(*accumulators)
    return [
        np.sum(products, axis=0),
        np.sum(vectors, axis=0),
        np.sum(counts, axis=0)
    ]

  def extract_output(self, accumulator):
    """Run covariance logic on sum_product, sum of input vectors, and count.

    The formula used to compute the covariance is cov(x) = E(xx^T) - uu^T,
    where x is the original input to the combiner, and u = mean(x).
    E(xx^T) is computed by dividing the sum of cross terms (index 0) by count
    (index 2). u is computed by taking the sum of rows (index 1) and dividing
    by the count (index 2).

    Args:
      accumulator: final accumulator as a list of the sum of cross-terms
        matrix, sum of input vectors, and count.

    Returns:
      A list containing a single 2d ndarray, the covariance matrix.
    """
    sum_product, sum_vectors, count = accumulator
    if count == 0:
      return [np.zeros(self._output_shape, self._numpy_dtype)]
    expected_cross_terms = sum_product / count
    expected_terms = sum_vectors / count

    return [
        np.ndarray.astype(  # TODO(b/64987151): # pytype: disable=attribute-error
            expected_cross_terms - np.outer(expected_terms, expected_terms),
            self._numpy_dtype)
    ]

  def output_tensor_infos(self):
    return [
        analyzer_nodes.TensorInfo(
            tf.as_dtype(self._numpy_dtype), self._output_shape, None)
    ]


@common.log_api_use(common.ANALYZER_COLLECTION)
def covariance(x: tf.Tensor,
               dtype: tf.DType,
               name: Optional[str] = None) -> tf.Tensor:
  """Computes the covariance matrix over the whole dataset.

  The covariance matrix M is defined as follows:
  Let x[:, j] be a tensor of the jth element of all input vectors in x, and
  let u_j = mean(x[:, j]). The entry M[i, j] = E[(x[:, i] - u_i)(x[:, j] -
  u_j)]. Notice that the diagonal entries correspond to variances of
  individual elements in the vector, i.e. M[i, i] corresponds to the variance
  of x[:, i].

  Args:
    x: A rank-2 `Tensor`, 0th dim are rows, 1st dim are indices in each input
      vector.
    dtype: Tensorflow dtype of entries in the returned matrix.
    name: (Optional) A name for this operation.

  Raises:
    ValueError: if input is not a rank-2 Tensor.
  Returns:
    A rank-2 (matrix) covariance `Tensor`.
  """
  if not isinstance(x, tf.Tensor):
    raise TypeError('Expected a Tensor, but got %r' % x)

  with tf.compat.v1.name_scope(name, 'covariance'):
    x.shape.assert_has_rank(2)

    input_dim = x.shape.as_list()[1]
    shape = (input_dim, input_dim)

    (result,) = _apply_cacheable_combiner(
        CovarianceCombiner(shape, dtype.as_numpy_dtype), x)
    return result


class PCACombiner(CovarianceCombiner):
  """Compute PCA of accumulated data using the biased covariance matrix."""

  def __init__(self, output_shape, output_dim=None, numpy_dtype=np.float64):
    """Store pca output dimension, shape and dtype for precision."""
    super().__init__(output_shape, numpy_dtype=numpy_dtype)
    self._output_dim = output_dim

  def extract_output(self, accumulator):
    """Compute PCA of the accumulated data using the biased covariance matrix.

    Following the covariance computation in CovarianceCombiner, this method
    runs eigenvalue decomposition on the covariance matrix, sorts eigenvalues
    in decreasing order, and returns the first output_dim corresponding
    eigenvectors (principal components) as a matrix.

    Args:
      accumulator: final accumulator as a list of the sum of cross-terms
        matrix, sum of input vectors, and count.

    Returns:
      A list containing a matrix of shape (input_dim, output_dim).
    """
    sum_product, sum_vectors, count = accumulator
    if count == 0:
      # In this case all eigenvalues==0 and we output (possibly truncated)
      # basis vectors. Note that if _output_dim is None, then M is set to N in
      # np.eye.
      return [np.eye(N=self._output_shape[0], M=self._output_dim,
                     dtype=self._numpy_dtype)]
    expected_cross_terms = sum_product / count
    expected_terms = sum_vectors / count
    cov = np.ndarray.astype(  # TODO(b/64987151): # pytype: disable=attribute-error
        expected_cross_terms - np.outer(expected_terms, expected_terms),
        self._numpy_dtype)
    vals, vecs = np.linalg.eigh(cov)
    sorted_vecs = vecs[:, np.argsort(vals)[::-1]]
    if self._output_dim is None:
      return [sorted_vecs]
    else:
      return [sorted_vecs[:, :self._output_dim]]


@common.log_api_use(common.ANALYZER_COLLECTION)
def pca(x: tf.Tensor,
        output_dim: int,
        dtype: tf.DType,
        name: Optional[str] = None) -> tf.Tensor:
  """Computes PCA on the dataset using biased covariance.

  The PCA analyzer computes output_dim orthonormal vectors that capture
  directions/axes corresponding to the highest variances in the input vectors
  of `x`. The output vectors are returned as a rank-2 tensor with shape
  `(input_dim, output_dim)`, where the 0th dimension are the components of
  each output vector, and the 1st dimension are the output vectors
  representing orthogonal directions in the input space, sorted in order of
  decreasing variances.

  The output rank-2 tensor (matrix) serves a useful transform purpose.
  Formally, the matrix can be used downstream in the transform step by
  multiplying the input tensor `x` by it. This transform reduces the dimension
  of input vectors to output_dim in a way that retains the maximal variance.

  NOTE: To properly use PCA, input vector components should be converted to
  similar units of measurement such that the vectors represent a Euclidean
  space. If no such conversion is available (e.g. one element represents time,
  another element distance), the canonical approach is to first apply a
  transformation to the input data to normalize numerical variances, i.e.
  `tft.scale_to_z_score()`. Normalization allows PCA to choose output axes
  that help decorrelate input axes.

  Below are a couple of intuitive examples of PCA.
  Consider a simple 2-dimensional example:

  Input x is a series of vectors `[e, e]` where `e` is Gaussian with mean 0,
  variance 1. The two components are perfectly correlated, and the resulting
  covariance matrix is

  ```
  [[1 1],
   [1 1]].
  ```

  Applying PCA with `output_dim = 1` would discover the first principal
  component `[1 / sqrt(2), 1 / sqrt(2)]`. When multiplied with the original
  example, each vector `[e, e]` would be mapped to a scalar `sqrt(2) * e`. The
  second principal component would be `[-1 / sqrt(2), 1 / sqrt(2)]` and would
  map `[e, e]` to 0, which indicates that the second component captures no
  variance at all. This agrees with our intuition since we know that the two
  axes in the input are perfectly correlated and can be fully explained by a
  single scalar `e`.

  Consider a 3-dimensional example:

  Input `x` is a series of vectors `[a, a, b]`, where `a` is a zero-mean, unit
  variance Gaussian and `b` is a zero-mean, variance 4 Gaussian and is
  independent of `a`. The first principal component of the unnormalized vector
  would be `[0, 0, 1]` since `b` has a much larger variance than any linear
  combination of the first two components. This would map `[a, a, b]` onto
  `b`, asserting that the axis with highest energy is the third component.
  While this may be the desired output if `a` and `b` correspond to the same
  units, it is not statistically desirable when the units are irreconcilable.
  In such a case, one should first normalize each component to unit variance,
  i.e. `b := b / 2`. The first principal component of a normalized vector
  would yield `[1 / sqrt(2), 1 / sqrt(2), 0]`, and would map `[a, a, b]` to
  `sqrt(2) * a`. The second component would be `[0, 0, 1]` and map `[a, a, b]`
  to `b`. As can be seen, the benefit of normalization is that PCA would
  capture highly correlated components first and collapse them into a lower
  dimension.

  Args:
    x: A rank-2 `Tensor`, 0th dim are rows, 1st dim are indices in row vectors.
    output_dim: The PCA output dimension (number of eigenvectors to return).
    dtype: Tensorflow dtype of entries in the returned matrix.
    name: (Optional) A name for this operation.

  Raises:
    ValueError: if input is not a rank-2 Tensor.

  Returns:
    A 2D `Tensor` (matrix) M of shape (input_dim, output_dim).
  """
  if not isinstance(x, tf.Tensor):
    raise TypeError('Expected a Tensor, but got %r' % x)

  with tf.compat.v1.name_scope(name, 'pca'):
    x.shape.assert_has_rank(2)

    input_dim = x.shape.as_list()[1]
    shape = (input_dim, output_dim)

    (result,) = _apply_cacheable_combiner(
        PCACombiner(shape, output_dim, dtype.as_numpy_dtype), x)
    return result


def _maybe_annotate_vocab_metadata(vocab_filename: str,
                                   unfiltered_vocabulary_size: tf.Tensor,
                                   filtered_vocabulary_size: tf.Tensor):
  """Annotates the named vocabulary with its unfiltered and filtered sizes.

  Creates a deferred annotation for the specified vocabulary.

  Args:
    vocab_filename: The name of the vocabulary.
    unfiltered_vocabulary_size: A tf.int64 tensor containing the unfiltered
      vocab size.
    filtered_vocabulary_size: A tf.int64 tensor containing the filtered vocab
      size.
""" if not common.IS_ANNOTATIONS_PB_AVAILABLE: return from tensorflow_transform import annotations_pb2 # pylint: disable=g-import-not-at-top message_type = annotations_pb2.VocabularyMetadata.DESCRIPTOR.full_name unfiltered_vocabulary_size = tf.expand_dims(unfiltered_vocabulary_size, 0) filtered_vocabulary_size = tf.expand_dims(filtered_vocabulary_size, 0) file_name = tf.convert_to_tensor([vocab_filename]) descriptor_source = descriptor_pb2.FileDescriptorSet() annotations_pb2.VocabularyMetadata.DESCRIPTOR.file.CopyToProto( descriptor_source.file.add()) descriptor_source_str = b'bytes://' + descriptor_source.SerializeToString() message_proto = tf_utils._encode_proto( # pylint: disable=protected-access { 'unfiltered_vocabulary_size': unfiltered_vocabulary_size, 'filtered_vocabulary_size': filtered_vocabulary_size, 'file_name': file_name, }, message_type, descriptor_source=descriptor_source_str) assert message_proto.shape == [1] message_proto = message_proto[0] # Note: we annotate globally here (tied to a vocabulary by filename) rather # than attaching to a tensor, because this annotation is tied to an analysis # output not a final tensor produced by a mapper. type_url = os.path.join(common.ANNOTATION_PREFIX_URL, message_type) schema_inference.annotate(type_url, message_proto)
[ "tensorflow.convert_to_tensor", "tensorflow.as_dtype", "tensorflow.zeros", "tensorflow.reduce_sum", "tensorflow.compat.v1.logging.warn", "tensorflow.cast", "numpy.nan_to_num", "tensorflow.strings.to_number", "numpy.zeros_like", "numpy.ones_like", "numpy.pad", "numpy.reshape", "numpy.eye", "numpy.stack", "numpy.full", "numpy.apply_along_axis", "numpy.ravel", "numpy.outer", "numpy.zeros", "tensorflow.compat.v1.name_scope", "tensorflow.strings.as_string", "numpy.isnan", "tensorflow.sparse.reduce_sum", "tensorflow.zeros_like", "numpy.int64", "numpy.linalg.eigh", "numpy.transpose", "numpy.argsort", "numpy.logical_and", "numpy.array", "numpy.sum", "tensorflow.dtypes.as_dtype", "tensorflow.size", "tensorflow.compat.v1.get_default_graph", "numpy.array_equal", "tensorflow.range", "tensorflow.reshape", "tensorflow.ones_like", "tensorflow.expand_dims", "numpy.ones", "numpy.shape", "numpy.isscalar", "numpy.prod" ]
tensorflow_transform/analyzers.py
[(424, 'tensorflow_transform.common.log_api_use', 'common.log_api_use', (['common.ANALYZER_COLLECTION'], {}), False, 'from tensorflow_transform import common\n'), (451, 'tensorflow_transform.common.log_api_use', 'common.log_api_use', (['common.ANALYZER_COLLECTION'], {}), False, 'from tensorflow_transform import common\n'), (619, 'tensorflow_transform.common.log_api_use', 'common.log_api_use', (['common.ANALYZER_COLLECTION'], {}), False, 'from tensorflow_transform import common\n'), (673, 'tensorflow_transform.common.log_api_use', 'common.log_api_use', (['common.ANALYZER_COLLECTION'], {}), False, 'from tensorflow_transform import common\n'), (738, 'tensorflow_transform.common.log_api_use', 'common.log_api_use', (['common.ANALYZER_COLLECTION'], {}), False, 'from tensorflow_transform import common\n'), (766, 'tensorflow_transform.common.log_api_use', 'common.log_api_use', (['common.ANALYZER_COLLECTION'], {}), False, 'from tensorflow_transform import common\n'), (815, 'tensorflow_transform.common.log_api_use', 'common.log_api_use', (['common.ANALYZER_COLLECTION'], {}), False, 'from tensorflow_transform import common\n'), (843, 'tensorflow_transform.common.log_api_use', 'common.log_api_use', (['common.ANALYZER_COLLECTION'], {}), False, 'from tensorflow_transform import common\n'), (912, 'tensorflow_transform.common.log_api_use', 'common.log_api_use', (['common.ANALYZER_COLLECTION'], {}), False, 'from tensorflow_transform import common\n'), (949, 'tensorflow_transform.common.log_api_use', 'common.log_api_use', (['common.ANALYZER_COLLECTION'], {}), False, 'from tensorflow_transform import common\n'), (987, 'tensorflow_transform.common.log_api_use', 'common.log_api_use', (['common.ANALYZER_COLLECTION'], {}), False, 'from tensorflow_transform import common\n'), (1137, 'tfx_bsl.types.tfx_namedtuple.namedtuple', 'tfx_namedtuple.namedtuple', (['"""WeightedMeanAndVarAccumulator"""', "['count', 'mean', 'variance', 'weight']"], {}), False, 'from tfx_bsl.types import tfx_namedtuple\n'), (1380, 'tfx_bsl.types.tfx_namedtuple.namedtuple', 'tfx_namedtuple.namedtuple', (['"""LMomentsAccumulator"""', "['count_l1', 'count_l2', 'count_l3', 'count_l4', 'l1', 'l2', 'l3', 'l4']"], {}), False, 'from tfx_bsl.types import tfx_namedtuple\n'), (1705, 'tensorflow_transform.common.log_api_use', 'common.log_api_use', (['common.ANALYZER_COLLECTION'], {}), False, 'from tensorflow_transform import common\n'), (2214, 'tensorflow_transform.common.log_api_use', 'common.log_api_use', (['common.ANALYZER_COLLECTION'], {}), False, 'from tensorflow_transform import common\n'), (2468, 'tensorflow_transform.common.log_api_use', 'common.log_api_use', (['common.ANALYZER_COLLECTION'], {}), False, 'from tensorflow_transform import common\n'), (2549, 'tensorflow_transform.common.log_api_use', 'common.log_api_use', (['common.ANALYZER_COLLECTION'], {}), False, 'from tensorflow_transform import common\n'), (138, 'tensorflow_transform.analyzer_nodes.get_input_tensors_value_nodes', 'analyzer_nodes.get_input_tensors_value_nodes', (['tensor_inputs'], {}), False, 'from tensorflow_transform import analyzer_nodes\n'), (141, 'tensorflow_transform.nodes.apply_multi_output_operation', 'nodes.apply_multi_output_operation', (['analyzer_nodes.CacheableCombineAccumulate', 'input_values_node'], {'combiner': 'combiner'}), False, 'from tensorflow_transform import nodes\n'), (146, 'tensorflow_transform.nodes.apply_multi_output_operation', 'nodes.apply_multi_output_operation', (['analyzer_nodes.CacheableCombineMerge', '*accumulate_outputs_value_nodes'], 
{'combiner': 'combiner'}), False, 'from tensorflow_transform import nodes\n'), (180, 'tensorflow_transform.analyzer_nodes.get_input_tensors_value_nodes', 'analyzer_nodes.get_input_tensors_value_nodes', (['tensor_inputs'], {}), False, 'from tensorflow_transform import analyzer_nodes\n'), (183, 'tensorflow_transform.nodes.apply_multi_output_operation', 'nodes.apply_multi_output_operation', (['analyzer_nodes.CacheableCombinePerKeyAccumulate', 'input_values_node'], {'combiner': 'combiner'}), False, 'from tensorflow_transform import nodes\n'), (188, 'tensorflow_transform.nodes.apply_operation', 'nodes.apply_operation', (['analyzer_nodes.CacheableCombinePerKeyMerge', '*accumulate_outputs_value_nodes'], {'combiner': 'combiner'}), False, 'from tensorflow_transform import nodes\n'), (193, 'tensorflow_transform.nodes.apply_multi_output_operation', 'nodes.apply_multi_output_operation', (['analyzer_nodes.CacheableCombinePerKeyFormatKeys', 'merge_output_value_node'], {'combiner': 'combiner'}), False, 'from tensorflow_transform import nodes\n'), (206, 'tensorflow_transform.analyzer_nodes.get_input_tensors_value_nodes', 'analyzer_nodes.get_input_tensors_value_nodes', (['tensor_inputs'], {}), False, 'from tensorflow_transform import analyzer_nodes\n'), (209, 'tensorflow_transform.nodes.apply_operation', 'nodes.apply_operation', (['analyzer_nodes.CacheableCombinePerKeyAccumulate', 'input_values_node'], {'combiner': 'combiner'}), False, 'from tensorflow_transform import nodes\n'), (214, 'tensorflow_transform.nodes.apply_operation', 'nodes.apply_operation', (['analyzer_nodes.CacheableCombinePerKeyMerge', 'accumulate_outputs_value_node'], {'combiner': 'combiner'}), False, 'from tensorflow_transform import nodes\n'), (219, 'tensorflow_transform.nodes.apply_operation', 'nodes.apply_operation', (['analyzer_nodes.CacheableCombinePerKeyFormatLarge', 'merge_output_value_node'], {}), False, 'from tensorflow_transform import nodes\n'), (227, 'tensorflow_transform.nodes.apply_operation', 'nodes.apply_operation', (['analyzer_nodes.VocabularyOrderAndWrite', 'keys_and_values_node'], {'vocab_filename': 'key_vocabulary_filename', 'store_frequency': '(True)', 'fingerprint_shuffle': '(True)', 'file_format': '"""text"""'}), False, 'from tensorflow_transform import nodes\n'), (236, 'tensorflow_transform.analyzer_nodes.wrap_as_tensor', 'analyzer_nodes.wrap_as_tensor', (['key_vocabulary_filename_node'], {}), False, 'from tensorflow_transform import analyzer_nodes\n'), (1591, 're.sub', 're.sub', (['"""[-\\\\s]+"""', '"""-"""', 'filename'], {}), False, 'import re\n'), (1682, 'tensorflow_transform.annotators.annotate_asset', 'annotators.annotate_asset', (['vocabulary_key', 'filename'], {}), False, 'from tensorflow_transform import annotators\n'), (2012, 'tensorflow_transform.analyzer_nodes.get_input_tensors_value_nodes', 'analyzer_nodes.get_input_tensors_value_nodes', (['analyzer_inputs'], {}), False, 'from tensorflow_transform import analyzer_nodes\n'), (2015, 'tensorflow_transform.nodes.apply_operation', 'nodes.apply_operation', (['analyzer_nodes.VocabularyAccumulate', 'input_values_node'], {'vocab_ordering_type': 'vocab_ordering_type', 'input_dtype': 'input_dtype'}), False, 'from tensorflow_transform import nodes\n'), (2021, 'tensorflow_transform.nodes.apply_operation', 'nodes.apply_operation', (['analyzer_nodes.VocabularyMerge', 'accumulate_output_value_node'], {'use_adjusted_mutual_info': 'use_adjusted_mutual_info', 'min_diff_from_avg': 'min_diff_from_avg', 'vocab_ordering_type': 'vocab_ordering_type'}), False, 'from 
tensorflow_transform import nodes\n'), (2028, 'tensorflow_transform.nodes.apply_operation', 'nodes.apply_operation', (['analyzer_nodes.VocabularyPrune', 'merge_output_value_node'], {'coverage_top_k': 'coverage_top_k', 'coverage_frequency_threshold': 'coverage_frequency_threshold', 'coverage_informativeness_threshold': 'coverage_informativeness_threshold', 'key_fn': 'key_fn', 'top_k': 'top_k', 'frequency_threshold': 'frequency_threshold', 'informativeness_threshold': 'informativeness_threshold', 'input_dtype': 'input_dtype'}), False, 'from tensorflow_transform import nodes\n'), (2040, 'tensorflow_transform.nodes.apply_operation', 'nodes.apply_operation', (['analyzer_nodes.VocabularyOrderAndWrite', 'filtered_value_node'], {'vocab_filename': 'vocab_filename', 'store_frequency': 'store_frequency', 'fingerprint_shuffle': 'fingerprint_shuffle', 'input_dtype': 'input_dtype', 'file_format': 'file_format', 'input_is_sorted': '(top_k is not None and key_fn is None and not fingerprint_shuffle)'}), False, 'from tensorflow_transform import nodes\n'), (2055, 'tensorflow_transform.nodes.apply_operation', 'nodes.apply_operation', (['analyzer_nodes.VocabularyCount', 'merge_output_value_node'], {'label': 'f"""VocabularyCountUnfiltered[{scope}]"""'}), False, 'from tensorflow_transform import nodes\n'), (2063, 'tensorflow_transform.nodes.apply_operation', 'nodes.apply_operation', (['analyzer_nodes.VocabularyCount', 'filtered_value_node'], {'label': 'f"""VocabularyCountFiltered[{scope}]"""'}), False, 'from tensorflow_transform import nodes\n'), (2080, 'tensorflow_transform.analyzer_nodes.wrap_as_tensor', 'analyzer_nodes.wrap_as_tensor', (['vocab_filename_node'], {}), False, 'from tensorflow_transform import analyzer_nodes\n'), (2662, 'tensorflow.expand_dims', 'tf.expand_dims', (['unfiltered_vocabulary_size', '(0)'], {}), True, 'import tensorflow as tf\n'), (2663, 'tensorflow.expand_dims', 'tf.expand_dims', (['filtered_vocabulary_size', '(0)'], {}), True, 'import tensorflow as tf\n'), (2664, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[vocab_filename]'], {}), True, 'import tensorflow as tf\n'), (2665, 'google.protobuf.descriptor_pb2.FileDescriptorSet', 'descriptor_pb2.FileDescriptorSet', ([], {}), False, 'from google.protobuf import descriptor_pb2\n'), (2669, 'tensorflow_transform.tf_utils._encode_proto', 'tf_utils._encode_proto', (["{'unfiltered_vocabulary_size': unfiltered_vocabulary_size,\n 'filtered_vocabulary_size': filtered_vocabulary_size, 'file_name':\n file_name}", 'message_type'], {'descriptor_source': 'descriptor_source_str'}), False, 'from tensorflow_transform import tf_utils\n'), (2681, 'os.path.join', 'os.path.join', (['common.ANNOTATION_PREFIX_URL', 'message_type'], {}), False, 'import os\n'), (2682, 'tensorflow_transform.schema_inference.annotate', 'schema_inference.annotate', (['type_url', 'message_proto'], {}), False, 'from tensorflow_transform import schema_inference\n'), (255, 'numpy.array', 'np.array', (['default_accumulator_value'], {}), True, 'import numpy as np\n'), (262, 'numpy.isnan', 'np.isnan', (['default_accumulator_value'], {}), True, 'import numpy as np\n'), (275, 'numpy.array_equal', 'np.array_equal', (['array', 'self._default_sub_accumulator'], {}), True, 'import numpy as np\n'), (396, 'numpy.isscalar', 'np.isscalar', (['default_accumulator_value'], {}), True, 'import numpy as np\n'), (447, 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['name', '"""min"""'], {}), True, 'import tensorflow as tf\n'), (473, 'tensorflow.compat.v1.name_scope', 
'tf.compat.v1.name_scope', (['name', '"""max"""'], {}), True, 'import tensorflow as tf\n'), (499, 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['name', '"""min_and_max"""'], {}), True, 'import tensorflow as tf\n'), (514, 'tensorflow_transform.tf_utils.reduce_batch_minus_min_and_max', 'tf_utils.reduce_batch_minus_min_and_max', (['x', 'reduce_instance_dims'], {}), False, 'from tensorflow_transform import tf_utils\n'), (574, 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['name', '"""min_and_max_per_key"""'], {}), True, 'import tensorflow as tf\n'), (588, 'tensorflow_transform.tf_utils.reduce_batch_minus_min_and_max_per_key', 'tf_utils.reduce_batch_minus_min_and_max_per_key', (['x', 'key'], {}), False, 'from tensorflow_transform import tf_utils\n'), (615, 'functools.partial', 'functools.partial', (['np.sum'], {'dtype': 'output_dtype.as_numpy_dtype'}), False, 'import functools\n'), (645, 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['name', '"""sum"""'], {}), True, 'import tensorflow as tf\n'), (707, 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['name', '"""histogram"""'], {}), True, 'import tensorflow as tf\n'), (754, 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['name', '"""size"""'], {}), True, 'import tensorflow as tf\n'), (793, 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['name', '"""count_per_key"""'], {}), True, 'import tensorflow as tf\n'), (795, 'tensorflow_transform.tf_utils.reduce_batch_count_per_key', 'tf_utils.reduce_batch_count_per_key', (['key'], {}), False, 'from tensorflow_transform import tf_utils\n'), (839, 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['name', '"""mean"""'], {}), True, 'import tensorflow as tf\n'), (870, 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['name', '"""var"""'], {}), True, 'import tensorflow as tf\n'), (886, 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['"""mean_and_var"""'], {}), True, 'import tensorflow as tf\n'), (888, 'tensorflow.cast', 'tf.cast', (['x', 'output_dtype'], {}), True, 'import tensorflow as tf\n'), (891, 'tensorflow_transform.tf_utils.reduce_batch_count_mean_and_var', 'tf_utils.reduce_batch_count_mean_and_var', (['x', 'reduce_instance_dims'], {}), False, 'from tensorflow_transform import tf_utils\n'), (945, 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['name', '"""tukey_location"""'], {}), True, 'import tensorflow as tf\n'), (983, 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['name', '"""tukey_scale"""'], {}), True, 'import tensorflow as tf\n'), (1020, 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['name', '"""tukey_h_params"""'], {}), True, 'import tensorflow as tf\n'), (1035, 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['"""tukey_parameters"""'], {}), True, 'import tensorflow as tf\n'), (1037, 'tensorflow.cast', 'tf.cast', (['x', 'output_dtype'], {}), True, 'import tensorflow as tf\n'), (1040, 'tensorflow_transform.tf_utils.reduce_batch_count_l_moments', 'tf_utils.reduce_batch_count_l_moments', (['x', 'reduce_instance_dims'], {}), False, 'from tensorflow_transform import tf_utils\n'), (1107, 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['"""mean_and_var_per_key"""'], {}), True, 'import tensorflow as tf\n'), (1108, 'tensorflow.cast', 'tf.cast', (['x', 'output_dtype'], {}), True, 'import tensorflow as tf\n'), (1111, 
'tensorflow_transform.tf_utils.reduce_batch_count_mean_and_var_per_key', 'tf_utils.reduce_batch_count_mean_and_var_per_key', (['x', 'key'], {'reduce_instance_dims': 'reduce_instance_dims'}), False, 'from tensorflow_transform import tf_utils\n'), (1373, 'numpy.pad', 'np.pad', (['a', 'padding_a'], {'mode': '"""constant"""'}), True, 'import numpy as np\n'), (1375, 'numpy.pad', 'np.pad', (['b', 'padding_b'], {'mode': '"""constant"""'}), True, 'import numpy as np\n'), (1419, 'numpy.zeros', 'np.zeros', (['output_shape'], {'dtype': 'self._output_numpy_dtype'}), True, 'import numpy as np\n'), (1420, 'numpy.zeros', 'np.zeros', (['output_shape'], {'dtype': 'self._output_numpy_dtype'}), True, 'import numpy as np\n'), (1474, 'numpy.logical_and', 'np.logical_and', (['valid_scale', '(accumulator.count_l4 > 0.0)'], {}), True, 'import numpy as np\n'), (1483, 'numpy.stack', 'np.stack', (['(l_skewness, l_kurtosis)'], {'axis': '(0)'}), True, 'import numpy as np\n'), (1484, 'numpy.apply_along_axis', 'np.apply_along_axis', (['gaussianization.compute_tukey_hh_params', '(0)', 'l_skewness_and_kurtosis'], {}), True, 'import numpy as np\n'), (1486, 'tensorflow_transform.gaussianization.tukey_hh_l_mean_and_scale', 'gaussianization.tukey_hh_l_mean_and_scale', (['h_params'], {}), False, 'from tensorflow_transform import gaussianization\n'), (1684, 'tensorflow_transform.annotators.annotate_vocab_size', 'annotators.annotate_vocab_size', (['vocabulary_key', 'vocabulary_size'], {}), False, 'from tensorflow_transform import annotators\n'), (1873, 'absl.logging.info', 'logging.info', (['"""If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation."""'], {}), False, 'from absl import logging\n'), (1878, 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['name', '"""vocabulary"""'], {}), True, 'import tensorflow as tf\n'), (1946, 'tensorflow.reshape', 'tf.reshape', (['labels', '[-1]'], {}), True, 'import tensorflow as tf\n'), (1947, 'tensorflow_transform.tf_utils.reduce_batch_weighted_cooccurrences', 'tf_utils.reduce_batch_weighted_cooccurrences', (['x', 'labels', 'weights'], {'filter_regex': 'filter_regex'}), False, 'from tensorflow_transform import tf_utils\n'), (2061, 'tensorflow_transform.analyzer_nodes.TensorInfo', 'analyzer_nodes.TensorInfo', (['tf.int64', '[]', 'None'], {}), False, 'from tensorflow_transform import analyzer_nodes\n'), (2069, 'tensorflow_transform.analyzer_nodes.TensorInfo', 'analyzer_nodes.TensorInfo', (['tf.int64', '[]', 'None'], {}), False, 'from tensorflow_transform import analyzer_nodes\n'), (2154, 'tfx_bsl.sketches.QuantilesSketch', 'sketches.QuantilesSketch', (['self._epsilon', 'self._max_num_values', 'self._num_features'], {}), False, 'from tfx_bsl import sketches\n'), (2183, 'numpy.array', 'np.array', (['result', 'self._bucket_numpy_dtype'], {}), True, 'import numpy as np\n'), (2208, 'pickle.dumps', 'pickle.dumps', (['accumulator'], {}), False, 'import pickle\n'), (2211, 'pickle.loads', 'pickle.loads', (['encoded_accumulator'], {}), False, 'import pickle\n'), (2261, 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['name', '"""quantiles"""'], {}), True, 'import tensorflow as tf\n'), (2329, 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['name', '"""quantiles_by_key"""'], {}), True, 'import tensorflow as tf\n'), (2337, 'tensorflow_transform.analyzer_nodes.get_input_tensors_value_nodes', 
'analyzer_nodes.get_input_tensors_value_nodes', (['((key, x) if weights is None else (key, x, weights))'], {}), False, 'from tensorflow_transform import analyzer_nodes\n'), (2340, 'tensorflow_transform.nodes.apply_multi_output_operation', 'nodes.apply_multi_output_operation', (['analyzer_nodes.CacheableCombinePerKeyAccumulate', 'input_values_node'], {'combiner': 'combiner'}), False, 'from tensorflow_transform import nodes\n'), (2345, 'tensorflow_transform.nodes.apply_operation', 'nodes.apply_operation', (['analyzer_nodes.CacheableCombinePerKeyMerge', '*accumulate_outputs_value_nodes'], {'combiner': 'combiner'}), False, 'from tensorflow_transform import nodes\n'), (2350, 'tensorflow_transform.nodes.apply_multi_output_operation', 'nodes.apply_multi_output_operation', (['analyzer_nodes.CacheableCombinePerKeyFormatKeys', 'merge_output_value_node'], {'combiner': 'combiner'}), False, 'from tensorflow_transform import nodes\n'), (2356, 'tensorflow_transform.nodes.apply_multi_output_operation', 'nodes.apply_multi_output_operation', (['analyzer_nodes.ScaleAndFlattenPerKeyBucketBouandaries', 'bucket_boundaries'], {'output_tensor_dtype': 'bucket_dtype'}), False, 'from tensorflow_transform import nodes\n'), (2496, 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['name', '"""covariance"""'], {}), True, 'import tensorflow as tf\n'), (2541, 'numpy.linalg.eigh', 'np.linalg.eigh', (['cov'], {}), True, 'import numpy as np\n'), (2632, 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['name', '"""pca"""'], {}), True, 'import tensorflow as tf\n'), (269, 'numpy.isnan', 'np.isnan', (['array'], {}), True, 'import numpy as np\n'), (292, 'numpy.full', 'np.full', (['shape', 'self._default_accumulator_value'], {}), True, 'import numpy as np\n'), (522, 'tensorflow.cast', 'tf.cast', (['(0 - minus_x_min)', 'output_dtype'], {}), True, 'import tensorflow as tf\n'), (522, 'tensorflow.cast', 'tf.cast', (['x_max', 'output_dtype'], {}), True, 'import tensorflow as tf\n'), (604, 'tensorflow.cast', 'tf.cast', (['(0 - minus_x_min)', 'output_dtype'], {}), True, 'import tensorflow as tf\n'), (605, 'tensorflow.cast', 'tf.cast', (['x_max', 'output_dtype'], {}), True, 'import tensorflow as tf\n'), (708, 'tensorflow_transform.tf_utils.get_values', 'tf_utils.get_values', (['x'], {}), False, 'from tensorflow_transform import tf_utils\n'), (731, 'tensorflow.cast', 'tf.cast', (['x', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (732, 'tensorflow.strings.as_string', 'tf.strings.as_string', (['bucket_indices'], {}), True, 'import tensorflow as tf\n'), (762, 'tensorflow.ones_like', 'tf.ones_like', (['x'], {'dtype': 'tf.int64'}), True, 'import tensorflow as tf\n'), (811, 'tensorflow.strings.to_number', 'tf.strings.to_number', (['keys', 'key_dtype'], {}), True, 'import tensorflow as tf\n'), (1151, 'numpy.nan_to_num', 'np.nan_to_num', (['variances'], {'copy': '(True)'}), True, 'import numpy as np\n'), (1154, 'numpy.nan_to_num', 'np.nan_to_num', (['weights'], {'copy': '(True)'}), True, 'import numpy as np\n'), (1157, 'numpy.array', 'np.array', (['counts'], {}), True, 'import numpy as np\n'), (1157, 'numpy.nan_to_num', 'np.nan_to_num', (['means'], {'copy': '(True)'}), True, 'import numpy as np\n'), (1195, 'numpy.array', 'np.array', (['(0)'], {}), True, 'import numpy as np\n'), (1195, 'numpy.array', 'np.array', (['(0.0)'], {}), True, 'import numpy as np\n'), (1199, 'numpy.zeros', 'np.zeros', (['output_shape'], {}), True, 'import numpy as np\n'), (1199, 'numpy.zeros', 'np.zeros', (['output_shape'], {}), True, 
'import numpy as np\n'), (1301, 'numpy.sum', 'np.sum', (['a.count'], {}), True, 'import numpy as np\n'), (1301, 'numpy.sum', 'np.sum', (['b.count'], {}), True, 'import numpy as np\n'), (1304, 'numpy.sum', 'np.sum', (['a.count'], {}), True, 'import numpy as np\n'), (1325, 'numpy.ones', 'np.ones', ([], {'shape': 'combined_total.shape'}), True, 'import numpy as np\n'), (1336, 'numpy.zeros', 'np.zeros', (['combined_mean.shape'], {}), True, 'import numpy as np\n'), (1389, 'numpy.array', 'np.array', (['count_l1'], {}), True, 'import numpy as np\n'), (1389, 'numpy.array', 'np.array', (['count_l2'], {}), True, 'import numpy as np\n'), (1389, 'numpy.array', 'np.array', (['count_l3'], {}), True, 'import numpy as np\n'), (1390, 'numpy.array', 'np.array', (['count_l4'], {}), True, 'import numpy as np\n'), (1390, 'numpy.nan_to_num', 'np.nan_to_num', (['l1'], {}), True, 'import numpy as np\n'), (1390, 'numpy.nan_to_num', 'np.nan_to_num', (['l2'], {}), True, 'import numpy as np\n'), (1391, 'numpy.nan_to_num', 'np.nan_to_num', (['l3'], {}), True, 'import numpy as np\n'), (1391, 'numpy.nan_to_num', 'np.nan_to_num', (['l4'], {}), True, 'import numpy as np\n'), (1522, 'numpy.sum', 'np.sum', (['a.count_l1'], {}), True, 'import numpy as np\n'), (1522, 'numpy.sum', 'np.sum', (['b.count_l1'], {}), True, 'import numpy as np\n'), (1524, 'numpy.sum', 'np.sum', (['b.count_l1'], {}), True, 'import numpy as np\n'), (1589, 're.sub', 're.sub', (['"""[^\\\\w\\\\s-]"""', '"""_"""', 'filename'], {}), False, 'import re\n'), (1695, 'tensorflow.dtypes.as_dtype', 'tf.dtypes.as_dtype', (['dtype'], {}), True, 'import tensorflow as tf\n'), (1954, 'tensorflow.reshape', 'tf.reshape', (['labels', '[-1]'], {}), True, 'import tensorflow as tf\n'), (1955, 'tensorflow_transform.tf_utils.reduce_batch_weighted_cooccurrences', 'tf_utils.reduce_batch_weighted_cooccurrences', (['x', 'labels', 'weights'], {'filter_regex': 'filter_regex'}), False, 'from tensorflow_transform import tf_utils\n'), (2007, 'tensorflow_transform.tf_utils.is_vocabulary_tfrecord_supported', 'tf_utils.is_vocabulary_tfrecord_supported', ([], {}), False, 'from tensorflow_transform import tf_utils\n'), (2054, 'tensorflow.compat.v1.get_default_graph', 'tf.compat.v1.get_default_graph', ([], {}), True, 'import tensorflow as tf\n'), (2160, 'numpy.ravel', 'np.ravel', (['next_input[0]'], {}), True, 'import numpy as np\n'), (2187, 'numpy.reshape', 'np.reshape', (['result', 'self._output_shape'], {}), True, 'import numpy as np\n'), (2378, 'numpy.zeros', 'np.zeros', (['(self._output_shape[0], self._output_shape[0])', 'self._numpy_dtype'], {}), True, 'import numpy as np\n'), (2380, 'numpy.zeros', 'np.zeros', (['(self._output_shape[0],)', 'self._numpy_dtype'], {}), True, 'import numpy as np\n'), (2381, 'numpy.zeros', 'np.zeros', (['()', 'self._numpy_dtype'], {}), True, 'import numpy as np\n'), (2413, 'numpy.sum', 'np.sum', (['batch_value'], {'axis': '(0)'}), True, 'import numpy as np\n'), (2414, 'numpy.shape', 'np.shape', (['batch_value'], {}), True, 'import numpy as np\n'), (2428, 'numpy.sum', 'np.sum', (['products'], {'axis': '(0)'}), True, 'import numpy as np\n'), (2429, 'numpy.sum', 'np.sum', (['vectors'], {'axis': '(0)'}), True, 'import numpy as np\n'), (2430, 'numpy.sum', 'np.sum', (['counts'], {'axis': '(0)'}), True, 'import numpy as np\n'), (338, 'tensorflow.as_dtype', 'tf.as_dtype', (['dtype'], {}), True, 'import tensorflow as tf\n'), (653, 'tensorflow.sparse.reduce_sum', 'tf.sparse.reduce_sum', (['x'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (711, 
'tensorflow.strings.as_string', 'tf.strings.as_string', (['x'], {}), True, 'import tensorflow as tf\n'), (714, 'tensorflow.strings.to_number', 'tf.strings.to_number', (['elements', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (718, 'tensorflow.range', 'tf.range', (['(11)'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (728, 'tensorflow.cast', 'tf.cast', (['boundaries', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (734, 'tensorflow.size', 'tf.size', (['boundaries'], {}), True, 'import tensorflow as tf\n'), (897, 'tensorflow.zeros', 'tf.zeros', (['[]', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (903, 'tensorflow.expand_dims', 'tf.expand_dims', (['x_count'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (1119, 'tensorflow.zeros_like', 'tf.zeros_like', (['key_means', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (1254, 'numpy.int64', 'np.int64', (['accumulator.count'], {}), True, 'import numpy as np\n'), (1478, 'numpy.zeros_like', 'np.zeros_like', (['accumulator.l3'], {}), True, 'import numpy as np\n'), (1482, 'numpy.zeros_like', 'np.zeros_like', (['accumulator.l4'], {}), True, 'import numpy as np\n'), (1489, 'numpy.ones_like', 'np.ones_like', (['accumulator.l2'], {}), True, 'import numpy as np\n'), (1643, 'tensorflow.compat.v1.logging.warn', 'tf.compat.v1.logging.warn', (['"""frequency_threshold %d <= 1 is a no-op, use None instead."""', 'frequency_threshold'], {}), True, 'import tensorflow as tf\n'), (1962, 'tensorflow_transform.tf_utils.reduce_batch_weighted_counts', 'tf_utils.reduce_batch_weighted_counts', (['x', 'weights'], {'filter_regex': 'filter_regex'}), False, 'from tensorflow_transform import tf_utils\n'), (1968, 'tensorflow_transform.tf_utils.reduce_batch_weighted_counts', 'tf_utils.reduce_batch_weighted_counts', (['x'], {'filter_regex': 'filter_regex'}), False, 'from tensorflow_transform import tf_utils\n'), (2151, 'numpy.prod', 'np.prod', (['feature_shape'], {'dtype': 'np.int64'}), True, 'import numpy as np\n'), (2162, 'numpy.ravel', 'np.ravel', (['next_input[1]'], {}), True, 'import numpy as np\n'), (2182, 'numpy.zeros', 'np.zeros', (['self._output_shape', 'self._bucket_numpy_dtype'], {}), True, 'import numpy as np\n'), (2192, 'tensorflow.as_dtype', 'tf.as_dtype', (['self._bucket_numpy_dtype'], {}), True, 'import tensorflow as tf\n'), (2406, 'numpy.shape', 'np.shape', (['batch_value'], {}), True, 'import numpy as np\n'), (2452, 'numpy.zeros', 'np.zeros', (['self._output_shape', 'self._numpy_dtype'], {}), True, 'import numpy as np\n'), (2464, 'tensorflow.as_dtype', 'tf.as_dtype', (['self._numpy_dtype'], {}), True, 'import tensorflow as tf\n'), (2534, 'numpy.eye', 'np.eye', ([], {'N': 'self._output_shape[0]', 'M': 'self._output_dim', 'dtype': 'self._numpy_dtype'}), True, 'import numpy as np\n'), (2539, 'numpy.outer', 'np.outer', (['expected_terms', 'expected_terms'], {}), True, 'import numpy as np\n'), (647, 'tensorflow_transform.tf_utils.get_values', 'tf_utils.get_values', (['x'], {}), False, 'from tensorflow_transform import tf_utils\n'), (650, 'tensorflow.cast', 'tf.cast', (['x', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (658, 'tensorflow.reduce_sum', 'tf.reduce_sum', ([], {'input_tensor': 'x', 'axis': '(0)'}), True, 'import tensorflow as tf\n'), (723, 'tensorflow.cast', 'tf.cast', (['min_value', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (723, 'tensorflow.cast', 'tf.cast', (['max_value', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (724, 'tensorflow.cast', 'tf.cast', 
(['boundaries', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (759, 'tensorflow.ones_like', 'tf.ones_like', (['x.values', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (1269, 'tensorflow.as_dtype', 'tf.as_dtype', (['np.int64'], {}), True, 'import tensorflow as tf\n'), (1271, 'tensorflow.as_dtype', 'tf.as_dtype', (['self._output_numpy_dtype'], {}), True, 'import tensorflow as tf\n'), (1273, 'tensorflow.as_dtype', 'tf.as_dtype', (['self._output_numpy_dtype'], {}), True, 'import tensorflow as tf\n'), (1275, 'tensorflow.as_dtype', 'tf.as_dtype', (['self._output_numpy_dtype'], {}), True, 'import tensorflow as tf\n'), (1499, 'tensorflow.as_dtype', 'tf.as_dtype', (['self._output_numpy_dtype'], {}), True, 'import tensorflow as tf\n'), (1587, 'tensorflow.compat.v1.get_default_graph', 'tf.compat.v1.get_default_graph', ([], {}), True, 'import tensorflow as tf\n'), (2409, 'numpy.transpose', 'np.transpose', (['batch_value'], {}), True, 'import numpy as np\n'), (2457, 'numpy.outer', 'np.outer', (['expected_terms', 'expected_terms'], {}), True, 'import numpy as np\n'), (1264, 'tensorflow.as_dtype', 'tf.as_dtype', (['self._output_numpy_dtype'], {}), True, 'import tensorflow as tf\n'), (1543, 'numpy.zeros_like', 'np.zeros_like', (['a_l1'], {}), True, 'import numpy as np\n'), (1546, 'numpy.zeros_like', 'np.zeros_like', (['a_l2'], {}), True, 'import numpy as np\n'), (1549, 'numpy.zeros_like', 'np.zeros_like', (['a_l3'], {}), True, 'import numpy as np\n'), (1552, 'numpy.zeros_like', 'np.zeros_like', (['a_l4'], {}), True, 'import numpy as np\n'), (2542, 'numpy.argsort', 'np.argsort', (['vals'], {}), True, 'import numpy as np\n')]
mingxuts/multi-center-fed-learning
9262ddaefb79b14ea44b61ffce200b82d31b0af1
import tensorflow as tf import logging tf.get_logger().setLevel(logging.ERROR) import numpy as np IMAGE_SIZE = 28 def get_conv_dimension(filter_list): with tf.Graph().as_default(): with tf.Session() as sess: """Model function for CNN.""" features = tf.placeholder( tf.float32, shape=[None, IMAGE_SIZE * IMAGE_SIZE], name='features') labels = tf.placeholder(tf.int64, shape=[None], name='labels') input_layer = tf.reshape(features, [-1, IMAGE_SIZE, IMAGE_SIZE, 1]) conv1 = tf.layers.conv2d( inputs=input_layer, filters=filter_list[0], kernel_size=[5, 5], padding="same", activation=tf.nn.relu) pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2) conv2 = tf.layers.conv2d( inputs=pool1, filters=filter_list[1], kernel_size=[5, 5], padding="same", activation=tf.nn.relu) pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2) # return int(np.prod(pool2.get_shape().as_list()[1:])) return pool2.get_shape().as_list() if __name__ == "__main__": tf.autograph.set_verbosity(0) print(get_conv_dimension([32, 64]))
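
# The graph above exists only to read pool2's static shape. The same result
# follows from plain shape arithmetic ("same" padding preserves the spatial
# size through each conv; every 2x2, stride-2 max-pool halves it), sketched
# below with a hypothetical helper (illustrative only, assuming the defaults
# used above):
def expected_conv_dimension(filter_list, image_size=IMAGE_SIZE, num_pools=2):
    side = image_size
    for _ in range(num_pools):
        side //= 2  # each 2x2, stride-2 pooling layer halves the side
    return [None, side, side, filter_list[-1]]

# For IMAGE_SIZE = 28 and filter_list = [32, 64]:
# 28 -> conv1 -> 28 -> pool1 -> 14 -> conv2 -> 14 -> pool2 -> 7,
# so get_conv_dimension([32, 64]) returns [None, 7, 7, 64]
# (7 * 7 * 64 = 3136 when flattened).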
[ "tensorflow.layers.conv2d", "tensorflow.Graph", "tensorflow.reshape", "tensorflow.placeholder", "tensorflow.get_logger", "tensorflow.layers.max_pooling2d", "tensorflow.autograph.set_verbosity", "tensorflow.Session" ]
models/femnist/cnn_container.py
[(38, 'tensorflow.autograph.set_verbosity', 'tf.autograph.set_verbosity', (['(0)'], {}), True, 'import tensorflow as tf\n'), (4, 'tensorflow.get_logger', 'tf.get_logger', ([], {}), True, 'import tensorflow as tf\n'), (13, 'tensorflow.Session', 'tf.Session', ([], {}), True, 'import tensorflow as tf\n'), (15, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, IMAGE_SIZE * IMAGE_SIZE]', 'name': '"""features"""'}), True, 'import tensorflow as tf\n'), (17, 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64'], {'shape': '[None]', 'name': '"""labels"""'}), True, 'import tensorflow as tf\n'), (18, 'tensorflow.reshape', 'tf.reshape', (['features', '[-1, IMAGE_SIZE, IMAGE_SIZE, 1]'], {}), True, 'import tensorflow as tf\n'), (19, 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'input_layer', 'filters': 'filter_list[0]', 'kernel_size': '[5, 5]', 'padding': '"""same"""', 'activation': 'tf.nn.relu'}), True, 'import tensorflow as tf\n'), (25, 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', ([], {'inputs': 'conv1', 'pool_size': '[2, 2]', 'strides': '(2)'}), True, 'import tensorflow as tf\n'), (26, 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'pool1', 'filters': 'filter_list[1]', 'kernel_size': '[5, 5]', 'padding': '"""same"""', 'activation': 'tf.nn.relu'}), True, 'import tensorflow as tf\n'), (32, 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', ([], {'inputs': 'conv2', 'pool_size': '[2, 2]', 'strides': '(2)'}), True, 'import tensorflow as tf\n'), (12, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n')]
ShaunHeNJU/DeepRec-1
e280fb19de179f03dc05e1d8e3f4f7459796d96e
import tensorflow as tf from tensorflow.python.ops.rnn_cell import * #from tensorflow.python.ops.rnn_cell_impl import _Linear from tensorflow.contrib.rnn.python.ops.core_rnn_cell import _Linear from tensorflow import keras from tensorflow.python.ops import math_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import variable_scope as vs from tensorflow.keras import backend as K def dice(_x, axis=-1, epsilon=0.000000001, name=''): with tf.variable_scope(name, reuse=tf.AUTO_REUSE): alphas = tf.get_variable('alpha'+name, _x.get_shape()[-1], initializer=tf.constant_initializer(0.0), dtype=_x.dtype) input_shape = list(_x.get_shape()) reduction_axes = list(range(len(input_shape))) del reduction_axes[axis] broadcast_shape = [1] * len(input_shape) broadcast_shape[axis] = input_shape[axis] # case: train mode (uses stats of the current batch) mean = tf.reduce_mean(_x, axis=reduction_axes) brodcast_mean = tf.reshape(mean, broadcast_shape) std = tf.reduce_mean(tf.square(_x - brodcast_mean) + epsilon, axis=reduction_axes) std = tf.sqrt(std) brodcast_std = tf.reshape(std, broadcast_shape) x_normed = (_x - brodcast_mean) / (brodcast_std + epsilon) # x_normed = tf.layers.batch_normalization(_x, center=False, scale=False) x_p = tf.sigmoid(x_normed) return alphas * (1.0 - x_p) * _x + x_p * _x class QAAttGRUCell(RNNCell): """Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078). Args: num_units: int, The number of units in the GRU cell. activation: Nonlinearity to use. Default: `tanh`. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. kernel_initializer: (optional) The initializer to use for the weight and projection matrices. bias_initializer: (optional) The initializer to use for the bias. """ def __init__(self, num_units, activation=None, reuse=None, kernel_initializer=None, bias_initializer=None): super(QAAttGRUCell, self).__init__(_reuse=reuse) self._num_units = num_units self._activation = activation or math_ops.tanh self._kernel_initializer = kernel_initializer self._bias_initializer = bias_initializer self._gate_linear = None self._candidate_linear = None @property def state_size(self): return self._num_units @property def output_size(self): return self._num_units def __call__(self, inputs, state, att_score): return self.call(inputs, state, att_score) def call(self, inputs, state, att_score=None): """Gated recurrent unit (GRU) with nunits cells.""" if self._gate_linear is None: bias_ones = self._bias_initializer if self._bias_initializer is None: bias_ones = init_ops.constant_initializer(1.0, dtype=inputs.dtype) with vs.variable_scope("gates"): # Reset gate and update gate. self._gate_linear = _Linear( [inputs, state], 2 * self._num_units, True, bias_initializer=bias_ones, kernel_initializer=self._kernel_initializer) value = math_ops.sigmoid(self._gate_linear([inputs, state])) r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1) r_state = r * state if self._candidate_linear is None: with vs.variable_scope("candidate"): self._candidate_linear = _Linear( [inputs, r_state], self._num_units, True, bias_initializer=self._bias_initializer, kernel_initializer=self._kernel_initializer) c = self._activation(self._candidate_linear([inputs, r_state])) new_h = (1. - att_score) * state + att_score * c return new_h, new_h class VecAttGRUCell(RNNCell): """Gated Recurrent Unit cell (cf. 
http://arxiv.org/abs/1406.1078). Args: num_units: int, The number of units in the GRU cell. activation: Nonlinearity to use. Default: `tanh`. reuse: (optional) Python boolean describing whether to reuse variables in an existing scope. If not `True`, and the existing scope already has the given variables, an error is raised. kernel_initializer: (optional) The initializer to use for the weight and projection matrices. bias_initializer: (optional) The initializer to use for the bias. """ def __init__(self, num_units, activation=None, reuse=None, kernel_initializer=None, bias_initializer=None): super(VecAttGRUCell, self).__init__(_reuse=reuse) self._num_units = num_units self._activation = activation or math_ops.tanh self._kernel_initializer = kernel_initializer self._bias_initializer = bias_initializer self._gate_linear = None self._candidate_linear = None @property def state_size(self): return self._num_units @property def output_size(self): return self._num_units def __call__(self, inputs, state, att_score): return self.call(inputs, state, att_score) def call(self, inputs, state, att_score=None): """Gated recurrent unit (GRU) with nunits cells.""" if self._gate_linear is None: bias_ones = self._bias_initializer if self._bias_initializer is None: bias_ones = init_ops.constant_initializer(1.0, dtype=inputs.dtype) with vs.variable_scope("gates"): # Reset gate and update gate. self._gate_linear = _Linear( [inputs, state], 2 * self._num_units, True, bias_initializer=bias_ones, kernel_initializer=self._kernel_initializer) value = math_ops.sigmoid(self._gate_linear([inputs, state])) r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1) r_state = r * state if self._candidate_linear is None: with vs.variable_scope("candidate"): self._candidate_linear = _Linear( [inputs, r_state], self._num_units, True, bias_initializer=self._bias_initializer, kernel_initializer=self._kernel_initializer) c = self._activation(self._candidate_linear([inputs, r_state])) u = (1.0 - att_score) * u new_h = u * state + (1 - u) * c return new_h, new_h def prelu(_x, scope=''): """parametric ReLU activation""" with tf.variable_scope(name_or_scope=scope, default_name="prelu"): _alpha = tf.get_variable("prelu_"+scope, shape=_x.get_shape()[-1], dtype=_x.dtype, initializer=tf.constant_initializer(0.1)) _zero = tf.constant(0,dtype=_x.dtype) # return tf.maximum(0.0, _x) + _alpha * tf.minimum(0.0, _x) return tf.maximum(_zero, _x) + _alpha * tf.minimum(_zero, _x) def calc_auc(raw_arr): """Summary Args: raw_arr (TYPE): Description Returns: TYPE: Description """ arr = sorted(raw_arr, key=lambda d:d[0], reverse=True) pos, neg = 0., 0. for record in arr: if record[1] == 1.: pos += 1 else: neg += 1 fp, tp = 0., 0. xy_arr = [] for record in arr: if record[1] == 1.: tp += 1 else: fp += 1 xy_arr.append([fp/neg, tp/pos]) auc = 0. prev_x = 0. prev_y = 0. for x, y in xy_arr: if x != prev_x: auc += ((x - prev_x) * (y + prev_y) / 2.) prev_x = x prev_y = y return auc def attention(query, facts, attention_size, mask, stag='null', mode='LIST', softmax_stag=1, time_major=False, return_alphas=False): if isinstance(facts, tuple): # In case of Bi-RNN, concatenate the forward and the backward RNN outputs. 
facts = tf.concat(facts, 2) if time_major: # (T,B,D) => (B,T,D) facts = tf.array_ops.transpose(facts, [1, 0, 2]) mask = tf.equal(mask, tf.ones_like(mask)) hidden_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer input_size = query.get_shape().as_list()[-1] # Trainable parameters w1 = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1)) w2 = tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1)) b = tf.Variable(tf.random_normal([attention_size], stddev=0.1)) v = tf.Variable(tf.random_normal([attention_size], stddev=0.1)) with tf.name_scope('v'): # Applying fully connected layer with non-linear activation to each of the B*T timestamps; # the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size tmp1 = tf.tensordot(facts, w1, axes=1) tmp2 = tf.tensordot(query, w2, axes=1) tmp2 = tf.reshape(tmp2, [-1, 1, tf.shape(tmp2)[-1]]) tmp = tf.tanh((tmp1 + tmp2) + b) # For each of the timestamps its vector of size A from `tmp` is reduced with `v` vector v_dot_tmp = tf.tensordot(tmp, v, axes=1, name='v_dot_tmp') # (B,T) shape key_masks = mask # [B, 1, T] # key_masks = tf.expand_dims(mask, 1) # [B, 1, T] paddings = tf.ones_like(v_dot_tmp) * (-2 ** 32 + 1) v_dot_tmp = tf.where(key_masks, v_dot_tmp, paddings) # [B, 1, T] alphas = tf.nn.softmax(v_dot_tmp, name='alphas') # (B,T) shape # Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape #output = tf.reduce_sum(facts * tf.expand_dims(alphas, -1), 1) output = facts * tf.expand_dims(alphas, -1) output = tf.reshape(output, tf.shape(facts)) # output = output / (facts.get_shape().as_list()[-1] ** 0.5) if not return_alphas: return output else: return output, alphas def din_attention(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False): if isinstance(facts, tuple): # In case of Bi-RNN, concatenate the forward and the backward RNN outputs. 
facts = tf.concat(facts, 2) print ("querry_size mismatch") query = tf.concat(values = [ query, query, ], axis=1) if time_major: # (T,B,D) => (B,T,D) facts = tf.array_ops.transpose(facts, [1, 0, 2]) mask = tf.equal(mask, tf.ones_like(mask)) facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer querry_size = query.get_shape().as_list()[-1] queries = tf.tile(query, [1, tf.shape(facts)[1]]) queries = tf.reshape(queries, tf.shape(facts)) din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1) d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag) d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag) d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag) d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]]) scores = d_layer_3_all # Mask # key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1]) # [B, T] key_masks = tf.expand_dims(mask, 1) # [B, 1, T] paddings = tf.ones_like(scores) * (-2 ** 32 + 1) scores = tf.where(key_masks, scores, paddings) # [B, 1, T] # Scale # scores = scores / (facts.get_shape().as_list()[-1] ** 0.5) # Activation if softmax_stag: scores = tf.nn.softmax(scores) # [B, 1, T] # Weighted sum if mode == 'SUM': output = tf.matmul(scores, facts) # [B, 1, H] # output = tf.reshape(output, [-1, tf.shape(facts)[-1]]) else: scores = tf.reshape(scores, [-1, tf.shape(facts)[1]]) output = facts * tf.expand_dims(scores, -1) output = tf.reshape(output, tf.shape(facts)) return output def din_fcn_attention(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False, forCnn=False): if isinstance(facts, tuple): # In case of Bi-RNN, concatenate the forward and the backward RNN outputs. 
def din_fcn_attention(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False, forCnn=False):
    if isinstance(facts, tuple):
        # In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
        facts = tf.concat(facts, 2)
    if len(facts.get_shape().as_list()) == 2:
        facts = tf.expand_dims(facts, 1)

    if time_major:
        # (T,B,D) => (B,T,D)
        facts = tf.array_ops.transpose(facts, [1, 0, 2])
    # Trainable parameters
    mask = tf.equal(mask, tf.ones_like(mask))
    facts_size = facts.get_shape().as_list()[-1]  # D value - hidden size of the RNN layer
    querry_size = query.get_shape().as_list()[-1]
    query = tf.layers.dense(query, facts_size, activation=None, name='f1' + stag)
    query = prelu(query)
    queries = tf.tile(query, [1, tf.shape(facts)[1]])
    queries = tf.reshape(queries, tf.shape(facts))
    din_all = tf.concat([queries, facts, queries - facts, queries * facts], axis=-1)
    d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)
    d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
    d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
    d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
    scores = d_layer_3_all
    # Mask
    # key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1])   # [B, T]
    key_masks = tf.expand_dims(mask, 1)  # [B, 1, T]
    paddings = tf.ones_like(scores) * (-2 ** 32 + 1)
    if not forCnn:
        scores = tf.where(key_masks, scores, paddings)  # [B, 1, T]

    # Scale
    # scores = scores / (facts.get_shape().as_list()[-1] ** 0.5)

    # Activation
    if softmax_stag:
        scores = tf.nn.softmax(scores)  # [B, 1, T]

    # Weighted sum
    if mode == 'SUM':
        output = tf.matmul(scores, facts)  # [B, 1, H]
        # output = tf.reshape(output, [-1, tf.shape(facts)[-1]])
    else:
        scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])
        output = facts * tf.expand_dims(scores, -1)
        output = tf.reshape(output, tf.shape(facts))
    if return_alphas:
        return output, scores
    return output


def self_attention(facts, ATTENTION_SIZE, mask, stag='null'):
    if len(facts.get_shape().as_list()) == 2:
        facts = tf.expand_dims(facts, 1)

    def cond(batch, output, i):
        return tf.less(i, tf.shape(batch)[1])

    def body(batch, output, i):
        self_attention_tmp = din_fcn_attention(batch[:, i, :], batch[:, 0:i + 1, :],
                                               ATTENTION_SIZE, mask[:, 0:i + 1],
                                               softmax_stag=1, stag=stag, mode='LIST')
        self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1)
        output = output.write(i, self_attention_tmp)
        return batch, output, i + 1

    output_ta = tf.TensorArray(dtype=tf.float32,
                               size=0,
                               dynamic_size=True,
                               element_shape=(facts[:, 0, :].get_shape()))
    _, output_op, _ = tf.while_loop(cond, body, [facts, output_ta, 0])
    self_attention = output_op.stack()
    self_attention = tf.transpose(self_attention, perm=[1, 0, 2])
    return self_attention


def self_all_attention(facts, ATTENTION_SIZE, mask, stag='null'):
    if len(facts.get_shape().as_list()) == 2:
        facts = tf.expand_dims(facts, 1)

    def cond(batch, output, i):
        return tf.less(i, tf.shape(batch)[1])

    def body(batch, output, i):
        self_attention_tmp = din_fcn_attention(batch[:, i, :], batch,
                                               ATTENTION_SIZE, mask,
                                               softmax_stag=1, stag=stag, mode='LIST')
        self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1)
        output = output.write(i, self_attention_tmp)
        return batch, output, i + 1

    output_ta = tf.TensorArray(dtype=tf.float32,
                               size=0,
                               dynamic_size=True,
                               element_shape=(facts[:, 0, :].get_shape()))
    _, output_op, _ = tf.while_loop(cond, body, [facts, output_ta, 0])
    self_attention = output_op.stack()
    self_attention = tf.transpose(self_attention, perm=[1, 0, 2])
    return self_attention


def din_fcn_shine(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False):
    if isinstance(facts, tuple):
        # In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
        facts = tf.concat(facts, 2)

    if time_major:
        # (T,B,D) => (B,T,D)
        facts = tf.array_ops.transpose(facts, [1, 0, 2])
    # Trainable parameters
    mask = tf.equal(mask, tf.ones_like(mask))
    facts_size = facts.get_shape().as_list()[-1]  # D value - hidden size of the RNN layer
    querry_size = query.get_shape().as_list()[-1]
    query = tf.layers.dense(query, facts_size, activation=None, name='f1_trans_shine' + stag)
    query = prelu(query)
    queries = tf.tile(query, [1, tf.shape(facts)[1]])
    queries = tf.reshape(queries, tf.shape(facts))
    din_all = tf.concat([queries, facts, queries - facts, queries * facts], axis=-1)
    d_layer_1_all = tf.layers.dense(din_all, facts_size, activation=tf.nn.sigmoid, name='f1_shine_att' + stag)
    d_layer_2_all = tf.layers.dense(d_layer_1_all, facts_size, activation=tf.nn.sigmoid, name='f2_shine_att' + stag)
    d_layer_2_all = tf.reshape(d_layer_2_all, tf.shape(facts))
    output = d_layer_2_all
    return output
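# A minimal usage sketch of din_fcn_attention (editor's illustration, not part
# of the original utils.py). The placeholder names and the shapes B=batch,
# T=20, D=36 are assumptions chosen only for this demo.
if __name__ == '__main__':
    demo_query = tf.placeholder(tf.float32, [None, 36], name='demo_query')
    demo_facts = tf.placeholder(tf.float32, [None, 20, 36], name='demo_facts')
    demo_mask = tf.placeholder(tf.float32, [None, 20], name='demo_mask')
    # mode='SUM' yields a mask-aware weighted sum over the sequence: [B, 1, D].
    demo_out = din_fcn_attention(demo_query, demo_facts, attention_size=36,
                                 mask=demo_mask, stag='demo', mode='SUM')
    print(demo_out.get_shape().as_list())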
[ "tensorflow.concat", "tensorflow.python.ops.array_ops.split", "tensorflow.reduce_sum", "tensorflow.minimum", "tensorflow.tanh", "tensorflow.where", "tensorflow.python.ops.init_ops.constant_initializer", "tensorflow.while_loop", "tensorflow.layers.dense", "tensorflow.name_scope", "tensorflow.square", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.tensordot", "tensorflow.matmul", "tensorflow.shape", "tensorflow.nn.softmax", "tensorflow.transpose", "tensorflow.constant", "tensorflow.contrib.rnn.python.ops.core_rnn_cell._Linear", "tensorflow.reduce_mean", "tensorflow.maximum", "tensorflow.reshape", "tensorflow.sigmoid", "tensorflow.expand_dims", "tensorflow.ones_like", "tensorflow.constant_initializer", "tensorflow.variable_scope", "tensorflow.sqrt", "tensorflow.array_ops.transpose", "tensorflow.random_normal" ]
modelzoo/features/MultiHashVariable/DIEN/script/utils.py
[(247, 'tensorflow.tensordot', 'tf.tensordot', (['tmp', 'v'], {'axes': '(1)', 'name': '"""v_dot_tmp"""'}), True, 'import tensorflow as tf\n'), (251, 'tensorflow.where', 'tf.where', (['key_masks', 'v_dot_tmp', 'paddings'], {}), True, 'import tensorflow as tf\n'), (252, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['v_dot_tmp'], {'name': '"""alphas"""'}), True, 'import tensorflow as tf\n'), (282, 'tensorflow.concat', 'tf.concat', (['[queries, facts, queries - facts, queries * facts]'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (283, 'tensorflow.layers.dense', 'tf.layers.dense', (['din_all', '(80)'], {'activation': 'tf.nn.sigmoid', 'name': "('f1_att' + stag)"}), True, 'import tensorflow as tf\n'), (284, 'tensorflow.layers.dense', 'tf.layers.dense', (['d_layer_1_all', '(40)'], {'activation': 'tf.nn.sigmoid', 'name': "('f2_att' + stag)"}), True, 'import tensorflow as tf\n'), (285, 'tensorflow.layers.dense', 'tf.layers.dense', (['d_layer_2_all', '(1)'], {'activation': 'None', 'name': "('f3_att' + stag)"}), True, 'import tensorflow as tf\n'), (290, 'tensorflow.expand_dims', 'tf.expand_dims', (['mask', '(1)'], {}), True, 'import tensorflow as tf\n'), (292, 'tensorflow.where', 'tf.where', (['key_masks', 'scores', 'paddings'], {}), True, 'import tensorflow as tf\n'), (325, 'tensorflow.layers.dense', 'tf.layers.dense', (['query', 'facts_size'], {'activation': 'None', 'name': "('f1' + stag)"}), True, 'import tensorflow as tf\n'), (329, 'tensorflow.concat', 'tf.concat', (['[queries, facts, queries - facts, queries * facts]'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (330, 'tensorflow.layers.dense', 'tf.layers.dense', (['din_all', '(80)'], {'activation': 'tf.nn.sigmoid', 'name': "('f1_att' + stag)"}), True, 'import tensorflow as tf\n'), (331, 'tensorflow.layers.dense', 'tf.layers.dense', (['d_layer_1_all', '(40)'], {'activation': 'tf.nn.sigmoid', 'name': "('f2_att' + stag)"}), True, 'import tensorflow as tf\n'), (332, 'tensorflow.layers.dense', 'tf.layers.dense', (['d_layer_2_all', '(1)'], {'activation': 'None', 'name': "('f3_att' + stag)"}), True, 'import tensorflow as tf\n'), (337, 'tensorflow.expand_dims', 'tf.expand_dims', (['mask', '(1)'], {}), True, 'import tensorflow as tf\n'), (380, 'tensorflow.while_loop', 'tf.while_loop', (['cond', 'body', '[facts, output_ta, 0]'], {}), True, 'import tensorflow as tf\n'), (382, 'tensorflow.transpose', 'tf.transpose', (['self_attention'], {'perm': '[1, 0, 2]'}), True, 'import tensorflow as tf\n'), (404, 'tensorflow.while_loop', 'tf.while_loop', (['cond', 'body', '[facts, output_ta, 0]'], {}), True, 'import tensorflow as tf\n'), (406, 'tensorflow.transpose', 'tf.transpose', (['self_attention'], {'perm': '[1, 0, 2]'}), True, 'import tensorflow as tf\n'), (421, 'tensorflow.layers.dense', 'tf.layers.dense', (['query', 'facts_size'], {'activation': 'None', 'name': "('f1_trans_shine' + stag)"}), True, 'import tensorflow as tf\n'), (425, 'tensorflow.concat', 'tf.concat', (['[queries, facts, queries - facts, queries * facts]'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (426, 'tensorflow.layers.dense', 'tf.layers.dense', (['din_all', 'facts_size'], {'activation': 'tf.nn.sigmoid', 'name': "('f1_shine_att' + stag)"}), True, 'import tensorflow as tf\n'), (427, 'tensorflow.layers.dense', 'tf.layers.dense', (['d_layer_1_all', 'facts_size'], {'activation': 'tf.nn.sigmoid', 'name': "('f2_shine_att' + stag)"}), True, 'import tensorflow as tf\n'), (13, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {'reuse': 'tf.AUTO_REUSE'}), 
True, 'import tensorflow as tf\n'), (25, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['_x'], {'axis': 'reduction_axes'}), True, 'import tensorflow as tf\n'), (26, 'tensorflow.reshape', 'tf.reshape', (['mean', 'broadcast_shape'], {}), True, 'import tensorflow as tf\n'), (28, 'tensorflow.sqrt', 'tf.sqrt', (['std'], {}), True, 'import tensorflow as tf\n'), (29, 'tensorflow.reshape', 'tf.reshape', (['std', 'broadcast_shape'], {}), True, 'import tensorflow as tf\n'), (32, 'tensorflow.sigmoid', 'tf.sigmoid', (['x_normed'], {}), True, 'import tensorflow as tf\n'), (90, 'tensorflow.python.ops.array_ops.split', 'array_ops.split', ([], {'value': 'value', 'num_or_size_splits': '(2)', 'axis': '(1)'}), False, 'from tensorflow.python.ops import array_ops\n'), (156, 'tensorflow.python.ops.array_ops.split', 'array_ops.split', ([], {'value': 'value', 'num_or_size_splits': '(2)', 'axis': '(1)'}), False, 'from tensorflow.python.ops import array_ops\n'), (174, 'tensorflow.variable_scope', 'tf.variable_scope', ([], {'name_or_scope': 'scope', 'default_name': '"""prelu"""'}), True, 'import tensorflow as tf\n'), (177, 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': '_x.dtype'}), True, 'import tensorflow as tf\n'), (222, 'tensorflow.concat', 'tf.concat', (['facts', '(2)'], {}), True, 'import tensorflow as tf\n'), (226, 'tensorflow.array_ops.transpose', 'tf.array_ops.transpose', (['facts', '[1, 0, 2]'], {}), True, 'import tensorflow as tf\n'), (228, 'tensorflow.ones_like', 'tf.ones_like', (['mask'], {}), True, 'import tensorflow as tf\n'), (233, 'tensorflow.random_normal', 'tf.random_normal', (['[hidden_size, attention_size]'], {'stddev': '(0.1)'}), True, 'import tensorflow as tf\n'), (234, 'tensorflow.random_normal', 'tf.random_normal', (['[input_size, attention_size]'], {'stddev': '(0.1)'}), True, 'import tensorflow as tf\n'), (235, 'tensorflow.random_normal', 'tf.random_normal', (['[attention_size]'], {'stddev': '(0.1)'}), True, 'import tensorflow as tf\n'), (236, 'tensorflow.random_normal', 'tf.random_normal', (['[attention_size]'], {'stddev': '(0.1)'}), True, 'import tensorflow as tf\n'), (238, 'tensorflow.name_scope', 'tf.name_scope', (['"""v"""'], {}), True, 'import tensorflow as tf\n'), (241, 'tensorflow.tensordot', 'tf.tensordot', (['facts', 'w1'], {'axes': '(1)'}), True, 'import tensorflow as tf\n'), (242, 'tensorflow.tensordot', 'tf.tensordot', (['query', 'w2'], {'axes': '(1)'}), True, 'import tensorflow as tf\n'), (244, 'tensorflow.tanh', 'tf.tanh', (['(tmp1 + tmp2 + b)'], {}), True, 'import tensorflow as tf\n'), (250, 'tensorflow.ones_like', 'tf.ones_like', (['v_dot_tmp'], {}), True, 'import tensorflow as tf\n'), (256, 'tensorflow.expand_dims', 'tf.expand_dims', (['alphas', '(-1)'], {}), True, 'import tensorflow as tf\n'), (257, 'tensorflow.shape', 'tf.shape', (['facts'], {}), True, 'import tensorflow as tf\n'), (267, 'tensorflow.concat', 'tf.concat', (['facts', '(2)'], {}), True, 'import tensorflow as tf\n'), (269, 'tensorflow.concat', 'tf.concat', ([], {'values': '[query, query]', 'axis': '(1)'}), True, 'import tensorflow as tf\n'), (276, 'tensorflow.array_ops.transpose', 'tf.array_ops.transpose', (['facts', '[1, 0, 2]'], {}), True, 'import tensorflow as tf\n'), (277, 'tensorflow.ones_like', 'tf.ones_like', (['mask'], {}), True, 'import tensorflow as tf\n'), (281, 'tensorflow.shape', 'tf.shape', (['facts'], {}), True, 'import tensorflow as tf\n'), (291, 'tensorflow.ones_like', 'tf.ones_like', (['scores'], {}), True, 'import tensorflow as tf\n'), (299, 'tensorflow.nn.softmax', 
'tf.nn.softmax', (['scores'], {}), True, 'import tensorflow as tf\n'), (303, 'tensorflow.matmul', 'tf.matmul', (['scores', 'facts'], {}), True, 'import tensorflow as tf\n'), (314, 'tensorflow.concat', 'tf.concat', (['facts', '(2)'], {}), True, 'import tensorflow as tf\n'), (316, 'tensorflow.expand_dims', 'tf.expand_dims', (['facts', '(1)'], {}), True, 'import tensorflow as tf\n'), (320, 'tensorflow.array_ops.transpose', 'tf.array_ops.transpose', (['facts', '[1, 0, 2]'], {}), True, 'import tensorflow as tf\n'), (322, 'tensorflow.ones_like', 'tf.ones_like', (['mask'], {}), True, 'import tensorflow as tf\n'), (328, 'tensorflow.shape', 'tf.shape', (['facts'], {}), True, 'import tensorflow as tf\n'), (338, 'tensorflow.ones_like', 'tf.ones_like', (['scores'], {}), True, 'import tensorflow as tf\n'), (340, 'tensorflow.where', 'tf.where', (['key_masks', 'scores', 'paddings'], {}), True, 'import tensorflow as tf\n'), (347, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['scores'], {}), True, 'import tensorflow as tf\n'), (351, 'tensorflow.matmul', 'tf.matmul', (['scores', 'facts'], {}), True, 'import tensorflow as tf\n'), (363, 'tensorflow.expand_dims', 'tf.expand_dims', (['facts', '(1)'], {}), True, 'import tensorflow as tf\n'), (372, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self_attention_tmp', '(1)'], {}), True, 'import tensorflow as tf\n'), (387, 'tensorflow.expand_dims', 'tf.expand_dims', (['facts', '(1)'], {}), True, 'import tensorflow as tf\n'), (396, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self_attention_tmp', '(1)'], {}), True, 'import tensorflow as tf\n'), (412, 'tensorflow.concat', 'tf.concat', (['facts', '(2)'], {}), True, 'import tensorflow as tf\n'), (416, 'tensorflow.array_ops.transpose', 'tf.array_ops.transpose', (['facts', '[1, 0, 2]'], {}), True, 'import tensorflow as tf\n'), (418, 'tensorflow.ones_like', 'tf.ones_like', (['mask'], {}), True, 'import tensorflow as tf\n'), (424, 'tensorflow.shape', 'tf.shape', (['facts'], {}), True, 'import tensorflow as tf\n'), (428, 'tensorflow.shape', 'tf.shape', (['facts'], {}), True, 'import tensorflow as tf\n'), (179, 'tensorflow.maximum', 'tf.maximum', (['_zero', '_x'], {}), True, 'import tensorflow as tf\n'), (307, 'tensorflow.expand_dims', 'tf.expand_dims', (['scores', '(-1)'], {}), True, 'import tensorflow as tf\n'), (308, 'tensorflow.shape', 'tf.shape', (['facts'], {}), True, 'import tensorflow as tf\n'), (355, 'tensorflow.expand_dims', 'tf.expand_dims', (['scores', '(-1)'], {}), True, 'import tensorflow as tf\n'), (356, 'tensorflow.shape', 'tf.shape', (['facts'], {}), True, 'import tensorflow as tf\n'), (15, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (27, 'tensorflow.square', 'tf.square', (['(_x - brodcast_mean)'], {}), True, 'import tensorflow as tf\n'), (80, 'tensorflow.python.ops.init_ops.constant_initializer', 'init_ops.constant_initializer', (['(1.0)'], {'dtype': 'inputs.dtype'}), False, 'from tensorflow.python.ops import init_ops\n'), (81, 'tensorflow.python.ops.variable_scope.variable_scope', 'vs.variable_scope', (['"""gates"""'], {}), True, 'from tensorflow.python.ops import variable_scope as vs\n'), (82, 'tensorflow.contrib.rnn.python.ops.core_rnn_cell._Linear', '_Linear', (['[inputs, state]', '(2 * self._num_units)', '(True)'], {'bias_initializer': 'bias_ones', 'kernel_initializer': 'self._kernel_initializer'}), False, 'from tensorflow.contrib.rnn.python.ops.core_rnn_cell import _Linear\n'), (94, 'tensorflow.python.ops.variable_scope.variable_scope', 
'vs.variable_scope', (['"""candidate"""'], {}), True, 'from tensorflow.python.ops import variable_scope as vs\n'), (95, 'tensorflow.contrib.rnn.python.ops.core_rnn_cell._Linear', '_Linear', (['[inputs, r_state]', 'self._num_units', '(True)'], {'bias_initializer': 'self._bias_initializer', 'kernel_initializer': 'self._kernel_initializer'}), False, 'from tensorflow.contrib.rnn.python.ops.core_rnn_cell import _Linear\n'), (146, 'tensorflow.python.ops.init_ops.constant_initializer', 'init_ops.constant_initializer', (['(1.0)'], {'dtype': 'inputs.dtype'}), False, 'from tensorflow.python.ops import init_ops\n'), (147, 'tensorflow.python.ops.variable_scope.variable_scope', 'vs.variable_scope', (['"""gates"""'], {}), True, 'from tensorflow.python.ops import variable_scope as vs\n'), (148, 'tensorflow.contrib.rnn.python.ops.core_rnn_cell._Linear', '_Linear', (['[inputs, state]', '(2 * self._num_units)', '(True)'], {'bias_initializer': 'bias_ones', 'kernel_initializer': 'self._kernel_initializer'}), False, 'from tensorflow.contrib.rnn.python.ops.core_rnn_cell import _Linear\n'), (160, 'tensorflow.python.ops.variable_scope.variable_scope', 'vs.variable_scope', (['"""candidate"""'], {}), True, 'from tensorflow.python.ops import variable_scope as vs\n'), (161, 'tensorflow.contrib.rnn.python.ops.core_rnn_cell._Linear', '_Linear', (['[inputs, r_state]', 'self._num_units', '(True)'], {'bias_initializer': 'self._bias_initializer', 'kernel_initializer': 'self._kernel_initializer'}), False, 'from tensorflow.contrib.rnn.python.ops.core_rnn_cell import _Linear\n'), (176, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.1)'], {}), True, 'import tensorflow as tf\n'), (179, 'tensorflow.minimum', 'tf.minimum', (['_zero', '_x'], {}), True, 'import tensorflow as tf\n'), (280, 'tensorflow.shape', 'tf.shape', (['facts'], {}), True, 'import tensorflow as tf\n'), (286, 'tensorflow.shape', 'tf.shape', (['facts'], {}), True, 'import tensorflow as tf\n'), (327, 'tensorflow.shape', 'tf.shape', (['facts'], {}), True, 'import tensorflow as tf\n'), (333, 'tensorflow.shape', 'tf.shape', (['facts'], {}), True, 'import tensorflow as tf\n'), (366, 'tensorflow.shape', 'tf.shape', (['batch'], {}), True, 'import tensorflow as tf\n'), (390, 'tensorflow.shape', 'tf.shape', (['batch'], {}), True, 'import tensorflow as tf\n'), (423, 'tensorflow.shape', 'tf.shape', (['facts'], {}), True, 'import tensorflow as tf\n'), (243, 'tensorflow.shape', 'tf.shape', (['tmp2'], {}), True, 'import tensorflow as tf\n'), (306, 'tensorflow.shape', 'tf.shape', (['facts'], {}), True, 'import tensorflow as tf\n'), (354, 'tensorflow.shape', 'tf.shape', (['facts'], {}), True, 'import tensorflow as tf\n')]
kpedro88/triton-inference-server
37b3441e59bd0da314f428e1dcddf0a2f67d52e1
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#  * Neither the name of NVIDIA CORPORATION nor the names of its
#    contributors may be used to endorse or promote products derived
#    from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import argparse
from builtins import range
import os
import sys

import numpy as np

import gen_ensemble_model_utils as emu

FLAGS = None
np_dtype_string = np.dtype(object)


def np_to_model_dtype(np_dtype):
    if np_dtype == np.bool:
        return "TYPE_BOOL"
    elif np_dtype == np.int8:
        return "TYPE_INT8"
    elif np_dtype == np.int16:
        return "TYPE_INT16"
    elif np_dtype == np.int32:
        return "TYPE_INT32"
    elif np_dtype == np.int64:
        return "TYPE_INT64"
    elif np_dtype == np.uint8:
        return "TYPE_UINT8"
    elif np_dtype == np.uint16:
        return "TYPE_UINT16"
    elif np_dtype == np.float16:
        return "TYPE_FP16"
    elif np_dtype == np.float32:
        return "TYPE_FP32"
    elif np_dtype == np.float64:
        return "TYPE_FP64"
    elif np_dtype == np_dtype_string:
        return "TYPE_STRING"
    return None


def np_to_tf_dtype(np_dtype):
    if np_dtype == np.bool:
        return tf.bool
    elif np_dtype == np.int8:
        return tf.int8
    elif np_dtype == np.int16:
        return tf.int16
    elif np_dtype == np.int32:
        return tf.int32
    elif np_dtype == np.int64:
        return tf.int64
    elif np_dtype == np.uint8:
        return tf.uint8
    elif np_dtype == np.uint16:
        return tf.uint16
    elif np_dtype == np.float16:
        return tf.float16
    elif np_dtype == np.float32:
        return tf.float32
    elif np_dtype == np.float64:
        return tf.float64
    elif np_dtype == np_dtype_string:
        return tf.string
    return None


def np_to_c2_dtype(np_dtype):
    if np_dtype == np.bool:
        return c2core.DataType.BOOL
    elif np_dtype == np.int8:
        return c2core.DataType.INT8
    elif np_dtype == np.int16:
        return c2core.DataType.INT16
    elif np_dtype == np.int32:
        return c2core.DataType.INT32
    elif np_dtype == np.int64:
        return c2core.DataType.INT64
    elif np_dtype == np.uint8:
        return c2core.DataType.UINT8
    elif np_dtype == np.uint16:
        return c2core.DataType.UINT16
    elif np_dtype == np.float16:
        return c2core.DataType.FLOAT16
    elif np_dtype == np.float32:
        return c2core.DataType.FLOAT
    elif np_dtype == np.float64:
        return c2core.DataType.DOUBLE
    elif np_dtype == np_dtype_string:
        return c2core.DataType.STRING
    return None


def np_to_trt_dtype(np_dtype):
    if np_dtype == np.bool:
        return trt.bool
    elif np_dtype == np.int8:
        return trt.int8
    elif np_dtype == np.int32:
        return trt.int32
    elif np_dtype == np.float16:
        return trt.float16
    elif np_dtype == np.float32:
        return trt.float32
    return None


def np_to_onnx_dtype(np_dtype):
    if np_dtype == np.bool:
        return onnx.TensorProto.BOOL
    elif np_dtype == np.int8:
        return onnx.TensorProto.INT8
    elif np_dtype == np.int16:
        return onnx.TensorProto.INT16
    elif np_dtype == np.int32:
        return onnx.TensorProto.INT32
    elif np_dtype == np.int64:
        return onnx.TensorProto.INT64
    elif np_dtype == np.uint8:
        return onnx.TensorProto.UINT8
    elif np_dtype == np.uint16:
        return onnx.TensorProto.UINT16
    elif np_dtype == np.float16:
        return onnx.TensorProto.FLOAT16
    elif np_dtype == np.float32:
        return onnx.TensorProto.FLOAT
    elif np_dtype == np.float64:
        return onnx.TensorProto.DOUBLE
    elif np_dtype == np_dtype_string:
        return onnx.TensorProto.STRING
    return None


def np_to_torch_dtype(np_dtype):
    if np_dtype == np.bool:
        return torch.bool
    elif np_dtype == np.int8:
        return torch.int8
    elif np_dtype == np.int16:
        return torch.int16
    elif np_dtype == np.int32:
        return torch.int
    elif np_dtype == np.int64:
        return torch.long
    elif np_dtype == np.uint8:
        return torch.uint8
    elif np_dtype == np.uint16:
        return None  # Not supported in Torch
    elif np_dtype == np.float16:
        return None
    elif np_dtype == np.float32:
        return torch.float
    elif np_dtype == np.float64:
        return torch.double
    elif np_dtype == np_dtype_string:
        return None  # Not supported in Torch


def create_graphdef_modelfile(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, swap=False):
    if not tu.validate_for_tf_model(input_dtype, output0_dtype, output1_dtype,
                                    input_shape, output0_shape, output1_shape):
        return

    tf_input_dtype = np_to_tf_dtype(input_dtype)
    tf_output0_dtype = np_to_tf_dtype(output0_dtype)
    tf_output1_dtype = np_to_tf_dtype(output1_dtype)

    # Create the model. If non-batching then don't include the batch
    # dimension.
    tf.reset_default_graph()
    if max_batch == 0:
        in0 = tf.placeholder(tf_input_dtype, tu.shape_to_tf_shape(input_shape), "INPUT0")
        in1 = tf.placeholder(tf_input_dtype, tu.shape_to_tf_shape(input_shape), "INPUT1")
    else:
        in0 = tf.placeholder(tf_input_dtype,
                             [None,] + tu.shape_to_tf_shape(input_shape), "INPUT0")
        in1 = tf.placeholder(tf_input_dtype,
                             [None,] + tu.shape_to_tf_shape(input_shape), "INPUT1")

    # If the input is a string, then convert each string to the
    # equivalent int32 value.
    if tf_input_dtype == tf.string:
        in0 = tf.strings.to_number(in0, tf.int32)
        in1 = tf.strings.to_number(in1, tf.int32)

    add = tf.add(in0, in1, "ADD")
    sub = tf.subtract(in0, in1, "SUB")

    # Cast or convert result to the output dtype.
    if tf_output0_dtype == tf.string:
        cast0 = tf.dtypes.as_string(add if not swap else sub, name="TOSTR0")
    else:
        cast0 = tf.cast(add if not swap else sub, tf_output0_dtype, "CAST0")
    if tf_output1_dtype == tf.string:
        cast1 = tf.dtypes.as_string(sub if not swap else add, name="TOSTR1")
    else:
        cast1 = tf.cast(sub if not swap else add, tf_output1_dtype, "CAST1")

    out0 = tf.identity(cast0, "OUTPUT0")
    out1 = tf.identity(cast1, "OUTPUT1")

    # Use a different model name for the non-batching variant
    model_name = tu.get_model_name(
        "graphdef_nobatch" if max_batch == 0 else "graphdef", input_dtype,
        output0_dtype, output1_dtype)

    model_version_dir = models_dir + "/" + model_name + "/" + str(model_version)

    try:
        os.makedirs(model_version_dir)
    except OSError as ex:
        pass  # ignore existing dir

    with tf.Session() as sess:
        graph_io.write_graph(sess.graph.as_graph_def(),
                             model_version_dir,
                             "model.graphdef",
                             as_text=False)


def create_graphdef_modelconfig(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, output0_label_cnt, version_policy):
    if not tu.validate_for_tf_model(input_dtype, output0_dtype, output1_dtype,
                                    input_shape, output0_shape, output1_shape):
        return

    # Unpack version policy
    version_policy_str = "{ latest { num_versions: 1 }}"
    if version_policy is not None:
        type, val = version_policy
        if type == 'latest':
            version_policy_str = "{{ latest {{ num_versions: {} }}}}".format(val)
        elif type == 'specific':
            version_policy_str = "{{ specific {{ versions: {} }}}}".format(val)
        else:
            version_policy_str = "{ all { }}"

    # Use a different model name for the non-batching variant
    model_name = tu.get_model_name(
        "graphdef_nobatch" if max_batch == 0 else "graphdef", input_dtype,
        output0_dtype, output1_dtype)
    config_dir = models_dir + "/" + model_name
    config = '''
name: "{}"
platform: "tensorflow_graphdef"
max_batch_size: {}
version_policy: {}
input [
  {{
    name: "INPUT0"
    data_type: {}
    dims: [ {} ]
  }},
  {{
    name: "INPUT1"
    data_type: {}
    dims: [ {} ]
  }}
]
output [
  {{
    name: "OUTPUT0"
    data_type: {}
    dims: [ {} ]
    label_filename: "output0_labels.txt"
  }},
  {{
    name: "OUTPUT1"
    data_type: {}
    dims: [ {} ]
  }}
]
'''.format(model_name, max_batch, version_policy_str,
           np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape),
           np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape),
           np_to_model_dtype(output0_dtype), tu.shape_to_dims_str(output0_shape),
           np_to_model_dtype(output1_dtype), tu.shape_to_dims_str(output1_shape))

    try:
        os.makedirs(config_dir)
    except OSError as ex:
        pass  # ignore existing dir

    with open(config_dir + "/config.pbtxt", "w") as cfile:
        cfile.write(config)

    with open(config_dir + "/output0_labels.txt", "w") as lfile:
        for l in range(output0_label_cnt):
            lfile.write("label" + str(l) + "\n")
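# Editor's illustration (hedged, not part of the original script): for
# max_batch=8, int32 I/O, and shape (16,), the template above renders roughly
# the following config.pbtxt, assuming tu.get_model_name yields a name of the
# form "graphdef_int32_int32_int32" (the helper's exact output is not shown
# in this excerpt):
#
#   name: "graphdef_int32_int32_int32"
#   platform: "tensorflow_graphdef"
#   max_batch_size: 8
#   version_policy: { latest { num_versions: 1 }}
#   input [
#     { name: "INPUT0" data_type: TYPE_INT32 dims: [ 16 ] },
#     { name: "INPUT1" data_type: TYPE_INT32 dims: [ 16 ] }
#   ]
#   output [
#     { name: "OUTPUT0" data_type: TYPE_INT32 dims: [ 16 ]
#       label_filename: "output0_labels.txt" },
#     { name: "OUTPUT1" data_type: TYPE_INT32 dims: [ 16 ] }
#   ]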
def create_savedmodel_modelfile(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, swap=False):
    if not tu.validate_for_tf_model(input_dtype, output0_dtype, output1_dtype,
                                    input_shape, output0_shape, output1_shape):
        return

    tf_input_dtype = np_to_tf_dtype(input_dtype)
    tf_output0_dtype = np_to_tf_dtype(output0_dtype)
    tf_output1_dtype = np_to_tf_dtype(output1_dtype)

    # Create the model. If non-batching then don't include the batch
    # dimension.
    tf.reset_default_graph()
    if max_batch == 0:
        in0 = tf.placeholder(tf_input_dtype, tu.shape_to_tf_shape(input_shape),
                             "TENSOR_INPUT0")
        in1 = tf.placeholder(tf_input_dtype, tu.shape_to_tf_shape(input_shape),
                             "TENSOR_INPUT1")
    else:
        in0 = tf.placeholder(tf_input_dtype,
                             [None,] + tu.shape_to_tf_shape(input_shape),
                             "TENSOR_INPUT0")
        in1 = tf.placeholder(tf_input_dtype,
                             [None,] + tu.shape_to_tf_shape(input_shape),
                             "TENSOR_INPUT1")

    # If the input is a string, then convert each string to the
    # equivalent float value.
    if tf_input_dtype == tf.string:
        in0 = tf.strings.to_number(in0, tf.int32)
        in1 = tf.strings.to_number(in1, tf.int32)

    add = tf.add(in0, in1, "ADD")
    sub = tf.subtract(in0, in1, "SUB")

    # Cast or convert result to the output dtype.
    if tf_output0_dtype == tf.string:
        cast0 = tf.dtypes.as_string(add if not swap else sub, name="TOSTR0")
    else:
        cast0 = tf.cast(add if not swap else sub, tf_output0_dtype, "CAST0")
    if tf_output1_dtype == tf.string:
        cast1 = tf.dtypes.as_string(sub if not swap else add, name="TOSTR1")
    else:
        cast1 = tf.cast(sub if not swap else add, tf_output1_dtype, "CAST1")

    out0 = tf.identity(cast0, "TENSOR_OUTPUT0")
    out1 = tf.identity(cast1, "TENSOR_OUTPUT1")

    # Use a different model name for the non-batching variant
    model_name = tu.get_model_name(
        "savedmodel_nobatch" if max_batch == 0 else "savedmodel", input_dtype,
        output0_dtype, output1_dtype)

    model_version_dir = models_dir + "/" + model_name + "/" + str(model_version)

    try:
        os.makedirs(model_version_dir)
    except OSError as ex:
        pass  # ignore existing dir

    with tf.Session() as sess:
        input0_tensor = tf.get_default_graph().get_tensor_by_name(
            "TENSOR_INPUT0:0")
        input1_tensor = tf.get_default_graph().get_tensor_by_name(
            "TENSOR_INPUT1:0")
        output0_tensor = tf.get_default_graph().get_tensor_by_name(
            "TENSOR_OUTPUT0:0")
        output1_tensor = tf.get_default_graph().get_tensor_by_name(
            "TENSOR_OUTPUT1:0")
        tf.saved_model.simple_save(sess,
                                   model_version_dir + "/model.savedmodel",
                                   inputs={
                                       "INPUT0": input0_tensor,
                                       "INPUT1": input1_tensor
                                   },
                                   outputs={
                                       "OUTPUT0": output0_tensor,
                                       "OUTPUT1": output1_tensor
                                   })


def create_savedmodel_modelconfig(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, output0_label_cnt, version_policy):
    if not tu.validate_for_tf_model(input_dtype, output0_dtype, output1_dtype,
                                    input_shape, output0_shape, output1_shape):
        return

    # Unpack version policy
    version_policy_str = "{ latest { num_versions: 1 }}"
    if version_policy is not None:
        type, val = version_policy
        if type == 'latest':
            version_policy_str = "{{ latest {{ num_versions: {} }}}}".format(val)
        elif type == 'specific':
            version_policy_str = "{{ specific {{ versions: {} }}}}".format(val)
        else:
            version_policy_str = "{ all { }}"

    # Use a different model name for the non-batching variant
    model_name = tu.get_model_name(
        "savedmodel_nobatch" if max_batch == 0 else "savedmodel", input_dtype,
        output0_dtype, output1_dtype)
    config_dir = models_dir + "/" + model_name
    config = '''
name: "{}"
platform: "tensorflow_savedmodel"
max_batch_size: {}
version_policy: {}
input [
  {{
    name: "INPUT0"
    data_type: {}
    dims: [ {} ]
  }},
  {{
    name: "INPUT1"
    data_type: {}
    dims: [ {} ]
  }}
]
output [
  {{
    name: "OUTPUT0"
    data_type: {}
    dims: [ {} ]
    label_filename: "output0_labels.txt"
  }},
  {{
    name: "OUTPUT1"
    data_type: {}
    dims: [ {} ]
  }}
]
'''.format(model_name, max_batch, version_policy_str,
           np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape),
           np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape),
           np_to_model_dtype(output0_dtype), tu.shape_to_dims_str(output0_shape),
           np_to_model_dtype(output1_dtype), tu.shape_to_dims_str(output1_shape))

    try:
        os.makedirs(config_dir)
    except OSError as ex:
        pass  # ignore existing dir

    with open(config_dir + "/config.pbtxt", "w") as cfile:
        cfile.write(config)

    with open(config_dir + "/output0_labels.txt", "w") as lfile:
        for l in range(output0_label_cnt):
            lfile.write("label" + str(l) + "\n")


def create_netdef_modelfile(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, swap=False):
    if not tu.validate_for_c2_model(input_dtype, output0_dtype, output1_dtype,
                                    input_shape, output0_shape, output1_shape):
        return

    c2_input_dtype = np_to_c2_dtype(input_dtype)
    c2_output0_dtype = np_to_c2_dtype(output0_dtype)
    c2_output1_dtype = np_to_c2_dtype(output1_dtype)

    model_name = tu.get_model_name(
        "netdef_nobatch" if max_batch == 0 else "netdef", input_dtype,
        output0_dtype, output1_dtype)

    # Create the model
    model = c2model_helper.ModelHelper(name=model_name)
    add = model.net.Add(["INPUT0", "INPUT1"], "add")
    sub = model.net.Sub(["INPUT0", "INPUT1"], "sub")
    out0 = model.net.Cast(["add" if not swap else "sub"], "OUTPUT0",
                          to=c2_output0_dtype)
    out1 = model.net.Cast(["sub" if not swap else "add"], "OUTPUT1",
                          to=c2_output1_dtype)
    predict_net, _ = c2model_helper.ExtractPredictorNet(model.Proto(), \
        input_blobs = ["INPUT0", "INPUT1"], output_blobs = ["OUTPUT0", "OUTPUT1"])

    model_version_dir = models_dir + "/" + model_name + "/" + str(model_version)

    try:
        os.makedirs(model_version_dir)
    except OSError as ex:
        pass  # ignore existing dir

    with open(model_version_dir + "/model.netdef", "wb") as f:
        f.write(predict_net.Proto().SerializeToString())
    with open(model_version_dir + "/init_model.netdef", "wb") as f:
        f.write(model.InitProto().SerializeToString())


def create_netdef_modelconfig(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, output0_label_cnt, version_policy):
    if not tu.validate_for_c2_model(input_dtype, output0_dtype, output1_dtype,
                                    input_shape, output0_shape, output1_shape):
        return

    # Unpack version policy
    version_policy_str = "{ latest { num_versions: 1 }}"
    if version_policy is not None:
        type, val = version_policy
        if type == 'latest':
            version_policy_str = "{{ latest {{ num_versions: {} }}}}".format(val)
        elif type == 'specific':
            version_policy_str = "{{ specific {{ versions: {} }}}}".format(val)
        else:
            version_policy_str = "{ all { }}"

    # Use a different model name for the non-batching variant
    model_name = tu.get_model_name(
        "netdef_nobatch" if max_batch == 0 else "netdef", input_dtype,
        output0_dtype, output1_dtype)
    config_dir = models_dir + "/" + model_name
    config = '''
name: "{}"
platform: "caffe2_netdef"
max_batch_size: {}
version_policy: {}
input [
  {{
    name: "INPUT0"
    data_type: {}
    dims: [ {} ]
  }},
  {{
    name: "INPUT1"
    data_type: {}
    dims: [ {} ]
  }}
]
output [
  {{
    name: "OUTPUT0"
    data_type: {}
    dims: [ {} ]
    label_filename: "output0_labels.txt"
  }},
  {{
    name: "OUTPUT1"
    data_type: {}
    dims: [ {} ]
  }}
]
'''.format(model_name, max_batch, version_policy_str,
           np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape),
           np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape),
           np_to_model_dtype(output0_dtype), tu.shape_to_dims_str(output0_shape),
           np_to_model_dtype(output1_dtype), tu.shape_to_dims_str(output1_shape))

    try:
        os.makedirs(config_dir)
    except OSError as ex:
        pass  # ignore existing dir

    with open(config_dir + "/config.pbtxt", "w") as cfile:
        cfile.write(config)

    with open(config_dir + "/output0_labels.txt", "w") as lfile:
        for l in range(output0_label_cnt):
            lfile.write("label" + str(l) + "\n")


def create_plan_dynamic_rf_modelfile(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, swap, min_dim, max_dim):
    trt_input_dtype = np_to_trt_dtype(input_dtype)
    trt_output0_dtype = np_to_trt_dtype(output0_dtype)
    trt_output1_dtype = np_to_trt_dtype(output1_dtype)
    trt_memory_format = trt.TensorFormat.LINEAR

    # Create the model
    TRT_LOGGER = trt.Logger(trt.Logger.INFO)
    builder = trt.Builder(TRT_LOGGER)
    network = builder.create_network(
        1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
    if max_batch == 0:
        input_with_batchsize = [i for i in input_shape]
    else:
        input_with_batchsize = [-1] + [i for i in input_shape]

    in0 = network.add_input("INPUT0", trt_input_dtype, input_with_batchsize)
    in1 = network.add_input("INPUT1", trt_input_dtype, input_with_batchsize)
    add = network.add_elementwise(in0, in1, trt.ElementWiseOperation.SUM)
    sub = network.add_elementwise(in0, in1, trt.ElementWiseOperation.SUB)

    out0 = add if not swap else sub
    out1 = sub if not swap else add

    out0.get_output(0).name = "OUTPUT0"
    out1.get_output(0).name = "OUTPUT1"
    network.mark_output(out0.get_output(0))
    network.mark_output(out1.get_output(0))

    out0.get_output(0).dtype = trt_output0_dtype
    out1.get_output(0).dtype = trt_output1_dtype

    in0.allowed_formats = 1 << int(trt_memory_format)
    in1.allowed_formats = 1 << int(trt_memory_format)
    out0.get_output(0).allowed_formats = 1 << int(trt_memory_format)
    out1.get_output(0).allowed_formats = 1 << int(trt_memory_format)

    if (trt_input_dtype == trt.int8):
        in0.dynamic_range = (-128.0, 127.0)
        in1.dynamic_range = (-128.0, 127.0)

    if (trt_output0_dtype == trt.int8):
        out0.get_output(0).dynamic_range = (-128.0, 127.0)

    if (trt_output1_dtype == trt.int8):
        out1.get_output(0).dynamic_range = (-128.0, 127.0)

    min_shape = []
    opt_shape = []
    max_shape = []
    if max_batch != 0:
        min_shape = min_shape + [1]
        opt_shape = opt_shape + [max(1, max_batch)]
        max_shape = max_shape + [max(1, max_batch)]
    for i in input_shape:
        if i == -1:
            min_shape = min_shape + [min_dim]
            opt_shape = opt_shape + [int((max_dim + min_dim) / 2)]
            max_shape = max_shape + [max_dim]
        else:
            min_shape = min_shape + [i]
            opt_shape = opt_shape + [i]
            max_shape = max_shape + [i]

    profile = builder.create_optimization_profile()
    profile.set_shape("INPUT0", min_shape, opt_shape, max_shape)
    profile.set_shape("INPUT1", min_shape, opt_shape, max_shape)

    flags = 1 << int(trt.BuilderFlag.STRICT_TYPES)
    datatype_set = set([trt_input_dtype, trt_output0_dtype, trt_output1_dtype])
    for dt in datatype_set:
        if (dt == trt.int8):
            flags |= 1 << int(trt.BuilderFlag.INT8)
        elif (dt == trt.float16):
            flags |= 1 << int(trt.BuilderFlag.FP16)
    config = builder.create_builder_config()
    config.flags = flags
    config.add_optimization_profile(profile)
    config.max_workspace_size = 1 << 20
    engine = builder.build_engine(network, config)

    # Use a different model name for different kinds of models
    model_name = tu.get_model_name("plan_nobatch" if max_batch == 0 else "plan",
                                   input_dtype, output0_dtype, output1_dtype)
    if min_dim != 1 or max_dim != 32:
        model_name = "{}-{}-{}".format(model_name, min_dim, max_dim)

    model_version_dir = models_dir + "/" + model_name + "/" + str(model_version)

    try:
        os.makedirs(model_version_dir)
    except OSError as ex:
        pass  # ignore existing dir

    with open(model_version_dir + "/model.plan", "wb") as f:
        f.write(engine.serialize())

    del engine
    del builder
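# Editor's note (hedged, not part of the original script): the function above
# builds an explicit-batch TensorRT network with a single optimization
# profile. For every -1 ("variable") dimension the profile supplies a
# (min, opt, max) triple of (min_dim, (min_dim + max_dim) // 2, max_dim);
# fixed dimensions use the same value for all three. The variant below does
# the same but registers several profiles so Triton's profile selection can
# be exercised.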
def create_plan_dynamic_modelfile(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, swap, min_dim, max_dim):
    trt_input_dtype = np_to_trt_dtype(input_dtype)
    trt_output0_dtype = np_to_trt_dtype(output0_dtype)
    trt_output1_dtype = np_to_trt_dtype(output1_dtype)

    # Create the model
    TRT_LOGGER = trt.Logger(trt.Logger.INFO)
    builder = trt.Builder(TRT_LOGGER)
    network = builder.create_network(
        1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
    if max_batch == 0:
        input_with_batchsize = [i for i in input_shape]
    else:
        input_with_batchsize = [-1] + [i for i in input_shape]

    in0 = network.add_input("INPUT0", trt_input_dtype, input_with_batchsize)
    in1 = network.add_input("INPUT1", trt_input_dtype, input_with_batchsize)
    add = network.add_elementwise(in0, in1, trt.ElementWiseOperation.SUM)
    sub = network.add_elementwise(in0, in1, trt.ElementWiseOperation.SUB)

    out0 = add if not swap else sub
    out1 = sub if not swap else add

    out0.get_output(0).name = "OUTPUT0"
    out1.get_output(0).name = "OUTPUT1"
    network.mark_output(out0.get_output(0))
    network.mark_output(out1.get_output(0))

    min_shape = []
    opt_shape = []
    max_shape = []
    for i in input_shape:
        if i == -1:
            min_shape = min_shape + [min_dim]
            opt_shape = opt_shape + [int((max_dim + min_dim) / 2)]
            max_shape = max_shape + [max_dim]
        else:
            min_shape = min_shape + [i]
            opt_shape = opt_shape + [i]
            max_shape = max_shape + [i]

    config = builder.create_builder_config()
    # create multiple profiles with same shape for testing
    # with decreasing batch sizes
    profile = []
    for i in range(4):
        profile.append(builder.create_optimization_profile())
        if max_batch == 0:
            profile[i].set_shape("INPUT0", min_shape, opt_shape, max_shape)
            profile[i].set_shape("INPUT1", min_shape, opt_shape, max_shape)
        else:
            bs = [max_batch - i if max_batch > i else 1]
            opt_bs = [1 + i if 1 + i < max_batch - 1 else max_batch - 1]
            # Hardcoded 'max_shape[0] += 1' in default profile for
            # L0_trt_dynamic_shape, to differentiate whether default profile
            # is used if no profile is specified
            max_shape_override = max_shape
            if i == 0 and (min_dim == 1 and max_dim == 32):
                max_shape_override[0] += 1
            profile[i].set_shape("INPUT0", [1] + min_shape, opt_bs + opt_shape,
                                 bs + max_shape_override)
            profile[i].set_shape("INPUT1", [1] + min_shape, opt_bs + opt_shape,
                                 bs + max_shape_override)
        config.add_optimization_profile(profile[i])

    # some profiles with non-one min shape for first dim to test autofiller
    for i in range(2):
        profile.append(builder.create_optimization_profile())
        if max_batch == 0:
            profile[i + 4].set_shape("INPUT0", min_shape, opt_shape, max_shape)
            profile[i + 4].set_shape("INPUT1", min_shape, opt_shape, max_shape)
        else:
            profile[i + 4].set_shape("INPUT0", [5 + i] + min_shape,
                                     [6] + opt_shape, [max_batch] + max_shape)
            profile[i + 4].set_shape("INPUT1", [5 + i] + min_shape,
                                     [6] + opt_shape, [max_batch] + max_shape)
        config.add_optimization_profile(profile[i + 4])

    # Will repeat another profile with same min and max shapes as the first
    # profile to test non-zero profile for infer_variable test.
    profile.append(builder.create_optimization_profile())
    if max_batch == 0:
        profile[6].set_shape("INPUT0", min_shape, opt_shape, max_shape)
        profile[6].set_shape("INPUT1", min_shape, opt_shape, max_shape)
    else:
        profile[6].set_shape("INPUT0", [1] + min_shape, [1] + opt_shape,
                             [max_batch] + max_shape)
        profile[6].set_shape("INPUT1", [1] + min_shape, [1] + opt_shape,
                             [max_batch] + max_shape)
    config.add_optimization_profile(profile[6])

    config.max_workspace_size = 1 << 20
    engine = builder.build_engine(network, config)

    # Use a different model name for different kinds of models
    model_name = tu.get_model_name("plan_nobatch" if max_batch == 0 else "plan",
                                   input_dtype, output0_dtype, output1_dtype)
    if min_dim != 1 or max_dim != 32:
        model_name = "{}-{}-{}".format(model_name, min_dim, max_dim)

    model_version_dir = models_dir + "/" + model_name + "/" + str(model_version)

    try:
        os.makedirs(model_version_dir)
    except OSError as ex:
        pass  # ignore existing dir

    with open(model_version_dir + "/model.plan", "wb") as f:
        f.write(engine.serialize())

    del engine
    del builder


def create_plan_fixed_rf_modelfile(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, swap):
    trt_input_dtype = np_to_trt_dtype(input_dtype)
    trt_output0_dtype = np_to_trt_dtype(output0_dtype)
    trt_output1_dtype = np_to_trt_dtype(output1_dtype)
    trt_memory_format = trt.TensorFormat.LINEAR

    # Create the model
    TRT_LOGGER = trt.Logger(trt.Logger.INFO)
    builder = trt.Builder(TRT_LOGGER)
    network = builder.create_network()
    in0 = network.add_input("INPUT0", trt_input_dtype, input_shape)
    in1 = network.add_input("INPUT1", trt_input_dtype, input_shape)
    add = network.add_elementwise(in0, in1, trt.ElementWiseOperation.SUM)
    sub = network.add_elementwise(in0, in1, trt.ElementWiseOperation.SUB)

    out0 = add if not swap else sub
    out1 = sub if not swap else add

    out0.get_output(0).name = "OUTPUT0"
    out1.get_output(0).name = "OUTPUT1"
    network.mark_output(out0.get_output(0))
    network.mark_output(out1.get_output(0))

    out0.get_output(0).dtype = trt_output0_dtype
    out1.get_output(0).dtype = trt_output1_dtype

    in0.allowed_formats = 1 << int(trt_memory_format)
    in1.allowed_formats = 1 << int(trt_memory_format)
    out0.get_output(0).allowed_formats = 1 << int(trt_memory_format)
    out1.get_output(0).allowed_formats = 1 << int(trt_memory_format)

    if (trt_input_dtype == trt.int8):
        in0.dynamic_range = (-128.0, 127.0)
        in1.dynamic_range = (-128.0, 127.0)

    if (trt_output0_dtype == trt.int8):
        out0.get_output(0).dynamic_range = (-128.0, 127.0)

    if (trt_output1_dtype == trt.int8):
        out1.get_output(0).dynamic_range = (-128.0, 127.0)

    flags = 1 << int(trt.BuilderFlag.STRICT_TYPES)
    datatype_set = set([trt_input_dtype, trt_output0_dtype, trt_output1_dtype])
    for dt in datatype_set:
        if (dt == trt.int8):
            flags |= 1 << int(trt.BuilderFlag.INT8)
        elif (dt == trt.float16):
            flags |= 1 << int(trt.BuilderFlag.FP16)
    config = builder.create_builder_config()
    config.flags = flags
    config.max_workspace_size = 1 << 20
    builder.max_batch_size = max(1, max_batch)
    engine = builder.build_engine(network, config)

    model_name = tu.get_model_name("plan_nobatch" if max_batch == 0 else "plan",
                                   input_dtype, output0_dtype, output1_dtype)
    model_version_dir = models_dir + "/" + model_name + "/" + str(model_version)

    try:
        os.makedirs(model_version_dir)
    except OSError as ex:
        pass  # ignore existing dir

    with open(model_version_dir + "/model.plan", "wb") as f:
        f.write(engine.serialize())

    del engine
    del builder
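# Editor's note (hedged): unlike the *_dynamic_* builders above, the fixed
# variants create the network without the EXPLICIT_BATCH flag and instead set
# builder.max_batch_size, i.e. they rely on TensorRT's older implicit-batch
# mode in which the batch dimension is not part of the tensor shapes.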
def create_plan_fixed_modelfile(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, swap):
    trt_input_dtype = np_to_trt_dtype(input_dtype)
    trt_output0_dtype = np_to_trt_dtype(output0_dtype)
    trt_output1_dtype = np_to_trt_dtype(output1_dtype)

    # Create the model
    TRT_LOGGER = trt.Logger(trt.Logger.INFO)
    builder = trt.Builder(TRT_LOGGER)
    network = builder.create_network()
    in0 = network.add_input("INPUT0", trt_input_dtype, input_shape)
    in1 = network.add_input("INPUT1", trt_input_dtype, input_shape)
    add = network.add_elementwise(in0, in1, trt.ElementWiseOperation.SUM)
    sub = network.add_elementwise(in0, in1, trt.ElementWiseOperation.SUB)

    out0 = add if not swap else sub
    out1 = sub if not swap else add

    out0.get_output(0).name = "OUTPUT0"
    out1.get_output(0).name = "OUTPUT1"
    network.mark_output(out0.get_output(0))
    network.mark_output(out1.get_output(0))

    config = builder.create_builder_config()
    config.max_workspace_size = 1 << 20
    builder.max_batch_size = max(1, max_batch)
    engine = builder.build_engine(network, config)
    del network

    model_name = tu.get_model_name("plan_nobatch" if max_batch == 0 else "plan",
                                   input_dtype, output0_dtype, output1_dtype)
    model_version_dir = models_dir + "/" + model_name + "/" + str(model_version)

    try:
        os.makedirs(model_version_dir)
    except OSError as ex:
        pass  # ignore existing dir

    with open(model_version_dir + "/model.plan", "wb") as f:
        f.write(engine.serialize())

    del engine
    del builder


def create_plan_modelfile(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, swap=False, min_dim=1, max_dim=32):
    if not tu.validate_for_trt_model(input_dtype, output0_dtype, output1_dtype,
                                     input_shape, output0_shape, output1_shape):
        return

    if input_dtype != np.float32 or output0_dtype != np.float32 or output1_dtype != np.float32:
        if (not tu.shape_is_fixed(input_shape) or
                not tu.shape_is_fixed(output0_shape) or
                not tu.shape_is_fixed(output1_shape)):
            create_plan_dynamic_rf_modelfile(models_dir, max_batch,
                                             model_version, input_shape,
                                             output0_shape, output1_shape,
                                             input_dtype, output0_dtype,
                                             output1_dtype, swap, min_dim,
                                             max_dim)
        else:
            create_plan_fixed_rf_modelfile(models_dir, max_batch,
                                           model_version, input_shape,
                                           output0_shape, output1_shape,
                                           input_dtype, output0_dtype,
                                           output1_dtype, swap)
    else:
        if (not tu.shape_is_fixed(input_shape) or
                not tu.shape_is_fixed(output0_shape) or
                not tu.shape_is_fixed(output1_shape)):
            create_plan_dynamic_modelfile(models_dir, max_batch, model_version,
                                          input_shape, output0_shape,
                                          output1_shape, input_dtype,
                                          output0_dtype, output1_dtype, swap,
                                          min_dim, max_dim)
        else:
            create_plan_fixed_modelfile(models_dir, max_batch, model_version,
                                        input_shape, output0_shape,
                                        output1_shape, input_dtype,
                                        output0_dtype, output1_dtype, swap)


def create_plan_modelconfig(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, output0_label_cnt, version_policy, min_dim=1, max_dim=32):
    if not tu.validate_for_trt_model(input_dtype, output0_dtype, output1_dtype,
                                     input_shape, output0_shape, output1_shape):
        return

    # Unpack version policy
    version_policy_str = "{ latest { num_versions: 1 }}"
    if version_policy is not None:
        type, val = version_policy
        if type == 'latest':
            version_policy_str = "{{ latest {{ num_versions: {} }}}}".format(val)
        elif type == 'specific':
            version_policy_str = "{{ specific {{ versions: {} }}}}".format(val)
        else:
            version_policy_str = "{ all { }}"

    # Use a different model name for different kinds of models
    model_name = tu.get_model_name("plan_nobatch" if max_batch == 0 else "plan",
                                   input_dtype, output0_dtype, output1_dtype)
    if min_dim != 1 or max_dim != 32:
        model_name = "{}-{}-{}".format(model_name, min_dim, max_dim)

    config_dir = models_dir + "/" + model_name
    if -1 in input_shape:
        # Selects the sixth profile for FP32 datatype
        # Note the min and max shapes of first and sixth
        # profile are identical.
        profile_index = 6 if input_dtype == np.float32 else 0
        config = '''
name: "{}"
platform: "tensorrt_plan"
max_batch_size: {}
version_policy: {}
input [
  {{
    name: "INPUT0"
    data_type: {}
    dims: [ {} ]
  }},
  {{
    name: "INPUT1"
    data_type: {}
    dims: [ {} ]
  }}
]
output [
  {{
    name: "OUTPUT0"
    data_type: {}
    dims: [ {} ]
    label_filename: "output0_labels.txt"
  }},
  {{
    name: "OUTPUT1"
    data_type: {}
    dims: [ {} ]
  }}
]
instance_group [
  {{
    profile:"{}"
  }}
]
'''.format(model_name, max_batch, version_policy_str,
           np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape),
           np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape),
           np_to_model_dtype(output0_dtype), tu.shape_to_dims_str(output0_shape),
           np_to_model_dtype(output1_dtype), tu.shape_to_dims_str(output1_shape),
           profile_index)
    else:
        config = '''
name: "{}"
platform: "tensorrt_plan"
max_batch_size: {}
version_policy: {}
input [
  {{
    name: "INPUT0"
    data_type: {}
    dims: [ {} ]
  }},
  {{
    name: "INPUT1"
    data_type: {}
    dims: [ {} ]
  }}
]
output [
  {{
    name: "OUTPUT0"
    data_type: {}
    dims: [ {} ]
    label_filename: "output0_labels.txt"
  }},
  {{
    name: "OUTPUT1"
    data_type: {}
    dims: [ {} ]
  }}
]
'''.format(model_name, max_batch, version_policy_str,
           np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape),
           np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape),
           np_to_model_dtype(output0_dtype), tu.shape_to_dims_str(output0_shape),
           np_to_model_dtype(output1_dtype), tu.shape_to_dims_str(output1_shape))

    try:
        os.makedirs(config_dir)
    except OSError as ex:
        pass  # ignore existing dir

    with open(config_dir + "/config.pbtxt", "w") as cfile:
        cfile.write(config)

    with open(config_dir + "/output0_labels.txt", "w") as lfile:
        for l in range(output0_label_cnt):
            lfile.write("label" + str(l) + "\n")


def create_onnx_modelfile(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, swap=False):
    if not tu.validate_for_onnx_model(input_dtype, output0_dtype, output1_dtype,
                                      input_shape, output0_shape, output1_shape):
        return

    onnx_input_dtype = np_to_onnx_dtype(input_dtype)
    onnx_output0_dtype = np_to_onnx_dtype(output0_dtype)
    onnx_output1_dtype = np_to_onnx_dtype(output1_dtype)

    onnx_input_shape, idx = tu.shape_to_onnx_shape(input_shape, 0)
    onnx_output0_shape, idx = tu.shape_to_onnx_shape(input_shape, idx)
    onnx_output1_shape, idx = tu.shape_to_onnx_shape(input_shape, idx)

    # Create the model
    model_name = tu.get_model_name("onnx_nobatch" if max_batch == 0 else "onnx",
                                   input_dtype, output0_dtype, output1_dtype)
    model_version_dir = models_dir + "/" + model_name + "/" + str(model_version)

    batch_dim = [] if max_batch == 0 else [None]

    in0 = onnx.helper.make_tensor_value_info("INPUT0", onnx_input_dtype,
                                             batch_dim + onnx_input_shape)
    in1 = onnx.helper.make_tensor_value_info("INPUT1", onnx_input_dtype,
                                             batch_dim + onnx_input_shape)

    out0 = onnx.helper.make_tensor_value_info("OUTPUT0", onnx_output0_dtype,
                                              batch_dim + onnx_output0_shape)
    out1 = onnx.helper.make_tensor_value_info("OUTPUT1", onnx_output1_dtype,
                                              batch_dim + onnx_output1_shape)

    internal_in0 = onnx.helper.make_node("Identity", ["INPUT0"], ["_INPUT0"])
    internal_in1 = onnx.helper.make_node("Identity", ["INPUT1"], ["_INPUT1"])

    # cast int8, int16 input to higher precision int as Onnx Add/Sub operator
    # doesn't support those types. Also casting String data type to int32
    if ((onnx_input_dtype == onnx.TensorProto.INT8) or
        (onnx_input_dtype == onnx.TensorProto.INT16) or
        (onnx_input_dtype == onnx.TensorProto.STRING)):
        internal_in0 = onnx.helper.make_node("Cast", ["INPUT0"], ["_INPUT0"],
                                             to=onnx.TensorProto.INT32)
        internal_in1 = onnx.helper.make_node("Cast", ["INPUT1"], ["_INPUT1"],
                                             to=onnx.TensorProto.INT32)

    add = onnx.helper.make_node("Add", ["_INPUT0", "_INPUT1"],
                                ["CAST0" if not swap else "CAST1"])
    sub = onnx.helper.make_node("Sub", ["_INPUT0", "_INPUT1"],
                                ["CAST1" if not swap else "CAST0"])
    cast0 = onnx.helper.make_node("Cast", ["CAST0"], ["OUTPUT0"],
                                  to=onnx_output0_dtype)
    cast1 = onnx.helper.make_node("Cast", ["CAST1"], ["OUTPUT1"],
                                  to=onnx_output1_dtype)

    # Avoid cast from float16 to float16
    # (bug in Onnx Runtime, cast from float16 to float16 will become cast from
    # float16 to float32)
    if onnx_input_dtype == onnx.TensorProto.FLOAT16:
        if onnx_output0_dtype == onnx_input_dtype:
            cast0 = onnx.helper.make_node("Identity", ["CAST0"], ["OUTPUT0"])
        if onnx_output1_dtype == onnx_input_dtype:
            cast1 = onnx.helper.make_node("Identity", ["CAST1"], ["OUTPUT1"])

    onnx_nodes = [internal_in0, internal_in1, add, sub, cast0, cast1]
    onnx_inputs = [in0, in1]
    onnx_outputs = [out0, out1]

    graph_proto = onnx.helper.make_graph(onnx_nodes, model_name, onnx_inputs,
                                         onnx_outputs)
    if FLAGS.onnx_opset > 0:
        model_opset = onnx.helper.make_operatorsetid("", FLAGS.onnx_opset)
        model_def = onnx.helper.make_model(graph_proto,
                                           producer_name="triton",
                                           opset_imports=[model_opset])
    else:
        model_def = onnx.helper.make_model(graph_proto, producer_name="triton")

    try:
        os.makedirs(model_version_dir)
    except OSError as ex:
        pass  # ignore existing dir

    onnx.save(model_def, model_version_dir + "/model.onnx")


def create_onnx_modelconfig(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, output0_label_cnt, version_policy):
    if not tu.validate_for_onnx_model(input_dtype, output0_dtype, output1_dtype,
                                      input_shape, output0_shape, output1_shape):
        return

    # Use a different model name for the non-batching variant
    model_name = tu.get_model_name("onnx_nobatch" if max_batch == 0 else "onnx",
                                   input_dtype, output0_dtype, output1_dtype)
    config_dir = models_dir + "/" + model_name

    # [TODO] move create_general_modelconfig() out of emu as it is general
    # enough for all backends to use
    config = emu.create_general_modelconfig(model_name,
                                            "onnxruntime_onnx",
                                            max_batch,
                                            emu.repeat(input_dtype, 2),
                                            emu.repeat(input_shape, 2),
                                            emu.repeat(None, 2),
                                            [output0_dtype, output1_dtype],
                                            [output0_shape, output1_shape],
                                            emu.repeat(None, 2),
                                            ["output0_labels.txt", None],
                                            version_policy=version_policy,
                                            force_tensor_number_suffix=True)

    try:
        os.makedirs(config_dir)
    except OSError as ex:
        pass  # ignore existing dir

    with open(config_dir + "/config.pbtxt", "w") as cfile:
        cfile.write(config)

    with open(config_dir + "/output0_labels.txt", "w") as lfile:
        for l in range(output0_label_cnt):
            lfile.write("label" + str(l) + "\n")


def create_libtorch_modelfile(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, swap=False):
    if not tu.validate_for_libtorch_model(input_dtype, output0_dtype,
                                          output1_dtype, input_shape,
                                          output0_shape, output1_shape):
        return

    torch_input_dtype = np_to_torch_dtype(input_dtype)
    torch_output0_dtype = np_to_torch_dtype(output0_dtype)
    torch_output1_dtype = np_to_torch_dtype(output1_dtype)

    model_name = tu.get_model_name(
        "libtorch_nobatch" if max_batch == 0 else "libtorch", input_dtype,
        output0_dtype, output1_dtype)
    # handle for -1 (when variable) since can't create tensor with shape of [-1]
    input_shape = [abs(ips) for ips in input_shape]

    # Create the model
    if not swap:

        class AddSubNet(nn.Module):

            def __init__(self, *args):
                self.torch_output0_dtype = args[0][0]
                self.torch_output1_dtype = args[0][1]
                super(AddSubNet, self).__init__()

            def forward(self, input0, input1):
                return (input0 + input1).to(self.torch_output0_dtype), \
                    (input0 - input1).to(self.torch_output1_dtype)

        addSubModel = AddSubNet((torch_output0_dtype, torch_output1_dtype))
        example_input = torch.zeros(input_shape, dtype=torch_input_dtype)
        traced = torch.jit.trace(addSubModel, (example_input, example_input))
    else:

        class SubAddNet(nn.Module):

            def __init__(self, *args):
                self.torch_output0_dtype = args[0][0]
                self.torch_output1_dtype = args[0][1]
                super(SubAddNet, self).__init__()

            def forward(self, input0, input1):
                return (input0 - input1).to(self.torch_output0_dtype), \
                    (input0 + input1).to(self.torch_output1_dtype)

        subAddModel = SubAddNet((torch_output0_dtype, torch_output1_dtype))
        example_input = torch.zeros(input_shape, dtype=torch_input_dtype)
        traced = torch.jit.trace(subAddModel, (example_input, example_input))

    model_version_dir = models_dir + "/" + model_name + "/" + str(model_version)

    try:
        os.makedirs(model_version_dir)
    except OSError as ex:
        pass  # ignore existing dir

    traced.save(model_version_dir + "/model.pt")


def create_libtorch_modelconfig(models_dir, max_batch, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, output0_label_cnt, version_policy):
    if not tu.validate_for_libtorch_model(input_dtype, output0_dtype,
                                          output1_dtype, input_shape,
                                          output0_shape, output1_shape):
        return

    # Unpack version policy
    version_policy_str = "{ latest { num_versions: 1 }}"
    if version_policy is not None:
        type, val = version_policy
        if type == 'latest':
            version_policy_str = "{{ latest {{ num_versions: {} }}}}".format(val)
        elif type == 'specific':
            version_policy_str = "{{ specific {{ versions: {} }}}}".format(val)
        else:
            version_policy_str = "{ all { }}"

    # Use a different model name for the non-batching variant
    model_name = tu.get_model_name(
        "libtorch_nobatch" if max_batch == 0 else "libtorch", input_dtype,
        output0_dtype, output1_dtype)
    config_dir = models_dir + "/" + model_name
    config = '''
name: "{}"
platform: "pytorch_libtorch"
max_batch_size: {}
version_policy: {}
input [
  {{
    name: "INPUT__0"
    data_type: {}
    dims: [ {} ]
  }},
  {{
    name: "INPUT__1"
    data_type: {}
    dims: [ {} ]
  }}
]
output [
  {{
    name: "OUTPUT__0"
    data_type: {}
    dims: [ {} ]
    label_filename: "output0_labels.txt"
  }},
  {{
    name: "OUTPUT__1"
    data_type: {}
    dims: [ {} ]
  }}
]
'''.format(model_name, max_batch, version_policy_str,
           np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape),
           np_to_model_dtype(input_dtype), tu.shape_to_dims_str(input_shape),
           np_to_model_dtype(output0_dtype), tu.shape_to_dims_str(output0_shape),
           np_to_model_dtype(output1_dtype), tu.shape_to_dims_str(output1_shape))

    try:
        os.makedirs(config_dir)
    except OSError as ex:
        pass  # ignore existing dir

    with open(config_dir + "/config.pbtxt", "w") as cfile:
        cfile.write(config)

    with open(config_dir + "/output0_labels.txt", "w") as lfile:
        for l in range(output0_label_cnt):
            lfile.write("label" + str(l) + "\n")
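# Editor's note (hedged): create_models below emits, for every backend flag
# that was passed, a batching variant (max_batch=8) and a non-batching one
# (max_batch=0). A typical invocation might look like the following, where
# the script filename is an assumption (it is not shown in this excerpt):
#
#   python gen_qa_models.py --models_dir=/tmp/qa_models \
#       --graphdef --savedmodel --tensorrt --onnx --libtorch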
def create_models(models_dir, input_dtype, output0_dtype, output1_dtype, input_shape, output0_shape, output1_shape, output0_label_cnt, version_policy=None):
    model_version = 1

    # Create two models, one that supports batching with a max-batch
    # of 8, and one that does not with a max-batch of 0
    if FLAGS.graphdef:
        # max-batch 8
        create_graphdef_modelconfig(models_dir, 8, model_version, input_shape,
                                    output0_shape, output1_shape, input_dtype,
                                    output0_dtype, output1_dtype,
                                    output0_label_cnt, version_policy)
        create_graphdef_modelfile(models_dir, 8, model_version, input_shape,
                                  output0_shape, output1_shape, input_dtype,
                                  output0_dtype, output1_dtype)
        # max-batch 0
        create_graphdef_modelconfig(models_dir, 0, model_version, input_shape,
                                    output0_shape, output1_shape, input_dtype,
                                    output0_dtype, output1_dtype,
                                    output0_label_cnt, version_policy)
        create_graphdef_modelfile(models_dir, 0, model_version, input_shape,
                                  output0_shape, output1_shape, input_dtype,
                                  output0_dtype, output1_dtype)

    if FLAGS.savedmodel:
        # max-batch 8
        create_savedmodel_modelconfig(models_dir, 8, model_version, input_shape,
                                      output0_shape, output1_shape, input_dtype,
                                      output0_dtype, output1_dtype,
                                      output0_label_cnt, version_policy)
        create_savedmodel_modelfile(models_dir, 8, model_version, input_shape,
                                    output0_shape, output1_shape, input_dtype,
                                    output0_dtype, output1_dtype)
        # max-batch 0
        create_savedmodel_modelconfig(models_dir, 0, model_version, input_shape,
                                      output0_shape, output1_shape, input_dtype,
                                      output0_dtype, output1_dtype,
                                      output0_label_cnt, version_policy)
        create_savedmodel_modelfile(models_dir, 0, model_version, input_shape,
                                    output0_shape, output1_shape, input_dtype,
                                    output0_dtype, output1_dtype)

    if FLAGS.netdef:
        # max-batch 8
        create_netdef_modelconfig(models_dir, 8, model_version, input_shape,
                                  output0_shape, output1_shape, input_dtype,
                                  output0_dtype, output1_dtype,
                                  output0_label_cnt, version_policy)
        create_netdef_modelfile(models_dir, 8, model_version, input_shape,
                                output0_shape, output1_shape, input_dtype,
                                output0_dtype, output1_dtype)
        # max-batch 0
        create_netdef_modelconfig(models_dir, 0, model_version, input_shape,
                                  output0_shape, output1_shape, input_dtype,
                                  output0_dtype, output1_dtype,
                                  output0_label_cnt, version_policy)
        create_netdef_modelfile(models_dir, 0, model_version, input_shape,
                                output0_shape, output1_shape, input_dtype,
                                output0_dtype, output1_dtype)

    if FLAGS.tensorrt:
        # max-batch 8
        suffix = ()
        if input_dtype == np.int8 or output0_dtype == np.int8 or output1_dtype == np.int8:
            suffix = (1, 1)

        create_plan_modelconfig(models_dir, 8, model_version,
                                input_shape + suffix, output0_shape + suffix,
                                output1_shape + suffix, input_dtype,
                                output0_dtype, output1_dtype,
                                output0_label_cnt, version_policy)
        create_plan_modelfile(models_dir, 8, model_version,
                              input_shape + suffix, output0_shape + suffix,
                              output1_shape + suffix, input_dtype,
                              output0_dtype, output1_dtype)
        # max-batch 0
        create_plan_modelconfig(models_dir, 0, model_version,
                                input_shape + suffix, output0_shape + suffix,
                                output1_shape + suffix, input_dtype,
                                output0_dtype, output1_dtype,
                                output0_label_cnt, version_policy)
        create_plan_modelfile(models_dir, 0, model_version,
                              input_shape + suffix, output0_shape + suffix,
                              output1_shape + suffix, input_dtype,
                              output0_dtype, output1_dtype)

        if -1 in input_shape:
            # models for testing optimization profiles
            create_plan_modelconfig(models_dir, 8, model_version,
                                    input_shape + suffix,
                                    output0_shape + suffix,
                                    output1_shape + suffix, input_dtype,
                                    output0_dtype, output1_dtype,
                                    output0_label_cnt, version_policy,
                                    min_dim=4, max_dim=32)
            create_plan_modelfile(models_dir, 8, model_version,
suffix, output0_shape + suffix, output1_shape + suffix, input_dtype, output0_dtype, output1_dtype, min_dim=4, max_dim=32) if FLAGS.onnx: # max-batch 8 create_onnx_modelconfig(models_dir, 8, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, output0_label_cnt, version_policy) create_onnx_modelfile(models_dir, 8, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype) # max-batch 0 create_onnx_modelconfig(models_dir, 0, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, output0_label_cnt, version_policy) create_onnx_modelfile(models_dir, 0, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype) if FLAGS.libtorch: # max-batch 8 create_libtorch_modelconfig(models_dir, 8, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, output0_label_cnt, version_policy) create_libtorch_modelfile(models_dir, 8, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype) # max-batch 0 create_libtorch_modelconfig(models_dir, 0, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype, output0_label_cnt, version_policy) create_libtorch_modelfile(models_dir, 0, model_version, input_shape, output0_shape, output1_shape, input_dtype, output0_dtype, output1_dtype) if FLAGS.ensemble: for pair in emu.platform_types_and_validation(): if not pair[1](input_dtype, output0_dtype, output1_dtype, input_shape, output0_shape, output1_shape): continue config_input_shape = input_shape config_output0_shape = output0_shape config_output1_shape = output1_shape if pair[0] == "plan": if len(input_shape) == 1 and input_dtype == np.int8: config_input_shape = (input_shape[0], 1, 1) if len(output0_shape) == 1 and output0_dtype == np.int8: config_output0_shape = (output0_shape[0], 1, 1) if len(output1_shape) == 1 and output1_dtype == np.int8: config_output1_shape = (output1_shape[0], 1, 1) # max-batch 8 emu.create_ensemble_modelconfig(pair[0], models_dir, 8, model_version, config_input_shape, config_output0_shape, config_output1_shape, input_dtype, output0_dtype, output1_dtype, output0_label_cnt, version_policy) emu.create_ensemble_modelfile(pair[0], models_dir, 8, model_version, config_input_shape, config_output0_shape, config_output1_shape, input_dtype, output0_dtype, output1_dtype) # max-batch 0 emu.create_ensemble_modelconfig(pair[0], models_dir, 0, model_version, config_input_shape, config_output0_shape, config_output1_shape, input_dtype, output0_dtype, output1_dtype, output0_label_cnt, version_policy) emu.create_ensemble_modelfile(pair[0], models_dir, 0, model_version, config_input_shape, config_output0_shape, config_output1_shape, input_dtype, output0_dtype, output1_dtype) def create_fixed_models(models_dir, input_dtype, output0_dtype, output1_dtype, version_policy=None): input_size = 16 create_models(models_dir, input_dtype, output0_dtype, output1_dtype, (input_size,), (input_size,), (input_size,), input_size, version_policy) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--models_dir', type=str, required=True, help='Top-level model directory') parser.add_argument('--graphdef', required=False, action='store_true', help='Generate GraphDef models') parser.add_argument('--savedmodel', required=False, action='store_true', help='Generate SavedModel models') 
parser.add_argument('--netdef', required=False, action='store_true', help='Generate NetDef models') parser.add_argument('--tensorrt', required=False, action='store_true', help='Generate TensorRT PLAN models') parser.add_argument('--onnx', required=False, action='store_true', help='Generate Onnx Runtime Onnx models') parser.add_argument( '--onnx_opset', type=int, required=False, default=0, help='Opset used for Onnx models. Default is to use ONNXRT default') parser.add_argument('--libtorch', required=False, action='store_true', help='Generate Pytorch LibTorch models') parser.add_argument('--variable', required=False, action='store_true', help='Used variable-shape tensors for input/output') parser.add_argument('--ensemble', required=False, action='store_true', help='Generate ensemble models against the models' + ' in all platforms. Note that the models generated' + ' are not completed.') FLAGS, unparsed = parser.parse_known_args() if FLAGS.netdef: from caffe2.python import core as c2core from caffe2.python import model_helper as c2model_helper if FLAGS.graphdef or FLAGS.savedmodel: import tensorflow as tf from tensorflow.python.framework import graph_io, graph_util if FLAGS.tensorrt: import tensorrt as trt if FLAGS.onnx: import onnx if FLAGS.libtorch: import torch from torch import nn import test_util as tu # Tests with models that accept fixed-shape input/output tensors if not FLAGS.variable: create_fixed_models(FLAGS.models_dir, np.int8, np.int8, np.int8, ('latest', 1)) create_fixed_models(FLAGS.models_dir, np.int16, np.int16, np.int16, ('latest', 2)) create_fixed_models(FLAGS.models_dir, np.int32, np.int32, np.int32, ('all', None)) create_fixed_models(FLAGS.models_dir, np.int64, np.int64, np.int64) create_fixed_models(FLAGS.models_dir, np.float16, np.float16, np.float16, ('specific', [ 1, ])) create_fixed_models(FLAGS.models_dir, np.float32, np.float32, np.float32, ('specific', [1, 3])) create_fixed_models(FLAGS.models_dir, np.float16, np.float32, np.float32) create_fixed_models(FLAGS.models_dir, np.int32, np.int8, np.int8) create_fixed_models(FLAGS.models_dir, np.int8, np.int32, np.int32) create_fixed_models(FLAGS.models_dir, np.int32, np.int8, np.int16) create_fixed_models(FLAGS.models_dir, np.int32, np.float32, np.float32) create_fixed_models(FLAGS.models_dir, np.float32, np.int32, np.int32) create_fixed_models(FLAGS.models_dir, np.int32, np.float16, np.int16) create_fixed_models(FLAGS.models_dir, np_dtype_string, np.int32, np.int32) create_fixed_models(FLAGS.models_dir, np_dtype_string, np_dtype_string, np_dtype_string) create_fixed_models(FLAGS.models_dir, np_dtype_string, np.int32, np_dtype_string) create_fixed_models(FLAGS.models_dir, np_dtype_string, np_dtype_string, np.int32) create_fixed_models(FLAGS.models_dir, np.int32, np_dtype_string, np_dtype_string) create_fixed_models(FLAGS.models_dir, np.int32, np.int32, np_dtype_string) create_fixed_models(FLAGS.models_dir, np.int32, np_dtype_string, np.int32) # Make multiple versions of some models for version testing # (they use different version policies when created above) if FLAGS.graphdef: for vt in [np.float16, np.float32, np.int8, np.int16, np.int32]: create_graphdef_modelfile(FLAGS.models_dir, 8, 2, (16,), (16,), (16,), vt, vt, vt, swap=True) create_graphdef_modelfile(FLAGS.models_dir, 8, 3, (16,), (16,), (16,), vt, vt, vt, swap=True) create_graphdef_modelfile(FLAGS.models_dir, 0, 2, (16,), (16,), (16,), vt, vt, vt, swap=True) create_graphdef_modelfile(FLAGS.models_dir, 0, 3, (16,), (16,), (16,), vt, vt, vt, swap=True) if 
FLAGS.savedmodel: for vt in [np.float16, np.float32, np.int8, np.int16, np.int32]: create_savedmodel_modelfile(FLAGS.models_dir, 8, 2, (16,), (16,), (16,), vt, vt, vt, swap=True) create_savedmodel_modelfile(FLAGS.models_dir, 8, 3, (16,), (16,), (16,), vt, vt, vt, swap=True) create_savedmodel_modelfile(FLAGS.models_dir, 0, 2, (16,), (16,), (16,), vt, vt, vt, swap=True) create_savedmodel_modelfile(FLAGS.models_dir, 0, 3, (16,), (16,), (16,), vt, vt, vt, swap=True) if FLAGS.netdef: for vt in [np.float32, np.int32]: create_netdef_modelfile(FLAGS.models_dir, 8, 2, (16,), (16,), (16,), vt, vt, vt, swap=True) create_netdef_modelfile(FLAGS.models_dir, 8, 3, (16,), (16,), (16,), vt, vt, vt, swap=True) create_netdef_modelfile(FLAGS.models_dir, 0, 2, (16,), (16,), (16,), vt, vt, vt, swap=True) create_netdef_modelfile(FLAGS.models_dir, 0, 3, (16,), (16,), (16,), vt, vt, vt, swap=True) if FLAGS.tensorrt: for vt in [np.float32, np.float16, np.int32]: create_plan_modelfile(FLAGS.models_dir, 8, 2, (16,), (16,), (16,), vt, vt, vt, swap=True) create_plan_modelfile(FLAGS.models_dir, 8, 3, (16,), (16,), (16,), vt, vt, vt, swap=True) create_plan_modelfile(FLAGS.models_dir, 0, 2, (16,), (16,), (16,), vt, vt, vt, swap=True) create_plan_modelfile(FLAGS.models_dir, 0, 3, (16,), (16,), (16,), vt, vt, vt, swap=True) vt = np.int8 #handle INT8 separately as it doesn't allow 1d tensors create_plan_modelfile(FLAGS.models_dir, 8, 2, (16, 1, 1), (16, 1, 1), (16, 1, 1), vt, vt, vt, swap=True) create_plan_modelfile(FLAGS.models_dir, 8, 3, (16, 1, 1), (16, 1, 1), (16, 1, 1), vt, vt, vt, swap=True) create_plan_modelfile(FLAGS.models_dir, 0, 2, (16, 1, 1), (16, 1, 1), (16, 1, 1), vt, vt, vt, swap=True) create_plan_modelfile(FLAGS.models_dir, 0, 3, (16, 1, 1), (16, 1, 1), (16, 1, 1), vt, vt, vt, swap=True) if FLAGS.onnx: for vt in [np.float16, np.float32, np.int8, np.int16, np.int32]: create_onnx_modelfile(FLAGS.models_dir, 8, 2, (16,), (16,), (16,), vt, vt, vt, swap=True) create_onnx_modelfile(FLAGS.models_dir, 8, 3, (16,), (16,), (16,), vt, vt, vt, swap=True) create_onnx_modelfile(FLAGS.models_dir, 0, 2, (16,), (16,), (16,), vt, vt, vt, swap=True) create_onnx_modelfile(FLAGS.models_dir, 0, 3, (16,), (16,), (16,), vt, vt, vt, swap=True) if FLAGS.libtorch: for vt in [np.float32, np.int32, np.int16, np.int8]: create_libtorch_modelfile(FLAGS.models_dir, 8, 2, (16,), (16,), (16,), vt, vt, vt, swap=True) create_libtorch_modelfile(FLAGS.models_dir, 8, 3, (16,), (16,), (16,), vt, vt, vt, swap=True) create_libtorch_modelfile(FLAGS.models_dir, 0, 2, (16,), (16,), (16,), vt, vt, vt, swap=True) create_libtorch_modelfile(FLAGS.models_dir, 0, 3, (16,), (16,), (16,), vt, vt, vt, swap=True) if FLAGS.ensemble: for pair in emu.platform_types_and_validation(): for vt in [np.float16, np.float32, np.int8, np.int16, np.int32]: shape = (16, 1, 1) if (pair[0] == "plan" and vt == np.int8) else (16,) if not pair[1](vt, vt, vt, shape, shape, shape): continue emu.create_ensemble_modelfile(pair[0], FLAGS.models_dir, 8, 2, shape, shape, shape, vt, vt, vt, swap=True) emu.create_ensemble_modelfile(pair[0], FLAGS.models_dir, 8, 3, shape, shape, shape, vt, vt, vt, swap=True) emu.create_ensemble_modelfile(pair[0], FLAGS.models_dir, 0, 2, shape, shape, shape, vt, vt, vt, swap=True) emu.create_ensemble_modelfile(pair[0], FLAGS.models_dir, 0, 3, shape, shape, shape, vt, vt, vt, swap=True) # Tests with models that accept variable-shape input/output tensors if FLAGS.variable: create_models(FLAGS.models_dir, np.float32, np.float32, np.float32, (-1,), (-1,), (-1,), 
16) create_models(FLAGS.models_dir, np.float32, np.int32, np.int32, (-1, -1), (-1, -1), (-1, -1), 16) create_models(FLAGS.models_dir, np.float32, np.int64, np.int64, (8, -1), (8, -1), (8, -1), 32) create_models(FLAGS.models_dir, np.float32, np.int32, np.int64, (-1, 8, -1), (-1, 8, -1), (-1, 8, -1), 32) create_models(FLAGS.models_dir, np.float32, np.float32, np.int32, (-1,), (-1,), (-1,), 16) create_models(FLAGS.models_dir, np.int32, np.int32, np.int32, (-1, -1), (-1, -1), (-1, -1), 16) create_models(FLAGS.models_dir, np.int32, np.int32, np.float32, (-1, 8, -1), (-1, 8, -1), (-1, 8, -1), 32) create_models(FLAGS.models_dir, np_dtype_string, np_dtype_string, np_dtype_string, (-1,), (-1,), (-1,), 16) create_models(FLAGS.models_dir, np_dtype_string, np.int32, np.int32, (-1, -1), (-1, -1), (-1, -1), 16) create_models(FLAGS.models_dir, np_dtype_string, np_dtype_string, np.int32, (8, -1), (8, -1), (8, -1), 32) create_models(FLAGS.models_dir, np_dtype_string, np.int32, np_dtype_string, (-1, 8, -1), (-1, 8, -1), (-1, 8, -1), 32) if FLAGS.ensemble: # Create utility models used in ensemble # nop (only creates model config, should add model file before use) model_dtypes = ["TYPE_BOOL", "TYPE_STRING"] for s in [8, 16, 32, 64]: for t in ["INT", "UINT", "FP"]: if t == "FP" and s == 8: continue model_dtypes.append("TYPE_{}{}".format(t, s)) for model_dtype in model_dtypes: # Use variable size to handle all shape. Note: piping variable size output # to fixed size model is not safe but doable for model_shape in [(-1,), (-1, -1), (-1, -1, -1)]: emu.create_nop_modelconfig(FLAGS.models_dir, model_shape, model_dtype)
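# Usage sketch (not part of the original script; paths are examples only):
# the generator is driven entirely by the command-line flags defined above,
# so the fixed-shape LibTorch and ONNX test models could be produced with:
#
#   python gen_qa_models.py --models_dir=/tmp/qa_model_repository --libtorch --onnx
#
# Adding --variable switches to the variable-shape model set, and --ensemble
# additionally emits the (incomplete) ensemble wrappers and nop model configs.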
[ "torch.jit.trace", "torch.zeros", "tensorflow.cast", "tensorflow.identity", "tensorflow.saved_model.simple_save", "numpy.dtype", "tensorflow.subtract", "tensorflow.dtypes.as_string", "tensorflow.strings.to_number", "tensorflow.reset_default_graph", "tensorflow.add", "tensorflow.Session", "tensorflow.get_default_graph" ]
qa/common/gen_qa_models.py
[(35, 'numpy.dtype', 'np.dtype', (['object'], {}), True, 'import numpy as np\n'), (202, 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), True, 'import tensorflow as tf\n'), (222, 'tensorflow.add', 'tf.add', (['in0', 'in1', '"""ADD"""'], {}), True, 'import tensorflow as tf\n'), (223, 'tensorflow.subtract', 'tf.subtract', (['in0', 'in1', '"""SUB"""'], {}), True, 'import tensorflow as tf\n'), (236, 'tensorflow.identity', 'tf.identity', (['cast0', '"""OUTPUT0"""'], {}), True, 'import tensorflow as tf\n'), (237, 'tensorflow.identity', 'tf.identity', (['cast1', '"""OUTPUT1"""'], {}), True, 'import tensorflow as tf\n'), (240, 'test_util.get_model_name', 'tu.get_model_name', (["('graphdef_nobatch' if max_batch == 0 else 'graphdef')", 'input_dtype', 'output0_dtype', 'output1_dtype'], {}), True, 'import test_util as tu\n'), (279, 'test_util.get_model_name', 'tu.get_model_name', (["('graphdef_nobatch' if max_batch == 0 else 'graphdef')", 'input_dtype', 'output0_dtype', 'output1_dtype'], {}), True, 'import test_util as tu\n'), (355, 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), True, 'import tensorflow as tf\n'), (375, 'tensorflow.add', 'tf.add', (['in0', 'in1', '"""ADD"""'], {}), True, 'import tensorflow as tf\n'), (376, 'tensorflow.subtract', 'tf.subtract', (['in0', 'in1', '"""SUB"""'], {}), True, 'import tensorflow as tf\n'), (389, 'tensorflow.identity', 'tf.identity', (['cast0', '"""TENSOR_OUTPUT0"""'], {}), True, 'import tensorflow as tf\n'), (390, 'tensorflow.identity', 'tf.identity', (['cast1', '"""TENSOR_OUTPUT1"""'], {}), True, 'import tensorflow as tf\n'), (393, 'test_util.get_model_name', 'tu.get_model_name', (["('savedmodel_nobatch' if max_batch == 0 else 'savedmodel')", 'input_dtype', 'output0_dtype', 'output1_dtype'], {}), True, 'import test_util as tu\n'), (446, 'test_util.get_model_name', 'tu.get_model_name', (["('savedmodel_nobatch' if max_batch == 0 else 'savedmodel')", 'input_dtype', 'output0_dtype', 'output1_dtype'], {}), True, 'import test_util as tu\n'), (520, 'test_util.get_model_name', 'tu.get_model_name', (["('netdef_nobatch' if max_batch == 0 else 'netdef')", 'input_dtype', 'output0_dtype', 'output1_dtype'], {}), True, 'import test_util as tu\n'), (525, 'caffe2.python.model_helper.ModelHelper', 'c2model_helper.ModelHelper', ([], {'name': 'model_name'}), True, 'from caffe2.python import model_helper as c2model_helper\n'), (572, 'test_util.get_model_name', 'tu.get_model_name', (["('netdef_nobatch' if max_batch == 0 else 'netdef')", 'input_dtype', 'output0_dtype', 'output1_dtype'], {}), True, 'import test_util as tu\n'), (637, 'tensorrt.Logger', 'trt.Logger', (['trt.Logger.INFO'], {}), True, 'import tensorrt as trt\n'), (638, 'tensorrt.Builder', 'trt.Builder', (['TRT_LOGGER'], {}), True, 'import tensorrt as trt\n'), (709, 'test_util.get_model_name', 'tu.get_model_name', (["('plan_nobatch' if max_batch == 0 else 'plan')", 'input_dtype', 'output0_dtype', 'output1_dtype'], {}), True, 'import test_util as tu\n'), (737, 'tensorrt.Logger', 'trt.Logger', (['trt.Logger.INFO'], {}), True, 'import tensorrt as trt\n'), (738, 'tensorrt.Builder', 'trt.Builder', (['TRT_LOGGER'], {}), True, 'import tensorrt as trt\n'), (776, 'builtins.range', 'range', (['(4)'], {}), False, 'from builtins import range\n'), (797, 'builtins.range', 'range', (['(2)'], {}), False, 'from builtins import range\n'), (825, 'test_util.get_model_name', 'tu.get_model_name', (["('plan_nobatch' if max_batch == 0 else 'plan')", 'input_dtype', 'output0_dtype', 'output1_dtype'], 
{}), True, 'import test_util as tu\n'), (854, 'tensorrt.Logger', 'trt.Logger', (['trt.Logger.INFO'], {}), True, 'import tensorrt as trt\n'), (855, 'tensorrt.Builder', 'trt.Builder', (['TRT_LOGGER'], {}), True, 'import tensorrt as trt\n'), (899, 'test_util.get_model_name', 'tu.get_model_name', (["('plan_nobatch' if max_batch == 0 else 'plan')", 'input_dtype', 'output0_dtype', 'output1_dtype'], {}), True, 'import test_util as tu\n'), (924, 'tensorrt.Logger', 'trt.Logger', (['trt.Logger.INFO'], {}), True, 'import tensorrt as trt\n'), (925, 'tensorrt.Builder', 'trt.Builder', (['TRT_LOGGER'], {}), True, 'import tensorrt as trt\n'), (946, 'test_util.get_model_name', 'tu.get_model_name', (["('plan_nobatch' if max_batch == 0 else 'plan')", 'input_dtype', 'output0_dtype', 'output1_dtype'], {}), True, 'import test_util as tu\n'), (1042, 'test_util.get_model_name', 'tu.get_model_name', (["('plan_nobatch' if max_batch == 0 else 'plan')", 'input_dtype', 'output0_dtype', 'output1_dtype'], {}), True, 'import test_util as tu\n'), (1167, 'test_util.shape_to_onnx_shape', 'tu.shape_to_onnx_shape', (['input_shape', '(0)'], {}), True, 'import test_util as tu\n'), (1168, 'test_util.shape_to_onnx_shape', 'tu.shape_to_onnx_shape', (['input_shape', 'idx'], {}), True, 'import test_util as tu\n'), (1169, 'test_util.shape_to_onnx_shape', 'tu.shape_to_onnx_shape', (['input_shape', 'idx'], {}), True, 'import test_util as tu\n'), (1172, 'test_util.get_model_name', 'tu.get_model_name', (["('onnx_nobatch' if max_batch == 0 else 'onnx')", 'input_dtype', 'output0_dtype', 'output1_dtype'], {}), True, 'import test_util as tu\n'), (1178, 'onnx.helper.make_tensor_value_info', 'onnx.helper.make_tensor_value_info', (['"""INPUT0"""', 'onnx_input_dtype', '(batch_dim + onnx_input_shape)'], {}), False, 'import onnx\n'), (1180, 'onnx.helper.make_tensor_value_info', 'onnx.helper.make_tensor_value_info', (['"""INPUT1"""', 'onnx_input_dtype', '(batch_dim + onnx_input_shape)'], {}), False, 'import onnx\n'), (1183, 'onnx.helper.make_tensor_value_info', 'onnx.helper.make_tensor_value_info', (['"""OUTPUT0"""', 'onnx_output0_dtype', '(batch_dim + onnx_output0_shape)'], {}), False, 'import onnx\n'), (1185, 'onnx.helper.make_tensor_value_info', 'onnx.helper.make_tensor_value_info', (['"""OUTPUT1"""', 'onnx_output1_dtype', '(batch_dim + onnx_output1_shape)'], {}), False, 'import onnx\n'), (1188, 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Identity"""', "['INPUT0']", "['_INPUT0']"], {}), False, 'import onnx\n'), (1189, 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Identity"""', "['INPUT1']", "['_INPUT1']"], {}), False, 'import onnx\n'), (1201, 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Add"""', "['_INPUT0', '_INPUT1']", "['CAST0' if not swap else 'CAST1']"], {}), False, 'import onnx\n'), (1203, 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Sub"""', "['_INPUT0', '_INPUT1']", "['CAST1' if not swap else 'CAST0']"], {}), False, 'import onnx\n'), (1205, 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Cast"""', "['CAST0']", "['OUTPUT0']"], {'to': 'onnx_output0_dtype'}), False, 'import onnx\n'), (1207, 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Cast"""', "['CAST1']", "['OUTPUT1']"], {'to': 'onnx_output1_dtype'}), False, 'import onnx\n'), (1222, 'onnx.helper.make_graph', 'onnx.helper.make_graph', (['onnx_nodes', 'model_name', 'onnx_inputs', 'onnx_outputs'], {}), False, 'import onnx\n'), (1237, 'onnx.save', 'onnx.save', (['model_def', "(model_version_dir + '/model.onnx')"], {}), False, 
'import onnx\n'), (1251, 'test_util.get_model_name', 'tu.get_model_name', (["('onnx_nobatch' if max_batch == 0 else 'onnx')", 'input_dtype', 'output0_dtype', 'output1_dtype'], {}), True, 'import test_util as tu\n'), (1303, 'test_util.get_model_name', 'tu.get_model_name', (["('libtorch_nobatch' if max_batch == 0 else 'libtorch')", 'input_dtype', 'output0_dtype', 'output1_dtype'], {}), True, 'import test_util as tu\n'), (1375, 'test_util.get_model_name', 'tu.get_model_name', (["('libtorch_nobatch' if max_batch == 0 else 'libtorch')", 'input_dtype', 'output0_dtype', 'output1_dtype'], {}), True, 'import test_util as tu\n'), (1640, 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), False, 'import argparse\n'), (192, 'test_util.validate_for_tf_model', 'tu.validate_for_tf_model', (['input_dtype', 'output0_dtype', 'output1_dtype', 'input_shape', 'output0_shape', 'output1_shape'], {}), True, 'import test_util as tu\n'), (219, 'tensorflow.strings.to_number', 'tf.strings.to_number', (['in0', 'tf.int32'], {}), True, 'import tensorflow as tf\n'), (220, 'tensorflow.strings.to_number', 'tf.strings.to_number', (['in1', 'tf.int32'], {}), True, 'import tensorflow as tf\n'), (227, 'tensorflow.dtypes.as_string', 'tf.dtypes.as_string', (['(add if not swap else sub)'], {'name': '"""TOSTR0"""'}), True, 'import tensorflow as tf\n'), (229, 'tensorflow.cast', 'tf.cast', (['(add if not swap else sub)', 'tf_output0_dtype', '"""CAST0"""'], {}), True, 'import tensorflow as tf\n'), (232, 'tensorflow.dtypes.as_string', 'tf.dtypes.as_string', (['(sub if not swap else add)'], {'name': '"""TOSTR1"""'}), True, 'import tensorflow as tf\n'), (234, 'tensorflow.cast', 'tf.cast', (['(sub if not swap else add)', 'tf_output1_dtype', '"""CAST1"""'], {}), True, 'import tensorflow as tf\n'), (246, 'os.makedirs', 'os.makedirs', (['model_version_dir'], {}), False, 'import os\n'), (250, 'tensorflow.Session', 'tf.Session', ([], {}), True, 'import tensorflow as tf\n'), (262, 'test_util.validate_for_tf_model', 'tu.validate_for_tf_model', (['input_dtype', 'output0_dtype', 'output1_dtype', 'input_shape', 'output0_shape', 'output1_shape'], {}), True, 'import test_util as tu\n'), (314, 'test_util.shape_to_dims_str', 'tu.shape_to_dims_str', (['input_shape'], {}), True, 'import test_util as tu\n'), (315, 'test_util.shape_to_dims_str', 'tu.shape_to_dims_str', (['input_shape'], {}), True, 'import test_util as tu\n'), (317, 'test_util.shape_to_dims_str', 'tu.shape_to_dims_str', (['output0_shape'], {}), True, 'import test_util as tu\n'), (319, 'test_util.shape_to_dims_str', 'tu.shape_to_dims_str', (['output1_shape'], {}), True, 'import test_util as tu\n'), (322, 'os.makedirs', 'os.makedirs', (['config_dir'], {}), False, 'import os\n'), (330, 'builtins.range', 'range', (['output0_label_cnt'], {}), False, 'from builtins import range\n'), (345, 'test_util.validate_for_tf_model', 'tu.validate_for_tf_model', (['input_dtype', 'output0_dtype', 'output1_dtype', 'input_shape', 'output0_shape', 'output1_shape'], {}), True, 'import test_util as tu\n'), (372, 'tensorflow.strings.to_number', 'tf.strings.to_number', (['in0', 'tf.int32'], {}), True, 'import tensorflow as tf\n'), (373, 'tensorflow.strings.to_number', 'tf.strings.to_number', (['in1', 'tf.int32'], {}), True, 'import tensorflow as tf\n'), (380, 'tensorflow.dtypes.as_string', 'tf.dtypes.as_string', (['(add if not swap else sub)'], {'name': '"""TOSTR0"""'}), True, 'import tensorflow as tf\n'), (382, 'tensorflow.cast', 'tf.cast', (['(add if not swap else sub)', 'tf_output0_dtype', 
'"""CAST0"""'], {}), True, 'import tensorflow as tf\n'), (385, 'tensorflow.dtypes.as_string', 'tf.dtypes.as_string', (['(sub if not swap else add)'], {'name': '"""TOSTR1"""'}), True, 'import tensorflow as tf\n'), (387, 'tensorflow.cast', 'tf.cast', (['(sub if not swap else add)', 'tf_output1_dtype', '"""CAST1"""'], {}), True, 'import tensorflow as tf\n'), (399, 'os.makedirs', 'os.makedirs', (['model_version_dir'], {}), False, 'import os\n'), (403, 'tensorflow.Session', 'tf.Session', ([], {}), True, 'import tensorflow as tf\n'), (412, 'tensorflow.saved_model.simple_save', 'tf.saved_model.simple_save', (['sess', "(model_version_dir + '/model.savedmodel')"], {'inputs': "{'INPUT0': input0_tensor, 'INPUT1': input1_tensor}", 'outputs': "{'OUTPUT0': output0_tensor, 'OUTPUT1': output1_tensor}"}), True, 'import tensorflow as tf\n'), (429, 'test_util.validate_for_tf_model', 'tu.validate_for_tf_model', (['input_dtype', 'output0_dtype', 'output1_dtype', 'input_shape', 'output0_shape', 'output1_shape'], {}), True, 'import test_util as tu\n'), (481, 'test_util.shape_to_dims_str', 'tu.shape_to_dims_str', (['input_shape'], {}), True, 'import test_util as tu\n'), (482, 'test_util.shape_to_dims_str', 'tu.shape_to_dims_str', (['input_shape'], {}), True, 'import test_util as tu\n'), (484, 'test_util.shape_to_dims_str', 'tu.shape_to_dims_str', (['output0_shape'], {}), True, 'import test_util as tu\n'), (486, 'test_util.shape_to_dims_str', 'tu.shape_to_dims_str', (['output1_shape'], {}), True, 'import test_util as tu\n'), (489, 'os.makedirs', 'os.makedirs', (['config_dir'], {}), False, 'import os\n'), (497, 'builtins.range', 'range', (['output0_label_cnt'], {}), False, 'from builtins import range\n'), (512, 'test_util.validate_for_c2_model', 'tu.validate_for_c2_model', (['input_dtype', 'output0_dtype', 'output1_dtype', 'input_shape', 'output0_shape', 'output1_shape'], {}), True, 'import test_util as tu\n'), (540, 'os.makedirs', 'os.makedirs', (['model_version_dir'], {}), False, 'import os\n'), (555, 'test_util.validate_for_c2_model', 'tu.validate_for_c2_model', (['input_dtype', 'output0_dtype', 'output1_dtype', 'input_shape', 'output0_shape', 'output1_shape'], {}), True, 'import test_util as tu\n'), (607, 'test_util.shape_to_dims_str', 'tu.shape_to_dims_str', (['input_shape'], {}), True, 'import test_util as tu\n'), (608, 'test_util.shape_to_dims_str', 'tu.shape_to_dims_str', (['input_shape'], {}), True, 'import test_util as tu\n'), (610, 'test_util.shape_to_dims_str', 'tu.shape_to_dims_str', (['output0_shape'], {}), True, 'import test_util as tu\n'), (612, 'test_util.shape_to_dims_str', 'tu.shape_to_dims_str', (['output1_shape'], {}), True, 'import test_util as tu\n'), (615, 'os.makedirs', 'os.makedirs', (['config_dir'], {}), False, 'import os\n'), (623, 'builtins.range', 'range', (['output0_label_cnt'], {}), False, 'from builtins import range\n'), (717, 'os.makedirs', 'os.makedirs', (['model_version_dir'], {}), False, 'import os\n'), (833, 'os.makedirs', 'os.makedirs', (['model_version_dir'], {}), False, 'import os\n'), (904, 'os.makedirs', 'os.makedirs', (['model_version_dir'], {}), False, 'import os\n'), (951, 'os.makedirs', 'os.makedirs', (['model_version_dir'], {}), False, 'import os\n'), (975, 'test_util.validate_for_trt_model', 'tu.validate_for_trt_model', (['input_dtype', 'output0_dtype', 'output1_dtype', 'input_shape', 'output0_shape', 'output1_shape'], {}), True, 'import test_util as tu\n'), (1025, 'test_util.validate_for_trt_model', 'tu.validate_for_trt_model', (['input_dtype', 'output0_dtype', 
'output1_dtype', 'input_shape', 'output0_shape', 'output1_shape'], {}), True, 'import test_util as tu\n'), (1135, 'os.makedirs', 'os.makedirs', (['config_dir'], {}), False, 'import os\n'), (1143, 'builtins.range', 'range', (['output0_label_cnt'], {}), False, 'from builtins import range\n'), (1158, 'test_util.validate_for_onnx_model', 'tu.validate_for_onnx_model', (['input_dtype', 'output0_dtype', 'output1_dtype', 'input_shape', 'output0_shape', 'output1_shape'], {}), True, 'import test_util as tu\n'), (1196, 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Cast"""', "['INPUT0']", "['_INPUT0']"], {'to': 'onnx.TensorProto.INT32'}), False, 'import onnx\n'), (1198, 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Cast"""', "['INPUT1']", "['_INPUT1']"], {'to': 'onnx.TensorProto.INT32'}), False, 'import onnx\n'), (1225, 'onnx.helper.make_operatorsetid', 'onnx.helper.make_operatorsetid', (['""""""', 'FLAGS.onnx_opset'], {}), False, 'import onnx\n'), (1226, 'onnx.helper.make_model', 'onnx.helper.make_model', (['graph_proto'], {'producer_name': '"""triton"""', 'opset_imports': '[model_opset]'}), False, 'import onnx\n'), (1230, 'onnx.helper.make_model', 'onnx.helper.make_model', (['graph_proto'], {'producer_name': '"""triton"""'}), False, 'import onnx\n'), (1233, 'os.makedirs', 'os.makedirs', (['model_version_dir'], {}), False, 'import os\n'), (1245, 'test_util.validate_for_onnx_model', 'tu.validate_for_onnx_model', (['input_dtype', 'output0_dtype', 'output1_dtype', 'input_shape', 'output0_shape', 'output1_shape'], {}), True, 'import test_util as tu\n'), (1260, 'gen_ensemble_model_utils.repeat', 'emu.repeat', (['input_dtype', '(2)'], {}), True, 'import gen_ensemble_model_utils as emu\n'), (1261, 'gen_ensemble_model_utils.repeat', 'emu.repeat', (['input_shape', '(2)'], {}), True, 'import gen_ensemble_model_utils as emu\n'), (1262, 'gen_ensemble_model_utils.repeat', 'emu.repeat', (['None', '(2)'], {}), True, 'import gen_ensemble_model_utils as emu\n'), (1265, 'gen_ensemble_model_utils.repeat', 'emu.repeat', (['None', '(2)'], {}), True, 'import gen_ensemble_model_utils as emu\n'), (1271, 'os.makedirs', 'os.makedirs', (['config_dir'], {}), False, 'import os\n'), (1279, 'builtins.range', 'range', (['output0_label_cnt'], {}), False, 'from builtins import range\n'), (1294, 'test_util.validate_for_libtorch_model', 'tu.validate_for_libtorch_model', (['input_dtype', 'output0_dtype', 'output1_dtype', 'input_shape', 'output0_shape', 'output1_shape'], {}), True, 'import test_util as tu\n'), (1323, 'torch.zeros', 'torch.zeros', (['input_shape'], {'dtype': 'torch_input_dtype'}), False, 'import torch\n'), (1324, 'torch.jit.trace', 'torch.jit.trace', (['addSubModel', '(example_input, example_input)'], {}), False, 'import torch\n'), (1339, 'torch.zeros', 'torch.zeros', (['input_shape'], {'dtype': 'torch_input_dtype'}), False, 'import torch\n'), (1340, 'torch.jit.trace', 'torch.jit.trace', (['subAddModel', '(example_input, example_input)'], {}), False, 'import torch\n'), (1345, 'os.makedirs', 'os.makedirs', (['model_version_dir'], {}), False, 'import os\n'), (1357, 'test_util.validate_for_libtorch_model', 'tu.validate_for_libtorch_model', (['input_dtype', 'output0_dtype', 'output1_dtype', 'input_shape', 'output0_shape', 'output1_shape'], {}), True, 'import test_util as tu\n'), (1410, 'test_util.shape_to_dims_str', 'tu.shape_to_dims_str', (['input_shape'], {}), True, 'import test_util as tu\n'), (1411, 'test_util.shape_to_dims_str', 'tu.shape_to_dims_str', (['input_shape'], {}), True, 'import test_util as 
tu\n'), (1413, 'test_util.shape_to_dims_str', 'tu.shape_to_dims_str', (['output0_shape'], {}), True, 'import test_util as tu\n'), (1415, 'test_util.shape_to_dims_str', 'tu.shape_to_dims_str', (['output1_shape'], {}), True, 'import test_util as tu\n'), (1418, 'os.makedirs', 'os.makedirs', (['config_dir'], {}), False, 'import os\n'), (1426, 'builtins.range', 'range', (['output0_label_cnt'], {}), False, 'from builtins import range\n'), (1586, 'gen_ensemble_model_utils.platform_types_and_validation', 'emu.platform_types_and_validation', ([], {}), True, 'import gen_ensemble_model_utils as emu\n'), (204, 'test_util.shape_to_tf_shape', 'tu.shape_to_tf_shape', (['input_shape'], {}), True, 'import test_util as tu\n'), (206, 'test_util.shape_to_tf_shape', 'tu.shape_to_tf_shape', (['input_shape'], {}), True, 'import test_util as tu\n'), (357, 'test_util.shape_to_tf_shape', 'tu.shape_to_tf_shape', (['input_shape'], {}), True, 'import test_util as tu\n'), (359, 'test_util.shape_to_tf_shape', 'tu.shape_to_tf_shape', (['input_shape'], {}), True, 'import test_util as tu\n'), (1089, 'test_util.shape_to_dims_str', 'tu.shape_to_dims_str', (['input_shape'], {}), True, 'import test_util as tu\n'), (1090, 'test_util.shape_to_dims_str', 'tu.shape_to_dims_str', (['input_shape'], {}), True, 'import test_util as tu\n'), (1092, 'test_util.shape_to_dims_str', 'tu.shape_to_dims_str', (['output0_shape'], {}), True, 'import test_util as tu\n'), (1094, 'test_util.shape_to_dims_str', 'tu.shape_to_dims_str', (['output1_shape'], {}), True, 'import test_util as tu\n'), (1127, 'test_util.shape_to_dims_str', 'tu.shape_to_dims_str', (['input_shape'], {}), True, 'import test_util as tu\n'), (1128, 'test_util.shape_to_dims_str', 'tu.shape_to_dims_str', (['input_shape'], {}), True, 'import test_util as tu\n'), (1130, 'test_util.shape_to_dims_str', 'tu.shape_to_dims_str', (['output0_shape'], {}), True, 'import test_util as tu\n'), (1132, 'test_util.shape_to_dims_str', 'tu.shape_to_dims_str', (['output1_shape'], {}), True, 'import test_util as tu\n'), (1214, 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Identity"""', "['CAST0']", "['OUTPUT0']"], {}), False, 'import onnx\n'), (1216, 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Identity"""', "['CAST1']", "['OUTPUT1']"], {}), False, 'import onnx\n'), (1603, 'gen_ensemble_model_utils.create_ensemble_modelconfig', 'emu.create_ensemble_modelconfig', (['pair[0]', 'models_dir', '(8)', 'model_version', 'config_input_shape', 'config_output0_shape', 'config_output1_shape', 'input_dtype', 'output0_dtype', 'output1_dtype', 'output0_label_cnt', 'version_policy'], {}), True, 'import gen_ensemble_model_utils as emu\n'), (1609, 'gen_ensemble_model_utils.create_ensemble_modelfile', 'emu.create_ensemble_modelfile', (['pair[0]', 'models_dir', '(8)', 'model_version', 'config_input_shape', 'config_output0_shape', 'config_output1_shape', 'input_dtype', 'output0_dtype', 'output1_dtype'], {}), True, 'import gen_ensemble_model_utils as emu\n'), (1615, 'gen_ensemble_model_utils.create_ensemble_modelconfig', 'emu.create_ensemble_modelconfig', (['pair[0]', 'models_dir', '(0)', 'model_version', 'config_input_shape', 'config_output0_shape', 'config_output1_shape', 'input_dtype', 'output0_dtype', 'output1_dtype', 'output0_label_cnt', 'version_policy'], {}), True, 'import gen_ensemble_model_utils as emu\n'), (1621, 'gen_ensemble_model_utils.create_ensemble_modelfile', 'emu.create_ensemble_modelfile', (['pair[0]', 'models_dir', '(0)', 'model_version', 'config_input_shape', 
'config_output0_shape', 'config_output1_shape', 'input_dtype', 'output0_dtype', 'output1_dtype'], {}), True, 'import gen_ensemble_model_utils as emu\n'), (1961, 'gen_ensemble_model_utils.platform_types_and_validation', 'emu.platform_types_and_validation', ([], {}), True, 'import gen_ensemble_model_utils as emu\n'), (211, 'test_util.shape_to_tf_shape', 'tu.shape_to_tf_shape', (['input_shape'], {}), True, 'import test_util as tu\n'), (214, 'test_util.shape_to_tf_shape', 'tu.shape_to_tf_shape', (['input_shape'], {}), True, 'import test_util as tu\n'), (364, 'test_util.shape_to_tf_shape', 'tu.shape_to_tf_shape', (['input_shape'], {}), True, 'import test_util as tu\n'), (367, 'test_util.shape_to_tf_shape', 'tu.shape_to_tf_shape', (['input_shape'], {}), True, 'import test_util as tu\n'), (404, 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), True, 'import tensorflow as tf\n'), (406, 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), True, 'import tensorflow as tf\n'), (408, 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), True, 'import tensorflow as tf\n'), (410, 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), True, 'import tensorflow as tf\n'), (980, 'test_util.shape_is_fixed', 'tu.shape_is_fixed', (['input_shape'], {}), True, 'import test_util as tu\n'), (981, 'test_util.shape_is_fixed', 'tu.shape_is_fixed', (['output0_shape'], {}), True, 'import test_util as tu\n'), (982, 'test_util.shape_is_fixed', 'tu.shape_is_fixed', (['output1_shape'], {}), True, 'import test_util as tu\n'), (996, 'test_util.shape_is_fixed', 'tu.shape_is_fixed', (['input_shape'], {}), True, 'import test_util as tu\n'), (997, 'test_util.shape_is_fixed', 'tu.shape_is_fixed', (['output0_shape'], {}), True, 'import test_util as tu\n'), (998, 'test_util.shape_is_fixed', 'tu.shape_is_fixed', (['output1_shape'], {}), True, 'import test_util as tu\n'), (2053, 'gen_ensemble_model_utils.create_nop_modelconfig', 'emu.create_nop_modelconfig', (['FLAGS.models_dir', 'model_shape', 'model_dtype'], {}), True, 'import gen_ensemble_model_utils as emu\n'), (1967, 'gen_ensemble_model_utils.create_ensemble_modelfile', 'emu.create_ensemble_modelfile', (['pair[0]', 'FLAGS.models_dir', '(8)', '(2)', 'shape', 'shape', 'shape', 'vt', 'vt', 'vt'], {'swap': '(True)'}), True, 'import gen_ensemble_model_utils as emu\n'), (1978, 'gen_ensemble_model_utils.create_ensemble_modelfile', 'emu.create_ensemble_modelfile', (['pair[0]', 'FLAGS.models_dir', '(8)', '(3)', 'shape', 'shape', 'shape', 'vt', 'vt', 'vt'], {'swap': '(True)'}), True, 'import gen_ensemble_model_utils as emu\n'), (1989, 'gen_ensemble_model_utils.create_ensemble_modelfile', 'emu.create_ensemble_modelfile', (['pair[0]', 'FLAGS.models_dir', '(0)', '(2)', 'shape', 'shape', 'shape', 'vt', 'vt', 'vt'], {'swap': '(True)'}), True, 'import gen_ensemble_model_utils as emu\n'), (2000, 'gen_ensemble_model_utils.create_ensemble_modelfile', 'emu.create_ensemble_modelfile', (['pair[0]', 'FLAGS.models_dir', '(0)', '(3)', 'shape', 'shape', 'shape', 'vt', 'vt', 'vt'], {'swap': '(True)'}), True, 'import gen_ensemble_model_utils as emu\n')]
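# A minimal sketch (an assumption, not part of the dataset itself) of how the
# api_extract field above can be consumed. Each entry is a tuple of
# (line_no, api_name, local_expr, (args, kwargs), is_aliased_import, import_stmt);
# the `record` argument is a hypothetical dict with the fields shown in this
# dump (repo_name, hexsha, code, apis, file_path, api_extract).

from collections import Counter

def count_api_calls(record):
    """Return the fully qualified APIs of one record, most frequent first."""
    return Counter(entry[1] for entry in record["api_extract"]).most_common()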
cvai-repo/eccentricity
b8a4570635320aad4e6e48712104b56cf3f67291
"""Builds the eccentricity model. Based on the tutorial for the CIFAR-10 model in Tensorflow. http://tensorflow.org/tutorials/deep_cnn/ Relevant comments from that tutorial have been kept, others are added from me. Summary of available functions: # Compute inference on the model inputs to make a prediction. predictions = inference(inputs) # Compute the total loss of the prediction with respect to the labels. loss = loss(predictions, labels) # Create a graph to run one step of training with respect to the loss. train_op = train(loss, global_step) """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import gzip import os import re import sys import tarfile from six.moves import urllib import tensorflow as tf # from tensorflow.models.image.cifar10 import cifar10_input import convert_to_records as records import numpy as np FLAGS = tf.app.flags.FLAGS # Basic model parameters. tf.app.flags.DEFINE_float('learning_rate', 0.1, 'Initial learning rate.') tf.app.flags.DEFINE_string('pm', '66661', 'pooling scheme across scales. Each number specifies the number of scales remaining at each layer. The first number has to be the same as used in --num_scales.') tf.app.flags.DEFINE_integer('conv_kernel', 5, 'Size of convolutional kernel') tf.app.flags.DEFINE_integer('pool_kernel', 3, 'Size of spatial pooling kernel') tf.app.flags.DEFINE_integer('feats_per_layer', 32, 'Number of feature channels at each layer') tf.app.flags.DEFINE_boolean('total_pool', True, 'If true, pool all feature maps to 1x1 size in final layer') tf.app.flags.DEFINE_integer('pool_stride', '1', 'If 2, we get progressive pooling - with overlap pooling, AlexNet style') TRAIN_FILE = 'train_{}.tfrecords'.format(records.tfrecord_name()) VALIDATION_FILE = 'validation_{}.tfrecords'.format(records.tfrecord_name()) TEST_FILE = 'test_{}.tfrecords'.format(records.tfrecord_name()) def NUM_CLASSES(): return 10 if FLAGS.parity == 'none' else 5 def read_and_decode(filename_queue): reader = tf.TFRecordReader() _, serialized_example = reader.read(filename_queue) features = tf.parse_single_example( serialized_example, # Defaults are not specified since both keys are required. features={ 'image_raw': tf.FixedLenFeature([], tf.string), 'label': tf.FixedLenFeature([], tf.int64), }) if FLAGS.contrast_norm == 'areafactor': image = tf.decode_raw(features['image_raw'], tf.float32) else: image = tf.decode_raw(features['image_raw'], tf.uint8) image = tf.cast(image, tf.float32) * (1. / 255) image.set_shape(np.prod([FLAGS.num_scales, FLAGS.crop_size, FLAGS.crop_size])) image = tf.reshape(image, [FLAGS.num_scales, FLAGS.crop_size, FLAGS.crop_size, 1]) image = image - 0.5 # Convert label from a scalar uint8 tensor to an int32 scalar. label = tf.cast(features['label'], tf.int32) return image, label def inputs(name, batch_size, num_epochs): """Reads input data num_epochs times. Args: train: Selects between the training (True) and test (False) data. batch_size: Number of examples per returned batch. num_epochs: Number of times to read the input data, or 0/None to train forever. Returns: A tuple (images, labels), where: * images is a float tensor with shape [batch_size, FLAGS.num_scales, FLAGS.crop_size, FLAGS.crop_size] * labels is an int32 tensor with shape [batch_size] with the true label, a number in the range [0, NUM_CLASSES()). Note that an tf.train.QueueRunner is added to the graph, which must be run using e.g. tf.train.start_queue_runners(). 
""" if not num_epochs: num_epochs = None filename = os.path.join(FLAGS.train_dir, 'data', '{}_{}.tfrecords'.format(name, records.tfrecord_name())) with tf.name_scope('input'): filename_queue = tf.train.string_input_producer( [filename], num_epochs=num_epochs) # Even when reading in multiple threads, share the filename # queue. image, label = read_and_decode(filename_queue) # Shuffle the examples and collect them into batch_size batches. # (Internally uses a RandomShuffleQueue.) # We run this in two threads to avoid being a bottleneck. images, sparse_labels = tf.train.shuffle_batch( [image, label], batch_size=batch_size, num_threads=8, capacity=1000 + 3 * batch_size, # Ensures a minimum amount of shuffling of examples. min_after_dequeue=1000) return images, sparse_labels def weight_variable(shape): initial = tf.truncated_normal(shape, stddev=0.1) return tf.Variable(initial) def bias_variable(shape): initial = tf.constant(0.1, shape=shape) return tf.Variable(initial) def conv_scale(x, W): return tf.nn.conv3d(x, W, strides=[1,1,1,1,1], padding='VALID') def inference(x): """ Creates a model with pooling across space and scales. Always we have a conv-relu-spatial_pool-scale_pool x N layers structure with one fully connected layer on top. """ if '-' in FLAGS.pm: FLAGS.pm= FLAGS.pm.split('-') num_layers = len(FLAGS.pm) - 1 print(num_layers) for l in range(num_layers): with tf.variable_scope('layer{}'.format(l)): with tf.variable_scope('conv'): if l == 0: bottom = x W = weight_variable([1, FLAGS.conv_kernel, FLAGS.conv_kernel, 1, FLAGS.feats_per_layer]) else: if out.get_shape()[2] < FLAGS.conv_kernel: bottom = out # l (not l + 1) because from previous layer W = weight_variable([1, 1, 1, FLAGS.feats_per_layer, FLAGS.feats_per_layer]) else: bottom = out # l (not l + 1) because from previous layer W = weight_variable([1, FLAGS.conv_kernel, FLAGS.conv_kernel, FLAGS.feats_per_layer, FLAGS.feats_per_layer]) b = bias_variable([FLAGS.feats_per_layer]) Wx_b = tf.nn.conv3d(bottom, W, strides=[1,1,1,1,1], padding='VALID') + b out = tf.nn.relu(Wx_b) shape = out.get_shape() print('conv{}'.format(l+1)) print('\t{} --> {}'.format(bottom.name, out.name)) print('\t{} --> {}'.format(bottom.get_shape(), out.get_shape())) with tf.variable_scope('pool'): bottom = out if l == num_layers - 1 and FLAGS.total_pool: kernel_size = bottom.get_shape()[2] out = tf.nn.max_pool3d(bottom, ksize=[1,1, kernel_size, kernel_size,1], strides=[1,1,1,1,1], padding='VALID') else: out = tf.nn.max_pool3d(bottom, ksize=[1,1, FLAGS.pool_kernel, FLAGS.pool_kernel,1], strides=[1,1,FLAGS.pool_stride,FLAGS.pool_stride,1], padding='VALID') shape = out.get_shape() print('pool{}'.format(l + 1)) print('\t{} --> {}'.format(bottom.name, out.name)) print('\t{} --> {}'.format(bottom.get_shape(), out.get_shape())) with tf.variable_scope('scale'): bottom = out if FLAGS.pm[l + 1] == FLAGS.pm[l]: kernel_size = 1 # useless 1x1 pooling elif int(FLAGS.pm[l + 1]) < int(FLAGS.pm[l]): num_scales_prev = int(FLAGS.pm[l]) num_scales_current = int(FLAGS.pm[l + 1]) kernel_size = (num_scales_prev - num_scales_current) + 1 else: raise ValueError('Number of scales must stay constant or decrease, got {}'.format(FLAGS.pm)) out = tf.nn.max_pool3d(bottom, ksize=[1,kernel_size,1,1,1], strides=[1,1,1,1,1], padding='VALID') shape = out.get_shape() print('scale{}'.format(l + 1)) print('\t{} --> {}'.format(bottom.name, out.name)) print('\t{} --> {}'.format(bottom.get_shape(), out.get_shape())) with tf.variable_scope('fully_connected'): bottom = out bottom_shape = 
bottom.get_shape().as_list() reshape = tf.reshape( bottom, [-1, bottom_shape[1] * bottom_shape[2] * bottom_shape[3] * bottom_shape[4]]) W_fc1 = weight_variable([bottom_shape[1] * bottom_shape[2] * bottom_shape[3] * bottom_shape[4], NUM_CLASSES()]) b_fc1 = bias_variable([NUM_CLASSES()]) out = tf.matmul(reshape, W_fc1) + b_fc1 print('fc') print('\t{} --> {}'.format(bottom.name, out.name)) print('\t{} --> {}'.format(bottom.get_shape(), out.get_shape())) if isinstance(FLAGS.pm, list): FLAGS.pm = '-'.join(FLAGS.pm) return out def loss(logits, labels): """Calculates the loss from the logits and the labels. Args: logits: Logits tensor, float - [batch_size, FLAGS.NUM_CLASSES]. labels: Labels tensor, int32 - [batch_size]. Returns: loss: Loss tensor of type float. """ labels = tf.to_int64(labels) cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits( logits, labels, name='xentropy') loss = tf.reduce_mean(cross_entropy, name='xentropy_mean') return loss def train(loss, global_step): """Train eccentricity model. Create an optimizer and apply to all trainable variables. Args: total_loss: Total loss from loss(). global_step: Integer Variable counting the number of training steps processed. Returns: train_op: op for training. """ # Compute gradients. tf.scalar_summary(loss.op.name, loss) optimizer = tf.train.AdagradOptimizer(FLAGS.learning_rate) # Use the optimizer to apply the gradients that minimize the loss # (and also increment the global step counter) as a single training step. train_op = optimizer.minimize(loss, global_step=global_step) return train_op return train_op
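# Usage sketch (not part of the original module): wiring inputs(), inference(),
# loss() and train() together, as outlined in the module docstring. Batch size
# and epoch count are illustrative, and the remaining flags (train_dir,
# num_scales, crop_size, parity, contrast_norm) are assumed to be defined
# elsewhere, e.g. in convert_to_records.

def _example_training_graph():
  images, labels = inputs('train', batch_size=64, num_epochs=2)
  logits = inference(images)
  total_loss = loss(logits, labels)
  global_step = tf.Variable(0, trainable=False, name='global_step')
  train_op = train(total_loss, global_step)
  # Actually running train_op also requires tf.train.start_queue_runners(),
  # as noted in the inputs() docstring.
  return train_op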
[ "tensorflow.FixedLenFeature", "tensorflow.cast", "tensorflow.app.flags.DEFINE_string", "tensorflow.TFRecordReader", "tensorflow.app.flags.DEFINE_boolean", "tensorflow.to_int64", "tensorflow.Variable", "tensorflow.decode_raw", "tensorflow.app.flags.DEFINE_integer", "tensorflow.name_scope", "tensorflow.train.shuffle_batch", "tensorflow.train.AdagradOptimizer", "tensorflow.matmul", "tensorflow.truncated_normal", "tensorflow.nn.conv3d", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.train.string_input_producer", "tensorflow.nn.relu", "tensorflow.nn.max_pool3d", "tensorflow.constant", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.scalar_summary", "tensorflow.app.flags.DEFINE_float", "numpy.prod", "tensorflow.variable_scope" ]
src/python/ecc/ecc.py
[(38, 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""learning_rate"""', '(0.1)', '"""Initial learning rate."""'], {}), True, 'import tensorflow as tf\n'), (39, 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""pm"""', '"""66661"""', '"""pooling scheme across scales. Each number specifies the number of scales remaining at each layer. The first number has to be the same as used in --num_scales."""'], {}), True, 'import tensorflow as tf\n'), (40, 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""conv_kernel"""', '(5)', '"""Size of convolutional kernel"""'], {}), True, 'import tensorflow as tf\n'), (41, 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""pool_kernel"""', '(3)', '"""Size of spatial pooling kernel"""'], {}), True, 'import tensorflow as tf\n'), (42, 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""feats_per_layer"""', '(32)', '"""Number of feature channels at each layer"""'], {}), True, 'import tensorflow as tf\n'), (43, 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""total_pool"""', '(True)', '"""If true, pool all feature maps to 1x1 size in final layer"""'], {}), True, 'import tensorflow as tf\n'), (44, 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""pool_stride"""', '"""1"""', '"""If 2, we get progressive pooling - with overlap pooling, AlexNet style"""'], {}), True, 'import tensorflow as tf\n'), (47, 'convert_to_records.tfrecord_name', 'records.tfrecord_name', ([], {}), True, 'import convert_to_records as records\n'), (48, 'convert_to_records.tfrecord_name', 'records.tfrecord_name', ([], {}), True, 'import convert_to_records as records\n'), (50, 'convert_to_records.tfrecord_name', 'records.tfrecord_name', ([], {}), True, 'import convert_to_records as records\n'), (57, 'tensorflow.TFRecordReader', 'tf.TFRecordReader', ([], {}), True, 'import tensorflow as tf\n'), (74, 'tensorflow.reshape', 'tf.reshape', (['image', '[FLAGS.num_scales, FLAGS.crop_size, FLAGS.crop_size, 1]'], {}), True, 'import tensorflow as tf\n'), (78, 'tensorflow.cast', 'tf.cast', (["features['label']", 'tf.int32'], {}), True, 'import tensorflow as tf\n'), (124, 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': '(0.1)'}), True, 'import tensorflow as tf\n'), (125, 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), True, 'import tensorflow as tf\n'), (128, 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': 'shape'}), True, 'import tensorflow as tf\n'), (129, 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), True, 'import tensorflow as tf\n'), (132, 'tensorflow.nn.conv3d', 'tf.nn.conv3d', (['x', 'W'], {'strides': '[1, 1, 1, 1, 1]', 'padding': '"""VALID"""'}), True, 'import tensorflow as tf\n'), (221, 'tensorflow.to_int64', 'tf.to_int64', (['labels'], {}), True, 'import tensorflow as tf\n'), (222, 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', (['logits', 'labels'], {'name': '"""xentropy"""'}), True, 'import tensorflow as tf\n'), (224, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cross_entropy'], {'name': '"""xentropy_mean"""'}), True, 'import tensorflow as tf\n'), (242, 'tensorflow.scalar_summary', 'tf.scalar_summary', (['loss.op.name', 'loss'], {}), True, 'import tensorflow as tf\n'), (244, 'tensorflow.train.AdagradOptimizer', 'tf.train.AdagradOptimizer', (['FLAGS.learning_rate'], {}), True, 'import tensorflow as tf\n'), 
(68, 'tensorflow.decode_raw', 'tf.decode_raw', (["features['image_raw']", 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (70, 'tensorflow.decode_raw', 'tf.decode_raw', (["features['image_raw']", 'tf.uint8'], {}), True, 'import tensorflow as tf\n'), (73, 'numpy.prod', 'np.prod', (['[FLAGS.num_scales, FLAGS.crop_size, FLAGS.crop_size]'], {}), True, 'import numpy as np\n'), (104, 'tensorflow.name_scope', 'tf.name_scope', (['"""input"""'], {}), True, 'import tensorflow as tf\n'), (105, 'tensorflow.train.string_input_producer', 'tf.train.string_input_producer', (['[filename]'], {'num_epochs': 'num_epochs'}), True, 'import tensorflow as tf\n'), (115, 'tensorflow.train.shuffle_batch', 'tf.train.shuffle_batch', (['[image, label]'], {'batch_size': 'batch_size', 'num_threads': '(8)', 'capacity': '(1000 + 3 * batch_size)', 'min_after_dequeue': '(1000)'}), True, 'import tensorflow as tf\n'), (194, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""fully_connected"""'], {}), True, 'import tensorflow as tf\n'), (197, 'tensorflow.reshape', 'tf.reshape', (['bottom', '[-1, bottom_shape[1] * bottom_shape[2] * bottom_shape[3] * bottom_shape[4]]'], {}), True, 'import tensorflow as tf\n'), (71, 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (102, 'convert_to_records.tfrecord_name', 'records.tfrecord_name', ([], {}), True, 'import convert_to_records as records\n'), (203, 'tensorflow.matmul', 'tf.matmul', (['reshape', 'W_fc1'], {}), True, 'import tensorflow as tf\n'), (63, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.string'], {}), True, 'import tensorflow as tf\n'), (64, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (148, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conv"""'], {}), True, 'import tensorflow as tf\n'), (162, 'tensorflow.nn.relu', 'tf.nn.relu', (['Wx_b'], {}), True, 'import tensorflow as tf\n'), (167, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""pool"""'], {}), True, 'import tensorflow as tf\n'), (178, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""scale"""'], {}), True, 'import tensorflow as tf\n'), (188, 'tensorflow.nn.max_pool3d', 'tf.nn.max_pool3d', (['bottom'], {'ksize': '[1, kernel_size, 1, 1, 1]', 'strides': '[1, 1, 1, 1, 1]', 'padding': '"""VALID"""'}), True, 'import tensorflow as tf\n'), (161, 'tensorflow.nn.conv3d', 'tf.nn.conv3d', (['bottom', 'W'], {'strides': '[1, 1, 1, 1, 1]', 'padding': '"""VALID"""'}), True, 'import tensorflow as tf\n'), (171, 'tensorflow.nn.max_pool3d', 'tf.nn.max_pool3d', (['bottom'], {'ksize': '[1, 1, kernel_size, kernel_size, 1]', 'strides': '[1, 1, 1, 1, 1]', 'padding': '"""VALID"""'}), True, 'import tensorflow as tf\n'), (173, 'tensorflow.nn.max_pool3d', 'tf.nn.max_pool3d', (['bottom'], {'ksize': '[1, 1, FLAGS.pool_kernel, FLAGS.pool_kernel, 1]', 'strides': '[1, 1, FLAGS.pool_stride, FLAGS.pool_stride, 1]', 'padding': '"""VALID"""'}), True, 'import tensorflow as tf\n')]
miaomiaosang/bert-as-language-model
93bf3d652ee15d283149a480ee07aa6bf0d52666
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BERT language model predict.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import json import modeling import tokenization import numpy as np import tensorflow as tf flags = tf.flags FLAGS = flags.FLAGS flags.DEFINE_integer("max_predictions_per_seq", 20, "In this task, it also refers to maximum number of masked tokens per word.") flags.DEFINE_string( "bert_config_file", None, "The config json file corresponding to the pre-trained BERT model. " "This specifies the model architecture.") flags.DEFINE_string( "input_file", None, "The config json file corresponding to the pre-trained BERT model. " "This specifies the model architecture.") flags.DEFINE_string( "output_dir", None, "The output directory where the model checkpoints will be written.") flags.DEFINE_string("vocab_file", None, "The vocabulary file that the BERT model was trained on.") ## Other parameters flags.DEFINE_string( "init_checkpoint", None, "Initial checkpoint (usually from a pre-trained BERT model).") flags.DEFINE_bool( "do_lower_case", True, "Whether to lower case the input text. Should be True for uncased " "models and False for cased models.") flags.DEFINE_integer( "max_seq_length", 128, "The maximum total input sequence length after WordPiece tokenization. " "Sequences longer than this will be truncated, and sequences shorter " "than this will be padded.") flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.") flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") tf.flags.DEFINE_string( "tpu_name", None, "The Cloud TPU to use for training. This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " "url.") tf.flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") tf.flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.") flags.DEFINE_integer( "num_tpu_cores", 8, "Only used if `use_tpu` is True. 
Total number of TPU cores to use.") class InputExample(object): def __init__(self, unique_id, text): self.unique_id = unique_id self.text = text def read_examples(input_file): """Read a list of `InputExample`s from an input file.""" examples = [] unique_id = 0 with tf.gfile.GFile(input_file, "r") as reader: while True: line = tokenization.convert_to_unicode(reader.readline()) if not line: break line = line.strip() unique_id += 1 examples.append( InputExample(unique_id, line)) unique_id += 1 return examples def model_fn_builder(bert_config, init_checkpoint, use_tpu, use_one_hot_embeddings): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" tf.logging.info("*** Features ***") for name in sorted(features.keys()): tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] masked_lm_positions = features["masked_lm_positions"] masked_lm_ids = features["masked_lm_ids"] model = modeling.BertModel( config=bert_config, is_training=False, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings) masked_lm_example_loss = get_masked_lm_output( bert_config, model.get_sequence_output(), model.get_embedding_table(), masked_lm_positions, masked_lm_ids) tvars = tf.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, assignment_map) tf.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) output_spec = None if mode == tf.estimator.ModeKeys.PREDICT: output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, predictions=masked_lm_example_loss, scaffold_fn=scaffold_fn) # 输出mask_word的score return output_spec return model_fn def get_masked_lm_output(bert_config, input_tensor, output_weights, positions, label_ids): """Get loss and log probs for the masked LM.""" input_tensor = gather_indexes(input_tensor, positions) with tf.variable_scope("cls/predictions"): # We apply one more non-linear transformation before the output layer. # This matrix is not used after pre-training. with tf.variable_scope("transform"): input_tensor = tf.layers.dense( input_tensor, units=bert_config.hidden_size, activation=modeling.get_activation(bert_config.hidden_act), kernel_initializer=modeling.create_initializer( bert_config.initializer_range)) input_tensor = modeling.layer_norm(input_tensor) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
    output_bias = tf.get_variable(
        "output_bias",
        shape=[bert_config.vocab_size],
        initializer=tf.zeros_initializer())
    logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    log_probs = tf.nn.log_softmax(logits, axis=-1)

    label_ids = tf.reshape(label_ids, [-1])

    one_hot_labels = tf.one_hot(
        label_ids, depth=bert_config.vocab_size, dtype=tf.float32)
    per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
    loss = tf.reshape(per_example_loss, [-1, tf.shape(positions)[1]])
    # TODO: dynamic gather from per_example_loss
  return loss


def gather_indexes(sequence_tensor, positions):
  """Gathers the vectors at the specific positions over a minibatch."""
  sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)
  batch_size = sequence_shape[0]
  seq_length = sequence_shape[1]
  width = sequence_shape[2]

  flat_offsets = tf.reshape(
      tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
  flat_positions = tf.reshape(positions + flat_offsets, [-1])
  flat_sequence_tensor = tf.reshape(sequence_tensor,
                                    [batch_size * seq_length, width])
  output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
  return output_tensor


def input_fn_builder(features, seq_length, max_predictions_per_seq):
  """Creates an `input_fn` closure to be passed to TPUEstimator."""

  all_input_ids = []
  all_input_mask = []
  all_segment_ids = []
  all_masked_lm_positions = []
  all_masked_lm_ids = []

  for feature in features:
    all_input_ids.append(feature.input_ids)
    all_input_mask.append(feature.input_mask)
    all_segment_ids.append(feature.segment_ids)
    all_masked_lm_positions.append(feature.masked_lm_positions)
    all_masked_lm_ids.append(feature.masked_lm_ids)

  def input_fn(params):
    """The actual input function."""
    batch_size = params["batch_size"]

    num_examples = len(features)

    # This is for demo purposes and does NOT scale to large data sets. We do
    # not use Dataset.from_generator() because that uses tf.py_func which is
    # not TPU compatible. The right way to load data is with TFRecordReader.
    d = tf.data.Dataset.from_tensor_slices({
        "input_ids":
            tf.constant(
                all_input_ids, shape=[num_examples, seq_length],
                dtype=tf.int32),
        "input_mask":
            tf.constant(
                all_input_mask,
                shape=[num_examples, seq_length],
                dtype=tf.int32),
        "segment_ids":
            tf.constant(
                all_segment_ids,
                shape=[num_examples, seq_length],
                dtype=tf.int32),
        "masked_lm_positions":
            tf.constant(
                all_masked_lm_positions,
                shape=[num_examples, max_predictions_per_seq],
                dtype=tf.int32),
        "masked_lm_ids":
            tf.constant(
                all_masked_lm_ids,
                shape=[num_examples, max_predictions_per_seq],
                dtype=tf.int32)
    })

    d = d.batch(batch_size=batch_size, drop_remainder=False)
    return d

  return input_fn
def convert_examples_to_features(examples, max_seq_length, tokenizer):
  """Converts a set of `InputExample`s to lists of `InputFeatures` and tokens."""

  all_features = []
  all_tokens = []

  for (ex_index, example) in enumerate(examples):
    if ex_index % 10000 == 0:
      tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))

    features, tokens = convert_single_example(ex_index, example,
                                              max_seq_length, tokenizer)

    all_features.extend(features)
    all_tokens.extend(tokens)

  return all_features, all_tokens


tokenizer = tokenization.FullTokenizer(
    vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)

MASKED_TOKEN = "[MASK]"
MASKED_ID = tokenizer.convert_tokens_to_ids([MASKED_TOKEN])[0]


def create_masked_lm_prediction(input_ids, mask_position, mask_count=1):
  new_input_ids = list(input_ids)
  masked_lm_labels = []
  masked_lm_positions = list(range(mask_position, mask_position + mask_count))
  for i in masked_lm_positions:
    new_input_ids[i] = MASKED_ID
    masked_lm_labels.append(input_ids[i])
  return new_input_ids, masked_lm_positions, masked_lm_labels


class InputFeatures(object):
  """A single set of features of data."""

  def __init__(self, input_ids, segment_ids, input_mask, masked_lm_positions,
               masked_lm_ids):
    self.input_ids = input_ids
    self.segment_ids = segment_ids
    self.input_mask = input_mask
    self.masked_lm_positions = masked_lm_positions
    self.masked_lm_ids = masked_lm_ids


def convert_single_example(ex_index, example, max_seq_length, tokenizer):
  """Converts a single `InputExample` into a list of `InputFeatures`,
  one per masked word, plus the input tokens."""
  tokens = tokenizer.tokenize(example.text)

  # Account for [CLS] and [SEP] with "- 2"
  if len(tokens) > max_seq_length - 2:
    tokens = tokens[0:(max_seq_length - 2)]

  input_tokens = []
  segment_ids = []
  input_tokens.append("[CLS]")
  segment_ids.append(0)
  for token in tokens:
    input_tokens.append(token)
    segment_ids.append(0)
  input_tokens.append("[SEP]")
  segment_ids.append(0)

  input_ids = tokenizer.convert_tokens_to_ids(input_tokens)

  # The mask has 1 for real tokens and 0 for padding tokens. Only real
  # tokens are attended to.
  input_mask = [1] * len(input_ids)

  # Zero-pad up to the sequence length.
  while len(input_ids) < max_seq_length:
    input_ids.append(0)
    input_mask.append(0)
    segment_ids.append(0)

  assert len(input_ids) == max_seq_length
  assert len(input_mask) == max_seq_length
  assert len(segment_ids) == max_seq_length

  if ex_index < 5:
    tf.logging.info("*** Example ***")
    tf.logging.info("id: %s" % (example.unique_id))
    tf.logging.info("tokens: %s" % " ".join(
        [tokenization.printable_text(x) for x in input_tokens]))
    tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
    tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
    tf.logging.info(
        "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))

  features = create_sequential_mask(input_tokens, input_ids, input_mask,
                                    segment_ids,
                                    FLAGS.max_predictions_per_seq)
  return features, input_tokens


def is_subtoken(x):
  return x.startswith("##")


def create_sequential_mask(input_tokens, input_ids, input_mask, segment_ids,
                           max_predictions_per_seq):
  """Masks each token/word sequentially."""
  features = []
  i = 1
  while i < len(input_tokens) - 1:
    mask_count = 1
    while is_subtoken(input_tokens[i + mask_count]):
      mask_count += 1

    input_ids_new, masked_lm_positions, masked_lm_labels = \
        create_masked_lm_prediction(input_ids, i, mask_count)
    while len(masked_lm_positions) < max_predictions_per_seq:
      masked_lm_positions.append(0)
      masked_lm_labels.append(0)

    feature = InputFeatures(
        input_ids=input_ids_new,
        input_mask=input_mask,
        segment_ids=segment_ids,
        masked_lm_positions=masked_lm_positions,
        masked_lm_ids=masked_lm_labels)
    features.append(feature)
    i += mask_count
  return features


def parse_result(result, all_tokens, output_file=None):
  """Aggregates per-word losses into token probabilities and sentence
  perplexities, optionally writing them to `output_file` as JSON."""
  tf.logging.info("***** Predict results *****")
  i = 0
  sentences = []
  for word_loss in result:
    # start of a sentence
    if all_tokens[i] == "[CLS]":
      sentence = {}
      tokens = []
      sentence_loss = 0.0
      word_count_per_sent = 0
      i += 1

    # add token
    tokens.append({"token": tokenization.printable_text(all_tokens[i]),
                   "prob": float(np.exp(-word_loss[0]))})
    sentence_loss += word_loss[0]
    word_count_per_sent += 1
    i += 1

    token_count_per_word = 0
    while is_subtoken(all_tokens[i]):
      token_count_per_word += 1
      tokens.append({"token": tokenization.printable_text(all_tokens[i]),
                     "prob": float(np.exp(-word_loss[token_count_per_word]))})
      sentence_loss += word_loss[token_count_per_word]
      i += 1

    # end of a sentence
    if all_tokens[i] == "[SEP]":
      sentence["tokens"] = tokens
      sentence["ppl"] = float(np.exp(sentence_loss / word_count_per_sent))
      sentences.append(sentence)
      i += 1

  if output_file is not None:
    tf.logging.info("Saving results to %s" % output_file)
    with tf.gfile.GFile(output_file, "w") as writer:
      writer.write(json.dumps(sentences, indent=2, ensure_ascii=False))


def main(_):
  tf.logging.set_verbosity(tf.logging.INFO)

  bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)

  if FLAGS.max_seq_length > bert_config.max_position_embeddings:
    raise ValueError(
        "Cannot use sequence length %d because the BERT model "
        "was only trained up to sequence length %d" %
        (FLAGS.max_seq_length, bert_config.max_position_embeddings))

  tf.gfile.MakeDirs(FLAGS.output_dir)

  tpu_cluster_resolver = None
  if FLAGS.use_tpu and FLAGS.tpu_name:
    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)

  is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
  run_config = tf.contrib.tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      master=FLAGS.master,
      model_dir=FLAGS.output_dir,
      tpu_config=tf.contrib.tpu.TPUConfig(
          num_shards=FLAGS.num_tpu_cores,
          per_host_input_for_training=is_per_host))

  model_fn = model_fn_builder(
      bert_config=bert_config,
      init_checkpoint=FLAGS.init_checkpoint,
      use_tpu=FLAGS.use_tpu,
      use_one_hot_embeddings=FLAGS.use_tpu)

  # If TPU is not available, this will fall back to normal Estimator on CPU
  # or GPU.
  estimator = tf.contrib.tpu.TPUEstimator(
      use_tpu=FLAGS.use_tpu,
      model_fn=model_fn,
      config=run_config,
      predict_batch_size=FLAGS.predict_batch_size)

  predict_examples = read_examples(FLAGS.input_file)
  features, all_tokens = convert_examples_to_features(predict_examples,
                                                      FLAGS.max_seq_length,
                                                      tokenizer)

  tf.logging.info("***** Running prediction *****")
  tf.logging.info("  Num examples = %d", len(predict_examples))
  tf.logging.info("  Batch size = %d", FLAGS.predict_batch_size)

  if FLAGS.use_tpu:
    # Warning: according to tpu_estimator.py, prediction on TPU is an
    # experimental feature, so it is not supported here.
    raise ValueError("Prediction on TPU is not supported.")

  predict_input_fn = input_fn_builder(
      features=features,
      seq_length=FLAGS.max_seq_length,
      max_predictions_per_seq=FLAGS.max_predictions_per_seq)

  result = estimator.predict(input_fn=predict_input_fn)
  output_predict_file = os.path.join(FLAGS.output_dir,
                                     FLAGS.input_file.split('.')[0] + ".json")
  parse_result(result, all_tokens, output_predict_file)


if __name__ == "__main__":
  tf.app.run()
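For reference, the scoring arithmetic that `parse_result` applies to the estimator output reduces to a couple of lines of NumPy. A minimal sketch with made-up per-word losses (the real values come from `masked_lm_example_loss` above):

import numpy as np

# Hypothetical per-word negative log-likelihoods for a 3-word sentence.
word_losses = [0.7, 1.2, 0.4]

# Per-word probability under the model, as computed in parse_result.
probs = [float(np.exp(-loss)) for loss in word_losses]

# Sentence pseudo-perplexity: exp of the mean per-word loss.
ppl = float(np.exp(sum(word_losses) / len(word_losses)))
print(probs, ppl)  # approx [0.497, 0.301, 0.670] 2.15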
[ "tensorflow.contrib.cluster_resolver.TPUClusterResolver", "tensorflow.nn.log_softmax", "tensorflow.gfile.GFile", "tensorflow.reduce_sum", "tensorflow.train.init_from_checkpoint", "tensorflow.gfile.MakeDirs", "numpy.exp", "tensorflow.contrib.tpu.TPUEstimatorSpec", "tensorflow.contrib.tpu.TPUEstimator", "tensorflow.gather", "tensorflow.logging.set_verbosity", "tensorflow.trainable_variables", "tensorflow.app.run", "tensorflow.matmul", "tensorflow.shape", "tensorflow.zeros_initializer", "tensorflow.logging.info", "tensorflow.one_hot", "tensorflow.contrib.tpu.TPUConfig", "tensorflow.nn.bias_add", "tensorflow.train.Scaffold", "tensorflow.constant", "tensorflow.range", "tensorflow.flags.DEFINE_string", "tensorflow.reshape", "tensorflow.variable_scope" ]
run_lm_predict.py
[(72, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""tpu_name"""', 'None', '"""The Cloud TPU to use for training. This should be either the name used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url."""'], {}), True, 'import tensorflow as tf\n'), (78, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""tpu_zone"""', 'None', '"""[Optional] GCE zone where the Cloud TPU is located in. If not specified, we will attempt to automatically detect the GCE project from metadata."""'], {}), True, 'import tensorflow as tf\n'), (84, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""gcp_project"""', 'None', '"""[Optional] Project name for the Cloud TPU-enabled project. If not specified, we will attempt to automatically detect the GCE project from metadata."""'], {}), True, 'import tensorflow as tf\n'), (90, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""master"""', 'None', '"""[Optional] TensorFlow master URL."""'], {}), True, 'import tensorflow as tf\n'), (316, 'tokenization.FullTokenizer', 'tokenization.FullTokenizer', ([], {'vocab_file': 'FLAGS.vocab_file', 'do_lower_case': 'FLAGS.do_lower_case'}), False, 'import tokenization\n'), (225, 'modeling.get_shape_list', 'modeling.get_shape_list', (['sequence_tensor'], {'expected_rank': '(3)'}), False, 'import modeling\n'), (232, 'tensorflow.reshape', 'tf.reshape', (['(positions + flat_offsets)', '[-1]'], {}), True, 'import tensorflow as tf\n'), (233, 'tensorflow.reshape', 'tf.reshape', (['sequence_tensor', '[batch_size * seq_length, width]'], {}), True, 'import tensorflow as tf\n'), (235, 'tensorflow.gather', 'tf.gather', (['flat_sequence_tensor', 'flat_positions'], {}), True, 'import tensorflow as tf\n'), (463, 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), True, 'import tensorflow as tf\n'), (465, 'modeling.BertConfig.from_json_file', 'modeling.BertConfig.from_json_file', (['FLAGS.bert_config_file'], {}), False, 'import modeling\n'), (473, 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['FLAGS.output_dir'], {}), True, 'import tensorflow as tf\n'), (498, 'tensorflow.contrib.tpu.TPUEstimator', 'tf.contrib.tpu.TPUEstimator', ([], {'use_tpu': 'FLAGS.use_tpu', 'model_fn': 'model_fn', 'config': 'run_config', 'predict_batch_size': 'FLAGS.predict_batch_size'}), True, 'import tensorflow as tf\n'), (509, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Running prediction*****"""'], {}), True, 'import tensorflow as tf\n'), (511, 'tensorflow.logging.info', 'tf.logging.info', (['""" Batch size = %d"""', 'FLAGS.predict_batch_size'], {}), True, 'import tensorflow as tf\n'), (528, 'tensorflow.app.run', 'tf.app.run', ([], {}), True, 'import tensorflow as tf\n'), (107, 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['input_file', '"""r"""'], {}), True, 'import tensorflow as tf\n'), (127, 'tensorflow.logging.info', 'tf.logging.info', (['"""*** Features ***"""'], {}), True, 'import tensorflow as tf\n'), (138, 'modeling.BertModel', 'modeling.BertModel', ([], {'config': 'bert_config', 'is_training': '(False)', 'input_ids': 'input_ids', 'input_mask': 'input_mask', 'token_type_ids': 'segment_ids', 'use_one_hot_embeddings': 'use_one_hot_embeddings'}), False, 'import modeling\n'), (150, 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), True, 'import tensorflow as tf\n'), (166, 'tensorflow.logging.info', 'tf.logging.info', (['"""**** Trainable Variables ****"""'], {}), True, 'import tensorflow as tf\n'), (190, 
'tensorflow.variable_scope', 'tf.variable_scope', (['"""cls/predictions"""'], {}), True, 'import tensorflow as tf\n'), (208, 'tensorflow.matmul', 'tf.matmul', (['input_tensor', 'output_weights'], {'transpose_b': '(True)'}), True, 'import tensorflow as tf\n'), (209, 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['logits', 'output_bias'], {}), True, 'import tensorflow as tf\n'), (210, 'tensorflow.nn.log_softmax', 'tf.nn.log_softmax', (['logits'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (212, 'tensorflow.reshape', 'tf.reshape', (['label_ids', '[-1]'], {}), True, 'import tensorflow as tf\n'), (214, 'tensorflow.one_hot', 'tf.one_hot', (['label_ids'], {'depth': 'bert_config.vocab_size', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (380, 'tensorflow.logging.info', 'tf.logging.info', (['"""*** Example ***"""'], {}), True, 'import tensorflow as tf\n'), (381, 'tensorflow.logging.info', 'tf.logging.info', (["('id: %s' % example.unique_id)"], {}), True, 'import tensorflow as tf\n'), (423, 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['output_file', '"""w"""'], {}), True, 'import tensorflow as tf\n'), (424, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Predict results *****"""'], {}), True, 'import tensorflow as tf\n'), (477, 'tensorflow.contrib.cluster_resolver.TPUClusterResolver', 'tf.contrib.cluster_resolver.TPUClusterResolver', (['FLAGS.tpu_name'], {'zone': 'FLAGS.tpu_zone', 'project': 'FLAGS.gcp_project'}), True, 'import tensorflow as tf\n'), (129, 'tensorflow.logging.info', 'tf.logging.info', (["(' name = %s, shape = %s' % (name, features[name].shape))"], {}), True, 'import tensorflow as tf\n'), (155, 'modeling.get_assignment_map_from_checkpoint', 'modeling.get_assignment_map_from_checkpoint', (['tvars', 'init_checkpoint'], {}), False, 'import modeling\n'), (171, 'tensorflow.logging.info', 'tf.logging.info', (['""" name = %s, shape = %s%s"""', 'var.name', 'var.shape', 'init_string'], {}), True, 'import tensorflow as tf\n'), (176, 'tensorflow.contrib.tpu.TPUEstimatorSpec', 'tf.contrib.tpu.TPUEstimatorSpec', ([], {'mode': 'mode', 'predictions': 'masked_lm_example_loss', 'scaffold_fn': 'scaffold_fn'}), True, 'import tensorflow as tf\n'), (193, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""transform"""'], {}), True, 'import tensorflow as tf\n'), (200, 'modeling.layer_norm', 'modeling.layer_norm', (['input_tensor'], {}), False, 'import modeling\n'), (216, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(log_probs * one_hot_labels)'], {'axis': '[-1]'}), True, 'import tensorflow as tf\n'), (231, 'tensorflow.range', 'tf.range', (['(0)', 'batch_size'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (459, 'tensorflow.logging.info', 'tf.logging.info', (["('Saving results to %s' % output_file)"], {}), True, 'import tensorflow as tf\n'), (485, 'tensorflow.contrib.tpu.TPUConfig', 'tf.contrib.tpu.TPUConfig', ([], {'num_shards': 'FLAGS.num_tpu_cores', 'per_host_input_for_training': 'is_per_host'}), True, 'import tensorflow as tf\n'), (164, 'tensorflow.train.init_from_checkpoint', 'tf.train.init_from_checkpoint', (['init_checkpoint', 'assignment_map'], {}), True, 'import tensorflow as tf\n'), (207, 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), True, 'import tensorflow as tf\n'), (265, 'tensorflow.constant', 'tf.constant', (['all_input_ids'], {'shape': '[num_examples, seq_length]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (269, 'tensorflow.constant', 'tf.constant', (['all_input_mask'], {'shape': '[num_examples, 
seq_length]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (274, 'tensorflow.constant', 'tf.constant', (['all_segment_ids'], {'shape': '[num_examples, seq_length]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (279, 'tensorflow.constant', 'tf.constant', (['all_masked_lm_positions'], {'shape': '[num_examples, max_predictions_per_seq]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (284, 'tensorflow.constant', 'tf.constant', (['all_masked_lm_ids'], {'shape': '[num_examples, max_predictions_per_seq]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (460, 'json.dumps', 'json.dumps', (['sentences'], {'indent': '(2)', 'ensure_ascii': '(False)'}), False, 'import json\n'), (159, 'tensorflow.train.init_from_checkpoint', 'tf.train.init_from_checkpoint', (['init_checkpoint', 'assignment_map'], {}), True, 'import tensorflow as tf\n'), (160, 'tensorflow.train.Scaffold', 'tf.train.Scaffold', ([], {}), True, 'import tensorflow as tf\n'), (197, 'modeling.get_activation', 'modeling.get_activation', (['bert_config.hidden_act'], {}), False, 'import modeling\n'), (198, 'modeling.create_initializer', 'modeling.create_initializer', (['bert_config.initializer_range'], {}), False, 'import modeling\n'), (217, 'tensorflow.shape', 'tf.shape', (['positions'], {}), True, 'import tensorflow as tf\n'), (437, 'tokenization.printable_text', 'tokenization.printable_text', (['all_tokens[i]'], {}), False, 'import tokenization\n'), (454, 'numpy.exp', 'np.exp', (['(sentence_loss / word_count_per_sent)'], {}), True, 'import numpy as np\n'), (383, 'tokenization.printable_text', 'tokenization.printable_text', (['x'], {}), False, 'import tokenization\n'), (438, 'numpy.exp', 'np.exp', (['(-word_loss[0])'], {}), True, 'import numpy as np\n'), (446, 'tokenization.printable_text', 'tokenization.printable_text', (['all_tokens[i]'], {}), False, 'import tokenization\n'), (447, 'numpy.exp', 'np.exp', (['(-word_loss[token_count_per_word])'], {}), True, 'import numpy as np\n')]
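Each tuple in the `api_extract` field above records, in order: the source line, the fully qualified API name, the call expression as written, the positional arguments, the keyword arguments, a boolean that appears to flag aliased imports (`import X as Y`), and the import statement. A minimal sketch of consuming such records once parsed into Python tuples; the sample rows are abridged copies of entries above:

from collections import Counter

records = [
    (107, 'tensorflow.gfile.GFile', 'tf.gfile.GFile',
     ['input_file', '"""r"""'], {}, True, 'import tensorflow as tf\n'),
    (316, 'tokenization.FullTokenizer', 'tokenization.FullTokenizer',
     [], {'vocab_file': 'FLAGS.vocab_file'}, False, 'import tokenization\n'),
]

# Count calls per top-level library.
by_library = Counter(fq_name.split('.')[0]
                     for _, fq_name, _, _, _, _, _ in records)
print(by_library)  # Counter({'tensorflow': 1, 'tokenization': 1})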
eff-kay/temp-texar-repo
9c699e8143fd8ecb5d65a41ceef09c45832b9258
# """ Unit tests for various optimization related utilities. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import numpy as np import tensorflow as tf import texar.core.optimization as opt from texar.utils import utils class OptimizationTest(tf.test.TestCase): """Tests optimization. """ def test_get_optimizer(self): """Tests get_optimizer. """ default_optimizer_fn, optimizer_class = opt.get_optimizer_fn( opt.default_optimization_hparams()["optimizer"]) default_optimizer = default_optimizer_fn(1.0) self.assertTrue(optimizer_class, tf.train.Optimizer) self.assertIsInstance(default_optimizer, tf.train.AdamOptimizer) hparams = { "type": "MomentumOptimizer", "kwargs": { "learning_rate": 0.001, "momentum": 0.9, "use_nesterov": True } } momentum_optimizer_fn, _ = opt.get_optimizer_fn(hparams) momentum_optimizer = momentum_optimizer_fn() self.assertIsInstance(momentum_optimizer, tf.train.MomentumOptimizer) hparams = { "type": tf.train.MomentumOptimizer, "kwargs": { "momentum": 0.9, "use_nesterov": True } } momentum_optimizer_fn, _ = opt.get_optimizer_fn(hparams) momentum_optimizer = momentum_optimizer_fn(0.001) self.assertIsInstance(momentum_optimizer, tf.train.MomentumOptimizer) hparams = { "type": tf.train.MomentumOptimizer(0.001, 0.9) } momentum_optimizer, _ = opt.get_optimizer_fn(hparams) self.assertIsInstance(momentum_optimizer, tf.train.MomentumOptimizer) def test_get_learning_rate_decay_fn(self): # pylint: disable=too-many-locals """Tests get_learning_rate_decay_fn. """ default_lr_decay_fn = opt.get_learning_rate_decay_fn( opt.default_optimization_hparams()["learning_rate_decay"]) self.assertIsNone(default_lr_decay_fn) boundaries = [2, 4] values = [0.1, 0.01, 0.001] hparams = { "type": "piecewise_constant", "kwargs": { "boundaries": boundaries, "values": values }, "min_learning_rate": 0.05, "start_decay_step": 1, "end_decay_step": utils.MAX_SEQ_LENGTH, } pc_lr_decay_fn = opt.get_learning_rate_decay_fn(hparams) global_step = 1 pc_lr = pc_lr_decay_fn(learning_rate=1., global_step=global_step) pc_lr_true = tf.train.piecewise_constant( global_step-hparams["start_decay_step"], boundaries, values) hparams["type"] = "natural_exp_decay" hparams["kwargs"] = { "decay_steps": 1, "decay_rate": 0.5 } ned_lr_decay_fn = opt.get_learning_rate_decay_fn(hparams) ned_lr = ned_lr_decay_fn(learning_rate=1., global_step=global_step) ned_lr_true = tf.train.natural_exp_decay( 1., global_step-hparams["start_decay_step"], hparams["kwargs"]["decay_steps"], hparams["kwargs"]["decay_rate"]) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) pc_lr_, pc_lr_true_, ned_lr_, ned_lr_true_ = sess.run( [pc_lr, pc_lr_true, ned_lr, ned_lr_true]) self.assertEqual(pc_lr_, pc_lr_true_) self.assertEqual(ned_lr_, ned_lr_true_) def test_get_gradient_clip_fn(self): # pylint: disable=too-many-locals """Tests get_gradient_clip_fn. """ default_grad_clip_fn = opt.get_gradient_clip_fn( opt.default_optimization_hparams()["gradient_clip"]) self.assertIsNone(default_grad_clip_fn) grads = [tf.random_uniform([10, 10], -1., 1.) 
for _ in range(5)] grads_and_vars = list(zip(grads, range(5))) hparams = { "type": "clip_by_global_norm", "kwargs": { "clip_norm": 0.1 } } gn_grad_clip_fn = opt.get_gradient_clip_fn(hparams) gn_grads_and_vars = gn_grad_clip_fn(grads_and_vars) gn_grads, _ = zip(*gn_grads_and_vars) gn_grads_true, _ = tf.clip_by_global_norm( grads, hparams["kwargs"]["clip_norm"]) hparams = { "type": "clip_by_value", "kwargs": { "clip_value_min": -0.01, "clip_value_max": 0.01 } } v_grad_clip_fn = opt.get_gradient_clip_fn(hparams) v_grads_and_vars = v_grad_clip_fn(grads_and_vars) v_grads, _ = zip(*v_grads_and_vars) v_grads_true = tf.clip_by_value(grads, hparams["kwargs"]["clip_value_min"], hparams["kwargs"]["clip_value_max"]) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) gn_grads_, gn_grads_true_, v_grads_, v_grads_true_ = sess.run( [gn_grads, gn_grads_true, v_grads, v_grads_true]) np.testing.assert_array_equal(gn_grads_, gn_grads_true_) np.testing.assert_array_equal(v_grads_, v_grads_true_) def test_get_train_op(self): """Tests get_train_op. """ var = tf.Variable(0.) loss = tf.nn.l2_loss(var) train_op = opt.get_train_op(loss) self.assertTrue(tf.contrib.framework.is_tensor(train_op)) if __name__ == "__main__": tf.test.main()
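The `{"type": ..., "kwargs": ...}` hparams convention exercised by these tests resolves an optimizer class by name and binds its keyword arguments. A minimal sketch of that resolution pattern (not texar's actual implementation):

import functools
import tensorflow as tf  # TF 1.x

def make_optimizer_fn(hparams):
    # Resolve a string type against tf.train, then pre-bind the kwargs.
    opt_type = hparams["type"]
    if isinstance(opt_type, str):
        opt_type = getattr(tf.train, opt_type)  # e.g. "MomentumOptimizer"
    return functools.partial(opt_type, **hparams.get("kwargs", {}))

opt_fn = make_optimizer_fn({
    "type": "MomentumOptimizer",
    "kwargs": {"momentum": 0.9, "use_nesterov": True},
})
optimizer = opt_fn(0.001)  # the learning rate is supplied at call time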
[ "tensorflow.clip_by_value", "tensorflow.Variable", "tensorflow.test.main", "tensorflow.train.natural_exp_decay", "tensorflow.train.piecewise_constant", "numpy.testing.assert_array_equal", "tensorflow.clip_by_global_norm", "tensorflow.nn.l2_loss", "tensorflow.train.MomentumOptimizer", "tensorflow.contrib.framework.is_tensor", "tensorflow.global_variables_initializer", "tensorflow.random_uniform" ]
texar/core/optimization_test.py
[(158, 'tensorflow.test.main', 'tf.test.main', ([], {}), True, 'import tensorflow as tf\n'), (40, 'texar.core.optimization.get_optimizer_fn', 'opt.get_optimizer_fn', (['hparams'], {}), True, 'import texar.core.optimization as opt\n'), (51, 'texar.core.optimization.get_optimizer_fn', 'opt.get_optimizer_fn', (['hparams'], {}), True, 'import texar.core.optimization as opt\n'), (58, 'texar.core.optimization.get_optimizer_fn', 'opt.get_optimizer_fn', (['hparams'], {}), True, 'import texar.core.optimization as opt\n'), (81, 'texar.core.optimization.get_learning_rate_decay_fn', 'opt.get_learning_rate_decay_fn', (['hparams'], {}), True, 'import texar.core.optimization as opt\n'), (85, 'tensorflow.train.piecewise_constant', 'tf.train.piecewise_constant', (["(global_step - hparams['start_decay_step'])", 'boundaries', 'values'], {}), True, 'import tensorflow as tf\n'), (93, 'texar.core.optimization.get_learning_rate_decay_fn', 'opt.get_learning_rate_decay_fn', (['hparams'], {}), True, 'import texar.core.optimization as opt\n'), (95, 'tensorflow.train.natural_exp_decay', 'tf.train.natural_exp_decay', (['(1.0)', "(global_step - hparams['start_decay_step'])", "hparams['kwargs']['decay_steps']", "hparams['kwargs']['decay_rate']"], {}), True, 'import tensorflow as tf\n'), (122, 'texar.core.optimization.get_gradient_clip_fn', 'opt.get_gradient_clip_fn', (['hparams'], {}), True, 'import texar.core.optimization as opt\n'), (125, 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['grads', "hparams['kwargs']['clip_norm']"], {}), True, 'import tensorflow as tf\n'), (135, 'texar.core.optimization.get_gradient_clip_fn', 'opt.get_gradient_clip_fn', (['hparams'], {}), True, 'import texar.core.optimization as opt\n'), (138, 'tensorflow.clip_by_value', 'tf.clip_by_value', (['grads', "hparams['kwargs']['clip_value_min']", "hparams['kwargs']['clip_value_max']"], {}), True, 'import tensorflow as tf\n'), (152, 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (153, 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['var'], {}), True, 'import tensorflow as tf\n'), (154, 'texar.core.optimization.get_train_op', 'opt.get_train_op', (['loss'], {}), True, 'import texar.core.optimization as opt\n'), (56, 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', (['(0.001)', '(0.9)'], {}), True, 'import tensorflow as tf\n'), (113, 'tensorflow.random_uniform', 'tf.random_uniform', (['[10, 10]', '(-1.0)', '(1.0)'], {}), True, 'import tensorflow as tf\n'), (146, 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['gn_grads_', 'gn_grads_true_'], {}), True, 'import numpy as np\n'), (147, 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['v_grads_', 'v_grads_true_'], {}), True, 'import numpy as np\n'), (155, 'tensorflow.contrib.framework.is_tensor', 'tf.contrib.framework.is_tensor', (['train_op'], {}), True, 'import tensorflow as tf\n'), (27, 'texar.core.optimization.default_optimization_hparams', 'opt.default_optimization_hparams', ([], {}), True, 'import texar.core.optimization as opt\n'), (66, 'texar.core.optimization.default_optimization_hparams', 'opt.default_optimization_hparams', ([], {}), True, 'import texar.core.optimization as opt\n'), (100, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (110, 'texar.core.optimization.default_optimization_hparams', 'opt.default_optimization_hparams', ([], {}), True, 'import texar.core.optimization as opt\n'), (143, 
'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n')]
VanessaDo/cloudml-samples
ae6cd718e583944beef9d8a90db12091ac399432
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import numpy as np import tensorflow as tf INPUT_DIM = 5 OUTPUT_DIM = 3 def generator_fn(generator_inputs): outputs = tf.layers.dense(generator_inputs, OUTPUT_DIM) return outputs def discriminator_fn(data, generator_inputs): outputs = tf.layers.dense(data, 1) return outputs def model_fn(features, labels, mode, params): # build model global_step = tf.train.get_global_step() generator_inputs = features real_data = labels gan_model = tf.contrib.gan.gan_model(generator_fn, discriminator_fn, real_data, generator_inputs) predictions = gan_model.generated_data loss = None train_op = None if mode == tf.estimator.ModeKeys.TRAIN: # define loss gan_loss = tf.contrib.gan.gan_loss(gan_model, add_summaries=False) loss = gan_loss.generator_loss # define train_op gen_optimizer = tf.train.RMSPropOptimizer(learning_rate=0.05) dis_optimizer = tf.train.RMSPropOptimizer(learning_rate=0.05) # wrapper to make the optimizer work with TPUs if params['use_tpu']: gen_optimizer = tf.contrib.tpu.CrossShardOptimizer(gen_optimizer) dis_optimizer = tf.contrib.tpu.CrossShardOptimizer(dis_optimizer) gan_train_ops = tf.contrib.gan.gan_train_ops(gan_model, gan_loss, gen_optimizer, dis_optimizer) while_loop = tf.contrib.tpu.while_loop if params['use_tpu'] else tf.while_loop # train the discriminator 100 steps inputs = [tf.constant(0), tf.constant(0.0)] cond = lambda i, x: tf.less(i, 100) def body(i, x): return tf.add(i, 1), gan_train_ops.discriminator_train_op dis_train_op = while_loop(cond, body, inputs) # tf.contrib.gan's train op does not manage global steps in it train_op = tf.group( dis_train_op, gan_train_ops.generator_train_op, global_step.assign_add(1)) if params['use_tpu']: # TPU version of EstimatorSpec return tf.contrib.tpu.TPUEstimatorSpec( mode=mode, predictions=predictions, loss=loss, train_op=train_op,) else: return tf.estimator.EstimatorSpec( mode=mode, predictions=predictions, loss=loss, train_op=train_op,) def train_input_fn(params={}): # make some fake noise data_size = 100 noise_tensor = tf.random_normal((data_size, INPUT_DIM)) real_data_tensor = tf.random_uniform((data_size, OUTPUT_DIM)) dataset = tf.data.Dataset.from_tensor_slices((noise_tensor, real_data_tensor)) dataset = dataset.repeat().shuffle(10) # TPUEstimator passes params when calling input_fn batch_size = params.get('train_batch_size', 16) dataset = dataset.batch(batch_size, drop_remainder=True) # TPUs need to know all dimensions when the graph is built # Datasets know the batch size only when the graph is run def set_shapes(features, labels): features_shape = features.get_shape().merge_with([batch_size, None]) labels_shape = labels.get_shape().merge_with([batch_size, None]) features.set_shape(features_shape) labels.set_shape(labels_shape) return features, labels dataset = dataset.map(set_shapes) dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE) return dataset def main(args): # pass the args as params so the model_fn can use # the TPU specific args params = 
vars(args) if args.use_tpu: # additional configs required for using TPUs tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(args.tpu) tpu_config = tf.contrib.tpu.TPUConfig( num_shards=8, # using Cloud TPU v2-8 iterations_per_loop=args.save_checkpoints_steps) # use the TPU version of RunConfig config = tf.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, model_dir=args.model_dir, tpu_config=tpu_config, save_checkpoints_steps=args.save_checkpoints_steps, save_summary_steps=10) # TPUEstimator estimator = tf.contrib.tpu.TPUEstimator( model_fn=model_fn, config=config, params=params, train_batch_size=args.train_batch_size, eval_batch_size=32, export_to_tpu=False) else: config = tf.estimator.RunConfig( model_dir=args.model_dir, save_checkpoints_steps=10, save_summary_steps=10) estimator = tf.estimator.Estimator( model_fn, config=config, params=params) estimator.train(train_input_fn, steps=100) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( '--model-dir', type=str, default='/tmp/tpu-template', help='Location to write checkpoints and summaries to. Must be a GCS URI when using Cloud TPU.') parser.add_argument( '--train-batch-size', type=int, default=16, help='The training batch size. The training batch is divided evenly across the TPU cores.') parser.add_argument( '--save-checkpoints-steps', type=int, default=10, help='The number of training steps before saving each checkpoint.') parser.add_argument( '--use-tpu', action='store_true', help='Whether to use TPU.') parser.add_argument( '--tpu', default=None, help='The name or GRPC URL of the TPU node. Leave it as `None` when training on CMLE.') args, _ = parser.parse_known_args() main(args)
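The `tf.while_loop` in `model_fn` above leans on the fact that `tf.contrib.gan`'s train ops follow `create_train_op` semantics and evaluate to a scalar loss tensor, so the op can ride along as a float loop variable. A stripped-down sketch of that pattern with a hypothetical `train_tensor`:

import tensorflow as tf  # TF 1.x graph mode

def repeat_in_graph(train_tensor, n_steps):
    # train_tensor is assumed to be a scalar float tensor whose evaluation
    # performs one update step, matching the tf.constant(0.0) loop variable
    # in dtype and shape.
    cond = lambda i, _: tf.less(i, n_steps)
    body = lambda i, _: (tf.add(i, 1), train_tensor)
    return tf.while_loop(cond, body, [tf.constant(0), tf.constant(0.0)])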
[ "tensorflow.contrib.gan.gan_loss", "tensorflow.contrib.cluster_resolver.TPUClusterResolver", "tensorflow.contrib.tpu.CrossShardOptimizer", "tensorflow.estimator.RunConfig", "tensorflow.contrib.tpu.TPUEstimatorSpec", "tensorflow.contrib.tpu.TPUEstimator", "tensorflow.contrib.gan.gan_model", "tensorflow.layers.dense", "tensorflow.train.get_global_step", "tensorflow.add", "tensorflow.contrib.gan.gan_train_ops", "tensorflow.estimator.Estimator", "tensorflow.train.RMSPropOptimizer", "tensorflow.less", "tensorflow.contrib.tpu.TPUConfig", "tensorflow.constant", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.contrib.tpu.RunConfig", "tensorflow.estimator.EstimatorSpec", "tensorflow.random_uniform", "tensorflow.random_normal" ]
tpu/templates/tpu_gan_estimator/trainer_single.py
[(24, 'tensorflow.layers.dense', 'tf.layers.dense', (['generator_inputs', 'OUTPUT_DIM'], {}), True, 'import tensorflow as tf\n'), (29, 'tensorflow.layers.dense', 'tf.layers.dense', (['data', '(1)'], {}), True, 'import tensorflow as tf\n'), (35, 'tensorflow.train.get_global_step', 'tf.train.get_global_step', ([], {}), True, 'import tensorflow as tf\n'), (40, 'tensorflow.contrib.gan.gan_model', 'tf.contrib.gan.gan_model', (['generator_fn', 'discriminator_fn', 'real_data', 'generator_inputs'], {}), True, 'import tensorflow as tf\n'), (96, 'tensorflow.random_normal', 'tf.random_normal', (['(data_size, INPUT_DIM)'], {}), True, 'import tensorflow as tf\n'), (97, 'tensorflow.random_uniform', 'tf.random_uniform', (['(data_size, OUTPUT_DIM)'], {}), True, 'import tensorflow as tf\n'), (99, 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(noise_tensor, real_data_tensor)'], {}), True, 'import tensorflow as tf\n'), (167, 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), False, 'import argparse\n'), (48, 'tensorflow.contrib.gan.gan_loss', 'tf.contrib.gan.gan_loss', (['gan_model'], {'add_summaries': '(False)'}), True, 'import tensorflow as tf\n'), (52, 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', ([], {'learning_rate': '(0.05)'}), True, 'import tensorflow as tf\n'), (53, 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', ([], {'learning_rate': '(0.05)'}), True, 'import tensorflow as tf\n'), (60, 'tensorflow.contrib.gan.gan_train_ops', 'tf.contrib.gan.gan_train_ops', (['gan_model', 'gan_loss', 'gen_optimizer', 'dis_optimizer'], {}), True, 'import tensorflow as tf\n'), (80, 'tensorflow.contrib.tpu.TPUEstimatorSpec', 'tf.contrib.tpu.TPUEstimatorSpec', ([], {'mode': 'mode', 'predictions': 'predictions', 'loss': 'loss', 'train_op': 'train_op'}), True, 'import tensorflow as tf\n'), (86, 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'predictions': 'predictions', 'loss': 'loss', 'train_op': 'train_op'}), True, 'import tensorflow as tf\n'), (130, 'tensorflow.contrib.cluster_resolver.TPUClusterResolver', 'tf.contrib.cluster_resolver.TPUClusterResolver', (['args.tpu'], {}), True, 'import tensorflow as tf\n'), (131, 'tensorflow.contrib.tpu.TPUConfig', 'tf.contrib.tpu.TPUConfig', ([], {'num_shards': '(8)', 'iterations_per_loop': 'args.save_checkpoints_steps'}), True, 'import tensorflow as tf\n'), (136, 'tensorflow.contrib.tpu.RunConfig', 'tf.contrib.tpu.RunConfig', ([], {'cluster': 'tpu_cluster_resolver', 'model_dir': 'args.model_dir', 'tpu_config': 'tpu_config', 'save_checkpoints_steps': 'args.save_checkpoints_steps', 'save_summary_steps': '(10)'}), True, 'import tensorflow as tf\n'), (144, 'tensorflow.contrib.tpu.TPUEstimator', 'tf.contrib.tpu.TPUEstimator', ([], {'model_fn': 'model_fn', 'config': 'config', 'params': 'params', 'train_batch_size': 'args.train_batch_size', 'eval_batch_size': '(32)', 'export_to_tpu': '(False)'}), True, 'import tensorflow as tf\n'), (153, 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {'model_dir': 'args.model_dir', 'save_checkpoints_steps': '(10)', 'save_summary_steps': '(10)'}), True, 'import tensorflow as tf\n'), (158, 'tensorflow.estimator.Estimator', 'tf.estimator.Estimator', (['model_fn'], {'config': 'config', 'params': 'params'}), True, 'import tensorflow as tf\n'), (57, 'tensorflow.contrib.tpu.CrossShardOptimizer', 'tf.contrib.tpu.CrossShardOptimizer', (['gen_optimizer'], {}), True, 'import tensorflow as tf\n'), (58, 
'tensorflow.contrib.tpu.CrossShardOptimizer', 'tf.contrib.tpu.CrossShardOptimizer', (['dis_optimizer'], {}), True, 'import tensorflow as tf\n'), (65, 'tensorflow.constant', 'tf.constant', (['(0)'], {}), True, 'import tensorflow as tf\n'), (65, 'tensorflow.constant', 'tf.constant', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (66, 'tensorflow.less', 'tf.less', (['i', '(100)'], {}), True, 'import tensorflow as tf\n'), (68, 'tensorflow.add', 'tf.add', (['i', '(1)'], {}), True, 'import tensorflow as tf\n')]
zhangbo2008/bert
d2c1b03735c5c1428b918d80f810baea1527ac2d
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Run masked LM/next sentence masked_lm pre-training for BERT.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import modeling import optimization import tensorflow as tf flags = tf.flags FLAGS = flags.FLAGS ## Required parameters flags.DEFINE_string( "bert_config_file", './model/bert_config.json', "The config json file corresponding to the pre-trained BERT model. " "This specifies the model architecture.") flags.DEFINE_string( "input_file", './input.txt', "Input TF example files (can be a glob or comma separated).") flags.DEFINE_string( "output_dir", './tmp', "The output directory where the model checkpoints will be written.") ## Other parameters flags.DEFINE_string( "init_checkpoint", None, "Initial checkpoint (usually from a pre-trained BERT model).") flags.DEFINE_integer( "max_seq_length", 128, "The maximum total input sequence length after WordPiece tokenization. " "Sequences longer than this will be truncated, and sequences shorter " "than this will be padded. Must match data generation.") flags.DEFINE_integer( "max_predictions_per_seq", 20, "Maximum number of masked LM predictions per sequence. " "Must match data generation.") flags.DEFINE_bool("do_train", True, "Whether to run training.") flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.") flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.") flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.") flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.") flags.DEFINE_integer("num_train_steps", 100000, "Number of training steps.") flags.DEFINE_integer("num_warmup_steps", 10000, "Number of warmup steps.") flags.DEFINE_integer("save_checkpoints_steps", 1000, "How often to save the model checkpoint.") flags.DEFINE_integer("iterations_per_loop", 1000, "How many steps to make in each estimator call.") flags.DEFINE_integer("max_eval_steps", 100, "Maximum number of eval steps.") flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") tf.flags.DEFINE_string( "tpu_name", None, "The Cloud TPU to use for training. This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " "url.") tf.flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") tf.flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.") flags.DEFINE_integer( "num_tpu_cores", 8, "Only used if `use_tpu` is True. 
Total number of TPU cores to use.") def model_fn_builder(bert_config, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" tf.logging.info("*** Features ***") for name in sorted(features.keys()): tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] masked_lm_positions = features["masked_lm_positions"] masked_lm_ids = features["masked_lm_ids"] masked_lm_weights = features["masked_lm_weights"] next_sentence_labels = features["next_sentence_labels"] is_training = (mode == tf.estimator.ModeKeys.TRAIN) model = modeling.BertModel( config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings) (masked_lm_loss, masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output( bert_config, model.get_sequence_output(), model.get_embedding_table(), masked_lm_positions, masked_lm_ids, masked_lm_weights) (next_sentence_loss, next_sentence_example_loss, next_sentence_log_probs) = get_next_sentence_output( bert_config, model.get_pooled_output(), next_sentence_labels) total_loss = masked_lm_loss + next_sentence_loss tvars = tf.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, assignment_map) tf.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) output_spec = None if mode == tf.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn) elif mode == tf.estimator.ModeKeys.EVAL: def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids, masked_lm_weights, next_sentence_example_loss, next_sentence_log_probs, next_sentence_labels): """Computes the loss and accuracy of the model.""" masked_lm_log_probs = tf.reshape(masked_lm_log_probs, [-1, masked_lm_log_probs.shape[-1]]) masked_lm_predictions = tf.argmax( masked_lm_log_probs, axis=-1, output_type=tf.int32) masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1]) masked_lm_ids = tf.reshape(masked_lm_ids, [-1]) masked_lm_weights = tf.reshape(masked_lm_weights, [-1]) masked_lm_accuracy = tf.metrics.accuracy( labels=masked_lm_ids, predictions=masked_lm_predictions, weights=masked_lm_weights) masked_lm_mean_loss = tf.metrics.mean( values=masked_lm_example_loss, weights=masked_lm_weights) next_sentence_log_probs = tf.reshape( next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]]) next_sentence_predictions = tf.argmax( next_sentence_log_probs, axis=-1, output_type=tf.int32) next_sentence_labels = tf.reshape(next_sentence_labels, [-1]) next_sentence_accuracy = 
tf.metrics.accuracy( labels=next_sentence_labels, predictions=next_sentence_predictions) next_sentence_mean_loss = tf.metrics.mean( values=next_sentence_example_loss) return { "masked_lm_accuracy": masked_lm_accuracy, "masked_lm_loss": masked_lm_mean_loss, "next_sentence_accuracy": next_sentence_accuracy, "next_sentence_loss": next_sentence_mean_loss, } eval_metrics = (metric_fn, [ masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids, masked_lm_weights, next_sentence_example_loss, next_sentence_log_probs, next_sentence_labels ]) output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn) else: raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode)) return output_spec return model_fn def get_masked_lm_output(bert_config, input_tensor, output_weights, positions, label_ids, label_weights): """Get loss and log probs for the masked LM.""" input_tensor = gather_indexes(input_tensor, positions) with tf.variable_scope("cls/predictions"): # We apply one more non-linear transformation before the output layer. # This matrix is not used after pre-training. with tf.variable_scope("transform"): input_tensor = tf.layers.dense( input_tensor, units=bert_config.hidden_size, activation=modeling.get_activation(bert_config.hidden_act), kernel_initializer=modeling.create_initializer( bert_config.initializer_range)) input_tensor = modeling.layer_norm(input_tensor) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. output_bias = tf.get_variable( "output_bias", shape=[bert_config.vocab_size], initializer=tf.zeros_initializer()) logits = tf.matmul(input_tensor, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) log_probs = tf.nn.log_softmax(logits, axis=-1) label_ids = tf.reshape(label_ids, [-1]) label_weights = tf.reshape(label_weights, [-1]) one_hot_labels = tf.one_hot( label_ids, depth=bert_config.vocab_size, dtype=tf.float32) # The `positions` tensor might be zero-padded (if the sequence is too # short to have the maximum number of predictions). The `label_weights` # tensor has a value of 1.0 for every real prediction and 0.0 for the # padding predictions. per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1]) numerator = tf.reduce_sum(label_weights * per_example_loss) denominator = tf.reduce_sum(label_weights) + 1e-5 loss = numerator / denominator return (loss, per_example_loss, log_probs) def get_next_sentence_output(bert_config, input_tensor, labels): """Get loss and log probs for the next sentence prediction.""" # Simple binary classification. Note that 0 is "next sentence" and 1 is # "random sentence". This weight matrix is not used after pre-training. 
with tf.variable_scope("cls/seq_relationship"): output_weights = tf.get_variable( "output_weights", shape=[2, bert_config.hidden_size], initializer=modeling.create_initializer(bert_config.initializer_range)) output_bias = tf.get_variable( "output_bias", shape=[2], initializer=tf.zeros_initializer()) logits = tf.matmul(input_tensor, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) log_probs = tf.nn.log_softmax(logits, axis=-1) labels = tf.reshape(labels, [-1]) one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = tf.reduce_mean(per_example_loss) return (loss, per_example_loss, log_probs) def gather_indexes(sequence_tensor, positions): """Gathers the vectors at the specific positions over a minibatch.""" sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3) batch_size = sequence_shape[0] seq_length = sequence_shape[1] width = sequence_shape[2] flat_offsets = tf.reshape( tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1]) flat_positions = tf.reshape(positions + flat_offsets, [-1]) flat_sequence_tensor = tf.reshape(sequence_tensor, [batch_size * seq_length, width]) output_tensor = tf.gather(flat_sequence_tensor, flat_positions) return output_tensor def input_fn_builder(input_files, max_seq_length, max_predictions_per_seq, is_training, num_cpu_threads=4): """Creates an `input_fn` closure to be passed to TPUEstimator.""" def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] name_to_features = { "input_ids": tf.FixedLenFeature([max_seq_length], tf.int64), "input_mask": tf.FixedLenFeature([max_seq_length], tf.int64), "segment_ids": tf.FixedLenFeature([max_seq_length], tf.int64), "masked_lm_positions": tf.FixedLenFeature([max_predictions_per_seq], tf.int64), "masked_lm_ids": tf.FixedLenFeature([max_predictions_per_seq], tf.int64), "masked_lm_weights": tf.FixedLenFeature([max_predictions_per_seq], tf.float32), "next_sentence_labels": tf.FixedLenFeature([1], tf.int64), } # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. if is_training: d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files)) d = d.repeat() d = d.shuffle(buffer_size=len(input_files)) # `cycle_length` is the number of parallel files that get read. cycle_length = min(num_cpu_threads, len(input_files)) # `sloppy` mode means that the interleaving is not exact. This adds # even more randomness to the training pipeline. d = d.apply( tf.contrib.data.parallel_interleave( tf.data.TFRecordDataset, sloppy=is_training, cycle_length=cycle_length)) d = d.shuffle(buffer_size=100) else: d = tf.data.TFRecordDataset(input_files) # Since we evaluate for a fixed number of steps we don't want to encounter # out-of-range exceptions. d = d.repeat() # We must `drop_remainder` on training because the TPU requires fixed # size dimensions. For eval, we assume we are evaluating on the CPU or GPU # and we *don't* want to drop the remainder, otherwise we wont cover # every sample. 
d = d.apply( tf.contrib.data.map_and_batch( lambda record: _decode_record(record, name_to_features), batch_size=batch_size, num_parallel_batches=num_cpu_threads, drop_remainder=True)) return d return input_fn def _decode_record(record, name_to_features): """Decodes a record to a TensorFlow example.""" example = tf.parse_single_example(record, name_to_features) # tf.Example only supports tf.int64, but the TPU only supports tf.int32. # So cast all int64 to int32. for name in list(example.keys()): t = example[name] if t.dtype == tf.int64: t = tf.to_int32(t) example[name] = t return example def main(_): tf.logging.set_verbosity(tf.logging.INFO) if not FLAGS.do_train and not FLAGS.do_eval: raise ValueError("At least one of `do_train` or `do_eval` must be True.") bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) tf.gfile.MakeDirs(FLAGS.output_dir) input_files = [] for input_pattern in FLAGS.input_file.split(","): input_files.extend(tf.gfile.Glob(input_pattern)) tf.logging.info("*** Input Files ***") for input_file in input_files: tf.logging.info(" %s" % input_file) tpu_cluster_resolver = None if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver( FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 run_config = tf.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps, tpu_config=tf.contrib.tpu.TPUConfig( iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host)) model_fn = model_fn_builder( bert_config=bert_config, init_checkpoint=FLAGS.init_checkpoint, learning_rate=FLAGS.learning_rate, num_train_steps=FLAGS.num_train_steps, num_warmup_steps=FLAGS.num_warmup_steps, use_tpu=FLAGS.use_tpu, use_one_hot_embeddings=FLAGS.use_tpu) # If TPU is not available, this will fall back to normal Estimator on CPU # or GPU. estimator = tf.contrib.tpu.TPUEstimator( use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=FLAGS.train_batch_size, eval_batch_size=FLAGS.eval_batch_size) if FLAGS.do_train: tf.logging.info("***** Running training *****") tf.logging.info(" Batch size = %d", FLAGS.train_batch_size) train_input_fn = input_fn_builder( input_files=input_files, max_seq_length=FLAGS.max_seq_length, max_predictions_per_seq=FLAGS.max_predictions_per_seq, is_training=True) estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps) if FLAGS.do_eval: tf.logging.info("***** Running evaluation *****") tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size) eval_input_fn = input_fn_builder( input_files=input_files, max_seq_length=FLAGS.max_seq_length, max_predictions_per_seq=FLAGS.max_predictions_per_seq, is_training=False) result = estimator.evaluate( input_fn=eval_input_fn, steps=FLAGS.max_eval_steps) output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt") with tf.gfile.GFile(output_eval_file, "w") as writer: tf.logging.info("***** Eval results *****") for key in sorted(result.keys()): tf.logging.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) if __name__ == "__main__": # flags.mark_flag_as_required("input_file") # flags.mark_flag_as_required("bert_config_file") # flags.mark_flag_as_required("output_dir") tf.app.run()
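The weighted masked-LM loss computed in `get_masked_lm_output` above is a weighted mean with a small guard in the denominator. A NumPy sketch with made-up numbers, mirroring the `label_weights` padding convention:

import numpy as np

# Hypothetical per-prediction losses; the last slot is padding.
per_example_loss = np.array([2.1, 0.9, 3.0, 0.0])
label_weights = np.array([1.0, 1.0, 1.0, 0.0])

numerator = np.sum(label_weights * per_example_loss)
denominator = np.sum(label_weights) + 1e-5  # guards against all-padding rows
loss = numerator / denominator
print(loss)  # ~2.0: the mean over the three real predictions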
[ "tensorflow.contrib.cluster_resolver.TPUClusterResolver", "tensorflow.metrics.accuracy", "tensorflow.nn.log_softmax", "tensorflow.FixedLenFeature", "tensorflow.reduce_sum", "tensorflow.gfile.GFile", "tensorflow.train.init_from_checkpoint", "tensorflow.contrib.data.parallel_interleave", "tensorflow.gfile.MakeDirs", "tensorflow.to_int32", "tensorflow.contrib.tpu.TPUEstimatorSpec", "tensorflow.contrib.tpu.TPUEstimator", "tensorflow.data.TFRecordDataset", "tensorflow.gather", "tensorflow.logging.set_verbosity", "tensorflow.trainable_variables", "tensorflow.parse_single_example", "tensorflow.argmax", "tensorflow.app.run", "tensorflow.metrics.mean", "tensorflow.matmul", "tensorflow.zeros_initializer", "tensorflow.logging.info", "tensorflow.one_hot", "tensorflow.gfile.Glob", "tensorflow.contrib.tpu.TPUConfig", "tensorflow.nn.bias_add", "tensorflow.train.Scaffold", "tensorflow.constant", "tensorflow.range", "tensorflow.reduce_mean", "tensorflow.flags.DEFINE_string", "tensorflow.reshape", "tensorflow.variable_scope" ]
run_pretraining.py
[(84, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""tpu_name"""', 'None', '"""The Cloud TPU to use for training. This should be either the name used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url."""'], {}), True, 'import tensorflow as tf\n'), (90, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""tpu_zone"""', 'None', '"""[Optional] GCE zone where the Cloud TPU is located in. If not specified, we will attempt to automatically detect the GCE project from metadata."""'], {}), True, 'import tensorflow as tf\n'), (96, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""gcp_project"""', 'None', '"""[Optional] Project name for the Cloud TPU-enabled project. If not specified, we will attempt to automatically detect the GCE project from metadata."""'], {}), True, 'import tensorflow as tf\n'), (102, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""master"""', 'None', '"""[Optional] TensorFlow master URL."""'], {}), True, 'import tensorflow as tf\n'), (310, 'modeling.get_shape_list', 'modeling.get_shape_list', (['sequence_tensor'], {'expected_rank': '(3)'}), False, 'import modeling\n'), (317, 'tensorflow.reshape', 'tf.reshape', (['(positions + flat_offsets)', '[-1]'], {}), True, 'import tensorflow as tf\n'), (318, 'tensorflow.reshape', 'tf.reshape', (['sequence_tensor', '[batch_size * seq_length, width]'], {}), True, 'import tensorflow as tf\n'), (320, 'tensorflow.gather', 'tf.gather', (['flat_sequence_tensor', 'flat_positions'], {}), True, 'import tensorflow as tf\n'), (393, 'tensorflow.parse_single_example', 'tf.parse_single_example', (['record', 'name_to_features'], {}), True, 'import tensorflow as tf\n'), (407, 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), True, 'import tensorflow as tf\n'), (412, 'modeling.BertConfig.from_json_file', 'modeling.BertConfig.from_json_file', (['FLAGS.bert_config_file'], {}), False, 'import modeling\n'), (414, 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['FLAGS.output_dir'], {}), True, 'import tensorflow as tf\n'), (420, 'tensorflow.logging.info', 'tf.logging.info', (['"""*** Input Files ***"""'], {}), True, 'import tensorflow as tf\n'), (451, 'tensorflow.contrib.tpu.TPUEstimator', 'tf.contrib.tpu.TPUEstimator', ([], {'use_tpu': 'FLAGS.use_tpu', 'model_fn': 'model_fn', 'config': 'run_config', 'train_batch_size': 'FLAGS.train_batch_size', 'eval_batch_size': 'FLAGS.eval_batch_size'}), True, 'import tensorflow as tf\n'), (493, 'tensorflow.app.run', 'tf.app.run', ([], {}), True, 'import tensorflow as tf\n'), (117, 'tensorflow.logging.info', 'tf.logging.info', (['"""*** Features ***"""'], {}), True, 'import tensorflow as tf\n'), (131, 'modeling.BertModel', 'modeling.BertModel', ([], {'config': 'bert_config', 'is_training': 'is_training', 'input_ids': 'input_ids', 'input_mask': 'input_mask', 'token_type_ids': 'segment_ids', 'use_one_hot_embeddings': 'use_one_hot_embeddings'}), False, 'import modeling\n'), (150, 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), True, 'import tensorflow as tf\n'), (167, 'tensorflow.logging.info', 'tf.logging.info', (['"""**** Trainable Variables ****"""'], {}), True, 'import tensorflow as tf\n'), (245, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""cls/predictions"""'], {}), True, 'import tensorflow as tf\n'), (263, 'tensorflow.matmul', 'tf.matmul', (['input_tensor', 'output_weights'], {'transpose_b': '(True)'}), True, 'import tensorflow as tf\n'), (264, 
'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['logits', 'output_bias'], {}), True, 'import tensorflow as tf\n'), (265, 'tensorflow.nn.log_softmax', 'tf.nn.log_softmax', (['logits'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (267, 'tensorflow.reshape', 'tf.reshape', (['label_ids', '[-1]'], {}), True, 'import tensorflow as tf\n'), (268, 'tensorflow.reshape', 'tf.reshape', (['label_weights', '[-1]'], {}), True, 'import tensorflow as tf\n'), (270, 'tensorflow.one_hot', 'tf.one_hot', (['label_ids'], {'depth': 'bert_config.vocab_size', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (278, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(label_weights * per_example_loss)'], {}), True, 'import tensorflow as tf\n'), (290, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""cls/seq_relationship"""'], {}), True, 'import tensorflow as tf\n'), (298, 'tensorflow.matmul', 'tf.matmul', (['input_tensor', 'output_weights'], {'transpose_b': '(True)'}), True, 'import tensorflow as tf\n'), (299, 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['logits', 'output_bias'], {}), True, 'import tensorflow as tf\n'), (300, 'tensorflow.nn.log_softmax', 'tf.nn.log_softmax', (['logits'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (301, 'tensorflow.reshape', 'tf.reshape', (['labels', '[-1]'], {}), True, 'import tensorflow as tf\n'), (302, 'tensorflow.one_hot', 'tf.one_hot', (['labels'], {'depth': '(2)', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (304, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['per_example_loss'], {}), True, 'import tensorflow as tf\n'), (422, 'tensorflow.logging.info', 'tf.logging.info', (["(' %s' % input_file)"], {}), True, 'import tensorflow as tf\n'), (426, 'tensorflow.contrib.cluster_resolver.TPUClusterResolver', 'tf.contrib.cluster_resolver.TPUClusterResolver', (['FLAGS.tpu_name'], {'zone': 'FLAGS.tpu_zone', 'project': 'FLAGS.gcp_project'}), True, 'import tensorflow as tf\n'), (459, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Running training *****"""'], {}), True, 'import tensorflow as tf\n'), (460, 'tensorflow.logging.info', 'tf.logging.info', (['""" Batch size = %d"""', 'FLAGS.train_batch_size'], {}), True, 'import tensorflow as tf\n'), (469, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Running evaluation *****"""'], {}), True, 'import tensorflow as tf\n'), (470, 'tensorflow.logging.info', 'tf.logging.info', (['""" Batch size = %d"""', 'FLAGS.eval_batch_size'], {}), True, 'import tensorflow as tf\n'), (481, 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""eval_results.txt"""'], {}), False, 'import os\n'), (119, 'tensorflow.logging.info', 'tf.logging.info', (["(' name = %s, shape = %s' % (name, features[name].shape))"], {}), True, 'import tensorflow as tf\n'), (156, 'modeling.get_assignment_map_from_checkpoint', 'modeling.get_assignment_map_from_checkpoint', (['tvars', 'init_checkpoint'], {}), False, 'import modeling\n'), (172, 'tensorflow.logging.info', 'tf.logging.info', (['""" name = %s, shape = %s%s"""', 'var.name', 'var.shape', 'init_string'], {}), True, 'import tensorflow as tf\n'), (177, 'optimization.create_optimizer', 'optimization.create_optimizer', (['total_loss', 'learning_rate', 'num_train_steps', 'num_warmup_steps', 'use_tpu'], {}), False, 'import optimization\n'), (180, 'tensorflow.contrib.tpu.TPUEstimatorSpec', 'tf.contrib.tpu.TPUEstimatorSpec', ([], {'mode': 'mode', 'loss': 'total_loss', 'train_op': 'train_op', 'scaffold_fn': 'scaffold_fn'}), True, 'import tensorflow as tf\n'), (248, 
'tensorflow.variable_scope', 'tf.variable_scope', (['"""transform"""'], {}), True, 'import tensorflow as tf\n'), (255, 'modeling.layer_norm', 'modeling.layer_norm', (['input_tensor'], {}), False, 'import modeling\n'), (277, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(log_probs * one_hot_labels)'], {'axis': '[-1]'}), True, 'import tensorflow as tf\n'), (279, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['label_weights'], {}), True, 'import tensorflow as tf\n'), (303, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(one_hot_labels * log_probs)'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (316, 'tensorflow.range', 'tf.range', (['(0)', 'batch_size'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (337, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[max_seq_length]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (339, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[max_seq_length]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (341, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[max_seq_length]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (343, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[max_predictions_per_seq]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (345, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[max_predictions_per_seq]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (347, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[max_predictions_per_seq]', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (349, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[1]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (371, 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['input_files'], {}), True, 'import tensorflow as tf\n'), (400, 'tensorflow.to_int32', 'tf.to_int32', (['t'], {}), True, 'import tensorflow as tf\n'), (418, 'tensorflow.gfile.Glob', 'tf.gfile.Glob', (['input_pattern'], {}), True, 'import tensorflow as tf\n'), (435, 'tensorflow.contrib.tpu.TPUConfig', 'tf.contrib.tpu.TPUConfig', ([], {'iterations_per_loop': 'FLAGS.iterations_per_loop', 'num_shards': 'FLAGS.num_tpu_cores', 'per_host_input_for_training': 'is_per_host'}), True, 'import tensorflow as tf\n'), (482, 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['output_eval_file', '"""w"""'], {}), True, 'import tensorflow as tf\n'), (483, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Eval results *****"""'], {}), True, 'import tensorflow as tf\n'), (165, 'tensorflow.train.init_from_checkpoint', 'tf.train.init_from_checkpoint', (['init_checkpoint', 'assignment_map'], {}), True, 'import tensorflow as tf\n'), (227, 'tensorflow.contrib.tpu.TPUEstimatorSpec', 'tf.contrib.tpu.TPUEstimatorSpec', ([], {'mode': 'mode', 'loss': 'total_loss', 'eval_metrics': 'eval_metrics', 'scaffold_fn': 'scaffold_fn'}), True, 'import tensorflow as tf\n'), (262, 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), True, 'import tensorflow as tf\n'), (294, 'modeling.create_initializer', 'modeling.create_initializer', (['bert_config.initializer_range'], {}), False, 'import modeling\n'), (296, 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), True, 'import tensorflow as tf\n'), (355, 'tensorflow.constant', 'tf.constant', (['input_files'], {}), True, 'import tensorflow as tf\n'), (365, 'tensorflow.contrib.data.parallel_interleave', 'tf.contrib.data.parallel_interleave', (['tf.data.TFRecordDataset'], {'sloppy': 'is_training', 'cycle_length': 'cycle_length'}), True, 'import 
tensorflow as tf\n'), (160, 'tensorflow.train.init_from_checkpoint', 'tf.train.init_from_checkpoint', (['init_checkpoint', 'assignment_map'], {}), True, 'import tensorflow as tf\n'), (161, 'tensorflow.train.Scaffold', 'tf.train.Scaffold', ([], {}), True, 'import tensorflow as tf\n'), (191, 'tensorflow.reshape', 'tf.reshape', (['masked_lm_log_probs', '[-1, masked_lm_log_probs.shape[-1]]'], {}), True, 'import tensorflow as tf\n'), (193, 'tensorflow.argmax', 'tf.argmax', (['masked_lm_log_probs'], {'axis': '(-1)', 'output_type': 'tf.int32'}), True, 'import tensorflow as tf\n'), (195, 'tensorflow.reshape', 'tf.reshape', (['masked_lm_example_loss', '[-1]'], {}), True, 'import tensorflow as tf\n'), (196, 'tensorflow.reshape', 'tf.reshape', (['masked_lm_ids', '[-1]'], {}), True, 'import tensorflow as tf\n'), (197, 'tensorflow.reshape', 'tf.reshape', (['masked_lm_weights', '[-1]'], {}), True, 'import tensorflow as tf\n'), (198, 'tensorflow.metrics.accuracy', 'tf.metrics.accuracy', ([], {'labels': 'masked_lm_ids', 'predictions': 'masked_lm_predictions', 'weights': 'masked_lm_weights'}), True, 'import tensorflow as tf\n'), (202, 'tensorflow.metrics.mean', 'tf.metrics.mean', ([], {'values': 'masked_lm_example_loss', 'weights': 'masked_lm_weights'}), True, 'import tensorflow as tf\n'), (205, 'tensorflow.reshape', 'tf.reshape', (['next_sentence_log_probs', '[-1, next_sentence_log_probs.shape[-1]]'], {}), True, 'import tensorflow as tf\n'), (207, 'tensorflow.argmax', 'tf.argmax', (['next_sentence_log_probs'], {'axis': '(-1)', 'output_type': 'tf.int32'}), True, 'import tensorflow as tf\n'), (209, 'tensorflow.reshape', 'tf.reshape', (['next_sentence_labels', '[-1]'], {}), True, 'import tensorflow as tf\n'), (210, 'tensorflow.metrics.accuracy', 'tf.metrics.accuracy', ([], {'labels': 'next_sentence_labels', 'predictions': 'next_sentence_predictions'}), True, 'import tensorflow as tf\n'), (212, 'tensorflow.metrics.mean', 'tf.metrics.mean', ([], {'values': 'next_sentence_example_loss'}), True, 'import tensorflow as tf\n'), (252, 'modeling.get_activation', 'modeling.get_activation', (['bert_config.hidden_act'], {}), False, 'import modeling\n'), (253, 'modeling.create_initializer', 'modeling.create_initializer', (['bert_config.initializer_range'], {}), False, 'import modeling\n')]
kazushi-fa/RotationDetection
79c3b51822aa8aeed3fbf68a58c8802fc3fe12bb
# -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division

import os
import sys
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import cv2

sys.path.append("../../")

from tools.train_base import Train
from libs.configs import cfgs
from libs.models.detectors.fcos import build_whole_network_batch_quad
from libs.utils.coordinate_convert import backward_convert, sort_box_points
from dataloader.pretrained_weights.pretrain_zoo import PretrainModelZoo
from libs.utils.coordinate_convert import get_horizen_minAreaRectangle
from utils.order_points import re_order, sort_corners

os.environ["CUDA_VISIBLE_DEVICES"] = cfgs.GPU_GROUP


class TrainFCOS(Train):

    def get_gtboxes_and_label(self, gtboxes_and_label_h, gtboxes_and_label_r, num_objects):
        return gtboxes_and_label_h[:, :int(max(num_objects)), :].astype(np.float32), \
               gtboxes_and_label_r[:, :int(max(num_objects)), :].astype(np.float32)

    def main(self):
        with tf.Graph().as_default() as graph, tf.device('/cpu:0'):

            num_gpu = len(cfgs.GPU_GROUP.strip().split(','))
            global_step = slim.get_or_create_global_step()
            lr = self.warmup_lr(cfgs.LR, global_step, cfgs.WARM_SETP, num_gpu*cfgs.BATCH_SIZE)
            tf.summary.scalar('lr', lr)

            optimizer = tf.train.MomentumOptimizer(lr, momentum=cfgs.MOMENTUM)
            fcos = build_whole_network_batch_quad.DetectionNetworkFCOS(cfgs=self.cfgs,
                                                                       is_training=True)

            with tf.name_scope('get_batch'):
                if cfgs.IMAGE_PYRAMID:
                    shortside_len_list = tf.constant(cfgs.IMG_SHORT_SIDE_LEN)
                    shortside_len = tf.random_shuffle(shortside_len_list)[0]

                else:
                    shortside_len = cfgs.IMG_SHORT_SIDE_LEN

                img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch, img_h_batch, img_w_batch = \
                    self.reader.next_batch(dataset_name=cfgs.DATASET_NAME,
                                           batch_size=cfgs.BATCH_SIZE * num_gpu,
                                           shortside_len=shortside_len,
                                           is_training=True)

            # data processing
            inputs_list = []
            for i in range(num_gpu):
                start = i * cfgs.BATCH_SIZE
                end = (i + 1) * cfgs.BATCH_SIZE
                img = img_batch[start:end, :, :, :]

                pretrain_zoo = PretrainModelZoo()
                if self.cfgs.NET_NAME in pretrain_zoo.pth_zoo or self.cfgs.NET_NAME in pretrain_zoo.mxnet_zoo:
                    img = img / tf.constant([cfgs.PIXEL_STD])

                gtboxes_and_label_h = get_horizen_minAreaRectangle(
                    tf.reshape(gtboxes_and_label_batch[start:end], [-1, 9]))
                gtboxes_and_label_h = tf.reshape(gtboxes_and_label_h, [cfgs.BATCH_SIZE, -1, 5])
                gtboxes_and_label_q = tf.reshape(gtboxes_and_label_batch[start:end], [cfgs.BATCH_SIZE, -1, 9])

                num_objects = num_objects_batch[start:end]
                num_objects = tf.cast(tf.reshape(num_objects, [cfgs.BATCH_SIZE, -1, ]), tf.float32)

                img_h = img_h_batch[start:end]
                img_w = img_w_batch[start:end]

                inputs_list.append([img, gtboxes_and_label_h, gtboxes_and_label_q, num_objects, img_h, img_w])

            tower_grads = []
            biases_regularizer = tf.no_regularizer
            weights_regularizer = tf.contrib.layers.l2_regularizer(cfgs.WEIGHT_DECAY)

            with tf.variable_scope(tf.get_variable_scope()):
                for i in range(num_gpu):
                    with tf.device('/gpu:%d' % i):
                        with tf.name_scope('tower_%d' % i):
                            with slim.arg_scope(
                                    [slim.model_variable, slim.variable],
                                    device='/device:CPU:0'):
                                with slim.arg_scope([slim.conv2d, slim.conv2d_in_plane,
                                                     slim.conv2d_transpose, slim.separable_conv2d,
                                                     slim.fully_connected],
                                                    weights_regularizer=weights_regularizer,
                                                    biases_regularizer=biases_regularizer,
                                                    biases_initializer=tf.constant_initializer(0.0)):

                                    gtboxes_and_label_h, gtboxes_and_label_q = tf.py_func(self.get_gtboxes_and_label,
                                                                                          inp=[inputs_list[i][1],
                                                                                               inputs_list[i][2],
                                                                                               inputs_list[i][3]],
                                                                                          Tout=[tf.float32, tf.float32])
                                    gtboxes_and_label_h = tf.reshape(gtboxes_and_label_h, [cfgs.BATCH_SIZE, -1, 5])

                                    # Unnecessary if the boxes were already sorted when making the tfrecord and no data augmentation is applied.
                                    gtboxes_and_label_q = tf.py_func(func=re_order,
                                                                     inp=[tf.reshape(gtboxes_and_label_q, [-1, 9]), True],
                                                                     Tout=[tf.float32])
                                    gtboxes_and_label_q = tf.reshape(gtboxes_and_label_q, [cfgs.BATCH_SIZE, -1, 9])

                                    img = inputs_list[i][0]
                                    img_shape = inputs_list[i][-2:]
                                    h_crop = tf.reduce_max(img_shape[0])
                                    w_crop = tf.reduce_max(img_shape[1])
                                    img = tf.image.crop_to_bounding_box(image=img,
                                                                        offset_height=0,
                                                                        offset_width=0,
                                                                        target_height=tf.cast(h_crop, tf.int32),
                                                                        target_width=tf.cast(w_crop, tf.int32))

                                    outputs = fcos.build_whole_detection_network(input_img_batch=img,
                                                                                 gtboxes_batch_h=gtboxes_and_label_h,
                                                                                 gtboxes_batch_r=gtboxes_and_label_q,
                                                                                 gpu_id=i)
                                    gtboxes_in_img_q = self.drawer.draw_boxes_with_categories(
                                        img_batch=tf.expand_dims(img[0, :, :, :], axis=0),
                                        boxes=gtboxes_and_label_q[0, :, :-1],
                                        labels=gtboxes_and_label_q[0, :, -1],
                                        method=2)
                                    tf.summary.image('Compare/gtboxes_q_gpu:%d' % i, gtboxes_in_img_q)

                                    gtboxes_in_img_h = self.drawer.draw_boxes_with_categories(
                                        img_batch=tf.expand_dims(img[0, :, :, :], axis=0),
                                        boxes=gtboxes_and_label_h[0, :, :-1],
                                        labels=gtboxes_and_label_h[0, :, -1],
                                        method=0)
                                    tf.summary.image('Compare/gtboxes_h_gpu:%d' % i, gtboxes_in_img_h)

                                    if cfgs.ADD_BOX_IN_TENSORBOARD:
                                        detections_in_img = self.drawer.draw_boxes_with_categories_and_scores(
                                            img_batch=tf.expand_dims(img[0, :, :, :], axis=0),
                                            boxes=outputs[0],
                                            scores=outputs[1],
                                            labels=outputs[2],
                                            method=2)
                                        tf.summary.image('Compare/final_detection_gpu:%d' % i, detections_in_img)

                                    loss_dict = outputs[-1]
                                    total_loss_dict, total_losses = self.loss_dict(loss_dict, num_gpu)

                                    if i == num_gpu - 1:
                                        regularization_losses = tf.get_collection(
                                            tf.GraphKeys.REGULARIZATION_LOSSES)
                                        # weight_decay_loss = tf.add_n(slim.losses.get_regularization_losses())
                                        total_losses = total_losses + tf.add_n(regularization_losses)

                                    tf.get_variable_scope().reuse_variables()
                                    grads = optimizer.compute_gradients(total_losses)
                                    if cfgs.GRADIENT_CLIPPING_BY_NORM is not None:
                                        grads = slim.learning.clip_gradient_norms(grads,
                                                                                  cfgs.GRADIENT_CLIPPING_BY_NORM)
                                    tower_grads.append(grads)

            self.log_printer(fcos, optimizer, global_step, tower_grads, total_loss_dict, num_gpu*cfgs.BATCH_SIZE, graph)


if __name__ == '__main__':

    trainer = TrainFCOS(cfgs)
    trainer.main()
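# A minimal sketch (TF 1.x) of the tf.py_func pattern the trainer above uses
# for box re-ordering: a plain NumPy function is wrapped so it can run inside
# the graph. The stub below is a hypothetical stand-in for
# utils.order_points.re_order, whose real body lives elsewhere in the repo.
import numpy as np
import tensorflow as tf

def _reorder_stub(quads, with_label):  # stand-in, not the real re_order
    return quads.astype(np.float32)

quad_boxes = tf.placeholder(tf.float32, [None, 9])
reordered = tf.py_func(func=_reorder_stub, inp=[quad_boxes, True], Tout=[tf.float32])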
[ "tensorflow.device", "tensorflow.cast", "tensorflow.random_shuffle", "tensorflow.summary.scalar", "tensorflow.add_n", "tensorflow.py_func", "tensorflow.Graph", "tensorflow.contrib.slim.get_or_create_global_step", "tensorflow.get_collection", "tensorflow.summary.image", "tensorflow.train.MomentumOptimizer", "tensorflow.name_scope", "tensorflow.contrib.slim.arg_scope", "tensorflow.reduce_max", "tensorflow.constant", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.constant_initializer", "tensorflow.contrib.slim.learning.clip_gradient_norms", "tensorflow.contrib.layers.l2_regularizer", "tensorflow.get_variable_scope" ]
tools/fcos/train_batch_quad.py
[(13, 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), False, 'import sys\n'), (33, 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), True, 'import tensorflow as tf\n'), (36, 'tensorflow.contrib.slim.get_or_create_global_step', 'slim.get_or_create_global_step', ([], {}), True, 'import tensorflow.contrib.slim as slim\n'), (38, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""lr"""', 'lr'], {}), True, 'import tensorflow as tf\n'), (40, 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', (['lr'], {'momentum': 'cfgs.MOMENTUM'}), True, 'import tensorflow as tf\n'), (41, 'libs.models.detectors.fcos.build_whole_network_batch_quad.DetectionNetworkFCOS', 'build_whole_network_batch_quad.DetectionNetworkFCOS', ([], {'cfgs': 'self.cfgs', 'is_training': '(True)'}), False, 'from libs.models.detectors.fcos import build_whole_network_batch_quad\n'), (85, 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['cfgs.WEIGHT_DECAY'], {}), True, 'import tensorflow as tf\n'), (44, 'tensorflow.name_scope', 'tf.name_scope', (['"""get_batch"""'], {}), True, 'import tensorflow as tf\n'), (64, 'dataloader.pretrained_weights.pretrain_zoo.PretrainModelZoo', 'PretrainModelZoo', ([], {}), False, 'from dataloader.pretrained_weights.pretrain_zoo import PretrainModelZoo\n'), (70, 'tensorflow.reshape', 'tf.reshape', (['gtboxes_and_label_h', '[cfgs.BATCH_SIZE, -1, 5]'], {}), True, 'import tensorflow as tf\n'), (72, 'tensorflow.reshape', 'tf.reshape', (['gtboxes_and_label_batch[start:end]', '[cfgs.BATCH_SIZE, -1, 9]'], {}), True, 'import tensorflow as tf\n'), (33, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (46, 'tensorflow.constant', 'tf.constant', (['cfgs.IMG_SHORT_SIDE_LEN'], {}), True, 'import tensorflow as tf\n'), (69, 'tensorflow.reshape', 'tf.reshape', (['gtboxes_and_label_batch[start:end]', '[-1, 9]'], {}), True, 'import tensorflow as tf\n'), (75, 'tensorflow.reshape', 'tf.reshape', (['num_objects', '[cfgs.BATCH_SIZE, -1]'], {}), True, 'import tensorflow as tf\n'), (87, 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), True, 'import tensorflow as tf\n'), (35, 'libs.configs.cfgs.GPU_GROUP.strip', 'cfgs.GPU_GROUP.strip', ([], {}), False, 'from libs.configs import cfgs\n'), (47, 'tensorflow.random_shuffle', 'tf.random_shuffle', (['shortside_len_list'], {}), True, 'import tensorflow as tf\n'), (66, 'tensorflow.constant', 'tf.constant', (['[cfgs.PIXEL_STD]'], {}), True, 'import tensorflow as tf\n'), (89, 'tensorflow.device', 'tf.device', (["('/gpu:%d' % i)"], {}), True, 'import tensorflow as tf\n'), (90, 'tensorflow.name_scope', 'tf.name_scope', (["('tower_%d' % i)"], {}), True, 'import tensorflow as tf\n'), (91, 'tensorflow.contrib.slim.arg_scope', 'slim.arg_scope', (['[slim.model_variable, slim.variable]'], {'device': '"""/device:CPU:0"""'}), True, 'import tensorflow.contrib.slim as slim\n'), (163, 'tensorflow.contrib.slim.learning.clip_gradient_norms', 'slim.learning.clip_gradient_norms', (['grads', 'cfgs.GRADIENT_CLIPPING_BY_NORM'], {}), True, 'import tensorflow.contrib.slim as slim\n'), (101, 'tensorflow.py_func', 'tf.py_func', (['self.get_gtboxes_and_label'], {'inp': '[inputs_list[i][1], inputs_list[i][2], inputs_list[i][3]]', 'Tout': '[tf.float32, tf.float32]'}), True, 'import tensorflow as tf\n'), (106, 'tensorflow.reshape', 'tf.reshape', (['gtboxes_and_label_h', '[cfgs.BATCH_SIZE, -1, 5]'], {}), True, 'import tensorflow as tf\n'), (112, 'tensorflow.reshape', 'tf.reshape', 
(['gtboxes_and_label_q', '[cfgs.BATCH_SIZE, -1, 9]'], {}), True, 'import tensorflow as tf\n'), (116, 'tensorflow.reduce_max', 'tf.reduce_max', (['img_shape[0]'], {}), True, 'import tensorflow as tf\n'), (117, 'tensorflow.reduce_max', 'tf.reduce_max', (['img_shape[1]'], {}), True, 'import tensorflow as tf\n'), (133, 'tensorflow.summary.image', 'tf.summary.image', (["('Compare/gtboxes_q_gpu:%d' % i)", 'gtboxes_in_img_q'], {}), True, 'import tensorflow as tf\n'), (140, 'tensorflow.summary.image', 'tf.summary.image', (["('Compare/gtboxes_h_gpu:%d' % i)", 'gtboxes_in_img_h'], {}), True, 'import tensorflow as tf\n'), (160, 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), True, 'import tensorflow as tf\n'), (149, 'tensorflow.summary.image', 'tf.summary.image', (["('Compare/final_detection_gpu:%d' % i)", 'detections_in_img'], {}), True, 'import tensorflow as tf\n'), (155, 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.REGULARIZATION_LOSSES'], {}), True, 'import tensorflow as tf\n'), (99, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (121, 'tensorflow.cast', 'tf.cast', (['h_crop', 'tf.int32'], {}), True, 'import tensorflow as tf\n'), (122, 'tensorflow.cast', 'tf.cast', (['w_crop', 'tf.int32'], {}), True, 'import tensorflow as tf\n'), (129, 'tensorflow.expand_dims', 'tf.expand_dims', (['img[(0), :, :, :]'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (136, 'tensorflow.expand_dims', 'tf.expand_dims', (['img[(0), :, :, :]'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (158, 'tensorflow.add_n', 'tf.add_n', (['regularization_losses'], {}), True, 'import tensorflow as tf\n'), (110, 'tensorflow.reshape', 'tf.reshape', (['gtboxes_and_label_q', '[-1, 9]'], {}), True, 'import tensorflow as tf\n'), (144, 'tensorflow.expand_dims', 'tf.expand_dims', (['img[(0), :, :, :]'], {'axis': '(0)'}), True, 'import tensorflow as tf\n')]
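# A minimal sketch (TF 1.x contrib slim, assumed decay value) of the
# arg_scope pattern catalogued above: one shared L2 weight regularizer is
# attached to every layer type opened under the scope instead of being
# repeated per layer.
import tensorflow as tf
import tensorflow.contrib.slim as slim

weights_regularizer = tf.contrib.layers.l2_regularizer(1e-4)  # assumed weight decay
with slim.arg_scope([slim.conv2d, slim.fully_connected],
                    weights_regularizer=weights_regularizer):
    net = slim.conv2d(tf.zeros([1, 32, 32, 3]), 8, [3, 3])  # picks up the regularizer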
vincentcheny/distributed-bert
e8b20be5063999f27f3bffec8acec4807d619b45
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Run masked LM/next sentence masked_lm pre-training for BERT.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import json import modeling import optimization import tensorflow as tf flags = tf.flags FLAGS = flags.FLAGS ## Required parameters flags.DEFINE_string( "bert_config_file", "./bert_config_file", "The config json file corresponding to the pre-trained BERT model. " "This specifies the model architecture.", ) flags.DEFINE_string( "input_file", None, "Input TF example files (can be a glob or comma separated)." ) flags.DEFINE_string( "output_dir", None, "The output directory where the model checkpoints will be written.", ) ## Other parameters flags.DEFINE_string( "init_checkpoint", None, "Initial checkpoint (usually from a pre-trained BERT model).", ) flags.DEFINE_integer( "max_seq_length", 128, "The maximum total input sequence length after WordPiece tokenization. " "Sequences longer than this will be truncated, and sequences shorter " "than this will be padded. Must match data generation.", ) flags.DEFINE_integer( "max_predictions_per_seq", 20, "Maximum number of masked LM predictions per sequence. " "Must match data generation.", ) flags.DEFINE_bool("do_train", True, "Whether to run training.") flags.DEFINE_bool("do_eval", True, "Whether to run eval on the dev set.") flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.") flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.") flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.") flags.DEFINE_integer("num_train_steps", 100, "Number of training steps.") flags.DEFINE_integer("num_warmup_steps", 10, "Number of warmup steps.") flags.DEFINE_integer( "save_checkpoints_steps", 5, "How often to save the model checkpoint." ) flags.DEFINE_integer( "iterations_per_loop", 1000, "How many steps to make in each estimator call." ) flags.DEFINE_integer("max_eval_steps", 100, "Maximum number of eval steps.") flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") tf.flags.DEFINE_string( "tpu_name", None, "The Cloud TPU to use for training. This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " "url.", ) tf.flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.", ) tf.flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.", ) tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.") flags.DEFINE_integer( "num_tpu_cores", 8, "Only used if `use_tpu` is True. 
Total number of TPU cores to use.", ) flags.DEFINE_integer("num_gpus", 2, "Total number of GPUs to use.") flags.DEFINE_bool("multi_worker", True, "Multi-worker training.") # My additional flags tf.app.flags.DEFINE_boolean("use_original_ckpt", True, "use original ckpt") flags.DEFINE_integer("task_index", 0, "task_index") flags.DEFINE_string( "worker", "localhost:3000,localhost:3001", "specify workers in the cluster" ) worker = FLAGS.worker.split(",") task_index = FLAGS.task_index os.environ["CUDA_VISIBLE_DEVICES"] = str(task_index) if not FLAGS.use_original_ckpt: tf.train.TFTunerContext.init_context(len(worker), task_index) def model_fn_builder( bert_config, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings, ): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" tf.logging.info("*** Features ***") for name in sorted(features.keys()): tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] masked_lm_positions = features["masked_lm_positions"] masked_lm_ids = features["masked_lm_ids"] masked_lm_weights = features["masked_lm_weights"] next_sentence_labels = features["next_sentence_labels"] is_training = mode == tf.estimator.ModeKeys.TRAIN model = modeling.BertModel( config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings, ) ( masked_lm_loss, masked_lm_example_loss, masked_lm_log_probs, ) = get_masked_lm_output( bert_config, model.get_sequence_output(), model.get_embedding_table(), masked_lm_positions, masked_lm_ids, masked_lm_weights, ) ( next_sentence_loss, next_sentence_example_loss, next_sentence_log_probs, ) = get_next_sentence_output( bert_config, model.get_pooled_output(), next_sentence_labels ) total_loss = masked_lm_loss + next_sentence_loss tvars = tf.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: ( assignment_map, initialized_variable_names, ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, assignment_map) tf.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" tf.logging.info( " name = %s, shape = %s%s", var.name, var.shape, init_string ) output_spec = None if mode == tf.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu ) if FLAGS.use_tpu: output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn, ) else: output_spec = tf.estimator.EstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, scaffold=None ) elif mode == tf.estimator.ModeKeys.EVAL: def metric_fn( masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids, masked_lm_weights, next_sentence_example_loss, next_sentence_log_probs, next_sentence_labels, ): """Computes the loss and accuracy of the model.""" masked_lm_log_probs = tf.reshape( masked_lm_log_probs, [-1, masked_lm_log_probs.shape[-1]] ) 
masked_lm_predictions = tf.argmax( masked_lm_log_probs, axis=-1, output_type=tf.int32 ) masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1]) masked_lm_ids = tf.reshape(masked_lm_ids, [-1]) masked_lm_weights = tf.reshape(masked_lm_weights, [-1]) masked_lm_accuracy = tf.metrics.accuracy( labels=masked_lm_ids, predictions=masked_lm_predictions, weights=masked_lm_weights, ) masked_lm_mean_loss = tf.metrics.mean( values=masked_lm_example_loss, weights=masked_lm_weights ) next_sentence_log_probs = tf.reshape( next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]] ) next_sentence_predictions = tf.argmax( next_sentence_log_probs, axis=-1, output_type=tf.int32 ) next_sentence_labels = tf.reshape(next_sentence_labels, [-1]) next_sentence_accuracy = tf.metrics.accuracy( labels=next_sentence_labels, predictions=next_sentence_predictions ) next_sentence_mean_loss = tf.metrics.mean( values=next_sentence_example_loss ) return { "masked_lm_accuracy": masked_lm_accuracy, "masked_lm_loss": masked_lm_mean_loss, "next_sentence_accuracy": next_sentence_accuracy, "next_sentence_loss": next_sentence_mean_loss, } eval_metrics = ( metric_fn, [ masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids, masked_lm_weights, next_sentence_example_loss, next_sentence_log_probs, next_sentence_labels, ], ) if FLAGS.use_tpu: output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn, ) else: output_spec = tf.estimator.EstimatorSpec(mode=mode, loss=total_loss) else: raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode)) return output_spec return model_fn def get_masked_lm_output( bert_config, input_tensor, output_weights, positions, label_ids, label_weights ): """Get loss and log probs for the masked LM.""" input_tensor = gather_indexes(input_tensor, positions) with tf.variable_scope("cls/predictions"): # We apply one more non-linear transformation before the output layer. # This matrix is not used after pre-training. with tf.variable_scope("transform"): input_tensor = tf.layers.dense( input_tensor, units=bert_config.hidden_size, activation=modeling.get_activation(bert_config.hidden_act), kernel_initializer=modeling.create_initializer( bert_config.initializer_range ), ) input_tensor = modeling.layer_norm(input_tensor) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. output_bias = tf.get_variable( "output_bias", shape=[bert_config.vocab_size], initializer=tf.zeros_initializer(), ) logits = tf.matmul(input_tensor, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) log_probs = tf.nn.log_softmax(logits, axis=-1) label_ids = tf.reshape(label_ids, [-1]) label_weights = tf.reshape(label_weights, [-1]) one_hot_labels = tf.one_hot( label_ids, depth=bert_config.vocab_size, dtype=tf.float32 ) # The `positions` tensor might be zero-padded (if the sequence is too # short to have the maximum number of predictions). The `label_weights` # tensor has a value of 1.0 for every real prediction and 0.0 for the # padding predictions. per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1]) numerator = tf.reduce_sum(label_weights * per_example_loss) denominator = tf.reduce_sum(label_weights) + 1e-5 loss = numerator / denominator return (loss, per_example_loss, log_probs) def get_next_sentence_output(bert_config, input_tensor, labels): """Get loss and log probs for the next sentence prediction.""" # Simple binary classification. 
Note that 0 is "next sentence" and 1 is # "random sentence". This weight matrix is not used after pre-training. with tf.variable_scope("cls/seq_relationship"): output_weights = tf.get_variable( "output_weights", shape=[2, bert_config.hidden_size], initializer=modeling.create_initializer(bert_config.initializer_range), ) output_bias = tf.get_variable( "output_bias", shape=[2], initializer=tf.zeros_initializer() ) logits = tf.matmul(input_tensor, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) log_probs = tf.nn.log_softmax(logits, axis=-1) labels = tf.reshape(labels, [-1]) one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = tf.reduce_mean(per_example_loss) return (loss, per_example_loss, log_probs) def gather_indexes(sequence_tensor, positions): """Gathers the vectors at the specific positions over a minibatch.""" sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3) batch_size = sequence_shape[0] seq_length = sequence_shape[1] width = sequence_shape[2] flat_offsets = tf.reshape( tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1] ) flat_positions = tf.reshape(positions + flat_offsets, [-1]) flat_sequence_tensor = tf.reshape(sequence_tensor, [batch_size * seq_length, width]) output_tensor = tf.gather(flat_sequence_tensor, flat_positions) return output_tensor def input_fn_builder( input_files, max_seq_length, max_predictions_per_seq, is_training, num_cpu_threads=4 ): """Creates an `input_fn` closure to be passed to TPUEstimator.""" def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] name_to_features = { "input_ids": tf.FixedLenFeature([max_seq_length], tf.int64), "input_mask": tf.FixedLenFeature([max_seq_length], tf.int64), "segment_ids": tf.FixedLenFeature([max_seq_length], tf.int64), "masked_lm_positions": tf.FixedLenFeature( [max_predictions_per_seq], tf.int64 ), "masked_lm_ids": tf.FixedLenFeature([max_predictions_per_seq], tf.int64), "masked_lm_weights": tf.FixedLenFeature( [max_predictions_per_seq], tf.float32 ), "next_sentence_labels": tf.FixedLenFeature([1], tf.int64), } # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. if is_training: d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files)) d = d.repeat() d = d.shuffle(buffer_size=len(input_files)) # `cycle_length` is the number of parallel files that get read. cycle_length = min(num_cpu_threads, len(input_files)) # `sloppy` mode means that the interleaving is not exact. This adds # even more randomness to the training pipeline. d = d.apply( tf.contrib.data.parallel_interleave( tf.data.TFRecordDataset, sloppy=is_training, cycle_length=cycle_length, ) ) d = d.shuffle(buffer_size=100) else: d = tf.data.TFRecordDataset(input_files) # Since we evaluate for a fixed number of steps we don't want to encounter # out-of-range exceptions. d = d.repeat() # We must `drop_remainder` on training because the TPU requires fixed # size dimensions. For eval, we assume we are evaluating on the CPU or GPU # and we *don't* want to drop the remainder, otherwise we wont cover # every sample. 
        d = d.apply(
            tf.contrib.data.map_and_batch(
                lambda record: _decode_record(record, name_to_features),
                batch_size=batch_size,
                num_parallel_batches=num_cpu_threads,
                drop_remainder=True,
            )
        )
        return d

    return input_fn


def _decode_record(record, name_to_features):
    """Decodes a record to a TensorFlow example."""
    example = tf.parse_single_example(record, name_to_features)

    # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
    # So cast all int64 to int32.
    for name in list(example.keys()):
        t = example[name]
        if t.dtype == tf.int64:
            t = tf.to_int32(t)
        example[name] = t

    return example


def main(_):
    tf.logging.set_verbosity(tf.logging.INFO)

    if not FLAGS.do_train and not FLAGS.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)

    tf.gfile.MakeDirs(FLAGS.output_dir)

    input_files = []
    for input_pattern in FLAGS.input_file.split(","):
        input_files.extend(tf.gfile.Glob(input_pattern))

    tf.logging.info("*** Input Files ***")
    for input_file in input_files:
        tf.logging.info(" %s" % input_file)

    tpu_cluster_resolver = None
    if FLAGS.use_tpu and FLAGS.tpu_name:
        tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
            FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project
        )

    is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
    if FLAGS.use_tpu:
        run_config = tf.contrib.tpu.RunConfig(
            cluster=tpu_cluster_resolver,
            master=FLAGS.master,
            model_dir=FLAGS.output_dir,
            save_checkpoints_steps=FLAGS.save_checkpoints_steps,
            tpu_config=tf.contrib.tpu.TPUConfig(
                iterations_per_loop=FLAGS.iterations_per_loop,
                num_shards=FLAGS.num_tpu_cores,
                per_host_input_for_training=is_per_host,
            ),
        )
    else:
        if FLAGS.multi_worker:
            # distribution = tf.contrib.distribute.CollectiveAllReduceStrategy(num_gpus_per_worker=1)
            # run_config = tf.estimator.RunConfig(
            #     experimental_distribute=tf.contrib.distribute.DistributeConfig(
            #         train_distribute=distribution,
            #         remote_cluster={
            #             'worker': ['localhost:5000', 'localhost:5001'],
            #         },
            #     )
            # )
            os.environ["TF_CONFIG"] = json.dumps(
                {
                    "cluster": {"worker": worker},
                    "task": {"type": "worker", "index": task_index},
                }
            )
            strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
            run_config = tf.estimator.RunConfig(
                save_summary_steps=1,
                train_distribute=strategy,
                model_dir=FLAGS.output_dir,
                save_checkpoints_steps=FLAGS.save_checkpoints_steps,
                log_step_count_steps=1,
            )
        else:
            distribution = tf.contrib.distribute.MirroredStrategy(
                num_gpus=FLAGS.num_gpus
            )
            run_config = tf.estimator.RunConfig(train_distribute=distribution)

    model_fn = model_fn_builder(
        bert_config=bert_config,
        init_checkpoint=FLAGS.init_checkpoint,
        learning_rate=FLAGS.learning_rate,
        num_train_steps=FLAGS.num_train_steps,
        num_warmup_steps=FLAGS.num_warmup_steps,
        use_tpu=FLAGS.use_tpu,
        use_one_hot_embeddings=True,
    )

    # If TPU is not available, this will fall back to normal Estimator on CPU
    # or GPU.
if FLAGS.use_tpu: estimator = tf.contrib.tpu.TPUEstimator( use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=FLAGS.train_batch_size, eval_batch_size=FLAGS.eval_batch_size, ) else: estimator = tf.estimator.Estimator( model_fn=model_fn, config=run_config, params={ "batch_size": FLAGS.train_batch_size if FLAGS.do_train else FLAGS.eval_batch_size, }, ) if FLAGS.do_train and FLAGS.do_eval: tf.logging.info("***** Running training *****") tf.logging.info(" Training batch size = %d", FLAGS.train_batch_size) train_input_fn = input_fn_builder( input_files=input_files, max_seq_length=FLAGS.max_seq_length, max_predictions_per_seq=FLAGS.max_predictions_per_seq, is_training=True, ) eval_input_fn = input_fn_builder( input_files=input_files, max_seq_length=FLAGS.max_seq_length, max_predictions_per_seq=FLAGS.max_predictions_per_seq, is_training=False, ) tf.estimator.train_and_evaluate( estimator, train_spec=tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=100), eval_spec=tf.estimator.EvalSpec(input_fn=eval_input_fn, steps=10), ) # if FLAGS.do_train: # tf.logging.info("***** Running training *****") # tf.logging.info(" Batch size = %d", FLAGS.train_batch_size) # train_input_fn = input_fn_builder( # input_files=input_files, # max_seq_length=FLAGS.max_seq_length, # max_predictions_per_seq=FLAGS.max_predictions_per_seq, # is_training=True) # estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps) # if FLAGS.do_eval: # tf.logging.info("***** Running evaluation *****") # tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size) # eval_input_fn = input_fn_builder( # input_files=input_files, # max_seq_length=FLAGS.max_seq_length, # max_predictions_per_seq=FLAGS.max_predictions_per_seq, # is_training=False) # result = estimator.evaluate( # input_fn=eval_input_fn, steps=FLAGS.max_eval_steps) # output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt") # with tf.gfile.GFile(output_eval_file, "w") as writer: # tf.logging.info("***** Eval results *****") # for key in sorted(result.keys()): # tf.logging.info(" %s = %s", key, str(result[key])) # writer.write("%s = %s\n" % (key, str(result[key]))) if __name__ == "__main__": flags.mark_flag_as_required("input_file") flags.mark_flag_as_required("bert_config_file") flags.mark_flag_as_required("output_dir") tf.app.run()
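# A worked toy example of the gather_indexes trick defined above: flatten
# [batch, seq, width] into [batch * seq, width] and add a per-example row
# offset so per-example positions become flat indices.
import tensorflow as tf

sequence = tf.reshape(tf.range(2 * 3 * 4, dtype=tf.float32), [2, 3, 4])   # batch=2, seq=3, width=4
positions = tf.constant([[0, 2], [1, 1]], dtype=tf.int32)                 # two picks per example
flat_offsets = tf.reshape(tf.range(0, 2, dtype=tf.int32) * 3, [-1, 1])    # [[0], [3]]
flat_positions = tf.reshape(positions + flat_offsets, [-1])               # -> [0, 2, 4, 4]
gathered = tf.gather(tf.reshape(sequence, [2 * 3, 4]), flat_positions)    # shape [4, 4]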
[ "tensorflow.contrib.cluster_resolver.TPUClusterResolver", "tensorflow.metrics.accuracy", "tensorflow.nn.log_softmax", "tensorflow.FixedLenFeature", "tensorflow.reduce_sum", "tensorflow.train.init_from_checkpoint", "tensorflow.contrib.data.parallel_interleave", "tensorflow.gfile.MakeDirs", "tensorflow.app.flags.DEFINE_boolean", "tensorflow.to_int32", "tensorflow.estimator.RunConfig", "tensorflow.contrib.distribute.MirroredStrategy", "tensorflow.contrib.tpu.TPUEstimatorSpec", "tensorflow.contrib.tpu.TPUEstimator", "tensorflow.data.TFRecordDataset", "tensorflow.gather", "tensorflow.logging.set_verbosity", "tensorflow.estimator.EvalSpec", "tensorflow.trainable_variables", "tensorflow.parse_single_example", "tensorflow.argmax", "tensorflow.app.run", "tensorflow.metrics.mean", "tensorflow.matmul", "tensorflow.estimator.Estimator", "tensorflow.zeros_initializer", "tensorflow.estimator.TrainSpec", "tensorflow.logging.info", "tensorflow.one_hot", "tensorflow.gfile.Glob", "tensorflow.contrib.tpu.TPUConfig", "tensorflow.nn.bias_add", "tensorflow.train.Scaffold", "tensorflow.constant", "tensorflow.range", "tensorflow.reduce_mean", "tensorflow.flags.DEFINE_string", "tensorflow.reshape", "tensorflow.distribute.experimental.MultiWorkerMirroredStrategy", "tensorflow.estimator.EstimatorSpec", "tensorflow.variable_scope" ]
run_pretraining.py
[(97, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""tpu_name"""', 'None', '"""The Cloud TPU to use for training. This should be either the name used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url."""'], {}), True, 'import tensorflow as tf\n'), (105, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""tpu_zone"""', 'None', '"""[Optional] GCE zone where the Cloud TPU is located in. If not specified, we will attempt to automatically detect the GCE project from metadata."""'], {}), True, 'import tensorflow as tf\n'), (113, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""gcp_project"""', 'None', '"""[Optional] Project name for the Cloud TPU-enabled project. If not specified, we will attempt to automatically detect the GCE project from metadata."""'], {}), True, 'import tensorflow as tf\n'), (121, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""master"""', 'None', '"""[Optional] TensorFlow master URL."""'], {}), True, 'import tensorflow as tf\n'), (133, 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""use_original_ckpt"""', '(True)', '"""use original ckpt"""'], {}), True, 'import tensorflow as tf\n'), (408, 'modeling.get_shape_list', 'modeling.get_shape_list', (['sequence_tensor'], {'expected_rank': '(3)'}), False, 'import modeling\n'), (416, 'tensorflow.reshape', 'tf.reshape', (['(positions + flat_offsets)', '[-1]'], {}), True, 'import tensorflow as tf\n'), (417, 'tensorflow.reshape', 'tf.reshape', (['sequence_tensor', '[batch_size * seq_length, width]'], {}), True, 'import tensorflow as tf\n'), (418, 'tensorflow.gather', 'tf.gather', (['flat_sequence_tensor', 'flat_positions'], {}), True, 'import tensorflow as tf\n'), (490, 'tensorflow.parse_single_example', 'tf.parse_single_example', (['record', 'name_to_features'], {}), True, 'import tensorflow as tf\n'), (504, 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), True, 'import tensorflow as tf\n'), (509, 'modeling.BertConfig.from_json_file', 'modeling.BertConfig.from_json_file', (['FLAGS.bert_config_file'], {}), False, 'import modeling\n'), (511, 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['FLAGS.output_dir'], {}), True, 'import tensorflow as tf\n'), (517, 'tensorflow.logging.info', 'tf.logging.info', (['"""*** Input Files ***"""'], {}), True, 'import tensorflow as tf\n'), (656, 'tensorflow.app.run', 'tf.app.run', ([], {}), True, 'import tensorflow as tf\n'), (160, 'tensorflow.logging.info', 'tf.logging.info', (['"""*** Features ***"""'], {}), True, 'import tensorflow as tf\n'), (174, 'modeling.BertModel', 'modeling.BertModel', ([], {'config': 'bert_config', 'is_training': 'is_training', 'input_ids': 'input_ids', 'input_mask': 'input_mask', 'token_type_ids': 'segment_ids', 'use_one_hot_embeddings': 'use_one_hot_embeddings'}), False, 'import modeling\n'), (206, 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), True, 'import tensorflow as tf\n'), (225, 'tensorflow.logging.info', 'tf.logging.info', (['"""**** Trainable Variables ****"""'], {}), True, 'import tensorflow as tf\n'), (337, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""cls/predictions"""'], {}), True, 'import tensorflow as tf\n'), (358, 'tensorflow.matmul', 'tf.matmul', (['input_tensor', 'output_weights'], {'transpose_b': '(True)'}), True, 'import tensorflow as tf\n'), (359, 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['logits', 'output_bias'], {}), True, 'import tensorflow as tf\n'), (360, 
'tensorflow.nn.log_softmax', 'tf.nn.log_softmax', (['logits'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (362, 'tensorflow.reshape', 'tf.reshape', (['label_ids', '[-1]'], {}), True, 'import tensorflow as tf\n'), (363, 'tensorflow.reshape', 'tf.reshape', (['label_weights', '[-1]'], {}), True, 'import tensorflow as tf\n'), (365, 'tensorflow.one_hot', 'tf.one_hot', (['label_ids'], {'depth': 'bert_config.vocab_size', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (374, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(label_weights * per_example_loss)'], {}), True, 'import tensorflow as tf\n'), (386, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""cls/seq_relationship"""'], {}), True, 'import tensorflow as tf\n'), (396, 'tensorflow.matmul', 'tf.matmul', (['input_tensor', 'output_weights'], {'transpose_b': '(True)'}), True, 'import tensorflow as tf\n'), (397, 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['logits', 'output_bias'], {}), True, 'import tensorflow as tf\n'), (398, 'tensorflow.nn.log_softmax', 'tf.nn.log_softmax', (['logits'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (399, 'tensorflow.reshape', 'tf.reshape', (['labels', '[-1]'], {}), True, 'import tensorflow as tf\n'), (400, 'tensorflow.one_hot', 'tf.one_hot', (['labels'], {'depth': '(2)', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (402, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['per_example_loss'], {}), True, 'import tensorflow as tf\n'), (519, 'tensorflow.logging.info', 'tf.logging.info', (["(' %s' % input_file)"], {}), True, 'import tensorflow as tf\n'), (523, 'tensorflow.contrib.cluster_resolver.TPUClusterResolver', 'tf.contrib.cluster_resolver.TPUClusterResolver', (['FLAGS.tpu_name'], {'zone': 'FLAGS.tpu_zone', 'project': 'FLAGS.gcp_project'}), True, 'import tensorflow as tf\n'), (584, 'tensorflow.contrib.tpu.TPUEstimator', 'tf.contrib.tpu.TPUEstimator', ([], {'use_tpu': 'FLAGS.use_tpu', 'model_fn': 'model_fn', 'config': 'run_config', 'train_batch_size': 'FLAGS.train_batch_size', 'eval_batch_size': 'FLAGS.eval_batch_size'}), True, 'import tensorflow as tf\n'), (592, 'tensorflow.estimator.Estimator', 'tf.estimator.Estimator', ([], {'model_fn': 'model_fn', 'config': 'run_config', 'params': "{'batch_size': FLAGS.train_batch_size if FLAGS.do_train else FLAGS.\n eval_batch_size}"}), True, 'import tensorflow as tf\n'), (602, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Running training *****"""'], {}), True, 'import tensorflow as tf\n'), (603, 'tensorflow.logging.info', 'tf.logging.info', (['""" Training batch size = %d"""', 'FLAGS.train_batch_size'], {}), True, 'import tensorflow as tf\n'), (162, 'tensorflow.logging.info', 'tf.logging.info', (["(' name = %s, shape = %s' % (name, features[name].shape))"], {}), True, 'import tensorflow as tf\n'), (214, 'modeling.get_assignment_map_from_checkpoint', 'modeling.get_assignment_map_from_checkpoint', (['tvars', 'init_checkpoint'], {}), False, 'import modeling\n'), (230, 'tensorflow.logging.info', 'tf.logging.info', (['""" name = %s, shape = %s%s"""', 'var.name', 'var.shape', 'init_string'], {}), True, 'import tensorflow as tf\n'), (236, 'optimization.create_optimizer', 'optimization.create_optimizer', (['total_loss', 'learning_rate', 'num_train_steps', 'num_warmup_steps', 'use_tpu'], {}), False, 'import optimization\n'), (340, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""transform"""'], {}), True, 'import tensorflow as tf\n'), (349, 'modeling.layer_norm', 'modeling.layer_norm', (['input_tensor'], {}), 
False, 'import modeling\n'), (373, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(log_probs * one_hot_labels)'], {'axis': '[-1]'}), True, 'import tensorflow as tf\n'), (375, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['label_weights'], {}), True, 'import tensorflow as tf\n'), (401, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(one_hot_labels * log_probs)'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (414, 'tensorflow.range', 'tf.range', (['(0)', 'batch_size'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (432, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[max_seq_length]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (433, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[max_seq_length]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (434, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[max_seq_length]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (435, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[max_predictions_per_seq]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (438, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[max_predictions_per_seq]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (439, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[max_predictions_per_seq]', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (442, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[1]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (466, 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['input_files'], {}), True, 'import tensorflow as tf\n'), (497, 'tensorflow.to_int32', 'tf.to_int32', (['t'], {}), True, 'import tensorflow as tf\n'), (515, 'tensorflow.gfile.Glob', 'tf.gfile.Glob', (['input_pattern'], {}), True, 'import tensorflow as tf\n'), (551, 'json.dumps', 'json.dumps', (["{'cluster': {'worker': worker}, 'task': {'type': 'worker', 'index': task_index}\n }"], {}), False, 'import json\n'), (557, 'tensorflow.distribute.experimental.MultiWorkerMirroredStrategy', 'tf.distribute.experimental.MultiWorkerMirroredStrategy', ([], {}), True, 'import tensorflow as tf\n'), (558, 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {'save_summary_steps': '(1)', 'train_distribute': 'strategy', 'model_dir': 'FLAGS.output_dir', 'save_checkpoints_steps': 'FLAGS.save_checkpoints_steps', 'log_step_count_steps': '(1)'}), True, 'import tensorflow as tf\n'), (566, 'tensorflow.contrib.distribute.MirroredStrategy', 'tf.contrib.distribute.MirroredStrategy', ([], {'num_gpus': 'FLAGS.num_gpus'}), True, 'import tensorflow as tf\n'), (569, 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {'train_distribute': 'distribution'}), True, 'import tensorflow as tf\n'), (223, 'tensorflow.train.init_from_checkpoint', 'tf.train.init_from_checkpoint', (['init_checkpoint', 'assignment_map'], {}), True, 'import tensorflow as tf\n'), (241, 'tensorflow.contrib.tpu.TPUEstimatorSpec', 'tf.contrib.tpu.TPUEstimatorSpec', ([], {'mode': 'mode', 'loss': 'total_loss', 'train_op': 'train_op', 'scaffold_fn': 'scaffold_fn'}), True, 'import tensorflow as tf\n'), (248, 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'loss': 'total_loss', 'train_op': 'train_op', 'scaffold': 'None'}), True, 'import tensorflow as tf\n'), (356, 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), True, 'import tensorflow as tf\n'), (390, 'modeling.create_initializer', 'modeling.create_initializer', (['bert_config.initializer_range'], {}),
False, 'import modeling\n'), (393, 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), True, 'import tensorflow as tf\n'), (448, 'tensorflow.constant', 'tf.constant', (['input_files'], {}), True, 'import tensorflow as tf\n'), (458, 'tensorflow.contrib.data.parallel_interleave', 'tf.contrib.data.parallel_interleave', (['tf.data.TFRecordDataset'], {'sloppy': 'is_training', 'cycle_length': 'cycle_length'}), True, 'import tensorflow as tf\n'), (534, 'tensorflow.contrib.tpu.TPUConfig', 'tf.contrib.tpu.TPUConfig', ([], {'iterations_per_loop': 'FLAGS.iterations_per_loop', 'num_shards': 'FLAGS.num_tpu_cores', 'per_host_input_for_training': 'is_per_host'}), True, 'import tensorflow as tf\n'), (618, 'tensorflow.estimator.TrainSpec', 'tf.estimator.TrainSpec', ([], {'input_fn': 'train_input_fn', 'max_steps': '(100)'}), True, 'import tensorflow as tf\n'), (619, 'tensorflow.estimator.EvalSpec', 'tf.estimator.EvalSpec', ([], {'input_fn': 'eval_input_fn', 'steps': '(10)'}), True, 'import tensorflow as tf\n'), (218, 'tensorflow.train.init_from_checkpoint', 'tf.train.init_from_checkpoint', (['init_checkpoint', 'assignment_map'], {}), True, 'import tensorflow as tf\n'), (219, 'tensorflow.train.Scaffold', 'tf.train.Scaffold', ([], {}), True, 'import tensorflow as tf\n'), (263, 'tensorflow.reshape', 'tf.reshape', (['masked_lm_log_probs', '[-1, masked_lm_log_probs.shape[-1]]'], {}), True, 'import tensorflow as tf\n'), (266, 'tensorflow.argmax', 'tf.argmax', (['masked_lm_log_probs'], {'axis': '(-1)', 'output_type': 'tf.int32'}), True, 'import tensorflow as tf\n'), (269, 'tensorflow.reshape', 'tf.reshape', (['masked_lm_example_loss', '[-1]'], {}), True, 'import tensorflow as tf\n'), (270, 'tensorflow.reshape', 'tf.reshape', (['masked_lm_ids', '[-1]'], {}), True, 'import tensorflow as tf\n'), (271, 'tensorflow.reshape', 'tf.reshape', (['masked_lm_weights', '[-1]'], {}), True, 'import tensorflow as tf\n'), (272, 'tensorflow.metrics.accuracy', 'tf.metrics.accuracy', ([], {'labels': 'masked_lm_ids', 'predictions': 'masked_lm_predictions', 'weights': 'masked_lm_weights'}), True, 'import tensorflow as tf\n'), (277, 'tensorflow.metrics.mean', 'tf.metrics.mean', ([], {'values': 'masked_lm_example_loss', 'weights': 'masked_lm_weights'}), True, 'import tensorflow as tf\n'), (281, 'tensorflow.reshape', 'tf.reshape', (['next_sentence_log_probs', '[-1, next_sentence_log_probs.shape[-1]]'], {}), True, 'import tensorflow as tf\n'), (284, 'tensorflow.argmax', 'tf.argmax', (['next_sentence_log_probs'], {'axis': '(-1)', 'output_type': 'tf.int32'}), True, 'import tensorflow as tf\n'), (287, 'tensorflow.reshape', 'tf.reshape', (['next_sentence_labels', '[-1]'], {}), True, 'import tensorflow as tf\n'), (288, 'tensorflow.metrics.accuracy', 'tf.metrics.accuracy', ([], {'labels': 'next_sentence_labels', 'predictions': 'next_sentence_predictions'}), True, 'import tensorflow as tf\n'), (291, 'tensorflow.metrics.mean', 'tf.metrics.mean', ([], {'values': 'next_sentence_example_loss'}), True, 'import tensorflow as tf\n'), (315, 'tensorflow.contrib.tpu.TPUEstimatorSpec', 'tf.contrib.tpu.TPUEstimatorSpec', ([], {'mode': 'mode', 'loss': 'total_loss', 'eval_metrics': 'eval_metrics', 'scaffold_fn': 'scaffold_fn'}), True, 'import tensorflow as tf\n'), (322, 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'loss': 'total_loss'}), True, 'import tensorflow as tf\n'), (344, 'modeling.get_activation', 'modeling.get_activation', (['bert_config.hidden_act'], {}), False, 'import modeling\n'), (345, 
'modeling.create_initializer', 'modeling.create_initializer', (['bert_config.initializer_range'], {}), False, 'import modeling\n')]
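# A minimal sketch of the TF_CONFIG wiring the script above relies on for
# MultiWorkerMirroredStrategy: every worker exports the same cluster spec
# plus its own index before the RunConfig is built (the addresses are the
# script's localhost defaults).
import json
import os

os.environ["TF_CONFIG"] = json.dumps({
    "cluster": {"worker": ["localhost:3000", "localhost:3001"]},
    "task": {"type": "worker", "index": 0},  # the second worker uses index 1
})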
viotemp1/talos
b78d047b0f6d6f2a6a074c12d6c79bb1386fe791
def parallel_gpu_jobs(allow_growth=True, fraction=.5):

    '''Sets the max used memory as a fraction for the TensorFlow backend

    allow_growth :: True or False

    fraction :: a float value (e.g. 0.5 means 4GB out of 8GB)

    '''

    import keras.backend as K
    import tensorflow as tf
    from nvidia_info import get_memory_info

    memory_info = get_memory_info(0)
    total_memory = memory_info[1]
    memory_limit = int(fraction*total_memory)
    print(memory_info)
    if tf.version.VERSION[0]=="2":
        gpus = tf.config.experimental.list_physical_devices('GPU')
        # TF2 does not allow combining memory growth with a virtual-device
        # memory limit on the same GPU, so only one of the two is applied.
        if allow_growth:
            tf.config.experimental.set_memory_growth(gpus[0], True)
        else:
            tf.config.experimental.set_virtual_device_configuration(gpus[0],
                [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=memory_limit)])
    else:
        gpu_options = tf.GPUOptions(allow_growth=allow_growth,
                                    per_process_gpu_memory_fraction=fraction)
        config = tf.ConfigProto(gpu_options=gpu_options)
        session = tf.Session(config=config)
        K.set_session(session)


def multi_gpu(model, gpus=None, cpu_merge=True, cpu_relocation=False):

    '''Takes as input the model, and returns a model
    based on the number of GPUs available on the machine
    or alternatively the 'gpus' user input.

    NOTE: this needs to be used before model.compile() in the
    model inputted to Scan in the form:

    from talos.utils.gpu_utils import multi_gpu
    model = multi_gpu(model)

    '''

    from keras.utils import multi_gpu_model

    return multi_gpu_model(model,
                           gpus=gpus,
                           cpu_merge=cpu_merge,
                           cpu_relocation=cpu_relocation)


def force_cpu():

    '''Force CPU on a GPU system
    '''

    import keras.backend as K
    import tensorflow as tf

    config = tf.ConfigProto(device_count={'GPU': 0})
    session = tf.Session(config=config)
    K.set_session(session)
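# Usage sketch for the helpers above (assumes a CUDA-capable machine and the
# nvidia_info helper imported inside parallel_gpu_jobs); call one of them
# once, before any model is built:
#
#     parallel_gpu_jobs(allow_growth=False, fraction=0.5)  # cap at half the GPU memory
#     force_cpu()                                          # or pin everything to the CPU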
[ "tensorflow.config.experimental.set_memory_growth", "tensorflow.config.experimental.list_physical_devices", "tensorflow.config.experimental.VirtualDeviceConfiguration", "tensorflow.ConfigProto", "tensorflow.GPUOptions", "tensorflow.Session" ]
talos/utils/gpu_utils.py
[(16, 'nvidia_info.get_memory_info', 'get_memory_info', (['(0)'], {}), False, 'from nvidia_info import get_memory_info\n'), (49, 'keras.utils.multi_gpu_model', 'multi_gpu_model', (['model'], {'gpus': 'gpus', 'cpu_merge': 'cpu_merge', 'cpu_relocation': 'cpu_relocation'}), False, 'from keras.utils import multi_gpu_model\n'), (63, 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'device_count': "{'GPU': 0}"}), True, 'import tensorflow as tf\n'), (64, 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), True, 'import tensorflow as tf\n'), (65, 'keras.backend.set_session', 'K.set_session', (['session'], {}), True, 'import keras.backend as K\n'), (21, 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), True, 'import tensorflow as tf\n'), (22, 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpus[0]', '(True)'], {}), True, 'import tensorflow as tf\n'), (26, 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'allow_growth': 'allow_growth', 'per_process_gpu_memory_fraction': 'fraction'}), True, 'import tensorflow as tf\n'), (28, 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options'}), True, 'import tensorflow as tf\n'), (29, 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), True, 'import tensorflow as tf\n'), (30, 'keras.backend.set_session', 'K.set_session', (['session'], {}), True, 'import keras.backend as K\n'), (24, 'tensorflow.config.experimental.VirtualDeviceConfiguration', 'tf.config.experimental.VirtualDeviceConfiguration', ([], {'memory_limit': 'memory_limit'}), True, 'import tensorflow as tf\n')]
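A minimal usage sketch for the gpu_utils helpers above, assuming a TF 1.x-style Keras setup and that the optional nvidia_info helper required by parallel_gpu_jobs is installed; the import path follows the module's own docstring:

```python
from talos.utils.gpu_utils import parallel_gpu_jobs, force_cpu

# Cap TensorFlow at ~30% of GPU memory before any model is built;
# on TF 1.x this registers the fraction-limited session with Keras.
parallel_gpu_jobs(allow_growth=True, fraction=0.3)

# Alternatively, hide every GPU and fall back to the CPU:
# force_cpu()
```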
Naveen-and-Taishi/DeepCorrect
b1ac17927d91f2010888533e1bed014637aa9453
import os
import tensorflow as tf
import numpy as np
import random
import math


class Simulator():

    def __init__(self, type) -> None:
        if type == 'D':
            # deuteranope
            self.color_matrix = tf.convert_to_tensor([[1, 0, 0], [0.494207, 0, 1.24827], [0, 0, 1]])
        elif type == 'P':
            # protanope
            self.color_matrix = tf.convert_to_tensor([[0, 2.02344, -2.52581], [0, 1, 0], [0, 0, 1]])
        elif type == 'T':
            # tritanope
            self.color_matrix = tf.convert_to_tensor([[1, 0, 0], [0, 1, 0], [-0.395913, 0.801109, 0]])
        else:
            # Raising a bare string is a TypeError in Python 3; raise a real exception.
            raise ValueError("ERROR: invalid type passed into Simulator class (only accepts 'D', 'P', or 'T')")

        self.rgb2lms = tf.convert_to_tensor([[17.8824, 43.5161, 4.11935], [3.45565, 27.1554, 3.86714], [0.0299566, 0.184309, 1.46709]])

    def simulate_image(self, image):
        # Passes an image through the color-blindness simulator:
        # RGB -> LMS -> simulated deficiency -> RGB, as a single 3x3 matrix product.
        inverted_rgb2lms = tf.linalg.inv(self.rgb2lms)
        product1 = tf.matmul(inverted_rgb2lms, self.color_matrix)
        product2 = tf.matmul(product1, self.rgb2lms)

        original_image_shape = image.shape
        # Flatten (H, W, C) to (C, H*W), apply the transform, then restore the shape.
        simulated_image = tf.transpose(tf.matmul(product2, tf.reshape(tf.transpose(image, perm=[2, 0, 1]), (image.shape[2], image.shape[0] * image.shape[1]))), perm=[1, 0])
        return tf.reshape(simulated_image, original_image_shape)
[ "tensorflow.convert_to_tensor", "tensorflow.linalg.inv", "tensorflow.matmul", "tensorflow.transpose", "tensorflow.reshape" ]
code/simulator.py
[(21, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[[17.8824, 43.5161, 4.11935], [3.45565, 27.1554, 3.86714], [0.0299566, \n 0.184309, 1.46709]]'], {}), True, 'import tensorflow as tf\n'), (26, 'tensorflow.linalg.inv', 'tf.linalg.inv', (['self.rgb2lms'], {}), True, 'import tensorflow as tf\n'), (28, 'tensorflow.matmul', 'tf.matmul', (['inverted_rgb2lms', 'self.color_matrix'], {}), True, 'import tensorflow as tf\n'), (29, 'tensorflow.matmul', 'tf.matmul', (['product1', 'self.rgb2lms'], {}), True, 'import tensorflow as tf\n'), (35, 'tensorflow.reshape', 'tf.reshape', (['simulated_image', 'original_image_shape'], {}), True, 'import tensorflow as tf\n'), (11, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[[1, 0, 0], [0.494207, 0, 1.24827], [0, 0, 1]]'], {}), True, 'import tensorflow as tf\n'), (14, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[[0, 2.02344, -2.52581], [0, 1, 0], [0, 0, 1]]'], {}), True, 'import tensorflow as tf\n'), (17, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[[1, 0, 0], [0, 1, 0], [-0.395913, 0.801109, 0]]'], {}), True, 'import tensorflow as tf\n'), (33, 'tensorflow.transpose', 'tf.transpose', (['image'], {'perm': '[2, 0, 1]'}), True, 'import tensorflow as tf\n')]
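A small usage sketch for the Simulator class above; the import path (per code/simulator.py) and the dummy image are assumptions, not part of the original file:

```python
import tensorflow as tf
from simulator import Simulator  # assumed importable from code/simulator.py

# Simulate deuteranopia on a dummy 4x4 RGB image. Float values are used
# because simulate_image multiplies the image by float32 matrices.
image = tf.random.uniform((4, 4, 3), dtype=tf.float32)
simulated = Simulator('D').simulate_image(image)
print(simulated.shape)  # (4, 4, 3) -- same shape as the input
```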
sirCamp/tensorflow-kernels
e3d459406f463bb646e150c3bab89d8410f86f16
import tensorflow as tf

from kernels.base import BaseKernel

tf.enable_eager_execution()
tf.executing_eagerly()

import numpy as np

__author__ = "Stefano Campese"
__version__ = "0.1.2"
__maintainer__ = "Stefano Campese"
__email__ = "[email protected]"


class PSpectrumKernel(BaseKernel):
    """
    P-Spectrum kernel, defined as a weighted transformation of subsequences.
    Useful for character embeddings.

    K(x, y) = <Φp(x), Φp(y)>

    where:
        p = the spectrum weight
    """

    def __init__(self, p=2):
        self._dim = None
        self._p = p

    def _compute(self, x, y):
        self._dim = x._rank()
        kernel = np.zeros((tf.size(x), tf.size(y)))

        for l in tf.range(start=0, limit=tf.size(x), delta=1, dtype=None, name='l_range'):
            for m in tf.range(start=0, limit=tf.size(y), delta=1, dtype=None, name='m_range'):

                vx = tf.contrib.lookup.MutableHashTable(key_dtype=tf.string,
                                                        value_dtype=tf.int64,
                                                        default_value=-1)
                vz = tf.contrib.lookup.MutableHashTable(key_dtype=tf.string,
                                                        value_dtype=tf.int64,
                                                        default_value=-1)

                vx_keys = tf.reshape(tf.Variable([], collections=[], dtype=tf.string), (-1, 1))
                vz_keys = tf.reshape(tf.Variable([], collections=[], dtype=tf.string), (-1, 1))

                x_t = tf.gather(x, l)
                x_t_len = tf.strings.length(x_t)
                x_t = tf.string_split([x_t], delimiter='').values

                z_t = tf.gather(y, m)
                z_t_len = tf.strings.length(z_t)
                z_t = tf.string_split([z_t], delimiter='').values

                # Count every p-gram of x_t in the hash table vx.
                for i in tf.range(start=0, limit=x_t_len - self._p + 1, delta=1, dtype=None, name='range'):
                    u = tf.string_join(x_t[i:i + self._p], '')
                    vx_keys, r = tf.cond(
                        tf.greater(vx.lookup(u), -1),
                        true_fn=lambda: (vx_keys, tf.add(vx.lookup(u), 1)),
                        false_fn=lambda: (tf.concat([vx_keys, tf.reshape(u, (-1, 1))], axis=0),
                                          tf.constant(1, dtype=tf.int64, name='constant'))
                    )
                    vx.insert(u, r)

                # Count every p-gram of z_t in the hash table vz.
                for i in tf.range(start=0, limit=z_t_len - self._p + 1, delta=1, dtype=None, name='range'):
                    u = tf.string_join(z_t[i:i + self._p], '')
                    vz_keys, r = tf.cond(
                        tf.greater(vz.lookup(u), -1),
                        true_fn=lambda: (vz_keys, tf.add(vz.lookup(u), 1)),
                        false_fn=lambda: (tf.concat([vz_keys, tf.reshape(u, (-1, 1))], axis=0),
                                          tf.constant(1, dtype=tf.int64))
                    )
                    vz.insert(u, r)

                # Dot product of the two count vectors over the shared p-grams.
                kk = tf.Variable(0, dtype=tf.int64)
                for i in tf.range(start=0, limit=tf.size(vx_keys), delta=1, dtype=None, name='range'):
                    for j in tf.range(start=0, limit=tf.size(vz_keys), delta=1, dtype=None, name='range'):
                        to_add = tf.cond(
                            tf.greater(vz.lookup(vx_keys[i]), -1),
                            true_fn=lambda: tf.math.multiply(vx.lookup(vx_keys[i]), vz.lookup(vz_keys[j])),
                            false_fn=lambda: tf.constant(0, dtype=tf.int64)
                        )
                        kk = tf.math.add(kk, to_add)

                kernel[l][m] = kk

        return tf.convert_to_tensor(kernel, dtype=tf.int64)

    def dim(self):
        return self._dim
[ "tensorflow.convert_to_tensor", "tensorflow.math.add", "tensorflow.enable_eager_execution", "tensorflow.executing_eagerly", "tensorflow.constant", "tensorflow.range", "tensorflow.Variable", "tensorflow.contrib.lookup.MutableHashTable", "tensorflow.reshape", "tensorflow.string_join", "tensorflow.gather", "tensorflow.string_split", "tensorflow.strings.length", "tensorflow.size" ]
kernels/experimental/p_spectrum_kernel.py
[(5, 'tensorflow.enable_eager_execution', 'tf.enable_eager_execution', ([], {}), True, 'import tensorflow as tf\n'), (6, 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), True, 'import tensorflow as tf\n'), (86, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['kernel'], {'dtype': 'tf.int64'}), True, 'import tensorflow as tf\n'), (31, 'tensorflow.size', 'tf.size', (['x'], {}), True, 'import tensorflow as tf\n'), (31, 'tensorflow.size', 'tf.size', (['y'], {}), True, 'import tensorflow as tf\n'), (33, 'tensorflow.size', 'tf.size', (['x'], {}), True, 'import tensorflow as tf\n'), (36, 'tensorflow.contrib.lookup.MutableHashTable', 'tf.contrib.lookup.MutableHashTable', ([], {'key_dtype': 'tf.string', 'value_dtype': 'tf.int64', 'default_value': '(-1)'}), True, 'import tensorflow as tf\n'), (40, 'tensorflow.contrib.lookup.MutableHashTable', 'tf.contrib.lookup.MutableHashTable', ([], {'key_dtype': 'tf.string', 'value_dtype': 'tf.int64', 'default_value': '(-1)'}), True, 'import tensorflow as tf\n'), (47, 'tensorflow.gather', 'tf.gather', (['x', 'l'], {}), True, 'import tensorflow as tf\n'), (48, 'tensorflow.strings.length', 'tf.strings.length', (['x_t'], {}), True, 'import tensorflow as tf\n'), (51, 'tensorflow.gather', 'tf.gather', (['y', 'm'], {}), True, 'import tensorflow as tf\n'), (52, 'tensorflow.strings.length', 'tf.strings.length', (['z_t'], {}), True, 'import tensorflow as tf\n'), (55, 'tensorflow.range', 'tf.range', ([], {'start': '(0)', 'limit': '(x_t_len - self._p + 1)', 'delta': '(1)', 'dtype': 'None', 'name': '"""range"""'}), True, 'import tensorflow as tf\n'), (65, 'tensorflow.range', 'tf.range', ([], {'start': '(0)', 'limit': '(z_t_len - self._p + 1)', 'delta': '(1)', 'dtype': 'None', 'name': '"""range"""'}), True, 'import tensorflow as tf\n'), (75, 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'dtype': 'tf.int64'}), True, 'import tensorflow as tf\n'), (34, 'tensorflow.size', 'tf.size', (['y'], {}), True, 'import tensorflow as tf\n'), (44, 'tensorflow.Variable', 'tf.Variable', (['[]'], {'collections': '[]', 'dtype': 'tf.string'}), True, 'import tensorflow as tf\n'), (45, 'tensorflow.Variable', 'tf.Variable', (['[]'], {'collections': '[]', 'dtype': 'tf.string'}), True, 'import tensorflow as tf\n'), (49, 'tensorflow.string_split', 'tf.string_split', (['[x_t]'], {'delimiter': '""""""'}), True, 'import tensorflow as tf\n'), (53, 'tensorflow.string_split', 'tf.string_split', (['[z_t]'], {'delimiter': '""""""'}), True, 'import tensorflow as tf\n'), (56, 'tensorflow.string_join', 'tf.string_join', (['x_t[i:i + self._p]', '""""""'], {}), True, 'import tensorflow as tf\n'), (66, 'tensorflow.string_join', 'tf.string_join', (['z_t[i:i + self._p]', '""""""'], {}), True, 'import tensorflow as tf\n'), (76, 'tensorflow.size', 'tf.size', (['vx_keys'], {}), True, 'import tensorflow as tf\n'), (83, 'tensorflow.math.add', 'tf.math.add', (['kk', 'to_add'], {}), True, 'import tensorflow as tf\n'), (77, 'tensorflow.size', 'tf.size', (['vz_keys'], {}), True, 'import tensorflow as tf\n'), (61, 'tensorflow.constant', 'tf.constant', (['(1)'], {'dtype': 'tf.int64', 'name': '"""constant"""'}), True, 'import tensorflow as tf\n'), (71, 'tensorflow.constant', 'tf.constant', (['(1)'], {'dtype': 'tf.int64'}), True, 'import tensorflow as tf\n'), (81, 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'tf.int64'}), True, 'import tensorflow as tf\n'), (60, 'tensorflow.reshape', 'tf.reshape', (['u', '(-1, 1)'], {}), True, 'import tensorflow as tf\n'), (71, 'tensorflow.reshape', 
'tf.reshape', (['u', '(-1, 1)'], {}), True, 'import tensorflow as tf\n')]
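The contrib-era TF code above is hard to run on modern TensorFlow, but the kernel it computes is easy to state: count every length-p substring in each string, then take the dot product of the two count vectors. A plain-Python sketch of that definition (the function name is mine, not from the repo):

```python
from collections import Counter

def p_spectrum(x: str, y: str, p: int = 2) -> int:
    # K(x, y) = <Phi_p(x), Phi_p(y)>: dot product of p-gram count vectors.
    cx = Counter(x[i:i + p] for i in range(len(x) - p + 1))
    cy = Counter(y[i:i + p] for i in range(len(y) - p + 1))
    return sum(count * cy[gram] for gram, count in cx.items())

# 'abab' contains 'ab' twice and 'ba' once, so K = 2*2 + 1*1 = 5.
assert p_spectrum("abab", "abab") == 5
```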
teecha/Autonomous_Tello_Drone
b7fd20f3eb830bf5387dba1579c041975348de14
# Header to convert model outputs into boxes, scores, classes, valid
import tensorflow as tf
import numpy as np


def YoloV4Header(num_classes, anchorlist, mask, strides,
                 max_outputs, iou_threshold, score_threshold, inputs):

    boxes, objects, classes = [], [], []
    dtype = inputs[0].dtype
    for i, logits in enumerate(inputs):
        print(i, mask[i])
        stride = strides[i]
        anchors = anchorlist[mask[i]]
        x_shape = tf.shape(logits)
        logits = tf.reshape(logits, (x_shape[0], x_shape[1], x_shape[2], len(anchors), num_classes + 5))

        box_xy, box_wh, obj, cls = tf.split(logits, (2, 2, 1, num_classes), axis=-1)
        box_xy = tf.sigmoid(box_xy)
        obj = tf.sigmoid(obj)
        cls = tf.sigmoid(cls)
        anchors = anchors.astype(np.float32)

        grid_shape = x_shape[1:3]
        # print(grid_shape)
        grid_h, grid_w = grid_shape[0], grid_shape[1]
        # print(grid_h, tf.range(grid_h))
        grid = tf.meshgrid(tf.range(grid_w), tf.range(grid_h))
        grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)  # [gx, gy, 1, 2]

        box_xy = (box_xy + tf.cast(grid, dtype)) * stride
        box_wh = tf.exp(box_wh) * anchors

        box_x1y1 = box_xy - box_wh / 2.
        box_x2y2 = box_xy + box_wh / 2.
        box = tf.concat([box_x1y1, box_x2y2], axis=-1)

        boxes.append(tf.reshape(box, (x_shape[0], -1, 1, 4)))
        objects.append(tf.reshape(obj, (x_shape[0], -1, 1)))
        classes.append(tf.reshape(cls, (x_shape[0], -1, num_classes)))

    boxes = tf.concat(boxes, axis=1)
    objects = tf.concat(objects, axis=1)
    classes = tf.concat(classes, axis=1)

    scores = objects * classes
    boxes, scores, classes, valid = tf.image.combined_non_max_suppression(
        boxes=boxes,
        scores=scores,
        max_output_size_per_class=max_outputs,
        max_total_size=max_outputs,
        iou_threshold=iou_threshold,
        score_threshold=score_threshold,
        clip_boxes=False
    )

    return boxes, scores, classes, valid
[ "tensorflow.image.combined_non_max_suppression", "tensorflow.concat", "tensorflow.range", "tensorflow.shape", "tensorflow.stack", "tensorflow.reshape", "tensorflow.sigmoid", "tensorflow.exp", "tensorflow.cast", "tensorflow.split" ]
Yello/headers.py
[(44, 'tensorflow.concat', 'tf.concat', (['boxes'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (45, 'tensorflow.concat', 'tf.concat', (['objects'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (46, 'tensorflow.concat', 'tf.concat', (['classes'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (49, 'tensorflow.image.combined_non_max_suppression', 'tf.image.combined_non_max_suppression', ([], {'boxes': 'boxes', 'scores': 'scores', 'max_output_size_per_class': 'max_outputs', 'max_total_size': 'max_outputs', 'iou_threshold': 'iou_threshold', 'score_threshold': 'score_threshold', 'clip_boxes': '(False)'}), True, 'import tensorflow as tf\n'), (16, 'tensorflow.shape', 'tf.shape', (['logits'], {}), True, 'import tensorflow as tf\n'), (19, 'tensorflow.split', 'tf.split', (['logits', '(2, 2, 1, num_classes)'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (20, 'tensorflow.sigmoid', 'tf.sigmoid', (['box_xy'], {}), True, 'import tensorflow as tf\n'), (21, 'tensorflow.sigmoid', 'tf.sigmoid', (['obj'], {}), True, 'import tensorflow as tf\n'), (22, 'tensorflow.sigmoid', 'tf.sigmoid', (['cls'], {}), True, 'import tensorflow as tf\n'), (38, 'tensorflow.concat', 'tf.concat', (['[box_x1y1, box_x2y2]'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (30, 'tensorflow.range', 'tf.range', (['grid_w'], {}), True, 'import tensorflow as tf\n'), (30, 'tensorflow.range', 'tf.range', (['grid_h'], {}), True, 'import tensorflow as tf\n'), (31, 'tensorflow.stack', 'tf.stack', (['grid'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (34, 'tensorflow.exp', 'tf.exp', (['box_wh'], {}), True, 'import tensorflow as tf\n'), (40, 'tensorflow.reshape', 'tf.reshape', (['box', '(x_shape[0], -1, 1, 4)'], {}), True, 'import tensorflow as tf\n'), (41, 'tensorflow.reshape', 'tf.reshape', (['obj', '(x_shape[0], -1, 1)'], {}), True, 'import tensorflow as tf\n'), (42, 'tensorflow.reshape', 'tf.reshape', (['cls', '(x_shape[0], -1, num_classes)'], {}), True, 'import tensorflow as tf\n'), (33, 'tensorflow.cast', 'tf.cast', (['grid', 'dtype'], {}), True, 'import tensorflow as tf\n')]
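A hedged single-scale call of YoloV4Header above; the anchor values, grid size, and thresholds are illustrative COCO-style assumptions, not taken from the repo:

```python
import numpy as np
import tensorflow as tf
from headers import YoloV4Header  # assumed importable per Yello/headers.py

anchors = np.array([[116, 90], [156, 198], [373, 326]], dtype=np.float32)
logits = tf.random.normal((1, 13, 13, 3 * 85))  # one 13x13 grid, 80 classes

boxes, scores, classes, valid = YoloV4Header(
    num_classes=80, anchorlist=anchors, mask=[np.array([0, 1, 2])],
    strides=[32], max_outputs=100, iou_threshold=0.45,
    score_threshold=0.25, inputs=[logits])
```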
xuzhuang1996/hierarchical_loc
05a1be3d3c6a7f9bf0ff46525f8b4af8878f3e3e
from abc import ABCMeta, abstractmethod import tensorflow as tf import numpy as np from tqdm import tqdm import itertools class Mode: TRAIN = 'train' EVAL = 'eval' PRED = 'pred' class BaseModel(metaclass=ABCMeta): """Base model class. Arguments: data: A dictionary of `tf.data.Dataset` objects, can include the keys `"training"`, `"validation"`, and `"test"`. n_gpus: An integer, the number of GPUs available. data_shape: A dictionary, where the keys are the input features of the prediction network and the values are the associated shapes. Only required if `data` is empty or `None`. config: A dictionary containing the configuration parameters. Entries `"batch_size"` and `"learning_rate"` are required if `data`is given. Models should inherit from this class and implement the following methods: `_model`, `_loss`, and `_metrics`. Additionally, the following static attributes should be defined: input_spec: A dictionary, where the keys are the input features (e.g. `"image"`) and the associated values are dictionaries containing `"shape"` (list of dimensions, e.g. `[N, H, W, C]` where `None` indicates an unconstrained dimension) and `"type"` (e.g. `tf.float32`). required_config_keys: A list containing the required configuration entries. default_config: A dictionary of potential default configuration values. """ dataset_names = set(['training', 'validation', 'test']) required_baseconfig = ['batch_size', 'learning_rate'] _default_config = {'eval_batch_size': 1} @abstractmethod def _model(self, inputs, mode, **config): """Implements the graph of the model. This method is called three times: for training, evaluation and prediction (see the `mode` argument) and can return different tensors depending on the mode. It is a good practice to support both NCHW (channels first) and NHWC (channels last) data formats using a dedicated configuration entry. Arguments: inputs: A dictionary of input features, where the keys are their names (e.g. `"image"`) and the values of type `tf.Tensor`. Same keys as in the datasets given during the object instantiation. mode: An attribute of the `Mode` class, either `Mode.TRAIN`, `Mode.EVAL` or `Mode.PRED`. config: A configuration dictionary, given during the object instantiantion. Returns: A dictionary of outputs, where the keys are their names (e.g. `"logits"`) and the values are the corresponding `tf.Tensor`. """ raise NotImplementedError @abstractmethod def _loss(self, outputs, inputs, **config): """Implements the sub-graph computing the training loss. This method is called on the outputs of the `_model` method in training mode. Arguments: outputs: A dictionary, as retuned by `_model` called with `mode=Mode.TRAIN`. inputs: A dictionary of input features (see same as for `_model`). config: A configuration dictionary. Returns: A tensor corresponding to the loss to be minimized during training. """ raise NotImplementedError @abstractmethod def _metrics(self, outputs, inputs, **config): """Implements the sub-graph computing the evaluation metrics. This method is called on the outputs of the `_model` method in evaluation mode. Arguments: outputs: A dictionary, as retuned by `_model` called with `mode=Mode.EVAL`. inputs: A dictionary of input features (see same as for `_model`). config: A configuration dictionary. Returns: A dictionary of metrics, where the keys are their names (e.g. "`accuracy`") and the values are the corresponding `tf.Tensor`. 
""" raise NotImplementedError def __init__(self, data={}, n_gpus=1, data_shape=None, **config): self.datasets = data self.data_shape = data_shape self.n_gpus = n_gpus self.graph = tf.get_default_graph() self.name = self.__class__.__name__.lower() # get child name # Update config self.config = self._default_config self.config.update(getattr(self, 'default_config', {})) self.config.update(config) required = getattr(self, 'required_config_keys', []) if self.datasets: required += self.required_baseconfig for r in required: assert r in self.config, 'Required configuration entry: \'{}\''.format(r) assert set(self.datasets) <= self.dataset_names, \ 'Unknown dataset name: {}'.format(set(self.datasets)-self.dataset_names) assert n_gpus > 0, 'TODO: CPU-only training is currently not supported.' if data_shape is None: self.data_shape = {i: s['shape'] for i, s in self.input_spec.items()} with tf.variable_scope('', reuse=tf.AUTO_REUSE): self._build_graph() def _gpu_tower(self, data, mode): # Split the batch between the GPUs (data parallelism) with tf.device('/cpu:0'): with tf.name_scope('{}_data_sharding'.format(mode)): batch_size = self.config['batch_size'] if (mode == Mode.TRAIN) \ else self.config['eval_batch_size'] shards = {d: tf.unstack(v, num=batch_size*self.n_gpus, axis=0) for d, v in data.items()} shards = [{d: tf.stack(v[i::self.n_gpus]) for d, v in shards.items()} for i in range(self.n_gpus)] # Create towers, i.e. copies of the model for each GPU, # with their own loss and gradients. tower_losses = [] tower_gradvars = [] tower_preds = [] tower_metrics = [] for i in range(self.n_gpus): worker = '/gpu:{}'.format(i) device_setter = tf.train.replica_device_setter( worker_device=worker, ps_device='/cpu:0', ps_tasks=1) with tf.name_scope('{}_{}'.format(mode, i)) as scope: with tf.device(device_setter): net_outputs = self._model(shards[i], mode, **self.config) if mode == Mode.TRAIN: loss = self._loss(net_outputs, shards[i], **self.config) loss += tf.reduce_sum( tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope)) model_params = tf.trainable_variables() grad = tf.gradients(loss, model_params) tower_losses.append(loss) tower_gradvars.append(zip(grad, model_params)) if i == 0: update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope) elif mode == Mode.EVAL: tower_metrics.append(self._metrics( net_outputs, shards[i], **self.config)) else: tower_preds.append(net_outputs) if mode == Mode.TRAIN: return tower_losses, tower_gradvars, update_ops elif mode == Mode.EVAL: return tower_metrics else: return tower_preds def _train_graph(self, data): tower_losses, tower_gradvars, update_ops = self._gpu_tower(data, Mode.TRAIN) # Perform the consolidation on CPU gradvars = [] with tf.device('/cpu:0'): # Average losses and gradients with tf.name_scope('tower_averaging'): all_grads = {} for grad, var in itertools.chain(*tower_gradvars): if grad is not None: all_grads.setdefault(var, []).append(grad) for var, grads in all_grads.items(): if len(grads) == 1: avg_grad = grads[0] else: avg_grad = tf.multiply(tf.add_n(grads), 1. 
/ len(grads)) gradvars.append((avg_grad, var)) self.loss = tf.reduce_mean(tower_losses) tf.summary.scalar('loss', self.loss) # Create optimizer ops self.global_step = tf.Variable(0, trainable=False, name='global_step') opt = tf.train.RMSPropOptimizer(self.config['learning_rate']) with tf.control_dependencies(update_ops): self.trainer = opt.apply_gradients( gradvars, global_step=self.global_step) def _eval_graph(self, data): tower_metrics = self._gpu_tower(data, Mode.EVAL) with tf.device('/cpu:0'): self.metrics = {m: tf.reduce_mean(tf.stack([t[m] for t in tower_metrics])) for m in tower_metrics[0]} def _pred_graph(self, data): with tf.name_scope('pred'): with tf.device('/gpu:0'): pred_out = self._model(data, Mode.PRED, **self.config) self.pred_out = {n: tf.identity(p, name=n) for n, p in pred_out.items()} def _build_graph(self): # Training and evaluation network, if tf datasets provided if self.datasets: # Generate iterators for the given tf datasets self.dataset_iterators = {} with tf.device('/cpu:0'): for n, d in self.datasets.items(): if n == 'training': train_batch = self.config['batch_size']*self.n_gpus d = d.repeat().batch(train_batch).prefetch(train_batch) self.dataset_iterators[n] = d.make_one_shot_iterator() else: d = d.batch(self.config['eval_batch_size']*self.n_gpus) self.dataset_iterators[n] = d.make_initializable_iterator() output_types = d.output_types output_shapes = d.output_shapes self.datasets[n] = d # Perform compatibility checks with the inputs of the child model for i, spec in self.input_spec.items(): assert i in output_shapes tf.TensorShape(output_shapes[i]).assert_is_compatible_with( tf.TensorShape(spec['shape'])) # Used for input shapes of the prediction network if self.data_shape is None: self.data_shape = output_shapes # Handle for the feedable iterator self.handle = tf.placeholder(tf.string, shape=[]) iterator = tf.data.Iterator.from_string_handle( self.handle, output_types, output_shapes) data = iterator.get_next() # Build the actual training and evaluation models self._train_graph(data) self._eval_graph(data) self.summaries = tf.summary.merge_all() # Prediction network with feed_dict self.pred_in = {i: tf.placeholder(self.input_spec[i]['type'], shape=s, name=i) for i, s in self.data_shape.items()} self._pred_graph(self.pred_in) # Start session sess_config = tf.ConfigProto(device_count={'GPU': self.n_gpus}) sess_config.gpu_options.allow_growth = True self.sess = tf.Session(config=sess_config) # Register tf dataset handles if self.datasets: self.dataset_handles = {} for n, i in self.dataset_iterators.items(): self.dataset_handles[n] = self.sess.run(i.string_handle()) self.sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()]) def train(self, iterations, validation_interval=100, output_dir=None, save_interval=None, checkpoint_path=None, keep_checkpoints=1): assert 'training' in self.datasets, 'Training dataset is required.' 
if output_dir is not None: train_writer = tf.summary.FileWriter(output_dir) if not hasattr(self, 'saver'): with tf.device('/cpu:0'): self.saver = tf.train.Saver(save_relative_paths=True, max_to_keep=keep_checkpoints) if not self.graph.finalized: self.graph.finalize() tf.logging.info('Start training') for i in range(iterations): loss, summaries, _ = self.sess.run( [self.loss, self.summaries, self.trainer], feed_dict={self.handle: self.dataset_handles['training']}) if save_interval and checkpoint_path and i != 0 and i % save_interval == 0: self.save(checkpoint_path) if 'validation' in self.datasets and i % validation_interval == 0: metrics = self.evaluate('validation', mute=True) tf.logging.info( 'Iter {:4d}: loss {:.4f}'.format(i, loss) + ''.join([', {} {:.4f}'.format(m, metrics[m]) for m in metrics])) if output_dir is not None: train_writer.add_summary(summaries, i) metrics_summaries = tf.Summary(value=[ tf.Summary.Value(tag=m, simple_value=v) for m, v in metrics.items()]) train_writer.add_summary(metrics_summaries, i) tf.logging.info('Training finished') def predict(self, data, keys='*', batch=False): assert set(data.keys()) >= set(self.data_shape.keys()) if isinstance(keys, str): if keys == '*': op = self.pred_out # just gather all outputs else: op = self.pred_out[keys] else: op = {k: self.pred_out[k] for k in keys} if not batch: # add batch dimension data = {d: [v] for d, v in data.items()} feed = {self.pred_in[i]: data[i] for i in self.data_shape} pred = self.sess.run(op, feed_dict=feed) if not batch: # remove batch dimension if isinstance(pred, dict): pred = {p: v[0] for p, v in pred.items()} else: pred = pred[0] return pred def evaluate(self, dataset, max_iterations=None, mute=False): assert dataset in self.datasets self.sess.run(self.dataset_iterators[dataset].initializer) if not mute: tf.logging.info('Starting evaluation of dataset \'{}\''.format(dataset)) if max_iterations: pbar = tqdm(total=max_iterations, ascii=True) i = 0 metrics = [] while True: try: metrics.append(self.sess.run(self.metrics, feed_dict={self.handle: self.dataset_handles[dataset]})) except tf.errors.OutOfRangeError: break if max_iterations: i += 1 if not mute: pbar.update(1) if i == max_iterations: break if not mute: tf.logging.info('Finished evaluation') if max_iterations: pbar.close() # List of dicts to dict of lists metrics = dict(zip(metrics[0], zip(*[m.values() for m in metrics]))) metrics = {m: np.nanmean(metrics[m], axis=0) for m in metrics} return metrics def _checkpoint_var_search(self, checkpoint_path): reader = tf.train.NewCheckpointReader(checkpoint_path) saved_shapes = reader.get_variable_to_shape_map() model_names = tf.model_variables() # Used by tf.slim layers if not len(tf.model_variables()): model_names = tf.global_variables() # Fallback when slim is not used model_names = set([v.name.split(':')[0] for v in model_names]) checkpoint_names = set(saved_shapes.keys()) found_names = model_names & checkpoint_names missing_names = model_names - checkpoint_names shape_conflicts = set() restored = [] with tf.variable_scope('', reuse=True): for name in found_names: # print(tf.global_variables()) # print(name, name in model_names, name in checkpoint_names) var = tf.get_variable(name) var_shape = var.get_shape().as_list() if var_shape == saved_shapes[name]: restored.append(var) else: shape_conflicts.add(name) found_names -= shape_conflicts return (restored, sorted(found_names), sorted(missing_names), sorted(shape_conflicts)) def load(self, checkpoint_path, flexible_restore=True): if 
tf.gfile.IsDirectory(checkpoint_path): checkpoint_path = tf.train.latest_checkpoint(checkpoint_path) if checkpoint_path is None: raise ValueError('Checkpoint directory is empty.') if flexible_restore: var_list, found, missing, conflicts = self._checkpoint_var_search( checkpoint_path) tf.logging.info('Restoring variables: \n\t{}'.format( '\n\t'.join(found))) if len(missing) > 0: tf.logging.info('Variables not found in checkpoint: \n\t{}'.format( '\n\t'.join(missing))) if len(conflicts) > 0: tf.logging.info('Variables with incompatible shapes: \n\t{}'.format( '\n\t'.join(conflicts))) else: var_list = None with tf.device('/cpu:0'): saver = tf.train.Saver(var_list=var_list, save_relative_paths=True) saver.restore(self.sess, checkpoint_path) def save(self, checkpoint_path): step = self.sess.run(self.global_step) tf.logging.info('Saving checkpoint for iteration #{}'.format(step)) self.saver.save(self.sess, checkpoint_path, write_meta_graph=False, global_step=step) def close(self): self.sess.close() def __enter__(self): return self def __exit__(self, *args): self.close()
[ "tensorflow.device", "tensorflow.get_variable", "tensorflow.control_dependencies", "tensorflow.stack", "tensorflow.global_variables", "numpy.nanmean", "tensorflow.get_default_graph", "tensorflow.gfile.IsDirectory", "tensorflow.summary.scalar", "tensorflow.add_n", "tensorflow.Variable", "tensorflow.data.Iterator.from_string_handle", "tensorflow.get_collection", "tensorflow.gradients", "tensorflow.model_variables", "tensorflow.ConfigProto", "tensorflow.name_scope", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.trainable_variables", "tensorflow.TensorShape", "tensorflow.train.RMSPropOptimizer", "tensorflow.unstack", "tensorflow.identity", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.train.NewCheckpointReader", "tensorflow.logging.info", "tensorflow.summary.merge_all", "tensorflow.summary.FileWriter", "tensorflow.train.latest_checkpoint", "tensorflow.local_variables_initializer", "tensorflow.reduce_mean", "tensorflow.train.replica_device_setter", "tensorflow.Summary.Value", "tensorflow.variable_scope" ]
retrievalnet/retrievalnet/models/base_model.py
[(101, 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), True, 'import tensorflow as tf\n'), (258, 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'device_count': "{'GPU': self.n_gpus}"}), True, 'import tensorflow as tf\n'), (260, 'tensorflow.Session', 'tf.Session', ([], {'config': 'sess_config'}), True, 'import tensorflow as tf\n'), (283, 'tensorflow.logging.info', 'tf.logging.info', (['"""Start training"""'], {}), True, 'import tensorflow as tf\n'), (303, 'tensorflow.logging.info', 'tf.logging.info', (['"""Training finished"""'], {}), True, 'import tensorflow as tf\n'), (358, 'tensorflow.train.NewCheckpointReader', 'tf.train.NewCheckpointReader', (['checkpoint_path'], {}), True, 'import tensorflow as tf\n'), (360, 'tensorflow.model_variables', 'tf.model_variables', ([], {}), True, 'import tensorflow as tf\n'), (384, 'tensorflow.gfile.IsDirectory', 'tf.gfile.IsDirectory', (['checkpoint_path'], {}), True, 'import tensorflow as tf\n'), (121, 'tensorflow.variable_scope', 'tf.variable_scope', (['""""""'], {'reuse': 'tf.AUTO_REUSE'}), True, 'import tensorflow as tf\n'), (126, 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), True, 'import tensorflow as tf\n'), (143, 'tensorflow.train.replica_device_setter', 'tf.train.replica_device_setter', ([], {'worker_device': 'worker', 'ps_device': '"""/cpu:0"""', 'ps_tasks': '(1)'}), True, 'import tensorflow as tf\n'), (178, 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), True, 'import tensorflow as tf\n'), (195, 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)', 'name': '"""global_step"""'}), True, 'import tensorflow as tf\n'), (196, 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (["self.config['learning_rate']"], {}), True, 'import tensorflow as tf\n'), (203, 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), True, 'import tensorflow as tf\n'), (208, 'tensorflow.name_scope', 'tf.name_scope', (['"""pred"""'], {}), True, 'import tensorflow as tf\n'), (211, 'tensorflow.identity', 'tf.identity', (['p'], {'name': 'n'}), True, 'import tensorflow as tf\n'), (250, 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), True, 'import tensorflow as tf\n'), (253, 'tensorflow.placeholder', 'tf.placeholder', (["self.input_spec[i]['type']"], {'shape': 's', 'name': 'i'}), True, 'import tensorflow as tf\n'), (275, 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['output_dir'], {}), True, 'import tensorflow as tf\n'), (348, 'tensorflow.logging.info', 'tf.logging.info', (['"""Finished evaluation"""'], {}), True, 'import tensorflow as tf\n'), (354, 'numpy.nanmean', 'np.nanmean', (['metrics[m]'], {'axis': '(0)'}), True, 'import numpy as np\n'), (362, 'tensorflow.global_variables', 'tf.global_variables', ([], {}), True, 'import tensorflow as tf\n'), (369, 'tensorflow.variable_scope', 'tf.variable_scope', (['""""""'], {'reuse': '(True)'}), True, 'import tensorflow as tf\n'), (385, 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['checkpoint_path'], {}), True, 'import tensorflow as tf\n'), (401, 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), True, 'import tensorflow as tf\n'), (402, 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'var_list': 'var_list', 'save_relative_paths': '(True)'}), True, 'import tensorflow as tf\n'), (180, 'tensorflow.name_scope', 'tf.name_scope', (['"""tower_averaging"""'], {}), True, 'import tensorflow as tf\n'), (182, 'itertools.chain', 'itertools.chain', (['*tower_gradvars'], {}), False, 'import itertools\n'), 
(191, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['tower_losses'], {}), True, 'import tensorflow as tf\n'), (192, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'self.loss'], {}), True, 'import tensorflow as tf\n'), (197, 'tensorflow.control_dependencies', 'tf.control_dependencies', (['update_ops'], {}), True, 'import tensorflow as tf\n'), (209, 'tensorflow.device', 'tf.device', (['"""/gpu:0"""'], {}), True, 'import tensorflow as tf\n'), (218, 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), True, 'import tensorflow as tf\n'), (242, 'tensorflow.placeholder', 'tf.placeholder', (['tf.string'], {'shape': '[]'}), True, 'import tensorflow as tf\n'), (243, 'tensorflow.data.Iterator.from_string_handle', 'tf.data.Iterator.from_string_handle', (['self.handle', 'output_types', 'output_shapes'], {}), True, 'import tensorflow as tf\n'), (268, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (269, 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (277, 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), True, 'import tensorflow as tf\n'), (278, 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'save_relative_paths': '(True)', 'max_to_keep': 'keep_checkpoints'}), True, 'import tensorflow as tf\n'), (332, 'tqdm.tqdm', 'tqdm', ([], {'total': 'max_iterations', 'ascii': '(True)'}), False, 'from tqdm import tqdm\n'), (361, 'tensorflow.model_variables', 'tf.model_variables', ([], {}), True, 'import tensorflow as tf\n'), (373, 'tensorflow.get_variable', 'tf.get_variable', (['name'], {}), True, 'import tensorflow as tf\n'), (130, 'tensorflow.unstack', 'tf.unstack', (['v'], {'num': '(batch_size * self.n_gpus)', 'axis': '(0)'}), True, 'import tensorflow as tf\n'), (146, 'tensorflow.device', 'tf.device', (['device_setter'], {}), True, 'import tensorflow as tf\n'), (204, 'tensorflow.stack', 'tf.stack', (['[t[m] for t in tower_metrics]'], {}), True, 'import tensorflow as tf\n'), (132, 'tensorflow.stack', 'tf.stack', (['v[i::self.n_gpus]'], {}), True, 'import tensorflow as tf\n'), (153, 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), True, 'import tensorflow as tf\n'), (154, 'tensorflow.gradients', 'tf.gradients', (['loss', 'model_params'], {}), True, 'import tensorflow as tf\n'), (151, 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.REGULARIZATION_LOSSES', 'scope'], {}), True, 'import tensorflow as tf\n'), (158, 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS', 'scope'], {}), True, 'import tensorflow as tf\n'), (189, 'tensorflow.add_n', 'tf.add_n', (['grads'], {}), True, 'import tensorflow as tf\n'), (235, 'tensorflow.TensorShape', 'tf.TensorShape', (["spec['shape']"], {}), True, 'import tensorflow as tf\n'), (234, 'tensorflow.TensorShape', 'tf.TensorShape', (['output_shapes[i]'], {}), True, 'import tensorflow as tf\n'), (300, 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': 'm', 'simple_value': 'v'}), True, 'import tensorflow as tf\n')]
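A sketch of the subclassing contract that BaseModel's docstrings above describe, using TF 1.x layers to match the file; the toy architecture and all names below are mine, not from the repo:

```python
import tensorflow as tf
from retrievalnet.models.base_model import BaseModel  # path per the record

class TinyClassifier(BaseModel):
    # Static attributes requested by the BaseModel docstring.
    input_spec = {
        'image': {'shape': [None, 28, 28, 1], 'type': tf.float32},
        'label': {'shape': [None], 'type': tf.int64},
    }

    def _model(self, inputs, mode, **config):
        flat = tf.layers.flatten(inputs['image'])
        return {'logits': tf.layers.dense(flat, 10)}

    def _loss(self, outputs, inputs, **config):
        return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=inputs['label'], logits=outputs['logits']))

    def _metrics(self, outputs, inputs, **config):
        pred = tf.argmax(outputs['logits'], axis=-1)
        correct = tf.cast(tf.equal(pred, inputs['label']), tf.float32)
        return {'accuracy': tf.reduce_mean(correct)}

# With a tf.data.Dataset in hand, instantiation would look like:
# model = TinyClassifier(data={'training': ds}, batch_size=32, learning_rate=1e-3)
```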
shawwn/mesh
9625f34e00a201775249ddb887529da859aa83a8
# coding=utf-8 # Copyright 2021 The Mesh TensorFlow Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Distributed variable implementation for TPUs.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import contextlib from tensorflow.python.framework import ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_resource_variable_ops try: from tensorflow.python.types import core # pylint:disable=g-import-not-at-top,g-direct-tensorflow-import TF_23 = True except ImportError: TF_23 = False if TF_23: VariableBase = core.Tensor else: VariableBase = object @contextlib.contextmanager def _handle_graph(handle): with handle.graph.as_default(): yield def _enclosing_tpu_context(): # pylint: disable=protected-access context = ops.get_default_graph()._get_control_flow_context() # pylint: enable=protected-access while context is not None and not isinstance( context, control_flow_ops.XLAControlFlowContext): context = context.outer_context return context class ReplicatedVariable(VariableBase): """A replicated variable for use on TPUs. When accessed inside a tpu.replicate() context, this variable acts as if it is a single variable whose handle is a replicated input to the computation. Outside a tpu.replicate() context currently this object has pretty murky semantics, especially with respect to things such as * initialization * colocation. TODO(phawkins): merge this with the TPU DistributionStrategy code. """ def __init__(self, name, variables): self._name = name self._primary_var = variables[0] self._vars = variables self._cached_value = None self._dtype = variables[0].dtype @property def handle(self): tpu_context = _enclosing_tpu_context() if tpu_context is None: return self._primary_var.handle return tpu_context.get_replicated_var_handle(self._name, self._vars) @contextlib.contextmanager def _assign_dependencies(self): """Makes assignments depend on the cached value, if any. This prevents undefined behavior with reads not ordered wrt writes. Yields: None. 
""" if self._cached_value is not None: with ops.control_dependencies([self._cached_value]): yield else: yield @property def initializer(self): return control_flow_ops.group([v.initializer for v in self._vars]) @property def graph(self): return self._primary_var.graph @property def _shared_name(self): return self._common_name @property def _unique_id(self): return self._primary_var._unique_id # pylint: disable=protected-access @property def name(self): return self._name @property def dtype(self): return self._primary_var.dtype @property def shape(self): return self._primary_var.shape def get_shape(self): return self._primary_var.get_shape() def to_proto(self, export_scope=None): return self._primary_var.to_proto(export_scope=export_scope) @property def constraint(self): return None @property def op(self): return self.get().op def _read_variable_op(self): if _enclosing_tpu_context() is None: return self._primary_var.read_value() v = gen_resource_variable_ops.read_variable_op(self.handle, self._dtype) return v def read_value(self): return self._read_variable_op() def assign(self, value, use_locking=None, name=None, read_value=False): del use_locking with _handle_graph(self.handle), self._assign_dependencies(): value_tensor = ops.convert_to_tensor(value, dtype=self.dtype) assign_op = gen_resource_variable_ops.assign_variable_op( self.handle, value_tensor, name=name) if read_value: return self._read_variable_op() return assign_op def assign_add(self, delta, use_locking=None, name=None, read_value=True): del use_locking with _handle_graph(self.handle), self._assign_dependencies(): assign_add_op = gen_resource_variable_ops.assign_add_variable_op( self.handle, ops.convert_to_tensor(delta, dtype=self.dtype), name=name) if read_value: return self._read_variable_op() return assign_add_op def assign_sub(self, delta, use_locking=None, name=None, read_value=True): del use_locking with _handle_graph(self.handle), self._assign_dependencies(): assign_sub_op = gen_resource_variable_ops.assign_sub_variable_op( self.handle, ops.convert_to_tensor(delta, dtype=self.dtype), name=name) if read_value: return self._read_variable_op() return assign_sub_op def get(self): return self._primary_var @property def _in_graph_mode(self): return self._primary_var._in_graph_mode # pylint: disable=protected-access def _should_act_as_resource_variable(self): """Pass resource_variable_ops.is_resource_variable check.""" pass def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False): """Converts a variable to a tensor.""" # pylint: disable=protected-access if _enclosing_tpu_context() is None: if hasattr(self._primary_var, '_dense_var_to_tensor'): return self._primary_var._dense_var_to_tensor(dtype, name, as_ref) else: return ops.convert_to_tensor(self._primary_var) # pylint: enable=protected-access if dtype is not None and dtype != self.dtype: return NotImplemented if as_ref: return self.handle else: return self.read_value() # Register a conversion function which reads the value of the variable, # allowing instances of the class to be used as tensors. def _tensor_conversion(var, dtype=None, name=None, as_ref=False): return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref) # pylint: disable=protected-access ops.register_tensor_conversion_function(ReplicatedVariable, _tensor_conversion) if not TF_23: ops.register_dense_tensor_like_type(ReplicatedVariable)
[ "tensorflow.python.ops.gen_resource_variable_ops.read_variable_op", "tensorflow.python.ops.control_flow_ops.group", "tensorflow.python.ops.gen_resource_variable_ops.assign_variable_op", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.framework.ops.register_dense_tensor_like_type", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.framework.ops.register_tensor_conversion_function" ]
mesh_tensorflow/tpu_variables.py
[(218, 'tensorflow.python.framework.ops.register_tensor_conversion_function', 'ops.register_tensor_conversion_function', (['ReplicatedVariable', '_tensor_conversion'], {}), False, 'from tensorflow.python.framework import ops\n'), (221, 'tensorflow.python.framework.ops.register_dense_tensor_like_type', 'ops.register_dense_tensor_like_type', (['ReplicatedVariable'], {}), False, 'from tensorflow.python.framework import ops\n'), (103, 'tensorflow.python.ops.control_flow_ops.group', 'control_flow_ops.group', (['[v.initializer for v in self._vars]'], {}), False, 'from tensorflow.python.ops import control_flow_ops\n'), (146, 'tensorflow.python.ops.gen_resource_variable_ops.read_variable_op', 'gen_resource_variable_ops.read_variable_op', (['self.handle', 'self._dtype'], {}), False, 'from tensorflow.python.ops import gen_resource_variable_ops\n'), (49, 'tensorflow.python.framework.ops.get_default_graph', 'ops.get_default_graph', ([], {}), False, 'from tensorflow.python.framework import ops\n'), (155, 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['value'], {'dtype': 'self.dtype'}), False, 'from tensorflow.python.framework import ops\n'), (156, 'tensorflow.python.ops.gen_resource_variable_ops.assign_variable_op', 'gen_resource_variable_ops.assign_variable_op', (['self.handle', 'value_tensor'], {'name': 'name'}), False, 'from tensorflow.python.ops import gen_resource_variable_ops\n'), (96, 'tensorflow.python.framework.ops.control_dependencies', 'ops.control_dependencies', (['[self._cached_value]'], {}), False, 'from tensorflow.python.framework import ops\n'), (167, 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['delta'], {'dtype': 'self.dtype'}), False, 'from tensorflow.python.framework import ops\n'), (178, 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['delta'], {'dtype': 'self.dtype'}), False, 'from tensorflow.python.framework import ops\n'), (202, 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['self._primary_var'], {}), False, 'from tensorflow.python.framework import ops\n')]
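Outside a tpu.replicate() context, ReplicatedVariable above simply forwards to its first ("primary") variable. A graph-mode sketch with two stand-in resource variables (the variable names are mine; behavior inside a real TPU context differs):

```python
import tensorflow.compat.v1 as tf
from mesh_tensorflow.tpu_variables import ReplicatedVariable

with tf.Graph().as_default():
    v0 = tf.get_variable('w_0', initializer=tf.zeros([2]), use_resource=True)
    v1 = tf.get_variable('w_1', initializer=tf.zeros([2]), use_resource=True)
    w = ReplicatedVariable('w', [v0, v1])

    read = w.read_value()                # reads the primary replica
    update = w.assign_add(tf.ones([2]))  # resource-op update on the handle
    init = w.initializer                 # groups both replicas' initializers
```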
Joranson/modifiedTF
bbf3d1c16ef2b1e8d3e1add9fe07dd07d52206da
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Monitors allow user instrumentation of the training process. Monitors are useful to track training, report progress, request early stopping and more. Monitors use the observer pattern and notify at the following points: * when training begins * before a training step * after a training step * when training ends Monitors are not intended to be reusable. There are a few pre-defined monitors: * `CaptureVariable`: saves a variable's values * `GraphDump`: intended for debug only - saves all tensor values * `PrintTensor`: outputs one or more tensor values to log * `SummarySaver`: saves summaries to a summary writer * `ValidationMonitor`: runs model validation, by periodically calculating eval metrics on a separate data set; supports optional early stopping For more specific needs, you can create custom monitors by extending one of the following classes: * `BaseMonitor`: the base class for all monitors * `EveryN`: triggers a callback every N training steps Example: ```python class ExampleMonitor(monitors.BaseMonitor): def __init__(self): print 'Init' def begin(self, max_steps): print 'Starting run. Will train until step %d.' % max_steps def end(self): print 'Completed run.' def step_begin(self, step): print 'About to run step %d...' % step return ['loss_1:0'] def step_end(self, step, outputs): print 'Done running step %d. The value of "loss" tensor: %s' % ( step, outputs['loss_1:0']) linear_regressor = LinearRegressor() example_monitor = ExampleMonitor() linear_regressor.fit( x, y, steps=2, batch_size=1, monitors=[example_monitor]) ``` ## Ops @@get_default_monitors @@BaseMonitor @@CaptureVariable @@CheckpointSaver @@EveryN @@ExportMonitor @@GraphDump @@LoggingTrainable @@NanLoss @@PrintTensor @@StepCounter @@StopAtStep @@SummarySaver @@ValidationMonitor """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import inspect import os import time import numpy as np import six from tensorflow.contrib.framework import deprecated_arg_values from tensorflow.contrib.framework.python.ops import variables as contrib_variables from tensorflow.contrib.learn.python.learn import session_run_hook from tensorflow.contrib.learn.python.learn.summary_writer_cache import SummaryWriterCache from tensorflow.core.framework.summary_pb2 import Summary from tensorflow.core.util.event_pb2 import SessionLog from tensorflow.python.framework import ops from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training import saver as saver_lib from tensorflow.python.training import summary_io from tensorflow.python.util import deprecation # TODO(ptucker): Split each monitor class into a separate file. # TODO(ptucker): Fail if epoch or step does not monotonically increase? class BaseMonitor(object): """Base class for Monitors. Defines basic interfaces of Monitors. 
Monitors can either be run on all workers or, more commonly, restricted to run exclusively on the elected chief worker. """ @deprecation.deprecated( "2016-12-05", "Monitors are deprecated. Please use tf.train.SessionRunHook.") def __init__(self): self._begun = False self._current_epoch = None self._current_step = None self._max_steps = None self._estimator = None @property def run_on_all_workers(self): return False def set_estimator(self, estimator): """A setter called automatically by the target estimator. If the estimator is locked, this method does nothing. Args: estimator: the estimator that this monitor monitors. Raises: ValueError: if the estimator is None. """ if estimator is None: raise ValueError("Missing estimator.") # TODO(mdan): This should fail if called twice with the same estimator. self._estimator = estimator def begin(self, max_steps=None): """Called at the beginning of training. When called, the default graph is the one we are executing. Args: max_steps: `int`, the maximum global step this training will run until. Raises: ValueError: if we've already begun a run. """ if self._begun: raise ValueError("begin called twice without end.") self._max_steps = max_steps self._begun = True def end(self, session=None): """Callback at the end of training/evaluation. Args: session: A `tf.Session` object that can be used to run ops. Raises: ValueError: if we've not begun a run. """ _ = session if not self._begun: raise ValueError("end called without begin.") self._max_steps = None self._begun = False def epoch_begin(self, epoch): """Begin epoch. Args: epoch: `int`, the epoch number. Raises: ValueError: if we've already begun an epoch, or `epoch` < 0. """ if self._current_epoch is not None: raise ValueError("epoch_begin called twice without epoch_end.") if epoch < 0: raise ValueError("Invalid epoch %s." % epoch) self._current_epoch = epoch def epoch_end(self, epoch): """End epoch. Args: epoch: `int`, the epoch number. Raises: ValueError: if we've not begun an epoch, or `epoch` number does not match. """ if self._current_epoch != epoch: raise ValueError( "epoch_end expected %s but got %s.", self._current_epoch, epoch) self._current_epoch = None def step_begin(self, step): """Callback before training step begins. You may use this callback to request evaluation of additional tensors in the graph. Args: step: `int`, the current value of the global step. Returns: List of `Tensor` objects or string tensor names to be run. Raises: ValueError: if we've already begun a step, or `step` < 0, or `step` > `max_steps`. """ if (step < 0) or ( (self._max_steps is not None) and (step > self._max_steps)): raise ValueError("Invalid step %s." % step) self._current_step = step return [] def step_end(self, step, output): # pylint: disable=unused-argument """Callback after training step finished. This callback provides access to the tensors/ops evaluated at this step, including the additional tensors for which evaluation was requested in `step_begin`. In addition, the callback has the opportunity to stop training by returning `True`. This is useful for early stopping, for example. Note that this method is not called if the call to `Session.run()` that followed the last call to `step_begin()` failed. Args: step: `int`, the current value of the global step. output: `dict` mapping `string` values representing tensor names to the value resulted from running these tensors. Values may be either scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors. Returns: `bool`. True if training should stop. 
Raises: ValueError: if we've not begun a step, or `step` number does not match. """ if self._current_step != step: raise ValueError( "step_end expected %s but got %s.", self._current_step, step) self._current_step = None return False def post_step(self, step, session): # pylint: disable=unused-argument """Callback after the step is finished. Called after step_end and receives session to perform extra session.run calls. If failure occurred in the process, will be called as well. Args: step: `int`, global step of the model. session: `Session` object. """ _ = step, session def _extract_output(outputs, request): if request in outputs: return outputs[request] return outputs[request.name] class EveryN(BaseMonitor): """Base class for monitors that execute callbacks every N steps. This class adds three new callbacks: - every_n_step_begin - every_n_step_end - every_n_post_step The callbacks are executed every n steps, or optionally every step for the first m steps, where m and n can both be user-specified. When extending this class, note that if you wish to use any of the `BaseMonitor` callbacks, you must call their respective super implementation: def step_begin(self, step): super(ExampleMonitor, self).step_begin(step) return [] Failing to call the super implementation will cause unpredictable behavior. The `every_n_post_step()` callback is also called after the last step if it was not already called through the regular conditions. Note that `every_n_step_begin()` and `every_n_step_end()` do not receive that special treatment. """ # TODO(ipolosukhin): Add also every n seconds. def __init__(self, every_n_steps=100, first_n_steps=1): """Initializes an `EveryN` monitor. Args: every_n_steps: `int`, the number of steps to allow between callbacks. first_n_steps: `int`, specifying the number of initial steps during which the callbacks will always be executed, regardless of the value of `every_n_steps`. Note that this value is relative to the global step """ super(EveryN, self).__init__() self._every_n_steps = every_n_steps self._first_n_steps = first_n_steps # Last step in the model. self._last_successful_step = None # Last step at which we called one of the every_n methods self._last_active_step = 0 self._every_n_step_begin_called = False def every_n_step_begin(self, step): # pylint: disable=unused-argument """Callback before every n'th step begins. Args: step: `int`, the current value of the global step. Returns: A `list` of tensors that will be evaluated at this step. """ return [] def every_n_step_end(self, step, outputs): # pylint: disable=unused-argument """Callback after every n'th step finished. This callback provides access to the tensors/ops evaluated at this step, including the additional tensors for which evaluation was requested in `step_begin`. In addition, the callback has the opportunity to stop training by returning `True`. This is useful for early stopping, for example. Args: step: `int`, the current value of the global step. outputs: `dict` mapping `string` values representing tensor names to the value resulted from running these tensors. Values may be either scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors. Returns: `bool`. True if training should stop. """ return False def every_n_post_step(self, step, session): """Callback after a step is finished or `end()` is called. Args: step: `int`, the current value of the global step. session: `Session` object. """ pass def step_begin(self, step): """Overrides `BaseMonitor.step_begin`. 
When overriding this method, you must call the super implementation. Args: step: `int`, the current value of the global step. Returns: A `list`, the result of every_n_step_begin, if that was called this step, or an empty list otherwise. Raises: ValueError: if called more than once during a step. """ super(EveryN, self).step_begin(step) if (step <= self._first_n_steps or step >= (self._every_n_steps + self._last_active_step) or step == self._max_steps): # Note: max_steps can be None here. self._every_n_step_begin_called = True return self.every_n_step_begin(step) self._every_n_step_begin_called = False return [] def step_end(self, step, output): """Overrides `BaseMonitor.step_end`. When overriding this method, you must call the super implementation. Args: step: `int`, the current value of the global step. output: `dict` mapping `string` values representing tensor names to the value resulted from running these tensors. Values may be either scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors. Returns: `bool`, the result of every_n_step_end, if that was called this step, or `False` otherwise. """ super(EveryN, self).step_end(step, output) if self._every_n_step_begin_called: return self.every_n_step_end(step, output) return False def post_step(self, step, session): super(EveryN, self).post_step(step, session) if self._every_n_step_begin_called: self.every_n_post_step(step, session) self._last_active_step = step self._last_successful_step = step def end(self, session=None): super(EveryN, self).end(session=session) if self._last_successful_step != self._last_active_step: self.every_n_post_step(self._last_successful_step, session) class StopAtStep(BaseMonitor): """Monitor to request stop at a specified step.""" def __init__(self, num_steps=None, last_step=None): """Create a StopAtStep monitor. This monitor requests stop after either a number of steps have been executed or a last step has been reached. Only of the two options can be specified. if `num_steps` is specified, it indicates the number of steps to execute after `begin()` is called. If instead `last_step` is specified, it indicates the last step we want to execute, as passed to the `step_begin()` call. Args: num_steps: Number of steps to execute. last_step: Step after which to stop. Raises: ValueError: If one of the arguments is invalid. """ super(StopAtStep, self).__init__() if num_steps is None and last_step is None: raise ValueError("One of num_steps or last_step must be specified.") if num_steps is not None and last_step is not None: raise ValueError("Only one of num_steps or last_step can be specified.") self._num_steps = num_steps self._last_step = last_step @property def run_on_all_workers(self): return True def step_begin(self, step): super(StopAtStep, self).step_begin(step) if self._last_step is None: self._last_step = step + self._num_steps - 1 return [] def step_end(self, step, output): super(StopAtStep, self).step_end(step, output) return step >= self._last_step # TODO(ptucker): Rename to LoggingTensor since it's not writing to stdout. class PrintTensor(EveryN): """Prints given tensors every N steps. This is an `EveryN` monitor and has consistent semantic for `every_n` and `first_n`. The tensors will be printed to the log, with `INFO` severity. """ def __init__(self, tensor_names, every_n=100, first_n=1): """Initializes a PrintTensor monitor. Args: tensor_names: `dict` of tag to tensor names or `iterable` of tensor names (strings). every_n: `int`, print every N steps. 
      first_n: `int`, also print the first N steps. See `PrintN.`
    """
    super(PrintTensor, self).__init__(every_n, first_n)
    if not isinstance(tensor_names, dict):
      tensor_names = {item: item for item in tensor_names}
    self._tensor_names = tensor_names

  def every_n_step_begin(self, step):
    super(PrintTensor, self).every_n_step_begin(step)
    return list(self._tensor_names.values())

  def every_n_step_end(self, step, outputs):
    super(PrintTensor, self).every_n_step_end(step, outputs)
    stats = []
    for tag, tensor_name in six.iteritems(self._tensor_names):
      if tensor_name in outputs:
        stats.append("%s = %s" % (tag,
                                  str(_extract_output(outputs, tensor_name))))
    logging.info("Step %d: %s", step, ", ".join(stats))


class LoggingTrainable(EveryN):
  """Writes trainable variable values into log every N steps.

  Write the tensors in trainable variables `every_n` steps,
  starting with the `first_n`th step.
  """

  def __init__(self, scope=None, every_n=100, first_n=1):
    """Initializes LoggingTrainable monitor.

    Args:
      scope: An optional string to match variable names using re.match.
      every_n: Print every N steps.
      first_n: Print first N steps.
    """
    super(LoggingTrainable, self).__init__(every_n, first_n)
    self._scope = scope

  def every_n_step_begin(self, step):
    super(LoggingTrainable, self).every_n_step_begin(step)
    # Get a list of trainable variables at the beginning of every N steps.
    # We cannot get this in __init__ because train_op has not been generated.
    trainables = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES,
                                    scope=self._scope)
    self._names = {}
    for var in trainables:
      self._names[var.name] = var.value().name
    return list(self._names.values())

  def every_n_step_end(self, step, outputs):
    super(LoggingTrainable, self).every_n_step_end(step, outputs)
    stats = []
    for tag, tensor_name in six.iteritems(self._names):
      if tensor_name in outputs:
        stats.append("%s = %s" % (tag,
                                  str(_extract_output(outputs, tensor_name))))
    logging.info("Logging Trainable: Step %d: %s", step, ", ".join(stats))


class SummarySaver(EveryN):
  """Saves summaries every N steps."""

  def __init__(self,
               summary_op,
               save_steps=100,
               output_dir=None,
               summary_writer=None,
               scaffold=None):
    """Initializes a `SummarySaver` monitor.

    Args:
      summary_op: `Tensor` of type `string`. A serialized `Summary` protocol
        buffer, as output by TF summary methods like `summary.scalar` or
        `summary.merge_all`.
      save_steps: `int`, save summaries every N steps. See `EveryN`.
      output_dir: `string`, the directory to save the summaries to. Only used
        if no `summary_writer` is supplied.
      summary_writer: `SummaryWriter`. If `None` and an `output_dir` was
        passed, one will be created accordingly.
      scaffold: `Scaffold` to get summary_op if it's not provided.
    """
    # TODO(ipolosukhin): Implement every N seconds.
    super(SummarySaver, self).__init__(every_n_steps=save_steps)
    self._summary_op = summary_op
    self._summary_writer = summary_writer
    if summary_writer is None and output_dir:
      self._summary_writer = summary_io.SummaryWriter(output_dir)
    self._scaffold = scaffold
    # TODO(mdan): Throw an error if output_dir and summary_writer are None.

  def set_estimator(self, estimator):
    super(SummarySaver, self).set_estimator(estimator)
    # TODO(mdan): This line looks redundant.
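    # (Descriptive note, not in the original source: the writer is resolved
    # lazily -- an explicit `summary_writer` wins, then `output_dir` in
    # __init__, and finally the estimator's model_dir here.)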
    if self._summary_writer is None:
      self._summary_writer = summary_io.SummaryWriter(estimator.model_dir)

  def every_n_step_begin(self, step):
    super(SummarySaver, self).every_n_step_begin(step)
    if self._summary_op is None and self._scaffold is not None:
      self._summary_op = self._scaffold.summary_op
    if self._summary_op is not None:
      return [self._summary_op]
    return []

  def every_n_step_end(self, step, outputs):
    super(SummarySaver, self).every_n_step_end(step, outputs)
    if self._summary_op is not None:
      summary_strs = _extract_output(outputs, self._summary_op)
      if self._summary_writer:
        self._summary_writer.add_summary(summary_strs, step)
    return False

  def end(self, session=None):
    super(SummarySaver, self).end(session=session)
    if self._summary_writer:
      self._summary_writer.flush()


class ValidationMonitor(EveryN):
  """Runs evaluation of a given estimator, at most every N steps.

  Note that the evaluation is done based on the saved checkpoint, which will
  usually be older than the current step.

  Can do early stopping on validation metrics if `early_stopping_rounds` is
  provided.
  """

  def __init__(self, x=None, y=None, input_fn=None, batch_size=None,
               eval_steps=None,
               every_n_steps=100, metrics=None, early_stopping_rounds=None,
               early_stopping_metric="loss",
               early_stopping_metric_minimize=True, name=None):
    """Initializes a ValidationMonitor.

    Args:
      x: See `BaseEstimator.evaluate`.
      y: See `BaseEstimator.evaluate`.
      input_fn: See `BaseEstimator.evaluate`.
      batch_size: See `BaseEstimator.evaluate`.
      eval_steps: See `BaseEstimator.evaluate`.
      every_n_steps: Check for new checkpoints to evaluate every N steps. If a
        new checkpoint is found, it is evaluated. See `EveryN`.
      metrics: See `BaseEstimator.evaluate`.
      early_stopping_rounds: `int`. If the metric indicated by
        `early_stopping_metric` does not change according to
        `early_stopping_metric_minimize` for this many steps, then training
        will be stopped.
      early_stopping_metric: `string`, name of the metric to check for early
        stopping.
      early_stopping_metric_minimize: `bool`, True if `early_stopping_metric`
        is expected to decrease (thus early stopping occurs when this metric
        stops decreasing), False if `early_stopping_metric` is expected to
        increase. Typically, `early_stopping_metric_minimize` is True for
        loss metrics like mean squared error, and False for performance
        metrics like accuracy.
      name: See `BaseEstimator.evaluate`.

    Raises:
      ValueError: If neither x nor input_fn is provided.
    """
    super(ValidationMonitor, self).__init__(every_n_steps=every_n_steps,
                                            first_n_steps=-1)
    # TODO(mdan): Checks like this are already done by evaluate.
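    # (Descriptive note, not in the original source: failing fast here
    # surfaces a missing data source at construction time, before any
    # training step has run.)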
    if x is None and input_fn is None:
      raise ValueError("Either x or input_fn should be provided.")
    self.x = x
    self.y = y
    self.input_fn = input_fn
    self.batch_size = batch_size
    self.eval_steps = eval_steps
    self.metrics = metrics
    self.early_stopping_rounds = early_stopping_rounds
    self.early_stopping_metric = early_stopping_metric
    self.early_stopping_metric_minimize = early_stopping_metric_minimize
    self.name = name
    self._best_value_step = None
    self._best_value = None
    self._early_stopped = False
    self._latest_path = None
    self._latest_path_step = None

  @property
  def early_stopped(self):
    """Returns True if this monitor caused an early stop."""
    return self._early_stopped

  @property
  def best_step(self):
    """Returns the step at which the best early stopping metric was found."""
    return self._best_value_step

  @property
  def best_value(self):
    """Returns the best early stopping metric value found so far."""
    return self._best_value

  def every_n_step_end(self, step, outputs):
    super(ValidationMonitor, self).every_n_step_end(step, outputs)
    # TODO(mdan): The use of step below is probably misleading.
    # The code should probably use the step from the checkpoint, because
    # that's what is being evaluated.
    if self._estimator is None:
      raise ValueError("Missing call to set_estimator.")
    # Check that we are not running evaluation on the same checkpoint.
    latest_path = saver_lib.latest_checkpoint(self._estimator.model_dir)
    if latest_path is None:
      logging.debug("Skipping evaluation since model has not been saved yet "
                    "at step %d.", step)
      return False
    if latest_path is not None and latest_path == self._latest_path:
      logging.debug("Skipping evaluation due to same checkpoint %s for step "
                    "%d as for step %d.", latest_path, step,
                    self._latest_path_step)
      return False
    self._latest_path = latest_path
    self._latest_path_step = step

    # Run evaluation and log it.
    validation_outputs = self._estimator.evaluate(
        x=self.x, y=self.y, input_fn=self.input_fn,
        batch_size=self.batch_size, steps=self.eval_steps,
        metrics=self.metrics, name=self.name)
    stats = []
    for name in validation_outputs:
      stats.append("%s = %s" % (name, str(validation_outputs[name])))
    logging.info("Validation (step %d): %s", step, ", ".join(stats))

    # Early stopping logic.
    if self.early_stopping_rounds is not None:
      if self.early_stopping_metric not in validation_outputs:
        raise ValueError("Metric %s missing from outputs %s." % (
            self.early_stopping_metric, set(validation_outputs.keys())))
      current_value = validation_outputs[self.early_stopping_metric]
      if (self._best_value is None or
          (self.early_stopping_metric_minimize and
           (current_value < self._best_value)) or
          (not self.early_stopping_metric_minimize and
           (current_value > self._best_value))):
        self._best_value = current_value
        self._best_value_step = step
      stop_now = (step - self._best_value_step >= self.early_stopping_rounds)
      if stop_now:
        logging.info("Stopping. Best step: {} with {} = {}."
                     .format(self._best_value_step,
                             self.early_stopping_metric, self._best_value))
        self._early_stopped = True
        return True
    return False


# TODO(ptucker): This really reads any tensor, not just vars, and requires the
# ':0' suffix on var_name.
class CaptureVariable(EveryN):
  """Captures a variable's values into a collection.

  This monitor is useful for unit testing. You should exercise caution when
  using this monitor in production, since it never discards values.

  This is an `EveryN` monitor and has consistent semantics for `every_n` and
  `first_n`.
  """

  def __init__(self, var_name, every_n=100, first_n=1):
    """Initializes a CaptureVariable monitor.
    Args:
      var_name: `string`. The variable name, including suffix (typically
        ":0").
      every_n: `int`, print every N steps. See `PrintN.`
      first_n: `int`, also print the first N steps. See `PrintN.`
    """
    super(CaptureVariable, self).__init__(every_n, first_n)
    self._var_name = var_name
    self._var_values = {}

  @property
  def values(self):
    """Returns the values captured so far.

    Returns:
      `dict` mapping `int` step numbers to the values of the variable at the
      respective step.
    """
    return self._var_values

  def every_n_step_begin(self, step):
    super(CaptureVariable, self).every_n_step_begin(step)
    return [self._var_name]

  def every_n_step_end(self, step, outputs):
    super(CaptureVariable, self).every_n_step_end(step, outputs)
    self._var_values[step] = _extract_output(outputs, self._var_name)


def get_default_monitors(loss_op=None, summary_op=None, save_summary_steps=100,
                         output_dir=None, summary_writer=None):
  """Returns a default set of typically-used monitors.

  Args:
    loss_op: `Tensor`, the loss tensor. This will be printed using
      `PrintTensor` at the default interval.
    summary_op: See `SummarySaver`.
    save_summary_steps: See `SummarySaver`.
    output_dir: See `SummarySaver`.
    summary_writer: See `SummarySaver`.
  Returns:
    `list` of monitors.
  """
  monitors = []
  if loss_op is not None:
    monitors.append(PrintTensor(tensor_names={"loss": loss_op.name}))
  if summary_op is not None:
    monitors.append(SummarySaver(summary_op, save_steps=save_summary_steps,
                                 output_dir=output_dir,
                                 summary_writer=summary_writer))
  return monitors


class GraphDump(BaseMonitor):
  """Dumps almost all tensors in the graph at every step.

  Note, this is very expensive, prefer `PrintTensor` in production.
  """

  IGNORE_OPS = ["Const", "Assign", "Identity", "Placeholder",
                "RandomUniform", "Cast", "RestoreSlice"]

  def __init__(self, ignore_ops=None):
    """Initializes GraphDump monitor.

    Args:
      ignore_ops: `list` of `string`. Names of ops to ignore.
        If None, `GraphDump.IGNORE_OPS` is used.
    """
    super(GraphDump, self).__init__()
    self._ignore_ops = ignore_ops or GraphDump.IGNORE_OPS
    self._data = {}

  def begin(self, max_steps=None):
    super(GraphDump, self).begin(max_steps=max_steps)
    self._tensors = []
    graph = ops.get_default_graph()
    graph_def = graph.as_graph_def()
    for node in graph_def.node:
      if node.op in self._ignore_ops:
        continue
      logging.info("op=%s name=%s.", node.op, node.name)
      try:
        self._tensors.append(graph.get_tensor_by_name(node.name + ":0"))
      except KeyError:
        pass

  def step_begin(self, step):
    super(GraphDump, self).step_begin(step)
    return self._tensors

  def step_end(self, step, output):
    super(GraphDump, self).step_end(step, output)
    self._data[step] = output

  @property
  def data(self):
    return self._data

  # TODO(ptucker): Handle keys that are in one but not the other.
  def compare(self, other_dump, step, atol=1e-06):
    """Compares two `GraphDump` monitors and returns differences.

    Args:
      other_dump: Another `GraphDump` monitor.
      step: `int`, step to compare on.
      atol: `float`, absolute tolerance in comparison of floating arrays.

    Returns:
      Returns tuple:
        matched: `list` of keys that matched.
        non_matched: `dict` of keys to tuple of 2 mismatched values.

    Raises:
      ValueError: if a key in `data` is missing from `other_dump` at `step`.
""" non_matched = {} matched = [] this_output = self.data[step] if step in self.data else {} other_output = other_dump.data[step] if step in other_dump.data else {} for key in this_output: if not isinstance(key, str) and not isinstance(key, unicode): continue if key not in other_output: raise ValueError("%s missing at step %s.", (key, step)) value1 = _extract_output(this_output, key) value2 = _extract_output(other_output, key) if isinstance(value1, str): continue if isinstance(value1, np.ndarray): if not np.allclose(value1, value2, atol=atol): non_matched[key] = value1 - value2 else: matched.append(key) else: if value1 != value2: non_matched[key] = (value1, value2) else: matched.append(key) return matched, non_matched class ExportMonitor(EveryN): """Monitor that exports Estimator every N steps.""" # TODO(philstahlfeld): Investigate switching export.export_estimator # configuration values to **kwargs so that updates to the export_estimator # function don't have to be reflected here. @deprecated_arg_values( "2016-09-23", "The signature of the input_fn accepted by export is changing to be " "consistent with what's used by tf.Learn Estimator's train/evaluate. " "input_fn (and in most cases, input_feature_key) will both become " "required args.", input_fn=None) def __init__(self, every_n_steps, export_dir, input_fn=None, input_feature_key=None, exports_to_keep=5, signature_fn=None, default_batch_size=1): """Initializes ExportMonitor. Args: every_n_steps: Run monitor every N steps. export_dir: str, folder to export. input_fn: A function that takes no argument and returns a tuple of (features, labels), where features is a dict of string key to `Tensor` and labels is a `Tensor` that's currently not used (and so can be `None`). input_feature_key: String key into the features dict returned by `input_fn` that corresponds to the raw `Example` strings `Tensor` that the exported model will take as input. Can only be `None` if you're using a custom `signature_fn` that does not use the first arg (examples). exports_to_keep: int, number of exports to keep. signature_fn: Function that returns a default signature and a named signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s for features and `dict` of `Tensor`s for predictions. default_batch_size: Default batch size of the `Example` placeholder. Raises: ValueError: If `input_fn` and `input_feature_key` are not both defined or are not both `None`. """ super(ExportMonitor, self).__init__(every_n_steps=every_n_steps) self._export_dir = export_dir self._input_fn = input_fn self._input_feature_key = input_feature_key self._use_deprecated_input_fn = input_fn is None self._exports_to_keep = exports_to_keep self._signature_fn = signature_fn self._default_batch_size = default_batch_size self._last_export_dir = None @property def export_dir(self): return self._export_dir @property def exports_to_keep(self): return self._exports_to_keep @property def signature_fn(self): return self._signature_fn @property def last_export_dir(self): """Returns the directory containing the last completed export. Returns: The string path to the exported directory. NB: this functionality was added on 2016/09/25; clients that depend on the return value may need to handle the case where this function returns None because the estimator being fitted does not yet return a value during export. 
""" return self._last_export_dir def every_n_step_end(self, step, outputs): super(ExportMonitor, self).every_n_step_end(step, outputs) try: self._last_export_dir = self._estimator.export( self.export_dir, exports_to_keep=self.exports_to_keep, signature_fn=self.signature_fn, input_fn=self._input_fn, default_batch_size=self._default_batch_size, input_feature_key=self._input_feature_key, use_deprecated_input_fn=self._use_deprecated_input_fn) except RuntimeError: # Currently we are not syncronized with saving checkpoints, which leads to # runtime errors when we are calling export on the same global step. # Exports depend on saved checkpoints for constructing the graph and # getting the global step from the graph instance saved in the checkpoint. # If the checkpoint is stale with respect to current step, the global step # is taken to be the last saved checkpoint's global step and exporter # doesn't export the same checkpoint again with the following error. logging.info("Skipping exporting because the existing checkpoint has " "already been exported. " "Consider exporting less frequently.") def end(self, session=None): super(ExportMonitor, self).end(session=session) latest_path = saver_lib.latest_checkpoint(self._estimator.model_dir) if latest_path is None: logging.info("Skipping export at the end since model has not been saved " "yet.") return try: self._last_export_dir = self._estimator.export( self.export_dir, exports_to_keep=self.exports_to_keep, signature_fn=self.signature_fn, input_fn=self._input_fn, default_batch_size=self._default_batch_size, input_feature_key=self._input_feature_key, use_deprecated_input_fn=self._use_deprecated_input_fn) except RuntimeError: logging.info("Skipping exporting for the same step.") class CheckpointSaver(BaseMonitor): """Saves checkpoints every N steps.""" def __init__(self, checkpoint_dir, save_secs=None, save_steps=None, saver=None, checkpoint_basename="model.ckpt", scaffold=None): """Initialize CheckpointSaver monitor. Args: checkpoint_dir: `str`, base directory for the checkpoint files. save_secs: `int`, save every N secs. save_steps: `int`, save every N steps. saver: `Saver` object, used for saving. checkpoint_basename: `str`, base name for the checkpoint files. scaffold: `Scaffold`, use to get saver object. Raises: ValueError: If both `save_steps` and `save_secs` are not `None`. ValueError: If both `save_steps` and `save_secs` are `None`. 
""" logging.info("Create CheckpointSaver.") super(CheckpointSaver, self).__init__() self._saver = saver self._summary_writer = SummaryWriterCache.get(checkpoint_dir) self._save_path = os.path.join(checkpoint_dir, checkpoint_basename) self._scaffold = scaffold self._save_secs = save_secs self._save_steps = save_steps self._last_saved_time = None self._last_begin_step = None self._last_saved_step = None if save_steps is None and save_secs is None: raise ValueError("Either save_steps or save_secs should be provided") if (save_steps is not None) and (save_secs is not None): raise ValueError("Can not provide both save_steps and save_secs.") def begin(self, max_steps=None): super(CheckpointSaver, self).begin(max_steps) self._last_saved_time = None self._last_begin_step = None self._last_saved_step = None def step_begin(self, step): super(CheckpointSaver, self).step_begin(step) self._last_begin_step = step def post_step(self, step, session): super(CheckpointSaver, self).post_step(step, session) if self._last_saved_time is None: self._save(step, session) if self._save_steps is not None: if step >= self._last_saved_step + self._save_steps: self._save(step, session) if self._save_secs is not None: if time.time() >= self._last_saved_time + self._save_secs: self._save(step, session) def end(self, session=None): super(CheckpointSaver, self).end(session) self._save(self._last_begin_step, session) def _save(self, step, session): """Saves the latest checkpoint.""" if step == self._last_saved_step: return logging.info("Saving checkpoints for %d into %s.", step, self._save_path) self._last_saved_time = time.time() self._last_saved_step = step if self._saver is None: self._scaffold.saver.save(session, self._save_path, global_step=step) else: self._saver.save(session, self._save_path, global_step=step) self._summary_writer.add_session_log( SessionLog( status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path), step) class StepCounter(EveryN): """Steps per second monitor.""" def __init__(self, every_n_steps=100, output_dir=None, summary_writer=None): super(StepCounter, self).__init__(every_n_steps=every_n_steps) self._summary_tag = "global_step/sec" self._last_reported_step = None self._last_reported_time = None self._summary_writer = summary_writer if summary_writer is None and output_dir: self._summary_writer = SummaryWriterCache.get(output_dir) def set_estimator(self, estimator): super(StepCounter, self).set_estimator(estimator) if self._summary_writer is None: self._summary_writer = SummaryWriterCache.get(estimator.model_dir) def every_n_step_end(self, current_step, outputs): current_time = time.time() if self._last_reported_time is not None and self._summary_writer: added_steps = current_step - self._last_reported_step elapsed_time = current_time - self._last_reported_time steps_per_sec = added_steps / elapsed_time summary = Summary(value=[Summary.Value(tag=self._summary_tag, simple_value=steps_per_sec)]) self._summary_writer.add_summary(summary, current_step) self._last_reported_step = current_step self._last_reported_time = current_time class NanLossDuringTrainingError(RuntimeError): def __str__(self): return "NaN loss during training." class NanLoss(EveryN): """NaN Loss monitor. Monitors loss and stops training if loss is NaN. Can either fail with exception or just stop training. """ def __init__(self, loss_tensor, every_n_steps=100, fail_on_nan_loss=True): """Initializes NanLoss monitor. Args: loss_tensor: `Tensor`, the loss tensor. every_n_steps: `int`, run check every this many steps. 
      fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN.
    """
    super(NanLoss, self).__init__(every_n_steps=every_n_steps)
    self._loss_tensor = loss_tensor
    self._fail_on_nan_loss = fail_on_nan_loss

  def every_n_step_begin(self, step):
    super(NanLoss, self).every_n_step_begin(step)
    return [self._loss_tensor]

  def every_n_step_end(self, step, outputs):
    super(NanLoss, self).every_n_step_end(step, outputs)
    if np.isnan(_extract_output(outputs, self._loss_tensor)):
      failure_message = "Model diverged with loss = NaN."
      if self._fail_on_nan_loss:
        logging.error(failure_message)
        raise NanLossDuringTrainingError
      else:
        logging.warning(failure_message)
        # We don't raise an error but we return "should stop" so we stop, but
        # without an exception.
        return True


class RunHookAdapterForMonitors(session_run_hook.SessionRunHook):
  """Wraps monitors into a SessionRunHook."""

  def __init__(self, monitors):
    self._monitors = monitors

  def begin(self):
    self._last_step = None
    self._global_step_tensor = contrib_variables.get_global_step()
    for m in self._monitors:
      m.begin(max_steps=None)

  def before_run(self, run_context):
    if self._last_step is None:
      self._last_step = run_context.session.run(self._global_step_tensor) + 1

    request = {self._global_step_tensor: self._global_step_tensor}
    monitor_fetches = []
    for m in self._monitors:
      monitor_requests = m.step_begin(self._last_step)
      if monitor_requests:
        if not isinstance(monitor_requests, list):
          raise ValueError("Monitor.step_begin should return a list.")
        monitor_fetches.extend(monitor_requests)
    if monitor_fetches:
      request["monitors"] = dict(
          zip(monitor_fetches,
              [_as_graph_element(f) for f in monitor_fetches]))

    return session_run_hook.SessionRunArgs(request)

  def after_run(self, run_context, run_values):
    result = run_values.results[
        "monitors"] if "monitors" in run_values.results else {}
    for m in self._monitors:
      induce_stop = m.step_end(self._last_step, result)
      if induce_stop:
        run_context.request_stop()

    for m in self._monitors:
      m.post_step(self._last_step, run_context.session)

    self._last_step = run_values.results[self._global_step_tensor] + 1

  def end(self, session):
    self._last_step = None
    for m in self._monitors:
      if "session" in inspect.getargspec(m.end).args:
        m.end(session=session)
      else:
        m.end()


def replace_monitors_with_hooks(monitors_or_hooks, estimator):
  """Wraps monitors with a hook.

  `Monitor` is deprecated in favor of `SessionRunHook`. If you're using a
  monitor, you can wrap it with a hook using this function. It is recommended
  to implement a hook version of your monitor.

  Args:
    monitors_or_hooks: A `list` that may contain both monitors and hooks.
    estimator: An `Estimator` that the monitors will be used with.

  Returns:
    Returns a list of hooks. If there is any monitor in the given list, it is
    replaced by a hook.
  """
  monitors_or_hooks = monitors_or_hooks or []
  hooks = [
      m for m in monitors_or_hooks
      if isinstance(m, session_run_hook.SessionRunHook)
  ]

  deprecated_monitors = [
      m for m in monitors_or_hooks
      if not isinstance(m, session_run_hook.SessionRunHook)
  ]

  if not estimator.config.is_chief:
    # Prune the list of monitors to the ones runnable on all workers.
    deprecated_monitors = [
        m for m in deprecated_monitors if m.run_on_all_workers
    ]

  # Setup monitors.
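  # (Descriptive note, not in the original source: each remaining monitor
  # needs a reference to the estimator before the whole list can be adapted
  # into a single RunHookAdapterForMonitors hook below.)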
for monitor in deprecated_monitors: monitor.set_estimator(estimator) if deprecated_monitors: hooks.append(RunHookAdapterForMonitors(deprecated_monitors)) return hooks def _as_graph_element(obj): """Retrieves Graph element.""" graph = ops.get_default_graph() if not isinstance(obj, six.string_types): if not hasattr(obj, "graph") or obj.graph != graph: raise ValueError("Passed %s should have graph attribute that is equal " "to current graph %s." % (obj, graph)) return obj if ":" in obj: element = graph.as_graph_element(obj) else: element = graph.as_graph_element(obj + ":0") # Check that there is no :1 (e.g. it's single output). try: graph.as_graph_element(obj + ":1") except (KeyError, ValueError): pass else: raise ValueError("Name %s is ambiguous, " "as this `Operation` has multiple outputs " "(at least 2)." % obj) return element
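A minimal usage sketch for the monitors defined above (not part of the original file). `my_estimator`, `train_input_fn` and `eval_input_fn` are hypothetical placeholders, and the tensor name "loss:0" is illustrative; it shows the deprecated-monitor workflow this module supports: build monitors, then adapt them to `SessionRunHook`s.

# Hedged sketch; assumes the monitor classes above are importable and that
# `my_estimator` is a tf.contrib.learn-style estimator with a `config` field.
monitors = [
    PrintTensor(tensor_names={"loss": "loss:0"}, every_n=50),
    ValidationMonitor(input_fn=eval_input_fn, every_n_steps=200,
                      early_stopping_rounds=600,
                      early_stopping_metric="loss"),
    StopAtStep(num_steps=1000),
]
# Monitors are deprecated in favor of SessionRunHooks; this adapter wraps any
# remaining monitors into a single RunHookAdapterForMonitors hook.
hooks = replace_monitors_with_hooks(monitors, my_estimator)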
[ "tensorflow.python.platform.tf_logging.warning", "tensorflow.python.training.saver.latest_checkpoint", "tensorflow.contrib.framework.python.ops.variables.get_global_step", "tensorflow.contrib.learn.python.learn.summary_writer_cache.SummaryWriterCache.get", "tensorflow.contrib.learn.python.learn.session_run_hook.SessionRunArgs", "tensorflow.python.framework.ops.get_collection", "tensorflow.python.training.summary_io.SummaryWriter", "tensorflow.python.platform.tf_logging.debug", "tensorflow.python.platform.tf_logging.error", "numpy.allclose", "tensorflow.python.platform.tf_logging.info", "tensorflow.core.framework.summary_pb2.Summary.Value", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.core.util.event_pb2.SessionLog", "tensorflow.contrib.framework.deprecated_arg_values", "tensorflow.python.util.deprecation.deprecated" ]
tensorflow/contrib/learn/python/learn/monitors.py
[(123, 'tensorflow.python.util.deprecation.deprecated', 'deprecation.deprecated', (['"""2016-12-05"""', '"""Monitors are deprecated. Please use tf.train.SessionRunHook."""'], {}), False, 'from tensorflow.python.util import deprecation\n'), (903, 'tensorflow.contrib.framework.deprecated_arg_values', 'deprecated_arg_values', (['"""2016-09-23"""', '"""The signature of the input_fn accepted by export is changing to be consistent with what\'s used by tf.Learn Estimator\'s train/evaluate. input_fn (and in most cases, input_feature_key) will both become required args."""'], {'input_fn': 'None'}), False, 'from tensorflow.contrib.framework import deprecated_arg_values\n'), (1274, 'tensorflow.python.framework.ops.get_default_graph', 'ops.get_default_graph', ([], {}), False, 'from tensorflow.python.framework import ops\n'), (503, 'six.iteritems', 'six.iteritems', (['self._tensor_names'], {}), False, 'import six\n'), (533, 'tensorflow.python.framework.ops.get_collection', 'ops.get_collection', (['ops.GraphKeys.TRAINABLE_VARIABLES'], {'scope': 'self._scope'}), False, 'from tensorflow.python.framework import ops\n'), (543, 'six.iteritems', 'six.iteritems', (['self._names'], {}), False, 'import six\n'), (696, 'tensorflow.python.training.saver.latest_checkpoint', 'saver_lib.latest_checkpoint', (['self._estimator.model_dir'], {}), True, 'from tensorflow.python.training import saver as saver_lib\n'), (831, 'tensorflow.python.framework.ops.get_default_graph', 'ops.get_default_graph', ([], {}), False, 'from tensorflow.python.framework import ops\n'), (1001, 'tensorflow.python.training.saver.latest_checkpoint', 'saver_lib.latest_checkpoint', (['self._estimator.model_dir'], {}), True, 'from tensorflow.python.training import saver as saver_lib\n'), (1043, 'tensorflow.python.platform.tf_logging.info', 'logging.info', (['"""Create CheckpointSaver."""'], {}), True, 'from tensorflow.python.platform import tf_logging as logging\n'), (1046, 'tensorflow.contrib.learn.python.learn.summary_writer_cache.SummaryWriterCache.get', 'SummaryWriterCache.get', (['checkpoint_dir'], {}), False, 'from tensorflow.contrib.learn.python.learn.summary_writer_cache import SummaryWriterCache\n'), (1047, 'os.path.join', 'os.path.join', (['checkpoint_dir', 'checkpoint_basename'], {}), False, 'import os\n'), (1091, 'tensorflow.python.platform.tf_logging.info', 'logging.info', (['"""Saving checkpoints for %d into %s."""', 'step', 'self._save_path'], {}), True, 'from tensorflow.python.platform import tf_logging as logging\n'), (1092, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (1123, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (1186, 'tensorflow.contrib.framework.python.ops.variables.get_global_step', 'contrib_variables.get_global_step', ([], {}), True, 'from tensorflow.contrib.framework.python.ops import variables as contrib_variables\n'), (1206, 'tensorflow.contrib.learn.python.learn.session_run_hook.SessionRunArgs', 'session_run_hook.SessionRunArgs', (['request'], {}), False, 'from tensorflow.contrib.learn.python.learn import session_run_hook\n'), (577, 'tensorflow.python.training.summary_io.SummaryWriter', 'summary_io.SummaryWriter', (['output_dir'], {}), False, 'from tensorflow.python.training import summary_io\n'), (585, 'tensorflow.python.training.summary_io.SummaryWriter', 'summary_io.SummaryWriter', (['estimator.model_dir'], {}), False, 'from tensorflow.python.training import summary_io\n'), (698, 'tensorflow.python.platform.tf_logging.debug', 'logging.debug', (['"""Skipping evaluation since model has 
not been saved yet at step %d."""', 'step'], {}), True, 'from tensorflow.python.platform import tf_logging as logging\n'), (702, 'tensorflow.python.platform.tf_logging.debug', 'logging.debug', (['"""Skipping evaluation due to same checkpoint %s for step %d as for step %d."""', 'latest_path', 'step', 'self._latest_path_step'], {}), True, 'from tensorflow.python.platform import tf_logging as logging\n'), (836, 'tensorflow.python.platform.tf_logging.info', 'logging.info', (['"""op=%s name=%s."""', 'node.op', 'node.name'], {}), True, 'from tensorflow.python.platform import tf_logging as logging\n'), (1003, 'tensorflow.python.platform.tf_logging.info', 'logging.info', (['"""Skipping export at the end since model has not been saved yet."""'], {}), True, 'from tensorflow.python.platform import tf_logging as logging\n'), (1099, 'tensorflow.core.util.event_pb2.SessionLog', 'SessionLog', ([], {'status': 'SessionLog.CHECKPOINT', 'checkpoint_path': 'self._save_path'}), False, 'from tensorflow.core.util.event_pb2 import SessionLog\n'), (1115, 'tensorflow.contrib.learn.python.learn.summary_writer_cache.SummaryWriterCache.get', 'SummaryWriterCache.get', (['output_dir'], {}), False, 'from tensorflow.contrib.learn.python.learn.summary_writer_cache import SummaryWriterCache\n'), (1120, 'tensorflow.contrib.learn.python.learn.summary_writer_cache.SummaryWriterCache.get', 'SummaryWriterCache.get', (['estimator.model_dir'], {}), False, 'from tensorflow.contrib.learn.python.learn.summary_writer_cache import SummaryWriterCache\n'), (995, 'tensorflow.python.platform.tf_logging.info', 'logging.info', (['"""Skipping exporting because the existing checkpoint has already been exported. Consider exporting less frequently."""'], {}), True, 'from tensorflow.python.platform import tf_logging as logging\n'), (1016, 'tensorflow.python.platform.tf_logging.info', 'logging.info', (['"""Skipping exporting for the same step."""'], {}), True, 'from tensorflow.python.platform import tf_logging as logging\n'), (1080, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (1169, 'tensorflow.python.platform.tf_logging.error', 'logging.error', (['failure_message'], {}), True, 'from tensorflow.python.platform import tf_logging as logging\n'), (1172, 'tensorflow.python.platform.tf_logging.warning', 'logging.warning', (['failure_message'], {}), True, 'from tensorflow.python.platform import tf_logging as logging\n'), (885, 'numpy.allclose', 'np.allclose', (['value1', 'value2'], {'atol': 'atol'}), True, 'import numpy as np\n'), (1224, 'inspect.getargspec', 'inspect.getargspec', (['m.end'], {}), False, 'import inspect\n'), (1128, 'tensorflow.core.framework.summary_pb2.Summary.Value', 'Summary.Value', ([], {'tag': 'self._summary_tag', 'simple_value': 'steps_per_sec'}), False, 'from tensorflow.core.framework.summary_pb2 import Summary\n')]
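The `EveryN` docstring above insists that subclasses call the super implementation of any callback they override; a minimal sketch of that contract (the class name, tensor name, and use of `print` are illustrative, not from the original file):

class SimpleLossLogger(EveryN):
  """Logs a named tensor every N steps, following the EveryN contract."""

  def __init__(self, loss_name="loss:0", every_n_steps=100):
    super(SimpleLossLogger, self).__init__(every_n_steps=every_n_steps)
    self._loss_name = loss_name

  def every_n_step_begin(self, step):
    super(SimpleLossLogger, self).every_n_step_begin(step)
    return [self._loss_name]  # request this tensor's value for the step

  def every_n_step_end(self, step, outputs):
    super(SimpleLossLogger, self).every_n_step_end(step, outputs)
    print("step %d: loss = %s" % (step, outputs[self._loss_name]))
    return False  # never request early stopping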
richardliaw/scalable_agent
d24bd74bd53d454b7222b7f0bea57a358e4ca33e
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests dynamic_batching.py."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import datetime
from multiprocessing import pool
import time

# `xrange` is not a builtin on Python 3; pull it from six for compatibility.
from six.moves import xrange  # pylint: disable=redefined-builtin

import dynamic_batching

import tensorflow as tf


_SLEEP_TIME = 1.0


class DynamicBatchingTest(tf.test.TestCase):

  def test_one(self):
    with self.test_session() as session:
      @dynamic_batching.batch_fn
      def f(a, b):
        batch_size = tf.shape(a)[0]
        return a + b, tf.tile([batch_size], [batch_size])

      output = f(tf.constant([[1, 3]]), tf.constant([2]))

      tf.train.start_queue_runners()

      result, batch_size = session.run(output)

      self.assertAllEqual([[3, 5]], result)
      self.assertAllEqual([1], batch_size)

  def test_two(self):
    with self.test_session() as session:
      @dynamic_batching.batch_fn
      def f(a, b):
        batch_size = tf.shape(a)[0]
        return a + b, tf.tile([batch_size], [batch_size])

      output0 = f(tf.constant([1]), tf.constant([2]))
      output1 = f(tf.constant([2]), tf.constant([3]))

      tp = pool.ThreadPool(2)
      f0 = tp.apply_async(session.run, [output0])
      f1 = tp.apply_async(session.run, [output1])

      # Make sure both inputs are in the batcher before starting it.
      time.sleep(_SLEEP_TIME)

      tf.train.start_queue_runners()

      result0, batch_size0 = f0.get()
      result1, batch_size1 = f1.get()

      self.assertAllEqual([3], result0)
      self.assertAllEqual([2], batch_size0)
      self.assertAllEqual([5], result1)
      self.assertAllEqual([2], batch_size1)

  def test_many_small(self):
    with self.test_session() as session:
      @dynamic_batching.batch_fn
      def f(a, b):
        return a + b

      outputs = []
      for i in xrange(200):
        outputs.append(f(tf.fill([1, 5], i), tf.fill([1, 5], i)))

      tf.train.start_queue_runners()

      tp = pool.ThreadPool(10)
      futures = []
      for output in outputs:
        futures.append(tp.apply_async(session.run, [output]))

      for i, future in enumerate(futures):
        result = future.get()
        self.assertAllEqual([[i * 2] * 5], result)

  def test_input_batch_size_should_be_one(self):
    with self.test_session() as session:
      @dynamic_batching.batch_fn
      def f(a):
        return a

      output = f(tf.constant([1, 2]))

      coord = tf.train.Coordinator()
      tf.train.start_queue_runners(coord=coord)

      with self.assertRaises(tf.errors.CancelledError):
        session.run(output)

      with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                   'requires batch size 1'):
        coord.join()

  def test_run_after_error_should_be_cancelled(self):
    with self.test_session() as session:
      @dynamic_batching.batch_fn
      def f(a):
        return a

      output = f(tf.constant([1, 2]))

      coord = tf.train.Coordinator()
      tf.train.start_queue_runners(coord=coord)

      with self.assertRaises(tf.errors.CancelledError):
        session.run(output)

      with self.assertRaises(tf.errors.CancelledError):
        session.run(output)

  def test_input_shapes_should_be_equal(self):
    with self.test_session() as session:
      @dynamic_batching.batch_fn
      def f(a, b):
        return a + b

      output0 = f(tf.constant([1]), tf.constant([2]))
      output1 = f(tf.constant([[2]]), tf.constant([3]))

      tp = pool.ThreadPool(2)
      f0 = tp.apply_async(session.run, [output0])
      f1 = tp.apply_async(session.run,
[output1]) time.sleep(_SLEEP_TIME) coord = tf.train.Coordinator() tf.train.start_queue_runners(coord=coord) with self.assertRaises(tf.errors.CancelledError): f0.get() f1.get() with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, 'Shapes of inputs much be equal'): coord.join() def test_output_must_have_batch_dimension(self): with self.test_session() as session: @dynamic_batching.batch_fn def f(_): return tf.constant(1) output = f(tf.constant([1])) coord = tf.train.Coordinator() tf.train.start_queue_runners(coord=coord) with self.assertRaises(tf.errors.CancelledError): session.run(output) with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, 'Output shape must have a batch dimension'): coord.join() def test_output_must_have_same_batch_dimension_size_as_input(self): with self.test_session() as session: @dynamic_batching.batch_fn def f(_): return tf.constant([1, 2, 3, 4]) output = f(tf.constant([1])) coord = tf.train.Coordinator() tf.train.start_queue_runners(coord=coord) with self.assertRaises(tf.errors.CancelledError): session.run(output) with self.assertRaisesRegexp( tf.errors.InvalidArgumentError, 'Output shape must have the same batch dimension as the input batch ' 'size. Expected: 1 Observed: 4'): coord.join() def test_get_inputs_cancelled(self): with tf.Graph().as_default(): @dynamic_batching.batch_fn def f(a): return a f(tf.constant([1])) # Intentionally using tf.Session() instead of self.test_session() to have # control over closing the session. test_session() is a cached session. with tf.Session(): coord = tf.train.Coordinator() tf.train.start_queue_runners(coord=coord) # Sleep to make sure the queue runner has started the first run call. time.sleep(_SLEEP_TIME) # Session closed. coord.request_stop() coord.join() def test_batcher_closed(self): with tf.Graph().as_default(): @dynamic_batching.batch_fn def f(a): return a f(tf.constant([1])) # Intentionally using tf.Session() instead of self.test_session() to have # control over closing the session. test_session() is a cached session. with tf.Session(): coord = tf.train.Coordinator() tf.train.start_queue_runners(coord=coord) time.sleep(_SLEEP_TIME) coord.request_stop() # Calls close operation. coord.join() # Session closed. def test_minimum_batch_size(self): with self.test_session() as session: @dynamic_batching.batch_fn_with_options( minimum_batch_size=2, timeout_ms=1000) def f(a, b): batch_size = tf.shape(a)[0] return a + b, tf.tile([batch_size], [batch_size]) output = f(tf.constant([[1, 3]]), tf.constant([2])) tf.train.start_queue_runners() start = datetime.datetime.now() session.run(output) duration = datetime.datetime.now() - start # There should have been a timeout here because only one sample was added # and the minimum batch size is 2. self.assertLessEqual(.9, duration.total_seconds()) self.assertGreaterEqual(1.5, duration.total_seconds()) outputs = [ f(tf.constant([[1, 3]]), tf.constant([2])), f(tf.constant([[1, 3]]), tf.constant([2])) ] start = datetime.datetime.now() (_, batch_size), _ = session.run(outputs) duration = datetime.datetime.now() - start # The outputs should be executed immediately because two samples are # added. 
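      # (Descriptive note, not in the original test: 0.5s is a loose upper
      # bound here because, with two pending samples, the minimum batch size
      # of 2 is met and the 1000ms timeout should never elapse.)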
      self.assertGreaterEqual(.5, duration.total_seconds())
      self.assertEqual(2, batch_size)

  def test_maximum_batch_size(self):
    with self.test_session() as session:
      @dynamic_batching.batch_fn_with_options(maximum_batch_size=2)
      def f(a, b):
        batch_size = tf.shape(a)[0]
        return a + b, tf.tile([batch_size], [batch_size])

      outputs = [
          f(tf.constant([1]), tf.constant([2])),
          f(tf.constant([1]), tf.constant([2])),
          f(tf.constant([1]), tf.constant([2])),
          f(tf.constant([1]), tf.constant([2])),
          f(tf.constant([1]), tf.constant([2])),
      ]

      tf.train.start_queue_runners()

      results = session.run(outputs)

      for value, batch_size in results:
        self.assertEqual(3, value)
        self.assertGreaterEqual(2, batch_size)

  def test_static_shape(self):
    assertions_triggered = [0]

    @dynamic_batching.batch_fn_with_options(minimum_batch_size=1,
                                            maximum_batch_size=2)
    def f0(a):
      self.assertEqual(None, a.shape[0].value)
      assertions_triggered[0] += 1
      return a

    @dynamic_batching.batch_fn_with_options(minimum_batch_size=2,
                                            maximum_batch_size=2)
    def f1(a):
      # Even though minimum_batch_size and maximum_batch_size are equal, the
      # timeout can cause a batch with less than minimum_batch_size.
      self.assertEqual(None, a.shape[0].value)
      assertions_triggered[0] += 1
      return a

    @dynamic_batching.batch_fn_with_options(minimum_batch_size=2,
                                            maximum_batch_size=2,
                                            timeout_ms=None)
    def f2(a):
      # When timeout is disabled and minimum/maximum batch size are equal, the
      # shape is statically known.
      self.assertEqual(2, a.shape[0].value)
      assertions_triggered[0] += 1
      return a

    f0(tf.constant([1]))
    f1(tf.constant([1]))
    f2(tf.constant([1]))
    self.assertEqual(3, assertions_triggered[0])

  def test_out_of_order_execution1(self):
    with self.test_session() as session:
      batcher = dynamic_batching._Batcher(minimum_batch_size=1,
                                          maximum_batch_size=1,
                                          timeout_ms=None)

      tp = pool.ThreadPool(10)
      r0 = tp.apply_async(session.run, batcher.compute([[1]], [tf.int32]))
      (input0,), computation_id0 = session.run(batcher.get_inputs([tf.int32]))
      r1 = tp.apply_async(session.run, batcher.compute([[2]], [tf.int32]))
      (input1,), computation_id1 = session.run(batcher.get_inputs([tf.int32]))

      self.assertAllEqual([1], input0)
      self.assertAllEqual([2], input1)

      session.run(batcher.set_outputs([input0 + 42], computation_id0))
      session.run(batcher.set_outputs([input1 + 42], computation_id1))

      self.assertAllEqual([43], r0.get())
      self.assertAllEqual([44], r1.get())

  def test_out_of_order_execution2(self):
    with self.test_session() as session:
      batcher = dynamic_batching._Batcher(minimum_batch_size=1,
                                          maximum_batch_size=1,
                                          timeout_ms=None)

      tp = pool.ThreadPool(10)
      r0 = tp.apply_async(session.run, batcher.compute([[1]], [tf.int32]))
      (input0,), computation_id0 = session.run(batcher.get_inputs([tf.int32]))
      r1 = tp.apply_async(session.run, batcher.compute([[2]], [tf.int32]))
      (input1,), computation_id1 = session.run(batcher.get_inputs([tf.int32]))

      self.assertAllEqual([1], input0)
      self.assertAllEqual([2], input1)

      # These two runs are switched from testOutOfOrderExecution1.
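      # (Descriptive note, not in the original test: outputs are set for the
      # later computation first; results must still be routed back to the
      # matching compute() calls by computation id.)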
session.run(batcher.set_outputs([input1 + 42], computation_id1)) session.run(batcher.set_outputs([input0 + 42], computation_id0)) self.assertAllEqual([43], r0.get()) self.assertAllEqual([44], r1.get()) def test_invalid_computation_id(self): with self.test_session() as session: batcher = dynamic_batching._Batcher(minimum_batch_size=1, maximum_batch_size=1, timeout_ms=None) tp = pool.ThreadPool(10) tp.apply_async(session.run, batcher.compute([[1]], [tf.int32])) (input0,), _ = session.run(batcher.get_inputs([tf.int32])) self.assertAllEqual([1], input0) with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, 'Invalid computation id'): session.run(batcher.set_outputs([input0], 42)) def test_op_shape(self): with self.test_session(): batcher = dynamic_batching._Batcher(minimum_batch_size=1, maximum_batch_size=1, timeout_ms=None) _, computation_id = batcher.get_inputs([tf.int32]) self.assertEqual([], computation_id.shape) class DynamicBatchingBenchmarks(tf.test.Benchmark): def benchmark_batching_small(self): with tf.Session() as session: @dynamic_batching.batch_fn def f(a, b): return a + b outputs = [] for _ in xrange(1000): outputs.append(f(tf.ones([1, 10]), tf.ones([1, 10]))) op_to_benchmark = tf.group(*outputs) tf.train.start_queue_runners() self.run_op_benchmark( name='batching_many_small', sess=session, op_or_tensor=op_to_benchmark, burn_iters=10, min_iters=50) def benchmark_batching_large(self): with tf.Session() as session: @dynamic_batching.batch_fn def f(a, b): return a + b outputs = [] for _ in xrange(1000): outputs.append(f(tf.ones([1, 100000]), tf.ones([1, 100000]))) op_to_benchmark = tf.group(*outputs) tf.train.start_queue_runners() self.run_op_benchmark( name='batching_many_large', sess=session, op_or_tensor=op_to_benchmark, burn_iters=10, min_iters=50) if __name__ == '__main__': tf.test.main()
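Distilled from `test_one` and `test_two` above, a minimal standalone sketch of the `dynamic_batching.batch_fn` API; results only become available once queue runners are started. The function name `add` is illustrative.

import tensorflow as tf
import dynamic_batching

@dynamic_batching.batch_fn
def add(a, b):
  # Each call contributes a batch of size 1; the batcher may fuse concurrent
  # calls into a larger batch before running the function once.
  return a + b

result = add(tf.constant([1]), tf.constant([2]))

with tf.Session() as session:
  tf.train.start_queue_runners()  # the batcher runs inside a queue runner
  print(session.run(result))      # [3]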
[ "tensorflow.Graph", "tensorflow.fill", "tensorflow.constant", "tensorflow.shape", "tensorflow.train.start_queue_runners", "tensorflow.train.Coordinator", "tensorflow.test.main", "tensorflow.ones", "tensorflow.Session", "tensorflow.group", "tensorflow.tile" ]
dynamic_batching_test.py
[(445, 'tensorflow.test.main', 'tf.test.main', ([], {}), True, 'import tensorflow as tf\n'), (300, 'dynamic_batching.batch_fn_with_options', 'dynamic_batching.batch_fn_with_options', ([], {'minimum_batch_size': '(1)', 'maximum_batch_size': '(2)'}), False, 'import dynamic_batching\n'), (307, 'dynamic_batching.batch_fn_with_options', 'dynamic_batching.batch_fn_with_options', ([], {'minimum_batch_size': '(2)', 'maximum_batch_size': '(2)'}), False, 'import dynamic_batching\n'), (316, 'dynamic_batching.batch_fn_with_options', 'dynamic_batching.batch_fn_with_options', ([], {'minimum_batch_size': '(2)', 'maximum_batch_size': '(2)', 'timeout_ms': 'None'}), False, 'import dynamic_batching\n'), (44, 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {}), True, 'import tensorflow as tf\n'), (61, 'multiprocessing.pool.ThreadPool', 'pool.ThreadPool', (['(2)'], {}), False, 'from multiprocessing import pool\n'), (66, 'time.sleep', 'time.sleep', (['_SLEEP_TIME'], {}), False, 'import time\n'), (68, 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {}), True, 'import tensorflow as tf\n'), (88, 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {}), True, 'import tensorflow as tf\n'), (90, 'multiprocessing.pool.ThreadPool', 'pool.ThreadPool', (['(10)'], {}), False, 'from multiprocessing import pool\n'), (107, 'tensorflow.train.Coordinator', 'tf.train.Coordinator', ([], {}), True, 'import tensorflow as tf\n'), (108, 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'coord': 'coord'}), True, 'import tensorflow as tf\n'), (126, 'tensorflow.train.Coordinator', 'tf.train.Coordinator', ([], {}), True, 'import tensorflow as tf\n'), (127, 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'coord': 'coord'}), True, 'import tensorflow as tf\n'), (145, 'multiprocessing.pool.ThreadPool', 'pool.ThreadPool', (['(2)'], {}), False, 'from multiprocessing import pool\n'), (149, 'time.sleep', 'time.sleep', (['_SLEEP_TIME'], {}), False, 'import time\n'), (151, 'tensorflow.train.Coordinator', 'tf.train.Coordinator', ([], {}), True, 'import tensorflow as tf\n'), (152, 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'coord': 'coord'}), True, 'import tensorflow as tf\n'), (170, 'tensorflow.train.Coordinator', 'tf.train.Coordinator', ([], {}), True, 'import tensorflow as tf\n'), (171, 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'coord': 'coord'}), True, 'import tensorflow as tf\n'), (188, 'tensorflow.train.Coordinator', 'tf.train.Coordinator', ([], {}), True, 'import tensorflow as tf\n'), (189, 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'coord': 'coord'}), True, 'import tensorflow as tf\n'), (241, 'dynamic_batching.batch_fn_with_options', 'dynamic_batching.batch_fn_with_options', ([], {'minimum_batch_size': '(2)', 'timeout_ms': '(1000)'}), False, 'import dynamic_batching\n'), (249, 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {}), True, 'import tensorflow as tf\n'), (251, 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), False, 'import datetime\n'), (265, 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), False, 'import datetime\n'), (276, 'dynamic_batching.batch_fn_with_options', 'dynamic_batching.batch_fn_with_options', ([], {'maximum_batch_size': '(2)'}), False, 'import dynamic_batching\n'), (289, 'tensorflow.train.start_queue_runners', 
'tf.train.start_queue_runners', ([], {}), True, 'import tensorflow as tf\n'), (326, 'tensorflow.constant', 'tf.constant', (['[1]'], {}), True, 'import tensorflow as tf\n'), (327, 'tensorflow.constant', 'tf.constant', (['[1]'], {}), True, 'import tensorflow as tf\n'), (328, 'tensorflow.constant', 'tf.constant', (['[1]'], {}), True, 'import tensorflow as tf\n'), (333, 'dynamic_batching._Batcher', 'dynamic_batching._Batcher', ([], {'minimum_batch_size': '(1)', 'maximum_batch_size': '(1)', 'timeout_ms': 'None'}), False, 'import dynamic_batching\n'), (337, 'multiprocessing.pool.ThreadPool', 'pool.ThreadPool', (['(10)'], {}), False, 'from multiprocessing import pool\n'), (354, 'dynamic_batching._Batcher', 'dynamic_batching._Batcher', ([], {'minimum_batch_size': '(1)', 'maximum_batch_size': '(1)', 'timeout_ms': 'None'}), False, 'import dynamic_batching\n'), (358, 'multiprocessing.pool.ThreadPool', 'pool.ThreadPool', (['(10)'], {}), False, 'from multiprocessing import pool\n'), (376, 'dynamic_batching._Batcher', 'dynamic_batching._Batcher', ([], {'minimum_batch_size': '(1)', 'maximum_batch_size': '(1)', 'timeout_ms': 'None'}), False, 'import dynamic_batching\n'), (380, 'multiprocessing.pool.ThreadPool', 'pool.ThreadPool', (['(10)'], {}), False, 'from multiprocessing import pool\n'), (392, 'dynamic_batching._Batcher', 'dynamic_batching._Batcher', ([], {'minimum_batch_size': '(1)', 'maximum_batch_size': '(1)', 'timeout_ms': 'None'}), False, 'import dynamic_batching\n'), (404, 'tensorflow.Session', 'tf.Session', ([], {}), True, 'import tensorflow as tf\n'), (412, 'tensorflow.group', 'tf.group', (['*outputs'], {}), True, 'import tensorflow as tf\n'), (414, 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {}), True, 'import tensorflow as tf\n'), (424, 'tensorflow.Session', 'tf.Session', ([], {}), True, 'import tensorflow as tf\n'), (432, 'tensorflow.group', 'tf.group', (['*outputs'], {}), True, 'import tensorflow as tf\n'), (434, 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {}), True, 'import tensorflow as tf\n'), (42, 'tensorflow.constant', 'tf.constant', (['[[1, 3]]'], {}), True, 'import tensorflow as tf\n'), (42, 'tensorflow.constant', 'tf.constant', (['[2]'], {}), True, 'import tensorflow as tf\n'), (58, 'tensorflow.constant', 'tf.constant', (['[1]'], {}), True, 'import tensorflow as tf\n'), (58, 'tensorflow.constant', 'tf.constant', (['[2]'], {}), True, 'import tensorflow as tf\n'), (59, 'tensorflow.constant', 'tf.constant', (['[2]'], {}), True, 'import tensorflow as tf\n'), (59, 'tensorflow.constant', 'tf.constant', (['[3]'], {}), True, 'import tensorflow as tf\n'), (105, 'tensorflow.constant', 'tf.constant', (['[1, 2]'], {}), True, 'import tensorflow as tf\n'), (124, 'tensorflow.constant', 'tf.constant', (['[1, 2]'], {}), True, 'import tensorflow as tf\n'), (142, 'tensorflow.constant', 'tf.constant', (['[1]'], {}), True, 'import tensorflow as tf\n'), (142, 'tensorflow.constant', 'tf.constant', (['[2]'], {}), True, 'import tensorflow as tf\n'), (143, 'tensorflow.constant', 'tf.constant', (['[[2]]'], {}), True, 'import tensorflow as tf\n'), (143, 'tensorflow.constant', 'tf.constant', (['[3]'], {}), True, 'import tensorflow as tf\n'), (166, 'tensorflow.constant', 'tf.constant', (['(1)'], {}), True, 'import tensorflow as tf\n'), (168, 'tensorflow.constant', 'tf.constant', (['[1]'], {}), True, 'import tensorflow as tf\n'), (184, 'tensorflow.constant', 'tf.constant', (['[1, 2, 3, 4]'], {}), True, 'import tensorflow as tf\n'), (186, 
'tensorflow.constant', 'tf.constant', (['[1]'], {}), True, 'import tensorflow as tf\n'), (207, 'tensorflow.constant', 'tf.constant', (['[1]'], {}), True, 'import tensorflow as tf\n'), (211, 'tensorflow.Session', 'tf.Session', ([], {}), True, 'import tensorflow as tf\n'), (212, 'tensorflow.train.Coordinator', 'tf.train.Coordinator', ([], {}), True, 'import tensorflow as tf\n'), (213, 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'coord': 'coord'}), True, 'import tensorflow as tf\n'), (215, 'time.sleep', 'time.sleep', (['_SLEEP_TIME'], {}), False, 'import time\n'), (227, 'tensorflow.constant', 'tf.constant', (['[1]'], {}), True, 'import tensorflow as tf\n'), (231, 'tensorflow.Session', 'tf.Session', ([], {}), True, 'import tensorflow as tf\n'), (232, 'tensorflow.train.Coordinator', 'tf.train.Coordinator', ([], {}), True, 'import tensorflow as tf\n'), (233, 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'coord': 'coord'}), True, 'import tensorflow as tf\n'), (234, 'time.sleep', 'time.sleep', (['_SLEEP_TIME'], {}), False, 'import time\n'), (247, 'tensorflow.constant', 'tf.constant', (['[[1, 3]]'], {}), True, 'import tensorflow as tf\n'), (247, 'tensorflow.constant', 'tf.constant', (['[2]'], {}), True, 'import tensorflow as tf\n'), (253, 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), False, 'import datetime\n'), (267, 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), False, 'import datetime\n'), (39, 'tensorflow.shape', 'tf.shape', (['a'], {}), True, 'import tensorflow as tf\n'), (40, 'tensorflow.tile', 'tf.tile', (['[batch_size]', '[batch_size]'], {}), True, 'import tensorflow as tf\n'), (55, 'tensorflow.shape', 'tf.shape', (['a'], {}), True, 'import tensorflow as tf\n'), (56, 'tensorflow.tile', 'tf.tile', (['[batch_size]', '[batch_size]'], {}), True, 'import tensorflow as tf\n'), (201, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (222, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (244, 'tensorflow.shape', 'tf.shape', (['a'], {}), True, 'import tensorflow as tf\n'), (245, 'tensorflow.tile', 'tf.tile', (['[batch_size]', '[batch_size]'], {}), True, 'import tensorflow as tf\n'), (261, 'tensorflow.constant', 'tf.constant', (['[[1, 3]]'], {}), True, 'import tensorflow as tf\n'), (261, 'tensorflow.constant', 'tf.constant', (['[2]'], {}), True, 'import tensorflow as tf\n'), (262, 'tensorflow.constant', 'tf.constant', (['[[1, 3]]'], {}), True, 'import tensorflow as tf\n'), (262, 'tensorflow.constant', 'tf.constant', (['[2]'], {}), True, 'import tensorflow as tf\n'), (278, 'tensorflow.shape', 'tf.shape', (['a'], {}), True, 'import tensorflow as tf\n'), (279, 'tensorflow.tile', 'tf.tile', (['[batch_size]', '[batch_size]'], {}), True, 'import tensorflow as tf\n'), (282, 'tensorflow.constant', 'tf.constant', (['[1]'], {}), True, 'import tensorflow as tf\n'), (282, 'tensorflow.constant', 'tf.constant', (['[2]'], {}), True, 'import tensorflow as tf\n'), (283, 'tensorflow.constant', 'tf.constant', (['[1]'], {}), True, 'import tensorflow as tf\n'), (283, 'tensorflow.constant', 'tf.constant', (['[2]'], {}), True, 'import tensorflow as tf\n'), (284, 'tensorflow.constant', 'tf.constant', (['[1]'], {}), True, 'import tensorflow as tf\n'), (284, 'tensorflow.constant', 'tf.constant', (['[2]'], {}), True, 'import tensorflow as tf\n'), (285, 'tensorflow.constant', 'tf.constant', (['[1]'], {}), True, 'import tensorflow as tf\n'), (285, 'tensorflow.constant', 
'tf.constant', (['[2]'], {}), True, 'import tensorflow as tf\n'), (286, 'tensorflow.constant', 'tf.constant', (['[1]'], {}), True, 'import tensorflow as tf\n'), (286, 'tensorflow.constant', 'tf.constant', (['[2]'], {}), True, 'import tensorflow as tf\n'), (86, 'tensorflow.fill', 'tf.fill', (['[1, 5]', 'i'], {}), True, 'import tensorflow as tf\n'), (86, 'tensorflow.fill', 'tf.fill', (['[1, 5]', 'i'], {}), True, 'import tensorflow as tf\n'), (411, 'tensorflow.ones', 'tf.ones', (['[1, 10]'], {}), True, 'import tensorflow as tf\n'), (411, 'tensorflow.ones', 'tf.ones', (['[1, 10]'], {}), True, 'import tensorflow as tf\n'), (431, 'tensorflow.ones', 'tf.ones', (['[1, 100000]'], {}), True, 'import tensorflow as tf\n'), (431, 'tensorflow.ones', 'tf.ones', (['[1, 100000]'], {}), True, 'import tensorflow as tf\n')]
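The `test_minimum_batch_size` case above encodes the timeout semantics of `batch_fn_with_options`; a condensed sketch with the same constants (minimum batch size 2, 1000 ms timeout), not part of the original file:

import tensorflow as tf
import dynamic_batching

@dynamic_batching.batch_fn_with_options(minimum_batch_size=2, timeout_ms=1000)
def f(a, b):
  return a + b

out = f(tf.constant([[1, 3]]), tf.constant([2]))

with tf.Session() as session:
  tf.train.start_queue_runners()
  # Only one sample is pending, so the batcher waits roughly the full 1000ms
  # timeout before running an undersized batch of 1.
  print(session.run(out))  # [[3, 5]] after about a second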
georgeliu233/DRLFD_Urban
08e448d50ba0def1f968ba51d5a24053f37a0791
import sys
import time
import warnings

import numpy as np
import tensorflow as tf
from collections import deque

from stable_baselines.common import tf_util, OffPolicyRLModel, SetVerbosity, TensorboardWriter
from stable_baselines.common.vec_env import VecEnv
from stable_baselines.common.math_util import safe_mean, unscale_action, scale_action
from stable_baselines.common.schedules import get_schedule_fn
#from stable_baselines.common.buffers import ReplayBuffer
from stable_baselines.common.schedules import LinearSchedule
from stable_baselines.common.DQFD_buffers import ReplayBuffer, PrioritizedReplayBuffer, NStepTransitionBuffer
from stable_baselines.sac.policies import SACPolicy
from stable_baselines import logger


class SAC(OffPolicyRLModel):
    """
    Soft Actor-Critic (SAC)
    Off-Policy Maximum Entropy Deep Reinforcement Learning with a Stochastic Actor.
    This implementation borrows code from the original implementation (https://github.com/haarnoja/sac),
    from OpenAI Spinning Up (https://github.com/openai/spinningup) and from the Softlearning repo
    (https://github.com/rail-berkeley/softlearning/)
    Paper: https://arxiv.org/abs/1801.01290
    Introduction to SAC: https://spinningup.openai.com/en/latest/algorithms/sac.html

    :param policy: (SACPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, LnMlpPolicy, ...)
    :param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)
    :param gamma: (float) the discount factor
    :param learning_rate: (float or callable) learning rate for adam optimizer,
        the same learning rate will be used for all networks (Q-Values, Actor and Value function)
        it can be a function of the current progress (from 1 to 0)
    :param buffer_size: (int) size of the replay buffer
    :param batch_size: (int) Minibatch size for each gradient update
    :param tau: (float) the soft update coefficient ("polyak update", between 0 and 1)
    :param ent_coef: (str or float) Entropy regularization coefficient. (Equivalent to
        inverse of reward scale in the original SAC paper.) Controlling exploration/exploitation trade-off.
        Set it to 'auto' to learn it automatically (and 'auto_0.1' for using 0.1 as initial value)
    :param train_freq: (int) Update the model every `train_freq` steps.
    :param learning_starts: (int) how many steps of the model to collect transitions for before learning starts
    :param target_update_interval: (int) update the target network every `target_update_interval` steps.
    :param gradient_steps: (int) How many gradient updates after each step
    :param target_entropy: (str or float) target entropy when learning ent_coef (ent_coef = 'auto')
    :param action_noise: (ActionNoise) the action noise type (None by default), this can help
        for hard exploration problem. Cf DDPG for the different action noise type.
    :param random_exploration: (float) Probability of taking a random action (as in an epsilon-greedy strategy)
        This is not needed for SAC normally but can help exploring when using HER + SAC.
This hack was present in the original OpenAI Baselines repo (DDPG + HER) :param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug :param tensorboard_log: (str) the log location for tensorboard (if None, no logging) :param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance :param policy_kwargs: (dict) additional arguments to be passed to the policy on creation :param full_tensorboard_log: (bool) enable additional logging when using tensorboard Note: this has no effect on SAC logging for now :param seed: (int) Seed for the pseudo-random generators (python, numpy, tensorflow). If None (default), use random seed. Note that if you want completely deterministic results, you must set `n_cpu_tf_sess` to 1. :param n_cpu_tf_sess: (int) The number of threads for TensorFlow operations If None, the number of cpu of the current machine will be used. """ def __init__(self, policy, env, gamma=0.99, learning_rate=3e-4, buffer_size=50000, learning_starts=100, train_freq=1, batch_size=64, tau=0.005, ent_coef='auto', target_update_interval=2, gradient_steps=1, target_entropy='auto', action_noise=None, random_exploration=0.0, verbose=0, tensorboard_log=None, _init_setup_model=True, policy_kwargs=None, full_tensorboard_log=False, seed=None, n_cpu_tf_sess=None,prioritized_replay=True,prioritized_replay_alpha=0.3, prioritized_replay_beta0=1.0, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6,ratio=0.75,n_step=False,update_buffer_interval=100,max_ratio=0.9): super(SAC, self).__init__(policy=policy, env=env, replay_buffer=None, verbose=verbose, policy_base=SACPolicy, requires_vec_env=False, policy_kwargs=policy_kwargs, seed=seed, n_cpu_tf_sess=n_cpu_tf_sess) self.prioritized_replay = prioritized_replay self.prioritized_replay_eps = prioritized_replay_eps self.prioritized_replay_alpha = prioritized_replay_alpha self.prioritized_replay_beta0 = prioritized_replay_beta0 self.prioritized_replay_beta_iters = prioritized_replay_beta_iters self.buffer_size = buffer_size self.learning_rate = learning_rate self.learning_starts = learning_starts self.update_buffer_interval = update_buffer_interval self.train_freq = train_freq self.batch_size = batch_size self.tau = tau self.ratio = ratio self.init_ratio = ratio self.max_ratio = max_ratio self.n_step = n_step self.n_step_length = 10 # In the original paper, same learning rate is used for all networks # self.policy_lr = learning_rate # self.qf_lr = learning_rate # self.vf_lr = learning_rate # Entropy coefficient / Entropy temperature # Inverse of the reward scale self.ent_coef = ent_coef self.target_update_interval = target_update_interval self.gradient_steps = gradient_steps self.gamma = gamma self.action_noise = action_noise self.random_exploration = random_exploration self.value_fn = None self.graph = None self.replay_buffer = None self.sess = None self.tensorboard_log = tensorboard_log self.verbose = verbose self.params = None self.summary = None self.policy_tf = None self.target_entropy = target_entropy self.full_tensorboard_log = full_tensorboard_log self.obs_target = None self.target_policy = None self.actions_ph = None self.rewards_ph = None self.terminals_ph = None self.observations_ph = None self.action_target = None self.next_observations_ph = None self.value_target = None self.step_ops = None self.target_update_op = None self.infos_names = None self.entropy = None self.target_params = None self.learning_rate_ph = None self.processed_obs_ph = None self.processed_next_obs_ph 
= None self.log_ent_coef = None if _init_setup_model: self.setup_model() def _get_pretrain_placeholders(self): policy = self.policy_tf # Rescale deterministic_action = unscale_action(self.action_space, self.deterministic_action) return policy.obs_ph, self.actions_ph, deterministic_action def initializeExpertBuffer(self, np_arr_list, obs_len,action_list,reward_list,done_list): """ expects to be given a list of np_arrays (trajectories), sets all rewards to 1 """ #print(self.prioritized_replay) if self.prioritized_replay: self.expert_buffer = PrioritizedReplayBuffer(obs_len, alpha=self.prioritized_replay_alpha) if self.prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = 100000 else: prioritized_replay_beta_iters = self.prioritized_replay_beta_iters self.beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=self.prioritized_replay_beta0, final_p=1.0) else: self.expert_buffer = ReplayBuffer(obs_len) self.exp_beta_schedule = None if self.n_step: n_step_buffer=deque(maxlen=self.n_step_length) self.expert_N_buffer = NStepTransitionBuffer(obs_len,n_step=self.n_step_length,gamma=self.gamma) for i in range(obs_len-2): obs,obs_ = np_arr_list[i],np_arr_list[i+1] obs = np.reshape(obs,(64,64,3)) obs_ = np.reshape(obs_,(64,64,3)) if done_list[i]==2 or done_list[i]==True: done = True else: done = False if not self.n_step: self.expert_buffer.add(obs,action_list[i],reward_list[i],obs_,done,1) else: trans = (obs,action_list[i],reward_list[i],obs_,done) n_step_buffer.append(trans) self.expert_N_buffer.add((obs,action_list[i],reward_list[i],obs_,done)) if len(n_step_buffer)== self.n_step_length: #self.expert_buffer.add(obs,action_list[i],reward_list[i],obs_,done_list[i],1) one_step = n_step_buffer[0] self.expert_buffer.add(one_step[0],one_step[1],one_step[2],one_step[3],one_step[4],1) def setup_model(self): with SetVerbosity(self.verbose): self.graph = tf.Graph() with self.graph.as_default(): self.set_random_seed(self.seed) self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph) if self.prioritized_replay: self.replay_buffer = PrioritizedReplayBuffer(self.buffer_size, alpha=self.prioritized_replay_alpha) if self.prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = 100000 else: prioritized_replay_beta_iters = self.prioritized_replay_beta_iters self.beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=self.prioritized_replay_beta0, final_p=1.0) else: self.replay_buffer = ReplayBuffer(self.buffer_size) self.beta_schedule = None if self.n_step: self.replay_N_buffer=NStepTransitionBuffer(self.buffer_size,self.n_step_length,self.gamma) with tf.variable_scope("input", reuse=False): # Create policy and target TF objects self.policy_tf = self.policy(self.sess, self.observation_space, self.action_space, **self.policy_kwargs) self.target_policy = self.policy(self.sess, self.observation_space, self.action_space, **self.policy_kwargs) # Initialize Placeholders self.observations_ph = self.policy_tf.obs_ph # Normalized observation for pixels self.processed_obs_ph = self.policy_tf.processed_obs self.next_observations_ph = self.target_policy.obs_ph self.processed_next_obs_ph = self.target_policy.processed_obs self.action_target = self.target_policy.action_ph self.terminals_ph = tf.placeholder(tf.float32, shape=(None, 1), name='terminals') self.rewards_ph = tf.placeholder(tf.float32, shape=(None, 1), name='rewards') self.is_demo_ph = tf.placeholder(tf.float32, shape=(None, 1), name='is_demonstrations') self.weight_ph = 
tf.placeholder(tf.float32, shape=(None, 1), name='importance_weight') self.actions_ph = tf.placeholder(tf.float32, shape=(None,) + self.action_space.shape, name='actions') self.learning_rate_ph = tf.placeholder(tf.float32, [], name="learning_rate_ph") if self.n_step: self.next_observations_ph_n = self.target_policy.obs_ph self.processed_next_obs_ph_n = self.target_policy.processed_obs self.rewards_ph_n = tf.placeholder(tf.float32, shape=(None, 1), name='n_step_rewards') self.terminals_ph_n = tf.placeholder(tf.float32, shape=(None, 1), name='n_step_terminals') with tf.variable_scope("model", reuse=False): # Create the policy # first return value corresponds to deterministic actions # policy_out corresponds to stochastic actions, used for training # logp_pi is the log probability of actions taken by the policy self.deterministic_action, policy_out, logp_pi = self.policy_tf.make_actor(self.processed_obs_ph) # Monitor the entropy of the policy, # this is not used for training self.entropy = tf.reduce_mean(self.policy_tf.entropy) self.obs_ph, self.actions_ph, self.deterministic_actions_ph = self._get_pretrain_placeholders() # Use two Q-functions to improve performance by reducing overestimation bias. qf1, qf2, value_fn = self.policy_tf.make_critics(self.processed_obs_ph, self.actions_ph, create_qf=True, create_vf=True) qf1_pi, qf2_pi, _ = self.policy_tf.make_critics(self.processed_obs_ph, policy_out, create_qf=True, create_vf=False, reuse=True) dtm_qf1,dtm_qf2,_ = self.policy_tf.make_critics(self.processed_obs_ph, self.deterministic_actions_ph, create_qf=True,create_vf=False, reuse=True) # Target entropy is used when learning the entropy coefficient if self.target_entropy == 'auto': # automatically set target entropy if needed self.target_entropy = -np.prod(self.env.action_space.shape).astype(np.float32) else: # Force conversion # this will also throw an error for unexpected string self.target_entropy = float(self.target_entropy) # The entropy coefficient or entropy can be learned automatically # see Automating Entropy Adjustment for Maximum Entropy RL section # of https://arxiv.org/abs/1812.05905 if isinstance(self.ent_coef, str) and self.ent_coef.startswith('auto'): # Default initial value of ent_coef when learned init_value = 1.0 if '_' in self.ent_coef: init_value = float(self.ent_coef.split('_')[1]) assert init_value > 0., "The initial value of ent_coef must be greater than 0" self.log_ent_coef = tf.get_variable('log_ent_coef', dtype=tf.float32, initializer=np.log(init_value).astype(np.float32)) self.ent_coef = tf.exp(self.log_ent_coef) else: # Force conversion to float # this will throw an error if a malformed string (different from 'auto') # is passed self.ent_coef = float(self.ent_coef) with tf.variable_scope("target", reuse=False): # Create the value network _, _, value_target = self.target_policy.make_critics(self.processed_next_obs_ph, create_qf=False, create_vf=True) self.value_target = value_target if self.n_step: _,_,value_target_n = self.policy_tf.make_critics(self.processed_next_obs_ph_n, create_qf=False, create_vf=True,reuse=True) self.value_target_n = value_target_n with tf.variable_scope("loss", reuse=False): # Take the min of the two Q-Values (Double-Q Learning) min_qf_pi = tf.minimum(qf1_pi, qf2_pi) # Target for Q value regression q_backup = tf.stop_gradient( self.rewards_ph + (1 - self.terminals_ph) * self.gamma * self.value_target ) # Compute Q-Function loss # TODO: test with huber loss (it would avoid too high values) qf1_loss = 0.5 * tf.reduce_mean(((q_backup - qf1) ** 
2)*self.weight_ph) qf1_loss_col = tf.reduce_mean(((q_backup - qf1) ** 2),1) qf2_loss = 0.5 * tf.reduce_mean(((q_backup - qf2) ** 2)*self.weight_ph) if self.n_step: q_backup_n = tf.stop_gradient( self.rewards_ph_n + (1 - self.terminals_ph_n) *( self.gamma**self.n_step_length ) * self.value_target_n) qf1_loss_n = 0.5 * tf.reduce_mean(((q_backup_n - qf1) ** 2)*self.weight_ph) qf1_loss_n_col = tf.reduce_mean(((q_backup_n - qf1) ** 2),1) qf2_loss_n = 0.5 * tf.reduce_mean(((q_backup_n - qf2) ** 2)*self.weight_ph) if self.n_step: value_for_priority = qf1_loss_col + qf1_loss_n_col else: value_for_priority = qf1_loss_col # Compute the entropy temperature loss # it is used when the entropy coefficient is learned ent_coef_loss, entropy_optimizer = None, None if not isinstance(self.ent_coef, float): ent_coef_loss = -tf.reduce_mean( self.log_ent_coef * tf.stop_gradient(logp_pi + self.target_entropy)*self.weight_ph) entropy_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph) # Compute the policy loss # Alternative: policy_kl_loss = tf.reduce_mean(logp_pi - min_qf_pi) policy_kl_loss = tf.reduce_mean((self.ent_coef * logp_pi - min_qf_pi)*self.weight_ph) actor_for_priority = tf.reduce_mean(self.ent_coef * logp_pi - min_qf_pi,1) # NOTE: in the original implementation, they have an additional # regularization loss for the Gaussian parameters # this is not used for now # policy_loss = (policy_kl_loss + policy_regularization_loss) min_q = tf.minimum(dtm_qf1,dtm_qf2) Q_filter = tf.cast((qf1 > min_q)|(qf2 > min_q),tf.float32) #Q_filter_1 = tf.cast(qf1 > min_q,tf.float32) #Q_filter_2 = tf.cast(qf2 > min_q,tf.float32) im_loss1 = tf.square(self.actions_ph - self.deterministic_actions_ph)*Q_filter*self.is_demo_ph #im_loss2 = tf.square(self.actions_ph - self.deterministic_actions_ph)*Q_filter_2*self.is_demo_ph #actor_loss_di1 = tf.reduce_mean(im_loss1) #actor_loss_di2 = tf.reduce_mean(im_loss2) self.actor_loss_di = tf.reduce_mean(im_loss1) imitation_for_priority = tf.reduce_mean(im_loss1,axis=1) regularizerpi = tf.contrib.layers.l1_l2_regularizer(scale_l1=0.0, scale_l2=1e-5, scope="model/pi") all_trainable_weights_pi = tf.trainable_variables('model/pi') regularization_penalty_pi = tf.contrib.layers.apply_regularization(regularizerpi, all_trainable_weights_pi) policy_loss = policy_kl_loss + regularization_penalty_pi + self.actor_loss_di # Target for value fn regression # We update the vf towards the min of two Q-functions in order to # reduce overestimation bias from function approximation error. 
v_backup = tf.stop_gradient(min_qf_pi - self.ent_coef * logp_pi) value_loss = 0.5 * tf.reduce_mean(((value_fn - v_backup) ** 2)*self.weight_ph) #value_for_priority = tf.reduce_mean((value_fn - v_backup) ** 2,1) regularizervf = tf.contrib.layers.l1_l2_regularizer(scale_l1=0.0, scale_l2=1e-5, scope='model/values_fn') all_trainable_weights_vf = tf_util.get_trainable_vars('model/values_fn') regularization_penalty_vf = tf.contrib.layers.apply_regularization(regularizervf, all_trainable_weights_vf) if self.n_step: values_losses = qf1_loss + qf2_loss + value_loss + regularization_penalty_vf + qf1_loss_n + qf2_loss_n else: values_losses = qf1_loss + qf2_loss + value_loss + regularization_penalty_vf # Policy train op # (has to be separate from value train op, because min_qf_pi appears in policy_loss) policy_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph) policy_train_op = policy_optimizer.minimize(policy_loss, var_list=tf_util.get_trainable_vars('model/pi')) # Value train op value_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph) values_params = tf_util.get_trainable_vars('model/values_fn') source_params = tf_util.get_trainable_vars("model/values_fn/vf") target_params = tf_util.get_trainable_vars("target/values_fn/vf") # Polyak averaging for target variables self.target_update_op = [ tf.assign(target, (1 - self.tau) * target + self.tau * source) for target, source in zip(target_params, source_params) ] # Initializing target to match source variables target_init_op = [ tf.assign(target, source) for target, source in zip(target_params, source_params) ] # Control flow is used because sess.run otherwise evaluates in nondeterministic order # and we first need to compute the policy action before computing q values losses with tf.control_dependencies([policy_train_op]): train_values_op = value_optimizer.minimize(values_losses, var_list=values_params) self.infos_names = ['policy_loss', 'qf1_loss', 'qf2_loss', 'value_loss', 'entropy'] # All ops to call during one training step self.step_ops = [policy_loss, qf1_loss, qf2_loss, value_loss, qf1, qf2, value_fn, logp_pi, self.entropy,actor_for_priority,value_for_priority,imitation_for_priority,self.actor_loss_di, policy_train_op, train_values_op] # Add entropy coefficient optimization operation if needed if ent_coef_loss is not None: with tf.control_dependencies([train_values_op]): ent_coef_op = entropy_optimizer.minimize(ent_coef_loss, var_list=self.log_ent_coef) self.infos_names += ['ent_coef_loss', 'ent_coef'] self.step_ops += [ent_coef_op, ent_coef_loss, self.ent_coef] # Monitor losses and entropy in tensorboard tf.summary.scalar('policy_loss', policy_loss) tf.summary.scalar('qf1_loss', qf1_loss) tf.summary.scalar('qf2_loss', qf2_loss) tf.summary.scalar('value_loss', value_loss) tf.summary.scalar("Imitation_loss",self.actor_loss_di) tf.summary.scalar('entropy', self.entropy) tf.summary.scalar('importance weight',tf.reduce_mean(self.weight_ph)) if ent_coef_loss is not None: tf.summary.scalar('ent_coef_loss', ent_coef_loss) tf.summary.scalar('ent_coef', self.ent_coef) tf.summary.scalar('learning_rate', tf.reduce_mean(self.learning_rate_ph)) # Retrieve parameters that must be saved self.params = tf_util.get_trainable_vars("model") self.target_params = tf_util.get_trainable_vars("target/values_fn/vf") # Initialize Variables and target network with self.sess.as_default(): self.sess.run(tf.global_variables_initializer()) self.sess.run(target_init_op) self.summary = tf.summary.merge_all() def 
pretrain_sac(self,pretrain_steps): print("=====SAC Pretraining=====") for step in range(pretrain_steps): # Compute current learning_rate frac = 1.0 - step / pretrain_steps current_lr = self.learning_rate(frac) # Update policy and critics (q functions) policy_loss, qf1_loss, qf2_loss, value_loss,*entropy =self._train_step(step, writer=None,learning_rate=current_lr,pretrain=True) if step % 50==0: print("** Pretraining step: |",step/pretrain_steps," Actor loss: |",policy_loss, "Critic loss|",value_loss," Actor expert loss|",entropy[-1] ) # Update target network if step % self.target_update_interval == 0: # Update target network self.sess.run(self.target_update_op) self.step += 1 print("Pretrin complete!!!") def _train_step(self, step, writer, learning_rate,pretrain=False): # Sample a batch from the replay buffer if not pretrain: a = self.ratio if not self.prioritized_replay: batch = self.replay_buffer.sample(int(self.batch_size*a)) batch_obs, batch_actions, batch_rewards, batch_next_obs, batch_dones,batch_demos,batch_idx = batch weight= np.ones_like(batch_rewards) else: batch = self.replay_buffer.sample(int(self.batch_size*a),beta=self.beta_schedule.value(self.num_timesteps)) batch_obs, batch_actions, batch_rewards, batch_next_obs, batch_dones,batch_demos,weight,batch_idx = batch batch_rewards = batch_rewards.reshape(-1, 1) one_batch_r = batch_rewards batch_dones = batch_dones.reshape(-1, 1) batch_demos = batch_demos.reshape(-1, 1) weight = weight weight = weight.reshape(-1,1) if not self.prioritized_replay: expert_batch = self.expert_buffer.sample(int(self.batch_size*(1-a))) exp_batch_obs, exp_batch_actions, exp_batch_rewards, exp_batch_next_obs, exp_batch_dones,exp_demos,exp_batch_idx = expert_batch exp_weight= np.ones_like(exp_batch_rewards) else: expert_batch = self.expert_buffer.sample(int(self.batch_size*(1-a)),beta=self.beta_schedule.value(self.num_timesteps)) exp_batch_obs, exp_batch_actions, exp_batch_rewards, exp_batch_next_obs, exp_batch_dones,exp_demos,exp_weight,exp_batch_idx = expert_batch #print(exp_batch_idx.shape) exp_batch_rewards = exp_batch_rewards.reshape(-1, 1) #self.new_ratio = self.ratio ##summ_r = np.mean(batch_rewards)>np.mean(exp_batch_rewards) #if summ_r: # self.new_ratio = min(self.new_ratio + 2/self.batch_size,0.9) #else: # self.new_ratio = max(self.new_ratio - 1/self.batch_size,0.1) exp_batch_dones = exp_batch_dones.reshape(-1, 1) exp_demos = exp_demos.reshape(-1,1) exp_weight = exp_weight exp_weight =exp_weight.reshape(-1,1) batch_obs = np.vstack((batch_obs,exp_batch_obs)) batch_actions = np.vstack((batch_actions,exp_batch_actions)) batch_rewards = np.vstack((batch_rewards,exp_batch_rewards)) batch_next_obs = np.vstack((batch_next_obs,exp_batch_next_obs)) batch_dones = np.vstack((batch_dones,exp_batch_dones)) batch_demos = np.vstack((batch_demos,exp_demos)) weight = np.vstack((weight,exp_weight)) if self.n_step: nbatch = self.replay_N_buffer.sample(batch_idx) ex_nbatch = self.expert_N_buffer.sample(exp_batch_idx) _,_, nbatch_rewards, nbatch_next_obs, nbatch_dones = nbatch nbatch_rewards = nbatch_rewards.reshape(-1, 1) nbatch_dones = nbatch_dones.reshape(-1, 1) _,_, ex_nbatch_rewards, ex_nbatch_next_obs, ex_nbatch_dones = ex_nbatch ex_nbatch_rewards = ex_nbatch_rewards.reshape(-1, 1) ex_nbatch_dones = ex_nbatch_dones.reshape(-1, 1) nbatch_rewards = np.vstack((nbatch_rewards,ex_nbatch_rewards)) nbatch_next_obs = np.vstack((nbatch_next_obs,ex_nbatch_next_obs)) nbatch_dones = np.vstack((nbatch_dones,ex_nbatch_dones)) #print(nbatch_dones.shape,ex_nbatch_dones.shape) 
else: if not self.prioritized_replay: batch = self.expert_buffer.sample(self.batch_size) batch_obs, batch_actions, batch_rewards, batch_next_obs, batch_dones,batch_demos,batch_idx = batch weight= np.ones_like(batch_rewards) else: batch = self.expert_buffer.sample(self.batch_size,beta=self.beta_schedule.value(self.step)) batch_obs, batch_actions, batch_rewards, batch_next_obs, batch_dones,batch_demos,weight,batch_idx = batch batch_rewards = batch_rewards.reshape(-1, 1) batch_dones = batch_dones.reshape(-1, 1) batch_demos = batch_demos.reshape(-1, 1) weight = weight.reshape(-1,1) if self.n_step: nbatch = self.expert_N_buffer.sample(batch_idx) _,_, nbatch_rewards, nbatch_next_obs, nbatch_dones = nbatch nbatch_rewards = nbatch_rewards.reshape(-1, 1) nbatch_dones = nbatch_dones.reshape(-1, 1) if self.n_step: feed_dict = { self.observations_ph: batch_obs, self.actions_ph: batch_actions, self.next_observations_ph: batch_next_obs, self.weight_ph:weight, self.rewards_ph: batch_rewards, self.is_demo_ph:batch_demos, self.terminals_ph: batch_dones, self.learning_rate_ph: learning_rate, self.next_observations_ph_n: nbatch_next_obs, self.rewards_ph_n: nbatch_rewards, self.terminals_ph_n: nbatch_dones, self.is_demo_ph:batch_demos } else: feed_dict = { self.observations_ph: batch_obs, self.actions_ph: batch_actions, self.next_observations_ph: batch_next_obs, self.weight_ph:weight, self.rewards_ph: batch_rewards, self.is_demo_ph:batch_demos, self.terminals_ph: batch_dones, self.learning_rate_ph: learning_rate } # out = [policy_loss, qf1_loss, qf2_loss, # value_loss, qf1, qf2, value_fn, logp_pi, # self.entropy, policy_train_op, train_values_op] # Do one gradient step # and optionally compute log for tensorboard if writer is not None and not pretrain: out = self.sess.run([self.summary] + self.step_ops, feed_dict) summary = out.pop(0) writer.add_summary(summary, step) else: out = self.sess.run(self.step_ops, feed_dict) # Unpack to monitor losses and entropy policy_loss, qf1_loss, qf2_loss, value_loss, *values = out # qf1, qf2, value_fn, logp_pi, entropy, *_ = values entropy = values[4] actor_for_priority = values[5] value_for_priority = values[6] imitation_for_priority = values[7] actor_loss_di = values[8] #print(values[0].shape,values[2].shape) #print(actor_for_priority.shape,value_for_priority.shape) if self.prioritized_replay: if not pretrain: td = self.prioritized_replay_eps + 1*(actor_for_priority**2)[:int(self.batch_size*a),] + value_for_priority[:int(self.batch_size*a),] td_expert = self.prioritized_replay_eps + 1*(imitation_for_priority)[int(self.batch_size*a):,] + value_for_priority[int(self.batch_size*a):,] self.replay_buffer.update_priorities(batch_idx, td) self.expert_buffer.update_priorities(exp_batch_idx, td_expert) else: td = self.prioritized_replay_eps + 1*actor_for_priority**2 + value_for_priority self.expert_buffer.update_priorities(batch_idx, td) if self.log_ent_coef is not None: ent_coef_loss, ent_coef = values[-2:] return policy_loss, qf1_loss, qf2_loss, value_loss, entropy, ent_coef_loss, ent_coef,actor_loss_di,one_batch_r,exp_batch_rewards return policy_loss, qf1_loss, qf2_loss, value_loss, entropy,actor_loss_di,one_batch_r,exp_batch_rewards def learn(self, total_timesteps,pretrain_steps,mean_expert_reward, callback=None, log_interval=4, tb_log_name="SAC", reset_num_timesteps=True, replay_wrapper=None): new_tb_log = self._init_num_timesteps(reset_num_timesteps) callback = self._init_callback(callback) if replay_wrapper is not None: self.replay_buffer = replay_wrapper(self.replay_buffer) 
with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \ as writer: self._setup_learn() # Transform to callable if needed self.learning_rate = get_schedule_fn(self.learning_rate) self.step = 0 if pretrain_steps is not 0: self.pretrain_sac(pretrain_steps) # Initial learning rate current_lr = self.learning_rate(1) start_time = time.time() episode_rewards = [0.0] episode_successes = [] if self.action_noise is not None: self.action_noise.reset() obs = self.env.reset() # Retrieve unnormalized observation for saving into the buffer if self._vec_normalize_env is not None: obs_ = self._vec_normalize_env.get_original_obs().squeeze() n_updates = 0 buffer_n = deque(maxlen=self.n_step_length) infos_values = [] callback.on_training_start(locals(), globals()) callback.on_rollout_start() print("=====SAC Exploring=====") all_r = [] all_exp_r=[] all_r_step = [] all_exp_r_step=[] for step in range(total_timesteps): # Before training starts, randomly sample actions # from a uniform distribution for better exploration. # Afterwards, use the learned policy # if random_exploration is set to 0 (normal setting) if self.num_timesteps < self.learning_starts or np.random.rand() < self.random_exploration: # actions sampled from action space are from range specific to the environment # but algorithm operates on tanh-squashed actions therefore simple scaling is used unscaled_action = self.env.action_space.sample() action = scale_action(self.action_space, unscaled_action) else: action = self.policy_tf.step(obs[None], deterministic=False).flatten() # Add noise to the action (improve exploration, # not needed in general) if self.action_noise is not None: action = np.clip(action + self.action_noise(), -1, 1) # inferred actions need to be transformed to environment action_space before stepping unscaled_action = unscale_action(self.action_space, action) assert action.shape == self.env.action_space.shape new_obs, reward, done, info = self.env.step(unscaled_action) self.num_timesteps += 1 # Only stop training if return value is False, not when it is None. This is for backwards # compatibility with callbacks that have no return statement. if callback.on_step() is False: break # Store only the unnormalized version if self._vec_normalize_env is not None: new_obs_ = self._vec_normalize_env.get_original_obs().squeeze() reward_ = self._vec_normalize_env.get_original_reward().squeeze() else: # Avoid changing the original ones obs_, new_obs_, reward_ = obs, new_obs, reward if self.n_step: trans = (obs_, action, reward_, new_obs_, float(done)) buffer_n.append(trans) self.replay_N_buffer.add((obs_, action, reward_, new_obs_, float(done))) if len(buffer_n)==self.n_step_length: #self.replay_buffer.add(obs_, action, reward_, new_obs_, float(done),0) one_step = buffer_n[0] self.replay_buffer.add(one_step[0], one_step[1], one_step[2], one_step[3], float(one_step[4]),0) else: # Store transition in the replay buffer. 
self.replay_buffer.add(obs_, action, reward_, new_obs_, float(done),0) obs = new_obs # Save the unnormalized observation if self._vec_normalize_env is not None: obs_ = new_obs_ # Retrieve reward and episode length if using Monitor wrapper maybe_ep_info = info.get('episode') if maybe_ep_info is not None: self.ep_info_buf.extend([maybe_ep_info]) if writer is not None: # Write reward per episode to tensorboard ep_reward = np.array([reward_]).reshape((1, -1)) ep_done = np.array([done]).reshape((1, -1)) tf_util.total_episode_reward_logger(self.episode_reward, ep_reward, ep_done, writer, self.num_timesteps) if step % self.train_freq == 0: callback.on_rollout_end() mb_infos_vals = [] # Update policy, critics and target networks for grad_step in range(self.gradient_steps): # Break if the warmup phase is not over # or if there are not enough samples in the replay buffer if not self.replay_buffer.can_sample(self.batch_size) \ or self.num_timesteps < self.learning_starts: break n_updates += 1 # Compute current learning_rate frac = 1.0 - step / total_timesteps current_lr = self.learning_rate(frac) # Update policy and critics (q functions) infoss = self._train_step(step, writer, current_lr) all_r.append(np.sum(infoss[-2])) all_exp_r.append(np.sum(infoss[-1])) all_r_step.append(infoss[-2].shape[0]) all_exp_r_step.append(infoss[-1].shape[0]) mb_infos_vals.append(infoss[:-2]) # Update target network if (step + grad_step) % self.target_update_interval == 0: # Update target network self.sess.run(self.target_update_op) # Log losses and entropy, useful for monitor training if len(mb_infos_vals) > 0: infos_values = np.mean(mb_infos_vals, axis=0) callback.on_rollout_start() if step % self.update_buffer_interval ==0 and step>self.learning_starts: mean_agent = sum(all_r)/sum(all_r_step) mean_exp = sum(all_exp_r)/sum(all_exp_r_step) add_r = mean_agent>mean_exp-0.5 all_r = [] all_exp_r = [] all_r_step = [] all_exp_r_step = [] if add_r: self.ratio = min(self.ratio+2/self.batch_size,self.max_ratio) else: self.ratio = max(self.ratio-1/self.batch_size,self.init_ratio) print('|new-ratio:',self.ratio,'|mean-agent:',mean_agent,'|mean-exp:',mean_exp-0.5,'|') smry = tf.Summary(value=[tf.Summary.Value(tag="ratio", simple_value=self.ratio)]) writer.add_summary(smry,step) episode_rewards[-1] += reward_ if done: if self.action_noise is not None: self.action_noise.reset() if not isinstance(self.env, VecEnv): obs = self.env.reset() #if episode_rewards[-1] >= mean_expert_reward: # self.ratio = np.clip((self.ratio+1/self.batch_size),0,60/self.batch_s episode_rewards.append(0.0) maybe_is_success = info.get('is_success') if maybe_is_success is not None: episode_successes.append(float(maybe_is_success)) if len(episode_rewards[-101:-1]) == 0: mean_reward = -np.inf else: mean_reward = round(float(np.mean(episode_rewards[-101:-1])), 1) num_episodes = len(episode_rewards) # Display training infos if self.verbose >= 1 and done and log_interval is not None and len(episode_rewards) % log_interval == 0: fps = int(step / (time.time() - start_time)) logger.logkv("episodes", num_episodes) logger.logkv("mean 100 episode reward", mean_reward) if len(self.ep_info_buf) > 0 and len(self.ep_info_buf[0]) > 0: logger.logkv('ep_rewmean', safe_mean([ep_info['r'] for ep_info in self.ep_info_buf])) logger.logkv('eplenmean', safe_mean([ep_info['l'] for ep_info in self.ep_info_buf])) logger.logkv("n_updates", n_updates) logger.logkv("current_lr", current_lr) logger.logkv("fps", fps) logger.logkv('time_elapsed', int(time.time() - start_time)) if 
len(episode_successes) > 0: logger.logkv("success rate", np.mean(episode_successes[-100:])) if len(infos_values) > 0: for (name, val) in zip(self.infos_names, infos_values): logger.logkv(name, val) logger.logkv("total timesteps", self.num_timesteps) logger.dumpkvs() # Reset infos: infos_values = [] callback.on_training_end() return self def action_probability(self, observation, state=None, mask=None, actions=None, logp=False): if actions is not None: raise ValueError("Error: SAC does not have action probabilities.") warnings.warn("Even though SAC has a Gaussian policy, it cannot return a distribution as it " "is squashed by a tanh before being scaled and outputed.") return None def predict(self, observation, state=None, mask=None, deterministic=True): observation = np.array(observation) vectorized_env = self._is_vectorized_observation(observation, self.observation_space) observation = observation.reshape((-1,) + self.observation_space.shape) actions = self.policy_tf.step(observation, deterministic=deterministic) actions = actions.reshape((-1,) + self.action_space.shape) # reshape to the correct action shape actions = unscale_action(self.action_space, actions) # scale the output for the prediction if not vectorized_env: actions = actions[0] return actions, None def get_parameter_list(self): return (self.params + self.target_params) def save(self, save_path, cloudpickle=False): data = { "learning_rate": self.learning_rate, "buffer_size": self.buffer_size, "learning_starts": self.learning_starts, "train_freq": self.train_freq, "batch_size": self.batch_size, "tau": self.tau, "ent_coef": self.ent_coef if isinstance(self.ent_coef, float) else 'auto', "target_entropy": self.target_entropy, # Should we also store the replay buffer? # this may lead to high memory usage # with all transition inside # "replay_buffer": self.replay_buffer "gamma": self.gamma, "verbose": self.verbose, "observation_space": self.observation_space, "action_space": self.action_space, "policy": self.policy, "n_envs": self.n_envs, "n_cpu_tf_sess": self.n_cpu_tf_sess, "seed": self.seed, "action_noise": self.action_noise, "random_exploration": self.random_exploration, "_vectorize_action": self._vectorize_action, "policy_kwargs": self.policy_kwargs, "prioritized_replay": self.prioritized_replay, "prioritized_replay_eps": self.prioritized_replay_eps, "prioritized_replay_alpha": self.prioritized_replay_alpha, "prioritized_replay_beta0": self.prioritized_replay_beta0, "prioritized_replay_beta_iters": self.prioritized_replay_beta_iters } params_to_save = self.get_parameters() self._save_to_file(save_path, data=data, params=params_to_save, cloudpickle=cloudpickle)
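# --- Usage sketch (added for illustration, not part of the original file) ---
# A minimal, hedged example of how this demonstration-aided SAC variant might
# be driven, assuming a Gym-style image-observation environment `env` and
# pre-loaded expert trajectories; `expert_obs`, `expert_actions`,
# `expert_rewards` and `expert_dones` are hypothetical names not defined here:
#
# from stable_baselines.sac.policies import CnnPolicy
#
# model = SAC(CnnPolicy, env, batch_size=64, prioritized_replay=True,
#             n_step=True, ratio=0.75, max_ratio=0.9)
# model.initializeExpertBuffer(expert_obs, len(expert_obs),
#                              expert_actions, expert_rewards, expert_dones)
# model.learn(total_timesteps=100000, pretrain_steps=1000,
#             mean_expert_reward=0.0)
# model.save("sac_dqfd")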
[ "tensorflow.contrib.layers.apply_regularization", "tensorflow.contrib.layers.l1_l2_regularizer", "tensorflow.control_dependencies", "tensorflow.minimum", "tensorflow.cast", "numpy.mean", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "tensorflow.Graph", "numpy.ones_like", "numpy.reshape", "tensorflow.stop_gradient", "tensorflow.square", "tensorflow.trainable_variables", "numpy.log", "tensorflow.placeholder", "tensorflow.exp", "tensorflow.global_variables_initializer", "tensorflow.summary.merge_all", "numpy.random.rand", "numpy.array", "numpy.sum", "tensorflow.reduce_mean", "tensorflow.assign", "tensorflow.Summary.Value", "numpy.prod", "tensorflow.variable_scope", "numpy.vstack" ]
sac.py
[(147, 'stable_baselines.common.math_util.unscale_action', 'unscale_action', (['self.action_space', 'self.deterministic_action'], {}), False, 'from stable_baselines.common.math_util import safe_mean, unscale_action, scale_action\n'), (818, 'warnings.warn', 'warnings.warn', (['"""Even though SAC has a Gaussian policy, it cannot return a distribution as it is squashed by a tanh before being scaled and outputed."""'], {}), False, 'import warnings\n'), (824, 'numpy.array', 'np.array', (['observation'], {}), True, 'import numpy as np\n'), (830, 'stable_baselines.common.math_util.unscale_action', 'unscale_action', (['self.action_space', 'actions'], {}), False, 'from stable_baselines.common.math_util import safe_mean, unscale_action, scale_action\n'), (156, 'stable_baselines.common.DQFD_buffers.PrioritizedReplayBuffer', 'PrioritizedReplayBuffer', (['obs_len'], {'alpha': 'self.prioritized_replay_alpha'}), False, 'from stable_baselines.common.DQFD_buffers import ReplayBuffer, PrioritizedReplayBuffer, NStepTransitionBuffer\n'), (161, 'stable_baselines.common.schedules.LinearSchedule', 'LinearSchedule', (['prioritized_replay_beta_iters'], {'initial_p': 'self.prioritized_replay_beta0', 'final_p': '(1.0)'}), False, 'from stable_baselines.common.schedules import LinearSchedule\n'), (165, 'stable_baselines.common.DQFD_buffers.ReplayBuffer', 'ReplayBuffer', (['obs_len'], {}), False, 'from stable_baselines.common.DQFD_buffers import ReplayBuffer, PrioritizedReplayBuffer, NStepTransitionBuffer\n'), (168, 'collections.deque', 'deque', ([], {'maxlen': 'self.n_step_length'}), False, 'from collections import deque\n'), (169, 'stable_baselines.common.DQFD_buffers.NStepTransitionBuffer', 'NStepTransitionBuffer', (['obs_len'], {'n_step': 'self.n_step_length', 'gamma': 'self.gamma'}), False, 'from stable_baselines.common.DQFD_buffers import ReplayBuffer, PrioritizedReplayBuffer, NStepTransitionBuffer\n'), (173, 'numpy.reshape', 'np.reshape', (['obs', '(64, 64, 3)'], {}), True, 'import numpy as np\n'), (174, 'numpy.reshape', 'np.reshape', (['obs_', '(64, 64, 3)'], {}), True, 'import numpy as np\n'), (191, 'stable_baselines.common.SetVerbosity', 'SetVerbosity', (['self.verbose'], {}), False, 'from stable_baselines.common import tf_util, OffPolicyRLModel, SetVerbosity, TensorboardWriter\n'), (192, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (498, 'numpy.vstack', 'np.vstack', (['(batch_obs, exp_batch_obs)'], {}), True, 'import numpy as np\n'), (499, 'numpy.vstack', 'np.vstack', (['(batch_actions, exp_batch_actions)'], {}), True, 'import numpy as np\n'), (500, 'numpy.vstack', 'np.vstack', (['(batch_rewards, exp_batch_rewards)'], {}), True, 'import numpy as np\n'), (501, 'numpy.vstack', 'np.vstack', (['(batch_next_obs, exp_batch_next_obs)'], {}), True, 'import numpy as np\n'), (502, 'numpy.vstack', 'np.vstack', (['(batch_dones, exp_batch_dones)'], {}), True, 'import numpy as np\n'), (503, 'numpy.vstack', 'np.vstack', (['(batch_demos, exp_demos)'], {}), True, 'import numpy as np\n'), (504, 'numpy.vstack', 'np.vstack', (['(weight, exp_weight)'], {}), True, 'import numpy as np\n'), (617, 'stable_baselines.common.SetVerbosity', 'SetVerbosity', (['self.verbose'], {}), False, 'from stable_baselines.common import tf_util, OffPolicyRLModel, SetVerbosity, TensorboardWriter\n'), (617, 'stable_baselines.common.TensorboardWriter', 'TensorboardWriter', (['self.graph', 'self.tensorboard_log', 'tb_log_name', 'new_tb_log'], {}), False, 'from stable_baselines.common import tf_util, OffPolicyRLModel, 
SetVerbosity, TensorboardWriter\n'), (623, 'stable_baselines.common.schedules.get_schedule_fn', 'get_schedule_fn', (['self.learning_rate'], {}), False, 'from stable_baselines.common.schedules import get_schedule_fn\n'), (631, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (642, 'collections.deque', 'deque', ([], {'maxlen': 'self.n_step_length'}), False, 'from collections import deque\n'), (195, 'stable_baselines.common.tf_util.make_session', 'tf_util.make_session', ([], {'num_cpu': 'self.n_cpu_tf_sess', 'graph': 'self.graph'}), False, 'from stable_baselines.common import tf_util, OffPolicyRLModel, SetVerbosity, TensorboardWriter\n'), (433, 'stable_baselines.common.tf_util.get_trainable_vars', 'tf_util.get_trainable_vars', (['"""model"""'], {}), False, 'from stable_baselines.common import tf_util, OffPolicyRLModel, SetVerbosity, TensorboardWriter\n'), (434, 'stable_baselines.common.tf_util.get_trainable_vars', 'tf_util.get_trainable_vars', (['"""target/values_fn/vf"""'], {}), False, 'from stable_baselines.common import tf_util, OffPolicyRLModel, SetVerbosity, TensorboardWriter\n'), (441, 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), True, 'import tensorflow as tf\n'), (466, 'numpy.ones_like', 'np.ones_like', (['batch_rewards'], {}), True, 'import numpy as np\n'), (480, 'numpy.ones_like', 'np.ones_like', (['exp_batch_rewards'], {}), True, 'import numpy as np\n'), (516, 'numpy.vstack', 'np.vstack', (['(nbatch_rewards, ex_nbatch_rewards)'], {}), True, 'import numpy as np\n'), (517, 'numpy.vstack', 'np.vstack', (['(nbatch_next_obs, ex_nbatch_next_obs)'], {}), True, 'import numpy as np\n'), (518, 'numpy.vstack', 'np.vstack', (['(nbatch_dones, ex_nbatch_dones)'], {}), True, 'import numpy as np\n'), (526, 'numpy.ones_like', 'np.ones_like', (['batch_rewards'], {}), True, 'import numpy as np\n'), (197, 'stable_baselines.common.DQFD_buffers.PrioritizedReplayBuffer', 'PrioritizedReplayBuffer', (['self.buffer_size'], {'alpha': 'self.prioritized_replay_alpha'}), False, 'from stable_baselines.common.DQFD_buffers import ReplayBuffer, PrioritizedReplayBuffer, NStepTransitionBuffer\n'), (202, 'stable_baselines.common.schedules.LinearSchedule', 'LinearSchedule', (['prioritized_replay_beta_iters'], {'initial_p': 'self.prioritized_replay_beta0', 'final_p': '(1.0)'}), False, 'from stable_baselines.common.schedules import LinearSchedule\n'), (206, 'stable_baselines.common.DQFD_buffers.ReplayBuffer', 'ReplayBuffer', (['self.buffer_size'], {}), False, 'from stable_baselines.common.DQFD_buffers import ReplayBuffer, PrioritizedReplayBuffer, NStepTransitionBuffer\n'), (210, 'stable_baselines.common.DQFD_buffers.NStepTransitionBuffer', 'NStepTransitionBuffer', (['self.buffer_size', 'self.n_step_length', 'self.gamma'], {}), False, 'from stable_baselines.common.DQFD_buffers import ReplayBuffer, PrioritizedReplayBuffer, NStepTransitionBuffer\n'), (212, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""input"""'], {'reuse': '(False)'}), True, 'import tensorflow as tf\n'), (226, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, 1)', 'name': '"""terminals"""'}), True, 'import tensorflow as tf\n'), (227, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, 1)', 'name': '"""rewards"""'}), True, 'import tensorflow as tf\n'), (228, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, 1)', 'name': '"""is_demonstrations"""'}), True, 'import tensorflow as tf\n'), (229, 'tensorflow.placeholder', 'tf.placeholder', 
(['tf.float32'], {'shape': '(None, 1)', 'name': '"""importance_weight"""'}), True, 'import tensorflow as tf\n'), (230, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '((None,) + self.action_space.shape)', 'name': '"""actions"""'}), True, 'import tensorflow as tf\n'), (232, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[]'], {'name': '"""learning_rate_ph"""'}), True, 'import tensorflow as tf\n'), (241, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""model"""'], {'reuse': '(False)'}), True, 'import tensorflow as tf\n'), (249, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.policy_tf.entropy'], {}), True, 'import tensorflow as tf\n'), (291, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""target"""'], {'reuse': '(False)'}), True, 'import tensorflow as tf\n'), (301, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""loss"""'], {'reuse': '(False)'}), True, 'import tensorflow as tf\n'), (303, 'tensorflow.minimum', 'tf.minimum', (['qf1_pi', 'qf2_pi'], {}), True, 'import tensorflow as tf\n'), (306, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['(self.rewards_ph + (1 - self.terminals_ph) * self.gamma * self.value_target)'], {}), True, 'import tensorflow as tf\n'), (314, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['((q_backup - qf1) ** 2)', '(1)'], {}), True, 'import tensorflow as tf\n'), (337, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['((self.ent_coef * logp_pi - min_qf_pi) * self.weight_ph)'], {}), True, 'import tensorflow as tf\n'), (338, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(self.ent_coef * logp_pi - min_qf_pi)', '(1)'], {}), True, 'import tensorflow as tf\n'), (343, 'tensorflow.minimum', 'tf.minimum', (['dtm_qf1', 'dtm_qf2'], {}), True, 'import tensorflow as tf\n'), (345, 'tensorflow.cast', 'tf.cast', (['((qf1 > min_q) | (qf2 > min_q))', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (352, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['im_loss1'], {}), True, 'import tensorflow as tf\n'), (353, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['im_loss1'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (354, 'tensorflow.contrib.layers.l1_l2_regularizer', 'tf.contrib.layers.l1_l2_regularizer', ([], {'scale_l1': '(0.0)', 'scale_l2': '(1e-05)', 'scope': '"""model/pi"""'}), True, 'import tensorflow as tf\n'), (355, 'tensorflow.trainable_variables', 'tf.trainable_variables', (['"""model/pi"""'], {}), True, 'import tensorflow as tf\n'), (356, 'tensorflow.contrib.layers.apply_regularization', 'tf.contrib.layers.apply_regularization', (['regularizerpi', 'all_trainable_weights_pi'], {}), True, 'import tensorflow as tf\n'), (364, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['(min_qf_pi - self.ent_coef * logp_pi)'], {}), True, 'import tensorflow as tf\n'), (368, 'tensorflow.contrib.layers.l1_l2_regularizer', 'tf.contrib.layers.l1_l2_regularizer', ([], {'scale_l1': '(0.0)', 'scale_l2': '(1e-05)', 'scope': '"""model/values_fn"""'}), True, 'import tensorflow as tf\n'), (369, 'stable_baselines.common.tf_util.get_trainable_vars', 'tf_util.get_trainable_vars', (['"""model/values_fn"""'], {}), False, 'from stable_baselines.common import tf_util, OffPolicyRLModel, SetVerbosity, TensorboardWriter\n'), (370, 'tensorflow.contrib.layers.apply_regularization', 'tf.contrib.layers.apply_regularization', (['regularizervf', 'all_trainable_weights_vf'], {}), True, 'import tensorflow as tf\n'), (379, 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.learning_rate_ph'}), True, 'import 
tensorflow as tf\n'), (383, 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.learning_rate_ph'}), True, 'import tensorflow as tf\n'), (384, 'stable_baselines.common.tf_util.get_trainable_vars', 'tf_util.get_trainable_vars', (['"""model/values_fn"""'], {}), False, 'from stable_baselines.common import tf_util, OffPolicyRLModel, SetVerbosity, TensorboardWriter\n'), (386, 'stable_baselines.common.tf_util.get_trainable_vars', 'tf_util.get_trainable_vars', (['"""model/values_fn/vf"""'], {}), False, 'from stable_baselines.common import tf_util, OffPolicyRLModel, SetVerbosity, TensorboardWriter\n'), (387, 'stable_baselines.common.tf_util.get_trainable_vars', 'tf_util.get_trainable_vars', (['"""target/values_fn/vf"""'], {}), False, 'from stable_baselines.common import tf_util, OffPolicyRLModel, SetVerbosity, TensorboardWriter\n'), (419, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""policy_loss"""', 'policy_loss'], {}), True, 'import tensorflow as tf\n'), (420, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""qf1_loss"""', 'qf1_loss'], {}), True, 'import tensorflow as tf\n'), (421, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""qf2_loss"""', 'qf2_loss'], {}), True, 'import tensorflow as tf\n'), (422, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""value_loss"""', 'value_loss'], {}), True, 'import tensorflow as tf\n'), (423, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Imitation_loss"""', 'self.actor_loss_di'], {}), True, 'import tensorflow as tf\n'), (424, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""entropy"""', 'self.entropy'], {}), True, 'import tensorflow as tf\n'), (661, 'stable_baselines.common.math_util.scale_action', 'scale_action', (['self.action_space', 'unscaled_action'], {}), False, 'from stable_baselines.common.math_util import safe_mean, unscale_action, scale_action\n'), (669, 'stable_baselines.common.math_util.unscale_action', 'unscale_action', (['self.action_space', 'action'], {}), False, 'from stable_baselines.common.math_util import safe_mean, unscale_action, scale_action\n'), (717, 'stable_baselines.common.tf_util.total_episode_reward_logger', 'tf_util.total_episode_reward_logger', (['self.episode_reward', 'ep_reward', 'ep_done', 'writer', 'self.num_timesteps'], {}), False, 'from stable_baselines.common import tf_util, OffPolicyRLModel, SetVerbosity, TensorboardWriter\n'), (793, 'stable_baselines.logger.logkv', 'logger.logkv', (['"""episodes"""', 'num_episodes'], {}), False, 'from stable_baselines import logger\n'), (794, 'stable_baselines.logger.logkv', 'logger.logkv', (['"""mean 100 episode reward"""', 'mean_reward'], {}), False, 'from stable_baselines import logger\n'), (798, 'stable_baselines.logger.logkv', 'logger.logkv', (['"""n_updates"""', 'n_updates'], {}), False, 'from stable_baselines import logger\n'), (799, 'stable_baselines.logger.logkv', 'logger.logkv', (['"""current_lr"""', 'current_lr'], {}), False, 'from stable_baselines import logger\n'), (800, 'stable_baselines.logger.logkv', 'logger.logkv', (['"""fps"""', 'fps'], {}), False, 'from stable_baselines import logger\n'), (807, 'stable_baselines.logger.logkv', 'logger.logkv', (['"""total timesteps"""', 'self.num_timesteps'], {}), False, 'from stable_baselines import logger\n'), (808, 'stable_baselines.logger.dumpkvs', 'logger.dumpkvs', ([], {}), False, 'from stable_baselines import logger\n'), (236, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, 1)', 'name': '"""n_step_rewards"""'}), True, 
'import tensorflow as tf\n'), (237, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, 1)', 'name': '"""n_step_terminals"""'}), True, 'import tensorflow as tf\n'), (284, 'tensorflow.exp', 'tf.exp', (['self.log_ent_coef'], {}), True, 'import tensorflow as tf\n'), (313, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['((q_backup - qf1) ** 2 * self.weight_ph)'], {}), True, 'import tensorflow as tf\n'), (315, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['((q_backup - qf2) ** 2 * self.weight_ph)'], {}), True, 'import tensorflow as tf\n'), (317, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['(self.rewards_ph_n + (1 - self.terminals_ph_n) * self.gamma ** self.\n n_step_length * self.value_target_n)'], {}), True, 'import tensorflow as tf\n'), (321, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['((q_backup_n - qf1) ** 2)', '(1)'], {}), True, 'import tensorflow as tf\n'), (333, 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.learning_rate_ph'}), True, 'import tensorflow as tf\n'), (365, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['((value_fn - v_backup) ** 2 * self.weight_ph)'], {}), True, 'import tensorflow as tf\n'), (391, 'tensorflow.assign', 'tf.assign', (['target', '((1 - self.tau) * target + self.tau * source)'], {}), True, 'import tensorflow as tf\n'), (396, 'tensorflow.assign', 'tf.assign', (['target', 'source'], {}), True, 'import tensorflow as tf\n'), (402, 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[policy_train_op]'], {}), True, 'import tensorflow as tf\n'), (425, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.weight_ph'], {}), True, 'import tensorflow as tf\n'), (427, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""ent_coef_loss"""', 'ent_coef_loss'], {}), True, 'import tensorflow as tf\n'), (428, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""ent_coef"""', 'self.ent_coef'], {}), True, 'import tensorflow as tf\n'), (430, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.learning_rate_ph'], {}), True, 'import tensorflow as tf\n'), (438, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (657, 'numpy.random.rand', 'np.random.rand', ([], {}), True, 'import numpy as np\n'), (748, 'numpy.mean', 'np.mean', (['mb_infos_vals'], {'axis': '(0)'}), True, 'import numpy as np\n'), (320, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['((q_backup_n - qf1) ** 2 * self.weight_ph)'], {}), True, 'import tensorflow as tf\n'), (322, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['((q_backup_n - qf2) ** 2 * self.weight_ph)'], {}), True, 'import tensorflow as tf\n'), (348, 'tensorflow.square', 'tf.square', (['(self.actions_ph - self.deterministic_actions_ph)'], {}), True, 'import tensorflow as tf\n'), (380, 'stable_baselines.common.tf_util.get_trainable_vars', 'tf_util.get_trainable_vars', (['"""model/pi"""'], {}), False, 'from stable_baselines.common import tf_util, OffPolicyRLModel, SetVerbosity, TensorboardWriter\n'), (715, 'numpy.array', 'np.array', (['[reward_]'], {}), True, 'import numpy as np\n'), (716, 'numpy.array', 'np.array', (['[done]'], {}), True, 'import numpy as np\n'), (737, 'numpy.sum', 'np.sum', (['infoss[-2]'], {}), True, 'import numpy as np\n'), (738, 'numpy.sum', 'np.sum', (['infoss[-1]'], {}), True, 'import numpy as np\n'), (787, 'numpy.mean', 'np.mean', (['episode_rewards[-101:-1]'], {}), True, 'import numpy as np\n'), (796, 'stable_baselines.common.math_util.safe_mean', 'safe_mean', 
(["[ep_info['r'] for ep_info in self.ep_info_buf]"], {}), False, 'from stable_baselines.common.math_util import safe_mean, unscale_action, scale_action\n'), (797, 'stable_baselines.common.math_util.safe_mean', 'safe_mean', (["[ep_info['l'] for ep_info in self.ep_info_buf]"], {}), False, 'from stable_baselines.common.math_util import safe_mean, unscale_action, scale_action\n'), (803, 'numpy.mean', 'np.mean', (['episode_successes[-100:]'], {}), True, 'import numpy as np\n'), (806, 'stable_baselines.logger.logkv', 'logger.logkv', (['name', 'val'], {}), False, 'from stable_baselines import logger\n'), (413, 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[train_values_op]'], {}), True, 'import tensorflow as tf\n'), (766, 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""ratio"""', 'simple_value': 'self.ratio'}), True, 'import tensorflow as tf\n'), (792, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (801, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (266, 'numpy.prod', 'np.prod', (['self.env.action_space.shape'], {}), True, 'import numpy as np\n'), (283, 'numpy.log', 'np.log', (['init_value'], {}), True, 'import numpy as np\n'), (332, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['(logp_pi + self.target_entropy)'], {}), True, 'import tensorflow as tf\n')]
huzongxiang/CrystalNetwork
a434f76fa4347d42b3c905852ce265cd0bcefca3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec  7 13:26:59 2021

@author: huzongxiang
source code from tensorflow_graphics
"""

import numpy as np
import tensorflow as tf
from typing import Tuple
from .tensor import TensorLike


def _double_factorial_loop_body(n, result, two):
    result = tf.where(tf.greater_equal(n, two), result * n, result)
    return n - two, result, two


def _double_factorial_loop_condition(n, result, two):
    return tf.cast(tf.math.count_nonzero(tf.greater_equal(n, two)), tf.bool)


def double_factorial(n: TensorLike) -> TensorLike:
    n = tf.convert_to_tensor(value=n)
    two = tf.ones_like(n) * 2
    result = tf.ones_like(n)
    _, result, _ = tf.while_loop(
        cond=_double_factorial_loop_condition,
        body=_double_factorial_loop_body,
        loop_vars=[n, result, two])
    return result


def factorial(n: TensorLike) -> TensorLike:
    n = tf.convert_to_tensor(value=n)
    return tf.exp(tf.math.lgamma(n + 1))


def generate_l_m_permutations(
        max_band: int,
        name: str = "spherical_harmonics_generate_l_m_permutations") -> Tuple[TensorLike, TensorLike]:
    with tf.name_scope(name):
        degree_l = []
        order_m = []
        for degree in range(0, max_band + 1):
            for order in range(-degree, degree + 1):
                degree_l.append(degree)
                order_m.append(order)
        return (tf.convert_to_tensor(value=degree_l),
                tf.convert_to_tensor(value=order_m))


def _evaluate_legendre_polynomial_pmm_eval(m, x):
    pmm = tf.pow(1.0 - tf.pow(x, 2.0), tf.cast(m, dtype=x.dtype) / 2.0)
    ones = tf.ones_like(m)
    pmm *= tf.cast(
        tf.pow(-ones, m) * double_factorial(2 * m - 1),
        dtype=pmm.dtype)
    return pmm


def _evaluate_legendre_polynomial_loop_cond(x, n, l, m, pmm, pmm1):
    return tf.cast(tf.math.count_nonzero(n <= l), tf.bool)


def _evaluate_legendre_polynomial_loop_body(x, n, l, m, pmm, pmm1):
    n_float = tf.cast(n, dtype=x.dtype)
    m_float = tf.cast(m, dtype=x.dtype)
    pmn = (x * (2.0 * n_float - 1.0) * pmm1 - (n_float + m_float - 1) * pmm) / (
        n_float - m_float)
    pmm = tf.where(tf.less_equal(n, l), pmm1, pmm)
    pmm1 = tf.where(tf.less_equal(n, l), pmn, pmm1)
    n += 1
    return x, n, l, m, pmm, pmm1


def _evaluate_legendre_polynomial_loop(x, m, l, pmm, pmm1):
    n = m + 2
    x, n, l, m, pmm, pmm1 = tf.while_loop(
        cond=_evaluate_legendre_polynomial_loop_cond,
        body=_evaluate_legendre_polynomial_loop_body,
        loop_vars=[x, n, l, m, pmm, pmm1])
    return pmm1


def _evaluate_legendre_polynomial_branch(l, m, x, pmm):
    pmm1 = x * (2.0 * tf.cast(m, dtype=x.dtype) + 1.0) * pmm
    # if l == m + 1 return pmm1, otherwise lift to the next band.
    res = tf.where(
        tf.equal(l, m + 1), pmm1,
        _evaluate_legendre_polynomial_loop(x, m, l, pmm, pmm1))
    return res


def evaluate_legendre_polynomial(degree_l: TensorLike,
                                 order_m: TensorLike,
                                 x: TensorLike) -> TensorLike:
    degree_l = tf.convert_to_tensor(value=degree_l)
    order_m = tf.convert_to_tensor(value=order_m)
    x = tf.convert_to_tensor(value=x)
    pmm = _evaluate_legendre_polynomial_pmm_eval(order_m, x)
    return tf.where(
        tf.equal(degree_l, order_m), pmm,
        _evaluate_legendre_polynomial_branch(degree_l, order_m, x, pmm))


def _spherical_harmonics_normalization(l, m, var_type=tf.float64):
    l = tf.cast(l, dtype=var_type)
    m = tf.cast(m, dtype=var_type)
    numerator = (2.0 * l + 1.0) * factorial(l - tf.abs(m))
    denominator = 4.0 * np.pi * factorial(l + tf.abs(m))
    return tf.sqrt(numerator / denominator)


def _evaluate_spherical_harmonics_branch(degree,
                                         order,
                                         theta,
                                         phi,
                                         sign_order,
                                         var_type=tf.float64):
    sqrt_2 = tf.constant(1.41421356237, dtype=var_type)
    order_float = tf.cast(order, dtype=var_type)
    tmp = sqrt_2 * _spherical_harmonics_normalization(
        degree, order, var_type) * evaluate_legendre_polynomial(
            degree, order, tf.cos(theta))
    positive = tmp * tf.cos(order_float * phi)
    negative = tmp * tf.sin(order_float * phi)
    return tf.where(tf.greater(sign_order, 0), positive, negative)


def evaluate_spherical_harmonics(
        degree_l: TensorLike,
        order_m: TensorLike,
        theta: TensorLike,
        phi: TensorLike,
        name: str = "spherical_harmonics_evaluate_spherical_harmonics") -> TensorLike:  # pylint: disable=line-too-long
    with tf.name_scope(name):
        degree_l = tf.convert_to_tensor(value=degree_l)
        order_m = tf.convert_to_tensor(value=order_m)
        theta = tf.convert_to_tensor(value=theta)
        phi = tf.convert_to_tensor(value=phi)
        var_type = theta.dtype
        sign_m = tf.math.sign(order_m)
        order_m = tf.abs(order_m)
        zeros = tf.zeros_like(order_m)
        result_m_zero = _spherical_harmonics_normalization(
            degree_l, zeros, var_type) * evaluate_legendre_polynomial(
                degree_l, zeros, tf.cos(theta))
        result_branch = _evaluate_spherical_harmonics_branch(
            degree_l, order_m, theta, phi, sign_m, var_type)
        return tf.where(tf.equal(order_m, zeros), result_m_zero, result_branch)
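# --- Usage sketch (added for illustration, not part of the original file) ---
# Evaluating the real spherical harmonics Y_l^m(theta, phi) for all bands up
# to max_band at one direction; the max_band and angle values below are
# arbitrary assumptions, and theta/phi are broadcast to match (l, m) pairs:
#
# degree_l, order_m = generate_l_m_permutations(max_band=2)
# ones = tf.ones_like(tf.cast(degree_l, tf.float64))
# theta = (np.pi / 3) * ones
# phi = (np.pi / 4) * ones
# sh = evaluate_spherical_harmonics(degree_l, order_m, theta, phi)
# # `sh` then holds Y_l^m for (l, m) = (0,0), (1,-1), (1,0), ..., (2,2).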
[ "tensorflow.convert_to_tensor", "tensorflow.math.sign", "tensorflow.cast", "tensorflow.equal", "tensorflow.math.lgamma", "tensorflow.while_loop", "tensorflow.greater", "tensorflow.name_scope", "tensorflow.pow", "tensorflow.less_equal", "tensorflow.zeros_like", "tensorflow.sin", "tensorflow.constant", "tensorflow.cos", "tensorflow.math.count_nonzero", "tensorflow.ones_like", "tensorflow.sqrt", "tensorflow.greater_equal", "tensorflow.abs" ]
matdgl/utils/spherical_harmonics.py
[(25, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', ([], {'value': 'n'}), True, 'import tensorflow as tf\n'), (28, 'tensorflow.ones_like', 'tf.ones_like', (['n'], {}), True, 'import tensorflow as tf\n'), (29, 'tensorflow.while_loop', 'tf.while_loop', ([], {'cond': '_double_factorial_loop_condition', 'body': '_double_factorial_loop_body', 'loop_vars': '[n, result, two]'}), True, 'import tensorflow as tf\n'), (37, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', ([], {'value': 'n'}), True, 'import tensorflow as tf\n'), (58, 'tensorflow.ones_like', 'tf.ones_like', (['m'], {}), True, 'import tensorflow as tf\n'), (70, 'tensorflow.cast', 'tf.cast', (['n'], {'dtype': 'x.dtype'}), True, 'import tensorflow as tf\n'), (71, 'tensorflow.cast', 'tf.cast', (['m'], {'dtype': 'x.dtype'}), True, 'import tensorflow as tf\n'), (82, 'tensorflow.while_loop', 'tf.while_loop', ([], {'cond': '_evaluate_legendre_polynomial_loop_cond', 'body': '_evaluate_legendre_polynomial_loop_body', 'loop_vars': '[x, n, l, m, pmm, pmm1]'}), True, 'import tensorflow as tf\n'), (101, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', ([], {'value': 'degree_l'}), True, 'import tensorflow as tf\n'), (102, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', ([], {'value': 'order_m'}), True, 'import tensorflow as tf\n'), (103, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', ([], {'value': 'x'}), True, 'import tensorflow as tf\n'), (112, 'tensorflow.cast', 'tf.cast', (['l'], {'dtype': 'var_type'}), True, 'import tensorflow as tf\n'), (113, 'tensorflow.cast', 'tf.cast', (['m'], {'dtype': 'var_type'}), True, 'import tensorflow as tf\n'), (116, 'tensorflow.sqrt', 'tf.sqrt', (['(numerator / denominator)'], {}), True, 'import tensorflow as tf\n'), (125, 'tensorflow.constant', 'tf.constant', (['(1.41421356237)'], {'dtype': 'var_type'}), True, 'import tensorflow as tf\n'), (126, 'tensorflow.cast', 'tf.cast', (['order'], {'dtype': 'var_type'}), True, 'import tensorflow as tf\n'), (16, 'tensorflow.greater_equal', 'tf.greater_equal', (['n', 'two'], {}), True, 'import tensorflow as tf\n'), (27, 'tensorflow.ones_like', 'tf.ones_like', (['n'], {}), True, 'import tensorflow as tf\n'), (39, 'tensorflow.math.lgamma', 'tf.math.lgamma', (['(n + 1)'], {}), True, 'import tensorflow as tf\n'), (45, 'tensorflow.name_scope', 'tf.name_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (66, 'tensorflow.math.count_nonzero', 'tf.math.count_nonzero', (['(n <= l)'], {}), True, 'import tensorflow as tf\n'), (74, 'tensorflow.less_equal', 'tf.less_equal', (['n', 'l'], {}), True, 'import tensorflow as tf\n'), (75, 'tensorflow.less_equal', 'tf.less_equal', (['n', 'l'], {}), True, 'import tensorflow as tf\n'), (93, 'tensorflow.equal', 'tf.equal', (['l', '(m + 1)'], {}), True, 'import tensorflow as tf\n'), (107, 'tensorflow.equal', 'tf.equal', (['degree_l', 'order_m'], {}), True, 'import tensorflow as tf\n'), (130, 'tensorflow.cos', 'tf.cos', (['(order_float * phi)'], {}), True, 'import tensorflow as tf\n'), (131, 'tensorflow.sin', 'tf.sin', (['(order_float * phi)'], {}), True, 'import tensorflow as tf\n'), (132, 'tensorflow.greater', 'tf.greater', (['sign_order', '(0)'], {}), True, 'import tensorflow as tf\n'), (142, 'tensorflow.name_scope', 'tf.name_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (143, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', ([], {'value': 'degree_l'}), True, 'import tensorflow as tf\n'), (144, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', ([], {'value': 'order_m'}), 
True, 'import tensorflow as tf\n'), (145, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', ([], {'value': 'theta'}), True, 'import tensorflow as tf\n'), (146, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', ([], {'value': 'phi'}), True, 'import tensorflow as tf\n'), (149, 'tensorflow.math.sign', 'tf.math.sign', (['order_m'], {}), True, 'import tensorflow as tf\n'), (150, 'tensorflow.abs', 'tf.abs', (['order_m'], {}), True, 'import tensorflow as tf\n'), (151, 'tensorflow.zeros_like', 'tf.zeros_like', (['order_m'], {}), True, 'import tensorflow as tf\n'), (21, 'tensorflow.greater_equal', 'tf.greater_equal', (['n', 'two'], {}), True, 'import tensorflow as tf\n'), (52, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', ([], {'value': 'degree_l'}), True, 'import tensorflow as tf\n'), (53, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', ([], {'value': 'order_m'}), True, 'import tensorflow as tf\n'), (57, 'tensorflow.pow', 'tf.pow', (['x', '(2.0)'], {}), True, 'import tensorflow as tf\n'), (57, 'tensorflow.cast', 'tf.cast', (['m'], {'dtype': 'x.dtype'}), True, 'import tensorflow as tf\n'), (60, 'tensorflow.pow', 'tf.pow', (['(-ones)', 'm'], {}), True, 'import tensorflow as tf\n'), (129, 'tensorflow.cos', 'tf.cos', (['theta'], {}), True, 'import tensorflow as tf\n'), (157, 'tensorflow.equal', 'tf.equal', (['order_m', 'zeros'], {}), True, 'import tensorflow as tf\n'), (114, 'tensorflow.abs', 'tf.abs', (['m'], {}), True, 'import tensorflow as tf\n'), (115, 'tensorflow.abs', 'tf.abs', (['m'], {}), True, 'import tensorflow as tf\n'), (154, 'tensorflow.cos', 'tf.cos', (['theta'], {}), True, 'import tensorflow as tf\n'), (90, 'tensorflow.cast', 'tf.cast', (['m'], {'dtype': 'x.dtype'}), True, 'import tensorflow as tf\n')]
alvinwan/deep-q-learning
fed9d6bad6d0388fb5a9b4fd8be0db812671ce65
""" Usage: run_dqn_atari.py [options] Options: --batch-size=<size> Batch size [default: 32] --envid=<envid> Environment id [default: SpaceInvadersNoFrameskip-v4] --model=(atari|simple|fesimple|random) Model to use for training [default: atari] --num-filters=<num> Number of output filters for simple model [default: 64] --timesteps=<steps> Number of timesteps to run [default: 40000000] --restore=<store> Checkpoint to restore network from --ckpt-dir=<dir> Directory contain checkpoint files [default: ./checkpoints] --learning-starts=<start> Timestep when learning starts [default: 200000] """ import docopt import dqn import gym import time import os import os.path as osp import random import numpy as np import tensorflow as tf import tensorflow.contrib.layers as layers from atari_wrappers import * from dqn_utils import * from gym import wrappers from tensorflow.contrib.layers.python.layers import initializers def atari_model(img_in, num_actions, scope, reuse=False): # as described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf with tf.variable_scope(scope, reuse=reuse): out = img_in with tf.variable_scope("convnet"): # original architecture out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu) out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu) out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu) out = layers.flatten(out) with tf.variable_scope("action_value"): out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu) out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None) return out def simple_model(img_in, num_actions, scope, reuse=False, num_filters=64): with tf.variable_scope(scope, reuse=reuse): out = img_in gauss_initializer = initializers.xavier_initializer(uniform=False) # stddev = 1/n with tf.variable_scope("convnet"): out = layers.convolution2d( out, num_outputs=num_filters, kernel_size=8, stride=4, activation_fn=tf.nn.relu, weights_initializer=gauss_initializer, trainable=False) out = layers.flatten(out) with tf.variable_scope("action_value"): out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None) return out def simple_model_w_feat_eng(img_in, num_actions, scope, reuse=False): with tf.variable_scope(scope, reuse=reuse): out = img_in out = layers.flatten(out) # stddev = 1/n, where n = number of inputs gauss_initializer = initializers.xavier_initializer(uniform=False) with tf.variable_scope("action_value"): out = layers.fully_connected( out, num_outputs=num_actions, activation_fn=tf.nn.relu, biases_initializer=None, weights_initializer=gauss_initializer, weights_regularizer=None) return out def atari_learn(env, session, num_timesteps, model, restore=None, checkpoint_dir='./checkpoints', batch_size=32, num_filters=64, learning_starts=200000): # This is just a rough estimate num_iterations = float(num_timesteps) / 4.0 learning_starts = int(learning_starts) / 4.0 lr_multiplier = 1.0 lr_schedule = PiecewiseSchedule([ (0, 1e-4 * lr_multiplier), (num_iterations / 10, 1e-4 * lr_multiplier), (num_iterations / 2, 5e-5 * lr_multiplier), ], outside_value=5e-5 * lr_multiplier) if model == 'fesimple': optimizer = dqn.OptimizerSpec( constructor=tf.train.GradientDescentOptimizer, kwargs=dict(), lr_schedule=lr_schedule ) else: optimizer = dqn.OptimizerSpec( constructor=tf.train.AdamOptimizer, kwargs=dict(epsilon=1e-4), lr_schedule=lr_schedule ) def 
stopping_criterion(env, t): # notice that here t is the number of steps of the wrapped env, # which is different from the number of steps in the underlying env return get_wrapper_by_name(env, "Monitor").get_total_steps() >= num_timesteps exploration_schedule = PiecewiseSchedule( [ (0, 1.0), (1e6, 0.1), (num_iterations / 2 if num_iterations > 1e6 else 1e9, 0.01), ], outside_value=0.01 ) if model == 'atari': q_func = atari_model elif model =='fesimple': q_func = simple_model_w_feat_eng else: q_func = lambda *args, **kwargs:\ simple_model(*args, num_filters=num_filters, **kwargs) save_path = dqn.learn( env, q_func=q_func, optimizer_spec=optimizer, session=session, exploration=exploration_schedule, stopping_criterion=stopping_criterion, replay_buffer_size=1000000, batch_size=batch_size, gamma=0.99, learning_starts=learning_starts, learning_freq=4, frame_history_len=4, target_update_freq=10000, grad_norm_clipping=10, restore=restore, checkpoint_dir=checkpoint_dir ) env.close() return save_path def get_available_gpus(): from tensorflow.python.client import device_lib local_device_protos = device_lib.list_local_devices() return [x.physical_device_desc for x in local_device_protos if x.device_type == 'GPU'] def set_global_seeds(i): try: import tensorflow as tf except ImportError: pass else: tf.set_random_seed(i) np.random.seed(i) random.seed(i) def get_session(): tf.reset_default_graph() tf_config = tf.ConfigProto( inter_op_parallelism_threads=1, intra_op_parallelism_threads=1) session = tf.Session(config=tf_config) print("AVAILABLE GPUS: ", get_available_gpus()) return session def get_env(env_id, seed): env = gym.make(env_id) set_global_seeds(seed) env.seed(seed) expt_dir = './tmp/hw3_vid_dir2/' env = wrappers.Monitor(env, osp.join(expt_dir, "gym"), force=True) env = wrap_deepmind(env) return env def get_custom_env(env_id, seed): env = gym.make(env_id) set_global_seeds(seed) env.seed(seed) expt_dir = './tmp/hw3_vid_dir2/' env = wrappers.Monitor(env, osp.join(expt_dir, "gym"), force=True) env = wrap_custom(env) return env def main(): arguments = docopt.docopt(__doc__) # Run training seed = 0 # Use a seed of zero (you may want to randomize the seed!) env = get_env(arguments['--envid'], seed) with get_session() as session: model = arguments['--model'].lower() num_filters = int(arguments['--num-filters']) batch_size = int(arguments['--batch-size']) print(' * [INFO] %s model (Filters: %d, Batch Size: %d)' % ( model, num_filters, batch_size)) save_path = atari_learn( env, session, num_timesteps=int(arguments['--timesteps']), num_filters=num_filters, model=model, batch_size=batch_size, restore=arguments['--restore'], checkpoint_dir=arguments['--ckpt-dir'], learning_starts=arguments['--learning-starts']) reader = tf.train.NewCheckpointReader(save_path) W = reader.get_tensor('q_func/action_value/fully_connected/weights') print('Largest entry:', np.linalg.norm(W, ord=np.inf)) print('Frobenius norm:', np.linalg.norm(W, ord='fro')) if __name__ == "__main__": main()
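

# For intuition: the learning-rate and exploration schedules built above are
# piecewise linear in the timestep. The sketch below is a hypothetical,
# self-contained equivalent, assuming the repo-local dqn_utils.PiecewiseSchedule
# interpolates linearly between its endpoints; it is not the repo's code.
def piecewise_linear_sketch(t, endpoints, outside_value):
    """Linearly interpolates between sorted (t, value) endpoints."""
    for (left_t, left_v), (right_t, right_v) in zip(endpoints, endpoints[1:]):
        if left_t <= t <= right_t:
            alpha = float(t - left_t) / (right_t - left_t)
            return left_v + alpha * (right_v - left_v)
    return outside_value  # t lies outside all endpoints, e.g. after annealing

# Example: with endpoints [(0, 1.0), (1e6, 0.1)] and outside_value 0.01, the
# exploration rate anneals from 1.0 to 0.1 over the first million steps and
# stays at 0.01 afterwards.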
[ "tensorflow.contrib.layers.convolution2d", "numpy.random.seed", "tensorflow.python.client.device_lib.list_local_devices", "tensorflow.contrib.layers.python.layers.initializers.xavier_initializer", "numpy.linalg.norm", "tensorflow.set_random_seed", "tensorflow.ConfigProto", "tensorflow.contrib.layers.fully_connected", "tensorflow.reset_default_graph", "tensorflow.contrib.layers.flatten", "tensorflow.train.NewCheckpointReader", "tensorflow.Session", "tensorflow.variable_scope" ]
run_dqn_atari.py
[(139, 'dqn.learn', 'dqn.learn', (['env'], {'q_func': 'q_func', 'optimizer_spec': 'optimizer', 'session': 'session', 'exploration': 'exploration_schedule', 'stopping_criterion': 'stopping_criterion', 'replay_buffer_size': '(1000000)', 'batch_size': 'batch_size', 'gamma': '(0.99)', 'learning_starts': 'learning_starts', 'learning_freq': '(4)', 'frame_history_len': '(4)', 'target_update_freq': '(10000)', 'grad_norm_clipping': '(10)', 'restore': 'restore', 'checkpoint_dir': 'checkpoint_dir'}), False, 'import dqn\n'), (163, 'tensorflow.python.client.device_lib.list_local_devices', 'device_lib.list_local_devices', ([], {}), False, 'from tensorflow.python.client import device_lib\n'), (174, 'numpy.random.seed', 'np.random.seed', (['i'], {}), True, 'import numpy as np\n'), (175, 'random.seed', 'random.seed', (['i'], {}), False, 'import random\n'), (179, 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), True, 'import tensorflow as tf\n'), (180, 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'inter_op_parallelism_threads': '(1)', 'intra_op_parallelism_threads': '(1)'}), True, 'import tensorflow as tf\n'), (183, 'tensorflow.Session', 'tf.Session', ([], {'config': 'tf_config'}), True, 'import tensorflow as tf\n'), (189, 'gym.make', 'gym.make', (['env_id'], {}), False, 'import gym\n'), (202, 'gym.make', 'gym.make', (['env_id'], {}), False, 'import gym\n'), (215, 'docopt.docopt', 'docopt.docopt', (['__doc__'], {}), False, 'import docopt\n'), (36, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {'reuse': 'reuse'}), True, 'import tensorflow as tf\n'), (43, 'tensorflow.contrib.layers.flatten', 'layers.flatten', (['out'], {}), True, 'import tensorflow.contrib.layers as layers\n'), (52, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {'reuse': 'reuse'}), True, 'import tensorflow as tf\n'), (54, 'tensorflow.contrib.layers.python.layers.initializers.xavier_initializer', 'initializers.xavier_initializer', ([], {'uniform': '(False)'}), False, 'from tensorflow.contrib.layers.python.layers import initializers\n'), (60, 'tensorflow.contrib.layers.flatten', 'layers.flatten', (['out'], {}), True, 'import tensorflow.contrib.layers as layers\n'), (68, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {'reuse': 'reuse'}), True, 'import tensorflow as tf\n'), (70, 'tensorflow.contrib.layers.flatten', 'layers.flatten', (['out'], {}), True, 'import tensorflow.contrib.layers as layers\n'), (72, 'tensorflow.contrib.layers.python.layers.initializers.xavier_initializer', 'initializers.xavier_initializer', ([], {'uniform': '(False)'}), False, 'from tensorflow.contrib.layers.python.layers import initializers\n'), (173, 'tensorflow.set_random_seed', 'tf.set_random_seed', (['i'], {}), True, 'import tensorflow as tf\n'), (195, 'os.path.join', 'osp.join', (['expt_dir', '"""gym"""'], {}), True, 'import os.path as osp\n'), (208, 'os.path.join', 'osp.join', (['expt_dir', '"""gym"""'], {}), True, 'import os.path as osp\n'), (238, 'tensorflow.train.NewCheckpointReader', 'tf.train.NewCheckpointReader', (['save_path'], {}), True, 'import tensorflow as tf\n'), (38, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""convnet"""'], {}), True, 'import tensorflow as tf\n'), (40, 'tensorflow.contrib.layers.convolution2d', 'layers.convolution2d', (['out'], {'num_outputs': '(32)', 'kernel_size': '(8)', 'stride': '(4)', 'activation_fn': 'tf.nn.relu'}), True, 'import tensorflow.contrib.layers as layers\n'), (41, 'tensorflow.contrib.layers.convolution2d', 'layers.convolution2d', 
(['out'], {'num_outputs': '(64)', 'kernel_size': '(4)', 'stride': '(2)', 'activation_fn': 'tf.nn.relu'}), True, 'import tensorflow.contrib.layers as layers\n'), (42, 'tensorflow.contrib.layers.convolution2d', 'layers.convolution2d', (['out'], {'num_outputs': '(64)', 'kernel_size': '(3)', 'stride': '(1)', 'activation_fn': 'tf.nn.relu'}), True, 'import tensorflow.contrib.layers as layers\n'), (44, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""action_value"""'], {}), True, 'import tensorflow as tf\n'), (45, 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', (['out'], {'num_outputs': '(512)', 'activation_fn': 'tf.nn.relu'}), True, 'import tensorflow.contrib.layers as layers\n'), (46, 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', (['out'], {'num_outputs': 'num_actions', 'activation_fn': 'None'}), True, 'import tensorflow.contrib.layers as layers\n'), (55, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""convnet"""'], {}), True, 'import tensorflow as tf\n'), (56, 'tensorflow.contrib.layers.convolution2d', 'layers.convolution2d', (['out'], {'num_outputs': 'num_filters', 'kernel_size': '(8)', 'stride': '(4)', 'activation_fn': 'tf.nn.relu', 'weights_initializer': 'gauss_initializer', 'trainable': '(False)'}), True, 'import tensorflow.contrib.layers as layers\n'), (61, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""action_value"""'], {}), True, 'import tensorflow as tf\n'), (62, 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', (['out'], {'num_outputs': 'num_actions', 'activation_fn': 'None'}), True, 'import tensorflow.contrib.layers as layers\n'), (73, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""action_value"""'], {}), True, 'import tensorflow as tf\n'), (74, 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', (['out'], {'num_outputs': 'num_actions', 'activation_fn': 'tf.nn.relu', 'biases_initializer': 'None', 'weights_initializer': 'gauss_initializer', 'weights_regularizer': 'None'}), True, 'import tensorflow.contrib.layers as layers\n'), (240, 'numpy.linalg.norm', 'np.linalg.norm', (['W'], {'ord': 'np.inf'}), True, 'import numpy as np\n'), (241, 'numpy.linalg.norm', 'np.linalg.norm', (['W'], {'ord': '"""fro"""'}), True, 'import numpy as np\n')]
helloyide/Cross-stitch-Networks-for-Multi-task-Learning
c07edb758aad7e0a2eb8da82e63105eae2ef77a4
import pickle from datetime import datetime import sys import argparse import numpy as np import tensorflow as tf import tensorflow.contrib as contrib from keras.utils import to_categorical def load_data(): with open("saved_data", "rb") as file: # data is a list with length 2000 # elements are { # 'image_path': str # 'gender': 'f'/'m' # 'age_young': bool # 'embedding': ndarray with shape (128,) dtype float64 # } data = np.array(pickle.load(file)) with open("saved_data_flip", "rb") as file: data_flip = np.array(pickle.load(file)) np.random.seed(1) random_index = np.random.permutation(len(data)) test_index = random_index[:200] train_index = random_index[200:] test = np.append(data[test_index], data_flip[test_index]) train = np.append(data[train_index], data_flip[train_index]) train_X = np.array([t["embedding"] for t in train]) test_X = np.array([t["embedding"] for t in test]) n_class_1 = 2 train_y_1 = [0 if t["gender"] == 'f' else 1 for t in train] test_y_1 = [0 if t["gender"] == 'f' else 1 for t in test] n_class_2 = 2 train_y_2 = [1 if t["age_young"] else 0 for t in train] test_y_2 = [1 if t["age_young"] else 0 for t in test] # train_X: (3600, 128) # train_y: (3600, n_class) # test_X: (400, 128) # test_y: (400, n_class) train_y_1 = to_categorical(train_y_1, n_class_1) test_y_1 = to_categorical(test_y_1, n_class_1) train_y_2 = to_categorical(train_y_2, n_class_2) test_y_2 = to_categorical(test_y_2, n_class_2) return train_X, train_y_1, train_y_2, test_X, test_y_1, test_y_2 def apply_cross_stitch(input1, input2): input1_reshaped = contrib.layers.flatten(input1) input2_reshaped = contrib.layers.flatten(input2) input = tf.concat((input1_reshaped, input2_reshaped), axis=1) # initialize with identity matrix cross_stitch = tf.get_variable("cross_stitch", shape=(input.shape[1], input.shape[1]), dtype=tf.float32, collections=['cross_stitches', tf.GraphKeys.GLOBAL_VARIABLES], initializer=tf.initializers.identity()) output = tf.matmul(input, cross_stitch) # need to call .value to convert Dimension objects to normal value input1_shape = list(-1 if s.value is None else s.value for s in input1.shape) input2_shape = list(-1 if s.value is None else s.value for s in input2.shape) output1 = tf.reshape(output[:, :input1_reshaped.shape[1]], shape=input1_shape) output2 = tf.reshape(output[:, input1_reshaped.shape[1]:], shape=input2_shape) return output1, output2 def main(args): train_X, train_y_1, train_y_2, test_X, test_y_1, test_y_2 = load_data() m = train_X.shape[0] n_output_1 = test_y_1.shape[1] n_output_2 = test_y_2.shape[1] lr = args.lr n_epoch = args.n_epoch n_batch_size = args.n_batch_size reg_lambda = args.reg_lambda keep_prob = args.keep_prob cross_stitch_enabled = args.cross_stitch_enabled with tf.variable_scope("placeholder"): X = tf.placeholder(tf.float32, (None, 128), "X") y_1 = tf.placeholder(tf.float32, (None, n_output_1), "y_1") y_2 = tf.placeholder(tf.float32, (None, n_output_2), "y_2") is_training = tf.placeholder(tf.bool, (), "is_training") with tf.variable_scope("network"): with contrib.framework.arg_scope( [contrib.layers.fully_connected], # he initialization weights_initializer=contrib.layers.variance_scaling_initializer(), # l2 regularization weights_regularizer=contrib.layers.l2_regularizer(reg_lambda), # BN normalizer_fn=contrib.layers.batch_norm, normalizer_params={ "is_training": is_training, "scale": True, "updates_collections": None } ): fc1_1 = contrib.layers.fully_connected(X, 32, scope="fc1_1") fc1_2 = contrib.layers.fully_connected(X, 32, scope="fc1_2") if cross_stitch_enabled: 
with tf.variable_scope("cross_stitch_1"): stitch1_1, stitch1_2 = apply_cross_stitch(fc1_1, fc1_2) else: stitch1_1, stitch1_2 = fc1_1, fc1_2 fc2_1 = contrib.layers.fully_connected(stitch1_1, 32, scope="fc2_1") fc2_2 = contrib.layers.fully_connected(stitch1_2, 32, scope="fc2_2") if cross_stitch_enabled: with tf.variable_scope("cross_stitch_2"): stitch2_1, stitch2_2 = apply_cross_stitch(fc2_1, fc2_2) else: stitch2_1, stitch2_2 = fc2_1, fc2_2 dropout2_1 = contrib.layers.dropout(stitch2_1, keep_prob=keep_prob, is_training=is_training, scope="dropout2_1") dropout2_2 = contrib.layers.dropout(stitch2_2, keep_prob=keep_prob, is_training=is_training, scope="dropout2_2") fc3_1 = contrib.layers.fully_connected(dropout2_1, 32, scope="fc3_1") fc3_2 = contrib.layers.fully_connected(dropout2_2, 32, scope="fc3_2") if cross_stitch_enabled: with tf.variable_scope("cross_stitch_3"): stitch3_1, stitch3_2 = apply_cross_stitch(fc3_1, fc3_2) else: stitch3_1, stitch3_2 = fc3_1, fc3_2 dropout3_1 = contrib.layers.dropout(stitch3_1, keep_prob=keep_prob, is_training=is_training, scope="dropout3_1") dropout3_2 = contrib.layers.dropout(stitch3_2, keep_prob=keep_prob, is_training=is_training, scope="dropout3_2") output_1 = contrib.layers.fully_connected(dropout3_1, n_output_1, activation_fn=None, scope="output_1") output_2 = contrib.layers.fully_connected(dropout3_2, n_output_2, activation_fn=None, scope="output_2") with tf.variable_scope("loss"): loss_base_1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_1, logits=output_1)) loss_base_2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_2, logits=output_2)) reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) loss_total = loss_base_1 + loss_base_2 + tf.reduce_sum(reg_losses) with tf.variable_scope("evaluation"): accuracy_1 = tf.reduce_mean(tf.cast(tf.equal( tf.argmax(output_1, axis=-1), tf.argmax(y_1, axis=-1)), tf.float32), name="accuracy_1") accuracy_2 = tf.reduce_mean(tf.cast(tf.equal( tf.argmax(output_2, axis=-1), tf.argmax(y_2, axis=-1)), tf.float32), name="accuracy_2") accuracy = tf.divide(accuracy_1 + accuracy_2, 2.0, name="accuracy") with tf.variable_scope("train"): global_step = tf.get_variable("global_step", shape=(), dtype=tf.int32, trainable=False) train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss_total, global_step=global_step) with tf.variable_scope("summary"): summary_loss_total = tf.summary.scalar("loss_total", loss_total) summary_accuracy_test = tf.summary.scalar("accuracy_test", accuracy) summary_accuracy_train = tf.summary.scalar("accuracy_train", accuracy) # standardization train_X_reshaped = train_X.reshape([train_X.shape[0], -1]) train_X_means = np.mean(train_X_reshaped, axis=0, keepdims=True) train_X_stds = np.std(train_X_reshaped, axis=0, keepdims=True) def standardization(x): x_reshaped = x.reshape([x.shape[0], -1]) result = (x_reshaped - train_X_means) / (train_X_stds + 1e-9) return result.reshape(x.shape) normalized_test_X = standardization(test_X) with tf.Session() as sess, tf.summary.FileWriter( "./tf_logs/fashion_minst_multi_task_learning/" + str(datetime.now().timestamp()), graph=tf.get_default_graph()) as f: sess.run(tf.global_variables_initializer()) # similar logic as mnist's next_batch() epoch = 0 index_in_epoch = 0 while epoch < n_epoch: for _ in range(m // n_batch_size + 1): start = index_in_epoch if start + n_batch_size > m: epoch += 1 n_rest_data = m - start train_X_batch_rest = train_X[start:m] train_y_batch_rest_1 = train_y_1[start:m] train_y_batch_rest_2 = 
train_y_2[start:m]

                    # Shuffle train data
                    perm = np.arange(m)
                    np.random.shuffle(perm)
                    train_X = train_X[perm]
                    train_y_1 = train_y_1[perm]
                    train_y_2 = train_y_2[perm]

                    # Start next epoch
                    start = 0
                    index_in_epoch = n_batch_size - n_rest_data
                    end = index_in_epoch
                    train_X_batch_new = train_X[start:end]
                    train_y_batch_new_1 = train_y_1[start:end]
                    train_y_batch_new_2 = train_y_2[start:end]

                    # concatenate
                    train_X_batch = np.concatenate((train_X_batch_rest, train_X_batch_new), axis=0)
                    train_y_batch_1 = np.concatenate((train_y_batch_rest_1, train_y_batch_new_1), axis=0)
                    train_y_batch_2 = np.concatenate((train_y_batch_rest_2, train_y_batch_new_2), axis=0)
                else:
                    index_in_epoch += n_batch_size
                    end = index_in_epoch
                    train_X_batch = train_X[start:end]
                    train_y_batch_1 = train_y_1[start:end]
                    train_y_batch_2 = train_y_2[start:end]

                _, global_step_value, loss_total_value, summary_loss_total_value = \
                    sess.run([train_op, global_step, loss_total, summary_loss_total],
                             feed_dict={X: standardization(train_X_batch),
                                        y_1: train_y_batch_1,
                                        y_2: train_y_batch_2,
                                        is_training: True})

                if global_step_value % 100 == 0:
                    accuracy_train_value, summary_accuracy_train_value = \
                        sess.run([accuracy, summary_accuracy_train],
                                 feed_dict={X: standardization(train_X),
                                            y_1: train_y_1,
                                            y_2: train_y_2,
                                            is_training: False})
                    accuracy_test_value, summary_accuracy_test_value = \
                        sess.run([accuracy, summary_accuracy_test],
                                 feed_dict={X: normalized_test_X,
                                            y_1: test_y_1,
                                            y_2: test_y_2,
                                            is_training: False})
                    print(global_step_value, epoch, loss_total_value, accuracy_train_value, accuracy_test_value)

                    # cross_stitches = tf.get_collection("cross_stitches")
                    # print(cross_stitches[0].eval(sess))

                    f.add_summary(summary_loss_total_value, global_step=global_step_value)
                    f.add_summary(summary_accuracy_train_value, global_step=global_step_value)
                    f.add_summary(summary_accuracy_test_value, global_step=global_step_value)


def str2bool(v):
    # argparse's type=bool is a known pitfall: bool("False") is True, so any
    # non-empty string would enable the flag. Parse the text explicitly instead.
    return str(v).lower() in ("yes", "true", "t", "1")


def parse_args(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument("--lr", type=float, help="learning rate", default=0.0003)
    parser.add_argument("--n_epoch", type=int, help="number of epoch", default=800)
    parser.add_argument("--n_batch_size", type=int, help="mini batch size", default=128)
    parser.add_argument("--reg_lambda", type=float, help="L2 regularization lambda", default=1e-3)
    parser.add_argument("--keep_prob", type=float, help="Dropout keep probability", default=0.8)
    parser.add_argument("--cross_stitch_enabled", type=str2bool, help="Use Cross Stitch or not", default=True)
    return parser.parse_args(argv)


if __name__ == "__main__":
    main(parse_args(sys.argv[1:]))
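

# A hedged numpy sketch of what apply_cross_stitch computes: the unit is one
# square matrix applied to the concatenated per-task features. With the
# identity initialization used above it starts as a no-op, so each task
# initially keeps exactly its own features. The helper name and shapes below
# are illustrative, not part of the training graph.
def _cross_stitch_identity_sketch():
    a = np.random.randn(4, 32)             # task-1 activations, batch of 4
    b = np.random.randn(4, 32)             # task-2 activations
    cross_stitch = np.eye(64)              # identity init, as in the TF code
    out = np.concatenate([a, b], axis=1) @ cross_stitch
    out1, out2 = out[:, :32], out[:, 32:]
    assert np.allclose(out1, a) and np.allclose(out2, b)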
[ "tensorflow.get_variable", "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.concat", "tensorflow.reduce_sum", "numpy.concatenate", "tensorflow.contrib.layers.flatten", "numpy.mean", "tensorflow.train.AdamOptimizer", "tensorflow.get_default_graph", "tensorflow.summary.scalar", "tensorflow.contrib.layers.variance_scaling_initializer", "tensorflow.initializers.identity", "tensorflow.get_collection", "numpy.arange", "tensorflow.divide", "numpy.std", "tensorflow.Session", "tensorflow.argmax", "tensorflow.matmul", "tensorflow.placeholder", "tensorflow.contrib.layers.dropout", "tensorflow.global_variables_initializer", "numpy.append", "numpy.array", "numpy.random.seed", "tensorflow.reshape", "numpy.random.shuffle", "tensorflow.contrib.layers.fully_connected", "tensorflow.contrib.layers.l2_regularizer", "tensorflow.variable_scope" ]
gender_age_multi_task_learning.py
[(25, 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), True, 'import numpy as np\n'), (30, 'numpy.append', 'np.append', (['data[test_index]', 'data_flip[test_index]'], {}), True, 'import numpy as np\n'), (31, 'numpy.append', 'np.append', (['data[train_index]', 'data_flip[train_index]'], {}), True, 'import numpy as np\n'), (33, 'numpy.array', 'np.array', (["[t['embedding'] for t in train]"], {}), True, 'import numpy as np\n'), (34, 'numpy.array', 'np.array', (["[t['embedding'] for t in test]"], {}), True, 'import numpy as np\n'), (45, 'keras.utils.to_categorical', 'to_categorical', (['train_y_1', 'n_class_1'], {}), False, 'from keras.utils import to_categorical\n'), (46, 'keras.utils.to_categorical', 'to_categorical', (['test_y_1', 'n_class_1'], {}), False, 'from keras.utils import to_categorical\n'), (47, 'keras.utils.to_categorical', 'to_categorical', (['train_y_2', 'n_class_2'], {}), False, 'from keras.utils import to_categorical\n'), (48, 'keras.utils.to_categorical', 'to_categorical', (['test_y_2', 'n_class_2'], {}), False, 'from keras.utils import to_categorical\n'), (53, 'tensorflow.contrib.layers.flatten', 'contrib.layers.flatten', (['input1'], {}), True, 'import tensorflow.contrib as contrib\n'), (54, 'tensorflow.contrib.layers.flatten', 'contrib.layers.flatten', (['input2'], {}), True, 'import tensorflow.contrib as contrib\n'), (55, 'tensorflow.concat', 'tf.concat', (['(input1_reshaped, input2_reshaped)'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (61, 'tensorflow.matmul', 'tf.matmul', (['input', 'cross_stitch'], {}), True, 'import tensorflow as tf\n'), (66, 'tensorflow.reshape', 'tf.reshape', (['output[:, :input1_reshaped.shape[1]]'], {'shape': 'input1_shape'}), True, 'import tensorflow as tf\n'), (67, 'tensorflow.reshape', 'tf.reshape', (['output[:, input1_reshaped.shape[1]:]'], {'shape': 'input2_shape'}), True, 'import tensorflow as tf\n'), (172, 'numpy.mean', 'np.mean', (['train_X_reshaped'], {'axis': '(0)', 'keepdims': '(True)'}), True, 'import numpy as np\n'), (173, 'numpy.std', 'np.std', (['train_X_reshaped'], {'axis': '(0)', 'keepdims': '(True)'}), True, 'import numpy as np\n'), (254, 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), False, 'import argparse\n'), (85, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""placeholder"""'], {}), True, 'import tensorflow as tf\n'), (86, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None, 128)', '"""X"""'], {}), True, 'import tensorflow as tf\n'), (87, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None, n_output_1)', '"""y_1"""'], {}), True, 'import tensorflow as tf\n'), (88, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None, n_output_2)', '"""y_2"""'], {}), True, 'import tensorflow as tf\n'), (89, 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool', '()', '"""is_training"""'], {}), True, 'import tensorflow as tf\n'), (91, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""network"""'], {}), True, 'import tensorflow as tf\n'), (146, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""loss"""'], {}), True, 'import tensorflow as tf\n'), (149, 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.REGULARIZATION_LOSSES'], {}), True, 'import tensorflow as tf\n'), (152, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""evaluation"""'], {}), True, 'import tensorflow as tf\n'), (159, 'tensorflow.divide', 'tf.divide', (['(accuracy_1 + accuracy_2)', '(2.0)'], {'name': '"""accuracy"""'}), True, 'import tensorflow as tf\n'), 
(161, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""train"""'], {}), True, 'import tensorflow as tf\n'), (162, 'tensorflow.get_variable', 'tf.get_variable', (['"""global_step"""'], {'shape': '()', 'dtype': 'tf.int32', 'trainable': '(False)'}), True, 'import tensorflow as tf\n'), (165, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""summary"""'], {}), True, 'import tensorflow as tf\n'), (166, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss_total"""', 'loss_total'], {}), True, 'import tensorflow as tf\n'), (167, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""accuracy_test"""', 'accuracy'], {}), True, 'import tensorflow as tf\n'), (168, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""accuracy_train"""', 'accuracy'], {}), True, 'import tensorflow as tf\n'), (182, 'tensorflow.Session', 'tf.Session', ([], {}), True, 'import tensorflow as tf\n'), (21, 'pickle.load', 'pickle.load', (['file'], {}), False, 'import pickle\n'), (23, 'pickle.load', 'pickle.load', (['file'], {}), False, 'import pickle\n'), (60, 'tensorflow.initializers.identity', 'tf.initializers.identity', ([], {}), True, 'import tensorflow as tf\n'), (106, 'tensorflow.contrib.layers.fully_connected', 'contrib.layers.fully_connected', (['X', '(32)'], {'scope': '"""fc1_1"""'}), True, 'import tensorflow.contrib as contrib\n'), (107, 'tensorflow.contrib.layers.fully_connected', 'contrib.layers.fully_connected', (['X', '(32)'], {'scope': '"""fc1_2"""'}), True, 'import tensorflow.contrib as contrib\n'), (115, 'tensorflow.contrib.layers.fully_connected', 'contrib.layers.fully_connected', (['stitch1_1', '(32)'], {'scope': '"""fc2_1"""'}), True, 'import tensorflow.contrib as contrib\n'), (116, 'tensorflow.contrib.layers.fully_connected', 'contrib.layers.fully_connected', (['stitch1_2', '(32)'], {'scope': '"""fc2_2"""'}), True, 'import tensorflow.contrib as contrib\n'), (124, 'tensorflow.contrib.layers.dropout', 'contrib.layers.dropout', (['stitch2_1'], {'keep_prob': 'keep_prob', 'is_training': 'is_training', 'scope': '"""dropout2_1"""'}), True, 'import tensorflow.contrib as contrib\n'), (126, 'tensorflow.contrib.layers.dropout', 'contrib.layers.dropout', (['stitch2_2'], {'keep_prob': 'keep_prob', 'is_training': 'is_training', 'scope': '"""dropout2_2"""'}), True, 'import tensorflow.contrib as contrib\n'), (129, 'tensorflow.contrib.layers.fully_connected', 'contrib.layers.fully_connected', (['dropout2_1', '(32)'], {'scope': '"""fc3_1"""'}), True, 'import tensorflow.contrib as contrib\n'), (130, 'tensorflow.contrib.layers.fully_connected', 'contrib.layers.fully_connected', (['dropout2_2', '(32)'], {'scope': '"""fc3_2"""'}), True, 'import tensorflow.contrib as contrib\n'), (138, 'tensorflow.contrib.layers.dropout', 'contrib.layers.dropout', (['stitch3_1'], {'keep_prob': 'keep_prob', 'is_training': 'is_training', 'scope': '"""dropout3_1"""'}), True, 'import tensorflow.contrib as contrib\n'), (140, 'tensorflow.contrib.layers.dropout', 'contrib.layers.dropout', (['stitch3_2'], {'keep_prob': 'keep_prob', 'is_training': 'is_training', 'scope': '"""dropout3_2"""'}), True, 'import tensorflow.contrib as contrib\n'), (143, 'tensorflow.contrib.layers.fully_connected', 'contrib.layers.fully_connected', (['dropout3_1', 'n_output_1'], {'activation_fn': 'None', 'scope': '"""output_1"""'}), True, 'import tensorflow.contrib as contrib\n'), (144, 'tensorflow.contrib.layers.fully_connected', 'contrib.layers.fully_connected', (['dropout3_2', 'n_output_2'], {'activation_fn': 'None', 'scope': '"""output_2"""'}), True, 
'import tensorflow.contrib as contrib\n'), (147, 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'labels': 'y_1', 'logits': 'output_1'}), True, 'import tensorflow as tf\n'), (148, 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'labels': 'y_2', 'logits': 'output_2'}), True, 'import tensorflow as tf\n'), (150, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['reg_losses'], {}), True, 'import tensorflow as tf\n'), (185, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (163, 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'lr'}), True, 'import tensorflow as tf\n'), (184, 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), True, 'import tensorflow as tf\n'), (95, 'tensorflow.contrib.layers.variance_scaling_initializer', 'contrib.layers.variance_scaling_initializer', ([], {}), True, 'import tensorflow.contrib as contrib\n'), (97, 'tensorflow.contrib.layers.l2_regularizer', 'contrib.layers.l2_regularizer', (['reg_lambda'], {}), True, 'import tensorflow.contrib as contrib\n'), (110, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""cross_stitch_1"""'], {}), True, 'import tensorflow as tf\n'), (119, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""cross_stitch_2"""'], {}), True, 'import tensorflow as tf\n'), (133, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""cross_stitch_3"""'], {}), True, 'import tensorflow as tf\n'), (154, 'tensorflow.argmax', 'tf.argmax', (['output_1'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (155, 'tensorflow.argmax', 'tf.argmax', (['y_1'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (157, 'tensorflow.argmax', 'tf.argmax', (['output_2'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (158, 'tensorflow.argmax', 'tf.argmax', (['y_2'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (200, 'numpy.arange', 'np.arange', (['m'], {}), True, 'import numpy as np\n'), (201, 'numpy.random.shuffle', 'np.random.shuffle', (['perm'], {}), True, 'import numpy as np\n'), (213, 'numpy.concatenate', 'np.concatenate', (['(train_X_batch_rest, train_X_batch_new)'], {'axis': '(0)'}), True, 'import numpy as np\n'), (214, 'numpy.concatenate', 'np.concatenate', (['(train_y_batch_rest_1, train_y_batch_new_1)'], {'axis': '(0)'}), True, 'import numpy as np\n'), (215, 'numpy.concatenate', 'np.concatenate', (['(train_y_batch_rest_2, train_y_batch_new_2)'], {'axis': '(0)'}), True, 'import numpy as np\n'), (183, 'datetime.datetime.now', 'datetime.now', ([], {}), False, 'from datetime import datetime\n')]
MTonyM/PReMVOS
3d01f0c6156628083a4c8441b4b57622c500e04e
# this file is copied from https://github.com/tensorflow/models/blob/master/research/deeplab/model.py # Copyright 2018 The TensorFlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Provides DeepLab model definition and helper functions. DeepLab is a deep learning system for semantic image segmentation with the following features: (1) Atrous convolution to explicitly control the resolution at which feature responses are computed within Deep Convolutional Neural Networks. (2) Atrous spatial pyramid pooling (ASPP) to robustly segment objects at multiple scales with filters at multiple sampling rates and effective fields-of-views. (3) ASPP module augmented with image-level feature and batch normalization. (4) A simple yet effective decoder module to recover the object boundaries. See the following papers for more details: "Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation" Liang-Chieh Chen, Yukun Zhu, George Papandreou, Florian Schroff, Hartwig Adam. (https://arxiv.org/abs/1802.02611) "Rethinking Atrous Convolution for Semantic Image Segmentation," Liang-Chieh Chen, George Papandreou, Florian Schroff, Hartwig Adam (https://arxiv.org/abs/1706.05587) "DeepLab: Semantic Image Segmentation with Deep Convolutional Nets, Atrous Convolution, and Fully Connected CRFs", Liang-Chieh Chen*, George Papandreou*, Iasonas Kokkinos, Kevin Murphy, Alan L Yuille (* equal contribution) (https://arxiv.org/abs/1606.00915) "Semantic Image Segmentation with Deep Convolutional Nets and Fully Connected CRFs" Liang-Chieh Chen*, George Papandreou*, Iasonas Kokkinos, Kevin Murphy, Alan L. Yuille (* equal contribution) (https://arxiv.org/abs/1412.7062) """ import tensorflow as tf from ..deeplab.core import feature_extractor slim = tf.contrib.slim _LOGITS_SCOPE_NAME = 'logits' _MERGED_LOGITS_SCOPE = 'merged_logits' _IMAGE_POOLING_SCOPE = 'image_pooling' _ASPP_SCOPE = 'aspp' _CONCAT_PROJECTION_SCOPE = 'concat_projection' _DECODER_SCOPE = 'decoder' def get_extra_layer_scopes(): """Gets the scopes for extra layers. Returns: A list of scopes for extra layers. """ return [ _LOGITS_SCOPE_NAME, _IMAGE_POOLING_SCOPE, _ASPP_SCOPE, _CONCAT_PROJECTION_SCOPE, _DECODER_SCOPE, ] def predict_labels_multi_scale(images, model_options, eval_scales=(1.0,), add_flipped_images=False): """Predicts segmentation labels. Args: images: A tensor of size [batch, height, width, channels]. model_options: A ModelOptions instance to configure models. eval_scales: The scales to resize images for evaluation. add_flipped_images: Add flipped images for evaluation or not. Returns: A dictionary with keys specifying the output_type (e.g., semantic prediction) and values storing Tensors representing predictions (argmax over channels). Each prediction has size [batch, height, width]. 
""" outputs_to_predictions = { output: [] for output in model_options.outputs_to_num_classes } for i, image_scale in enumerate(eval_scales): with tf.variable_scope(tf.get_variable_scope(), reuse=True if i else None): outputs_to_scales_to_logits = multi_scale_logits( images, model_options=model_options, image_pyramid=[image_scale], is_training=False, fine_tune_batch_norm=False) if add_flipped_images: with tf.variable_scope(tf.get_variable_scope(), reuse=True): outputs_to_scales_to_logits_reversed = multi_scale_logits( tf.reverse_v2(images, [2]), model_options=model_options, image_pyramid=[image_scale], is_training=False, fine_tune_batch_norm=False) for output in sorted(outputs_to_scales_to_logits): scales_to_logits = outputs_to_scales_to_logits[output] logits = tf.image.resize_bilinear( scales_to_logits[_MERGED_LOGITS_SCOPE], tf.shape(images)[1:3], align_corners=True) outputs_to_predictions[output].append( tf.expand_dims(tf.nn.softmax(logits), 4)) if add_flipped_images: scales_to_logits_reversed = ( outputs_to_scales_to_logits_reversed[output]) logits_reversed = tf.image.resize_bilinear( tf.reverse_v2(scales_to_logits_reversed[_MERGED_LOGITS_SCOPE], [2]), tf.shape(images)[1:3], align_corners=True) outputs_to_predictions[output].append( tf.expand_dims(tf.nn.softmax(logits_reversed), 4)) for output in sorted(outputs_to_predictions): predictions = outputs_to_predictions[output] # Compute average prediction across different scales and flipped images. predictions = tf.reduce_mean(tf.concat(predictions, 4), axis=4) outputs_to_predictions[output] = tf.argmax(predictions, 3) return outputs_to_predictions def predict_labels(images, model_options, image_pyramid=None): """Predicts segmentation labels. Args: images: A tensor of size [batch, height, width, channels]. model_options: A ModelOptions instance to configure models. image_pyramid: Input image scales for multi-scale feature extraction. Returns: A dictionary with keys specifying the output_type (e.g., semantic prediction) and values storing Tensors representing predictions (argmax over channels). Each prediction has size [batch, height, width]. """ outputs_to_scales_to_logits = multi_scale_logits( images, model_options=model_options, image_pyramid=image_pyramid, is_training=False, fine_tune_batch_norm=False) predictions = {} for output in sorted(outputs_to_scales_to_logits): scales_to_logits = outputs_to_scales_to_logits[output] logits = tf.image.resize_bilinear( scales_to_logits[_MERGED_LOGITS_SCOPE], tf.shape(images)[1:3], align_corners=True) predictions[output] = tf.argmax(logits, 3) return predictions def scale_dimension(dim, scale): """Scales the input dimension. Args: dim: Input dimension (a scalar or a scalar Tensor). scale: The amount of scaling applied to the input. Returns: Scaled dimension. """ if isinstance(dim, tf.Tensor): return tf.cast((tf.to_float(dim) - 1.0) * scale + 1.0, dtype=tf.int32) else: return int((float(dim) - 1.0) * scale + 1.0) def multi_scale_logits(images, model_options, image_pyramid, weight_decay=0.0001, is_training=False, fine_tune_batch_norm=False): """Gets the logits for multi-scale inputs. The returned logits are all downsampled (due to max-pooling layers) for both training and evaluation. Args: images: A tensor of size [batch, height, width, channels]. model_options: A ModelOptions instance to configure models. image_pyramid: Input image scales for multi-scale feature extraction. weight_decay: The weight decay for model variables. is_training: Is training or not. 
fine_tune_batch_norm: Fine-tune the batch norm parameters or not. Returns: outputs_to_scales_to_logits: A map of maps from output_type (e.g., semantic prediction) to a dictionary of multi-scale logits names to logits. For each output_type, the dictionary has keys which correspond to the scales and values which correspond to the logits. For example, if `scales` equals [1.0, 1.5], then the keys would include 'merged_logits', 'logits_1.00' and 'logits_1.50'. Raises: ValueError: If model_options doesn't specify crop_size and its add_image_level_feature = True, since add_image_level_feature requires crop_size information. """ # Setup default values. if not image_pyramid: image_pyramid = [1.0] #if model_options.crop_size is None and model_options.add_image_level_feature: # raise ValueError( # 'Crop size must be specified for using image-level feature.') if model_options.model_variant == 'mobilenet_v2': if (model_options.atrous_rates is not None or model_options.decoder_output_stride is not None): # Output a warning and users should make sure if the setting is desired. tf.logging.warning('Our provided mobilenet_v2 checkpoint does not ' 'include ASPP and decoder modules.') crop_height = ( model_options.crop_size[0] if model_options.crop_size else tf.shape(images)[1]) crop_width = ( model_options.crop_size[1] if model_options.crop_size else tf.shape(images)[2]) # Compute the height, width for the output logits. logits_output_stride = ( model_options.decoder_output_stride or model_options.output_stride) logits_height = scale_dimension( crop_height, max(1.0, max(image_pyramid)) / logits_output_stride) logits_width = scale_dimension( crop_width, max(1.0, max(image_pyramid)) / logits_output_stride) # Compute the logits for each scale in the image pyramid. outputs_to_scales_to_logits = { k: {} for k in model_options.outputs_to_num_classes } for count, image_scale in enumerate(image_pyramid): if image_scale != 1.0: scaled_height = scale_dimension(crop_height, image_scale) scaled_width = scale_dimension(crop_width, image_scale) scaled_crop_size = [scaled_height, scaled_width] scaled_images = tf.image.resize_bilinear( images, scaled_crop_size, align_corners=True) if model_options.crop_size: scaled_images.set_shape([None, scaled_height, scaled_width, 3]) else: scaled_crop_size = model_options.crop_size scaled_images = images updated_options = model_options._replace(crop_size=scaled_crop_size) outputs_to_logits = _get_logits( scaled_images, updated_options, weight_decay=weight_decay, reuse=True if count else None, is_training=is_training, fine_tune_batch_norm=fine_tune_batch_norm) # Resize the logits to have the same dimension before merging. for output in sorted(outputs_to_logits): outputs_to_logits[output] = tf.image.resize_bilinear( outputs_to_logits[output], [logits_height, logits_width], align_corners=True) # Return when only one input scale. if len(image_pyramid) == 1: for output in sorted(model_options.outputs_to_num_classes): outputs_to_scales_to_logits[output][ _MERGED_LOGITS_SCOPE] = outputs_to_logits[output] return outputs_to_scales_to_logits # Save logits to the output map. for output in sorted(model_options.outputs_to_num_classes): outputs_to_scales_to_logits[output][ 'logits_%.2f' % image_scale] = outputs_to_logits[output] # Merge the logits from all the multi-scale inputs. for output in sorted(model_options.outputs_to_num_classes): # Concatenate the multi-scale logits for each output type. 
all_logits = [ tf.expand_dims(logits, axis=4) for logits in outputs_to_scales_to_logits[output].values() ] all_logits = tf.concat(all_logits, 4) merge_fn = ( tf.reduce_max if model_options.merge_method == 'max' else tf.reduce_mean) outputs_to_scales_to_logits[output][_MERGED_LOGITS_SCOPE] = merge_fn( all_logits, axis=4) return outputs_to_scales_to_logits def _extract_features(images, model_options, weight_decay=0.0001, reuse=None, is_training=False, fine_tune_batch_norm=False): """Extracts features by the particular model_variant. Args: images: A tensor of size [batch, height, width, channels]. model_options: A ModelOptions instance to configure models. weight_decay: The weight decay for model variables. reuse: Reuse the model variables or not. is_training: Is training or not. fine_tune_batch_norm: Fine-tune the batch norm parameters or not. Returns: concat_logits: A tensor of size [batch, feature_height, feature_width, feature_channels], where feature_height/feature_width are determined by the images height/width and output_stride. end_points: A dictionary from components of the network to the corresponding activation. """ features, end_points = feature_extractor.extract_features( images, output_stride=model_options.output_stride, multi_grid=model_options.multi_grid, model_variant=model_options.model_variant, weight_decay=weight_decay, reuse=reuse, is_training=is_training, fine_tune_batch_norm=fine_tune_batch_norm) if not model_options.aspp_with_batch_norm: return features, end_points else: batch_norm_params = { 'is_training': is_training and fine_tune_batch_norm, 'decay': 0.9997, 'epsilon': 1e-5, 'scale': True, } with slim.arg_scope( [slim.conv2d, slim.separable_conv2d], weights_regularizer=slim.l2_regularizer(weight_decay), activation_fn=tf.nn.relu, normalizer_fn=slim.batch_norm, padding='SAME', stride=1, reuse=reuse): with slim.arg_scope([slim.batch_norm], **batch_norm_params): depth = 256 branch_logits = [] if model_options.add_image_level_feature: # modified by Paul Voigtlaender if is_training: pool_height = scale_dimension(model_options.crop_size[0], 1. / model_options.output_stride) pool_width = scale_dimension(model_options.crop_size[1], 1. / model_options.output_stride) image_feature = slim.avg_pool2d( features, [pool_height, pool_width], [pool_height, pool_width], padding='VALID') else: pool_height = tf.shape(features)[1] pool_width = tf.shape(features)[2] image_feature = tf.reduce_mean(features, axis=[1,2])[:, tf.newaxis, tf.newaxis, :] image_feature = slim.conv2d( image_feature, depth, 1, scope=_IMAGE_POOLING_SCOPE) image_feature = tf.image.resize_bilinear( image_feature, [pool_height, pool_width], align_corners=True) if is_training: image_feature.set_shape([None, pool_height, pool_width, depth]) branch_logits.append(image_feature) # Employ a 1x1 convolution. branch_logits.append(slim.conv2d(features, depth, 1, scope=_ASPP_SCOPE + str(0))) if model_options.atrous_rates: # Employ 3x3 convolutions with different atrous rates. for i, rate in enumerate(model_options.atrous_rates, 1): scope = _ASPP_SCOPE + str(i) if model_options.aspp_with_separable_conv: aspp_features = _split_separable_conv2d( features, filters=depth, rate=rate, weight_decay=weight_decay, scope=scope) else: aspp_features = slim.conv2d( features, depth, 3, rate=rate, scope=scope) branch_logits.append(aspp_features) # Merge branch logits. 
concat_logits = tf.concat(branch_logits, 3) concat_logits = slim.conv2d( concat_logits, depth, 1, scope=_CONCAT_PROJECTION_SCOPE) concat_logits = slim.dropout( concat_logits, keep_prob=0.9, is_training=is_training, scope=_CONCAT_PROJECTION_SCOPE + '_dropout') return concat_logits, end_points def _get_logits(images, model_options, weight_decay=0.0001, reuse=None, is_training=False, fine_tune_batch_norm=False): """Gets the logits by atrous/image spatial pyramid pooling. Args: images: A tensor of size [batch, height, width, channels]. model_options: A ModelOptions instance to configure models. weight_decay: The weight decay for model variables. reuse: Reuse the model variables or not. is_training: Is training or not. fine_tune_batch_norm: Fine-tune the batch norm parameters or not. Returns: outputs_to_logits: A map from output_type to logits. """ features, end_points = _extract_features( images, model_options, weight_decay=weight_decay, reuse=reuse, is_training=is_training, fine_tune_batch_norm=fine_tune_batch_norm) if model_options.decoder_output_stride is not None: if is_training: decoder_height = scale_dimension(model_options.crop_size[0], 1.0 / model_options.decoder_output_stride) decoder_width = scale_dimension(model_options.crop_size[1], 1.0 / model_options.decoder_output_stride) else: decoder_height = scale_dimension(tf.shape(images)[1], 1.0 / model_options.decoder_output_stride) decoder_width = scale_dimension(tf.shape(images)[2], 1.0 / model_options.decoder_output_stride) features = refine_by_decoder( features, end_points, decoder_height=decoder_height, decoder_width=decoder_width, decoder_use_separable_conv=model_options.decoder_use_separable_conv, model_variant=model_options.model_variant, weight_decay=weight_decay, reuse=reuse, is_training=is_training, fine_tune_batch_norm=fine_tune_batch_norm) outputs_to_logits = {} for output in sorted(model_options.outputs_to_num_classes): outputs_to_logits[output] = _get_branch_logits( features, model_options.outputs_to_num_classes[output], model_options.atrous_rates, aspp_with_batch_norm=model_options.aspp_with_batch_norm, kernel_size=model_options.logits_kernel_size, weight_decay=weight_decay, reuse=reuse, scope_suffix=output) return outputs_to_logits def refine_by_decoder(features, end_points, decoder_height, decoder_width, decoder_use_separable_conv=False, model_variant=None, weight_decay=0.0001, reuse=None, is_training=False, fine_tune_batch_norm=False): """Adds the decoder to obtain sharper segmentation results. Args: features: A tensor of size [batch, features_height, features_width, features_channels]. end_points: A dictionary from components of the network to the corresponding activation. decoder_height: The height of decoder feature maps. decoder_width: The width of decoder feature maps. decoder_use_separable_conv: Employ separable convolution for decoder or not. model_variant: Model variant for feature extraction. weight_decay: The weight decay for model variables. reuse: Reuse the model variables or not. is_training: Is training or not. fine_tune_batch_norm: Fine-tune the batch norm parameters or not. Returns: Decoder output with size [batch, decoder_height, decoder_width, decoder_channels]. 
""" batch_norm_params = { 'is_training': is_training and fine_tune_batch_norm, 'decay': 0.9997, 'epsilon': 1e-5, 'scale': True, } with slim.arg_scope( [slim.conv2d, slim.separable_conv2d], weights_regularizer=slim.l2_regularizer(weight_decay), activation_fn=tf.nn.relu, normalizer_fn=slim.batch_norm, padding='SAME', stride=1, reuse=reuse): with slim.arg_scope([slim.batch_norm], **batch_norm_params): with tf.variable_scope(_DECODER_SCOPE, _DECODER_SCOPE, [features]): feature_list = feature_extractor.networks_to_feature_maps[ model_variant][feature_extractor.DECODER_END_POINTS] if feature_list is None: tf.logging.info('Not found any decoder end points.') return features else: decoder_features = features for i, name in enumerate(feature_list): decoder_features_list = [decoder_features] feature_name = '{}/{}'.format( feature_extractor.name_scope[model_variant], name) decoder_features_list.append( slim.conv2d( # end_points["refinement_net/" + feature_name], end_points[feature_name], 48, 1, scope='feature_projection' + str(i))) # Resize to decoder_height/decoder_width. for j, feature in enumerate(decoder_features_list): decoder_features_list[j] = tf.image.resize_bilinear( feature, [decoder_height, decoder_width], align_corners=True) if is_training: decoder_features_list[j].set_shape( [None, decoder_height, decoder_width, None]) decoder_depth = 256 if decoder_use_separable_conv: decoder_features = _split_separable_conv2d( tf.concat(decoder_features_list, 3), filters=decoder_depth, rate=1, weight_decay=weight_decay, scope='decoder_conv0') decoder_features = _split_separable_conv2d( decoder_features, filters=decoder_depth, rate=1, weight_decay=weight_decay, scope='decoder_conv1') else: num_convs = 2 decoder_features = slim.repeat( tf.concat(decoder_features_list, 3), num_convs, slim.conv2d, decoder_depth, 3, scope='decoder_conv' + str(i)) return decoder_features def _get_branch_logits(features, num_classes, atrous_rates=None, aspp_with_batch_norm=False, kernel_size=1, weight_decay=0.0001, reuse=None, scope_suffix=''): """Gets the logits from each model's branch. The underlying model is branched out in the last layer when atrous spatial pyramid pooling is employed, and all branches are sum-merged to form the final logits. Args: features: A float tensor of shape [batch, height, width, channels]. num_classes: Number of classes to predict. atrous_rates: A list of atrous convolution rates for last layer. aspp_with_batch_norm: Use batch normalization layers for ASPP. kernel_size: Kernel size for convolution. weight_decay: Weight decay for the model variables. reuse: Reuse model variables or not. scope_suffix: Scope suffix for the model variables. Returns: Merged logits with shape [batch, height, width, num_classes]. Raises: ValueError: Upon invalid input kernel_size value. """ # When using batch normalization with ASPP, ASPP has been applied before # in _extract_features, and thus we simply apply 1x1 convolution here. if aspp_with_batch_norm or atrous_rates is None: if kernel_size != 1: raise ValueError('Kernel size must be 1 when atrous_rates is None or ' 'using aspp_with_batch_norm. Gets %d.' 
% kernel_size) atrous_rates = [1] with slim.arg_scope( [slim.conv2d], weights_regularizer=slim.l2_regularizer(weight_decay), weights_initializer=tf.truncated_normal_initializer(stddev=0.01), reuse=reuse): with tf.variable_scope(_LOGITS_SCOPE_NAME, _LOGITS_SCOPE_NAME, [features]): branch_logits = [] for i, rate in enumerate(atrous_rates): scope = scope_suffix if i: scope += '_%d' % i branch_logits.append( slim.conv2d( features, num_classes, kernel_size=kernel_size, rate=rate, activation_fn=None, normalizer_fn=None, scope=scope)) return tf.add_n(branch_logits) def _split_separable_conv2d(inputs, filters, rate=1, weight_decay=0.00004, depthwise_weights_initializer_stddev=0.33, pointwise_weights_initializer_stddev=0.06, scope=None): """Splits a separable conv2d into depthwise and pointwise conv2d. This operation differs from `tf.layers.separable_conv2d` as this operation applies activation function between depthwise and pointwise conv2d. Args: inputs: Input tensor with shape [batch, height, width, channels]. filters: Number of filters in the 1x1 pointwise convolution. rate: Atrous convolution rate for the depthwise convolution. weight_decay: The weight decay to use for regularizing the model. depthwise_weights_initializer_stddev: The standard deviation of the truncated normal weight initializer for depthwise convolution. pointwise_weights_initializer_stddev: The standard deviation of the truncated normal weight initializer for pointwise convolution. scope: Optional scope for the operation. Returns: Computed features after split separable conv2d. """ outputs = slim.separable_conv2d( inputs, None, 3, depth_multiplier=1, rate=rate, weights_initializer=tf.truncated_normal_initializer( stddev=depthwise_weights_initializer_stddev), weights_regularizer=None, scope=scope + '_depthwise') return slim.conv2d( outputs, filters, 1, weights_initializer=tf.truncated_normal_initializer( stddev=pointwise_weights_initializer_stddev), weights_regularizer=slim.l2_regularizer(weight_decay), scope=scope + '_pointwise')
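# --- Illustrative sketch (not part of the file above) -------------------------
# A self-contained rendering of the depthwise-then-pointwise split that
# _split_separable_conv2d performs, using raw tf.nn ops instead of slim.
# Assumptions: TF 1.x, NHWC input with a static channel count; the function and
# variable names here are made up for illustration.
import tensorflow as tf

def split_separable_conv2d_sketch(inputs, filters, rate=1):
    in_channels = int(inputs.shape[-1])
    # Depthwise 3x3 convolution with depth_multiplier=1 (one filter per channel).
    dw = tf.get_variable('dw_sketch', [3, 3, in_channels, 1])
    x = tf.nn.depthwise_conv2d(inputs, dw, strides=[1, 1, 1, 1],
                               padding='SAME', rate=[rate, rate])
    # The activation *between* the two convolutions is what distinguishes this
    # from tf.layers.separable_conv2d, as the docstring above notes.
    x = tf.nn.relu(x)
    # Pointwise 1x1 convolution mixing channels up to `filters` outputs.
    pw = tf.get_variable('pw_sketch', [1, 1, in_channels, filters])
    return tf.nn.conv2d(x, pw, strides=[1, 1, 1, 1], padding='SAME')
# -------------------------------------------------------------------------------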
[ "tensorflow.image.resize_bilinear", "tensorflow.logging.warning", "tensorflow.concat", "tensorflow.nn.softmax", "tensorflow.shape", "tensorflow.reduce_mean", "tensorflow.expand_dims", "tensorflow.truncated_normal_initializer", "tensorflow.logging.info", "tensorflow.to_float", "tensorflow.reverse_v2", "tensorflow.variable_scope", "tensorflow.argmax", "tensorflow.get_variable_scope", "tensorflow.add_n" ]
code/refinement_net/network/deeplab/model.py
[(147, 'tensorflow.argmax', 'tf.argmax', (['predictions', '(3)'], {}), True, 'import tensorflow as tf\n'), (179, 'tensorflow.argmax', 'tf.argmax', (['logits', '(3)'], {}), True, 'import tensorflow as tf\n'), (318, 'tensorflow.concat', 'tf.concat', (['all_logits', '(4)'], {}), True, 'import tensorflow as tf\n'), (146, 'tensorflow.concat', 'tf.concat', (['predictions', '(4)'], {}), True, 'import tensorflow as tf\n'), (244, 'tensorflow.logging.warning', 'tf.logging.warning', (['"""Our provided mobilenet_v2 checkpoint does not include ASPP and decoder modules."""'], {}), True, 'import tensorflow as tf\n'), (249, 'tensorflow.shape', 'tf.shape', (['images'], {}), True, 'import tensorflow as tf\n'), (252, 'tensorflow.shape', 'tf.shape', (['images'], {}), True, 'import tensorflow as tf\n'), (276, 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (['images', 'scaled_crop_size'], {'align_corners': '(True)'}), True, 'import tensorflow as tf\n'), (295, 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (['outputs_to_logits[output]', '[logits_height, logits_width]'], {'align_corners': '(True)'}), True, 'import tensorflow as tf\n'), (315, 'tensorflow.expand_dims', 'tf.expand_dims', (['logits'], {'axis': '(4)'}), True, 'import tensorflow as tf\n'), (644, 'tensorflow.variable_scope', 'tf.variable_scope', (['_LOGITS_SCOPE_NAME', '_LOGITS_SCOPE_NAME', '[features]'], {}), True, 'import tensorflow as tf\n'), (661, 'tensorflow.add_n', 'tf.add_n', (['branch_logits'], {}), True, 'import tensorflow as tf\n'), (696, 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': 'depthwise_weights_initializer_stddev'}), True, 'import tensorflow as tf\n'), (704, 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': 'pointwise_weights_initializer_stddev'}), True, 'import tensorflow as tf\n'), (107, 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), True, 'import tensorflow as tf\n'), (177, 'tensorflow.shape', 'tf.shape', (['images'], {}), True, 'import tensorflow as tf\n'), (426, 'tensorflow.concat', 'tf.concat', (['branch_logits', '(3)'], {}), True, 'import tensorflow as tf\n'), (549, 'tensorflow.variable_scope', 'tf.variable_scope', (['_DECODER_SCOPE', '_DECODER_SCOPE', '[features]'], {}), True, 'import tensorflow as tf\n'), (642, 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.01)'}), True, 'import tensorflow as tf\n'), (116, 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), True, 'import tensorflow as tf\n'), (118, 'tensorflow.reverse_v2', 'tf.reverse_v2', (['images', '[2]'], {}), True, 'import tensorflow as tf\n'), (128, 'tensorflow.shape', 'tf.shape', (['images'], {}), True, 'import tensorflow as tf\n'), (131, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), True, 'import tensorflow as tf\n'), (137, 'tensorflow.reverse_v2', 'tf.reverse_v2', (['scales_to_logits_reversed[_MERGED_LOGITS_SCOPE]', '[2]'], {}), True, 'import tensorflow as tf\n'), (399, 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (['image_feature', '[pool_height, pool_width]'], {'align_corners': '(True)'}), True, 'import tensorflow as tf\n'), (472, 'tensorflow.shape', 'tf.shape', (['images'], {}), True, 'import tensorflow as tf\n'), (474, 'tensorflow.shape', 'tf.shape', (['images'], {}), True, 'import tensorflow as tf\n'), (553, 'tensorflow.logging.info', 'tf.logging.info', (['"""Not found any decoder end points."""'], {}), True, 'import 
tensorflow as tf\n'), (138, 'tensorflow.shape', 'tf.shape', (['images'], {}), True, 'import tensorflow as tf\n'), (141, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits_reversed'], {}), True, 'import tensorflow as tf\n'), (195, 'tensorflow.to_float', 'tf.to_float', (['dim'], {}), True, 'import tensorflow as tf\n'), (394, 'tensorflow.shape', 'tf.shape', (['features'], {}), True, 'import tensorflow as tf\n'), (395, 'tensorflow.shape', 'tf.shape', (['features'], {}), True, 'import tensorflow as tf\n'), (396, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['features'], {'axis': '[1, 2]'}), True, 'import tensorflow as tf\n'), (570, 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (['feature', '[decoder_height, decoder_width]'], {'align_corners': '(True)'}), True, 'import tensorflow as tf\n'), (578, 'tensorflow.concat', 'tf.concat', (['decoder_features_list', '(3)'], {}), True, 'import tensorflow as tf\n'), (592, 'tensorflow.concat', 'tf.concat', (['decoder_features_list', '(3)'], {}), True, 'import tensorflow as tf\n')]
Naereen/MetaLearningGP
f2b7bdea594b31ad3046d910e6e41e2c9ff3e0fc
# Copyright 2016 Valentine Svensson, James Hensman, alexggmatthews # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf from gpflow import settings, mean_functions from gpflow.decors import name_scope from gpflow.dispatch import conditional, sample_conditional from gpflow.expectations import expectation from gpflow.features import Kuu, Kuf, InducingPoints, InducingFeature from gpflow.kernels import Kernel, Combination from gpflow.probability_distributions import Gaussian logger = settings.logger() # ---------------------------------------------------------------------------- ############################### CONDITIONAL ################################## # ---------------------------------------------------------------------------- @conditional.register(object, InducingFeature, Kernel, object) @name_scope("conditional") def _conditional(Xnew, feat, kern, f, *, full_cov=False, full_output_cov=False, q_sqrt=None, white=False): """ Single-output GP conditional. The covariance matrices used to calculate the conditional have the following shape: - Kuu: M x M - Kuf: M x N - Kff: N or N x N Further reference ----------------- - See `gpflow.conditionals._conditional` (below) for a detailed explanation of conditional in the single-output case. - See the multioutput notebook for more information about the multioutput framework. Parameters ---------- :param Xnew: data matrix, size N x D. :param f: data matrix, M x R :param full_cov: return the covariance between the datapoints :param full_output_cov: return the covariance between the outputs. Note: as we are using a single-output kernel with repetitions these covariances will be zero. :param q_sqrt: matrix of standard-deviations or Cholesky matrices, size M x R or R x M x M. :param white: boolean of whether to use the whitened representation :return: - mean: N x R - variance: N x R, R x N x N, N x R x R or N x R x N x R Please see `gpflow.conditional._expand_independent_outputs` for more information about the shape of the variance, depending on `full_cov` and `full_output_cov`. """ logger.debug("Conditional: Inducing Feature - Kernel") Kmm = Kuu(feat, kern, jitter=settings.numerics.jitter_level) # M x M Kmn = Kuf(feat, kern, Xnew) # M x N Knn = kern.K(Xnew) if full_cov else kern.Kdiag(Xnew) fmean, fvar = base_conditional(Kmn, Kmm, Knn, f, full_cov=full_cov, q_sqrt=q_sqrt, white=white) # N x R, R x N x N or N x R return fmean, _expand_independent_outputs(fvar, full_cov, full_output_cov) @conditional.register(object, object, Kernel, object) @name_scope("conditional") def _conditional(Xnew, X, kern, f, *, full_cov=False, q_sqrt=None, white=False): """ Given f, representing the GP at the points X, produce the mean and (co-)variance of the GP at the points Xnew. Additionally, there may be Gaussian uncertainty about f as represented by q_sqrt. In this case `f` represents the mean of the distribution and q_sqrt the square-root of the covariance.
Additionally, the GP may have been centered (whitened) so that p(v) = N(0, I) f = L v thus p(f) = N(0, LL^T) = N(0, K). In this case `f` represents the values taken by v. The method can either return the diagonals of the covariance matrix for each output (default) or the full covariance matrix (full_cov=True). We assume R independent GPs, represented by the columns of f (and the first dimension of q_sqrt). :param Xnew: data matrix, size N x D. Evaluate the GP at these new points :param X: data points, size M x D. :param kern: GPflow kernel. :param f: data matrix, M x R, representing the function values at X, for R functions. :param q_sqrt: matrix of standard-deviations or Cholesky matrices, size M x R or R x M x M. :param white: boolean of whether to use the whitened representation as described above. :return: - mean: N x R - variance: N x R (full_cov = False), R x N x N (full_cov = True) """ logger.debug("Conditional: Kernel") num_data = tf.shape(X)[0] # M Kmm = kern.K(X) + tf.eye(num_data, dtype=settings.float_type) * settings.numerics.jitter_level Kmn = kern.K(X, Xnew) if full_cov: Knn = kern.K(Xnew) else: Knn = kern.Kdiag(Xnew) mean, var = base_conditional(Kmn, Kmm, Knn, f, full_cov=full_cov, q_sqrt=q_sqrt, white=white) return mean, var # N x R, N x R or R x N x N # ---------------------------------------------------------------------------- ############################ SAMPLE CONDITIONAL ############################## # ---------------------------------------------------------------------------- @sample_conditional.register(object, InducingFeature, Kernel, object) @name_scope("sample_conditional") def _sample_conditional(Xnew, feat, kern, f, *, full_output_cov=False, q_sqrt=None, white=False): """ `sample_conditional` will return a sample from the conditional distribution. In most cases this means calculating the conditional mean m and variance v and then returning m + sqrt(v) * eps, with eps ~ N(0, 1). However, for some combinations of Mok and Mof more efficient sampling routines exist. The dispatcher will make sure that we use the most efficient one.
:return: N x P (full_output_cov = False) or N x P x P (full_output_cov = True) """ logger.debug("sample conditional: InducingFeature Kernel") mean, var = conditional(Xnew, feat, kern, f, full_cov=False, full_output_cov=full_output_cov, q_sqrt=q_sqrt, white=white) # N x P, N x P (x P) cov_structure = "full" if full_output_cov else "diag" return _sample_mvn(mean, var, cov_structure) @sample_conditional.register(object, object, Kernel, object) @name_scope("sample_conditional") def _sample_conditional(Xnew, X, kern, f, *, q_sqrt=None, white=False): logger.debug("sample conditional: Kernel") mean, var = conditional(Xnew, X, kern, f, q_sqrt=q_sqrt, white=white, full_cov=False) # N x P, N x P return _sample_mvn(mean, var, "diag") # N x P # ---------------------------------------------------------------------------- ############################# CONDITIONAL MATHS ############################## # ---------------------------------------------------------------------------- @name_scope() def base_conditional(Kmn, Kmm, Knn, f, *, full_cov=False, q_sqrt=None, white=False): """ Given g1 and g2, and distributions p and q such that p(g2) = N(g2; 0, Kmm) p(g1) = N(g1; 0, Knn) p(g1|g2) = N(g1; Knm Kmm^{-1} g2, Knn - Knm Kmm^{-1} Kmn) And q(g2) = N(g2; f, q_sqrt*q_sqrt^T) This method computes the mean and (co)variance of q(g1) = \int q(g2) p(g1|g2) :param Kmn: M x N :param Kmm: M x M :param Knn: N x N or N :param f: M x R :param full_cov: bool :param q_sqrt: None or R x M x M (lower triangular) :param white: bool :return: N x R or R x N x N """ logger.debug("base conditional") # compute kernel stuff num_func = tf.shape(f)[1] # R Lm = tf.cholesky(Kmm) # Compute the projection matrix A A = tf.matrix_triangular_solve(Lm, Kmn, lower=True) # compute the covariance due to the conditioning if full_cov: fvar = Knn - tf.matmul(A, A, transpose_a=True) fvar = tf.tile(fvar[None, :, :], [num_func, 1, 1]) # R x N x N else: fvar = Knn - tf.reduce_sum(tf.square(A), 0) fvar = tf.tile(fvar[None, :], [num_func, 1]) # R x N # another backsubstitution in the unwhitened case if not white: A = tf.matrix_triangular_solve(tf.transpose(Lm), A, lower=False) # construct the conditional mean fmean = tf.matmul(A, f, transpose_a=True) if q_sqrt is not None: if q_sqrt.get_shape().ndims == 2: LTA = A * tf.expand_dims(tf.transpose(q_sqrt), 2) # R x M x N elif q_sqrt.get_shape().ndims == 3: L = tf.matrix_band_part(q_sqrt, -1, 0) # R x M x M A_tiled = tf.tile(tf.expand_dims(A, 0), tf.stack([num_func, 1, 1])) LTA = tf.matmul(L, A_tiled, transpose_a=True) # R x M x N else: # pragma: no cover raise ValueError("Bad dimension for q_sqrt: %s" % str(q_sqrt.get_shape().ndims)) if full_cov: fvar = fvar + tf.matmul(LTA, LTA, transpose_a=True) # R x N x N else: fvar = fvar + tf.reduce_sum(tf.square(LTA), 1) # R x N if not full_cov: fvar = tf.transpose(fvar) # N x R return fmean, fvar # N x R, R x N x N or N x R # ---------------------------------------------------------------------------- ############################ UNCERTAIN CONDITIONAL ########################### # ---------------------------------------------------------------------------- @name_scope() def uncertain_conditional(Xnew_mu, Xnew_var, feat, kern, q_mu, q_sqrt, *, Luu=None, mean_function=None, full_output_cov=False, full_cov=False, white=False): """ Calculates the conditional for uncertain inputs Xnew, p(Xnew) = N(Xnew_mu, Xnew_var). See ``conditional`` documentation for further reference.
:param Xnew_mu: mean of the inputs, size N x Din :param Xnew_var: covariance matrix of the inputs, size N x Din x Din :param feat: gpflow.InducingFeature object, only InducingPoints is supported :param kern: gpflow kernel or ekernel object. :param q_mu: mean inducing points, size M x Dout :param q_sqrt: Cholesky of the covariance matrix of the inducing points, size Dout x M x M :param full_output_cov: boolean whether to compute covariance between output dimensions. Influences the shape of return value ``fvar``. Default is False :param white: boolean whether to use whitened representation. Default is False. :return fmean, fvar: mean and covariance of the conditional, size ``fmean`` is N x Dout, size ``fvar`` depends on ``full_output_cov``: if True ``f_var`` is N x Dout x Dout, if False then ``f_var`` is N x Dout """ # TODO(VD): Tensorflow 1.7 doesn't support broadcasting in ``tf.matmul`` and # ``tf.matrix_triangular_solve``. This is reported in issue 216. # As a temporary workaround, we are using ``tf.einsum`` for the matrix # multiplications and tiling in the triangular solves. # The code that should be used once the bug is resolved is added in comments. if not isinstance(feat, InducingPoints): raise NotImplementedError if full_cov: # TODO(VD): ``full_cov`` True would return a ``fvar`` of shape N x N x D x D, # encoding the covariance between input datapoints as well. # This is not implemented as this feature is only used for plotting purposes. raise NotImplementedError pXnew = Gaussian(Xnew_mu, Xnew_var) num_data = tf.shape(Xnew_mu)[0] # number of new inputs (N) num_ind = tf.shape(q_mu)[0] # number of inducing points (M) num_func = tf.shape(q_mu)[1] # output dimension (D) q_sqrt_r = tf.matrix_band_part(q_sqrt, -1, 0) # D x M x M eKuf = tf.transpose(expectation(pXnew, (kern, feat))) # M x N (psi1) if Luu is None: Kuu = feat.Kuu(kern, jitter=settings.numerics.jitter_level) # M x M Luu = tf.cholesky(Kuu) # M x M if not white: q_mu = tf.matrix_triangular_solve(Luu, q_mu, lower=True) Luu_tiled = tf.tile(Luu[None, :, :], [num_func, 1, 1]) # remove line once issue 216 is fixed q_sqrt_r = tf.matrix_triangular_solve(Luu_tiled, q_sqrt_r, lower=True) Li_eKuf = tf.matrix_triangular_solve(Luu, eKuf, lower=True) # M x N fmean = tf.matmul(Li_eKuf, q_mu, transpose_a=True) eKff = expectation(pXnew, kern) # N (psi0) eKuffu = expectation(pXnew, (kern, feat), (kern, feat)) # N x M x M (psi2) Luu_tiled = tf.tile(Luu[None, :, :], [num_data, 1, 1]) # remove this line, once issue 216 is fixed Li_eKuffu = tf.matrix_triangular_solve(Luu_tiled, eKuffu, lower=True) Li_eKuffu_Lit = tf.matrix_triangular_solve(Luu_tiled, tf.matrix_transpose(Li_eKuffu), lower=True) # N x M x M cov = tf.matmul(q_sqrt_r, q_sqrt_r, transpose_b=True) # D x M x M if mean_function is None or isinstance(mean_function, mean_functions.Zero): e_related_to_mean = tf.zeros((num_data, num_func, num_func), dtype=settings.float_type) else: # Update mean: \mu(x) + m(x) fmean = fmean + expectation(pXnew, mean_function) # Calculate: m(x) m(x)^T + m(x) \mu(x)^T + \mu(x) m(x)^T, # where m(x) is the mean_function and \mu(x) is fmean e_mean_mean = expectation(pXnew, mean_function, mean_function) # N x D x D Lit_q_mu = tf.matrix_triangular_solve(Luu, q_mu, adjoint=True) e_mean_Kuf = expectation(pXnew, mean_function, (kern, feat)) # N x D x M # einsum isn't able to infer the rank of e_mean_Kuf, hence we explicitly set the rank of the tensor: e_mean_Kuf = tf.reshape(e_mean_Kuf, [num_data, num_func, num_ind]) e_fmean_mean = tf.einsum("nqm,mz->nqz", e_mean_Kuf,
Lit_q_mu) # N x D x D e_related_to_mean = e_fmean_mean + tf.matrix_transpose(e_fmean_mean) + e_mean_mean if full_output_cov: fvar = ( tf.matrix_diag(tf.tile((eKff - tf.trace(Li_eKuffu_Lit))[:, None], [1, num_func])) + tf.matrix_diag(tf.einsum("nij,dji->nd", Li_eKuffu_Lit, cov)) + # tf.matrix_diag(tf.trace(tf.matmul(Li_eKuffu_Lit, cov))) + tf.einsum("ig,nij,jh->ngh", q_mu, Li_eKuffu_Lit, q_mu) - # tf.matmul(q_mu, tf.matmul(Li_eKuffu_Lit, q_mu), transpose_a=True) - fmean[:, :, None] * fmean[:, None, :] + e_related_to_mean ) else: fvar = ( (eKff - tf.trace(Li_eKuffu_Lit))[:, None] + tf.einsum("nij,dji->nd", Li_eKuffu_Lit, cov) + tf.einsum("ig,nij,jg->ng", q_mu, Li_eKuffu_Lit, q_mu) - fmean ** 2 + tf.matrix_diag_part(e_related_to_mean) ) return fmean, fvar # --------------------------------------------------------------- ########################## HELPERS ############################## # --------------------------------------------------------------- def _sample_mvn(mean, cov, cov_structure): """ Returns a sample from a D-dimensional Multivariate Normal distribution :param mean: N x D :param cov: N x D or N x D x D :param cov_structure: "diag" or "full" - "diag": cov holds the diagonal elements of the covariance matrix - "full": cov holds the full covariance matrix (without jitter) :return: sample from the MVN of shape N x D """ eps = tf.random_normal(tf.shape(mean), dtype=settings.float_type) # N x P if cov_structure == "diag": sample = mean + tf.sqrt(cov) * eps # N x P elif cov_structure == "full": cov = cov + (tf.eye(tf.shape(mean)[1], dtype=settings.float_type) * settings.numerics.jitter_level)[None, ...] # N x P x P chol = tf.cholesky(cov) # N x P x P return mean + (tf.matmul(chol, eps[..., None])[..., 0]) # N x P else: raise NotImplementedError # pragma: no cover return sample # N x P def _expand_independent_outputs(fvar, full_cov, full_output_cov): """ Reshapes fvar to the correct shape, specified by `full_cov` and `full_output_cov`. :param fvar: has shape N x P (full_cov = False) or P x N x N (full_cov = True). :return: 1. full_cov: True and full_output_cov: True fvar N x P x N x P 2. full_cov: True and full_output_cov: False fvar P x N x N 3. full_cov: False and full_output_cov: True fvar N x P x P 4. full_cov: False and full_output_cov: False fvar N x P """ if full_cov and full_output_cov: fvar = tf.matrix_diag(tf.transpose(fvar)) # N x N x P x P fvar = tf.transpose(fvar, [0, 2, 1, 3]) # N x P x N x P if not full_cov and full_output_cov: fvar = tf.matrix_diag(fvar) # N x P x P if full_cov and not full_output_cov: pass # P x N x N if not full_cov and not full_output_cov: pass # N x P return fvar
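# --- Illustrative sketch (not part of the file above) -------------------------
# A hedged toy invocation of base_conditional defined above. The inline RBF
# kernel, the jitter value, and the float64 inputs are assumptions made so the
# example is self-contained and runnable in TF 1.x graph mode.
import numpy as np
import tensorflow as tf

M, N, R = 5, 3, 2
X = np.random.randn(M, 1)
Xnew = np.random.randn(N, 1)
rbf = lambda a, b: np.exp(-0.5 * (a - b.T) ** 2)  # toy 1-D squared exponential

Kmm = tf.constant(rbf(X, X) + 1e-6 * np.eye(M))   # M x M, with jitter
Kmn = tf.constant(rbf(X, Xnew))                   # M x N
Knn = tf.constant(np.diag(rbf(Xnew, Xnew)))       # N, since full_cov=False
f = tf.constant(np.random.randn(M, R))            # M x R

fmean, fvar = base_conditional(Kmn, Kmm, Knn, f, full_cov=False, white=True)
with tf.Session() as sess:
    m, v = sess.run([fmean, fvar])                # shapes (N, R) and (N, R)
# -------------------------------------------------------------------------------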
[ "tensorflow.matrix_band_part", "tensorflow.zeros", "tensorflow.matrix_diag_part", "tensorflow.stack", "tensorflow.trace", "tensorflow.cholesky", "tensorflow.matrix_triangular_solve", "tensorflow.square", "tensorflow.matrix_diag", "tensorflow.matrix_transpose", "tensorflow.tile", "tensorflow.matmul", "tensorflow.shape", "tensorflow.transpose", "tensorflow.reshape", "tensorflow.eye", "tensorflow.expand_dims", "tensorflow.einsum", "tensorflow.sqrt" ]
gpflow_mod/conditionals.py
[(27, 'gpflow.settings.logger', 'settings.logger', ([], {}), False, 'from gpflow import settings, mean_functions\n'), (34, 'gpflow.dispatch.conditional.register', 'conditional.register', (['object', 'InducingFeature', 'Kernel', 'object'], {}), False, 'from gpflow.dispatch import conditional, sample_conditional\n'), (35, 'gpflow.decors.name_scope', 'name_scope', (['"""conditional"""'], {}), False, 'from gpflow.decors import name_scope\n'), (74, 'gpflow.dispatch.conditional.register', 'conditional.register', (['object', 'object', 'Kernel', 'object'], {}), False, 'from gpflow.dispatch import conditional, sample_conditional\n'), (75, 'gpflow.decors.name_scope', 'name_scope', (['"""conditional"""'], {}), False, 'from gpflow.decors import name_scope\n'), (124, 'gpflow.dispatch.sample_conditional.register', 'sample_conditional.register', (['object', 'InducingFeature', 'Kernel', 'object'], {}), False, 'from gpflow.dispatch import conditional, sample_conditional\n'), (125, 'gpflow.decors.name_scope', 'name_scope', (['"""sample_conditional"""'], {}), False, 'from gpflow.decors import name_scope\n'), (142, 'gpflow.dispatch.sample_conditional.register', 'sample_conditional.register', (['object', 'object', 'Kernel', 'object'], {}), False, 'from gpflow.dispatch import conditional, sample_conditional\n'), (143, 'gpflow.decors.name_scope', 'name_scope', (['"""sample_conditional"""'], {}), False, 'from gpflow.decors import name_scope\n'), (154, 'gpflow.decors.name_scope', 'name_scope', ([], {}), False, 'from gpflow.decors import name_scope\n'), (222, 'gpflow.decors.name_scope', 'name_scope', ([], {}), False, 'from gpflow.decors import name_scope\n'), (66, 'gpflow.features.Kuf', 'Kuf', (['feat', 'kern', 'Xnew'], {}), False, 'from gpflow.features import Kuu, Kuf, InducingPoints, InducingFeature\n'), (136, 'gpflow.dispatch.conditional', 'conditional', (['Xnew', 'feat', 'kern', 'f'], {'full_cov': '(False)', 'full_output_cov': 'full_output_cov', 'q_sqrt': 'q_sqrt', 'white': 'white'}), False, 'from gpflow.dispatch import conditional, sample_conditional\n'), (146, 'gpflow.dispatch.conditional', 'conditional', (['Xnew', 'X', 'kern', 'f'], {'q_sqrt': 'q_sqrt', 'white': 'white', 'full_cov': '(False)'}), False, 'from gpflow.dispatch import conditional, sample_conditional\n'), (177, 'tensorflow.cholesky', 'tf.cholesky', (['Kmm'], {}), True, 'import tensorflow as tf\n'), (180, 'tensorflow.matrix_triangular_solve', 'tf.matrix_triangular_solve', (['Lm', 'Kmn'], {'lower': '(True)'}), True, 'import tensorflow as tf\n'), (195, 'tensorflow.matmul', 'tf.matmul', (['A', 'f'], {'transpose_a': '(True)'}), True, 'import tensorflow as tf\n'), (257, 'gpflow.probability_distributions.Gaussian', 'Gaussian', (['Xnew_mu', 'Xnew_var'], {}), False, 'from gpflow.probability_distributions import Gaussian\n'), (263, 'tensorflow.matrix_band_part', 'tf.matrix_band_part', (['q_sqrt', '(-1)', '(0)'], {}), True, 'import tensorflow as tf\n'), (275, 'tensorflow.matrix_triangular_solve', 'tf.matrix_triangular_solve', (['Luu', 'eKuf'], {'lower': '(True)'}), True, 'import tensorflow as tf\n'), (276, 'tensorflow.matmul', 'tf.matmul', (['Li_eKuf', 'q_mu'], {'transpose_a': '(True)'}), True, 'import tensorflow as tf\n'), (278, 'gpflow.expectations.expectation', 'expectation', (['pXnew', 'kern'], {}), False, 'from gpflow.expectations import expectation\n'), (279, 'gpflow.expectations.expectation', 'expectation', (['pXnew', '(kern, feat)', '(kern, feat)'], {}), False, 'from gpflow.expectations import expectation\n'), (280, 'tensorflow.tile', 'tf.tile', 
(['Luu[(None), :, :]', '[num_data, 1, 1]'], {}), True, 'import tensorflow as tf\n'), (281, 'tensorflow.matrix_triangular_solve', 'tf.matrix_triangular_solve', (['Luu_tiled', 'eKuffu'], {'lower': '(True)'}), True, 'import tensorflow as tf\n'), (283, 'tensorflow.matmul', 'tf.matmul', (['q_sqrt_r', 'q_sqrt_r'], {'transpose_b': '(True)'}), True, 'import tensorflow as tf\n'), (107, 'tensorflow.shape', 'tf.shape', (['X'], {}), True, 'import tensorflow as tf\n'), (176, 'tensorflow.shape', 'tf.shape', (['f'], {}), True, 'import tensorflow as tf\n'), (185, 'tensorflow.tile', 'tf.tile', (['fvar[(None), :, :]', '[num_func, 1, 1]'], {}), True, 'import tensorflow as tf\n'), (188, 'tensorflow.tile', 'tf.tile', (['fvar[(None), :]', '[num_func, 1]'], {}), True, 'import tensorflow as tf\n'), (213, 'tensorflow.transpose', 'tf.transpose', (['fvar'], {}), True, 'import tensorflow as tf\n'), (259, 'tensorflow.shape', 'tf.shape', (['Xnew_mu'], {}), True, 'import tensorflow as tf\n'), (260, 'tensorflow.shape', 'tf.shape', (['q_mu'], {}), True, 'import tensorflow as tf\n'), (261, 'tensorflow.shape', 'tf.shape', (['q_mu'], {}), True, 'import tensorflow as tf\n'), (265, 'gpflow.expectations.expectation', 'expectation', (['pXnew', '(kern, feat)'], {}), False, 'from gpflow.expectations import expectation\n'), (268, 'tensorflow.cholesky', 'tf.cholesky', (['Kuu'], {}), True, 'import tensorflow as tf\n'), (271, 'tensorflow.matrix_triangular_solve', 'tf.matrix_triangular_solve', (['Luu', 'q_mu'], {'lower': '(True)'}), True, 'import tensorflow as tf\n'), (272, 'tensorflow.tile', 'tf.tile', (['Luu[(None), :, :]', '[num_func, 1, 1]'], {}), True, 'import tensorflow as tf\n'), (273, 'tensorflow.matrix_triangular_solve', 'tf.matrix_triangular_solve', (['Luu_tiled', 'q_sqrt_r'], {'lower': '(True)'}), True, 'import tensorflow as tf\n'), (282, 'tensorflow.matrix_transpose', 'tf.matrix_transpose', (['Li_eKuffu'], {}), True, 'import tensorflow as tf\n'), (286, 'tensorflow.zeros', 'tf.zeros', (['(num_data, num_func, num_func)'], {'dtype': 'settings.float_type'}), True, 'import tensorflow as tf\n'), (293, 'gpflow.expectations.expectation', 'expectation', (['pXnew', 'mean_function', 'mean_function'], {}), False, 'from gpflow.expectations import expectation\n'), (294, 'tensorflow.matrix_triangular_solve', 'tf.matrix_triangular_solve', (['Luu', 'q_mu'], {'adjoint': '(True)'}), True, 'import tensorflow as tf\n'), (295, 'gpflow.expectations.expectation', 'expectation', (['pXnew', 'mean_function', '(kern, feat)'], {}), False, 'from gpflow.expectations import expectation\n'), (297, 'tensorflow.reshape', 'tf.reshape', (['e_mean_Kuf', '[num_data, num_func, num_ind]'], {}), True, 'import tensorflow as tf\n'), (298, 'tensorflow.einsum', 'tf.einsum', (['"""nqm,mz->nqz"""', 'e_mean_Kuf', 'Lit_q_mu'], {}), True, 'import tensorflow as tf\n'), (337, 'tensorflow.shape', 'tf.shape', (['mean'], {}), True, 'import tensorflow as tf\n'), (365, 'tensorflow.transpose', 'tf.transpose', (['fvar', '[0, 2, 1, 3]'], {}), True, 'import tensorflow as tf\n'), (367, 'tensorflow.matrix_diag', 'tf.matrix_diag', (['fvar'], {}), True, 'import tensorflow as tf\n'), (108, 'tensorflow.eye', 'tf.eye', (['num_data'], {'dtype': 'settings.float_type'}), True, 'import tensorflow as tf\n'), (184, 'tensorflow.matmul', 'tf.matmul', (['A', 'A'], {'transpose_a': '(True)'}), True, 'import tensorflow as tf\n'), (192, 'tensorflow.transpose', 'tf.transpose', (['Lm'], {}), True, 'import tensorflow as tf\n'), (289, 'gpflow.expectations.expectation', 'expectation', (['pXnew', 
'mean_function'], {}), False, 'from gpflow.expectations import expectation\n'), (317, 'tensorflow.matrix_diag_part', 'tf.matrix_diag_part', (['e_related_to_mean'], {}), True, 'import tensorflow as tf\n'), (342, 'tensorflow.cholesky', 'tf.cholesky', (['cov'], {}), True, 'import tensorflow as tf\n'), (364, 'tensorflow.transpose', 'tf.transpose', (['fvar'], {}), True, 'import tensorflow as tf\n'), (187, 'tensorflow.square', 'tf.square', (['A'], {}), True, 'import tensorflow as tf\n'), (201, 'tensorflow.matrix_band_part', 'tf.matrix_band_part', (['q_sqrt', '(-1)', '(0)'], {}), True, 'import tensorflow as tf\n'), (203, 'tensorflow.matmul', 'tf.matmul', (['L', 'A_tiled'], {'transpose_a': '(True)'}), True, 'import tensorflow as tf\n'), (208, 'tensorflow.matmul', 'tf.matmul', (['LTA', 'LTA'], {'transpose_a': '(True)'}), True, 'import tensorflow as tf\n'), (299, 'tensorflow.matrix_transpose', 'tf.matrix_transpose', (['e_fmean_mean'], {}), True, 'import tensorflow as tf\n'), (339, 'tensorflow.sqrt', 'tf.sqrt', (['cov'], {}), True, 'import tensorflow as tf\n'), (199, 'tensorflow.transpose', 'tf.transpose', (['q_sqrt'], {}), True, 'import tensorflow as tf\n'), (202, 'tensorflow.expand_dims', 'tf.expand_dims', (['A', '(0)'], {}), True, 'import tensorflow as tf\n'), (202, 'tensorflow.stack', 'tf.stack', (['[num_func, 1, 1]'], {}), True, 'import tensorflow as tf\n'), (210, 'tensorflow.square', 'tf.square', (['LTA'], {}), True, 'import tensorflow as tf\n'), (306, 'tensorflow.einsum', 'tf.einsum', (['"""ig,nij,jh->ngh"""', 'q_mu', 'Li_eKuffu_Lit', 'q_mu'], {}), True, 'import tensorflow as tf\n'), (315, 'tensorflow.einsum', 'tf.einsum', (['"""ig,nij,jg->ng"""', 'q_mu', 'Li_eKuffu_Lit', 'q_mu'], {}), True, 'import tensorflow as tf\n'), (343, 'tensorflow.matmul', 'tf.matmul', (['chol', 'eps[..., None]'], {}), True, 'import tensorflow as tf\n'), (314, 'tensorflow.einsum', 'tf.einsum', (['"""nij,dji->nd"""', 'Li_eKuffu_Lit', 'cov'], {}), True, 'import tensorflow as tf\n'), (304, 'tensorflow.einsum', 'tf.einsum', (['"""nij,dji->nd"""', 'Li_eKuffu_Lit', 'cov'], {}), True, 'import tensorflow as tf\n'), (313, 'tensorflow.trace', 'tf.trace', (['Li_eKuffu_Lit'], {}), True, 'import tensorflow as tf\n'), (341, 'tensorflow.shape', 'tf.shape', (['mean'], {}), True, 'import tensorflow as tf\n'), (303, 'tensorflow.trace', 'tf.trace', (['Li_eKuffu_Lit'], {}), True, 'import tensorflow as tf\n')]
stjordanis/gradient-checkpointing
43444e0523495c9f305f2c32d81eeea2328a1b18
from toposort import toposort import contextlib import numpy as np import tensorflow as tf import tensorflow.contrib.graph_editor as ge import time import sys sys.setrecursionlimit(10000) # refers back to current module if we decide to split helpers out util = sys.modules[__name__] # getting rid of "WARNING:tensorflow:VARIABLES collection name is deprecated" setattr(tf.GraphKeys, "VARIABLES", "variables") # save original gradients since tf.gradients could be monkey-patched to point # to our version from tensorflow.python.ops import gradients as tf_gradients_lib tf_gradient_function = tf_gradients_lib.gradients # ISSUE: https://github.com/cybertronai/gradient-checkpointing/issues/38 def tf_gradients(ys, *args, **kwargs): """Decorate tf.gradients calls with explicit device placement to avoid memory leaks when splitting model across multiple GPUs""" source = ys[0] if isinstance(ys, (list, tuple)) else ys device = source.op.node_def.device if isinstance(source, tf.Tensor) else None with tf.device(device): return tf_gradient_function(ys, *args, **kwargs) MIN_CHECKPOINT_NODE_SIZE=1024 # use lower value during testing # specific versions we can use to do process-wide replacement of tf.gradients def gradients_speed(ys, xs, grad_ys=None, **kwargs): return gradients(ys, xs, grad_ys, checkpoints='speed', **kwargs) def gradients_memory(ys, xs, grad_ys=None, **kwargs): return gradients(ys, xs, grad_ys, checkpoints='memory', **kwargs) def gradients_collection(ys, xs, grad_ys=None, **kwargs): return gradients(ys, xs, grad_ys, checkpoints='collection', **kwargs) def gradients(ys, xs, grad_ys=None, checkpoints='collection', **kwargs): ''' Authors: Tim Salimans & Yaroslav Bulatov memory efficient gradient implementation inspired by "Training Deep Nets with Sublinear Memory Cost" by Chen et al. 2016 (https://arxiv.org/abs/1604.06174) ys,xs,grad_ys,kwargs are the arguments to standard tensorflow tf.gradients (https://www.tensorflow.org/versions/r0.12/api_docs/python/train.html#gradients) 'checkpoints' can either be - a list consisting of tensors from the forward pass of the neural net that we should re-use when calculating the gradients in the backward pass all other tensors that do not appear in this list will be re-computed - a string specifying how this list should be determined. currently we support - 'speed': checkpoint all outputs of convolutions and matmuls.
these ops are usually the most expensive, so checkpointing them maximizes the running speed (this is a good option if nonlinearities, concats, batchnorms, etc are taking up a lot of memory) - 'memory': try to minimize the memory usage (currently using a very simple strategy that identifies a number of bottleneck tensors in the graph to checkpoint) - 'collection': look for a tensorflow collection named 'checkpoints', which holds the tensors to checkpoint ''' # print("Calling memsaving gradients with", checkpoints) if not isinstance(ys,list): ys = [ys] if not isinstance(xs,list): xs = [xs] bwd_ops = ge.get_backward_walk_ops([y.op for y in ys], inclusive=True) debug_print("bwd_ops: %s", bwd_ops) # forward ops are all ops that are candidates for recomputation fwd_ops = ge.get_forward_walk_ops([x.op for x in xs], inclusive=True, within_ops=bwd_ops) debug_print("fwd_ops: %s", fwd_ops) # exclude ops with no inputs fwd_ops = [op for op in fwd_ops if op.inputs] # don't recompute xs, remove variables xs_ops = _to_ops(xs) fwd_ops = [op for op in fwd_ops if not op in xs_ops] fwd_ops = [op for op in fwd_ops if not '/assign' in op.name] fwd_ops = [op for op in fwd_ops if not '/Assign' in op.name] fwd_ops = [op for op in fwd_ops if not '/read' in op.name] ts_all = ge.filter_ts(fwd_ops, True) # get the tensors ts_all = [t for t in ts_all if '/read' not in t.name] ts_all = set(ts_all) - set(xs) - set(ys) # construct list of tensors to checkpoint during forward pass, if not # given as input if type(checkpoints) is not list: if checkpoints == 'collection': checkpoints = tf.get_collection('checkpoints') elif checkpoints == 'speed': # checkpoint all expensive ops to maximize running speed checkpoints = ge.filter_ts_from_regex(fwd_ops, 'conv2d|Conv|MatMul') elif checkpoints == 'memory': # remove very small tensors and some weird ops def fixdims(t): # tf.Dimension values are not compatible with int, convert manually try: return [int(e if e.value is not None else 64) for e in t] except: return [0] # unknown shape ts_all = [t for t in ts_all if np.prod(fixdims(t.shape)) > MIN_CHECKPOINT_NODE_SIZE] ts_all = [t for t in ts_all if 'L2Loss' not in t.name] ts_all = [t for t in ts_all if 'entropy' not in t.name] ts_all = [t for t in ts_all if 'FusedBatchNorm' not in t.name] ts_all = [t for t in ts_all if 'Switch' not in t.name] ts_all = [t for t in ts_all if 'dropout' not in t.name] # DV: FP16_FIX - need to add 'Cast' layer here to make it work for FP16 ts_all = [t for t in ts_all if 'Cast' not in t.name] # filter out all tensors that are inputs of the backward graph with util.capture_ops() as bwd_ops: tf_gradients(ys, xs, grad_ys, **kwargs) bwd_inputs = [t for op in bwd_ops for t in op.inputs] # list of tensors in forward graph that is in input to bwd graph ts_filtered = list(set(bwd_inputs).intersection(ts_all)) debug_print("Using tensors %s", ts_filtered) # try two slightly different ways of getting bottlenecks tensors # to checkpoint for ts in [ts_filtered, ts_all]: # get all bottlenecks in the graph bottleneck_ts = [] for t in ts: b = set(ge.get_backward_walk_ops(t.op, inclusive=True, within_ops=fwd_ops)) f = set(ge.get_forward_walk_ops(t.op, inclusive=False, within_ops=fwd_ops)) # check that there are not shortcuts b_inp = set([inp for op in b for inp in op.inputs]).intersection(ts_all) f_inp = set([inp for op in f for inp in op.inputs]).intersection(ts_all) if not set(b_inp).intersection(f_inp) and len(b_inp)+len(f_inp) >= len(ts_all): bottleneck_ts.append(t) # we have a bottleneck! 
else: debug_print("Rejected bottleneck candidate and ops %s", [t] + list(set(ts_all) - set(b_inp) - set(f_inp))) # success? or try again without filtering? if len(bottleneck_ts) >= np.sqrt(len(ts_filtered)): # yes, enough bottlenecks found! break if not bottleneck_ts: raise Exception('unable to find bottleneck tensors! please provide checkpoint nodes manually, or use checkpoints="speed".') # sort the bottlenecks bottlenecks_sorted_lists = tf_toposort(bottleneck_ts, within_ops=fwd_ops) sorted_bottlenecks = [t for ts in bottlenecks_sorted_lists for t in ts] # save an approximately optimal number ~ sqrt(N) N = len(ts_filtered) if len(bottleneck_ts) <= np.ceil(np.sqrt(N)): checkpoints = sorted_bottlenecks else: step = int(np.ceil(len(bottleneck_ts) / np.sqrt(N))) checkpoints = sorted_bottlenecks[step::step] else: raise Exception('%s is unsupported input for "checkpoints"' % (checkpoints,)) checkpoints = list(set(checkpoints).intersection(ts_all)) # at this point automatic selection happened and checkpoints is list of nodes assert isinstance(checkpoints, list) debug_print("Checkpoint nodes used: %s", checkpoints) # better error handling of special cases # xs are already handled as checkpoint nodes, so no need to include them xs_intersect_checkpoints = set(xs).intersection(set(checkpoints)) if xs_intersect_checkpoints: debug_print("Warning, some input nodes are also checkpoint nodes: %s", xs_intersect_checkpoints) ys_intersect_checkpoints = set(ys).intersection(set(checkpoints)) debug_print("ys: %s, checkpoints: %s, intersect: %s", ys, checkpoints, ys_intersect_checkpoints) # saving an output node (ys) gives no benefit in memory while creating # new edge cases, exclude them if ys_intersect_checkpoints: debug_print("Warning, some output nodes are also checkpoints nodes: %s", format_ops(ys_intersect_checkpoints)) # remove initial and terminal nodes from checkpoints list if present checkpoints = list(set(checkpoints) - set(ys) - set(xs)) # check that we have some nodes to checkpoint if not checkpoints: raise Exception('no checkpoints nodes found or given as input! 
') # disconnect dependencies between checkpointed tensors checkpoints_disconnected = {} for x in checkpoints: if x.op and x.op.name is not None: grad_node = tf.stop_gradient(x, name=x.op.name+"_sg") else: grad_node = tf.stop_gradient(x) grad_node.op._set_device(x.op.node_def.device) checkpoints_disconnected[x] = grad_node # partial derivatives to the checkpointed tensors and xs ops_to_copy = fast_backward_ops(seed_ops=[y.op for y in ys], stop_at_ts=checkpoints, within_ops=fwd_ops) debug_print("Found %s ops to copy within fwd_ops %s, seed %s, stop_at %s", len(ops_to_copy), fwd_ops, [r.op for r in ys], checkpoints) debug_print("ops_to_copy = %s", ops_to_copy) debug_print("Processing list %s", ys) copied_sgv, info = ge.copy_with_input_replacements(ge.sgv(ops_to_copy), {}) for origin_op, op in info._transformed_ops.items(): op._set_device(origin_op.node_def.device) copied_ops = info._transformed_ops.values() debug_print("Copied %s to %s", ops_to_copy, copied_ops) ge.reroute_ts(checkpoints_disconnected.values(), checkpoints_disconnected.keys(), can_modify=copied_ops) debug_print("Rewired %s in place of %s restricted to %s", checkpoints_disconnected.values(), checkpoints_disconnected.keys(), copied_ops) # get gradients with respect to current boundary + original x's copied_ys = [info._transformed_ops[y.op]._outputs[0] for y in ys] boundary = list(checkpoints_disconnected.values()) dv = tf_gradients(ys=copied_ys, xs=boundary+xs, grad_ys=grad_ys, **kwargs) debug_print("Got gradients %s", dv) debug_print("for %s", copied_ys) debug_print("with respect to %s", boundary+xs) inputs_to_do_before = [y.op for y in ys] if grad_ys is not None: inputs_to_do_before += grad_ys wait_to_do_ops = list(copied_ops) + [g.op for g in dv if g is not None] my_add_control_inputs(wait_to_do_ops, inputs_to_do_before) # partial derivatives to the checkpointed nodes # dictionary of "node: backprop" for nodes in the boundary d_checkpoints = {r: dr for r,dr in zip(checkpoints_disconnected.keys(), dv[:len(checkpoints_disconnected)])} # partial derivatives to xs (usually the params of the neural net) d_xs = dv[len(checkpoints_disconnected):] # incorporate derivatives flowing through the checkpointed nodes checkpoints_sorted_lists = tf_toposort(checkpoints, within_ops=fwd_ops) for ts in checkpoints_sorted_lists[::-1]: debug_print("Processing list %s", ts) checkpoints_other = [r for r in checkpoints if r not in ts] checkpoints_disconnected_other = [checkpoints_disconnected[r] for r in checkpoints_other] # copy part of the graph below current checkpoint node, stopping at # other checkpoints nodes ops_to_copy = fast_backward_ops(within_ops=fwd_ops, seed_ops=[r.op for r in ts], stop_at_ts=checkpoints_other) debug_print("Found %s ops to copy within %s, seed %s, stop_at %s", len(ops_to_copy), fwd_ops, [r.op for r in ts], checkpoints_other) debug_print("ops_to_copy = %s", ops_to_copy) if not ops_to_copy: # we're done! 
break copied_sgv, info = ge.copy_with_input_replacements(ge.sgv(ops_to_copy), {}) for origin_op, op in info._transformed_ops.items(): op._set_device(origin_op.node_def.device) copied_ops = info._transformed_ops.values() debug_print("Copied %s to %s", ops_to_copy, copied_ops) ge.reroute_ts(checkpoints_disconnected_other, checkpoints_other, can_modify=copied_ops) debug_print("Rewired %s in place of %s restricted to %s", checkpoints_disconnected_other, checkpoints_other, copied_ops) # gradient flowing through the checkpointed node boundary = [info._transformed_ops[r.op]._outputs[0] for r in ts] substitute_backprops = [d_checkpoints[r] for r in ts] dv = tf_gradients(boundary, checkpoints_disconnected_other+xs, grad_ys=substitute_backprops, **kwargs) debug_print("Got gradients %s", dv) debug_print("for %s", boundary) debug_print("with respect to %s", checkpoints_disconnected_other+xs) debug_print("with boundary backprop substitutions %s", substitute_backprops) inputs_to_do_before = [d_checkpoints[r].op for r in ts] wait_to_do_ops = list(copied_ops) + [g.op for g in dv if g is not None] my_add_control_inputs(wait_to_do_ops, inputs_to_do_before) # partial derivatives to the checkpointed nodes for r, dr in zip(checkpoints_other, dv[:len(checkpoints_other)]): if dr is not None: if d_checkpoints[r] is None: d_checkpoints[r] = dr else: d_checkpoints[r] += dr def _unsparsify(x): if not isinstance(x, tf.IndexedSlices): return x assert x.dense_shape is not None, "memory_saving_gradients encountered sparse gradients of unknown shape" indices = x.indices while indices.shape.ndims < x.values.shape.ndims: indices = tf.expand_dims(indices, -1) return tf.scatter_nd(indices, x.values, x.dense_shape) # partial derivatives to xs (usually the params of the neural net) d_xs_new = dv[len(checkpoints_other):] for j in range(len(xs)): if d_xs_new[j] is not None: if d_xs[j] is None: d_xs[j] = _unsparsify(d_xs_new[j]) else: d_xs[j] += _unsparsify(d_xs_new[j]) return d_xs def tf_toposort(ts, within_ops=None): all_ops = ge.get_forward_walk_ops([x.op for x in ts], within_ops=within_ops) deps = {} for op in all_ops: for o in op.outputs: deps[o] = set(op.inputs) sorted_ts = toposort(deps) # only keep the tensors from our original list ts_sorted_lists = [] for l in sorted_ts: keep = list(set(l).intersection(ts)) if keep: ts_sorted_lists.append(keep) return ts_sorted_lists def fast_backward_ops(within_ops, seed_ops, stop_at_ts): bwd_ops = set(ge.get_backward_walk_ops(seed_ops, stop_at_ts=stop_at_ts)) ops = bwd_ops.intersection(within_ops).difference([t.op for t in stop_at_ts]) return list(ops) @contextlib.contextmanager def capture_ops(): """Context manager to capture ops created in the block. with capture_ops() as ops: # create some ops print(ops) # => prints ops created. """ micros = int(time.time()*10**6) scope_name = str(micros) op_list = [] with tf.name_scope(scope_name): yield op_list g = tf.get_default_graph() op_list.extend(ge.select_ops(scope_name+"/.*", graph=g)) def _to_op(tensor_or_op): if hasattr(tensor_or_op, "op"): return tensor_or_op.op return tensor_or_op def _to_ops(iterable): if not _is_iterable(iterable): return iterable return [_to_op(i) for i in iterable] def _is_iterable(o): try: _ = iter(o) except Exception: return False return True DEBUG_LOGGING=False def debug_print(s, *args): """Like logger.log, but also replaces all TensorFlow ops/tensors with their names.
Sensitive to value of DEBUG_LOGGING, see enable_debug/disable_debug Usage: debug_print("see tensors %s for %s", tensorlist, [1,2,3]) """ if DEBUG_LOGGING: formatted_args = [format_ops(arg) for arg in args] print("DEBUG "+s % tuple(formatted_args)) def format_ops(ops, sort_outputs=True): """Helper method for printing ops. Converts Tensor/Operation op to op.name, rest to str(op).""" if hasattr(ops, '__iter__') and not isinstance(ops, str): l = [(op.name if hasattr(op, "name") else str(op)) for op in ops] if sort_outputs: return sorted(l) return l else: return ops.name if hasattr(ops, "name") else str(ops) def my_add_control_inputs(wait_to_do_ops, inputs_to_do_before): for op in wait_to_do_ops: ci = [i for i in inputs_to_do_before if op.control_inputs is None or i not in op.control_inputs] ge.add_control_inputs(op, ci)
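# --- Illustrative usage (not part of the file above) ---------------------------
# Per the project README, the module is wired in by monkey-patching tf.gradients
# so existing graph-construction code transparently picks up the memory-saving
# version; the comment text here is a paraphrase, not the author's code.
import tensorflow as tf
import memory_saving_gradients

# Route every subsequent tf.gradients call (including those made internally by
# optimizer.compute_gradients) through the 'memory' checkpointing strategy,
# which recomputes activations from ~sqrt(N) bottleneck checkpoints instead of
# storing them all.
tf.__dict__["gradients"] = memory_saving_gradients.gradients_memory
# -------------------------------------------------------------------------------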
[ "tensorflow.contrib.graph_editor.filter_ts", "tensorflow.device", "numpy.sqrt", "tensorflow.get_collection", "tensorflow.contrib.graph_editor.get_backward_walk_ops", "tensorflow.contrib.graph_editor.get_forward_walk_ops", "tensorflow.contrib.graph_editor.reroute_ts", "tensorflow.contrib.graph_editor.sgv", "tensorflow.scatter_nd", "tensorflow.expand_dims", "tensorflow.stop_gradient", "tensorflow.contrib.graph_editor.select_ops", "tensorflow.name_scope", "tensorflow.get_default_graph", "tensorflow.contrib.graph_editor.filter_ts_from_regex", "tensorflow.contrib.graph_editor.add_control_inputs" ]
memory_saving_gradients.py
[(8, 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(10000)'], {}), False, 'import sys\n'), (71, 'tensorflow.contrib.graph_editor.get_backward_walk_ops', 'ge.get_backward_walk_ops', (['[y.op for y in ys]'], {'inclusive': '(True)'}), True, 'import tensorflow.contrib.graph_editor as ge\n'), (77, 'tensorflow.contrib.graph_editor.get_forward_walk_ops', 'ge.get_forward_walk_ops', (['[x.op for x in xs]'], {'inclusive': '(True)', 'within_ops': 'bwd_ops'}), True, 'import tensorflow.contrib.graph_editor as ge\n'), (91, 'tensorflow.contrib.graph_editor.filter_ts', 'ge.filter_ts', (['fwd_ops', '(True)'], {}), True, 'import tensorflow.contrib.graph_editor as ge\n'), (314, 'tensorflow.contrib.graph_editor.get_forward_walk_ops', 'ge.get_forward_walk_ops', (['[x.op for x in ts]'], {'within_ops': 'within_ops'}), True, 'import tensorflow.contrib.graph_editor as ge\n'), (320, 'toposort.toposort', 'toposort', (['deps'], {}), False, 'from toposort import toposort\n'), (350, 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), True, 'import tensorflow as tf\n'), (26, 'tensorflow.device', 'tf.device', (['device'], {}), True, 'import tensorflow as tf\n'), (215, 'tensorflow.contrib.graph_editor.sgv', 'ge.sgv', (['ops_to_copy'], {}), True, 'import tensorflow.contrib.graph_editor as ge\n'), (266, 'tensorflow.contrib.graph_editor.reroute_ts', 'ge.reroute_ts', (['checkpoints_disconnected_other', 'checkpoints_other'], {'can_modify': 'copied_ops'}), True, 'import tensorflow.contrib.graph_editor as ge\n'), (332, 'tensorflow.contrib.graph_editor.get_backward_walk_ops', 'ge.get_backward_walk_ops', (['seed_ops'], {'stop_at_ts': 'stop_at_ts'}), True, 'import tensorflow.contrib.graph_editor as ge\n'), (347, 'tensorflow.name_scope', 'tf.name_scope', (['scope_name'], {}), True, 'import tensorflow as tf\n'), (351, 'tensorflow.contrib.graph_editor.select_ops', 'ge.select_ops', (["(scope_name + '/.*')"], {'graph': 'g'}), True, 'import tensorflow.contrib.graph_editor as ge\n'), (398, 'tensorflow.contrib.graph_editor.add_control_inputs', 'ge.add_control_inputs', (['op', 'ci'], {}), True, 'import tensorflow.contrib.graph_editor as ge\n'), (99, 'tensorflow.get_collection', 'tf.get_collection', (['"""checkpoints"""'], {}), True, 'import tensorflow as tf\n'), (202, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['x'], {'name': "(x.op.name + '_sg')"}), True, 'import tensorflow as tf\n'), (204, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['x'], {}), True, 'import tensorflow as tf\n'), (261, 'tensorflow.contrib.graph_editor.sgv', 'ge.sgv', (['ops_to_copy'], {}), True, 'import tensorflow.contrib.graph_editor as ge\n'), (299, 'tensorflow.scatter_nd', 'tf.scatter_nd', (['indices', 'x.values', 'x.dense_shape'], {}), True, 'import tensorflow as tf\n'), (344, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (103, 'tensorflow.contrib.graph_editor.filter_ts_from_regex', 'ge.filter_ts_from_regex', (['fwd_ops', '"""conv2d|Conv|MatMul"""'], {}), True, 'import tensorflow.contrib.graph_editor as ge\n'), (298, 'tensorflow.expand_dims', 'tf.expand_dims', (['indices', '(-1)'], {}), True, 'import tensorflow as tf\n'), (161, 'numpy.sqrt', 'np.sqrt', (['N'], {}), True, 'import numpy as np\n'), (138, 'tensorflow.contrib.graph_editor.get_backward_walk_ops', 'ge.get_backward_walk_ops', (['t.op'], {'inclusive': '(True)', 'within_ops': 'fwd_ops'}), True, 'import tensorflow.contrib.graph_editor as ge\n'), (139, 'tensorflow.contrib.graph_editor.get_forward_walk_ops', 'ge.get_forward_walk_ops', (['t.op'], {'inclusive': 
'(False)', 'within_ops': 'fwd_ops'}), True, 'import tensorflow.contrib.graph_editor as ge\n'), (164, 'numpy.sqrt', 'np.sqrt', (['N'], {}), True, 'import numpy as np\n')]
dbehrlich/sisyphus2
4e99c5f9b5de78d6011d7a6d0a0a76ac729b7fb2
from __future__ import print_function
import tensorflow as tf
import numpy as np
from time import time

# Let's make sure to keep things object-oriented,
# so that all future networks we build will extend
# the Model class below.
# This will mean (in the future) making Model less specific, so
# that future networks will "fill in the specifics" instead,
# i.e. we can make a denseRNN, a sparseRNN, a denseCNN, etc.

class Model(object):

    def __init__(self, params):
        # Network sizes (tensor dimensions)
        N_in = self.N_in = params['N_in']
        N_rec = self.N_rec = params['N_rec']
        N_out = self.N_out = params['N_out']
        N_steps = self.N_steps = params['N_steps']
        N_batch = self.N_batch = params['N_batch']

        # Physical parameters
        self.dt = params['dt']
        self.tau = params['tau']
        self.alpha = self.dt / self.tau
        self.dale_ratio = params['dale_ratio']
        self.rec_noise = params['rec_noise']

        # Load-weights path
        self.load_weights_path = params.get('load_weights_path', None)

        # Dale matrix
        dale_vec = np.ones(N_rec)
        if self.dale_ratio is not None:
            dale_vec[int(self.dale_ratio * N_rec):] = -1
            self.dale_rec = np.diag(dale_vec)
            dale_vec[int(self.dale_ratio * N_rec):] = 0
            self.dale_out = np.diag(dale_vec)
        else:
            self.dale_rec = np.diag(dale_vec)
            self.dale_out = np.diag(dale_vec)

        # Connectivity
        self.input_connectivity_mask = params.get('input_connectivity_mask', None)
        self.recurrent_connectivity_mask = params.get('recurrent_connectivity_mask', None)
        self.output_connectivity_mask = params.get('output_connectivity_mask', None)
        if self.input_connectivity_mask is None:
            self.input_connectivity_mask = np.ones((N_rec, N_in))
        if self.recurrent_connectivity_mask is None:
            self.recurrent_connectivity_mask = np.ones((N_rec, N_rec))
        if self.output_connectivity_mask is None:
            self.output_connectivity_mask = np.ones((N_out, N_rec))

        # Regularization coefficients
        self.L1_in = params.get('L1_in', 0)
        self.L1_rec = params.get('L1_rec', 0)
        self.L1_out = params.get('L1_out', 0)
        self.L2_in = params.get('L2_in', 0)
        self.L2_rec = params.get('L2_rec', 0)
        self.L2_out = params.get('L2_out', 0)
        self.L2_firing_rate = params.get('L2_firing_rate', 0)
        self.sussillo_constant = params.get('sussillo_constant', 0)

        # Trainable features
        self.W_in_train = params.get('W_in_train', True)
        self.W_rec_train = params.get('W_rec_train', True)
        self.W_out_train = params.get('W_out_train', True)
        self.b_rec_train = params.get('b_rec_train', True)
        self.b_out_train = params.get('b_out_train', True)
        self.init_state_train = params.get('init_state_train', True)

        # TensorFlow initializations
        self.x = tf.placeholder("float", [N_batch, N_steps, N_in])
        self.y = tf.placeholder("float", [N_batch, N_steps, N_out])
        self.output_mask = tf.placeholder("float", [N_batch, N_steps, N_out])

        # Trainable variables
        with tf.variable_scope("model"):

            # ------------------------------------------------
            # Random initialization, or load weights from the weights path,
            # for the initial state, weight matrices, and bias weights
            # ------------------------------------------------
            if self.load_weights_path is None:
                # Random initializations
                init_state_initializer = tf.random_normal_initializer(mean=0.1, stddev=0.01)
                W_in_initializer = tf.constant_initializer(
                    0.1 * np.random.uniform(-1, 1, size=(self.N_rec, self.N_in)))
                W_rec_initializer = tf.constant_initializer(self.initial_W())
                W_out_initializer = tf.constant_initializer(
                    0.1 * np.random.uniform(-1, 1, size=(self.N_out, self.N_rec)))
                b_rec_initializer = tf.constant_initializer(0.0)
                b_out_initializer = tf.constant_initializer(0.0)
            else:
                print("Loading Weights")
                weights = np.load(self.load_weights_path)
                init_state_initializer = tf.constant_initializer(weights['init_state'])
                W_in_initializer = tf.constant_initializer(weights['W_in'])
                W_rec_initializer = tf.constant_initializer(weights['W_rec'])
                W_out_initializer = tf.constant_initializer(weights['W_out'])
                b_rec_initializer = tf.constant_initializer(weights['b_rec'])
                b_out_initializer = tf.constant_initializer(weights['b_out'])
                self.input_connectivity_mask = weights['input_Connectivity']
                self.recurrent_connectivity_mask = weights['rec_Connectivity']
                self.output_connectivity_mask = weights['output_Connectivity']

            self.init_state = tf.get_variable('init_state', [N_batch, N_rec],
                                              initializer=init_state_initializer)

            # ------------------------------------------------
            # Trainable variables:
            # weight matrices and bias weights
            # ------------------------------------------------

            # Input weight matrix
            # (uniform initialization, as in pycog):
            self.W_in = tf.get_variable('W_in', [N_rec, N_in],
                                        initializer=W_in_initializer,
                                        trainable=self.W_in_train)
            # Recurrent weight matrix
            # (gamma (Dale) or normal (non-Dale) initialization):
            self.W_rec = tf.get_variable('W_rec', [N_rec, N_rec],
                                         initializer=W_rec_initializer,
                                         trainable=self.W_rec_train)
            # Output weight matrix
            # (uniform initialization, as in pycog):
            self.W_out = tf.get_variable('W_out', [N_out, N_rec],
                                         initializer=W_out_initializer,
                                         trainable=self.W_out_train)
            # Recurrent bias:
            self.b_rec = tf.get_variable('b_rec', [N_rec],
                                         initializer=b_rec_initializer,
                                         trainable=self.b_rec_train)
            # Output bias:
            self.b_out = tf.get_variable('b_out', [N_out],
                                         initializer=b_out_initializer,
                                         trainable=self.b_out_train)

            # ------------------------------------------------
            # Non-trainable variables:
            # overall connectivity and Dale's law matrices
            # ------------------------------------------------

            # Recurrent Dale's law weight matrix:
            self.Dale_rec = tf.get_variable('Dale_rec', [N_rec, N_rec],
                                            initializer=tf.constant_initializer(self.dale_rec),
                                            trainable=False)
            # Output Dale's law weight matrix:
            self.Dale_out = tf.get_variable('Dale_out', [N_rec, N_rec],
                                            initializer=tf.constant_initializer(self.dale_out),
                                            trainable=False)
            # Connectivity weight matrices:
            self.input_Connectivity = tf.get_variable('input_Connectivity', [N_rec, N_in],
                                                      initializer=tf.constant_initializer(
                                                          self.input_connectivity_mask),
                                                      trainable=False)
            self.rec_Connectivity = tf.get_variable('rec_Connectivity', [N_rec, N_rec],
                                                    initializer=tf.constant_initializer(
                                                        self.recurrent_connectivity_mask),
                                                    trainable=False)
            self.output_Connectivity = tf.get_variable('output_Connectivity', [N_out, N_rec],
                                                       initializer=tf.constant_initializer(
                                                           self.output_connectivity_mask),
                                                       trainable=False)

            # ------------------------------------------------
            # Network loss
            # ------------------------------------------------
            self.predictions, self.states = self.compute_predictions()
            self.error = self.mean_square_error()
            self.loss = self.error + self.regularization()

    # Regularized loss function
    def reg_loss(self):
        return self.mean_square_error() + self.regularization()

    # Mean squared error
    def mean_square_error(self):
        return tf.reduce_mean(tf.square(self.output_mask * (self.predictions - self.y)))

    # Regularizations
    def regularization(self):
        reg = 0

        # L1 weight regularization
        reg += self.L1_in * tf.reduce_mean(tf.abs(self.W_in) * self.input_Connectivity)
        reg += self.L1_rec * tf.reduce_mean(tf.abs(self.W_rec) * self.rec_Connectivity)
        if self.dale_ratio:
            reg += self.L1_out * tf.reduce_mean(
                tf.matmul(tf.abs(self.W_out) * self.output_Connectivity, self.Dale_out))
        else:
            reg += self.L1_out * tf.reduce_mean(tf.abs(self.W_out) * self.output_Connectivity)

        # L2 weight regularization
        reg += self.L2_in * tf.reduce_mean(tf.square(tf.abs(self.W_in) * self.input_Connectivity))
        reg += self.L2_rec * tf.reduce_mean(tf.square(tf.abs(self.W_rec) * self.rec_Connectivity))
        if self.dale_ratio:
            reg += self.L2_out * tf.reduce_mean(tf.square(
                tf.matmul(tf.abs(self.W_out) * self.output_Connectivity, self.Dale_out)))
        else:
            reg += self.L2_out * tf.reduce_mean(tf.square(tf.abs(self.W_out) * self.output_Connectivity))

        # L2 firing-rate regularization
        reg += self.L2_firing_rate * tf.reduce_mean(tf.square(tf.nn.relu(self.states)))

        # Sussillo regularization
        reg += self.sussillo_constant * self.sussillo_reg()

        return reg

    # Implement one step of the RNN
    def rnn_step(self, rnn_in, state):
        if self.dale_ratio:
            new_state = (1 - self.alpha) * state \
                        + self.alpha * (
                            tf.matmul(
                                tf.nn.relu(state),
                                tf.matmul(
                                    tf.abs(self.W_rec) * self.rec_Connectivity,
                                    self.Dale_rec,
                                    name="in_1"),
                                transpose_b=True, name="1")
                            + tf.matmul(
                                rnn_in,
                                tf.abs(self.W_in) * self.input_Connectivity,
                                transpose_b=True, name="2")
                            + self.b_rec) \
                        + np.sqrt(2.0 * self.alpha * self.rec_noise * self.rec_noise) \
                        * tf.random_normal(state.get_shape(), mean=0.0, stddev=1.0)
        else:
            new_state = ((1 - self.alpha) * state) \
                        + self.alpha * (
                            tf.matmul(
                                tf.nn.relu(state),
                                self.W_rec * self.rec_Connectivity,
                                transpose_b=True, name="1")
                            + tf.matmul(
                                rnn_in,
                                self.W_in * self.input_Connectivity,
                                transpose_b=True, name="2")
                            + self.b_rec) \
                        + np.sqrt(2.0 * self.alpha * self.rec_noise * self.rec_noise) \
                        * tf.random_normal(state.get_shape(), mean=0.0, stddev=1.0)
        return new_state

    def rnn_output(self, new_state):
        if self.dale_ratio:
            new_output = tf.matmul(tf.nn.relu(new_state),
                                   tf.matmul(tf.abs(self.W_out) * self.output_Connectivity,
                                             self.Dale_out,
                                             name="in_2"),
                                   transpose_b=True, name="3") \
                         + self.b_out
        else:
            new_output = tf.matmul(tf.nn.relu(new_state),
                                   self.W_out * self.output_Connectivity,
                                   transpose_b=True, name="3") \
                         + self.b_out
        return new_output

    def rnn_step_scan(self, state, rnn_in):
        if self.dale_ratio:
            new_state = (1 - self.alpha) * state \
                        + self.alpha * (
                            tf.matmul(
                                tf.nn.relu(state),
                                tf.matmul(
                                    tf.abs(self.W_rec) * self.rec_Connectivity,
                                    self.Dale_rec,
                                    name="in_1"),
                                transpose_b=True, name="1")
                            + tf.matmul(
                                rnn_in,
                                tf.abs(self.W_in) * self.input_Connectivity,
                                transpose_b=True, name="2")
                            + self.b_rec) \
                        + np.sqrt(2.0 * self.alpha * self.rec_noise * self.rec_noise) \
                        * tf.random_normal(state.get_shape(), mean=0.0, stddev=1.0)
        else:
            new_state = ((1 - self.alpha) * state) \
                        + self.alpha * (
                            tf.matmul(
                                tf.nn.relu(state),
                                self.W_rec * self.rec_Connectivity,
                                transpose_b=True, name="1")
                            + tf.matmul(
                                rnn_in,
                                self.W_in * self.input_Connectivity,
                                transpose_b=True, name="2")
                            + self.b_rec) \
                        + np.sqrt(2.0 * self.alpha * self.rec_noise * self.rec_noise) \
                        * tf.random_normal(state.get_shape(), mean=0.0, stddev=1.0)
        return new_state

    def output_step_scan(self, dummy, new_state):
        if self.dale_ratio:
            new_output = tf.matmul(
                tf.nn.relu(new_state),
                tf.matmul(
                    tf.abs(self.W_out) * self.output_Connectivity,
                    self.Dale_out,
                    name="in_2"),
                transpose_b=True, name="3") \
                + self.b_out
        else:
            new_output = tf.matmul(tf.nn.relu(new_state),
                                   self.W_out * self.output_Connectivity,
                                   transpose_b=True, name="3") + self.b_out
        return new_output

    def compute_predictions(self):
        rnn_inputs = tf.unstack(self.x, axis=1)
        state = self.init_state
        rnn_outputs = []
        rnn_states = []
        for rnn_input in rnn_inputs:
            state = self.rnn_step(rnn_input, state)
            output = self.rnn_output(state)
            rnn_outputs.append(output)
            rnn_states.append(state)
        return tf.transpose(rnn_outputs, [1, 0, 2]), rnn_states

    def compute_predictions_scan(self):
        state = self.init_state
        rnn_states = \
            tf.scan(
                self.rnn_step_scan,
                tf.transpose(self.x, [1, 0, 2]),
                initializer=state,
                parallel_iterations=1)
        rnn_outputs = \
            tf.scan(
                self.output_step_scan,
                rnn_states,
                initializer=tf.zeros([self.N_batch, self.N_out]),
                parallel_iterations=1)
        return tf.transpose(rnn_outputs, [1, 0, 2]), tf.unstack(rnn_states)

    # Fix the spectral radius of the recurrent matrix
    def initial_W(self):
        # Added gamma-distributed initial weights, as in pycog
        if self.dale_ratio:
            self.W_dist0 = 'gamma'
        else:
            self.W_dist0 = 'normal'

        if self.W_dist0 == 'normal':
            w0 = np.random.normal(scale=1, size=(self.N_rec, self.N_rec))
        elif self.W_dist0 == 'gamma':
            k = 2
            theta = 0.1 / k
            w0 = np.random.gamma(k, theta, size=(self.N_rec, self.N_rec))

        if self.dale_ratio:
            W = np.matmul(abs(w0), self.dale_rec)
        else:
            W = w0

        rho = max(abs(np.linalg.eigvals(W)))
        # +np.diag(np.ones(self.N_rec)*(1-self.alpha)))))
        # add diagonal matrix 1-alpha to account for persistence tau
        return (1.1 / rho) * W  # - .9*np.diag(np.ones(self.N_rec)*(1-self.alpha))  # correct for tau

    # Vanishing-gradient regularization, Omega, as in Pascanu
    # NOTE: this is ReLU specific
    def dOmega_dWrec(self):
        # states in shape (timesteps, batch, n_rec)
        states = self.states
        dxt_list = tf.gradients(self.error, states)
        # dxt_list[0] = tf.Print(dxt_list[0], [dxt_list[0]], "dxt 0: ")

        test = tf.gradients(states[0], states[-1])

        dxt = tf.stack(dxt_list)
        xt = tf.stack(states)

        num = (1 - self.alpha) * dxt + tf.tensordot(
            self.alpha * dxt,
            tf.transpose(tf.matmul(tf.abs(self.W_rec) * self.rec_Connectivity, self.Dale_rec)),
            axes=1) * \
            tf.where(tf.greater(xt, 0), tf.ones_like(xt), tf.zeros_like(xt))
        denom = dxt

        # sum over hidden units
        num = tf.reduce_sum(tf.square(num), axis=2)
        denom = tf.reduce_sum(tf.square(denom), axis=2)

        bounded = tf.where(tf.greater(denom, 1e-20), tf.div(num, 1.0 * denom), tf.ones_like(num))
        nelems = tf.reduce_mean(tf.where(tf.greater(denom, 1e-20),
                                         1.0 * tf.ones_like(num),
                                         1.0 * tf.zeros_like(num)), axis=1)

        # sum the mean over each batch by time steps
        Omega = tf.square(bounded - 1.0)
        Omega = tf.reduce_sum(tf.reduce_mean(Omega, axis=1)) / (1.0 * tf.reduce_sum(nelems))

        out = tf.gradients(Omega, self.W_rec)

        out[0] = tf.Print(out[0], [out[0], self.W_rec, Omega], "omega grads")
        out[0] = tf.verify_tensor_all_finite(out[0], "dead omega grad")

        return out, test

    def sussillo_reg(self):
        states = self.states
        reg = 0
        for state in states:
            dJr = tf.matmul(tf.nn.relu(state),
                            tf.matmul(tf.abs(self.W_rec) * self.rec_Connectivity, self.Dale_rec))
            reg += tf.reduce_sum(tf.square(dJr))
        return reg / (self.N_steps * self.N_batch)

    # Train the model using Adam
    def train(self, sess, generator, learning_rate=.001, training_iters=50000,
              batch_size=64, display_step=10, weight_save_step=100, save_weights_path=None,
              generator_function=None, training_weights_path=None):

        # train with gradient clipping
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        grads = optimizer.compute_gradients(self.loss)
        clipped_grads = [(tf.clip_by_norm(grad, 1.0), var)
                         if grad is not None else (grad, var)
                         for grad, var in grads]

        # add vanishing-gradient regularizer
        # out, test = self.dOmega_dWrec()
        # clipped_grads[0] = (tf.add(out[0], clipped_grads[0][0]), clipped_grads[0][1])
        # clipped_grads[0] = (tf.Print(clipped_grads[0][0], [clipped_grads[0][0]], "gw_rec"), clipped_grads[0][1])

        optimize = optimizer.apply_gradients(clipped_grads)

        # run session
        sess.run(tf.global_variables_initializer())
        step = 1

        # time training
        t1 = time()
        # keep training until we reach max iterations
        while step * batch_size < training_iters:
            batch_x, batch_y, output_mask = generator.next()
            sess.run(optimize, feed_dict={self.x: batch_x, self.y: batch_y,
                                          self.output_mask: output_mask})
            if step % display_step == 0:
                # calculate batch loss
                loss = sess.run(self.loss, feed_dict={self.x: batch_x, self.y: batch_y,
                                                      self.output_mask: output_mask})
                print("Iter " + str(step * batch_size) + ", Minibatch Loss= " +
                      "{:.6f}".format(loss))
                # allow for curriculum learning
                if generator_function is not None:
                    generator = generator_function(loss, step)
            # allow for saving weights during training
            if step % weight_save_step == 0:
                if training_weights_path is not None:
                    np.savez(training_weights_path + str(step),
                             W_in=self.W_in.eval(session=sess),
                             W_rec=self.W_rec.eval(session=sess),
                             W_out=self.W_out.eval(session=sess),
                             b_rec=self.b_rec.eval(session=sess),
                             b_out=self.b_out.eval(session=sess),
                             init_state=self.init_state.eval(session=sess),
                             input_Connectivity=self.input_Connectivity.eval(session=sess),
                             rec_Connectivity=self.rec_Connectivity.eval(session=sess),
                             output_Connectivity=self.output_Connectivity.eval(session=sess))
            step += 1
        t2 = time()
        print("Optimization Finished!")

        # save weights
        if save_weights_path is not None:
            np.savez(save_weights_path,
                     W_in=self.W_in.eval(session=sess),
                     W_rec=self.W_rec.eval(session=sess),
                     W_out=self.W_out.eval(session=sess),
                     b_rec=self.b_rec.eval(session=sess),
                     b_out=self.b_out.eval(session=sess),
                     init_state=self.init_state.eval(session=sess),
                     input_Connectivity=self.input_Connectivity.eval(session=sess),
                     rec_Connectivity=self.rec_Connectivity.eval(session=sess),
                     output_Connectivity=self.output_Connectivity.eval(session=sess))
            print("Model saved in file: %s" % save_weights_path)

        return (t2 - t1)

    # Use a trained model to get test outputs
    def test(self, sess, rnn_in, weights_path=None):
        if weights_path:
            saver = tf.train.Saver()
            # restore variables from disk
            saver.restore(sess, weights_path)
            predictions, states = sess.run([self.predictions, self.states],
                                           feed_dict={self.x: rnn_in})
        else:
            predictions, states = sess.run([self.predictions, self.states],
                                           feed_dict={self.x: rnn_in})
        return predictions, states
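A minimal usage sketch for the Model class above (not part of the original file): it assumes a TensorFlow 1.x session, and ToyGenerator plus all parameter values are illustrative assumptions. Note that train() calls generator.next(), so the sketch supplies an object with that method.

# --- Hypothetical usage sketch (assumes TF 1.x; ToyGenerator and all values are illustrative) ---
import numpy as np
import tensorflow as tf

class ToyGenerator(object):
    """Toy batch source exposing the .next() interface that Model.train() expects."""
    def __init__(self, n_batch, n_steps, n_in, n_out):
        self.shape_x = (n_batch, n_steps, n_in)
        self.shape_y = (n_batch, n_steps, n_out)
    def next(self):
        x = np.random.randn(*self.shape_x).astype(np.float32)   # random inputs
        y = np.zeros(self.shape_y, dtype=np.float32)            # dummy targets
        mask = np.ones(self.shape_y, dtype=np.float32)          # score every timestep
        return x, y, mask

params = {'N_in': 2, 'N_rec': 50, 'N_out': 2, 'N_steps': 100, 'N_batch': 64,
          'dt': 10.0, 'tau': 100.0, 'dale_ratio': 0.8, 'rec_noise': 0.1}
model = Model(params)  # builds the graph in __init__
with tf.Session() as sess:
    gen = ToyGenerator(params['N_batch'], params['N_steps'], params['N_in'], params['N_out'])
    model.train(sess, gen, training_iters=64 * 100, batch_size=params['N_batch'])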
[ "numpy.diag", "tensorflow.get_variable", "numpy.linalg.eigvals", "numpy.sqrt", "tensorflow.zeros", "tensorflow.stack", "tensorflow.reduce_sum", "tensorflow.abs", "tensorflow.train.AdamOptimizer", "tensorflow.greater", "tensorflow.gradients", "tensorflow.div", "tensorflow.clip_by_norm", "tensorflow.square", "tensorflow.train.Saver", "numpy.load", "tensorflow.random_normal_initializer", "tensorflow.matmul", "tensorflow.Print", "tensorflow.unstack", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.zeros_like", "tensorflow.nn.relu", "tensorflow.transpose", "tensorflow.reduce_mean", "tensorflow.ones_like", "numpy.ones", "tensorflow.constant_initializer", "numpy.random.normal", "numpy.random.gamma", "tensorflow.variable_scope", "numpy.random.uniform", "tensorflow.verify_tensor_all_finite" ]
backend/networks.py
[(37, 'numpy.ones', 'np.ones', (['N_rec'], {}), True, 'import numpy as np\n'), (79, 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[N_batch, N_steps, N_in]'], {}), True, 'import tensorflow as tf\n'), (80, 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[N_batch, N_steps, N_out]'], {}), True, 'import tensorflow as tf\n'), (81, 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[N_batch, N_steps, N_out]'], {}), True, 'import tensorflow as tf\n'), (328, 'tensorflow.unstack', 'tf.unstack', (['self.x'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (391, 'tensorflow.gradients', 'tf.gradients', (['self.error', 'states'], {}), True, 'import tensorflow as tf\n'), (395, 'tensorflow.gradients', 'tf.gradients', (['states[0]', 'states[-1]'], {}), True, 'import tensorflow as tf\n'), (397, 'tensorflow.stack', 'tf.stack', (['dxt_list'], {}), True, 'import tensorflow as tf\n'), (398, 'tensorflow.stack', 'tf.stack', (['states'], {}), True, 'import tensorflow as tf\n'), (415, 'tensorflow.square', 'tf.square', (['(bounded - 1.0)'], {}), True, 'import tensorflow as tf\n'), (418, 'tensorflow.gradients', 'tf.gradients', (['Omega', 'self.W_rec'], {}), True, 'import tensorflow as tf\n'), (420, 'tensorflow.Print', 'tf.Print', (['out[0]', '[out[0], self.W_rec, Omega]', '"""omega grads"""'], {}), True, 'import tensorflow as tf\n'), (421, 'tensorflow.verify_tensor_all_finite', 'tf.verify_tensor_all_finite', (['out[0]', '"""dead omega grad"""'], {}), True, 'import tensorflow as tf\n'), (446, 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate'}), True, 'import tensorflow as tf\n'), (464, 'time.time', 'time', ([], {}), False, 'from time import time\n'), (495, 'time.time', 'time', ([], {}), False, 'from time import time\n'), (40, 'numpy.diag', 'np.diag', (['dale_vec'], {}), True, 'import numpy as np\n'), (42, 'numpy.diag', 'np.diag', (['dale_vec'], {}), True, 'import numpy as np\n'), (44, 'numpy.diag', 'np.diag', (['dale_vec'], {}), True, 'import numpy as np\n'), (45, 'numpy.diag', 'np.diag', (['dale_vec'], {}), True, 'import numpy as np\n'), (52, 'numpy.ones', 'np.ones', (['(N_rec, N_in)'], {}), True, 'import numpy as np\n'), (54, 'numpy.ones', 'np.ones', (['(N_rec, N_rec)'], {}), True, 'import numpy as np\n'), (56, 'numpy.ones', 'np.ones', (['(N_out, N_rec)'], {}), True, 'import numpy as np\n'), (84, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""model"""'], {}), True, 'import tensorflow as tf\n'), (116, 'tensorflow.get_variable', 'tf.get_variable', (['"""init_state"""', '[N_batch, N_rec]'], {'initializer': 'init_state_initializer'}), True, 'import tensorflow as tf\n'), (127, 'tensorflow.get_variable', 'tf.get_variable', (['"""W_in"""', '[N_rec, N_in]'], {'initializer': 'W_in_initializer', 'trainable': 'self.W_in_train'}), True, 'import tensorflow as tf\n'), (133, 'tensorflow.get_variable', 'tf.get_variable', (['"""W_rec"""', '[N_rec, N_rec]'], {'initializer': 'W_rec_initializer', 'trainable': 'self.W_rec_train'}), True, 'import tensorflow as tf\n'), (140, 'tensorflow.get_variable', 'tf.get_variable', (['"""W_out"""', '[N_out, N_rec]'], {'initializer': 'W_out_initializer', 'trainable': 'self.W_out_train'}), True, 'import tensorflow as tf\n'), (145, 'tensorflow.get_variable', 'tf.get_variable', (['"""b_rec"""', '[N_rec]'], {'initializer': 'b_rec_initializer', 'trainable': 'self.b_rec_train'}), True, 'import tensorflow as tf\n'), (148, 'tensorflow.get_variable', 'tf.get_variable', (['"""b_out"""', '[N_out]'], 
{'initializer': 'b_out_initializer', 'trainable': 'self.b_out_train'}), True, 'import tensorflow as tf\n'), (192, 'tensorflow.square', 'tf.square', (['(self.output_mask * (self.predictions - self.y))'], {}), True, 'import tensorflow as tf\n'), (337, 'tensorflow.transpose', 'tf.transpose', (['rnn_outputs', '[1, 0, 2]'], {}), True, 'import tensorflow as tf\n'), (346, 'tensorflow.transpose', 'tf.transpose', (['self.x', '[1, 0, 2]'], {}), True, 'import tensorflow as tf\n'), (355, 'tensorflow.transpose', 'tf.transpose', (['rnn_outputs', '[1, 0, 2]'], {}), True, 'import tensorflow as tf\n'), (355, 'tensorflow.unstack', 'tf.unstack', (['rnn_states'], {}), True, 'import tensorflow as tf\n'), (368, 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(1)', 'size': '(self.N_rec, self.N_rec)'}), True, 'import numpy as np\n'), (408, 'tensorflow.square', 'tf.square', (['num'], {}), True, 'import tensorflow as tf\n'), (409, 'tensorflow.square', 'tf.square', (['denom'], {}), True, 'import tensorflow as tf\n'), (411, 'tensorflow.greater', 'tf.greater', (['denom', '(1e-20)'], {}), True, 'import tensorflow as tf\n'), (411, 'tensorflow.div', 'tf.div', (['num', '(1.0 * denom)'], {}), True, 'import tensorflow as tf\n'), (411, 'tensorflow.ones_like', 'tf.ones_like', (['num'], {}), True, 'import tensorflow as tf\n'), (460, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (517, 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), True, 'import tensorflow as tf\n'), (92, 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'mean': '(0.1)', 'stddev': '(0.01)'}), True, 'import tensorflow as tf\n'), (98, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (99, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (102, 'numpy.load', 'np.load', (['self.load_weights_path'], {}), True, 'import numpy as np\n'), (103, 'tensorflow.constant_initializer', 'tf.constant_initializer', (["weights['init_state']"], {}), True, 'import tensorflow as tf\n'), (104, 'tensorflow.constant_initializer', 'tf.constant_initializer', (["weights['W_in']"], {}), True, 'import tensorflow as tf\n'), (105, 'tensorflow.constant_initializer', 'tf.constant_initializer', (["weights['W_rec']"], {}), True, 'import tensorflow as tf\n'), (106, 'tensorflow.constant_initializer', 'tf.constant_initializer', (["weights['W_out']"], {}), True, 'import tensorflow as tf\n'), (107, 'tensorflow.constant_initializer', 'tf.constant_initializer', (["weights['b_rec']"], {}), True, 'import tensorflow as tf\n'), (108, 'tensorflow.constant_initializer', 'tf.constant_initializer', (["weights['b_out']"], {}), True, 'import tensorflow as tf\n'), (353, 'tensorflow.zeros', 'tf.zeros', (['[self.N_batch, self.N_out]'], {}), True, 'import tensorflow as tf\n'), (372, 'numpy.random.gamma', 'np.random.gamma', (['k', 'theta'], {'size': '(self.N_rec, self.N_rec)'}), True, 'import numpy as np\n'), (380, 'numpy.linalg.eigvals', 'np.linalg.eigvals', (['W'], {}), True, 'import numpy as np\n'), (412, 'tensorflow.greater', 'tf.greater', (['denom', '(1e-20)'], {}), True, 'import tensorflow as tf\n'), (416, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['Omega'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (416, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['nelems'], {}), True, 'import tensorflow as tf\n'), (432, 'tensorflow.nn.relu', 'tf.nn.relu', 
(['state'], {}), True, 'import tensorflow as tf\n'), (434, 'tensorflow.square', 'tf.square', (['dJr'], {}), True, 'import tensorflow as tf\n'), (158, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['self.dale_rec'], {}), True, 'import tensorflow as tf\n'), (162, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['self.dale_out'], {}), True, 'import tensorflow as tf\n'), (167, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['self.input_connectivity_mask'], {}), True, 'import tensorflow as tf\n'), (171, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['self.recurrent_connectivity_mask'], {}), True, 'import tensorflow as tf\n'), (175, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['self.output_connectivity_mask'], {}), True, 'import tensorflow as tf\n'), (199, 'tensorflow.abs', 'tf.abs', (['self.W_in'], {}), True, 'import tensorflow as tf\n'), (200, 'tensorflow.abs', 'tf.abs', (['self.W_rec'], {}), True, 'import tensorflow as tf\n'), (216, 'tensorflow.nn.relu', 'tf.nn.relu', (['self.states'], {}), True, 'import tensorflow as tf\n'), (240, 'numpy.sqrt', 'np.sqrt', (['(2.0 * self.alpha * self.rec_noise * self.rec_noise)'], {}), True, 'import numpy as np\n'), (255, 'numpy.sqrt', 'np.sqrt', (['(2.0 * self.alpha * self.rec_noise * self.rec_noise)'], {}), True, 'import numpy as np\n'), (262, 'tensorflow.nn.relu', 'tf.nn.relu', (['new_state'], {}), True, 'import tensorflow as tf\n'), (267, 'tensorflow.nn.relu', 'tf.nn.relu', (['new_state'], {}), True, 'import tensorflow as tf\n'), (289, 'numpy.sqrt', 'np.sqrt', (['(2.0 * self.alpha * self.rec_noise * self.rec_noise)'], {}), True, 'import numpy as np\n'), (303, 'numpy.sqrt', 'np.sqrt', (['(2.0 * self.alpha * self.rec_noise * self.rec_noise)'], {}), True, 'import numpy as np\n'), (312, 'tensorflow.nn.relu', 'tf.nn.relu', (['new_state'], {}), True, 'import tensorflow as tf\n'), (321, 'tensorflow.nn.relu', 'tf.nn.relu', (['new_state'], {}), True, 'import tensorflow as tf\n'), (404, 'tensorflow.greater', 'tf.greater', (['xt', '(0)'], {}), True, 'import tensorflow as tf\n'), (404, 'tensorflow.ones_like', 'tf.ones_like', (['xt'], {}), True, 'import tensorflow as tf\n'), (404, 'tensorflow.zeros_like', 'tf.zeros_like', (['xt'], {}), True, 'import tensorflow as tf\n'), (412, 'tensorflow.ones_like', 'tf.ones_like', (['num'], {}), True, 'import tensorflow as tf\n'), (412, 'tensorflow.zeros_like', 'tf.zeros_like', (['num'], {}), True, 'import tensorflow as tf\n'), (448, 'tensorflow.clip_by_norm', 'tf.clip_by_norm', (['grad', '(1.0)'], {}), True, 'import tensorflow as tf\n'), (94, 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(self.N_rec, self.N_in)'}), True, 'import numpy as np\n'), (97, 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(self.N_out, self.N_rec)'}), True, 'import numpy as np\n'), (204, 'tensorflow.abs', 'tf.abs', (['self.W_out'], {}), True, 'import tensorflow as tf\n'), (207, 'tensorflow.abs', 'tf.abs', (['self.W_in'], {}), True, 'import tensorflow as tf\n'), (208, 'tensorflow.abs', 'tf.abs', (['self.W_rec'], {}), True, 'import tensorflow as tf\n'), (433, 'tensorflow.abs', 'tf.abs', (['self.W_rec'], {}), True, 'import tensorflow as tf\n'), (202, 'tensorflow.abs', 'tf.abs', (['self.W_out'], {}), True, 'import tensorflow as tf\n'), (213, 'tensorflow.abs', 'tf.abs', (['self.W_out'], {}), True, 'import tensorflow as tf\n'), (263, 'tensorflow.abs', 'tf.abs', (['self.W_out'], {}), True, 'import tensorflow as tf\n'), (314, 
'tensorflow.abs', 'tf.abs', (['self.W_out'], {}), True, 'import tensorflow as tf\n'), (211, 'tensorflow.abs', 'tf.abs', (['self.W_out'], {}), True, 'import tensorflow as tf\n'), (250, 'tensorflow.matmul', 'tf.matmul', (['rnn_in', '(self.W_in * self.input_Connectivity)'], {'transpose_b': '(True)', 'name': '"""2"""'}), True, 'import tensorflow as tf\n'), (298, 'tensorflow.matmul', 'tf.matmul', (['rnn_in', '(self.W_in * self.input_Connectivity)'], {'transpose_b': '(True)', 'name': '"""2"""'}), True, 'import tensorflow as tf\n'), (402, 'tensorflow.abs', 'tf.abs', (['self.W_rec'], {}), True, 'import tensorflow as tf\n'), (230, 'tensorflow.nn.relu', 'tf.nn.relu', (['state'], {}), True, 'import tensorflow as tf\n'), (247, 'tensorflow.nn.relu', 'tf.nn.relu', (['state'], {}), True, 'import tensorflow as tf\n'), (279, 'tensorflow.nn.relu', 'tf.nn.relu', (['state'], {}), True, 'import tensorflow as tf\n'), (295, 'tensorflow.nn.relu', 'tf.nn.relu', (['state'], {}), True, 'import tensorflow as tf\n'), (237, 'tensorflow.abs', 'tf.abs', (['self.W_in'], {}), True, 'import tensorflow as tf\n'), (286, 'tensorflow.abs', 'tf.abs', (['self.W_in'], {}), True, 'import tensorflow as tf\n'), (232, 'tensorflow.abs', 'tf.abs', (['self.W_rec'], {}), True, 'import tensorflow as tf\n'), (281, 'tensorflow.abs', 'tf.abs', (['self.W_rec'], {}), True, 'import tensorflow as tf\n')]
Chngzz/Dynamic-Gesture-Recognition-Based-on-FMCW
11b97e65b6f552972660b0d191eff7ec42965a2f
import tensorflow as tf


def conv3d(layer_name, x, out_channels, kernel_size=[1, 3, 3], strides=[1, 1, 1, 1, 1],
           data_format='NDHWC', is_pretrain=True):
    '''
    Convolution 3D op wrapper, use RELU activation after convolution
    '''
    in_channels = x.get_shape()[-1].value
    with tf.variable_scope(layer_name):
        w = tf.get_variable(name='weight',
                            trainable=is_pretrain,
                            shape=[kernel_size[0], kernel_size[1], kernel_size[2], in_channels, out_channels],
                            initializer=tf.contrib.layers.xavier_initializer())
        b = tf.get_variable(name='bias',
                            trainable=is_pretrain,
                            shape=[out_channels],
                            initializer=tf.contrib.layers.xavier_initializer())
        x = tf.nn.conv3d(x, w, strides=strides, padding='SAME', data_format=data_format, name='conv3d')
        x = tf.nn.bias_add(x, b, name='bias_add')
        x = tf.nn.relu(x, name='relu')
        return x


def conv(layer_name, x, out_channels, kernel_size=[3, 3], strides=[1, 1, 1, 1], is_pretrain=True):
    '''
    Convolution op wrapper, use RELU activation after convolution
    Args:
        layer_name:
        x: input tensor
    Returns:
        4D tensor
    '''
    # x.get_shape()[-1] : Dimension(3)
    # x.get_shape()[-1].value : 3
    in_channels = x.get_shape()[-1].value
    with tf.variable_scope(layer_name):
        w = tf.get_variable(name='weights',
                            trainable=is_pretrain,
                            shape=[kernel_size[0], kernel_size[1], in_channels, out_channels],
                            initializer=tf.contrib.layers.xavier_initializer())
        b = tf.get_variable(name='bias',
                            trainable=is_pretrain,
                            shape=[out_channels],
                            initializer=tf.constant_initializer(0.0))
        x = tf.nn.conv2d(x, w, strides, padding='SAME', name='conv')
        x = tf.nn.bias_add(x, b, name='bias_add')
        x = tf.nn.relu(x, name='relu')
        return x


def pool(layer_name, x, kernel_size=[1, 2, 2, 1], strides=[1, 2, 2, 1], is_max_pool=True):
    '''
    Pooling op
    '''
    if is_max_pool:
        # NOTE: reusing layer_name for the op may cause a name conflict
        x = tf.nn.max_pool(x, kernel_size, strides=strides, padding='SAME', name=layer_name)
    else:
        x = tf.nn.avg_pool(x, kernel_size, strides=strides, padding='SAME', name=layer_name)
    return x


def pool3d(layer_name, x, kernel_size=[1, 1, 2, 2, 1], strides=[1, 1, 2, 2, 1], is_max_pool=True):
    '''
    Pooling 3D op
    '''
    if is_max_pool:
        x = tf.nn.max_pool3d(x, ksize=kernel_size, strides=strides, padding='VALID', name=layer_name)
    else:
        x = tf.nn.avg_pool3d(x, ksize=kernel_size, strides=strides, padding='VALID', name=layer_name)
    return x


def batch_norm(x):
    '''
    Batch normalization (w/o the offset and scale)
    '''
    # Not implemented in this file.
    pass


def fc_layer(layer_name, x, out_nodes):
    '''
    Wrapper for fully connected layers with RELU activation as default
    '''
    shape = x.get_shape()
    if len(shape) == 5:  # FC 3D
        size = shape[1].value * shape[2].value * shape[3].value * shape[4].value
    elif len(shape) == 4:
        size = shape[1].value * shape[2].value * shape[3].value
    else:
        size = shape[-1].value
    with tf.variable_scope(layer_name):
        # NOTE: both weights and bias are zero-initialized here
        w = tf.get_variable(name='weight',
                            shape=[size, out_nodes],
                            initializer=tf.constant_initializer(0.0))
        b = tf.get_variable(name='bias',
                            shape=[out_nodes],
                            initializer=tf.constant_initializer(0.0))
        # batch?
        flat_x = tf.reshape(x, [-1, size])
        x = tf.nn.bias_add(tf.matmul(flat_x, w), b)
        x = tf.nn.relu(x)
        return x


def lstm():
    '''
    Build LSTM cell
    '''
    # Not implemented in this file.
    pass


def loss(logits, labels):
    '''
    Compute loss
    '''
    with tf.name_scope('loss') as scope:
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels, name='cross-entropy')
        loss = tf.reduce_mean(cross_entropy, name='loss')
        tf.summary.scalar(scope + '/loss', loss)
    return loss


def accuracy(logits, labels):
    '''
    Evaluate the quality of the logits at predicting the label
    '''
    # for summary
    with tf.name_scope('accuracy') as scope:
        correct = tf.equal(tf.arg_max(logits, 1), tf.arg_max(labels, 1))
        correct = tf.cast(correct, tf.float32)
        accuracy = tf.reduce_mean(correct) * 100.0
        tf.summary.scalar(scope + 'accuracy', accuracy)
    return accuracy


def num_correct_prediction(logits, labels):
    '''
    Evaluate the quality of the logits at predicting the label
    '''
    correct = tf.equal(tf.arg_max(logits, 1), tf.arg_max(labels, 1))
    correct = tf.cast(correct, tf.int32)
    n_correct = tf.reduce_sum(correct)
    return n_correct


def optimize(loss, learning_rate, global_step):
    '''
    Optimization, use Gradient Descent as default
    '''
    with tf.name_scope('optimizer'):
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
        train_op = optimizer.minimize(loss, global_step=global_step)
    return train_op


def print_all_variables(train_only=True):
    '''
    Print all trainable and non-trainable variables
    '''
    if train_only:
        t_vars = tf.trainable_variables()
        print('[*] printing trainable variables')
    else:
        try:
            t_vars = tf.global_variables()
        except:
            t_vars = tf.all_variables()
        print('[*] printing global variables')
    for idx, v in enumerate(t_vars):
        print(' var {:3}: {:15} {}'.format(idx, str(v.get_shape()), v.name))
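A minimal usage sketch of the wrappers above (not part of the original file): it assumes TF 1.x graph mode, and the tensor names (images, labels, global_step) and shapes are illustrative assumptions.

# --- Hypothetical usage sketch (assumes TF 1.x; names and shapes are illustrative) ---
images = tf.placeholder(tf.float32, shape=[None, 32, 32, 3], name='images')
labels = tf.placeholder(tf.float32, shape=[None, 10], name='labels')  # one-hot
net = conv('conv1', images, out_channels=16)      # -> [None, 32, 32, 16]
net = pool('pool1', net)                          # -> [None, 16, 16, 16]
logits = fc_layer('fc1', net, out_nodes=10)       # flattens, then FC + ReLU
total_loss = loss(logits, labels)                 # softmax cross-entropy
global_step = tf.Variable(0, trainable=False, name='global_step')
train_op = optimize(total_loss, learning_rate=0.01, global_step=global_step)
acc = accuracy(logits, labels)                    # percent correct, for summaries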
[ "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.nn.max_pool", "tensorflow.reduce_sum", "tensorflow.cast", "tensorflow.global_variables", "tensorflow.summary.scalar", "tensorflow.nn.conv2d", "tensorflow.all_variables", "tensorflow.name_scope", "tensorflow.contrib.layers.xavier_initializer", "tensorflow.trainable_variables", "tensorflow.matmul", "tensorflow.nn.conv3d", "tensorflow.train.GradientDescentOptimizer", "tensorflow.nn.avg_pool", "tensorflow.nn.avg_pool3d", "tensorflow.nn.bias_add", "tensorflow.nn.relu", "tensorflow.nn.max_pool3d", "tensorflow.arg_max", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.constant_initializer", "tensorflow.variable_scope" ]
TS-FNN/src/tools.py
[(142, 'tensorflow.cast', 'tf.cast', (['correct', 'tf.int32'], {}), True, 'import tensorflow as tf\n'), (143, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['correct'], {}), True, 'import tensorflow as tf\n'), (8, 'tensorflow.variable_scope', 'tf.variable_scope', (['layer_name'], {}), True, 'import tensorflow as tf\n'), (17, 'tensorflow.nn.conv3d', 'tf.nn.conv3d', (['x', 'w'], {'strides': 'strides', 'padding': '"""SAME"""', 'data_format': 'data_format', 'name': '"""conv3d"""'}), True, 'import tensorflow as tf\n'), (18, 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['x', 'b'], {'name': '"""bias_add"""'}), True, 'import tensorflow as tf\n'), (19, 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {'name': '"""relu"""'}), True, 'import tensorflow as tf\n'), (35, 'tensorflow.variable_scope', 'tf.variable_scope', (['layer_name'], {}), True, 'import tensorflow as tf\n'), (44, 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'w', 'strides'], {'padding': '"""SAME"""', 'name': '"""conv"""'}), True, 'import tensorflow as tf\n'), (45, 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['x', 'b'], {'name': '"""bias_add"""'}), True, 'import tensorflow as tf\n'), (46, 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {'name': '"""relu"""'}), True, 'import tensorflow as tf\n'), (60, 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x', 'kernel_size'], {'strides': 'strides', 'padding': '"""SAME"""', 'name': 'layer_name'}), True, 'import tensorflow as tf\n'), (62, 'tensorflow.nn.avg_pool', 'tf.nn.avg_pool', (['x', 'kernel_size'], {'strides': 'strides', 'padding': '"""SAME"""', 'name': 'layer_name'}), True, 'import tensorflow as tf\n'), (70, 'tensorflow.nn.max_pool3d', 'tf.nn.max_pool3d', (['x'], {'ksize': 'kernel_size', 'strides': 'strides', 'padding': '"""VALID"""', 'name': 'layer_name'}), True, 'import tensorflow as tf\n'), (72, 'tensorflow.nn.avg_pool3d', 'tf.nn.avg_pool3d', (['x'], {'ksize': 'kernel_size', 'strides': 'strides', 'padding': '"""VALID"""', 'name': 'layer_name'}), True, 'import tensorflow as tf\n'), (93, 'tensorflow.variable_scope', 'tf.variable_scope', (['layer_name'], {}), True, 'import tensorflow as tf\n'), (101, 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, size]'], {}), True, 'import tensorflow as tf\n'), (104, 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), True, 'import tensorflow as tf\n'), (117, 'tensorflow.name_scope', 'tf.name_scope', (['"""loss"""'], {}), True, 'import tensorflow as tf\n'), (118, 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'logits', 'labels': 'labels', 'name': '"""cross-entropy"""'}), True, 'import tensorflow as tf\n'), (119, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cross_entropy'], {'name': '"""loss"""'}), True, 'import tensorflow as tf\n'), (120, 'tensorflow.summary.scalar', 'tf.summary.scalar', (["(scope + '/loss')", 'loss'], {}), True, 'import tensorflow as tf\n'), (129, 'tensorflow.name_scope', 'tf.name_scope', (['"""accuracy"""'], {}), True, 'import tensorflow as tf\n'), (131, 'tensorflow.cast', 'tf.cast', (['correct', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (133, 'tensorflow.summary.scalar', 'tf.summary.scalar', (["(scope + 'accuracy')", 'accuracy'], {}), True, 'import tensorflow as tf\n'), (141, 'tensorflow.arg_max', 'tf.arg_max', (['logits', '(1)'], {}), True, 'import tensorflow as tf\n'), (141, 'tensorflow.arg_max', 'tf.arg_max', (['labels', '(1)'], {}), True, 'import tensorflow as tf\n'), (151, 'tensorflow.name_scope', 'tf.name_scope', (['"""optimizer"""'], {}), True, 'import tensorflow 
as tf\n'), (152, 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': 'learning_rate'}), True, 'import tensorflow as tf\n'), (162, 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), True, 'import tensorflow as tf\n'), (103, 'tensorflow.matmul', 'tf.matmul', (['flat_x', 'w'], {}), True, 'import tensorflow as tf\n'), (130, 'tensorflow.arg_max', 'tf.arg_max', (['logits', '(1)'], {}), True, 'import tensorflow as tf\n'), (130, 'tensorflow.arg_max', 'tf.arg_max', (['labels', '(1)'], {}), True, 'import tensorflow as tf\n'), (132, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['correct'], {}), True, 'import tensorflow as tf\n'), (166, 'tensorflow.global_variables', 'tf.global_variables', ([], {}), True, 'import tensorflow as tf\n'), (12, 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), True, 'import tensorflow as tf\n'), (16, 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), True, 'import tensorflow as tf\n'), (39, 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), True, 'import tensorflow as tf\n'), (43, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (96, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (99, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (168, 'tensorflow.all_variables', 'tf.all_variables', ([], {}), True, 'import tensorflow as tf\n')]
lj-ecjtu/Faster-RCNN-TensorFlow-Python3-master-RSDDs
33371985133c93d9a7a5ef0a8a60a558ccfa1ae2
# --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.slim import arg_scope

from lib.config import config as cfg
from lib.layer_utils.anchor_target_layer import anchor_target_layer
from lib.layer_utils.proposal_layer import proposal_layer
from lib.layer_utils.proposal_target_layer import proposal_target_layer
from lib.layer_utils.proposal_top_layer import proposal_top_layer
from lib.layer_utils.snippets import generate_anchors_pre


class Network(object):
    def __init__(self, batch_size=1):
        self._feat_stride = [16, ]
        self._feat_compress = [1. / 16., ]
        self._batch_size = batch_size
        self._predictions = {}
        self._losses = {}
        self._anchor_targets = {}
        self._proposal_targets = {}
        self._layers = {}
        self._act_summaries = []
        self._score_summaries = {}
        self._train_summaries = []
        self._event_summaries = {}
        self._variables_to_fix = {}

    # Summaries #
    def _add_image_summary(self, image, boxes):
        # add back mean
        '''
        tf.stack() concatenates tensors into a single tensor, while
        tf.unstack() splits a tensor apart.
        '''
        image += cfg.FLAGS2["pixel_means"]
        # bgr to rgb (opencv uses bgr)
        channels = tf.unstack(image, axis=-1)
        image = tf.stack([channels[2], channels[1], channels[0]], axis=-1)
        # dims for normalization
        width = tf.to_float(tf.shape(image)[2])
        height = tf.to_float(tf.shape(image)[1])
        # from [x1, y1, x2, y2, cls] to normalized [y1, x1, y1, x1]
        cols = tf.unstack(boxes, axis=1)
        boxes = tf.stack([cols[1] / height,
                          cols[0] / width,
                          cols[3] / height,
                          cols[2] / width], axis=1)
        # add batch dimension (assume batch_size==1)
        # assert image.get_shape()[0] == 1
        boxes = tf.expand_dims(boxes, dim=0)
        # draw the ground-truth boxes on the image
        image = tf.image.draw_bounding_boxes(image, boxes)

        return tf.summary.image('ground_truth', image)

    def _add_act_summary(self, tensor):
        tf.summary.histogram('ACT/' + tensor.op.name + '/activations', tensor)
        tf.summary.scalar('ACT/' + tensor.op.name + '/zero_fraction',
                          tf.nn.zero_fraction(tensor))

    def _add_score_summary(self, key, tensor):
        tf.summary.histogram('SCORE/' + tensor.op.name + '/' + key + '/scores', tensor)

    def _add_train_summary(self, var):
        tf.summary.histogram('TRAIN/' + var.op.name, var)

    # Custom Layers #
    def _reshape_layer(self, bottom, num_dim, name):
        input_shape = tf.shape(bottom)
        with tf.variable_scope(name):
            # change the channel to the caffe format
            # 18 channels [1, 18, None, None]: the first 9 are foreground scores,
            # the last 9 background scores (on the second call: [1, 2, None, None])
            to_caffe = tf.transpose(bottom, [0, 3, 1, 2])
            # then force it to have channel 2
            # [1, 2, None, None]: separates the foreground and background scores of
            # the 9 anchors (on the second call: [1, 18, None, None])
            reshaped = tf.reshape(to_caffe, tf.concat(axis=0, values=[[self._batch_size], [num_dim, -1], [input_shape[2]]]))
            # then swap the channel back
            # [1, None, None, 2], where the first None should be (rows * 9)
            # (on the second call: [1, None, None, 18])
            to_tf = tf.transpose(reshaped, [0, 2, 3, 1])
            return to_tf

    def _softmax_layer(self, bottom, name):
        if name == 'rpn_cls_prob_reshape':
            input_shape = tf.shape(bottom)
            # In tf.reshape(), -1 means "infer this dimension": when you do not know
            # the right size, TensorFlow derives it from the original array and the
            # other dimensions.
            # Each row holds the foreground/background scores of one anchor: first the
            # first anchor type at every location, then the second anchor type, ...
            bottom_reshaped = tf.reshape(bottom, [-1, input_shape[-1]])
            reshaped_score = tf.nn.softmax(bottom_reshaped, name=name)
            return tf.reshape(reshaped_score, input_shape)  # [1, None, None, 2]
        return tf.nn.softmax(bottom, name=name)

    def _proposal_top_layer(self, rpn_cls_prob, rpn_bbox_pred, name):
        with tf.variable_scope(name):
            rois, rpn_scores = tf.py_func(proposal_top_layer,
                                          [rpn_cls_prob, rpn_bbox_pred, self._im_info,
                                           self._feat_stride, self._anchors, self._num_anchors],
                                          [tf.float32, tf.float32])
            rois.set_shape([cfg.FLAGS.rpn_top_n, 5])
            rpn_scores.set_shape([cfg.FLAGS.rpn_top_n, 1])

        return rois, rpn_scores

    def _proposal_layer(self, rpn_cls_prob, rpn_bbox_pred, name):
        with tf.variable_scope(name):
            # the returned rois gain an extra first column of zeros
            rois, rpn_scores = tf.py_func(proposal_layer,
                                          [rpn_cls_prob, rpn_bbox_pred, self._im_info, self._mode,
                                           self._feat_stride, self._anchors, self._num_anchors],
                                          [tf.float32, tf.float32])
            rois.set_shape([None, 5])
            rpn_scores.set_shape([None, 1])

        return rois, rpn_scores

    def _crop_pool_layer(self, bottom, rois, name):
        with tf.variable_scope(name):
            # tf.squeeze() returns a tensor with all size-1 dimensions of the
            # original input removed
            batch_ids = tf.squeeze(tf.slice(rois, [0, 0], [-1, 1], name="batch_id"), [1])
            # Get the normalized coordinates of bboxes
            bottom_shape = tf.shape(bottom)
            height = (tf.to_float(bottom_shape[1]) - 1.) * np.float32(self._feat_stride[0])
            width = (tf.to_float(bottom_shape[2]) - 1.) * np.float32(self._feat_stride[0])
            # dividing the rois by height/width gives their position on the feature map
            x1 = tf.slice(rois, [0, 1], [-1, 1], name="x1") / width
            y1 = tf.slice(rois, [0, 2], [-1, 1], name="y1") / height
            x2 = tf.slice(rois, [0, 3], [-1, 1], name="x2") / width
            y2 = tf.slice(rois, [0, 4], [-1, 1], name="y2") / height
            # Won't be backpropagated to rois anyway, but to save time
            bboxes = tf.stop_gradient(tf.concat([y1, x1, y2, x2], axis=1))
            # 'roi_pooling_size', 7
            pre_pool_size = cfg.FLAGS.roi_pooling_size * 2
            # crop the parts of the feature map covered by the rois,
            # then resize them to 14x14
            crops = tf.image.crop_and_resize(bottom, bboxes, tf.to_int32(batch_ids),
                                             [pre_pool_size, pre_pool_size], name="crops")

        return slim.max_pool2d(crops, [2, 2], padding='SAME')

    def _dropout_layer(self, bottom, name, ratio=0.5):
        return tf.nn.dropout(bottom, ratio, name=name)

    def _anchor_target_layer(self, rpn_cls_score, name):
        with tf.variable_scope(name):
            # the indices here range over all anchors
            # rpn_labels:               (1, 1, A * height, width)
            # rpn_bbox_targets:         (1, height, width, A * 4)
            # rpn_bbox_inside_weights:  (1, height, width, A * 4)
            # rpn_bbox_outside_weights: (1, height, width, A * 4)
            rpn_labels, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights = tf.py_func(
                anchor_target_layer,
                [rpn_cls_score, self._gt_boxes, self._im_info, self._feat_stride, self._anchors, self._num_anchors],
                [tf.float32, tf.float32, tf.float32, tf.float32])
            # self._gt_boxes = tf.placeholder(tf.float32, shape=[None, 5])
            # gt_boxes: coordinates after rescaling, plus the class label

            rpn_labels.set_shape([1, 1, None, None])
            rpn_bbox_targets.set_shape([1, None, None, self._num_anchors * 4])
            rpn_bbox_inside_weights.set_shape([1, None, None, self._num_anchors * 4])
            rpn_bbox_outside_weights.set_shape([1, None, None, self._num_anchors * 4])

            rpn_labels = tf.to_int32(rpn_labels, name="to_int32")
            self._anchor_targets['rpn_labels'] = rpn_labels
            self._anchor_targets['rpn_bbox_targets'] = rpn_bbox_targets
            self._anchor_targets['rpn_bbox_inside_weights'] = rpn_bbox_inside_weights
            self._anchor_targets['rpn_bbox_outside_weights'] = rpn_bbox_outside_weights

            self._score_summaries.update(self._anchor_targets)

        return rpn_labels

    def _proposal_target_layer(self, rois, roi_scores, name):
        with tf.variable_scope(name):
            # the indices here range over the cfg.FLAGS.batch_size = 256 sampled rois
            # rois: (0, x1, y1, x2, y2), coming from the RPN and then reduced to 256
            # bbox_target (ndarray): N x 4K blob of regression targets
            # bbox_inside_weights (ndarray): N x 4K blob of loss weights
            rois, roi_scores, labels, bbox_targets, bbox_inside_weights, bbox_outside_weights = tf.py_func(
                proposal_target_layer,
                [rois, roi_scores, self._gt_boxes, self._num_classes],
                [tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32])

            rois.set_shape([cfg.FLAGS.batch_size, 5])
            roi_scores.set_shape([cfg.FLAGS.batch_size])
            labels.set_shape([cfg.FLAGS.batch_size, 1])
            bbox_targets.set_shape([cfg.FLAGS.batch_size, self._num_classes * 4])
            bbox_inside_weights.set_shape([cfg.FLAGS.batch_size, self._num_classes * 4])
            bbox_outside_weights.set_shape([cfg.FLAGS.batch_size, self._num_classes * 4])

            self._proposal_targets['rois'] = rois
            self._proposal_targets['labels'] = tf.to_int32(labels, name="to_int32")
            self._proposal_targets['bbox_targets'] = bbox_targets
            self._proposal_targets['bbox_inside_weights'] = bbox_inside_weights
            self._proposal_targets['bbox_outside_weights'] = bbox_outside_weights

            self._score_summaries.update(self._proposal_targets)
            # self._score_summaries.update(self._anchor_targets)

            return rois, roi_scores

    def _anchor_component(self):
        with tf.variable_scope('ANCHOR_' + 'default'):
            # just to get the shape right
            # after the original input image passes through VGG16 conv5_3 it has been
            # downscaled 16x, which gives the size of the RPN input feature map
            height = tf.to_int32(tf.ceil(self._im_info[0, 0] / np.float32(self._feat_stride[0])))
            width = tf.to_int32(tf.ceil(self._im_info[0, 1] / np.float32(self._feat_stride[0])))
            # get the coordinates (on the original input image) of all anchors for one
            # input image, along with the number of anchors
            anchors, anchor_length = tf.py_func(generate_anchors_pre,
                                                [height, width, self._feat_stride,
                                                 self._anchor_scales, self._anchor_ratios],
                                                [tf.float32, tf.int32], name="generate_anchors")
            anchors.set_shape([None, 4])
            anchor_length.set_shape([])
            self._anchors = anchors
            self._anchor_length = anchor_length

    def build_network(self, sess, is_training=True):
        raise NotImplementedError

    # sigma=sigma_rpn=3, dim=[1, 2, 3]
    def _smooth_l1_loss(self, bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights,
                        sigma=1.0, dim=[1]):
        sigma_2 = sigma ** 2
        box_diff = bbox_pred - bbox_targets
        # rows belonging to the foreground are non-zero; all other rows are zero
        in_box_diff = bbox_inside_weights * box_diff
        abs_in_box_diff = tf.abs(in_box_diff)
        # decides which positions get weight 1 (including positions that are already
        # zero, i.e. non-foreground) and which get weight 0
        smoothL1_sign = tf.stop_gradient(tf.to_float(tf.less(abs_in_box_diff, 1. / sigma_2)))
        # the smooth L1 function (slightly different from the paper)
        in_loss_box = tf.pow(in_box_diff, 2) * (sigma_2 / 2.) * smoothL1_sign \
                      + (abs_in_box_diff - (0.5 / sigma_2)) * (1. - smoothL1_sign)
        out_loss_box = bbox_outside_weights * in_loss_box
        loss_box = tf.reduce_mean(tf.reduce_sum(
            out_loss_box,
            axis=dim
        ))
        return loss_box

    def _add_losses(self, sigma_rpn=3.0):
        with tf.variable_scope('loss_' + self._tag):
            # RPN, class loss
            rpn_cls_score = tf.reshape(self._predictions['rpn_cls_score_reshape'], [-1, 2])
            rpn_label = tf.reshape(self._anchor_targets['rpn_labels'], [-1])
            # get the indices of the foreground and background anchors
            rpn_select = tf.where(tf.not_equal(rpn_label, -1))
            rpn_cls_score = tf.reshape(tf.gather(rpn_cls_score, rpn_select), [-1, 2])
            rpn_label = tf.reshape(tf.gather(rpn_label, rpn_select), [-1])
            rpn_cross_entropy = tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(logits=rpn_cls_score, labels=rpn_label))

            # RPN, bbox loss
            rpn_bbox_pred = self._predictions['rpn_bbox_pred']
            rpn_bbox_targets = self._anchor_targets['rpn_bbox_targets']
            rpn_bbox_inside_weights = self._anchor_targets['rpn_bbox_inside_weights']
            rpn_bbox_outside_weights = self._anchor_targets['rpn_bbox_outside_weights']

            rpn_loss_box = self._smooth_l1_loss(rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights,
                                                rpn_bbox_outside_weights, sigma=sigma_rpn, dim=[1, 2, 3])

            # RCNN, class loss
            cls_score = self._predictions["cls_score"]
            label = tf.reshape(self._proposal_targets["labels"], [-1])
            # logits is still a full score vector per roi; label holds only
            # the correct class index
            cross_entropy = tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=tf.reshape(cls_score, [-1, self._num_classes]), labels=label))

            # RCNN, bbox loss
            bbox_pred = self._predictions['bbox_pred']
            bbox_targets = self._proposal_targets['bbox_targets']
            bbox_inside_weights = self._proposal_targets['bbox_inside_weights']
            bbox_outside_weights = self._proposal_targets['bbox_outside_weights']

            loss_box = self._smooth_l1_loss(bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights)

            self._losses['cross_entropy'] = cross_entropy
            self._losses['loss_box'] = loss_box
            self._losses['rpn_cross_entropy'] = rpn_cross_entropy
            self._losses['rpn_loss_box'] = rpn_loss_box

            loss = cross_entropy + loss_box + rpn_cross_entropy + rpn_loss_box
            self._losses['total_loss'] = loss

            self._event_summaries.update(self._losses)

        return loss

    def create_architecture(self, sess, mode, num_classes, tag=None, anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2)):
        self._image = tf.placeholder(tf.float32, shape=[self._batch_size, None, None, 3])
        self._im_info = tf.placeholder(tf.float32, shape=[self._batch_size, 3])  # scaled image size plus the scale factor
        self._gt_boxes = tf.placeholder(tf.float32, shape=[None, 5])  # gt_boxes: rescaled coordinates plus the class label
        self._tag = tag

        self._num_classes = num_classes
        self._mode = mode
        self._anchor_scales = anchor_scales
        self._num_scales = len(anchor_scales)

        self._anchor_ratios = anchor_ratios
        self._num_ratios = len(anchor_ratios)

        # number of anchor types
        self._num_anchors = self._num_scales * self._num_ratios

        training = mode == 'TRAIN'
        testing = mode == 'TEST'

        assert tag != None

        # handle most of the regularizer here
        weights_regularizer = tf.contrib.layers.l2_regularizer(cfg.FLAGS.weight_decay)
        if cfg.FLAGS.bias_decay:
            biases_regularizer = weights_regularizer
        else:
            biases_regularizer = tf.no_regularizer

        # list as many types of layers as possible, even if they are not used now
        # slim.arg_scope sets default argument values; its first argument is a list of
        # functions, and every function in that list will use these defaults
        # (default stride=1, padding='SAME', activation_fn=nn.relu)
        with arg_scope([slim.conv2d, slim.conv2d_in_plane,
                        slim.conv2d_transpose, slim.separable_conv2d, slim.fully_connected],
                       weights_regularizer=weights_regularizer,
                       biases_regularizer=biases_regularizer,
                       biases_initializer=tf.constant_initializer(0.0)):
            rois, cls_prob, bbox_pred = self.build_network(sess, training)

        layers_to_output = {'rois': rois}
        layers_to_output.update(self._predictions)

        for var in tf.trainable_variables():
            self._train_summaries.append(var)

        if mode == 'TEST':
            # FLAGS2["bbox_normalize_means"] = (0.0, 0.0, 0.0, 0.0)
            # FLAGS2["bbox_normalize_stds"] = (0.1, 0.1, 0.1, 0.1)
            stds = np.tile(np.array(cfg.FLAGS2["bbox_normalize_stds"]), (self._num_classes))
            means = np.tile(np.array(cfg.FLAGS2["bbox_normalize_means"]), (self._num_classes))
            self._predictions["bbox_pred"] *= stds
            self._predictions["bbox_pred"] += means
        else:
            self._add_losses()
            layers_to_output.update(self._losses)

        val_summaries = []
        # collect the tf.summary.image op and the self._losses summaries
        with tf.device("/cpu:0"):
            val_summaries.append(self._add_image_summary(self._image, self._gt_boxes))
            for key, var in self._event_summaries.items():
                # add self._losses
                val_summaries.append(tf.summary.scalar(key, var))
        for key, var in self._score_summaries.items():
            # self._score_summaries.update(self._anchor_targets); self._score_summaries.update(self._proposal_targets)
            self._add_score_summary(key, var)
        for var in self._act_summaries:
            # add the head network and the RPN layers
            self._add_act_summary(var)
        '''
        for var in tf.trainable_variables():
            self._train_summaries.append(var)
        '''
        for var in self._train_summaries:
            # add tf.trainable_variables(); tracks how the monitored tensor
            # distributions change as training iterations progress
            self._add_train_summary(var)

        # tf.summary.merge_all() gathers all summary-generating ops
        self._summary_op = tf.summary.merge_all()
        if not testing:
            self._summary_op_val = tf.summary.merge(val_summaries)

        return layers_to_output

    def get_variables_to_restore(self, variables, var_keep_dic):
        raise NotImplementedError

    def fix_variables(self, sess, pretrained_model):
        raise NotImplementedError

    # Extract the head feature maps, for example for vgg16 it is conv5_3
    # only useful during testing mode
    def extract_head(self, sess, image):
        feed_dict = {self._image: image}
        feat = sess.run(self._layers["head"], feed_dict=feed_dict)
        return feat

    # only useful during testing mode
    def test_image(self, sess, image, im_info):
        feed_dict = {self._image: image,
                     self._im_info: im_info}
        cls_score, cls_prob, bbox_pred, rois = sess.run([self._predictions["cls_score"],
                                                         self._predictions['cls_prob'],
                                                         self._predictions['bbox_pred'],
                                                         self._predictions['rois']],
                                                        feed_dict=feed_dict)
        return cls_score, cls_prob, bbox_pred, rois

    def get_summary(self, sess, blobs):
        feed_dict = {self._image: blobs['data'], self._im_info: blobs['im_info'],
                     self._gt_boxes: blobs['gt_boxes']}
        summary = sess.run(self._summary_op_val, feed_dict=feed_dict)
        return summary

    def get_summary_2(self, sess, blobs):
        feed_dict = {self._image: blobs['data'], self._im_info: blobs['im_info'],
                     self._gt_boxes: blobs['gt_boxes']}
        summary = sess.run(self._summary_op, feed_dict=feed_dict)
        return summary

    def train_step(self, sess, blobs, train_op):
        feed_dict = {self._image: blobs['data'], self._im_info: blobs['im_info'],
                     self._gt_boxes: blobs['gt_boxes']}
        rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, loss, _ = sess.run([self._losses["rpn_cross_entropy"],
                                                                            self._losses['rpn_loss_box'],
                                                                            self._losses['cross_entropy'],
                                                                            self._losses['loss_box'],
                                                                            self._losses['total_loss'],
                                                                            train_op],
                                                                           feed_dict=feed_dict)
        return rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, loss

    def train_step_with_summary(self, sess, blobs, train_op):
        feed_dict = {self._image: blobs['data'], self._im_info: blobs['im_info'],
                     self._gt_boxes: blobs['gt_boxes']}
        rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, loss, summary, _ = sess.run([self._losses["rpn_cross_entropy"],
                                                                                     self._losses['rpn_loss_box'],
                                                                                     self._losses['cross_entropy'],
                                                                                     self._losses['loss_box'],
                                                                                     self._losses['total_loss'],
                                                                                     self._summary_op,
                                                                                     train_op],
                                                                                    feed_dict=feed_dict)
        return rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, loss, summary

    def train_step_no_return(self, sess, blobs, train_op):
        feed_dict = {self._image: blobs['data'], self._im_info: blobs['im_info'],
                     self._gt_boxes: blobs['gt_boxes']}
        sess.run([train_op], feed_dict=feed_dict)
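For reference, a NumPy restatement of the piecewise smooth-L1 function used by _smooth_l1_loss above (a sketch under the same sigma convention; smooth_l1_reference is an illustrative name, not part of the original file):

# --- Reference sketch (not part of network.py) ---
import numpy as np

def smooth_l1_reference(box_diff, sigma=1.0):
    # 0.5 * (sigma * x)^2 where |x| < 1/sigma^2, otherwise |x| - 0.5/sigma^2
    sigma_2 = sigma ** 2
    abs_diff = np.abs(box_diff)
    return np.where(abs_diff < 1.0 / sigma_2,
                    0.5 * sigma_2 * abs_diff ** 2,
                    abs_diff - 0.5 / sigma_2)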
[ "tensorflow.device", "tensorflow.concat", "tensorflow.stack", "tensorflow.reduce_sum", "tensorflow.image.draw_bounding_boxes", "tensorflow.to_int32", "tensorflow.summary.scalar", "tensorflow.py_func", "tensorflow.summary.image", "tensorflow.gather", "tensorflow.to_float", "numpy.float32", "tensorflow.trainable_variables", "tensorflow.nn.dropout", "tensorflow.unstack", "tensorflow.shape", "tensorflow.less", "tensorflow.pow", "tensorflow.placeholder", "tensorflow.summary.merge_all", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "numpy.array", "tensorflow.summary.merge", "tensorflow.summary.histogram", "tensorflow.not_equal", "tensorflow.nn.softmax", "tensorflow.transpose", "tensorflow.slice", "tensorflow.contrib.slim.max_pool2d", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.nn.zero_fraction", "tensorflow.constant_initializer", "tensorflow.contrib.layers.l2_regularizer", "tensorflow.variable_scope", "tensorflow.abs" ]
lib/nets/network.py
[(47, 'tensorflow.unstack', 'tf.unstack', (['image'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (48, 'tensorflow.stack', 'tf.stack', (['[channels[2], channels[1], channels[0]]'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (53, 'tensorflow.unstack', 'tf.unstack', (['boxes'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (54, 'tensorflow.stack', 'tf.stack', (['[cols[1] / height, cols[0] / width, cols[3] / height, cols[2] / width]'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (60, 'tensorflow.expand_dims', 'tf.expand_dims', (['boxes'], {'dim': '(0)'}), True, 'import tensorflow as tf\n'), (61, 'tensorflow.image.draw_bounding_boxes', 'tf.image.draw_bounding_boxes', (['image', 'boxes'], {}), True, 'import tensorflow as tf\n'), (63, 'tensorflow.summary.image', 'tf.summary.image', (['"""ground_truth"""', 'image'], {}), True, 'import tensorflow as tf\n'), (66, 'tensorflow.summary.histogram', 'tf.summary.histogram', (["('ACT/' + tensor.op.name + '/activations')", 'tensor'], {}), True, 'import tensorflow as tf\n'), (71, 'tensorflow.summary.histogram', 'tf.summary.histogram', (["('SCORE/' + tensor.op.name + '/' + key + '/scores')", 'tensor'], {}), True, 'import tensorflow as tf\n'), (74, 'tensorflow.summary.histogram', 'tf.summary.histogram', (["('TRAIN/' + var.op.name)", 'var'], {}), True, 'import tensorflow as tf\n'), (78, 'tensorflow.shape', 'tf.shape', (['bottom'], {}), True, 'import tensorflow as tf\n'), (103, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['bottom'], {'name': 'name'}), True, 'import tensorflow as tf\n'), (148, 'tensorflow.contrib.slim.max_pool2d', 'slim.max_pool2d', (['crops', '[2, 2]'], {'padding': '"""SAME"""'}), True, 'import tensorflow.contrib.slim as slim\n'), (151, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['bottom', 'ratio'], {'name': 'name'}), True, 'import tensorflow as tf\n'), (234, 'tensorflow.abs', 'tf.abs', (['in_box_diff'], {}), True, 'import tensorflow as tf\n'), (300, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[self._batch_size, None, None, 3]'}), True, 'import tensorflow as tf\n'), (301, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[self._batch_size, 3]'}), True, 'import tensorflow as tf\n'), (302, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 5]'}), True, 'import tensorflow as tf\n'), (322, 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['cfg.FLAGS.weight_decay'], {}), True, 'import tensorflow as tf\n'), (342, 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), True, 'import tensorflow as tf\n'), (372, 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), True, 'import tensorflow as tf\n'), (68, 'tensorflow.nn.zero_fraction', 'tf.nn.zero_fraction', (['tensor'], {}), True, 'import tensorflow as tf\n'), (79, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (83, 'tensorflow.transpose', 'tf.transpose', (['bottom', '[0, 3, 1, 2]'], {}), True, 'import tensorflow as tf\n'), (91, 'tensorflow.transpose', 'tf.transpose', (['reshaped', '[0, 2, 3, 1]'], {}), True, 'import tensorflow as tf\n'), (96, 'tensorflow.shape', 'tf.shape', (['bottom'], {}), True, 'import tensorflow as tf\n'), (100, 'tensorflow.reshape', 'tf.reshape', (['bottom', '[-1, input_shape[-1]]'], {}), True, 'import tensorflow as tf\n'), (101, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['bottom_reshaped'], {'name': 'name'}), True, 'import tensorflow as tf\n'), (102, 
'tensorflow.reshape', 'tf.reshape', (['reshaped_score', 'input_shape'], {}), True, 'import tensorflow as tf\n'), (106, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (107, 'tensorflow.py_func', 'tf.py_func', (['proposal_top_layer', '[rpn_cls_prob, rpn_bbox_pred, self._im_info, self._feat_stride, self.\n _anchors, self._num_anchors]', '[tf.float32, tf.float32]'], {}), True, 'import tensorflow as tf\n'), (117, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (119, 'tensorflow.py_func', 'tf.py_func', (['proposal_layer', '[rpn_cls_prob, rpn_bbox_pred, self._im_info, self._mode, self._feat_stride,\n self._anchors, self._num_anchors]', '[tf.float32, tf.float32]'], {}), True, 'import tensorflow as tf\n'), (129, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (133, 'tensorflow.shape', 'tf.shape', (['bottom'], {}), True, 'import tensorflow as tf\n'), (154, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (160, 'tensorflow.py_func', 'tf.py_func', (['anchor_target_layer', '[rpn_cls_score, self._gt_boxes, self._im_info, self._feat_stride, self.\n _anchors, self._num_anchors]', '[tf.float32, tf.float32, tf.float32, tf.float32]'], {}), True, 'import tensorflow as tf\n'), (171, 'tensorflow.to_int32', 'tf.to_int32', (['rpn_labels'], {'name': '"""to_int32"""'}), True, 'import tensorflow as tf\n'), (182, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (187, 'tensorflow.py_func', 'tf.py_func', (['proposal_target_layer', '[rois, roi_scores, self._gt_boxes, self._num_classes]', '[tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32]'], {}), True, 'import tensorflow as tf\n'), (200, 'tensorflow.to_int32', 'tf.to_int32', (['labels'], {'name': '"""to_int32"""'}), True, 'import tensorflow as tf\n'), (210, 'tensorflow.variable_scope', 'tf.variable_scope', (["('ANCHOR_' + 'default')"], {}), True, 'import tensorflow as tf\n'), (217, 'tensorflow.py_func', 'tf.py_func', (['generate_anchors_pre', '[height, width, self._feat_stride, self._anchor_scales, self._anchor_ratios]', '[tf.float32, tf.int32]'], {'name': '"""generate_anchors"""'}), True, 'import tensorflow as tf\n'), (242, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['out_loss_box'], {'axis': 'dim'}), True, 'import tensorflow as tf\n'), (249, 'tensorflow.variable_scope', 'tf.variable_scope', (["('loss_' + self._tag)"], {}), True, 'import tensorflow as tf\n'), (251, 'tensorflow.reshape', 'tf.reshape', (["self._predictions['rpn_cls_score_reshape']", '[-1, 2]'], {}), True, 'import tensorflow as tf\n'), (252, 'tensorflow.reshape', 'tf.reshape', (["self._anchor_targets['rpn_labels']", '[-1]'], {}), True, 'import tensorflow as tf\n'), (273, 'tensorflow.reshape', 'tf.reshape', (["self._proposal_targets['labels']", '[-1]'], {}), True, 'import tensorflow as tf\n'), (357, 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), True, 'import tensorflow as tf\n'), (374, 'tensorflow.summary.merge', 'tf.summary.merge', (['val_summaries'], {}), True, 'import tensorflow as tf\n'), (50, 'tensorflow.shape', 'tf.shape', (['image'], {}), True, 'import tensorflow as tf\n'), (51, 'tensorflow.shape', 'tf.shape', (['image'], {}), True, 'import tensorflow as tf\n'), (87, 'tensorflow.concat', 'tf.concat', ([], {'axis': '(0)', 'values': '[[self._batch_size], [num_dim, -1], [input_shape[2]]]'}), True, 'import 
tensorflow as tf\n'), (131, 'tensorflow.slice', 'tf.slice', (['rois', '[0, 0]', '[-1, 1]'], {'name': '"""batch_id"""'}), True, 'import tensorflow as tf\n'), (134, 'numpy.float32', 'np.float32', (['self._feat_stride[0]'], {}), True, 'import numpy as np\n'), (135, 'numpy.float32', 'np.float32', (['self._feat_stride[0]'], {}), True, 'import numpy as np\n'), (137, 'tensorflow.slice', 'tf.slice', (['rois', '[0, 1]', '[-1, 1]'], {'name': '"""x1"""'}), True, 'import tensorflow as tf\n'), (138, 'tensorflow.slice', 'tf.slice', (['rois', '[0, 2]', '[-1, 1]'], {'name': '"""y1"""'}), True, 'import tensorflow as tf\n'), (139, 'tensorflow.slice', 'tf.slice', (['rois', '[0, 3]', '[-1, 1]'], {'name': '"""x2"""'}), True, 'import tensorflow as tf\n'), (140, 'tensorflow.slice', 'tf.slice', (['rois', '[0, 4]', '[-1, 1]'], {'name': '"""y2"""'}), True, 'import tensorflow as tf\n'), (142, 'tensorflow.concat', 'tf.concat', (['[y1, x1, y2, x2]'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (146, 'tensorflow.to_int32', 'tf.to_int32', (['batch_ids'], {}), True, 'import tensorflow as tf\n'), (237, 'tensorflow.less', 'tf.less', (['abs_in_box_diff', '(1.0 / sigma_2)'], {}), True, 'import tensorflow as tf\n'), (255, 'tensorflow.not_equal', 'tf.not_equal', (['rpn_label', '(-1)'], {}), True, 'import tensorflow as tf\n'), (256, 'tensorflow.gather', 'tf.gather', (['rpn_cls_score', 'rpn_select'], {}), True, 'import tensorflow as tf\n'), (257, 'tensorflow.gather', 'tf.gather', (['rpn_label', 'rpn_select'], {}), True, 'import tensorflow as tf\n'), (260, 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'logits': 'rpn_cls_score', 'labels': 'rpn_label'}), True, 'import tensorflow as tf\n'), (348, 'numpy.array', 'np.array', (["cfg.FLAGS2['bbox_normalize_stds']"], {}), True, 'import numpy as np\n'), (349, 'numpy.array', 'np.array', (["cfg.FLAGS2['bbox_normalize_means']"], {}), True, 'import numpy as np\n'), (134, 'tensorflow.to_float', 'tf.to_float', (['bottom_shape[1]'], {}), True, 'import tensorflow as tf\n'), (135, 'tensorflow.to_float', 'tf.to_float', (['bottom_shape[2]'], {}), True, 'import tensorflow as tf\n'), (239, 'tensorflow.pow', 'tf.pow', (['in_box_diff', '(2)'], {}), True, 'import tensorflow as tf\n'), (336, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (360, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['key', 'var'], {}), True, 'import tensorflow as tf\n'), (213, 'numpy.float32', 'np.float32', (['self._feat_stride[0]'], {}), True, 'import numpy as np\n'), (214, 'numpy.float32', 'np.float32', (['self._feat_stride[0]'], {}), True, 'import numpy as np\n'), (277, 'tensorflow.reshape', 'tf.reshape', (['cls_score', '[-1, self._num_classes]'], {}), True, 'import tensorflow as tf\n')]
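Note: the api_extract above records the building blocks of the standard Faster R-CNN smooth-L1 box loss (tf.less comparing abs_in_box_diff against 1/sigma_2, tf.pow of in_box_diff, and a final tf.reduce_sum over out_loss_box). Below is a minimal NumPy sketch of that loss for reference; the variable names mirror the extract, everything else is an illustrative assumption and not the repository's code.

import numpy as np

def smooth_l1_loss(bbox_pred, bbox_targets, sigma=1.0):
    # Standard smooth-L1: quadratic where |x| < 1/sigma^2, linear elsewhere,
    # matching the tf.less / tf.pow pattern recorded in the extract above.
    sigma_2 = sigma ** 2
    in_box_diff = bbox_pred - bbox_targets
    abs_in_box_diff = np.abs(in_box_diff)
    smooth_sign = (abs_in_box_diff < 1.0 / sigma_2).astype(np.float64)
    out_loss_box = (np.power(in_box_diff, 2) * (sigma_2 / 2.0) * smooth_sign
                    + (abs_in_box_diff - 0.5 / sigma_2) * (1.0 - smooth_sign))
    return np.sum(out_loss_box, axis=-1)

print(smooth_l1_loss(np.array([[0.1, 2.0]]), np.zeros((1, 2))))  # [1.505] = 0.005 + 1.5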
evdcush/neorl
a1af069072e752ab79e7279a88ad95d195a81821
import time
import warnings

import numpy as np
import tensorflow as tf
from gym.spaces import Discrete, Box
from collections import deque

from neorl.rl.baselines.shared import logger
from neorl.rl.baselines.shared.schedules import Scheduler
from neorl.rl.baselines.shared.tf_util import batch_to_seq, seq_to_batch, \
    check_shape, avg_norm, gradient_add, q_explained_variance, total_episode_reward_logger
from neorl.rl.baselines.acer.buffer import Buffer
from neorl.rl.baselines.shared import ActorCriticRLModel, tf_util, SetVerbosity, TensorboardWriter
from neorl.rl.baselines.shared.runners import AbstractEnvRunner
from neorl.rl.baselines.shared.policies import ActorCriticPolicy, RecurrentActorCriticPolicy

# Filter tensorflow version warnings
import os
# https://stackoverflow.com/questions/40426502/is-there-a-way-to-suppress-the-messages-tensorflow-prints/40426709
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # or any {'0', '1', '2'}
import warnings
# https://stackoverflow.com/questions/15777951/how-to-suppress-pandas-future-warning
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=Warning)
import tensorflow as tf
tf.get_logger().setLevel('INFO')
tf.autograph.set_verbosity(0)
import logging
tf.get_logger().setLevel(logging.ERROR)


# For ACER
def get_by_index(input_tensor, idx):
    """
    Gather one element from each row of a 2-D tensor, selected by idx

    :param input_tensor: (TensorFlow Tensor) The 2-D input tensor
    :param idx: (TensorFlow Tensor) 1-D tensor of per-row column indices
    :return: (TensorFlow Tensor) 1-D tensor of the gathered elements
    """
    assert len(input_tensor.get_shape()) == 2
    assert len(idx.get_shape()) == 1
    idx_flattened = tf.range(0, input_tensor.shape[0], dtype=tf.int64) * input_tensor.shape[1] + idx
    offset_tensor = tf.gather(tf.reshape(input_tensor, [-1]),  # flatten input
                              idx_flattened)  # use flattened indices
    return offset_tensor


def strip(var, n_envs, n_steps, flat=False):
    """
    Removes the last step in the batch

    :param var: (TensorFlow Tensor) The input Tensor
    :param n_envs: (int) The number of environments
    :param n_steps: (int) The number of steps to run for each environment
    :param flat: (bool) If the input Tensor is flat
    :return: (TensorFlow Tensor) the input tensor, without the last step in the batch
    """
    out_vars = batch_to_seq(var, n_envs, n_steps + 1, flat)
    return seq_to_batch(out_vars[:-1], flat)


def q_retrace(rewards, dones, q_i, values, rho_i, n_envs, n_steps, gamma):
    """
    Calculates the target Q-retrace

    :param rewards: ([TensorFlow Tensor]) The rewards
    :param dones: ([TensorFlow Tensor]) The episode termination flags
    :param q_i: ([TensorFlow Tensor]) The Q values for actions taken
    :param values: ([TensorFlow Tensor]) The output of the value functions
    :param rho_i: ([TensorFlow Tensor]) The importance weight for each action
    :param n_envs: (int) The number of environments
    :param n_steps: (int) The number of steps to run for each environment
    :param gamma: (float) The discount value
    :return: ([TensorFlow Tensor]) the target Q-retrace
    """
    rho_bar = batch_to_seq(tf.minimum(1.0, rho_i), n_envs, n_steps, True)  # list of len steps, shape [n_envs]
    reward_seq = batch_to_seq(rewards, n_envs, n_steps, True)  # list of len steps, shape [n_envs]
    done_seq = batch_to_seq(dones, n_envs, n_steps, True)  # list of len steps, shape [n_envs]
    q_is = batch_to_seq(q_i, n_envs, n_steps, True)
    value_sequence = batch_to_seq(values, n_envs, n_steps + 1, True)
    final_value = value_sequence[-1]
    qret = final_value
    qrets = []
    for i in range(n_steps - 1, -1, -1):
        check_shape([qret, done_seq[i], reward_seq[i], rho_bar[i], q_is[i], value_sequence[i]],
                    [[n_envs]] * 6)
        qret = reward_seq[i] + gamma * qret * (1.0 - done_seq[i])
        qrets.append(qret)
        qret = (rho_bar[i] * (qret - q_is[i])) + value_sequence[i]
    qrets = qrets[::-1]
    qret = seq_to_batch(qrets, flat=True)
    return qret


class EpisodeStats:
    def __init__(self, n_steps, n_envs):
        """
        Calculates the episode statistics

        :param n_steps: (int) The number of steps to run for each environment
        :param n_envs: (int) The number of environments
        """
        self.episode_rewards = []
        for _ in range(n_envs):
            self.episode_rewards.append([])
        self.len_buffer = deque(maxlen=40)  # rolling buffer for episode lengths
        self.rewbuffer = deque(maxlen=40)  # rolling buffer for episode rewards
        self.n_steps = n_steps
        self.n_envs = n_envs

    def feed(self, rewards, masks):
        """
        Update the latest reward and mask

        :param rewards: ([float]) The new rewards for the new step
        :param masks: ([float]) The new masks for the new step
        """
        rewards = np.reshape(rewards, [self.n_envs, self.n_steps])
        masks = np.reshape(masks, [self.n_envs, self.n_steps])
        for i in range(0, self.n_envs):
            for j in range(0, self.n_steps):
                self.episode_rewards[i].append(rewards[i][j])
                if masks[i][j]:
                    reward_length = len(self.episode_rewards[i])
                    reward_sum = sum(self.episode_rewards[i])
                    self.len_buffer.append(reward_length)
                    self.rewbuffer.append(reward_sum)
                    self.episode_rewards[i] = []

    def mean_length(self):
        """
        Returns the average length of each episode

        :return: (float)
        """
        if self.len_buffer:
            return np.mean(self.len_buffer)
        else:
            return 0  # on the first params dump, no episodes are finished

    def mean_reward(self):
        """
        Returns the average reward of each episode

        :return: (float)
        """
        if self.rewbuffer:
            return np.mean(self.rewbuffer)
        else:
            return 0


class ACER(ActorCriticRLModel):
    """
    The ACER (Actor-Critic with Experience Replay) model class

    :param policy: (ActorCriticPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, CnnLstmPolicy, ...)
    :param env: (NEORL environment or Gym environment) The environment to learn with ACER, either use NEORL method
        ``CreateEnvironment`` (see **below**) or construct your custom Gym environment
    :param gamma: (float) The discount value
    :param n_steps: (int) The number of steps to run for each environment per update
        (i.e. batch size is n_steps * n_env where n_env is number of environment copies running in parallel)
    :param q_coef: (float) The weight for the loss on the Q value
    :param ent_coef: (float) The weight for the entropy loss
    :param max_grad_norm: (float) The clipping value for the maximum gradient
    :param learning_rate: (float) The initial learning rate for the RMS prop optimizer
    :param lr_schedule: (str) The type of scheduler for the learning rate update ('linear', 'constant',
        'double_linear_con', 'middle_drop' or 'double_middle_drop')
    :param buffer_size: (int) The buffer size in number of steps
    :param replay_ratio: (float) The average number of off-policy (replay) updates per on-policy update,
        sampled from a Poisson distribution
    :param replay_start: (int) The minimum number of steps in the buffer, before experience replay starts
    :param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
    :param seed: (int) Seed for the pseudo-random generators (python, numpy, tensorflow).
        If None (default), use random seed.
""" #:param alpha: (float) The decay rate for the Exponential moving average of the parameters #:param correction_term: (float) Importance weight clipping factor (default: 10) #:param delta: (float) max KL divergence between the old policy and updated policy (default: 1) #:param trust_region: (bool) Whether or not algorithms estimates the gradient KL divergence # between the old and updated policy and uses it to determine step size (default: True) def __init__(self, policy, env, gamma=0.99, n_steps=20, q_coef=0.5, ent_coef=0.01, max_grad_norm=10, learning_rate=7e-4, lr_schedule='linear', buffer_size=5000, replay_ratio=4, replay_start=1000, verbose=0, seed=None, _init_setup_model=True): #if num_procs is not None: # warnings.warn("num_procs will be removed in a future version (v3.x.x) " # "use n_cpu_tf_sess instead", DeprecationWarning) # n_cpu_tf_sess = num_procs self.n_steps = n_steps self.replay_ratio = replay_ratio self.buffer_size = buffer_size self.replay_start = replay_start self.gamma = gamma self.alpha = 0.99 self.correction_term = 10.0 self.q_coef = q_coef self.ent_coef = ent_coef self.trust_region = True self.delta = 1 self.max_grad_norm = max_grad_norm self.rprop_alpha = 0.99 self.rprop_epsilon = 1e-5 self.learning_rate = learning_rate self.lr_schedule = lr_schedule self.tensorboard_log = None self.full_tensorboard_log = False policy_kwargs=None n_cpu_tf_sess=1 self.action_ph = None self.done_ph = None self.reward_ph = None self.mu_ph = None self.learning_rate_ph = None self.polyak_model = None self.learning_rate_schedule = None self.run_ops = None self.names_ops = None self.train_model = None self.step_model = None self.proba_step = None self.n_act = None self.n_batch = None self.summary = None super(ACER, self).__init__(policy=policy, env=env, verbose=verbose, requires_vec_env=True, _init_setup_model=_init_setup_model, policy_kwargs=policy_kwargs, seed=seed, n_cpu_tf_sess=n_cpu_tf_sess) if _init_setup_model: self.setup_model() def _make_runner(self) -> AbstractEnvRunner: return _Runner(env=self.env, model=self, n_steps=self.n_steps) def _get_pretrain_placeholders(self): policy = self.step_model action_ph = policy.pdtype.sample_placeholder([None]) if isinstance(self.action_space, Discrete): return policy.obs_ph, action_ph, policy.policy raise NotImplementedError('Only discrete actions are supported for ACER for now') def set_env(self, env): if env is not None: assert self.n_envs == env.num_envs, \ "Error: the environment passed must have the same number of environments as the model was trained on." \ "This is due to ACER not being capable of changing the number of environments." super().set_env(env) def setup_model(self): with SetVerbosity(self.verbose): assert issubclass(self.policy, ActorCriticPolicy), "Error: the input policy for the ACER model must be " \ "an instance of common.policies.ActorCriticPolicy." 
            if isinstance(self.action_space, Discrete):
                self.n_act = self.action_space.n
                continuous = False
            elif isinstance(self.action_space, Box):
                # self.n_act = self.action_space.shape[-1]
                # continuous = True
                raise NotImplementedError("WIP: Acer does not support Continuous actions yet.")
            else:
                raise ValueError("Error: ACER does not work with {} action space.".format(self.action_space))

            self.n_batch = self.n_envs * self.n_steps

            self.graph = tf.Graph()
            with self.graph.as_default():
                self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph)

                self.set_random_seed(self.seed)

                n_batch_step = None
                if issubclass(self.policy, RecurrentActorCriticPolicy):
                    n_batch_step = self.n_envs
                n_batch_train = self.n_envs * (self.n_steps + 1)

                step_model = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,
                                         n_batch_step, reuse=False, **self.policy_kwargs)

                self.params = tf_util.get_trainable_vars("model")

                with tf.variable_scope("train_model", reuse=True,
                                       custom_getter=tf_util.outer_scope_getter("train_model")):
                    train_model = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs,
                                              self.n_steps + 1, n_batch_train, reuse=True, **self.policy_kwargs)

                with tf.variable_scope("moving_average"):
                    # create averaged model
                    ema = tf.train.ExponentialMovingAverage(self.alpha)
                    ema_apply_op = ema.apply(self.params)

                    def custom_getter(getter, name, *args, **kwargs):
                        name = name.replace("polyak_model/", "")
                        val = ema.average(getter(name, *args, **kwargs))
                        return val

                with tf.variable_scope("polyak_model", reuse=True, custom_getter=custom_getter):
                    self.polyak_model = polyak_model = self.policy(self.sess, self.observation_space,
                                                                   self.action_space, self.n_envs,
                                                                   self.n_steps + 1,
                                                                   self.n_envs * (self.n_steps + 1),
                                                                   reuse=True, **self.policy_kwargs)

                with tf.variable_scope("loss", reuse=False):
                    self.done_ph = tf.placeholder(tf.float32, [self.n_batch])  # dones
                    self.reward_ph = tf.placeholder(tf.float32, [self.n_batch])  # rewards, not returns
                    self.mu_ph = tf.placeholder(tf.float32, [self.n_batch, self.n_act])  # mu's
                    self.action_ph = train_model.pdtype.sample_placeholder([self.n_batch])
                    self.learning_rate_ph = tf.placeholder(tf.float32, [])
                    eps = 1e-6

                    # Notation: (var) = batch variable, (var)s = sequence variable,
                    # (var)_i = variable indexed by action at step i
                    # shape is [n_envs * (n_steps + 1)]
                    if continuous:
                        value = train_model.value_flat
                    else:
                        value = tf.reduce_sum(train_model.policy_proba * train_model.q_value, axis=-1)

                    rho, rho_i_ = None, None
                    if continuous:
                        action_ = strip(train_model.proba_distribution.sample(), self.n_envs, self.n_steps)
                        distribution_f = tf.contrib.distributions.MultivariateNormalDiag(
                            loc=strip(train_model.proba_distribution.mean, self.n_envs, self.n_steps),
                            scale_diag=strip(train_model.proba_distribution.logstd, self.n_envs, self.n_steps))
                        f_polyak = tf.contrib.distributions.MultivariateNormalDiag(
                            loc=strip(polyak_model.proba_distribution.mean, self.n_envs, self.n_steps),
                            scale_diag=strip(polyak_model.proba_distribution.logstd, self.n_envs, self.n_steps))
                        f_i = distribution_f.prob(self.action_ph)
                        f_i_ = distribution_f.prob(action_)
                        f_polyak_i = f_polyak.prob(self.action_ph)
                        phi_i = strip(train_model.proba_distribution.mean, self.n_envs, self.n_steps)
                        q_value = strip(train_model.value_fn, self.n_envs, self.n_steps)
                        q_i = q_value[:, 0]
                        rho_i = tf.reshape(f_i, [-1, 1]) / (self.mu_ph + eps)
                        rho_i_ = tf.reshape(f_i_, [-1, 1]) / (self.mu_ph + eps)
                        qret = q_retrace(self.reward_ph, self.done_ph, q_i, value, tf.pow(rho_i, 1 / self.n_act),
                                         self.n_envs, self.n_steps,
self.gamma) else: # strip off last step # f is a distribution, chosen to be Gaussian distributions # with fixed diagonal covariance and mean \phi(x) # in the paper distribution_f, f_polyak, q_value = \ map(lambda variables: strip(variables, self.n_envs, self.n_steps), [train_model.policy_proba, polyak_model.policy_proba, train_model.q_value]) # Get pi and q values for actions taken f_i = get_by_index(distribution_f, self.action_ph) f_i_ = distribution_f phi_i = distribution_f f_polyak_i = f_polyak q_i = get_by_index(q_value, self.action_ph) # Compute ratios for importance truncation rho = distribution_f / (self.mu_ph + eps) rho_i = get_by_index(rho, self.action_ph) # Calculate Q_retrace targets qret = q_retrace(self.reward_ph, self.done_ph, q_i, value, rho_i, self.n_envs, self.n_steps, self.gamma) # Calculate losses # Entropy entropy = tf.reduce_sum(train_model.proba_distribution.entropy()) # Policy Gradient loss, with truncated importance sampling & bias correction value = strip(value, self.n_envs, self.n_steps, True) # check_shape([qret, value, rho_i, f_i], [[self.n_envs * self.n_steps]] * 4) # check_shape([rho, distribution_f, q_value], [[self.n_envs * self.n_steps, self.n_act]] * 2) # Truncated importance sampling adv = qret - value log_f = tf.log(f_i + eps) # [n_envs * n_steps] gain_f = log_f * tf.stop_gradient(adv * tf.minimum(self.correction_term, rho_i)) loss_f = -tf.reduce_mean(gain_f) # Bias correction for the truncation adv_bc = (q_value - tf.reshape(value, [self.n_envs * self.n_steps, 1])) # [n_envs * n_steps, n_act] # check_shape([adv_bc, log_f_bc], [[self.n_envs * self.n_steps, self.n_act]] * 2) if continuous: gain_bc = tf.stop_gradient(adv_bc * tf.nn.relu(1.0 - (self.correction_term / (rho_i_ + eps))) * f_i_) else: log_f_bc = tf.log(f_i_ + eps) # / (f_old + eps) gain_bc = tf.reduce_sum(log_f_bc * tf.stop_gradient( adv_bc * tf.nn.relu(1.0 - (self.correction_term / (rho + eps))) * f_i_), axis=1) # IMP: This is sum, as expectation wrt f loss_bc = -tf.reduce_mean(gain_bc) loss_policy = loss_f + loss_bc # Value/Q function loss, and explained variance check_shape([qret, q_i], [[self.n_envs * self.n_steps]] * 2) explained_variance = q_explained_variance(tf.reshape(q_i, [self.n_envs, self.n_steps]), tf.reshape(qret, [self.n_envs, self.n_steps])) loss_q = tf.reduce_mean(tf.square(tf.stop_gradient(qret) - q_i) * 0.5) # Net loss check_shape([loss_policy, loss_q, entropy], [[]] * 3) loss = loss_policy + self.q_coef * loss_q - self.ent_coef * entropy tf.summary.scalar('entropy_loss', entropy) tf.summary.scalar('policy_gradient_loss', loss_policy) tf.summary.scalar('value_function_loss', loss_q) tf.summary.scalar('loss', loss) norm_grads_q, norm_grads_policy, avg_norm_grads_f = None, None, None avg_norm_k, avg_norm_g, avg_norm_k_dot_g, avg_norm_adj = None, None, None, None if self.trust_region: # [n_envs * n_steps, n_act] grad = tf.gradients(- (loss_policy - self.ent_coef * entropy) * self.n_steps * self.n_envs, phi_i) # [n_envs * n_steps, n_act] # Directly computed gradient of KL divergence wrt f kl_grad = - f_polyak_i / (f_i_ + eps) k_dot_g = tf.reduce_sum(kl_grad * grad, axis=-1) adj = tf.maximum(0.0, (tf.reduce_sum(kl_grad * grad, axis=-1) - self.delta) / ( tf.reduce_sum(tf.square(kl_grad), axis=-1) + eps)) # [n_envs * n_steps] # Calculate stats (before doing adjustment) for logging. 
                        avg_norm_k = avg_norm(kl_grad)
                        avg_norm_g = avg_norm(grad)
                        avg_norm_k_dot_g = tf.reduce_mean(tf.abs(k_dot_g))
                        avg_norm_adj = tf.reduce_mean(tf.abs(adj))

                        grad = grad - tf.reshape(adj, [self.n_envs * self.n_steps, 1]) * kl_grad
                        # These are trust region adjusted gradients w.r.t. f, i.e. statistics of policy pi
                        grads_f = -grad / (self.n_envs * self.n_steps)
                        grads_policy = tf.gradients(f_i_, self.params, grads_f)
                        grads_q = tf.gradients(loss_q * self.q_coef, self.params)
                        grads = [gradient_add(g1, g2, param, verbose=self.verbose)
                                 for (g1, g2, param) in zip(grads_policy, grads_q, self.params)]

                        avg_norm_grads_f = avg_norm(grads_f) * (self.n_steps * self.n_envs)
                        norm_grads_q = tf.global_norm(grads_q)
                        norm_grads_policy = tf.global_norm(grads_policy)
                    else:
                        grads = tf.gradients(loss, self.params)

                    norm_grads = None
                    if self.max_grad_norm is not None:
                        grads, norm_grads = tf.clip_by_global_norm(grads, self.max_grad_norm)

                    grads = list(zip(grads, self.params))

                with tf.variable_scope("input_info", reuse=False):
                    tf.summary.scalar('rewards', tf.reduce_mean(self.reward_ph))
                    tf.summary.scalar('learning_rate', tf.reduce_mean(self.learning_rate))
                    tf.summary.scalar('advantage', tf.reduce_mean(adv))
                    tf.summary.scalar('action_probability', tf.reduce_mean(self.mu_ph))

                    if self.full_tensorboard_log:
                        tf.summary.histogram('rewards', self.reward_ph)
                        tf.summary.histogram('learning_rate', self.learning_rate)
                        tf.summary.histogram('advantage', adv)
                        tf.summary.histogram('action_probability', self.mu_ph)
                        if tf_util.is_image(self.observation_space):
                            tf.summary.image('observation', train_model.obs_ph)
                        else:
                            tf.summary.histogram('observation', train_model.obs_ph)

                trainer = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate_ph, decay=self.rprop_alpha,
                                                    epsilon=self.rprop_epsilon)
                _opt_op = trainer.apply_gradients(grads)

                # so when you call _train, you first do the gradient step, then you apply ema
                with tf.control_dependencies([_opt_op]):
                    _train = tf.group(ema_apply_op)

                # Ops/Summaries to run, and their names for logging
                assert norm_grads is not None
                run_ops = [_train, loss, loss_q, entropy, loss_policy, loss_f, loss_bc, explained_variance, norm_grads]
                names_ops = ['loss', 'loss_q', 'entropy', 'loss_policy', 'loss_f', 'loss_bc', 'explained_variance',
                             'norm_grads']
                if self.trust_region:
                    self.run_ops = run_ops + [norm_grads_q, norm_grads_policy, avg_norm_grads_f, avg_norm_k,
                                              avg_norm_g, avg_norm_k_dot_g, avg_norm_adj]
                    self.names_ops = names_ops + ['norm_grads_q', 'norm_grads_policy', 'avg_norm_grads_f',
                                                  'avg_norm_k', 'avg_norm_g', 'avg_norm_k_dot_g', 'avg_norm_adj']

                self.train_model = train_model
                self.step_model = step_model
                self.step = step_model.step
                self.proba_step = step_model.proba_step
                self.initial_state = step_model.initial_state
                tf.global_variables_initializer().run(session=self.sess)

                self.summary = tf.summary.merge_all()

    def _train_step(self, obs, actions, rewards, dones, mus, states, masks, steps, writer=None):
        """
        Applies a training step to the model

        :param obs: ([float]) The input observations
        :param actions: ([float]) The actions taken
        :param rewards: ([float]) The rewards from the environment
        :param dones: ([bool]) Whether or not the episode is over (aligned with reward, used for reward calculation)
        :param mus: ([float]) The logits values
        :param states: ([float]) The states (used for recurrent policies)
        :param masks: ([bool]) Whether or not the episode is over (used for recurrent policies)
        :param steps: (int) the number of steps done so far (can be None)
        :param writer: (TensorFlow Summary.writer) the writer for tensorboard
:return: ([str], [float]) the list of update operation name, and the list of the results of the operations """ cur_lr = self.learning_rate_schedule.value_steps(steps) td_map = {self.train_model.obs_ph: obs, self.polyak_model.obs_ph: obs, self.action_ph: actions, self.reward_ph: rewards, self.done_ph: dones, self.mu_ph: mus, self.learning_rate_ph: cur_lr} if states is not None: td_map[self.train_model.states_ph] = states td_map[self.train_model.dones_ph] = masks td_map[self.polyak_model.states_ph] = states td_map[self.polyak_model.dones_ph] = masks if writer is not None: # run loss backprop with summary, but once every 10 runs save the metadata (memory, compute time, ...) if self.full_tensorboard_log and (1 + (steps / self.n_batch)) % 10 == 0: run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE) run_metadata = tf.RunMetadata() step_return = self.sess.run([self.summary] + self.run_ops, td_map, options=run_options, run_metadata=run_metadata) writer.add_run_metadata(run_metadata, 'step%d' % steps) else: step_return = self.sess.run([self.summary] + self.run_ops, td_map) writer.add_summary(step_return[0], steps) step_return = step_return[1:] else: step_return = self.sess.run(self.run_ops, td_map) return self.names_ops, step_return[1:] # strip off _train def learn(self, total_timesteps, callback=None, log_interval=100, tb_log_name="ACER", reset_num_timesteps=True): new_tb_log = self._init_num_timesteps(reset_num_timesteps) callback = self._init_callback(callback) with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \ as writer: self._setup_learn() self.learning_rate_schedule = Scheduler(initial_value=self.learning_rate, n_values=total_timesteps, schedule=self.lr_schedule) episode_stats = EpisodeStats(self.n_steps, self.n_envs) if self.replay_ratio > 0: buffer = Buffer(env=self.env, n_steps=self.n_steps, size=self.buffer_size) else: buffer = None t_start = time.time() callback.on_training_start(locals(), globals()) # n_batch samples, 1 on_policy call and multiple off-policy calls for steps in range(0, total_timesteps, self.n_batch): callback.on_rollout_start() enc_obs, obs, actions, rewards, mus, dones, masks = self.runner.run(callback) callback.update_locals(locals()) callback.on_rollout_end() # Early stopping due to the callback if not self.runner.continue_training: break episode_stats.feed(rewards, dones) if buffer is not None: buffer.put(enc_obs, actions, rewards, mus, dones, masks) if writer is not None: total_episode_reward_logger(self.episode_reward, rewards.reshape((self.n_envs, self.n_steps)), dones.reshape((self.n_envs, self.n_steps)), writer, self.num_timesteps) # reshape stuff correctly obs = obs.reshape(self.runner.batch_ob_shape) actions = actions.reshape([self.n_batch]) rewards = rewards.reshape([self.n_batch]) mus = mus.reshape([self.n_batch, self.n_act]) dones = dones.reshape([self.n_batch]) masks = masks.reshape([self.runner.batch_ob_shape[0]]) names_ops, values_ops = self._train_step(obs, actions, rewards, dones, mus, self.initial_state, masks, self.num_timesteps, writer) if self.verbose >= 1 and (int(steps / self.n_batch) % log_interval == 0): logger.record_tabular("total_timesteps", self.num_timesteps) logger.record_tabular("fps", int(steps / (time.time() - t_start))) # IMP: In EpisodicLife env, during training, we get done=True at each loss of life, # not just at the terminal state. Thus, this is mean until end of life, not end of episode. # For true episode rewards, see the monitor files in the log folder. 
logger.record_tabular("mean_episode_length", episode_stats.mean_length()) logger.record_tabular("mean_episode_reward", episode_stats.mean_reward()) for name, val in zip(names_ops, values_ops): logger.record_tabular(name, float(val)) logger.dump_tabular() if (self.replay_ratio > 0 and buffer is not None and buffer.has_atleast(self.replay_start)): samples_number = np.random.poisson(self.replay_ratio) for _ in range(samples_number): # get obs, actions, rewards, mus, dones from buffer. obs, actions, rewards, mus, dones, masks = buffer.get() # reshape stuff correctly obs = obs.reshape(self.runner.batch_ob_shape) actions = actions.reshape([self.n_batch]) rewards = rewards.reshape([self.n_batch]) mus = mus.reshape([self.n_batch, self.n_act]) dones = dones.reshape([self.n_batch]) masks = masks.reshape([self.runner.batch_ob_shape[0]]) self._train_step(obs, actions, rewards, dones, mus, self.initial_state, masks, self.num_timesteps) callback.on_training_end() return self def save(self, save_path, cloudpickle=False): data = { "gamma": self.gamma, "n_steps": self.n_steps, "q_coef": self.q_coef, "ent_coef": self.ent_coef, "max_grad_norm": self.max_grad_norm, "learning_rate": self.learning_rate, "lr_schedule": self.lr_schedule, "rprop_alpha": self.rprop_alpha, "rprop_epsilon": self.rprop_epsilon, "replay_ratio": self.replay_ratio, "replay_start": self.replay_start, "verbose": self.verbose, "policy": self.policy, "observation_space": self.observation_space, "action_space": self.action_space, "n_envs": self.n_envs, 'n_cpu_tf_sess': self.n_cpu_tf_sess, 'seed': self.seed, "_vectorize_action": self._vectorize_action, "policy_kwargs": self.policy_kwargs } params_to_save = self.get_parameters() self._save_to_file(save_path, data=data, params=params_to_save, cloudpickle=cloudpickle) class _Runner(AbstractEnvRunner): def __init__(self, env, model, n_steps): """ A runner to learn the policy of an environment for a model :param env: (Gym environment) The environment to learn from :param model: (Model) The model to learn :param n_steps: (int) The number of steps to run for each environment """ super(_Runner, self).__init__(env=env, model=model, n_steps=n_steps) self.env = env self.model = model self.n_env = n_env = env.num_envs if isinstance(env.action_space, Discrete): self.n_act = env.action_space.n else: self.n_act = env.action_space.shape[-1] self.n_batch = n_env * n_steps if len(env.observation_space.shape) > 1: self.raw_pixels = True obs_height, obs_width, obs_num_channels = env.observation_space.shape self.batch_ob_shape = (n_env * (n_steps + 1), obs_height, obs_width, obs_num_channels) self.obs_dtype = np.uint8 self.obs = np.zeros((n_env, obs_height, obs_width, obs_num_channels), dtype=self.obs_dtype) self.num_channels = obs_num_channels else: if len(env.observation_space.shape) == 1: self.obs_dim = env.observation_space.shape[0] else: self.obs_dim = 1 self.raw_pixels = False if isinstance(self.env.observation_space, Discrete): self.batch_ob_shape = (n_env * (n_steps + 1),) else: self.batch_ob_shape = (n_env * (n_steps + 1), self.obs_dim) self.obs_dtype = np.float32 self.n_steps = n_steps self.states = model.initial_state self.dones = [False for _ in range(n_env)] def _run(self): """ Run a step leaning of the model :return: ([float], [float], [int64], [float], [float], [bool], [float]) encoded observation, observations, actions, rewards, mus, dones, masks """ enc_obs = [self.obs] mb_obs, mb_actions, mb_mus, mb_dones, mb_rewards = [], [], [], [], [] for _ in range(self.n_steps): actions, _, states, _ = 
self.model.step(self.obs, self.states, self.dones) mus = self.model.proba_step(self.obs, self.states, self.dones) mb_obs.append(np.copy(self.obs)) mb_actions.append(actions) mb_mus.append(mus) mb_dones.append(self.dones) clipped_actions = actions # Clip the actions to avoid out of bound error if isinstance(self.env.action_space, Box): clipped_actions = np.clip(actions, self.env.action_space.low, self.env.action_space.high) obs, rewards, dones, _ = self.env.step(clipped_actions) self.model.num_timesteps += self.n_envs if self.callback is not None: # Abort training early self.callback.update_locals(locals()) if self.callback.on_step() is False: self.continue_training = False # Return dummy values return [None] * 7 # states information for statefull models like LSTM self.states = states self.dones = dones self.obs = obs mb_rewards.append(rewards) enc_obs.append(obs) mb_obs.append(np.copy(self.obs)) mb_dones.append(self.dones) enc_obs = np.asarray(enc_obs, dtype=self.obs_dtype).swapaxes(1, 0) mb_obs = np.asarray(mb_obs, dtype=self.obs_dtype).swapaxes(1, 0) mb_actions = np.asarray(mb_actions, dtype=np.int64).swapaxes(1, 0) mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0) mb_mus = np.asarray(mb_mus, dtype=np.float32).swapaxes(1, 0) mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0) mb_masks = mb_dones # Used for statefull models like LSTM's to mask state when done mb_dones = mb_dones[:, 1:] # Used for calculating returns. The dones array is now aligned with rewards # shapes are now [nenv, nsteps, []] # When pulling from buffer, arrays will now be reshaped in place, preventing a deep copy. return enc_obs, mb_obs, mb_actions, mb_rewards, mb_mus, mb_dones, mb_masks
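Note: q_retrace above builds the Retrace target with a backward recursion: accumulate the discounted return for step i, emit it as that step's target, then pull the running value toward the critic's estimate using the truncated importance weight before moving to step i - 1. A minimal single-environment NumPy sketch of the same recursion (an illustration, not part of neorl):

import numpy as np

def q_retrace_np(rewards, dones, q_i, values, rho_i, gamma=0.99):
    # rewards, dones, q_i, rho_i: shape [n_steps]; values: shape [n_steps + 1].
    rho_bar = np.minimum(1.0, rho_i)   # truncated importance weights
    qret = values[-1]                  # bootstrap from the final value estimate
    qrets = np.zeros_like(rewards)
    for i in range(len(rewards) - 1, -1, -1):
        qret = rewards[i] + gamma * qret * (1.0 - dones[i])
        qrets[i] = qret
        # off-policy correction before recursing to the earlier step
        qret = rho_bar[i] * (qret - q_i[i]) + values[i]
    return qrets

rng = np.random.default_rng(0)
print(q_retrace_np(rng.random(5), np.zeros(5), rng.random(5), rng.random(6), rng.random(5)))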
[ "tensorflow.control_dependencies", "numpy.asarray", "tensorflow.reduce_sum", "tensorflow.RunMetadata", "tensorflow.minimum", "tensorflow.train.ExponentialMovingAverage", "numpy.mean", "tensorflow.group", "tensorflow.summary.scalar", "tensorflow.Graph", "numpy.clip", "numpy.reshape", "tensorflow.summary.image", "tensorflow.gradients", "tensorflow.stop_gradient", "tensorflow.autograph.set_verbosity", "numpy.random.poisson", "numpy.copy", "tensorflow.square", "numpy.zeros", "tensorflow.train.RMSPropOptimizer", "tensorflow.pow", "tensorflow.RunOptions", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.summary.merge_all", "tensorflow.global_norm", "tensorflow.summary.histogram", "tensorflow.nn.relu", "tensorflow.range", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.get_logger", "tensorflow.clip_by_global_norm", "tensorflow.log", "tensorflow.variable_scope", "tensorflow.abs" ]
neorl/rl/baselines/acer/acer_simple.py
[(24, 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), False, 'import warnings\n'), (25, 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'Warning'}), False, 'import warnings\n'), (28, 'tensorflow.autograph.set_verbosity', 'tf.autograph.set_verbosity', (['(0)'], {}), True, 'import tensorflow as tf\n'), (59, 'neorl.rl.baselines.shared.tf_util.batch_to_seq', 'batch_to_seq', (['var', 'n_envs', '(n_steps + 1)', 'flat'], {}), False, 'from neorl.rl.baselines.shared.tf_util import batch_to_seq, seq_to_batch, check_shape, avg_norm, gradient_add, q_explained_variance, total_episode_reward_logger\n'), (60, 'neorl.rl.baselines.shared.tf_util.seq_to_batch', 'seq_to_batch', (['out_vars[:-1]', 'flat'], {}), False, 'from neorl.rl.baselines.shared.tf_util import batch_to_seq, seq_to_batch, check_shape, avg_norm, gradient_add, q_explained_variance, total_episode_reward_logger\n'), (78, 'neorl.rl.baselines.shared.tf_util.batch_to_seq', 'batch_to_seq', (['rewards', 'n_envs', 'n_steps', '(True)'], {}), False, 'from neorl.rl.baselines.shared.tf_util import batch_to_seq, seq_to_batch, check_shape, avg_norm, gradient_add, q_explained_variance, total_episode_reward_logger\n'), (79, 'neorl.rl.baselines.shared.tf_util.batch_to_seq', 'batch_to_seq', (['dones', 'n_envs', 'n_steps', '(True)'], {}), False, 'from neorl.rl.baselines.shared.tf_util import batch_to_seq, seq_to_batch, check_shape, avg_norm, gradient_add, q_explained_variance, total_episode_reward_logger\n'), (80, 'neorl.rl.baselines.shared.tf_util.batch_to_seq', 'batch_to_seq', (['q_i', 'n_envs', 'n_steps', '(True)'], {}), False, 'from neorl.rl.baselines.shared.tf_util import batch_to_seq, seq_to_batch, check_shape, avg_norm, gradient_add, q_explained_variance, total_episode_reward_logger\n'), (81, 'neorl.rl.baselines.shared.tf_util.batch_to_seq', 'batch_to_seq', (['values', 'n_envs', '(n_steps + 1)', '(True)'], {}), False, 'from neorl.rl.baselines.shared.tf_util import batch_to_seq, seq_to_batch, check_shape, avg_norm, gradient_add, q_explained_variance, total_episode_reward_logger\n'), (91, 'neorl.rl.baselines.shared.tf_util.seq_to_batch', 'seq_to_batch', (['qrets'], {'flat': '(True)'}), False, 'from neorl.rl.baselines.shared.tf_util import batch_to_seq, seq_to_batch, check_shape, avg_norm, gradient_add, q_explained_variance, total_episode_reward_logger\n'), (27, 'tensorflow.get_logger', 'tf.get_logger', ([], {}), True, 'import tensorflow as tf\n'), (30, 'tensorflow.get_logger', 'tf.get_logger', ([], {}), True, 'import tensorflow as tf\n'), (44, 'tensorflow.reshape', 'tf.reshape', (['input_tensor', '[-1]'], {}), True, 'import tensorflow as tf\n'), (77, 'tensorflow.minimum', 'tf.minimum', (['(1.0)', 'rho_i'], {}), True, 'import tensorflow as tf\n'), (86, 'neorl.rl.baselines.shared.tf_util.check_shape', 'check_shape', (['[qret, done_seq[i], reward_seq[i], rho_bar[i], q_is[i], value_sequence[i]]', '([[n_envs]] * 6)'], {}), False, 'from neorl.rl.baselines.shared.tf_util import batch_to_seq, seq_to_batch, check_shape, avg_norm, gradient_add, q_explained_variance, total_episode_reward_logger\n'), (106, 'collections.deque', 'deque', ([], {'maxlen': '(40)'}), False, 'from collections import deque\n'), (107, 'collections.deque', 'deque', ([], {'maxlen': '(40)'}), False, 'from collections import deque\n'), (118, 'numpy.reshape', 'np.reshape', (['rewards', '[self.n_envs, self.n_steps]'], {}), True, 'import numpy as np\n'), (119, 'numpy.reshape', 'np.reshape', 
(['masks', '[self.n_envs, self.n_steps]'], {}), True, 'import numpy as np\n'), (43, 'tensorflow.range', 'tf.range', (['(0)', 'input_tensor.shape[0]'], {'dtype': 'tf.int64'}), True, 'import tensorflow as tf\n'), (137, 'numpy.mean', 'np.mean', (['self.len_buffer'], {}), True, 'import numpy as np\n'), (148, 'numpy.mean', 'np.mean', (['self.rewbuffer'], {}), True, 'import numpy as np\n'), (255, 'neorl.rl.baselines.shared.SetVerbosity', 'SetVerbosity', (['self.verbose'], {}), False, 'from neorl.rl.baselines.shared import ActorCriticRLModel, tf_util, SetVerbosity, TensorboardWriter\n'), (272, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (553, 'neorl.rl.baselines.shared.SetVerbosity', 'SetVerbosity', (['self.verbose'], {}), False, 'from neorl.rl.baselines.shared import ActorCriticRLModel, tf_util, SetVerbosity, TensorboardWriter\n'), (553, 'neorl.rl.baselines.shared.TensorboardWriter', 'TensorboardWriter', (['self.graph', 'self.tensorboard_log', 'tb_log_name', 'new_tb_log'], {}), False, 'from neorl.rl.baselines.shared import ActorCriticRLModel, tf_util, SetVerbosity, TensorboardWriter\n'), (557, 'neorl.rl.baselines.shared.schedules.Scheduler', 'Scheduler', ([], {'initial_value': 'self.learning_rate', 'n_values': 'total_timesteps', 'schedule': 'self.lr_schedule'}), False, 'from neorl.rl.baselines.shared.schedules import Scheduler\n'), (567, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (694, 'numpy.zeros', 'np.zeros', (['(n_env, obs_height, obs_width, obs_num_channels)'], {'dtype': 'self.obs_dtype'}), True, 'import numpy as np\n'), (750, 'numpy.copy', 'np.copy', (['self.obs'], {}), True, 'import numpy as np\n'), (274, 'neorl.rl.baselines.shared.tf_util.make_session', 'tf_util.make_session', ([], {'num_cpu': 'self.n_cpu_tf_sess', 'graph': 'self.graph'}), False, 'from neorl.rl.baselines.shared import ActorCriticRLModel, tf_util, SetVerbosity, TensorboardWriter\n'), (284, 'neorl.rl.baselines.shared.tf_util.get_trainable_vars', 'tf_util.get_trainable_vars', (['"""model"""'], {}), False, 'from neorl.rl.baselines.shared import ActorCriticRLModel, tf_util, SetVerbosity, TensorboardWriter\n'), (476, 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', ([], {'learning_rate': 'self.learning_rate_ph', 'decay': 'self.rprop_alpha', 'epsilon': 'self.rprop_epsilon'}), True, 'import tensorflow as tf\n'), (503, 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), True, 'import tensorflow as tf\n'), (533, 'tensorflow.RunOptions', 'tf.RunOptions', ([], {'trace_level': 'tf.RunOptions.FULL_TRACE'}), True, 'import tensorflow as tf\n'), (534, 'tensorflow.RunMetadata', 'tf.RunMetadata', ([], {}), True, 'import tensorflow as tf\n'), (563, 'neorl.rl.baselines.acer.buffer.Buffer', 'Buffer', ([], {'env': 'self.env', 'n_steps': 'self.n_steps', 'size': 'self.buffer_size'}), False, 'from neorl.rl.baselines.acer.buffer import Buffer\n'), (724, 'numpy.copy', 'np.copy', (['self.obs'], {}), True, 'import numpy as np\n'), (731, 'numpy.clip', 'np.clip', (['actions', 'self.env.action_space.low', 'self.env.action_space.high'], {}), True, 'import numpy as np\n'), (753, 'numpy.asarray', 'np.asarray', (['enc_obs'], {'dtype': 'self.obs_dtype'}), True, 'import numpy as np\n'), (754, 'numpy.asarray', 'np.asarray', (['mb_obs'], {'dtype': 'self.obs_dtype'}), True, 'import numpy as np\n'), (755, 'numpy.asarray', 'np.asarray', (['mb_actions'], {'dtype': 'np.int64'}), True, 'import numpy as np\n'), (756, 'numpy.asarray', 'np.asarray', (['mb_rewards'], {'dtype': 
'np.float32'}), True, 'import numpy as np\n'), (757, 'numpy.asarray', 'np.asarray', (['mb_mus'], {'dtype': 'np.float32'}), True, 'import numpy as np\n'), (758, 'numpy.asarray', 'np.asarray', (['mb_dones'], {'dtype': 'np.bool'}), True, 'import numpy as np\n'), (291, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""moving_average"""'], {}), True, 'import tensorflow as tf\n'), (293, 'tensorflow.train.ExponentialMovingAverage', 'tf.train.ExponentialMovingAverage', (['self.alpha'], {}), True, 'import tensorflow as tf\n'), (301, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""polyak_model"""'], {'reuse': '(True)', 'custom_getter': 'custom_getter'}), True, 'import tensorflow as tf\n'), (307, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""loss"""'], {'reuse': '(False)'}), True, 'import tensorflow as tf\n'), (308, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[self.n_batch]'], {}), True, 'import tensorflow as tf\n'), (309, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[self.n_batch]'], {}), True, 'import tensorflow as tf\n'), (310, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[self.n_batch, self.n_act]'], {}), True, 'import tensorflow as tf\n'), (312, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[]'], {}), True, 'import tensorflow as tf\n'), (382, 'tensorflow.log', 'tf.log', (['(f_i + eps)'], {}), True, 'import tensorflow as tf\n'), (409, 'neorl.rl.baselines.shared.tf_util.check_shape', 'check_shape', (['[qret, q_i]', '([[self.n_envs * self.n_steps]] * 2)'], {}), False, 'from neorl.rl.baselines.shared.tf_util import batch_to_seq, seq_to_batch, check_shape, avg_norm, gradient_add, q_explained_variance, total_episode_reward_logger\n'), (415, 'neorl.rl.baselines.shared.tf_util.check_shape', 'check_shape', (['[loss_policy, loss_q, entropy]', '([[]] * 3)'], {}), False, 'from neorl.rl.baselines.shared.tf_util import batch_to_seq, seq_to_batch, check_shape, avg_norm, gradient_add, q_explained_variance, total_episode_reward_logger\n'), (418, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""entropy_loss"""', 'entropy'], {}), True, 'import tensorflow as tf\n'), (419, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""policy_gradient_loss"""', 'loss_policy'], {}), True, 'import tensorflow as tf\n'), (420, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""value_function_loss"""', 'loss_q'], {}), True, 'import tensorflow as tf\n'), (421, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'loss'], {}), True, 'import tensorflow as tf\n'), (460, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""input_info"""'], {'reuse': '(False)'}), True, 'import tensorflow as tf\n'), (481, 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[_opt_op]'], {}), True, 'import tensorflow as tf\n'), (482, 'tensorflow.group', 'tf.group', (['ema_apply_op'], {}), True, 'import tensorflow as tf\n'), (606, 'neorl.rl.baselines.shared.logger.record_tabular', 'logger.record_tabular', (['"""total_timesteps"""', 'self.num_timesteps'], {}), False, 'from neorl.rl.baselines.shared import logger\n'), (615, 'neorl.rl.baselines.shared.logger.dump_tabular', 'logger.dump_tabular', ([], {}), False, 'from neorl.rl.baselines.shared import logger\n'), (620, 'numpy.random.poisson', 'np.random.poisson', (['self.replay_ratio'], {}), True, 'import numpy as np\n'), (321, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(train_model.policy_proba * train_model.q_value)'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (385, 
'tensorflow.reduce_mean', 'tf.reduce_mean', (['gain_f'], {}), True, 'import tensorflow as tf\n'), (388, 'tensorflow.reshape', 'tf.reshape', (['value', '[self.n_envs * self.n_steps, 1]'], {}), True, 'import tensorflow as tf\n'), (396, 'tensorflow.log', 'tf.log', (['(f_i_ + eps)'], {}), True, 'import tensorflow as tf\n'), (404, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['gain_bc'], {}), True, 'import tensorflow as tf\n'), (410, 'tensorflow.reshape', 'tf.reshape', (['q_i', '[self.n_envs, self.n_steps]'], {}), True, 'import tensorflow as tf\n'), (411, 'tensorflow.reshape', 'tf.reshape', (['qret', '[self.n_envs, self.n_steps]'], {}), True, 'import tensorflow as tf\n'), (427, 'tensorflow.gradients', 'tf.gradients', (['(-(loss_policy - self.ent_coef * entropy) * self.n_steps * self.n_envs)', 'phi_i'], {}), True, 'import tensorflow as tf\n'), (431, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(kl_grad * grad)'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (436, 'neorl.rl.baselines.shared.tf_util.avg_norm', 'avg_norm', (['kl_grad'], {}), False, 'from neorl.rl.baselines.shared.tf_util import batch_to_seq, seq_to_batch, check_shape, avg_norm, gradient_add, q_explained_variance, total_episode_reward_logger\n'), (437, 'neorl.rl.baselines.shared.tf_util.avg_norm', 'avg_norm', (['grad'], {}), False, 'from neorl.rl.baselines.shared.tf_util import batch_to_seq, seq_to_batch, check_shape, avg_norm, gradient_add, q_explained_variance, total_episode_reward_logger\n'), (444, 'tensorflow.gradients', 'tf.gradients', (['f_i_', 'self.params', 'grads_f'], {}), True, 'import tensorflow as tf\n'), (445, 'tensorflow.gradients', 'tf.gradients', (['(loss_q * self.q_coef)', 'self.params'], {}), True, 'import tensorflow as tf\n'), (450, 'tensorflow.global_norm', 'tf.global_norm', (['grads_q'], {}), True, 'import tensorflow as tf\n'), (451, 'tensorflow.global_norm', 'tf.global_norm', (['grads_policy'], {}), True, 'import tensorflow as tf\n'), (453, 'tensorflow.gradients', 'tf.gradients', (['loss', 'self.params'], {}), True, 'import tensorflow as tf\n'), (457, 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['grads', 'self.max_grad_norm'], {}), True, 'import tensorflow as tf\n'), (461, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.reward_ph'], {}), True, 'import tensorflow as tf\n'), (462, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.learning_rate'], {}), True, 'import tensorflow as tf\n'), (463, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['adv'], {}), True, 'import tensorflow as tf\n'), (464, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.mu_ph'], {}), True, 'import tensorflow as tf\n'), (467, 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""rewards"""', 'self.reward_ph'], {}), True, 'import tensorflow as tf\n'), (468, 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""learning_rate"""', 'self.learning_rate'], {}), True, 'import tensorflow as tf\n'), (469, 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""advantage"""', 'adv'], {}), True, 'import tensorflow as tf\n'), (470, 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""action_probability"""', 'self.mu_ph'], {}), True, 'import tensorflow as tf\n'), (471, 'neorl.rl.baselines.shared.tf_util.is_image', 'tf_util.is_image', (['self.observation_space'], {}), False, 'from neorl.rl.baselines.shared import ActorCriticRLModel, tf_util, SetVerbosity, TensorboardWriter\n'), (501, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import 
tensorflow as tf\n'), (287, 'neorl.rl.baselines.shared.tf_util.outer_scope_getter', 'tf_util.outer_scope_getter', (['"""train_model"""'], {}), False, 'from neorl.rl.baselines.shared import ActorCriticRLModel, tf_util, SetVerbosity, TensorboardWriter\n'), (341, 'tensorflow.reshape', 'tf.reshape', (['f_i', '[-1, 1]'], {}), True, 'import tensorflow as tf\n'), (342, 'tensorflow.reshape', 'tf.reshape', (['f_i_', '[-1, 1]'], {}), True, 'import tensorflow as tf\n'), (344, 'tensorflow.pow', 'tf.pow', (['rho_i', '(1 / self.n_act)'], {}), True, 'import tensorflow as tf\n'), (438, 'tensorflow.abs', 'tf.abs', (['k_dot_g'], {}), True, 'import tensorflow as tf\n'), (439, 'tensorflow.abs', 'tf.abs', (['adj'], {}), True, 'import tensorflow as tf\n'), (446, 'neorl.rl.baselines.shared.tf_util.gradient_add', 'gradient_add', (['g1', 'g2', 'param'], {'verbose': 'self.verbose'}), False, 'from neorl.rl.baselines.shared.tf_util import batch_to_seq, seq_to_batch, check_shape, avg_norm, gradient_add, q_explained_variance, total_episode_reward_logger\n'), (449, 'neorl.rl.baselines.shared.tf_util.avg_norm', 'avg_norm', (['grads_f'], {}), False, 'from neorl.rl.baselines.shared.tf_util import batch_to_seq, seq_to_batch, check_shape, avg_norm, gradient_add, q_explained_variance, total_episode_reward_logger\n'), (472, 'tensorflow.summary.image', 'tf.summary.image', (['"""observation"""', 'train_model.obs_ph'], {}), True, 'import tensorflow as tf\n'), (474, 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""observation"""', 'train_model.obs_ph'], {}), True, 'import tensorflow as tf\n'), (384, 'tensorflow.minimum', 'tf.minimum', (['self.correction_term', 'rho_i'], {}), True, 'import tensorflow as tf\n'), (441, 'tensorflow.reshape', 'tf.reshape', (['adj', '[self.n_envs * self.n_steps, 1]'], {}), True, 'import tensorflow as tf\n'), (393, 'tensorflow.nn.relu', 'tf.nn.relu', (['(1.0 - self.correction_term / (rho_i_ + eps))'], {}), True, 'import tensorflow as tf\n'), (412, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['qret'], {}), True, 'import tensorflow as tf\n'), (432, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(kl_grad * grad)'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (607, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (433, 'tensorflow.square', 'tf.square', (['kl_grad'], {}), True, 'import tensorflow as tf\n'), (400, 'tensorflow.nn.relu', 'tf.nn.relu', (['(1.0 - self.correction_term / (rho + eps))'], {}), True, 'import tensorflow as tf\n')]
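Note: get_by_index in the record above selects one Q-value per row by flattening the 2-D tensor and gathering at row * n_cols + col. Below is a standalone sketch of the same trick next to the equivalent tf.gather_nd form; TF2 eager mode is assumed here purely so the values print, whereas the file itself is TF1 graph code.

import tensorflow as tf

q_value = tf.constant([[1., 2., 3.],
                       [4., 5., 6.]])
actions = tf.constant([2, 0], dtype=tf.int64)

# Flattened-gather trick used by get_by_index: index = row * n_cols + col.
flat_idx = tf.range(0, q_value.shape[0], dtype=tf.int64) * q_value.shape[1] + actions
q_i = tf.gather(tf.reshape(q_value, [-1]), flat_idx)

# Equivalent, more direct form.
rows = tf.range(q_value.shape[0], dtype=tf.int64)
q_i_nd = tf.gather_nd(q_value, tf.stack([rows, actions], axis=1))

print(q_i.numpy(), q_i_nd.numpy())  # [3. 4.] [3. 4.]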
bluetiger9/Vitis-AI
f61061eef7550d98bf02a171604c9a9f283a7c47
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Vitis pooling layers."""

import tensorflow as tf

from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.keras.utils.generic_utils import register_keras_serializable
from tensorflow_model_optimization.python.core.quantization.keras.vitis.utils import common_utils

__all__ = ['VitisAveragePooling2D', 'VitisGlobalAveragePooling2D']

serialize_keras_object = tf.keras.utils.serialize_keras_object
deserialize_keras_object = tf.keras.utils.deserialize_keras_object
logger = common_utils.VAILogger


@tf.function
def _get_avgpool_scale(kw, kh):
  if kh > 255 or kw > 255:
    return 1.0
  elif kh == 3 and kw == 3:
    return 9.0 * 7.0 / 64.0
  elif kh == 5 and kw == 5:
    return 25.0 * 10.0 / 256.0
  elif kh == 6 and kw == 6:
    return 36.0 * 7.0 / 256.0
  elif kh == 7 and kw == 7:
    return 49.0 * 21.0 / 1024.0
  elif kh == 14 and kw == 14:
    return 196.0 * 21.0 / 4096.0
  else:
    rec = tf.cast(kw * kh, tf.float32)
    n_max = 7 + tf.math.ceil(tf.math.log(rec) / tf.math.log(2.))
    ns = tf.range(0., n_max)
    ns_pow = tf.pow(2., ns)
    ks = tf.round(ns_pow / rec)
    diffs = tf.math.abs(ks / ns_pow - 1 / rec)
    n = tf.argmin(diffs)
    k = ks[n]
    scale = k / tf.pow(2., tf.cast(n, tf.float32))
    scale *= rec
    return scale


@register_keras_serializable(
    package='Vitis', name='VitisGlobalAveragePooling2D')
class VitisGlobalAveragePooling2D(tf.keras.layers.GlobalAveragePooling2D):
  """Vitis version of GlobalAveragePooling2D layer.

  This is a Vitis version of average pooling to simulate DPU behaviour,
  which uses integer approximations for averaging of specific sizes.
  """

  def __init__(self, **kwargs):
    """Create a Vitis.GlobalAveragePooling2D Layer.

    Args:
      quantizer: `Quantizer` used to quantize tensors.
      **kwargs: Additional keyword arguments to be passed to the keras layer.
    """
    super(VitisGlobalAveragePooling2D, self).__init__(**kwargs)

  def build(self, input_shape):
    super(VitisGlobalAveragePooling2D, self).build(input_shape)

  def call(self, inputs):
    outputs = super(VitisGlobalAveragePooling2D, self).call(inputs)

    # Simulate DPU behavior of AvgPooling
    input_shape = array_ops.shape(inputs)
    rescale_factor = _get_avgpool_scale(input_shape[1], input_shape[2])

    if rescale_factor != 1.0:
      outputs *= rescale_factor
    return outputs


@register_keras_serializable(package='Vitis', name='AveragePooling2D')
class VitisAveragePooling2D(tf.keras.layers.AveragePooling2D):
  """Vitis version of AveragePooling2D layer.

  This is a Vitis version of average pooling to simulate DPU behaviour,
  which uses integer approximations for averaging of specific sizes.
  """

  def __init__(self, **kwargs):
    """Create a Vitis.AveragePooling2D Layer.

    Args:
      quantizer: `Quantizer` used to quantize tensors.
      **kwargs: Additional keyword arguments to be passed to the keras layer.
    """
    super(VitisAveragePooling2D, self).__init__(**kwargs)

  def build(self, input_shape):
    super(VitisAveragePooling2D, self).build(input_shape)
    # Compute rescale factor in build() since the pool_size is determined.
    self.rescale_factor = _get_avgpool_scale(self.pool_size[0],
                                             self.pool_size[1])

  def call(self, inputs):
    outputs = super(VitisAveragePooling2D, self).call(inputs)

    # Simulate DPU behavior of AvgPooling
    input_shape = array_ops.shape(inputs)

    if self.rescale_factor != 1.0:
      outputs *= self.rescale_factor
    return outputs


def _types_dict():
  return {
      'VitisAveragePooling2D': VitisAveragePooling2D,
      'VitisGlobalAveragePooling2D': VitisGlobalAveragePooling2D,
  }
[ "tensorflow.math.abs", "tensorflow.python.ops.array_ops.shape", "tensorflow.range", "tensorflow.pow", "tensorflow.cast", "tensorflow.python.keras.utils.generic_utils.register_keras_serializable", "tensorflow.math.log", "tensorflow.round", "tensorflow.argmin" ]
tools/Vitis-AI-Quantizer/vai_q_tensorflow2.x/tensorflow_model_optimization/python/core/quantization/keras/vitis/layers/vitis_pooling.py
[(59, 'tensorflow.python.keras.utils.generic_utils.register_keras_serializable', 'register_keras_serializable', ([], {'package': '"""Vitis"""', 'name': '"""VitisGlobalAveragePooling2D"""'}), False, 'from tensorflow.python.keras.utils.generic_utils import register_keras_serializable\n'), (92, 'tensorflow.python.keras.utils.generic_utils.register_keras_serializable', 'register_keras_serializable', ([], {'package': '"""Vitis"""', 'name': '"""AveragePooling2D"""'}), False, 'from tensorflow.python.keras.utils.generic_utils import register_keras_serializable\n'), (84, 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['inputs'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (119, 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['inputs'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (46, 'tensorflow.cast', 'tf.cast', (['(kw * kh)', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (48, 'tensorflow.range', 'tf.range', (['(0.0)', 'n_max'], {}), True, 'import tensorflow as tf\n'), (49, 'tensorflow.pow', 'tf.pow', (['(2.0)', 'ns'], {}), True, 'import tensorflow as tf\n'), (50, 'tensorflow.round', 'tf.round', (['(ns_pow / rec)'], {}), True, 'import tensorflow as tf\n'), (51, 'tensorflow.math.abs', 'tf.math.abs', (['(ks / ns_pow - 1 / rec)'], {}), True, 'import tensorflow as tf\n'), (52, 'tensorflow.argmin', 'tf.argmin', (['diffs'], {}), True, 'import tensorflow as tf\n'), (54, 'tensorflow.cast', 'tf.cast', (['n', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (47, 'tensorflow.math.log', 'tf.math.log', (['rec'], {}), True, 'import tensorflow as tf\n'), (47, 'tensorflow.math.log', 'tf.math.log', (['(2.0)'], {}), True, 'import tensorflow as tf\n')]
urialon/lingvo
0819730882bfaa68d2eeb702e13d4c943172d5ff
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Send/Recv ops.

The following _Send()/_Recv() are adapted from python op wrappers
generated by python_op_gen_main. python_op_gen_main.cc's
PrintAllPythonOps needs to be updated to export internal ops.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from google.protobuf import text_format as _text_format
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import tensor_shape as _tensor_shape


def _Recv(tensor_type, tensor_name, send_device, recv_device, name=None):
  r"""Receives the named tensor from send_device on recv_device.

  Args:
    tensor_type: A `tf.DType`.
    tensor_name: A `string`. The name of the tensor to receive.
    send_device: A `string`. The name of the device sending the tensor.
    recv_device: A `string`. The name of the device receiving the tensor.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `tensor_type`. The tensor to receive.
  """
  result = _op_def_lib.apply_op(
      "_Recv",
      tensor_type=tensor_type,
      tensor_name=tensor_name,
      send_device=send_device,
      send_device_incarnation=0,
      recv_device=recv_device,
      client_terminated=False,
      name=name if name else "Recv")
  return result


_ops.RegisterShape("_Recv")(None)


def _Send(tensor, tensor_name, send_device, recv_device, name=None):
  r"""Sends the named tensor from send_device to recv_device.

  Args:
    tensor: A `Tensor`. The tensor to send.
    tensor_name: A `string`. The name of the tensor to send.
    send_device: A `string`. The name of the device sending the tensor.
    recv_device: A `string`. The name of the device receiving the tensor.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  result = _op_def_lib.apply_op(
      "_Send",
      tensor=tensor,
      tensor_name=tensor_name,
      send_device=send_device,
      send_device_incarnation=0,
      recv_device=recv_device,
      client_terminated=False,
      name=name if name else "Send")
  return result


_ops.RegisterShape("_Send")(None)


def _XlaSend(tensor, tensor_name, name=None):
  r"""Sends the named tensor over the named XLA channel.

  Args:
    tensor: A `Tensor`. The tensor to send.
    tensor_name: A `string`. The name of the tensor to send.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  result = _op_def_lib.apply_op(
      "XlaSend",
      tensor=tensor,
      tensor_name=tensor_name,
      name=name if name else "XlaSend")
  return result


def _XlaRecv(dtype, tensor_name, shape, name=None):
  r"""Receives the named tensor over the named XLA channel.

  Args:
    dtype: A `tf.DType`.
    tensor_name: A `string`. The name of the tensor to receive.
    shape: A `tf.TensorShape` or list of `ints`.
      The shape of the input tensor.
name: A name for the operation (optional). Returns: The created Operation. """ result = _op_def_lib.apply_op( "XlaRecv", dtype=dtype, shape=shape, tensor_name=tensor_name, name=name if name else "XlaRecv") return result def _InitOpDefLibrary(): op_list = _op_def_pb2.OpList() _text_format.Merge(_InitOpDefLibrary.op_list_ascii, op_list) _op_def_registry.register_op_list(op_list) op_def_lib = _op_def_library.OpDefLibrary() op_def_lib.add_op_list(op_list) return op_def_lib _InitOpDefLibrary.op_list_ascii = """op { name: "_Recv" output_arg { name: "tensor" type_attr: "tensor_type" } attr { name: "tensor_type" type: "type" } attr { name: "tensor_name" type: "string" } attr { name: "send_device" type: "string" } attr { name: "send_device_incarnation" type: "int" } attr { name: "recv_device" type: "string" } attr { name: "client_terminated" type: "bool" default_value { b: false } } is_stateful: true } op { name: "_Send" input_arg { name: "tensor" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "tensor_name" type: "string" } attr { name: "send_device" type: "string" } attr { name: "send_device_incarnation" type: "int" } attr { name: "recv_device" type: "string" } attr { name: "client_terminated" type: "bool" default_value { b: false } } is_stateful: true } op { name: "XlaRecv" output_arg { name: "tensor" type_attr: "dtype" } attr { name: "dtype" type: "type" } attr { name: "tensor_name" type: "string" } attr { name: "shape" type: "shape" } is_stateful: true } op { name: "XlaSend" input_arg { name: "tensor" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "tensor_name" type: "string" } is_stateful: true } """ _op_def_lib = _InitOpDefLibrary() def _TpuCore(device): """Returns the TPU core represented by <device>, or -1 if not TPU.""" prefix = "device:TPU_REPLICATED_CORE:" if prefix in device: return int(device[len(prefix):]) return -1 class Channel(object): """A communication channel to transfer tensors in order.""" def __init__(self, dtype, shape, send_device, recv_device, name=None): """Construct a channel. Args: dtype: The dtype of tensors sent through the channel. shape: The shape of tensors sent through the channel. Must be a fully defined shape for TPUs. send_device: A fully-specified tensorflow device. recv_device: A fully-specified tensorflow device. name: A name for the channel (optional). """ current_graph = _ops.get_default_graph() assert current_graph, "A channel is scoped within a tf.Graph" self._dtype = dtype self._send_device = send_device self._recv_device = recv_device self._name = current_graph.unique_name(name if name else "channel") assert shape is not None shape = _tensor_shape.TensorShape(shape) self._shape = shape self._send_tpu_core = _TpuCore(send_device) self._recv_tpu_core = _TpuCore(recv_device) self._send_called = False self._recv_op = None assert ((self._send_tpu_core == -1) == (self._recv_tpu_core == -1)), ( "Mixing TPU and non-TPU: %s and %s" % (send_device, recv_device)) if self._send_tpu_core >= 0: assert self._shape.is_fully_defined(), ( "TPU channel must have fully defined shape. 
Name: %s, shape: %s" % (self._name, self._shape)) assert self._send_tpu_core != self._recv_tpu_core, ( "TPU send/recv must be cross-core: %s and %s" % (send_device, recv_device)) def Send(self, tensor): """Sends a tensor through the channel.""" assert tensor.dtype == self._dtype assert not self._send_called, ( "Send called multiple times for %s" % self._name) self._send_called = True if self._send_tpu_core == -1: return _Send(tensor, self._name, self._send_device, self._recv_device) else: with _ops.device(self._send_device): return _XlaSend( tensor, tensor_name=self._name, name="Send_" + self._name) def Recv(self): """Receives a tensor from the channel.""" if self._send_tpu_core == -1: return _Recv(self._dtype, self._name, self._send_device, self._recv_device) else: with _ops.device(self._recv_device): return _XlaRecv( self._dtype, tensor_name=self._name, shape=self._shape, name="Recv_" + self._name)
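# Editor's note: the block below is an added usage sketch, not part of the
# original lingvo file. It shows the intended one-Send/one-Recv pairing of a
# Channel between two fully specified devices in a single tf.Graph; the
# device strings and the two-CPU session config are illustrative assumptions.
if __name__ == "__main__":
  import tensorflow as tf  # graph-mode (TF1-style) API, as used above

  cpu0 = "/job:localhost/replica:0/task:0/device:CPU:0"
  cpu1 = "/job:localhost/replica:0/task:0/device:CPU:1"
  with tf.Graph().as_default():
    channel = Channel(tf.float32, shape=[2, 2], send_device=cpu0,
                      recv_device=cpu1, name="demo_channel")
    with tf.device(cpu0):
      send_op = channel.Send(tf.ones([2, 2], dtype=tf.float32))
    with tf.device(cpu1):
      recv_tensor = channel.Recv()
    # Two logical CPU devices are needed for the cross-device rendezvous.
    with tf.Session(config=tf.ConfigProto(device_count={"CPU": 2})) as sess:
      _, received = sess.run([send_op, recv_tensor])
      print(received)  # expected: a 2x2 array of ones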
[ "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.core.framework.op_def_pb2.OpList", "tensorflow.python.framework.ops.RegisterShape", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.framework.op_def_library.OpDefLibrary", "tensorflow.python.framework.ops.device", "tensorflow.python.framework.op_def_registry.register_op_list" ]
lingvo/core/sendrecv.py
[(59, 'tensorflow.python.framework.ops.RegisterShape', '_ops.RegisterShape', (['"""_Recv"""'], {}), True, 'from tensorflow.python.framework import ops as _ops\n'), (87, 'tensorflow.python.framework.ops.RegisterShape', '_ops.RegisterShape', (['"""_Send"""'], {}), True, 'from tensorflow.python.framework import ops as _ops\n'), (131, 'tensorflow.core.framework.op_def_pb2.OpList', '_op_def_pb2.OpList', ([], {}), True, 'from tensorflow.core.framework import op_def_pb2 as _op_def_pb2\n'), (132, 'google.protobuf.text_format.Merge', '_text_format.Merge', (['_InitOpDefLibrary.op_list_ascii', 'op_list'], {}), True, 'from google.protobuf import text_format as _text_format\n'), (133, 'tensorflow.python.framework.op_def_registry.register_op_list', '_op_def_registry.register_op_list', (['op_list'], {}), True, 'from tensorflow.python.framework import op_def_registry as _op_def_registry\n'), (134, 'tensorflow.python.framework.op_def_library.OpDefLibrary', '_op_def_library.OpDefLibrary', ([], {}), True, 'from tensorflow.python.framework import op_def_library as _op_def_library\n'), (272, 'tensorflow.python.framework.ops.get_default_graph', '_ops.get_default_graph', ([], {}), True, 'from tensorflow.python.framework import ops as _ops\n'), (280, 'tensorflow.python.framework.tensor_shape.TensorShape', '_tensor_shape.TensorShape', (['shape'], {}), True, 'from tensorflow.python.framework import tensor_shape as _tensor_shape\n'), (306, 'tensorflow.python.framework.ops.device', '_ops.device', (['self._send_device'], {}), True, 'from tensorflow.python.framework import ops as _ops\n'), (316, 'tensorflow.python.framework.ops.device', '_ops.device', (['self._recv_device'], {}), True, 'from tensorflow.python.framework import ops as _ops\n')]
pune-lug/DeepVideoAnalytics
2650037040dca49b0f537df576af123dae8cef97
# Copyright 2015 Paul Balanca. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides data for the Pascal VOC Dataset (images + annotations).
"""
import os

import tensorflow as tf

from datasets import dataset_utils

slim = tf.contrib.slim

VOC_LABELS = {
    'none': (0, 'Background'),
    'aeroplane': (1, 'Vehicle'),
    'bicycle': (2, 'Vehicle'),
    'bird': (3, 'Animal'),
    'boat': (4, 'Vehicle'),
    'bottle': (5, 'Indoor'),
    'bus': (6, 'Vehicle'),
    'car': (7, 'Vehicle'),
    'cat': (8, 'Animal'),
    'chair': (9, 'Indoor'),
    'cow': (10, 'Animal'),
    'diningtable': (11, 'Indoor'),
    'dog': (12, 'Animal'),
    'horse': (13, 'Animal'),
    'motorbike': (14, 'Vehicle'),
    'person': (15, 'Person'),
    'pottedplant': (16, 'Indoor'),
    'sheep': (17, 'Animal'),
    'sofa': (18, 'Indoor'),
    'train': (19, 'Vehicle'),
    'tvmonitor': (20, 'Indoor'),
}


def get_split(split_name, dataset_dir, file_pattern, reader,
              split_to_sizes, items_to_descriptions, num_classes):
    """Gets a dataset tuple with instructions for reading Pascal VOC dataset.

    Args:
      split_name: A train/test split name.
      dataset_dir: The base directory of the dataset sources.
      file_pattern: The file pattern to use when matching the dataset sources.
        It is assumed that the pattern contains a '%s' string so that the split
        name can be inserted.
      reader: The TensorFlow reader type.
      split_to_sizes: A dict mapping each split name to its number of samples.
      items_to_descriptions: A dict mapping decoded items to short descriptions.
      num_classes: The number of object classes in the dataset.

    Returns:
      A `Dataset` namedtuple.

    Raises:
      ValueError: if `split_name` is not a valid train/test split.
    """
    if split_name not in split_to_sizes:
        raise ValueError('split name %s was not recognized.' % split_name)
    file_pattern = os.path.join(dataset_dir, file_pattern % split_name)

    # Allowing None in the signature so that dataset_factory can use the
    # default.
    if reader is None:
        reader = tf.TFRecordReader
    # Features in Pascal VOC TFRecords.
    keys_to_features = {
        'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
        'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'),
        'image/height': tf.FixedLenFeature([1], tf.int64),
        'image/width': tf.FixedLenFeature([1], tf.int64),
        'image/channels': tf.FixedLenFeature([1], tf.int64),
        'image/shape': tf.FixedLenFeature([3], tf.int64),
        'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/label': tf.VarLenFeature(dtype=tf.int64),
        'image/object/bbox/difficult': tf.VarLenFeature(dtype=tf.int64),
        'image/object/bbox/truncated': tf.VarLenFeature(dtype=tf.int64),
    }
    items_to_handlers = {
        'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'),
        'shape': slim.tfexample_decoder.Tensor('image/shape'),
        'object/bbox': slim.tfexample_decoder.BoundingBox(
            ['xmin', 'ymin', 'xmax', 'ymax'], 'image/object/bbox/'),
        'object/label': slim.tfexample_decoder.Tensor('image/object/bbox/label'),
        'object/difficult': slim.tfexample_decoder.Tensor('image/object/bbox/difficult'),
        'object/truncated': slim.tfexample_decoder.Tensor('image/object/bbox/truncated'),
    }
    decoder = slim.tfexample_decoder.TFExampleDecoder(
        keys_to_features, items_to_handlers)

    labels_to_names = None
    if dataset_utils.has_labels(dataset_dir):
        labels_to_names = dataset_utils.read_label_file(dataset_dir)
    # else:
    #     labels_to_names = create_readable_names_for_imagenet_labels()
    #     dataset_utils.write_label_file(labels_to_names, dataset_dir)

    return slim.dataset.Dataset(
        data_sources=file_pattern,
        reader=reader,
        decoder=decoder,
        num_samples=split_to_sizes[split_name],
        items_to_descriptions=items_to_descriptions,
        num_classes=num_classes,
        labels_to_names=labels_to_names)
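# Editor's note: the block below is an added usage sketch, not part of the
# original file. It shows how a dataset_factory-style caller might invoke
# get_split(); the sizes, descriptions, directory, and file pattern are
# illustrative assumptions, not the real Pascal VOC statistics.
if __name__ == "__main__":
    demo_split_to_sizes = {'train': 5011, 'test': 4952}  # assumed sizes
    demo_items_to_descriptions = {
        'image': 'A color image of varying height and width.',
        'shape': 'Shape of the image.',
        'object/bbox': 'A list of bounding boxes, one per object.',
        'object/label': 'A list of labels, one per object.',
    }
    dataset = get_split(
        split_name='train',
        dataset_dir='/tmp/pascalvoc_tfrecords',   # assumed location
        file_pattern='voc_2007_%s_*.tfrecord',    # assumed pattern
        reader=None,  # falls back to tf.TFRecordReader above
        split_to_sizes=demo_split_to_sizes,
        items_to_descriptions=demo_items_to_descriptions,
        num_classes=20)
    print(dataset.num_samples, dataset.num_classes)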
[ "tensorflow.FixedLenFeature", "tensorflow.VarLenFeature" ]
dvalib/ssd/datasets/pascalvoc_common.py
[(69, 'os.path.join', 'os.path.join', (['dataset_dir', '(file_pattern % split_name)'], {}), False, 'import os\n'), (103, 'datasets.dataset_utils.has_labels', 'dataset_utils.has_labels', (['dataset_dir'], {}), False, 'from datasets import dataset_utils\n'), (76, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.string'], {'default_value': '""""""'}), True, 'import tensorflow as tf\n'), (77, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.string'], {'default_value': '"""jpeg"""'}), True, 'import tensorflow as tf\n'), (78, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[1]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (79, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[1]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (80, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[1]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (81, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[3]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (82, 'tensorflow.VarLenFeature', 'tf.VarLenFeature', ([], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (83, 'tensorflow.VarLenFeature', 'tf.VarLenFeature', ([], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (84, 'tensorflow.VarLenFeature', 'tf.VarLenFeature', ([], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (85, 'tensorflow.VarLenFeature', 'tf.VarLenFeature', ([], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (86, 'tensorflow.VarLenFeature', 'tf.VarLenFeature', ([], {'dtype': 'tf.int64'}), True, 'import tensorflow as tf\n'), (87, 'tensorflow.VarLenFeature', 'tf.VarLenFeature', ([], {'dtype': 'tf.int64'}), True, 'import tensorflow as tf\n'), (88, 'tensorflow.VarLenFeature', 'tf.VarLenFeature', ([], {'dtype': 'tf.int64'}), True, 'import tensorflow as tf\n'), (104, 'datasets.dataset_utils.read_label_file', 'dataset_utils.read_label_file', (['dataset_dir'], {}), False, 'from datasets import dataset_utils\n')]
SeonghoBaek/RealtimeCamera
1b371b58eafdddf94330f008495dc9ad593ea8e1
import tensorflow as tf
from tensorflow.python.client import device_lib
import numpy as np
import util
import argparse
import os
import csv

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

input_feature_dim = 97
cond_step_dim = 8
cond_wafer_dim = 24
cond_dim = cond_step_dim + cond_wafer_dim
lstm_sequence_length = 20
lstm_hidden_size_layer1 = 64
lstm_hidden_size_layer2 = 64
lstm_feature_dim = lstm_hidden_size_layer1
lstm_z_sequence_dim = 16
lstm_linear_transform_input_dim = 2 * lstm_feature_dim
g_encoder_z_local_dim = 16
g_encoder_z_dim = lstm_z_sequence_dim + g_encoder_z_local_dim + cond_dim
g_encoder_input_dim = input_feature_dim
g_encoder_layer1_dim = 84
g_encoder_layer2_dim = 64
g_encoder_layer3_dim = 32
g_decoder_output_dim = input_feature_dim
g_decoder_layer2_dim = 72
g_decoder_layer1_dim = 84
d_layer_1_dim = input_feature_dim
d_layer_2_dim = 64
d_layer_3_dim = 32
d_layer_4_dim = 16
num_block_layers = 3
dense_layer_depth = 16


def lstm_network(input, scope='lstm_network'):
    with tf.variable_scope(scope):
        # tf.nn.rnn_cell
        lstm_cell1 = tf.contrib.rnn.BasicLSTMCell(lstm_hidden_size_layer1, forget_bias=1.0)
        lstm_cell2 = tf.contrib.rnn.BasicLSTMCell(lstm_hidden_size_layer2, forget_bias=1.0)

        lstm_cells = tf.contrib.rnn.MultiRNNCell(cells=[lstm_cell1, lstm_cell2], state_is_tuple=True)

        # tf.nn.rnn_cell
        # lstm_cell1 = tf.nn.rnn_cell.LSTMCell(lstm_hidden_size_layer1, forget_bias=1.0)
        # lstm_cell2 = tf.nn.rnn_cell.LSTMCell(lstm_hidden_size_layer2, forget_bias=1.0)
        # lstm_cells = tf.nn.rnn_cell.MultiRNNCell(cells=[lstm_cell1, lstm_cell2], state_is_tuple=True)

        # initial_state = lstm_cells.zero_state(batch_size, tf.float32)

        _, states = tf.nn.dynamic_rnn(lstm_cells, input, dtype=tf.float32, initial_state=None)

        # z_sequence_output = states[1].h
        # print(z_sequence_output.get_shape())
        states_concat = tf.concat([states[0].h, states[1].h], 1)

        # fc(input_data, out_dim, non_linear_fn=None, initial_value=None, use_bias=True, scope='fc')
        z_sequence_output = fc(states_concat, lstm_z_sequence_dim, scope='linear_transform')

    return z_sequence_output


def fc(input_data, out_dim, non_linear_fn=None, initial_value=None, use_bias=True, scope='fc'):
    with tf.variable_scope(scope):
        input_dims = input_data.get_shape().as_list()

        if len(input_dims) == 4:
            _, input_h, input_w, num_channels = input_dims
            in_dim = input_h * input_w * num_channels
            flat_input = tf.reshape(input_data, [-1, in_dim])
        else:
            in_dim = input_dims[-1]
            flat_input = input_data

        if initial_value is None:
            fc_weight = tf.get_variable("weights", shape=[in_dim, out_dim],
                                        initializer=tf.random_normal_initializer(mean=0., stddev=0.01))
            fc_bias = tf.get_variable("bias", shape=[out_dim], initializer=tf.constant_initializer(0.0))
        else:
            fc_weight = tf.get_variable("weights", initializer=initial_value[0])
            fc_bias = tf.get_variable("bias", shape=[out_dim], initializer=initial_value[1])

        if use_bias:
            output = tf.add(tf.matmul(flat_input, fc_weight), fc_bias)
        else:
            output = tf.matmul(flat_input, fc_weight)

        if non_linear_fn is None:
            return output
        else:
            activation = non_linear_fn(output)

        return activation


def batch_norm(x, b_train, scope, reuse=False):
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        n_out = x.get_shape().as_list()[-1]

        beta = tf.get_variable('beta', initializer=tf.constant(0.0, shape=[n_out]))
        gamma = tf.get_variable('gamma', initializer=tf.constant(1.0, shape=[n_out]))

        batch_mean, batch_var = tf.nn.moments(x, [0], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=0.9)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = tf.cond(b_train, mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)

        return normed


def conv(input, scope, filter_dims, stride_dims, padding='SAME',
         non_linear_fn=tf.nn.relu, dilation=[1, 1, 1, 1], bias=True):
    input_dims = input.get_shape().as_list()

    assert (len(input_dims) == 4)  # batch_size, height, width, num_channels_in
    assert (len(filter_dims) == 3)  # height, width and num_channels out
    assert (len(stride_dims) == 2)  # stride height and width

    num_channels_in = input_dims[-1]
    filter_h, filter_w, num_channels_out = filter_dims
    stride_h, stride_w = stride_dims

    with tf.variable_scope(scope):
        conv_weight = tf.Variable(
            tf.truncated_normal([filter_h, filter_w, num_channels_in, num_channels_out], stddev=0.1, dtype=tf.float32))
        conv_bias = tf.Variable(tf.zeros([num_channels_out], dtype=tf.float32))

        map = tf.nn.conv2d(input, conv_weight, strides=[1, stride_h, stride_w, 1], padding=padding, dilations=dilation)

        if bias is True:
            map = tf.nn.bias_add(map, conv_bias)

        if non_linear_fn is not None:
            activation = non_linear_fn(map)
        else:
            activation = map

        # print(activation.get_shape().as_list())
        return activation


def batch_norm_conv(x, b_train, scope):
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        n_out = x.get_shape().as_list()[-1]

        beta = tf.get_variable('beta', initializer=tf.constant(0.0, shape=[n_out]))
        gamma = tf.get_variable('gamma', initializer=tf.constant(1.0, shape=[n_out]))

        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=0.9)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = tf.cond(b_train, mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)

        return normed


def add_dense_layer(layer, filter_dims, act_func=tf.nn.relu, scope='dense_layer',
                    use_bn=True, bn_phaze=False, use_bias=False, dilation=[1, 1, 1, 1]):
    with tf.variable_scope(scope):
        l = layer

        if use_bn:
            l = batch_norm_conv(l, b_train=bn_phaze, scope='bn')

        l = act_func(l)
        l = conv(l, scope='conv', filter_dims=filter_dims, stride_dims=[1, 1],
                 dilation=dilation, non_linear_fn=None, bias=use_bias)
        l = tf.concat([l, layer], 3)

    return l


def add_residual_layer(layer, filter_dims, act_func=tf.nn.relu, scope='residual_layer',
                       use_bn=True, bn_phaze=False, use_bias=False, dilation=[1, 1, 1, 1]):
    with tf.variable_scope(scope):
        l = layer

        if use_bn:
            l = batch_norm_conv(l, b_train=bn_phaze, scope='bn')

        l = act_func(l)
        l = conv(l, scope='conv', filter_dims=filter_dims, stride_dims=[1, 1],
                 dilation=dilation, non_linear_fn=act_func, bias=use_bias)

    return l


def add_dense_transition_layer(layer, filter_dims, stride_dims=[1, 1], act_func=tf.nn.relu,
                               scope='transition', use_bn=True, bn_phaze=False,
                               use_pool=True, use_bias=False, dilation=[1, 1, 1, 1]):
    with tf.variable_scope(scope):
        # Start from the input so `l` is defined even when use_bn is False.
        l = layer
        if use_bn:
            l = batch_norm_conv(l, b_train=bn_phaze, scope='bn')

        l = act_func(l)
        l = conv(l, scope='conv', filter_dims=filter_dims, stride_dims=stride_dims,
                 non_linear_fn=None, bias=use_bias, dilation=dilation)

        if use_pool:
            l = tf.nn.max_pool(l, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    return l


def global_avg_pool(input_data, output_length=1, padding='VALID', scope='global_avg_pool'):
    input_dims = input_data.get_shape().as_list()

    assert (len(input_dims) == 4)  # batch_size, height, width, num_channels_in

    num_channels_in = input_dims[-1]
    height = input_dims[1]
    width = input_dims[2]

    with tf.variable_scope(scope):
        if output_length == 1:
            pool = tf.nn.avg_pool(input_data, [1, height, width, 1], strides=[1, 1, 1, 1], padding=padding)
            # keepdims=True keeps the tensor rank 4 so the squeeze below is valid.
            pool = tf.reduce_mean(pool, axis=[1, 2], keepdims=True)
            pool = tf.squeeze(pool, axis=[1, 2])

            return pool
        else:
            if num_channels_in != output_length:
                conv_weight = tf.Variable(tf.truncated_normal([1, 1, num_channels_in, output_length],
                                                              stddev=0.1, dtype=tf.float32))
                conv = tf.nn.conv2d(input_data, conv_weight, strides=[1, 1, 1, 1], padding='SAME')
                pool = tf.nn.avg_pool(conv, ksize=[1, height, width, 1], strides=[1, 1, 1, 1], padding=padding)
            else:
                pool = tf.nn.avg_pool(input_data, ksize=[1, height, width, 1], strides=[1, 1, 1, 1], padding=padding)
            pool = tf.squeeze(pool, axis=[1, 2])

            return pool


def avg_pool(input, scope, filter_dims, stride_dims, padding='SAME'):
    assert (len(filter_dims) == 2)  # filter height and width
    assert (len(stride_dims) == 2)  # stride height and width

    filter_h, filter_w = filter_dims
    stride_h, stride_w = stride_dims

    with tf.variable_scope(scope):
        pool = tf.nn.avg_pool(input, ksize=[1, filter_h, filter_w, 1],
                              strides=[1, stride_h, stride_w, 1], padding=padding)

        return pool


def get_deconv2d_output_dims(input_dims, filter_dims, stride_dims, padding):
    batch_size, input_h, input_w, num_channels_in = input_dims
    filter_h, filter_w, num_channels_out = filter_dims
    stride_h, stride_w = stride_dims

    if padding == 'SAME':
        out_h = input_h * stride_h
    elif padding == 'VALID':
        out_h = (input_h - 1) * stride_h + filter_h

    if padding == 'SAME':
        out_w = input_w * stride_w
    elif padding == 'VALID':
        out_w = (input_w - 1) * stride_w + filter_w

    return [batch_size, out_h, out_w, num_channels_out]


def deconv(input_data, b_size, scope, filter_dims, stride_dims, padding='SAME', non_linear_fn=tf.nn.relu):
    input_dims = input_data.get_shape().as_list()
    # print(scope, 'in', input_dims)

    assert (len(input_dims) == 4)  # batch_size, height, width, num_channels_in
    assert (len(filter_dims) == 3)  # height, width and num_channels out
    assert (len(stride_dims) == 2)  # stride height and width

    input_dims = [b_size, input_dims[1], input_dims[2], input_dims[3]]
    num_channels_in = input_dims[-1]
    filter_h, filter_w, num_channels_out = filter_dims
    stride_h, stride_w = stride_dims

    output_dims = get_deconv2d_output_dims(input_dims, filter_dims, stride_dims, padding)

    with tf.variable_scope(scope):
        deconv_weight = tf.Variable(
            tf.random_normal([filter_h, filter_w, num_channels_out, num_channels_in], stddev=0.1, dtype=tf.float32))

        deconv_bias = tf.Variable(tf.zeros([num_channels_out], dtype=tf.float32))

        map = tf.nn.conv2d_transpose(input_data, deconv_weight, output_dims,
                                     strides=[1, stride_h, stride_w, 1], padding=padding)

        map = tf.nn.bias_add(map, deconv_bias)

        activation = non_linear_fn(map)

        # print(scope, 'out', activation.get_shape().as_list())
        return activation


def self_attention(x, channels, act_func=tf.nn.relu, scope='attention'):
    with tf.variable_scope(scope):
        batch_size, height, width, num_channels = x.get_shape().as_list()

        f = conv(x, scope='f_conv', filter_dims=[1, 1, channels//8], stride_dims=[1, 1], non_linear_fn=act_func)
        f = tf.layers.max_pooling2d(f, pool_size=2, strides=2, padding='SAME')

        print('attention f dims: ' + str(f.get_shape().as_list()))

        g = conv(x, scope='g_conv', filter_dims=[1, 1, channels//8], stride_dims=[1, 1], non_linear_fn=act_func)

        print('attention g dims: ' + str(g.get_shape().as_list()))

        h = conv(x, scope='h_conv', filter_dims=[1, 1, channels//2], stride_dims=[1, 1], non_linear_fn=act_func)
        h = tf.layers.max_pooling2d(h, pool_size=2, strides=2, padding='SAME')

        print('attention h dims: ' + str(h.get_shape().as_list()))

        # N = h * w
        g = tf.reshape(g, shape=[-1, g.shape[1]*g.shape[2], g.get_shape().as_list()[-1]])

        print('attention g flat dims: ' + str(g.get_shape().as_list()))

        f = tf.reshape(f, shape=[-1, f.shape[1]*f.shape[2], f.shape[-1]])

        print('attention f flat dims: ' + str(f.get_shape().as_list()))

        s = tf.matmul(g, f, transpose_b=True)  # [bs, N, N // 4]; f was max-pooled by 2x2

        beta = tf.nn.softmax(s)  # attention map

        print('attention beta dims: ' + str(s.get_shape().as_list()))

        h = tf.reshape(h, shape=[-1, h.shape[1]*h.shape[2], h.shape[-1]])

        print('attention h flat dims: ' + str(h.get_shape().as_list()))

        o = tf.matmul(beta, h)  # [bs, N, C // 2]

        print('attention o dims: ' + str(o.get_shape().as_list()))

        gamma = tf.get_variable("gamma", [1], initializer=tf.constant_initializer(0.0))

        o = tf.reshape(o, shape=[-1, height, width, num_channels // 2])  # [bs, h, w, C // 2]
        o = conv(o, scope='attn_conv', filter_dims=[1, 1, channels], stride_dims=[1, 1], non_linear_fn=act_func)
        x = gamma * o + x

    return x
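# Editor's note: the block below is an added smoke test, not part of the
# original file. It chains several of the layers above (conv -> dense block
# -> self-attention -> global average pool) to show the expected shapes; the
# 32x32x3 input and all layer sizes are illustrative assumptions.
if __name__ == "__main__":
    inputs = tf.placeholder(tf.float32, [None, 32, 32, 3])
    b_train = tf.placeholder(tf.bool)

    l = conv(inputs, scope='stem', filter_dims=[3, 3, 16], stride_dims=[1, 1])
    # The dense block concatenates its input, so channels grow to 16 + 16 = 32.
    l = add_dense_layer(l, filter_dims=[3, 3, dense_layer_depth],
                        scope='dense_1', bn_phaze=b_train)
    l = self_attention(l, channels=l.get_shape().as_list()[-1])
    features = global_avg_pool(l, output_length=8)
    print(features.get_shape().as_list())  # expected: [None, 8]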
[ "tensorflow.nn.dynamic_rnn", "tensorflow.get_variable", "tensorflow.concat", "tensorflow.control_dependencies", "tensorflow.zeros", "tensorflow.nn.max_pool", "tensorflow.nn.conv2d_transpose", "tensorflow.train.ExponentialMovingAverage", "tensorflow.nn.conv2d", "tensorflow.nn.moments", "tensorflow.squeeze", "tensorflow.contrib.rnn.MultiRNNCell", "tensorflow.random_normal_initializer", "tensorflow.matmul", "tensorflow.nn.batch_normalization", "tensorflow.truncated_normal", "tensorflow.identity", "tensorflow.nn.avg_pool", "tensorflow.nn.bias_add", "tensorflow.nn.softmax", "tensorflow.constant", "tensorflow.reduce_mean", "tensorflow.contrib.rnn.BasicLSTMCell", "tensorflow.reshape", "tensorflow.layers.max_pooling2d", "tensorflow.constant_initializer", "tensorflow.variable_scope", "tensorflow.random_normal" ]
layers.py
[(44, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (46, 'tensorflow.contrib.rnn.BasicLSTMCell', 'tf.contrib.rnn.BasicLSTMCell', (['lstm_hidden_size_layer1'], {'forget_bias': '(1.0)'}), True, 'import tensorflow as tf\n'), (47, 'tensorflow.contrib.rnn.BasicLSTMCell', 'tf.contrib.rnn.BasicLSTMCell', (['lstm_hidden_size_layer2'], {'forget_bias': '(1.0)'}), True, 'import tensorflow as tf\n'), (49, 'tensorflow.contrib.rnn.MultiRNNCell', 'tf.contrib.rnn.MultiRNNCell', ([], {'cells': '[lstm_cell1, lstm_cell2]', 'state_is_tuple': '(True)'}), True, 'import tensorflow as tf\n'), (59, 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['lstm_cells', 'input'], {'dtype': 'tf.float32', 'initial_state': 'None'}), True, 'import tensorflow as tf\n'), (63, 'tensorflow.concat', 'tf.concat', (['[states[0].h, states[1].h]', '(1)'], {}), True, 'import tensorflow as tf\n'), (72, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (104, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {'reuse': 'tf.AUTO_REUSE'}), True, 'import tensorflow as tf\n'), (110, 'tensorflow.nn.moments', 'tf.nn.moments', (['x', '[0]'], {'name': '"""moments"""'}), True, 'import tensorflow as tf\n'), (111, 'tensorflow.train.ExponentialMovingAverage', 'tf.train.ExponentialMovingAverage', ([], {'decay': '(0.9)'}), True, 'import tensorflow as tf\n'), (121, 'tensorflow.nn.batch_normalization', 'tf.nn.batch_normalization', (['x', 'mean', 'var', 'beta', 'gamma', '(0.001)'], {}), True, 'import tensorflow as tf\n'), (138, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (142, 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['input', 'conv_weight'], {'strides': '[1, stride_h, stride_w, 1]', 'padding': 'padding', 'dilations': 'dilation'}), True, 'import tensorflow as tf\n'), (157, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {'reuse': 'tf.AUTO_REUSE'}), True, 'import tensorflow as tf\n'), (163, 'tensorflow.nn.moments', 'tf.nn.moments', (['x', '[0, 1, 2]'], {'name': '"""moments"""'}), True, 'import tensorflow as tf\n'), (164, 'tensorflow.train.ExponentialMovingAverage', 'tf.train.ExponentialMovingAverage', ([], {'decay': '(0.9)'}), True, 'import tensorflow as tf\n'), (174, 'tensorflow.nn.batch_normalization', 'tf.nn.batch_normalization', (['x', 'mean', 'var', 'beta', 'gamma', '(0.001)'], {}), True, 'import tensorflow as tf\n'), (181, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (190, 'tensorflow.concat', 'tf.concat', (['[l, layer]', '(3)'], {}), True, 'import tensorflow as tf\n'), (197, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (211, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (234, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (260, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (261, 'tensorflow.nn.avg_pool', 'tf.nn.avg_pool', (['input'], {'ksize': '[1, filter_h, filter_w, 1]', 'strides': '[1, stride_h, stride_w, 1]', 'padding': 'padding'}), True, 'import tensorflow as tf\n'), (302, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (308, 'tensorflow.nn.conv2d_transpose', 'tf.nn.conv2d_transpose', (['input_data', 'deconv_weight', 
'output_dims'], {'strides': '[1, stride_h, stride_w, 1]', 'padding': 'padding'}), True, 'import tensorflow as tf\n'), (311, 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['map', 'deconv_bias'], {}), True, 'import tensorflow as tf\n'), (320, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (324, 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', (['f'], {'pool_size': '(2)', 'strides': '(2)', 'padding': '"""SAME"""'}), True, 'import tensorflow as tf\n'), (333, 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', (['h'], {'pool_size': '(2)', 'strides': '(2)', 'padding': '"""SAME"""'}), True, 'import tensorflow as tf\n'), (342, 'tensorflow.reshape', 'tf.reshape', (['f'], {'shape': '[-1, f.shape[1] * f.shape[2], f.shape[-1]]'}), True, 'import tensorflow as tf\n'), (346, 'tensorflow.matmul', 'tf.matmul', (['g', 'f'], {'transpose_b': '(True)'}), True, 'import tensorflow as tf\n'), (348, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['s'], {}), True, 'import tensorflow as tf\n'), (352, 'tensorflow.reshape', 'tf.reshape', (['h'], {'shape': '[-1, h.shape[1] * h.shape[2], h.shape[-1]]'}), True, 'import tensorflow as tf\n'), (356, 'tensorflow.matmul', 'tf.matmul', (['beta', 'h'], {}), True, 'import tensorflow as tf\n'), (362, 'tensorflow.reshape', 'tf.reshape', (['o'], {'shape': '[-1, height, width, num_channels // 2]'}), True, 'import tensorflow as tf\n'), (78, 'tensorflow.reshape', 'tf.reshape', (['input_data', '[-1, in_dim]'], {}), True, 'import tensorflow as tf\n'), (87, 'tensorflow.get_variable', 'tf.get_variable', (['"""weights"""'], {'initializer': 'initial_value[0]'}), True, 'import tensorflow as tf\n'), (88, 'tensorflow.get_variable', 'tf.get_variable', (['"""bias"""'], {'shape': '[out_dim]', 'initializer': 'initial_value[1]'}), True, 'import tensorflow as tf\n'), (93, 'tensorflow.matmul', 'tf.matmul', (['flat_input', 'fc_weight'], {}), True, 'import tensorflow as tf\n'), (140, 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[filter_h, filter_w, num_channels_in, num_channels_out]'], {'stddev': '(0.1)', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (141, 'tensorflow.zeros', 'tf.zeros', (['[num_channels_out]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (145, 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['map', 'conv_bias'], {}), True, 'import tensorflow as tf\n'), (220, 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['l'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), True, 'import tensorflow as tf\n'), (236, 'tensorflow.nn.avg_pool', 'tf.nn.avg_pool', (['input_data', '[1, height, width, 1]'], {'strides': '[1, 1, 1, 1]', 'padding': 'padding'}), True, 'import tensorflow as tf\n'), (237, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['pool'], {'axis': '[1, 2]'}), True, 'import tensorflow as tf\n'), (238, 'tensorflow.squeeze', 'tf.squeeze', (['pool'], {'axis': '[1, 2]'}), True, 'import tensorflow as tf\n'), (248, 'tensorflow.squeeze', 'tf.squeeze', (['pool'], {'axis': '[1, 2]'}), True, 'import tensorflow as tf\n'), (304, 'tensorflow.random_normal', 'tf.random_normal', (['[filter_h, filter_w, num_channels_out, num_channels_in]'], {'stddev': '(0.1)', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (306, 'tensorflow.zeros', 'tf.zeros', (['[num_channels_out]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (91, 'tensorflow.matmul', 'tf.matmul', (['flat_input', 'fc_weight'], {}), True, 'import tensorflow as tf\n'), (107, 
'tensorflow.constant', 'tf.constant', (['(0.0)'], {'shape': '[n_out]'}), True, 'import tensorflow as tf\n'), (108, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[n_out]'}), True, 'import tensorflow as tf\n'), (115, 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[ema_apply_op]'], {}), True, 'import tensorflow as tf\n'), (160, 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'shape': '[n_out]'}), True, 'import tensorflow as tf\n'), (161, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'shape': '[n_out]'}), True, 'import tensorflow as tf\n'), (168, 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[ema_apply_op]'], {}), True, 'import tensorflow as tf\n'), (244, 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['input_data', 'conv_weight'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), True, 'import tensorflow as tf\n'), (245, 'tensorflow.nn.avg_pool', 'tf.nn.avg_pool', (['conv'], {'ksize': '[1, height, width, 1]', 'strides': '[1, 1, 1, 1]', 'padding': 'padding'}), True, 'import tensorflow as tf\n'), (247, 'tensorflow.nn.avg_pool', 'tf.nn.avg_pool', (['input_data'], {'ksize': '[1, height, width, 1]', 'strides': '[1, 1, 1, 1]', 'padding': 'padding'}), True, 'import tensorflow as tf\n'), (360, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (84, 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(0.01)'}), True, 'import tensorflow as tf\n'), (85, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (116, 'tensorflow.identity', 'tf.identity', (['batch_mean'], {}), True, 'import tensorflow as tf\n'), (116, 'tensorflow.identity', 'tf.identity', (['batch_var'], {}), True, 'import tensorflow as tf\n'), (169, 'tensorflow.identity', 'tf.identity', (['batch_mean'], {}), True, 'import tensorflow as tf\n'), (169, 'tensorflow.identity', 'tf.identity', (['batch_var'], {}), True, 'import tensorflow as tf\n'), (243, 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[1, 1, num_channels_in, output_length]'], {'stddev': '(0.1)', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n')]
jayroxis/quadratic-residual-networks
eeb9b0a449b6ac8cd55f4bb2d11ce1d3071d975d
""" @author: Maziar Raissi """ import sys sys.path.insert(0, '../../Utilities/') import tensorflow as tf import numpy as np import matplotlib.pyplot as plt import time import scipy.io from plotting import newfig, savefig from mpl_toolkits.mplot3d import Axes3D import matplotlib.gridspec as gridspec from mpl_toolkits.axes_grid1 import make_axes_locatable import argparse np.random.seed(1234) tf.set_random_seed(1234) parser = argparse.ArgumentParser() parser.add_argument('--mod', default='lite', type=str, help='the version of QRes network, can be "full" or "lite".') parser.add_argument('--epochs', default=50000, type=int, help='number of training epochs.') args = parser.parse_args() class PhysicsInformedNN: # Initialize the class def __init__(self, x0, u0, x1, u1, layers, dt, lb, ub, q): self.lb = lb self.ub = ub self.x0 = x0 self.x1 = x1 self.u0 = u0 self.u1 = u1 self.layers = layers self.dt = dt self.q = max(q,1) # Initialize NN self.weights, self.biases = self.initialize_NN(layers) # Initialize parameters self.lambda_1 = tf.Variable([0.0], dtype=tf.float32) self.lambda_2 = tf.Variable([-6.0], dtype=tf.float32) # Load IRK weights tmp = np.float32(np.loadtxt('../../Utilities/IRK_weights/Butcher_IRK%d.txt' % (q), ndmin = 2)) weights = np.reshape(tmp[0:q**2+q], (q+1,q)) self.IRK_alpha = weights[0:-1,:] self.IRK_beta = weights[-1:,:] self.IRK_times = tmp[q**2+q:] # tf placeholders and graph self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)) self.x0_tf = tf.placeholder(tf.float32, shape=(None, self.x0.shape[1])) self.x1_tf = tf.placeholder(tf.float32, shape=(None, self.x1.shape[1])) self.u0_tf = tf.placeholder(tf.float32, shape=(None, self.u0.shape[1])) self.u1_tf = tf.placeholder(tf.float32, shape=(None, self.u1.shape[1])) self.dummy_x0_tf = tf.placeholder(tf.float32, shape=(None, self.q)) # dummy variable for fwd_gradients self.dummy_x1_tf = tf.placeholder(tf.float32, shape=(None, self.q)) # dummy variable for fwd_gradients self.U0_pred = self.net_U0(self.x0_tf) # N0 x q self.U1_pred = self.net_U1(self.x1_tf) # N1 x q self.loss = tf.reduce_sum(tf.square(self.u0_tf - self.U0_pred)) + \ tf.reduce_sum(tf.square(self.u1_tf - self.U1_pred)) self.optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.loss, method = 'L-BFGS-B', options = {'maxiter': 50000, 'maxfun': 50000, 'maxcor': 50, 'maxls': 50, 'ftol' : 1.0 * np.finfo(float).eps}) self.optimizer_Adam = tf.compat.v1.train.AdamOptimizer() self.train_op_Adam = self.optimizer_Adam.minimize(self.loss) init = tf.global_variables_initializer() self.sess.run(init) self.loss_log = [] def initialize_NN(self, layers): weights = [] biases = [] num_layers = len(layers) for l in range(0,num_layers-1): W1 = self.xavier_init(size=[layers[l], layers[l+1]]) W2 = self.xavier_init(size=[layers[l], layers[l+1]]) b = tf.Variable(tf.zeros([1,layers[l+1]], dtype=tf.float32), dtype=tf.float32) weights.append((W1, W2)) biases.append(b) return weights, biases def xavier_init(self, size): in_dim = size[0] out_dim = size[1] xavier_stddev = np.sqrt(2/(in_dim + out_dim)) return tf.Variable(tf.truncated_normal([in_dim, out_dim], stddev=xavier_stddev), dtype=tf.float32) def neural_net(self, X, weights, biases): num_layers = len(weights) + 1 H = 2.0*(X - self.lb)/(self.ub - self.lb) - 1.0 for l in range(0,num_layers-2): W1, W2 = weights[l] b = biases[l] H1 = tf.add(tf.matmul(H, W1), b) H2 = tf.matmul(H, W2) H = tf.tanh(tf.add(H1 * H2, H1)) W1, W2 = weights[-1] b = biases[-1] H1 = tf.add(tf.matmul(H, W1), b) H2 = tf.matmul(H, W2) 
Y = tf.add(H1 * H2, H1) return Y def fwd_gradients_0(self, U, x): g = tf.gradients(U, x, grad_ys=self.dummy_x0_tf)[0] return tf.gradients(g, self.dummy_x0_tf)[0] def fwd_gradients_1(self, U, x): g = tf.gradients(U, x, grad_ys=self.dummy_x1_tf)[0] return tf.gradients(g, self.dummy_x1_tf)[0] def net_U0(self, x): lambda_1 = self.lambda_1 lambda_2 = tf.exp(self.lambda_2) U = self.neural_net(x, self.weights, self.biases) U_x = self.fwd_gradients_0(U, x) U_xx = self.fwd_gradients_0(U_x, x) U_xxx = self.fwd_gradients_0(U_xx, x) F = -lambda_1*U*U_x - lambda_2*U_xxx U0 = U - self.dt*tf.matmul(F, self.IRK_alpha.T) return U0 def net_U1(self, x): lambda_1 = self.lambda_1 lambda_2 = tf.exp(self.lambda_2) U = self.neural_net(x, self.weights, self.biases) U_x = self.fwd_gradients_1(U, x) U_xx = self.fwd_gradients_1(U_x, x) U_xxx = self.fwd_gradients_1(U_xx, x) F = -lambda_1*U*U_x - lambda_2*U_xxx U1 = U + self.dt*tf.matmul(F, (self.IRK_beta - self.IRK_alpha).T) return U1 def callback(self, loss): print('Loss:', loss) self.loss_log.append(loss) def train(self, nIter): tf_dict = {self.x0_tf: self.x0, self.u0_tf: self.u0, self.x1_tf: self.x1, self.u1_tf: self.u1, self.dummy_x0_tf: np.ones((self.x0.shape[0], self.q)), self.dummy_x1_tf: np.ones((self.x1.shape[0], self.q))} start_time = time.time() for it in range(nIter): self.sess.run(self.train_op_Adam, tf_dict) # Print if it % 10 == 0: elapsed = time.time() - start_time loss_value = self.sess.run(self.loss, tf_dict) lambda_1_value = self.sess.run(self.lambda_1) lambda_2_value = np.exp(self.sess.run(self.lambda_2)) print('It: %d, Loss: %.3e, l1: %.3f, l2: %.5f, Time: %.2f' % (it, loss_value, lambda_1_value, lambda_2_value, elapsed)) self.loss_log.append(loss_value) start_time = time.time() self.optimizer.minimize(self.sess, feed_dict = tf_dict, fetches = [self.loss], loss_callback = self.callback) def predict(self, x_star): U0_star = self.sess.run(self.U0_pred, {self.x0_tf: x_star, self.dummy_x0_tf: np.ones((x_star.shape[0], self.q))}) U1_star = self.sess.run(self.U1_pred, {self.x1_tf: x_star, self.dummy_x1_tf: np.ones((x_star.shape[0], self.q))}) return U0_star, U1_star if __name__ == "__main__": q = 50 skip = 120 N0 = 199 N1 = 201 if args.mod == 'full': layers = [1, 35, 35, 35, 35, q] else: layers = [1, 20, 20, 20, 20, q] data = scipy.io.loadmat('../Data/KdV.mat') t_star = data['tt'].flatten()[:,None] x_star = data['x'].flatten()[:,None] Exact = np.real(data['uu']) idx_t = 40 ###################################################################### ######################## Noiseles Data ############################### ###################################################################### noise = 0.0 idx_x = np.random.choice(Exact.shape[0], N0, replace=False) x0 = x_star[idx_x,:] u0 = Exact[idx_x,idx_t][:,None] u0 = u0 + noise*np.std(u0)*np.random.randn(u0.shape[0], u0.shape[1]) idx_x = np.random.choice(Exact.shape[0], N1, replace=False) x1 = x_star[idx_x,:] u1 = Exact[idx_x,idx_t + skip][:,None] u1 = u1 + noise*np.std(u1)*np.random.randn(u1.shape[0], u1.shape[1]) dt = np.asscalar(t_star[idx_t+skip] - t_star[idx_t]) # Doman bounds lb = x_star.min(0) ub = x_star.max(0) model = PhysicsInformedNN(x0, u0, x1, u1, layers, dt, lb, ub, q) model.train(nIter = args.epochs) U0_pred, U1_pred = model.predict(x_star) lambda_1_value = model.sess.run(model.lambda_1) lambda_2_value = np.exp(model.sess.run(model.lambda_2)) error_lambda_1 = np.abs(lambda_1_value - 1.0)/1.0 *100 error_lambda_2 = np.abs(lambda_2_value - 0.0025)/0.0025 * 100 print('Error lambda_1: %f%%' % 
(error_lambda_1)) print('Error lambda_2: %f%%' % (error_lambda_2)) loss_log = np.array(model.loss_log) np.save('loss/loss_clean_QRes.npy', loss_log) ###################################################################### ########################### Noisy Data ############################### ###################################################################### noise = 0.01 u0 = u0 + noise*np.std(u0)*np.random.randn(u0.shape[0], u0.shape[1]) u1 = u1 + noise*np.std(u1)*np.random.randn(u1.shape[0], u1.shape[1]) model = PhysicsInformedNN(x0, u0, x1, u1, layers, dt, lb, ub, q) model.train(nIter = args.epochs) U_pred = model.predict(x_star) U0_pred, U1_pred = model.predict(x_star) lambda_1_value_noisy = model.sess.run(model.lambda_1) lambda_2_value_noisy = np.exp(model.sess.run(model.lambda_2)) error_lambda_1_noisy = np.abs(lambda_1_value_noisy - 1.0)/1.0 *100 error_lambda_2_noisy = np.abs(lambda_2_value_noisy - 0.0025)/0.0025 * 100 print('Error lambda_1: %f%%' % (error_lambda_1_noisy)) print('Error lambda_2: %f%%' % (error_lambda_2_noisy)) loss_log = np.array(model.loss_log) np.save('loss/loss_noisy_QRes.npy', loss_log) ###################################################################### ############################# Plotting ############################### ###################################################################### fig, ax = newfig(1.0, 1.5) ax.axis('off') gs0 = gridspec.GridSpec(1, 2) gs0.update(top=1-0.06, bottom=1-1/3+0.05, left=0.15, right=0.85, wspace=0) ax = plt.subplot(gs0[:, :]) h = ax.imshow(Exact, interpolation='nearest', cmap='rainbow', extent=[t_star.min(),t_star.max(), lb[0], ub[0]], origin='lower', aspect='auto') divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) fig.colorbar(h, cax=cax) line = np.linspace(x_star.min(), x_star.max(), 2)[:,None] ax.plot(t_star[idx_t]*np.ones((2,1)), line, 'w-', linewidth = 1.0) ax.plot(t_star[idx_t + skip]*np.ones((2,1)), line, 'w-', linewidth = 1.0) ax.set_xlabel('$t$') ax.set_ylabel('$x$') ax.set_title('$u(t,x)$', fontsize = 10) gs1 = gridspec.GridSpec(1, 2) gs1.update(top=1-1/3-0.1, bottom=1-2/3, left=0.15, right=0.85, wspace=0.5) ax = plt.subplot(gs1[0, 0]) ax.plot(x_star,Exact[:,idx_t][:,None], 'b', linewidth = 2, label = 'Exact') ax.plot(x0, u0, 'rx', linewidth = 2, label = 'Data') ax.set_xlabel('$x$') ax.set_ylabel('$u(t,x)$') ax.set_title('$t = %.2f$\n%d trainng data' % (t_star[idx_t], u0.shape[0]), fontsize = 10) ax = plt.subplot(gs1[0, 1]) ax.plot(x_star,Exact[:,idx_t + skip][:,None], 'b', linewidth = 2, label = 'Exact') ax.plot(x1, u1, 'rx', linewidth = 2, label = 'Data') ax.set_xlabel('$x$') ax.set_ylabel('$u(t,x)$') ax.set_title('$t = %.2f$\n%d trainng data' % (t_star[idx_t+skip], u1.shape[0]), fontsize = 10) ax.legend(loc='upper center', bbox_to_anchor=(-0.3, -0.3), ncol=2, frameon=False) gs2 = gridspec.GridSpec(1, 2) gs2.update(top=1-2/3-0.05, bottom=0, left=0.15, right=0.85, wspace=0.0) ax = plt.subplot(gs2[0, 0]) ax.axis('off') s1 = r'$\begin{tabular}{ |c|c| } \hline Correct PDE & $u_t + u u_x + 0.0025 u_{xxx} = 0$ \\ \hline Identified PDE (clean data) & ' s2 = r'$u_t + %.3f u u_x + %.7f u_{xxx} = 0$ \\ \hline ' % (lambda_1_value, lambda_2_value) s3 = r'Identified PDE (1\% noise) & ' s4 = r'$u_t + %.3f u u_x + %.7f u_{xxx} = 0$ \\ \hline ' % (lambda_1_value_noisy, lambda_2_value_noisy) s5 = r'\end{tabular}$' s = s1+s2+s3+s4+s5 ax.text(-0.1,0.2,s) savefig('./figures/KdV') with open('results.txt', 'w') as f: s = 'Error lambda_1: %f%%\n' % (error_lambda_1) + 'Error lambda_2: %f%%\n' 
% (error_lambda_2) + 'Error lambda_1: %f%%\n' % (error_lambda_1_noisy) +'Error lambda_2: %f%%' % (error_lambda_2_noisy) print(s) f.write(s)
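# Editor's note: the block below is an added, NumPy-only illustration, not
# part of the original script. It restates the quadratic residual ("QRes")
# layer used in neural_net() above,
#     H <- tanh(H1 * H2 + H1),  with  H1 = H W1 + b  and  H2 = H W2,
# on arbitrary demo shapes; it is a sketch of the forward rule only.
def qres_layer_numpy(H, W1, W2, b):
    H1 = H @ W1 + b               # affine branch
    H2 = H @ W2                   # linear branch (no bias)
    return np.tanh(H1 * H2 + H1)  # quadratic interaction plus residual term


# Example: one layer mapping 1 input feature to 20 hidden units.
# out = qres_layer_numpy(np.random.randn(5, 1), np.random.randn(1, 20),
#                        np.random.randn(1, 20), np.zeros((1, 20)))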
[ "numpy.sqrt", "tensorflow.zeros", "numpy.random.randn", "numpy.asscalar", "tensorflow.compat.v1.train.AdamOptimizer", "tensorflow.Variable", "numpy.reshape", "tensorflow.gradients", "numpy.save", "numpy.finfo", "tensorflow.ConfigProto", "numpy.real", "matplotlib.pyplot.subplot", "tensorflow.add", "matplotlib.gridspec.GridSpec", "tensorflow.square", "numpy.std", "tensorflow.matmul", "tensorflow.truncated_normal", "numpy.random.choice", "tensorflow.placeholder", "tensorflow.exp", "tensorflow.global_variables_initializer", "tensorflow.set_random_seed", "numpy.array", "numpy.abs", "numpy.random.seed", "numpy.ones", "numpy.loadtxt" ]
QRes/main/discrete_time_identification (KdV)/KdV.py
[(6, 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../../Utilities/"""'], {}), False, 'import sys\n'), (20, 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), True, 'import numpy as np\n'), (21, 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(1234)'], {}), True, 'import tensorflow as tf\n'), (24, 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), False, 'import argparse\n'), (214, 'numpy.real', 'np.real', (["data['uu']"], {}), True, 'import numpy as np\n'), (223, 'numpy.random.choice', 'np.random.choice', (['Exact.shape[0]', 'N0'], {'replace': '(False)'}), True, 'import numpy as np\n'), (228, 'numpy.random.choice', 'np.random.choice', (['Exact.shape[0]', 'N1'], {'replace': '(False)'}), True, 'import numpy as np\n'), (233, 'numpy.asscalar', 'np.asscalar', (['(t_star[idx_t + skip] - t_star[idx_t])'], {}), True, 'import numpy as np\n'), (252, 'numpy.array', 'np.array', (['model.loss_log'], {}), True, 'import numpy as np\n'), (253, 'numpy.save', 'np.save', (['"""loss/loss_clean_QRes.npy"""', 'loss_log'], {}), True, 'import numpy as np\n'), (278, 'numpy.array', 'np.array', (['model.loss_log'], {}), True, 'import numpy as np\n'), (279, 'numpy.save', 'np.save', (['"""loss/loss_noisy_QRes.npy"""', 'loss_log'], {}), True, 'import numpy as np\n'), (285, 'plotting.newfig', 'newfig', (['(1.0)', '(1.5)'], {}), False, 'from plotting import newfig, savefig\n'), (288, 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(2)'], {}), True, 'import matplotlib.gridspec as gridspec\n'), (290, 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs0[:, :]'], {}), True, 'import matplotlib.pyplot as plt\n'), (295, 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), (306, 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(2)'], {}), True, 'import matplotlib.gridspec as gridspec\n'), (309, 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs1[0, 0]'], {}), True, 'import matplotlib.pyplot as plt\n'), (316, 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs1[0, 1]'], {}), True, 'import matplotlib.pyplot as plt\n'), (324, 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(2)'], {}), True, 'import matplotlib.gridspec as gridspec\n'), (327, 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs2[0, 0]'], {}), True, 'import matplotlib.pyplot as plt\n'), (337, 'plotting.savefig', 'savefig', (['"""./figures/KdV"""'], {}), False, 'from plotting import newfig, savefig\n'), (51, 'tensorflow.Variable', 'tf.Variable', (['[0.0]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (52, 'tensorflow.Variable', 'tf.Variable', (['[-6.0]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (56, 'numpy.reshape', 'np.reshape', (['tmp[0:q ** 2 + q]', '(q + 1, q)'], {}), True, 'import numpy as np\n'), (65, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, self.x0.shape[1])'}), True, 'import tensorflow as tf\n'), (66, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, self.x1.shape[1])'}), True, 'import tensorflow as tf\n'), (67, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, self.u0.shape[1])'}), True, 'import tensorflow as tf\n'), (68, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, self.u1.shape[1])'}), True, 'import tensorflow as tf\n'), (69, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, self.q)'}), True, 
'import tensorflow as tf\n'), (70, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, self.q)'}), True, 'import tensorflow as tf\n'), (86, 'tensorflow.compat.v1.train.AdamOptimizer', 'tf.compat.v1.train.AdamOptimizer', ([], {}), True, 'import tensorflow as tf\n'), (89, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (109, 'numpy.sqrt', 'np.sqrt', (['(2 / (in_dim + out_dim))'], {}), True, 'import numpy as np\n'), (125, 'tensorflow.matmul', 'tf.matmul', (['H', 'W2'], {}), True, 'import tensorflow as tf\n'), (126, 'tensorflow.add', 'tf.add', (['(H1 * H2)', 'H1'], {}), True, 'import tensorflow as tf\n'), (139, 'tensorflow.exp', 'tf.exp', (['self.lambda_2'], {}), True, 'import tensorflow as tf\n'), (150, 'tensorflow.exp', 'tf.exp', (['self.lambda_2'], {}), True, 'import tensorflow as tf\n'), (169, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (55, 'numpy.loadtxt', 'np.loadtxt', (["('../../Utilities/IRK_weights/Butcher_IRK%d.txt' % q)"], {'ndmin': '(2)'}), True, 'import numpy as np\n'), (110, 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[in_dim, out_dim]'], {'stddev': 'xavier_stddev'}), True, 'import tensorflow as tf\n'), (120, 'tensorflow.matmul', 'tf.matmul', (['H', 'W2'], {}), True, 'import tensorflow as tf\n'), (124, 'tensorflow.matmul', 'tf.matmul', (['H', 'W1'], {}), True, 'import tensorflow as tf\n'), (130, 'tensorflow.gradients', 'tf.gradients', (['U', 'x'], {'grad_ys': 'self.dummy_x0_tf'}), True, 'import tensorflow as tf\n'), (131, 'tensorflow.gradients', 'tf.gradients', (['g', 'self.dummy_x0_tf'], {}), True, 'import tensorflow as tf\n'), (134, 'tensorflow.gradients', 'tf.gradients', (['U', 'x'], {'grad_ys': 'self.dummy_x1_tf'}), True, 'import tensorflow as tf\n'), (135, 'tensorflow.gradients', 'tf.gradients', (['g', 'self.dummy_x1_tf'], {}), True, 'import tensorflow as tf\n'), (166, 'numpy.ones', 'np.ones', (['(self.x0.shape[0], self.q)'], {}), True, 'import numpy as np\n'), (167, 'numpy.ones', 'np.ones', (['(self.x1.shape[0], self.q)'], {}), True, 'import numpy as np\n'), (226, 'numpy.random.randn', 'np.random.randn', (['u0.shape[0]', 'u0.shape[1]'], {}), True, 'import numpy as np\n'), (231, 'numpy.random.randn', 'np.random.randn', (['u1.shape[0]', 'u1.shape[1]'], {}), True, 'import numpy as np\n'), (247, 'numpy.abs', 'np.abs', (['(lambda_1_value - 1.0)'], {}), True, 'import numpy as np\n'), (248, 'numpy.abs', 'np.abs', (['(lambda_2_value - 0.0025)'], {}), True, 'import numpy as np\n'), (260, 'numpy.random.randn', 'np.random.randn', (['u0.shape[0]', 'u0.shape[1]'], {}), True, 'import numpy as np\n'), (261, 'numpy.random.randn', 'np.random.randn', (['u1.shape[0]', 'u1.shape[1]'], {}), True, 'import numpy as np\n'), (273, 'numpy.abs', 'np.abs', (['(lambda_1_value_noisy - 1.0)'], {}), True, 'import numpy as np\n'), (274, 'numpy.abs', 'np.abs', (['(lambda_2_value_noisy - 0.0025)'], {}), True, 'import numpy as np\n'), (300, 'numpy.ones', 'np.ones', (['(2, 1)'], {}), True, 'import numpy as np\n'), (301, 'numpy.ones', 'np.ones', (['(2, 1)'], {}), True, 'import numpy as np\n'), (62, 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'log_device_placement': '(True)'}), True, 'import tensorflow as tf\n'), (75, 'tensorflow.square', 'tf.square', (['(self.u0_tf - self.U0_pred)'], {}), True, 'import tensorflow as tf\n'), (76, 'tensorflow.square', 'tf.square', (['(self.u1_tf - self.U1_pred)'], {}), True, 'import tensorflow as tf\n'), 
(101, 'tensorflow.zeros', 'tf.zeros', (['[1, layers[l + 1]]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (119, 'tensorflow.matmul', 'tf.matmul', (['H', 'W1'], {}), True, 'import tensorflow as tf\n'), (121, 'tensorflow.add', 'tf.add', (['(H1 * H2)', 'H1'], {}), True, 'import tensorflow as tf\n'), (145, 'tensorflow.matmul', 'tf.matmul', (['F', 'self.IRK_alpha.T'], {}), True, 'import tensorflow as tf\n'), (156, 'tensorflow.matmul', 'tf.matmul', (['F', '(self.IRK_beta - self.IRK_alpha).T'], {}), True, 'import tensorflow as tf\n'), (182, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (191, 'numpy.ones', 'np.ones', (['(x_star.shape[0], self.q)'], {}), True, 'import numpy as np\n'), (192, 'numpy.ones', 'np.ones', (['(x_star.shape[0], self.q)'], {}), True, 'import numpy as np\n'), (226, 'numpy.std', 'np.std', (['u0'], {}), True, 'import numpy as np\n'), (231, 'numpy.std', 'np.std', (['u1'], {}), True, 'import numpy as np\n'), (260, 'numpy.std', 'np.std', (['u0'], {}), True, 'import numpy as np\n'), (261, 'numpy.std', 'np.std', (['u1'], {}), True, 'import numpy as np\n'), (175, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (84, 'numpy.finfo', 'np.finfo', (['float'], {}), True, 'import numpy as np\n')]
maggie0830/segan
c88a08d3299fe6b3627550a4fdb036b179a6537a
from __future__ import print_function
import tensorflow as tf
from ops import *
import numpy as np


def pre_emph(x, coeff=0.95):
    x0 = tf.reshape(x[0], [1,])
    diff = x[1:] - coeff * x[:-1]
    concat = tf.concat(0, [x0, diff])
    return concat


def de_emph(y, coeff=0.95):
    if coeff <= 0:
        return y
    x = np.zeros(y.shape[0], dtype=np.float32)
    x[0] = y[0]
    for n in range(1, y.shape[0], 1):
        x[n] = coeff * x[n - 1] + y[n]
    return x


def read_and_decode(filename_queue, canvas_size, preemph=0.):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            'wav_raw': tf.FixedLenFeature([], tf.string),
            'noisy_raw': tf.FixedLenFeature([], tf.string),
        })
    wave = tf.decode_raw(features['wav_raw'], tf.int32)
    wave.set_shape(canvas_size)
    wave = (2./65535.) * tf.cast((wave - 32767), tf.float32) + 1.
    noisy = tf.decode_raw(features['noisy_raw'], tf.int32)
    noisy.set_shape(canvas_size)
    noisy = (2./65535.) * tf.cast((noisy - 32767), tf.float32) + 1.

    if preemph > 0:
        wave = tf.cast(pre_emph(wave, preemph), tf.float32)
        noisy = tf.cast(pre_emph(noisy, preemph), tf.float32)

    return wave, noisy
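# Editor's note: the block below is an added round-trip check, not part of
# the original file. It shows that de_emph() (NumPy) inverts pre_emph()
# (TensorFlow) for the same coefficient; it assumes the pre-1.0 TensorFlow
# API this file targets (tf.concat(axis, values)) and a 16-sample signal.
if __name__ == '__main__':
    signal = np.random.randn(16).astype(np.float32)
    emphasized = pre_emph(tf.constant(signal), coeff=0.95)
    with tf.Session() as sess:
        restored = de_emph(sess.run(emphasized), coeff=0.95)
    print(np.allclose(signal, restored, atol=1e-5))  # expected: True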
[ "tensorflow.concat", "tensorflow.FixedLenFeature", "tensorflow.decode_raw", "tensorflow.reshape", "tensorflow.cast", "tensorflow.TFRecordReader", "numpy.zeros" ]
data_loader.py
[(8, 'tensorflow.reshape', 'tf.reshape', (['x[0]', '[1]'], {}), True, 'import tensorflow as tf\n'), (10, 'tensorflow.concat', 'tf.concat', (['(0)', '[x0, diff]'], {}), True, 'import tensorflow as tf\n'), (16, 'numpy.zeros', 'np.zeros', (['y.shape[0]'], {'dtype': 'np.float32'}), True, 'import numpy as np\n'), (23, 'tensorflow.TFRecordReader', 'tf.TFRecordReader', ([], {}), True, 'import tensorflow as tf\n'), (31, 'tensorflow.decode_raw', 'tf.decode_raw', (["features['wav_raw']", 'tf.int32'], {}), True, 'import tensorflow as tf\n'), (34, 'tensorflow.decode_raw', 'tf.decode_raw', (["features['noisy_raw']", 'tf.int32'], {}), True, 'import tensorflow as tf\n'), (33, 'tensorflow.cast', 'tf.cast', (['(wave - 32767)', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (36, 'tensorflow.cast', 'tf.cast', (['(noisy - 32767)', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (28, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.string'], {}), True, 'import tensorflow as tf\n'), (29, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.string'], {}), True, 'import tensorflow as tf\n')]
stefan-falk/tensor2tensor
7ea91197843399ddf46ebf78c9d42c2a573a4335
# coding=utf-8 # Copyright 2019 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Reinforcement learning models and parameters.""" import collections import functools import operator import gym import six from tensor2tensor.data_generators import gym_env from tensor2tensor.data_generators import problem from tensor2tensor.data_generators import video_utils from tensor2tensor.envs import tic_tac_toe_env from tensor2tensor.layers import common_hparams from tensor2tensor.layers import common_layers from tensor2tensor.layers import discretization from tensor2tensor.layers import modalities from tensor2tensor.models.video import basic_deterministic_params from tensor2tensor.models.video import basic_stochastic from tensor2tensor.rl.envs.py_func_batch_env import PyFuncBatchEnv from tensor2tensor.rl.envs.simulated_batch_env import SimulatedBatchEnv from tensor2tensor.rl.envs.simulated_batch_gym_env import SimulatedBatchGymEnv from tensor2tensor.utils import hparam from tensor2tensor.utils import registry from tensor2tensor.utils import t2t_model from tensor2tensor.utils import trainer_lib import tensorflow as tf import tensorflow_probability as tfp @registry.register_hparams def ppo_base_v1(): """Set of hyperparameters.""" hparams = common_hparams.basic_params1() hparams.learning_rate_schedule = "constant" hparams.learning_rate_constant = 1e-4 hparams.clip_grad_norm = 0.5 hparams.weight_decay = 0 # If set, extends the LR warmup to all epochs except the final one. hparams.add_hparam("lr_decay_in_final_epoch", False) hparams.add_hparam("init_mean_factor", 0.1) hparams.add_hparam("init_logstd", 0.1) hparams.add_hparam("policy_layers", (100, 100)) hparams.add_hparam("value_layers", (100, 100)) hparams.add_hparam("clipping_coef", 0.2) hparams.add_hparam("gae_gamma", 0.99) hparams.add_hparam("gae_lambda", 0.95) hparams.add_hparam("entropy_loss_coef", 0.01) hparams.add_hparam("value_loss_coef", 1) hparams.add_hparam("optimization_epochs", 15) hparams.add_hparam("epoch_length", 200) hparams.add_hparam("epochs_num", 2000) hparams.add_hparam("eval_every_epochs", 10) hparams.add_hparam("save_models_every_epochs", 30) hparams.add_hparam("optimization_batch_size", 50) hparams.add_hparam("intrinsic_reward_scale", 0.) hparams.add_hparam("logits_clip", 0.0) hparams.add_hparam("dropout_ppo", 0.1) hparams.add_hparam("effective_num_agents", None) hparams.add_hparam("use_epochs", True) # TODO(afrozm): Clean this up, this is used in PPO learner to get modalities. 
hparams.add_hparam("policy_problem_name", "dummy_policy_problem") return hparams @registry.register_hparams def basic_policy_parameters(): wrappers = None return hparam.HParams(wrappers=wrappers) @registry.register_hparams def ppo_discrete_action_base(): hparams = ppo_base_v1() hparams.add_hparam("policy_network", "feed_forward_categorical_policy") return hparams @registry.register_hparams def discrete_random_action_base(): hparams = common_hparams.basic_params1() hparams.add_hparam("policy_network", "random_policy") return hparams @registry.register_hparams def ppo_atari_base(): """Pong base parameters.""" hparams = ppo_discrete_action_base() hparams.learning_rate_constant = 1e-4 hparams.epoch_length = 200 hparams.gae_gamma = 0.985 hparams.gae_lambda = 0.985 hparams.entropy_loss_coef = 0.003 hparams.value_loss_coef = 1 hparams.optimization_epochs = 3 hparams.epochs_num = 1000 hparams.policy_network = "feed_forward_cnn_small_categorical_policy" hparams.clipping_coef = 0.2 hparams.optimization_batch_size = 20 hparams.clip_grad_norm = 0.5 return hparams @registry.register_hparams def ppo_original_params(): """Parameters based on the original PPO paper.""" hparams = ppo_atari_base() hparams.learning_rate_constant = 2.5e-4 hparams.gae_gamma = 0.99 hparams.gae_lambda = 0.95 hparams.clipping_coef = 0.1 hparams.value_loss_coef = 1 hparams.entropy_loss_coef = 0.01 hparams.eval_every_epochs = 200 hparams.dropout_ppo = 0.1 # The parameters below are modified to accommodate short epoch_length (which # is needed for model based rollouts). hparams.epoch_length = 50 hparams.optimization_batch_size = 20 return hparams @registry.register_hparams def ppo_dist_params(): """Parameters based on the original paper modified for distributional RL.""" hparams = ppo_original_params() hparams.learning_rate_constant = 1e-3 return hparams @registry.register_hparams def ppo_original_tiny(): """Parameters based on the original PPO paper, tiny version.""" hparams = ppo_original_params() hparams.epoch_length = 5 hparams.optimization_batch_size = 1 return hparams @registry.register_hparams def ppo_ttt_params(): """Parameters based on the original PPO paper.""" hparams = ppo_original_tiny() hparams.policy_network = "feed_forward_categorical_policy" hparams.policy_problem_name = "dummy_policy_problem_ttt" return hparams @registry.register_hparams def ppo_original_params_gamma95(): """Parameters based on the original PPO paper, changed gamma.""" hparams = ppo_original_params() hparams.gae_gamma = 0.95 return hparams @registry.register_hparams def ppo_original_params_gamma90(): """Parameters based on the original PPO paper, changed gamma.""" hparams = ppo_original_params() hparams.gae_gamma = 0.90 return hparams @registry.register_hparams def ppo_original_world_model(): """Atari parameters with world model as policy.""" hparams = ppo_original_params() hparams.policy_network = "next_frame_basic_deterministic" hparams_keys = hparams.values().keys() video_hparams = basic_deterministic_params.next_frame_basic_deterministic() for (name, value) in six.iteritems(video_hparams.values()): if name in hparams_keys: hparams.set_hparam(name, value) else: hparams.add_hparam(name, value) # Mostly to avoid decaying WM params when training the policy. 
  hparams.weight_decay = 0
  return hparams


@registry.register_hparams
def ppo_tiny_world_model():
  """Atari parameters with world model as policy."""
  hparams = ppo_original_params()
  hparams.policy_network = "next_frame_basic_deterministic"
  hparams_keys = hparams.values().keys()
  video_hparams = basic_deterministic_params.next_frame_tiny()
  for (name, value) in six.iteritems(video_hparams.values()):
    if name in hparams_keys:
      hparams.set_hparam(name, value)
    else:
      hparams.add_hparam(name, value)
  hparams.weight_decay = 0
  return hparams


@registry.register_hparams
def ppo_original_world_model_stochastic_discrete():
  """Atari parameters with stochastic discrete world model as policy."""
  hparams = ppo_original_params()
  hparams.policy_network = "next_frame_basic_stochastic_discrete"
  hparams_keys = hparams.values().keys()
  video_hparams = basic_stochastic.next_frame_basic_stochastic_discrete()
  for (name, value) in six.iteritems(video_hparams.values()):
    if name in hparams_keys:
      hparams.set_hparam(name, value)
    else:
      hparams.add_hparam(name, value)
  # To avoid OOM. Probably way too small.
  hparams.optimization_batch_size = 1
  hparams.weight_decay = 0
  return hparams


def make_real_env_fn(env):
  """Creates a function returning a given real env, in or out of graph.

  Args:
    env: Environment to return from the function.

  Returns:
    Function in_graph -> env.
  """
  return lambda in_graph: PyFuncBatchEnv(env) if in_graph else env


def make_simulated_env_fn(**env_kwargs):
  """Returns a function creating a simulated env, in or out of graph.

  Args:
    **env_kwargs: kwargs to pass to the simulated env constructor.

  Returns:
    Function in_graph -> env.
  """
  def env_fn(in_graph):
    class_ = SimulatedBatchEnv if in_graph else SimulatedBatchGymEnv
    return class_(**env_kwargs)
  return env_fn


# TODO(koz4k): Move this and the one below to rl_utils.
def make_simulated_env_kwargs(real_env, hparams, **extra_kwargs):
  """Extracts simulated env kwargs from real_env and loop hparams."""
  objs_and_attrs = [
      (real_env, [
          "reward_range", "observation_space", "action_space", "frame_height",
          "frame_width"
      ]),
      (hparams, ["frame_stack_size", "intrinsic_reward_scale"])
  ]
  kwargs = {
      attr: getattr(obj, attr)  # pylint: disable=g-complex-comprehension
      for (obj, attrs) in objs_and_attrs
      for attr in attrs
  }
  kwargs["model_name"] = hparams.generative_model
  kwargs["model_hparams"] = trainer_lib.create_hparams(
      hparams.generative_model_params
  )
  if hparams.wm_policy_param_sharing:
    kwargs["model_hparams"].optimizer_zero_grads = True
  kwargs.update(extra_kwargs)
  return kwargs


def make_simulated_env_fn_from_hparams(real_env, hparams, **extra_kwargs):
  """Creates a simulated env_fn."""
  return make_simulated_env_fn(
      **make_simulated_env_kwargs(real_env, hparams, **extra_kwargs)
  )


def get_policy(observations, hparams, action_space,
               distributional_size=1, epoch=-1):
  """Get a policy network.

  Args:
    observations: observations
    hparams: parameters
    action_space: action space
    distributional_size: optional number of buckets for distributional RL
    epoch: optional epoch number

  Returns:
    Tuple (action logits, value).
  """
  if not isinstance(action_space, gym.spaces.Discrete):
    raise ValueError("Expecting discrete action space.")

  obs_shape = common_layers.shape_list(observations)
  (frame_height, frame_width) = obs_shape[2:4]

  # TODO(afrozm): We have these dummy problems mainly for hparams, so cleanup
  # when possible and do this properly.
if hparams.policy_problem_name == "dummy_policy_problem_ttt": tf.logging.info("Using DummyPolicyProblemTTT for the policy.") policy_problem = tic_tac_toe_env.DummyPolicyProblemTTT() else: tf.logging.info("Using DummyPolicyProblem for the policy.") policy_problem = DummyPolicyProblem(action_space, frame_height, frame_width) trainer_lib.add_problem_hparams(hparams, policy_problem) hparams.force_full_predict = True model = registry.model(hparams.policy_network)( hparams, tf.estimator.ModeKeys.TRAIN ) try: num_target_frames = hparams.video_num_target_frames except AttributeError: num_target_frames = 1 target_value_shape_suffix = [num_target_frames] if distributional_size > 1: target_value_shape_suffix = [num_target_frames, distributional_size] features = { "inputs": observations, "epoch": tf.constant(epoch + 1), "input_action": tf.zeros(obs_shape[:2] + [1], dtype=tf.int32), "input_reward": tf.zeros(obs_shape[:2] + [1], dtype=tf.int32), "targets": tf.zeros(obs_shape[:1] + [num_target_frames] + obs_shape[2:]), "target_action": tf.zeros( obs_shape[:1] + [num_target_frames, 1], dtype=tf.int32), "target_reward": tf.zeros( obs_shape[:1] + [num_target_frames, 1], dtype=tf.int32), "target_policy": tf.zeros( obs_shape[:1] + [num_target_frames] + [action_space.n]), "target_value": tf.zeros( obs_shape[:1] + target_value_shape_suffix) } model.distributional_value_size = max(distributional_size, 1) model.use_epochs = hparams.use_epochs with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE): t2t_model.create_dummy_vars() (targets, _) = model(features) target_values = targets["target_value"][:, 0] if distributional_size > 1: target_values = targets["target_value"][:, :] return (targets["target_policy"][:, 0, :], target_values) @registry.register_hparams def ppo_pong_ae_base(): """Pong autoencoder base parameters.""" hparams = ppo_original_params() hparams.learning_rate_constant = 1e-4 hparams.network = "dense_bitwise_categorical_policy" return hparams @registry.register_hparams def dqn_atari_base(): # These params are based on agents/dqn/configs/dqn.gin # with some modifications taking into account our code return hparam.HParams( agent_gamma=0.99, agent_update_horizon=1, agent_min_replay_history=20000, # agent steps agent_update_period=4, agent_target_update_period=8000, # agent steps agent_epsilon_train=0.01, agent_epsilon_eval=0.001, agent_epsilon_decay_period=250000, # agent steps agent_generates_trainable_dones=True, optimizer_class="RMSProp", optimizer_learning_rate=0.00025, optimizer_decay=0.95, optimizer_momentum=0.0, optimizer_epsilon=0.00001, optimizer_centered=True, # TODO(kozak): change names maybe replay_buffer -> agent? # Also batch_size is now buffer_batch_size in _DQNAgent. 
      replay_buffer_replay_capacity=1000000,
      replay_buffer_buffer_batch_size=32,
      time_limit=27000,
      save_every_steps=50000,
      num_frames=int(20 * 1e6),
      # TODO(konradczechowski): this is not used in trainer_model_free; clean
      # this up after the evaluation refactor.
      eval_episodes_num=3,
  )


@registry.register_hparams
def dqn_original_params():
  """dqn_original_params."""
  hparams = dqn_atari_base()
  hparams.set_hparam("num_frames", int(1e6))
  return hparams


def rlmf_tiny_overrides():
  """Parameters to override for tiny setting excluding agent-related hparams."""
  return dict(
      max_num_noops=1,
      eval_max_num_noops=1,
      rl_env_max_episode_steps=7,
      eval_rl_env_max_episode_steps=7,
      eval_sampling_temps=[0.0, 1.0],
  )


@registry.register_hparams
def rlmf_original():
  return hparam.HParams(
      game="pong",
      sticky_actions=False,
      base_algo="ppo",
      base_algo_params="ppo_original_params",
      batch_size=16,
      eval_batch_size=2,
      frame_stack_size=4,
      eval_sampling_temps=[0.0, 0.2, 0.5, 0.8, 1.0, 2.0],
      max_num_noops=8,
      eval_max_num_noops=8,
      eval_rl_env_max_episode_steps=1000,
      resize_height_factor=2,
      resize_width_factor=2,
      distributional_size=1,  # In distributional RL, number of buckets.
      distributional_subscale=0.04,  # How to scale values to buckets.
      distributional_threshold=0.0,  # Optimism threshold for experiments.
      grayscale=0,
      rl_env_max_episode_steps=-1,
      # If set, use this as the gym env name, instead of changing game mode etc.
      rl_env_name="",
      # Controls whether we should derive observation space, do some
      # pre-processing etc. See T2TGymEnv._derive_observation_space.
      rl_should_derive_observation_space=True,
      aunused=0,  # unused param for multi-run settings.
  )


@registry.register_hparams
def rlmf_tictactoe():
  """Hparams for model-free PPO on tic-tac-toe."""
  hparams = rlmf_original()
  hparams.game = "tictactoe"
  hparams.rl_env_name = "T2TEnv-TicTacToeEnv-v0"
  # The env has no no-op action; with noops enabled it would also need a
  # `get_action_meanings` attribute, so disable them entirely.
hparams.eval_max_num_noops = 0 hparams.max_num_noops = 0 hparams.rl_should_derive_observation_space = False hparams.policy_network = "feed_forward_categorical_policy" hparams.base_algo_params = "ppo_ttt_params" # Number of last observations to feed to the agent hparams.frame_stack_size = 1 return hparams @registry.register_hparams def rlmf_base(): """Base set of hparams for model-free PPO.""" hparams = rlmf_original() hparams.add_hparam("ppo_epochs_num", 3000) hparams.add_hparam("ppo_eval_every_epochs", 100) return hparams @registry.register_ranged_hparams def rlmf_5runs(rhp): rhp.set_discrete("aunused", list(range(5))) @registry.register_ranged_hparams def rlmf_5runs_atari(rhp): rhp.set_categorical("game", gym_env.ATARI_GAMES_WITH_HUMAN_SCORE_NICE) rhp.set_discrete("aunused", list(range(5))) @registry.register_hparams def rlmf_dist(): """Distributional set of hparams for model-free PPO.""" hparams = rlmf_original() hparams.distributional_size = 1024 hparams.base_algo_params = "ppo_dist_params" return hparams @registry.register_hparams def rlmf_dist_threshold(): """Distributional set of hparams for model-free PPO.""" hparams = rlmf_dist() hparams.distributional_threshold = 0.5 return hparams @registry.register_hparams def rlmf_tiny(): """Tiny set of hparams for model-free PPO.""" hparams = rlmf_original() hparams = hparams.override_from_dict(rlmf_tiny_overrides()) hparams.batch_size = 2 hparams.base_algo_params = "ppo_original_tiny" hparams.add_hparam("ppo_epochs_num", 3) hparams.add_hparam("ppo_epoch_length", 2) return hparams @registry.register_hparams def rlmf_dqn_tiny(): """Tiny DQN params.""" hparams = rlmf_original() hparams = hparams.override_from_dict(rlmf_tiny_overrides()) hparams.batch_size = 1 hparams.base_algo = "dqn" hparams.base_algo_params = "dqn_original_params" hparams.add_hparam("dqn_num_frames", 128) hparams.add_hparam("dqn_save_every_steps", 128) hparams.add_hparam("dqn_replay_buffer_replay_capacity", 100) hparams.add_hparam("dqn_agent_min_replay_history", 10) return hparams @registry.register_hparams def rlmf_eval(): """Eval set of hparams for model-free PPO.""" hparams = rlmf_original() hparams.batch_size = 16 hparams.eval_batch_size = 32 hparams.eval_episodes_num = 2 hparams.eval_sampling_temps = [0.5, 0.0, 1.0] hparams.eval_rl_env_max_episode_steps = 40000 hparams.add_hparam("ppo_epoch_length", 128) hparams.add_hparam("ppo_optimization_batch_size", 32) hparams.add_hparam("ppo_epochs_num", 10000) hparams.add_hparam("ppo_eval_every_epochs", 500) hparams.add_hparam("attempt", 0) hparams.add_hparam("moe_loss_coef", 0) return hparams @registry.register_hparams def rlmf_eval_dist(): """Distributional set of hparams for model-free PPO.""" hparams = rlmf_eval() hparams.distributional_size = 4096 hparams.distributional_subscale = 0.08 hparams.base_algo_params = "ppo_dist_params" return hparams @registry.register_hparams def rlmf_eval_dist_threshold(): """Distributional set of hparams for model-free PPO.""" hparams = rlmf_eval_dist() hparams.distributional_threshold = 0.5 return hparams class PolicyBase(t2t_model.T2TModel): def __init__(self, *args, **kwargs): super(PolicyBase, self).__init__(*args, **kwargs) self.distributional_value_size = 1 self.use_epochs = False def loss(self, *args, **kwargs): return 0.0 # TODO(lukaszkaiser): move this class or clean up the whole file. 
class DummyPolicyProblem(video_utils.VideoProblem): """Dummy Problem for running the policy.""" def __init__(self, action_space, frame_height, frame_width): super(DummyPolicyProblem, self).__init__() self.action_space = action_space self._frame_height = frame_height self._frame_width = frame_width @property def frame_height(self): """Height of each frame.""" return self._frame_height @property def frame_width(self): """Width of each frame.""" return self._frame_width @property def num_actions(self): return self.action_space.n def hparams(self, defaults, unused_model_hparams): p = defaults p.modality = { "inputs": modalities.ModalityType.VIDEO, "input_action": modalities.ModalityType.SYMBOL_WEIGHTS_ALL, "input_reward": modalities.ModalityType.SYMBOL_WEIGHTS_ALL, "targets": modalities.ModalityType.VIDEO, "target_action": modalities.ModalityType.SYMBOL_WEIGHTS_ALL, "target_reward": modalities.ModalityType.SYMBOL_WEIGHTS_ALL, "target_policy": modalities.ModalityType.IDENTITY, "target_value": modalities.ModalityType.IDENTITY, } p.vocab_size = { "inputs": 256, "input_action": self.num_actions, "input_reward": 3, "targets": 256, "target_action": self.num_actions, "target_reward": 3, "target_policy": None, "target_value": None, } p.input_space_id = problem.SpaceID.IMAGE p.target_space_id = problem.SpaceID.IMAGE NetworkOutput = collections.namedtuple( "NetworkOutput", "policy, value, action_postprocessing") # TODO(koz4k): Translate it to T2TModel or remove. def feed_forward_gaussian_fun(action_space, config, observations): """Feed-forward Gaussian.""" if not isinstance(action_space, gym.spaces.box.Box): raise ValueError("Expecting continuous action space.") mean_weights_initializer = tf.initializers.variance_scaling( scale=config.init_mean_factor) logstd_initializer = tf.random_normal_initializer(config.init_logstd, 1e-10) flat_observations = tf.reshape(observations, [ tf.shape(observations)[0], tf.shape(observations)[1], functools.reduce(operator.mul, observations.shape.as_list()[2:], 1)]) with tf.variable_scope("network_parameters"): with tf.variable_scope("policy"): x = flat_observations for size in config.policy_layers: x = tf.layers.dense(x, size, activation=tf.nn.relu) mean = tf.layers.dense( x, action_space.shape[0], activation=tf.tanh, kernel_initializer=mean_weights_initializer) logstd = tf.get_variable( "logstd", mean.shape[2:], tf.float32, logstd_initializer) logstd = tf.tile( logstd[None, None], [tf.shape(mean)[0], tf.shape(mean)[1]] + [1] * (mean.shape.ndims - 2)) with tf.variable_scope("value"): x = flat_observations for size in config.value_layers: x = tf.layers.dense(x, size, activation=tf.nn.relu) value = tf.layers.dense(x, 1)[..., 0] mean = tf.check_numerics(mean, "mean") logstd = tf.check_numerics(logstd, "logstd") value = tf.check_numerics(value, "value") policy = tfp.distributions.MultivariateNormalDiag(mean, tf.exp(logstd)) return NetworkOutput(policy, value, lambda a: tf.clip_by_value(a, -2., 2)) def clip_logits(logits, config): logits_clip = getattr(config, "logits_clip", 0.) 
if logits_clip > 0: min_logit = tf.reduce_min(logits) return tf.minimum(logits - min_logit, logits_clip) else: return logits @registry.register_model class FeedForwardCategoricalPolicy(PolicyBase): """Feed-forward categorical.""" def body(self, features): observations = features["inputs_raw"] observations = tf.cast(observations, tf.float32) flat_observations = tf.layers.flatten(observations) with tf.variable_scope("policy"): x = flat_observations for size in self.hparams.policy_layers: x = tf.layers.dense(x, size, activation=tf.nn.relu) logits = tf.layers.dense(x, self.hparams.problem.num_actions) logits = tf.expand_dims(logits, axis=1) with tf.variable_scope("value"): x = flat_observations for size in self.hparams.value_layers: x = tf.layers.dense(x, size, activation=tf.nn.relu) value = tf.layers.dense(x, 1) logits = clip_logits(logits, self.hparams) return {"target_policy": logits, "target_value": value} @registry.register_model class FeedForwardCnnSmallCategoricalPolicy(PolicyBase): """Small cnn network with categorical output.""" def body(self, features): observations = features["inputs_raw"] # Axis 0 - Batch. # Axis 1 - Input Frames, 4 frames. # Axis 2, 3 - Height & Width. # Axis 4 - Channels RGB, 3 colours. x = tf.transpose(observations, [0, 2, 3, 1, 4]) x_shape = common_layers.shape_list(x) x = tf.reshape(x, x_shape[:-2] + [-1]) dropout = getattr(self.hparams, "dropout_ppo", 0.0) with tf.variable_scope("feed_forward_cnn_small"): x = tf.cast(x, tf.float32) / 255.0 x = tf.layers.conv2d(x, 32, (5, 5), strides=(2, 2), activation=tf.nn.relu, padding="same") x = tf.layers.conv2d(x, 32, (5, 5), strides=(2, 2), activation=tf.nn.relu, padding="same") flat_x = tf.layers.flatten(x) if self.use_epochs: epoch = features["epoch"] + tf.zeros([x_shape[0]], dtype=tf.int32) # Randomly set epoch to 0 in some cases as that's the inference value. rand = tf.random.uniform([x_shape[0]]) epoch = tf.where(rand < 0.1, tf.zeros_like(epoch), epoch) # Embed the epoch number. 
emb_epoch = common_layers.embedding(epoch, 32, 32) # [batch, 32] flat_x = tf.concat([flat_x, emb_epoch], axis=1) flat_x = tf.layers.dropout(flat_x, rate=dropout) x = tf.layers.dense(flat_x, 128, activation=tf.nn.relu) logits = tf.layers.dense( x, self.hparams.problem.num_actions, name="dense2" ) logits = clip_logits(logits, self.hparams) logits = tf.expand_dims(logits, axis=1) value = tf.layers.dense(x, self.distributional_value_size) return {"target_policy": logits, "target_value": value} @registry.register_model class FeedForwardCnnSmallCategoricalPolicyNew(PolicyBase): """Small cnn network with categorical output.""" def body(self, features): observations = features["inputs"] x = tf.transpose(observations, [0, 2, 3, 1, 4]) x_shape = common_layers.shape_list(x) x = tf.reshape(x, x_shape[:-2] + [-1]) dropout = getattr(self.hparams, "dropout_ppo", 0.0) with tf.variable_scope("feed_forward_cnn_small"): x = tf.cast(x, tf.float32) / 255.0 x = tf.nn.dropout(x, rate=dropout) x = tf.layers.conv2d( x, 32, (4, 4), strides=(2, 2), name="conv1", activation=common_layers.belu, padding="SAME") x = tf.nn.dropout(x, rate=dropout) x = tf.layers.conv2d( x, 64, (4, 4), strides=(2, 2), name="conv2", activation=common_layers.belu, padding="SAME") x = tf.nn.dropout(x, rate=dropout) x = tf.layers.conv2d( x, 128, (4, 4), strides=(2, 2), name="conv3", activation=common_layers.belu, padding="SAME") flat_x = tf.layers.flatten(x) flat_x = tf.nn.dropout(flat_x, rate=dropout) x = tf.layers.dense(flat_x, 128, activation=tf.nn.relu, name="dense1") logits = tf.layers.dense( x, self.hparams.problem.num_actions, name="dense2" ) logits = tf.expand_dims(logits, axis=1) logits = clip_logits(logits, self.hparams) value = tf.layers.dense(x, 1, name="value") return {"target_policy": logits, "target_value": value} @registry.register_model class DenseBitwiseCategoricalPolicy(PolicyBase): """Dense network with bitwise input and categorical output.""" def body(self, features): observations = features["inputs"] flat_x = tf.layers.flatten(observations) with tf.variable_scope("dense_bitwise"): flat_x = discretization.int_to_bit_embed(flat_x, 8, 32) x = tf.layers.dense(flat_x, 256, activation=tf.nn.relu) x = tf.layers.dense(flat_x, 128, activation=tf.nn.relu) logits = tf.layers.dense(x, self.hparams.problem.num_actions) value = tf.layers.dense(x, 1)[..., 0] return {"target_policy": logits, "target_value": value} @registry.register_model class RandomPolicy(PolicyBase): """Random policy with categorical output.""" def body(self, features): observations = features["inputs"] obs_shape = observations.shape.as_list() # Just so Saver doesn't complain because of no variables. tf.get_variable("dummy_var", initializer=0.0) num_actions = self.hparams.problem.num_actions logits = tf.constant( 1. / float(num_actions), shape=(obs_shape[:1] + [1, num_actions]) ) value = tf.zeros(obs_shape[:1] + [1]) return {"target_policy": logits, "target_value": value}
[ "tensorflow.get_variable", "tensorflow.concat", "tensorflow.zeros", "tensorflow.layers.dropout", "tensorflow.minimum", "tensorflow.cast", "tensorflow.check_numerics", "tensorflow.layers.dense", "tensorflow.random_normal_initializer", "tensorflow.nn.dropout", "tensorflow.layers.conv2d", "tensorflow.shape", "tensorflow.random.uniform", "tensorflow.exp", "tensorflow.zeros_like", "tensorflow.logging.info", "tensorflow.clip_by_value", "tensorflow.layers.flatten", "tensorflow.constant", "tensorflow.transpose", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.reduce_min", "tensorflow.variable_scope", "tensorflow.initializers.variance_scaling", "tensorflow.get_variable_scope" ]
tensor2tensor/models/research/rl.py
[(633, 'collections.namedtuple', 'collections.namedtuple', (['"""NetworkOutput"""', '"""policy, value, action_postprocessing"""'], {}), False, 'import collections\n'), (49, 'tensor2tensor.layers.common_hparams.basic_params1', 'common_hparams.basic_params1', ([], {}), False, 'from tensor2tensor.layers import common_hparams\n'), (84, 'tensor2tensor.utils.hparam.HParams', 'hparam.HParams', ([], {'wrappers': 'wrappers'}), False, 'from tensor2tensor.utils import hparam\n'), (96, 'tensor2tensor.layers.common_hparams.basic_params1', 'common_hparams.basic_params1', ([], {}), False, 'from tensor2tensor.layers import common_hparams\n'), (187, 'tensor2tensor.models.video.basic_deterministic_params.next_frame_basic_deterministic', 'basic_deterministic_params.next_frame_basic_deterministic', ([], {}), False, 'from tensor2tensor.models.video import basic_deterministic_params\n'), (204, 'tensor2tensor.models.video.basic_deterministic_params.next_frame_tiny', 'basic_deterministic_params.next_frame_tiny', ([], {}), False, 'from tensor2tensor.models.video import basic_deterministic_params\n'), (220, 'tensor2tensor.models.video.basic_stochastic.next_frame_basic_stochastic_discrete', 'basic_stochastic.next_frame_basic_stochastic_discrete', ([], {}), False, 'from tensor2tensor.models.video import basic_stochastic\n'), (274, 'tensor2tensor.utils.trainer_lib.create_hparams', 'trainer_lib.create_hparams', (['hparams.generative_model_params'], {}), False, 'from tensor2tensor.utils import trainer_lib\n'), (307, 'tensor2tensor.layers.common_layers.shape_list', 'common_layers.shape_list', (['observations'], {}), False, 'from tensor2tensor.layers import common_layers\n'), (319, 'tensor2tensor.utils.trainer_lib.add_problem_hparams', 'trainer_lib.add_problem_hparams', (['hparams', 'policy_problem'], {}), False, 'from tensor2tensor.utils import trainer_lib\n'), (424, 'tensor2tensor.utils.hparam.HParams', 'hparam.HParams', ([], {'game': '"""pong"""', 'sticky_actions': '(False)', 'base_algo': '"""ppo"""', 'base_algo_params': '"""ppo_original_params"""', 'batch_size': '(16)', 'eval_batch_size': '(2)', 'frame_stack_size': '(4)', 'eval_sampling_temps': '[0.0, 0.2, 0.5, 0.8, 1.0, 2.0]', 'max_num_noops': '(8)', 'eval_max_num_noops': '(8)', 'eval_rl_env_max_episode_steps': '(1000)', 'resize_height_factor': '(2)', 'resize_width_factor': '(2)', 'distributional_size': '(1)', 'distributional_subscale': '(0.04)', 'distributional_threshold': '(0.0)', 'grayscale': '(0)', 'rl_env_max_episode_steps': '(-1)', 'rl_env_name': '""""""', 'rl_should_derive_observation_space': '(True)', 'aunused': '(0)'}), False, 'from tensor2tensor.utils import hparam\n'), (643, 'tensorflow.initializers.variance_scaling', 'tf.initializers.variance_scaling', ([], {'scale': 'config.init_mean_factor'}), True, 'import tensorflow as tf\n'), (645, 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', (['config.init_logstd', '(1e-10)'], {}), True, 'import tensorflow as tf\n'), (669, 'tensorflow.check_numerics', 'tf.check_numerics', (['mean', '"""mean"""'], {}), True, 'import tensorflow as tf\n'), (670, 'tensorflow.check_numerics', 'tf.check_numerics', (['logstd', '"""logstd"""'], {}), True, 'import tensorflow as tf\n'), (671, 'tensorflow.check_numerics', 'tf.check_numerics', (['value', '"""value"""'], {}), True, 'import tensorflow as tf\n'), (313, 'tensorflow.logging.info', 'tf.logging.info', (['"""Using DummyPolicyProblemTTT for the policy."""'], {}), True, 'import tensorflow as tf\n'), (314, 
'tensor2tensor.envs.tic_tac_toe_env.DummyPolicyProblemTTT', 'tic_tac_toe_env.DummyPolicyProblemTTT', ([], {}), False, 'from tensor2tensor.envs import tic_tac_toe_env\n'), (316, 'tensorflow.logging.info', 'tf.logging.info', (['"""Using DummyPolicyProblem for the policy."""'], {}), True, 'import tensorflow as tf\n'), (321, 'tensor2tensor.utils.registry.model', 'registry.model', (['hparams.policy_network'], {}), False, 'from tensor2tensor.utils import registry\n'), (333, 'tensorflow.constant', 'tf.constant', (['(epoch + 1)'], {}), True, 'import tensorflow as tf\n'), (334, 'tensorflow.zeros', 'tf.zeros', (['(obs_shape[:2] + [1])'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (335, 'tensorflow.zeros', 'tf.zeros', (['(obs_shape[:2] + [1])'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (336, 'tensorflow.zeros', 'tf.zeros', (['(obs_shape[:1] + [num_target_frames] + obs_shape[2:])'], {}), True, 'import tensorflow as tf\n'), (337, 'tensorflow.zeros', 'tf.zeros', (['(obs_shape[:1] + [num_target_frames, 1])'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (339, 'tensorflow.zeros', 'tf.zeros', (['(obs_shape[:1] + [num_target_frames, 1])'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (341, 'tensorflow.zeros', 'tf.zeros', (['(obs_shape[:1] + [num_target_frames] + [action_space.n])'], {}), True, 'import tensorflow as tf\n'), (343, 'tensorflow.zeros', 'tf.zeros', (['(obs_shape[:1] + target_value_shape_suffix)'], {}), True, 'import tensorflow as tf\n'), (349, 'tensor2tensor.utils.t2t_model.create_dummy_vars', 't2t_model.create_dummy_vars', ([], {}), False, 'from tensor2tensor.utils import t2t_model\n'), (651, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""network_parameters"""'], {}), True, 'import tensorflow as tf\n'), (673, 'tensorflow.exp', 'tf.exp', (['logstd'], {}), True, 'import tensorflow as tf\n'), (681, 'tensorflow.reduce_min', 'tf.reduce_min', (['logits'], {}), True, 'import tensorflow as tf\n'), (682, 'tensorflow.minimum', 'tf.minimum', (['(logits - min_logit)', 'logits_clip'], {}), True, 'import tensorflow as tf\n'), (693, 'tensorflow.cast', 'tf.cast', (['observations', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (694, 'tensorflow.layers.flatten', 'tf.layers.flatten', (['observations'], {}), True, 'import tensorflow as tf\n'), (720, 'tensorflow.transpose', 'tf.transpose', (['observations', '[0, 2, 3, 1, 4]'], {}), True, 'import tensorflow as tf\n'), (721, 'tensor2tensor.layers.common_layers.shape_list', 'common_layers.shape_list', (['x'], {}), False, 'from tensor2tensor.layers import common_layers\n'), (722, 'tensorflow.reshape', 'tf.reshape', (['x', '(x_shape[:-2] + [-1])'], {}), True, 'import tensorflow as tf\n'), (758, 'tensorflow.transpose', 'tf.transpose', (['observations', '[0, 2, 3, 1, 4]'], {}), True, 'import tensorflow as tf\n'), (759, 'tensor2tensor.layers.common_layers.shape_list', 'common_layers.shape_list', (['x'], {}), False, 'from tensor2tensor.layers import common_layers\n'), (760, 'tensorflow.reshape', 'tf.reshape', (['x', '(x_shape[:-2] + [-1])'], {}), True, 'import tensorflow as tf\n'), (797, 'tensorflow.layers.flatten', 'tf.layers.flatten', (['observations'], {}), True, 'import tensorflow as tf\n'), (819, 'tensorflow.get_variable', 'tf.get_variable', (['"""dummy_var"""'], {'initializer': '(0.0)'}), True, 'import tensorflow as tf\n'), (825, 'tensorflow.zeros', 'tf.zeros', (['(obs_shape[:1] + [1])'], {}), True, 'import tensorflow as tf\n'), (241, 
'tensor2tensor.rl.envs.py_func_batch_env.PyFuncBatchEnv', 'PyFuncBatchEnv', (['env'], {}), False, 'from tensor2tensor.rl.envs.py_func_batch_env import PyFuncBatchEnv\n'), (348, 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), True, 'import tensorflow as tf\n'), (652, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""policy"""'], {}), True, 'import tensorflow as tf\n'), (656, 'tensorflow.layers.dense', 'tf.layers.dense', (['x', 'action_space.shape[0]'], {'activation': 'tf.tanh', 'kernel_initializer': 'mean_weights_initializer'}), True, 'import tensorflow as tf\n'), (659, 'tensorflow.get_variable', 'tf.get_variable', (['"""logstd"""', 'mean.shape[2:]', 'tf.float32', 'logstd_initializer'], {}), True, 'import tensorflow as tf\n'), (664, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""value"""'], {}), True, 'import tensorflow as tf\n'), (675, 'tensorflow.clip_by_value', 'tf.clip_by_value', (['a', '(-2.0)', '(2)'], {}), True, 'import tensorflow as tf\n'), (695, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""policy"""'], {}), True, 'import tensorflow as tf\n'), (699, 'tensorflow.layers.dense', 'tf.layers.dense', (['x', 'self.hparams.problem.num_actions'], {}), True, 'import tensorflow as tf\n'), (700, 'tensorflow.expand_dims', 'tf.expand_dims', (['logits'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (701, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""value"""'], {}), True, 'import tensorflow as tf\n'), (705, 'tensorflow.layers.dense', 'tf.layers.dense', (['x', '(1)'], {}), True, 'import tensorflow as tf\n'), (724, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""feed_forward_cnn_small"""'], {}), True, 'import tensorflow as tf\n'), (726, 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['x', '(32)', '(5, 5)'], {'strides': '(2, 2)', 'activation': 'tf.nn.relu', 'padding': '"""same"""'}), True, 'import tensorflow as tf\n'), (728, 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['x', '(32)', '(5, 5)'], {'strides': '(2, 2)', 'activation': 'tf.nn.relu', 'padding': '"""same"""'}), True, 'import tensorflow as tf\n'), (731, 'tensorflow.layers.flatten', 'tf.layers.flatten', (['x'], {}), True, 'import tensorflow as tf\n'), (740, 'tensorflow.layers.dropout', 'tf.layers.dropout', (['flat_x'], {'rate': 'dropout'}), True, 'import tensorflow as tf\n'), (741, 'tensorflow.layers.dense', 'tf.layers.dense', (['flat_x', '(128)'], {'activation': 'tf.nn.relu'}), True, 'import tensorflow as tf\n'), (743, 'tensorflow.layers.dense', 'tf.layers.dense', (['x', 'self.hparams.problem.num_actions'], {'name': '"""dense2"""'}), True, 'import tensorflow as tf\n'), (747, 'tensorflow.expand_dims', 'tf.expand_dims', (['logits'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (748, 'tensorflow.layers.dense', 'tf.layers.dense', (['x', 'self.distributional_value_size'], {}), True, 'import tensorflow as tf\n'), (762, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""feed_forward_cnn_small"""'], {}), True, 'import tensorflow as tf\n'), (764, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['x'], {'rate': 'dropout'}), True, 'import tensorflow as tf\n'), (765, 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['x', '(32)', '(4, 4)'], {'strides': '(2, 2)', 'name': '"""conv1"""', 'activation': 'common_layers.belu', 'padding': '"""SAME"""'}), True, 'import tensorflow as tf\n'), (768, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['x'], {'rate': 'dropout'}), True, 'import tensorflow as tf\n'), (769, 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['x', '(64)', '(4, 
4)'], {'strides': '(2, 2)', 'name': '"""conv2"""', 'activation': 'common_layers.belu', 'padding': '"""SAME"""'}), True, 'import tensorflow as tf\n'), (772, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['x'], {'rate': 'dropout'}), True, 'import tensorflow as tf\n'), (773, 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['x', '(128)', '(4, 4)'], {'strides': '(2, 2)', 'name': '"""conv3"""', 'activation': 'common_layers.belu', 'padding': '"""SAME"""'}), True, 'import tensorflow as tf\n'), (777, 'tensorflow.layers.flatten', 'tf.layers.flatten', (['x'], {}), True, 'import tensorflow as tf\n'), (778, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['flat_x'], {'rate': 'dropout'}), True, 'import tensorflow as tf\n'), (779, 'tensorflow.layers.dense', 'tf.layers.dense', (['flat_x', '(128)'], {'activation': 'tf.nn.relu', 'name': '"""dense1"""'}), True, 'import tensorflow as tf\n'), (781, 'tensorflow.layers.dense', 'tf.layers.dense', (['x', 'self.hparams.problem.num_actions'], {'name': '"""dense2"""'}), True, 'import tensorflow as tf\n'), (784, 'tensorflow.expand_dims', 'tf.expand_dims', (['logits'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (787, 'tensorflow.layers.dense', 'tf.layers.dense', (['x', '(1)'], {'name': '"""value"""'}), True, 'import tensorflow as tf\n'), (798, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""dense_bitwise"""'], {}), True, 'import tensorflow as tf\n'), (799, 'tensor2tensor.layers.discretization.int_to_bit_embed', 'discretization.int_to_bit_embed', (['flat_x', '(8)', '(32)'], {}), False, 'from tensor2tensor.layers import discretization\n'), (801, 'tensorflow.layers.dense', 'tf.layers.dense', (['flat_x', '(256)'], {'activation': 'tf.nn.relu'}), True, 'import tensorflow as tf\n'), (802, 'tensorflow.layers.dense', 'tf.layers.dense', (['flat_x', '(128)'], {'activation': 'tf.nn.relu'}), True, 'import tensorflow as tf\n'), (804, 'tensorflow.layers.dense', 'tf.layers.dense', (['x', 'self.hparams.problem.num_actions'], {}), True, 'import tensorflow as tf\n'), (648, 'tensorflow.shape', 'tf.shape', (['observations'], {}), True, 'import tensorflow as tf\n'), (648, 'tensorflow.shape', 'tf.shape', (['observations'], {}), True, 'import tensorflow as tf\n'), (655, 'tensorflow.layers.dense', 'tf.layers.dense', (['x', 'size'], {'activation': 'tf.nn.relu'}), True, 'import tensorflow as tf\n'), (667, 'tensorflow.layers.dense', 'tf.layers.dense', (['x', 'size'], {'activation': 'tf.nn.relu'}), True, 'import tensorflow as tf\n'), (668, 'tensorflow.layers.dense', 'tf.layers.dense', (['x', '(1)'], {}), True, 'import tensorflow as tf\n'), (698, 'tensorflow.layers.dense', 'tf.layers.dense', (['x', 'size'], {'activation': 'tf.nn.relu'}), True, 'import tensorflow as tf\n'), (704, 'tensorflow.layers.dense', 'tf.layers.dense', (['x', 'size'], {'activation': 'tf.nn.relu'}), True, 'import tensorflow as tf\n'), (725, 'tensorflow.cast', 'tf.cast', (['x', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (735, 'tensorflow.random.uniform', 'tf.random.uniform', (['[x_shape[0]]'], {}), True, 'import tensorflow as tf\n'), (738, 'tensor2tensor.layers.common_layers.embedding', 'common_layers.embedding', (['epoch', '(32)', '(32)'], {}), False, 'from tensor2tensor.layers import common_layers\n'), (739, 'tensorflow.concat', 'tf.concat', (['[flat_x, emb_epoch]'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (763, 'tensorflow.cast', 'tf.cast', (['x', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (806, 'tensorflow.layers.dense', 'tf.layers.dense', (['x', '(1)'], {}), True, 'import 
tensorflow as tf\n'), (733, 'tensorflow.zeros', 'tf.zeros', (['[x_shape[0]]'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (736, 'tensorflow.zeros_like', 'tf.zeros_like', (['epoch'], {}), True, 'import tensorflow as tf\n'), (663, 'tensorflow.shape', 'tf.shape', (['mean'], {}), True, 'import tensorflow as tf\n'), (663, 'tensorflow.shape', 'tf.shape', (['mean'], {}), True, 'import tensorflow as tf\n')]
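The `clipping_coef` hyperparameter defined in this row is the epsilon of PPO's clipped surrogate objective (Schulman et al., 2017). The row's code only declares the hyperparameters, so as a sketch of the objective they presumably feed (tensor names and shapes are assumptions, not repo code):

import tensorflow as tf

def ppo_surrogate_loss(new_log_probs, old_log_probs, advantages,
                       clipping_coef=0.2):
  # Probability ratio between the updated policy and the rollout policy.
  ratio = tf.exp(new_log_probs - old_log_probs)
  unclipped = ratio * advantages
  clipped = tf.clip_by_value(
      ratio, 1.0 - clipping_coef, 1.0 + clipping_coef) * advantages
  # PPO maximizes the pessimistic minimum of the two terms; negate for a loss.
  return -tf.reduce_mean(tf.minimum(unclipped, clipped))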
wangke1935/euler
e2785eca70e7e4f37d73ac4ce64a3059b0385dc7
# Copyright 2018 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from tf_euler.python.euler_ops import base

sample_neighbor = base._LIB_OP.sample_neighbor
get_top_k_neighbor = base._LIB_OP.get_top_k_neighbor


def get_full_neighbor(nodes, edge_types):
  """
  Args:
    nodes: A `Tensor` of `int64`.
    edge_types: A 1-D `Tensor` of `int32`. Specify edge types to filter
      outgoing edges.

  Return:
    A tuple of `SparseTensor` (neighbors, weights, types).
      neighbors: A `SparseTensor` of `int64`.
      weights: A `SparseTensor` of `float`.
      types: A `SparseTensor` of `int32`.
  """
  sp_returns = base._LIB_OP.get_full_neighbor(nodes, edge_types)
  return tf.SparseTensor(*sp_returns[:3]), tf.SparseTensor(*sp_returns[3:6]), \
      tf.SparseTensor(*sp_returns[6:])


def get_sorted_full_neighbor(nodes, edge_types):
  """
  Args:
    nodes: A `Tensor` of `int64`.
    edge_types: A 1-D `Tensor` of `int32`. Specify edge types to filter
      outgoing edges.

  Return:
    A tuple of `SparseTensor` (neighbors, weights, types).
      neighbors: A `SparseTensor` of `int64`.
      weights: A `SparseTensor` of `float`.
      types: A `SparseTensor` of `int32`.
  """
  sp_returns = base._LIB_OP.get_sorted_full_neighbor(nodes, edge_types)
  return tf.SparseTensor(*sp_returns[:3]), tf.SparseTensor(*sp_returns[3:6]), \
      tf.SparseTensor(*sp_returns[6:])


def sample_fanout(nodes, edge_types, counts, default_node=-1):
  """
  Sample multi-hop neighbors of nodes according to edge weights in the graph.

  Args:
    nodes: A 1-D `Tensor` of `int64`.
    edge_types: A list of 1-D `Tensor` of `int32`. Specify edge types to
      filter outgoing edges in each hop.
    counts: A list of `int`. Specify the number of neighbors to sample for
      each node in each hop.
    default_node: A `int`. Specify the node id to fill when there is no
      neighbor for specific nodes.

  Return:
    A tuple of lists: (samples, weights, types)
      samples: A list of `Tensor` of `int64`, of length `len(counts) + 1`,
        with shapes `[num_nodes]`, `[num_nodes * count1]`,
        `[num_nodes * count1 * count2]`, ...
      weights: A list of `Tensor` of `float`, with shapes
        `[num_nodes * count1]`, `[num_nodes * count1 * count2]` ...
      types: A list of `Tensor` of `int32`, with shapes
        `[num_nodes * count1]`, `[num_nodes * count1 * count2]` ...
  """
  neighbors_list = [tf.reshape(nodes, [-1])]
  weights_list = []
  type_list = []
  for hop_edge_types, count in zip(edge_types, counts):
    neighbors, weights, types = sample_neighbor(
        neighbors_list[-1], hop_edge_types, count, default_node=default_node)
    neighbors_list.append(tf.reshape(neighbors, [-1]))
    weights_list.append(tf.reshape(weights, [-1]))
    # Append the sampled edge types, not the weights a second time.
    type_list.append(tf.reshape(types, [-1]))
  return neighbors_list, weights_list, type_list


def get_multi_hop_neighbor(nodes, edge_types):
  """
  Get multi-hop neighbors with adjacency matrices.

  Args:
    nodes: A 1-D `tf.Tensor` of `int64`.
    edge_types: A list of 1-D `tf.Tensor` of `int32`. Specify edge types to
      filter outgoing edges in each hop.

  Return:
    A tuple of lists: (nodes, adjacents)
      nodes: A list of N + 1 `tf.Tensor` of `int64`, N is the number of
        hops. Specify the node set of each hop, including the root.
      adjacents: A list of N `tf.SparseTensor` of `int64`. Specify the
        adjacency matrix between each pair of consecutive hops.
  """
  nodes = tf.reshape(nodes, [-1])
  nodes_list = [nodes]
  adj_list = []
  for hop_edge_types in edge_types:
    neighbor, weight, _ = get_full_neighbor(nodes, hop_edge_types)
    next_nodes, next_idx = tf.unique(neighbor.values, out_idx=tf.int64)
    next_indices = tf.stack([neighbor.indices[:, 0], next_idx], 1)
    next_values = weight.values
    next_shape = [tf.size(nodes), tf.size(next_nodes)]
    next_adj = tf.sparse.SparseTensor(next_indices, next_values, next_shape)
    next_adj = tf.sparse.reorder(next_adj)
    nodes_list.append(next_nodes)
    adj_list.append(next_adj)
    nodes = next_nodes
  return nodes_list, adj_list
[ "tensorflow.unique", "tensorflow.stack", "tensorflow.reshape", "tensorflow.SparseTensor", "tensorflow.sparse.reorder", "tensorflow.sparse.SparseTensor", "tensorflow.size" ]
tf_euler/python/euler_ops/neighbor_ops.py
[(41, 'tf_euler.python.euler_ops.base._LIB_OP.get_full_neighbor', 'base._LIB_OP.get_full_neighbor', (['nodes', 'edge_types'], {}), False, 'from tf_euler.python.euler_ops import base\n'), (59, 'tf_euler.python.euler_ops.base._LIB_OP.get_sorted_full_neighbor', 'base._LIB_OP.get_sorted_full_neighbor', (['nodes', 'edge_types'], {}), False, 'from tf_euler.python.euler_ops import base\n'), (115, 'tensorflow.reshape', 'tf.reshape', (['nodes', '[-1]'], {}), True, 'import tensorflow as tf\n'), (42, 'tensorflow.SparseTensor', 'tf.SparseTensor', (['*sp_returns[:3]'], {}), True, 'import tensorflow as tf\n'), (42, 'tensorflow.SparseTensor', 'tf.SparseTensor', (['*sp_returns[3:6]'], {}), True, 'import tensorflow as tf\n'), (43, 'tensorflow.SparseTensor', 'tf.SparseTensor', (['*sp_returns[6:]'], {}), True, 'import tensorflow as tf\n'), (60, 'tensorflow.SparseTensor', 'tf.SparseTensor', (['*sp_returns[:3]'], {}), True, 'import tensorflow as tf\n'), (60, 'tensorflow.SparseTensor', 'tf.SparseTensor', (['*sp_returns[3:6]'], {}), True, 'import tensorflow as tf\n'), (61, 'tensorflow.SparseTensor', 'tf.SparseTensor', (['*sp_returns[6:]'], {}), True, 'import tensorflow as tf\n'), (87, 'tensorflow.reshape', 'tf.reshape', (['nodes', '[-1]'], {}), True, 'import tensorflow as tf\n'), (120, 'tensorflow.unique', 'tf.unique', (['neighbor.values'], {'out_idx': 'tf.int64'}), True, 'import tensorflow as tf\n'), (121, 'tensorflow.stack', 'tf.stack', (['[neighbor.indices[:, (0)], next_idx]', '(1)'], {}), True, 'import tensorflow as tf\n'), (124, 'tensorflow.sparse.SparseTensor', 'tf.sparse.SparseTensor', (['next_indices', 'next_values', 'next_shape'], {}), True, 'import tensorflow as tf\n'), (125, 'tensorflow.sparse.reorder', 'tf.sparse.reorder', (['next_adj'], {}), True, 'import tensorflow as tf\n'), (93, 'tensorflow.reshape', 'tf.reshape', (['neighbors', '[-1]'], {}), True, 'import tensorflow as tf\n'), (94, 'tensorflow.reshape', 'tf.reshape', (['weights', '[-1]'], {}), True, 'import tensorflow as tf\n'), (95, 'tensorflow.reshape', 'tf.reshape', (['weights', '[-1]'], {}), True, 'import tensorflow as tf\n'), (123, 'tensorflow.size', 'tf.size', (['nodes'], {}), True, 'import tensorflow as tf\n'), (123, 'tensorflow.size', 'tf.size', (['next_nodes'], {}), True, 'import tensorflow as tf\n')]
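A hypothetical call to `sample_fanout` from the module above, just to make the shapes promised by its docstring concrete; the node ids, edge types, and fan-out counts are made up:

import tensorflow as tf

nodes = tf.constant([1, 2, 3], dtype=tf.int64)
edge_types = [tf.constant([0], dtype=tf.int32),      # 1st-hop edge filter
              tf.constant([0, 1], dtype=tf.int32)]   # 2nd-hop edge filter
counts = [5, 3]  # 5 neighbors per root node, then 3 per sampled neighbor
neighbors, weights, types = sample_fanout(nodes, edge_types, counts)
# neighbors holds tensors of shape [3], [15], [45];
# weights and types cover only the two sampled hops: [15], [45].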
sunblaze-ucb/rl-attack-vf
48d59d5d022599560f0fabfdd5dbf99984457cec
from __future__ import print_function from collections import namedtuple import six.moves.queue as queue import threading import numpy as np import tensorflow as tf from model import LSTMPolicy import scipy.signal def discount(x, gamma): return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1] def process_rollout(rollout, gamma, lambda_=1.0): """ Given a rollout, compute its returns and the advantage. """ batch_si = np.asarray(rollout.states) batch_a = np.asarray(rollout.actions) rewards = np.asarray(rollout.rewards) vpred_t = np.asarray(rollout.values + [rollout.r]) rewards_plus_v = np.asarray(rollout.rewards + [rollout.r]) batch_r = discount(rewards_plus_v, gamma)[:-1] delta_t = rewards + gamma * vpred_t[1:] - vpred_t[:-1] # this formula for the advantage comes "Generalized Advantage Estimation": # https://arxiv.org/abs/1506.02438 batch_adv = discount(delta_t, gamma * lambda_) features = rollout.features[0] return Batch(batch_si, batch_a, batch_adv, batch_r, rollout.terminal, features) Batch = namedtuple("Batch", ["si", "a", "adv", "r", "terminal", "features"]) class PartialRollout(object): """ A piece of a complete rollout. We run our agent, and process its experience once it has processed enough steps. """ def __init__(self): self.states = [] self.actions = [] self.rewards = [] self.values = [] self.r = 0.0 self.terminal = False self.features = [] def add(self, state, action, reward, value, terminal, features): self.states += [state] self.actions += [action] self.rewards += [reward] self.values += [value] self.terminal = terminal self.features += [features] def extend(self, other): assert not self.terminal self.states.extend(other.states) self.actions.extend(other.actions) self.rewards.extend(other.rewards) self.values.extend(other.values) self.r = other.r self.terminal = other.terminal self.features.extend(other.features) class RunnerThread(threading.Thread): """ One of the key distinctions between a normal environment and a universe environment is that a universe environment is _real time_. This means that there should be a thread that would constantly interact with the environment and tell it what to do. This thread is here. """ def __init__(self, env, policy, num_local_steps): threading.Thread.__init__(self) self.queue = queue.Queue(5) self.num_local_steps = num_local_steps self.env = env self.last_features = None self.policy = policy self.daemon = True self.sess = None self.summary_writer = None def start_runner(self, sess, summary_writer): self.sess = sess self.summary_writer = summary_writer self.start() def run(self): with self.sess.as_default(): self._run() def _run(self): rollout_provider = env_runner(self.env, self.policy, self.num_local_steps, self.summary_writer) while True: # the timeout variable exists because apparently, if one worker dies, the other workers # won't die with it, unless the timeout is set to some large number. This is an empirical # observation. self.queue.put(next(rollout_provider), timeout=3600.0) def env_runner(env, policy, num_local_steps, summary_writer): """ The logic of the thread runner. In brief, it constantly keeps on running the policy, and as long as the rollout exceeds a certain length, the thread runner appends the policy to the queue. 
""" last_state = env.reset() last_features = policy.get_initial_features() length = 0 rewards = 0 episode_vf = [] episode_logits = [] episode_rewards = [] episode_emit = True while True: terminal_end = False rollout = PartialRollout() for _ in range(num_local_steps): fetched = policy.act(last_state, *last_features) action, value_, logits, features = fetched[0], fetched[1], fetched[2], fetched[3:] episode_vf.append(value_) episode_logits.append(logits) # argmax to convert from one-hot state, reward, terminal, info = env.step(action.argmax()) # collect the experience rollout.add(last_state, action, reward, value_, terminal, last_features) length += 1 rewards += reward episode_rewards.append(rewards) last_state = state last_features = features if info: summary = tf.Summary() for k, v in info.items(): summary.value.add(tag=k, simple_value=float(v)) summary_writer.add_summary(summary, policy.global_step.eval()) summary_writer.flush() timestep_limit = env.spec.tags.get('wrapper_config.TimeLimit.max_episode_steps') if terminal or length >= timestep_limit: terminal_end = True if length >= timestep_limit or not env.metadata.get('semantics.autoreset'): last_state = env.reset() last_features = policy.get_initial_features() print("Episode finished. Sum of rewards: %d. Length: %d" % (rewards, length)) length = 0 rewards = 0 # Record episode summary. if episode_emit: for index, (vf, logits, ep_reward) in enumerate(zip(episode_vf, episode_logits, episode_rewards)): summary = tf.Summary() summary.value.add(tag='episode/reward', simple_value=float(ep_reward)) summary.value.add(tag='episode/vf', simple_value=float(vf)) for action in range(logits.shape[1]): summary.value.add( tag='episode/logits/{}'.format(action), simple_value=float(logits[0, action]) ) summary_writer.add_summary(summary, index) summary_writer.flush() episode_vf = [] episode_logits = [] episode_rewards = [] episode_emit = False break if not terminal_end: rollout.r = policy.value(last_state, *last_features) # once we have enough experience, yield it, and have the ThreadRunner place it on a queue yield rollout class A3C(object): def __init__(self, env, task, freeze=False): """ An implementation of the A3C algorithm that is reasonably well-tuned for the VNC environments. Below, we will have a modest amount of complexity due to the way TensorFlow handles data parallelism. But overall, we'll define the model, specify its inputs, and describe how the policy gradients step should be computed. """ self.env = env self.task = task self.freeze = freeze worker_device = "/job:worker/task:{}/cpu:0".format(task) with tf.device(tf.train.replica_device_setter(1, worker_device=worker_device)): with tf.variable_scope("global"): self.network = LSTMPolicy(env.observation_space.shape, env.action_space.n) self.global_step = tf.get_variable("global_step", [], tf.int32, initializer=tf.constant_initializer(0, dtype=tf.int32), trainable=False) with tf.device(worker_device): with tf.variable_scope("local"): self.local_network = pi = LSTMPolicy(env.observation_space.shape, env.action_space.n) pi.global_step = self.global_step self.ac = tf.placeholder(tf.float32, [None, env.action_space.n], name="ac") self.adv = tf.placeholder(tf.float32, [None], name="adv") self.r = tf.placeholder(tf.float32, [None], name="r") log_prob_tf = tf.nn.log_softmax(pi.logits) prob_tf = tf.nn.softmax(pi.logits) # the "policy gradients" loss: its derivative is precisely the policy gradient # notice that self.ac is a placeholder that is provided externally. 
# adv will contain the advantages, as calculated in process_rollout pi_loss = - tf.reduce_sum(tf.reduce_sum(log_prob_tf * self.ac, [1]) * self.adv) # loss of value function vf_loss = 0.5 * tf.reduce_sum(tf.square(pi.vf - self.r)) entropy = - tf.reduce_sum(prob_tf * log_prob_tf) bs = tf.to_float(tf.shape(pi.x)[0]) self.loss = pi_loss + 0.5 * vf_loss - entropy * 0.01 # 20 represents the number of "local steps": the number of timesteps # we run the policy before we update the parameters. # The larger local steps is, the lower is the variance in our policy gradients estimate # on the one hand; but on the other hand, we get less frequent parameter updates, which # slows down learning. In this code, we found that making local steps be much # smaller than 20 makes the algorithm more difficult to tune and to get to work. self.runner = RunnerThread(env, pi, 20) grads = tf.gradients(self.loss, pi.var_list) tf.summary.scalar("model/policy_loss", pi_loss / bs) tf.summary.scalar("model/value_loss", vf_loss / bs) tf.summary.scalar("model/entropy", entropy / bs) tf.summary.image("model/state", pi.x) tf.summary.scalar("model/grad_global_norm", tf.global_norm(grads)) tf.summary.scalar("model/var_global_norm", tf.global_norm(pi.var_list)) self.summary_op = tf.summary.merge_all() grads, _ = tf.clip_by_global_norm(grads, 40.0) # copy weights from the parameter server to the local model self.sync = tf.group(*[v1.assign(v2) for v1, v2 in zip(pi.var_list, self.network.var_list)]) grads_and_vars = list(zip(grads, self.network.var_list)) self.inc_step = self.global_step.assign_add(tf.shape(pi.x)[0]) # each worker has a different set of adam optimizer parameters opt = tf.train.AdamOptimizer(1e-4) self.train_op = tf.group(opt.apply_gradients(grads_and_vars), self.inc_step) self.summary_writer = None self.local_steps = 0 def start(self, sess, summary_writer): self.runner.start_runner(sess, summary_writer) self.summary_writer = summary_writer def pull_batch_from_queue(self): """ self explanatory: take a rollout from the queue of the thread runner. """ rollout = self.runner.queue.get(timeout=3600.0) while not rollout.terminal: try: rollout.extend(self.runner.queue.get_nowait()) except queue.Empty: break return rollout def process(self, sess): """ process grabs a rollout that's been produced by the thread runner, and updates the parameters. The update is then sent to the parameter server. """ sess.run(self.sync) # copy weights from shared to local rollout = self.pull_batch_from_queue() batch = process_rollout(rollout, gamma=0.99, lambda_=1.0) should_compute_summary = self.task == 0 and self.local_steps % 11 == 0 fetches = [] if should_compute_summary: fetches.append(self.summary_op) if not self.freeze: fetches.append(self.train_op) else: # If we are frozen, we just bump the global step. fetches.append(self.inc_step) fetches.append(self.global_step) feed_dict = { self.local_network.x: batch.si, self.ac: batch.a, self.adv: batch.adv, self.r: batch.r, self.local_network.state_in[0]: batch.features[0], self.local_network.state_in[1]: batch.features[1], } fetched = sess.run(fetches, feed_dict=feed_dict) if should_compute_summary: self.summary_writer.add_summary(tf.Summary.FromString(fetched[0]), fetched[-1]) self.summary_writer.flush() self.local_steps += 1
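For reference, a tiny worked example of the advantage computation in `process_rollout` above (GAE, here with lambda_ = 1.0); the rewards and value predictions are invented numbers:

import numpy as np
import scipy.signal

def discount(x, gamma):
    return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]

gamma, lambda_ = 0.99, 1.0
rewards = np.array([0.0, 0.0, 1.0])
vpred_t = np.array([0.5, 0.6, 0.8, 0.0])  # 3 state values plus bootstrap r
delta_t = rewards + gamma * vpred_t[1:] - vpred_t[:-1]
batch_adv = discount(delta_t, gamma * lambda_)
print(np.round(batch_adv, 3))  # approximately [0.48 0.39 0.2 ]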
[ "tensorflow.device", "tensorflow.nn.log_softmax", "numpy.asarray", "tensorflow.reduce_sum", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "tensorflow.summary.image", "tensorflow.gradients", "tensorflow.Summary.FromString", "tensorflow.square", "tensorflow.Summary", "tensorflow.shape", "tensorflow.placeholder", "tensorflow.summary.merge_all", "tensorflow.global_norm", "tensorflow.nn.softmax", "tensorflow.train.replica_device_setter", "tensorflow.constant_initializer", "tensorflow.clip_by_global_norm", "tensorflow.variable_scope" ]
a3c.py
[(37, 'collections.namedtuple', 'namedtuple', (['"""Batch"""', "['si', 'a', 'adv', 'r', 'terminal', 'features']"], {}), False, 'from collections import namedtuple\n'), (22, 'numpy.asarray', 'np.asarray', (['rollout.states'], {}), True, 'import numpy as np\n'), (23, 'numpy.asarray', 'np.asarray', (['rollout.actions'], {}), True, 'import numpy as np\n'), (24, 'numpy.asarray', 'np.asarray', (['rollout.rewards'], {}), True, 'import numpy as np\n'), (25, 'numpy.asarray', 'np.asarray', (['(rollout.values + [rollout.r])'], {}), True, 'import numpy as np\n'), (27, 'numpy.asarray', 'np.asarray', (['(rollout.rewards + [rollout.r])'], {}), True, 'import numpy as np\n'), (81, 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), False, 'import threading\n'), (82, 'six.moves.queue.Queue', 'queue.Queue', (['(5)'], {}), True, 'import six.moves.queue as queue\n'), (213, 'tensorflow.device', 'tf.device', (['worker_device'], {}), True, 'import tensorflow as tf\n'), (218, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, env.action_space.n]'], {'name': '"""ac"""'}), True, 'import tensorflow as tf\n'), (219, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None]'], {'name': '"""adv"""'}), True, 'import tensorflow as tf\n'), (220, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None]'], {'name': '"""r"""'}), True, 'import tensorflow as tf\n'), (222, 'tensorflow.nn.log_softmax', 'tf.nn.log_softmax', (['pi.logits'], {}), True, 'import tensorflow as tf\n'), (223, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['pi.logits'], {}), True, 'import tensorflow as tf\n'), (245, 'tensorflow.gradients', 'tf.gradients', (['self.loss', 'pi.var_list'], {}), True, 'import tensorflow as tf\n'), (247, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""model/policy_loss"""', '(pi_loss / bs)'], {}), True, 'import tensorflow as tf\n'), (248, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""model/value_loss"""', '(vf_loss / bs)'], {}), True, 'import tensorflow as tf\n'), (249, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""model/entropy"""', '(entropy / bs)'], {}), True, 'import tensorflow as tf\n'), (250, 'tensorflow.summary.image', 'tf.summary.image', (['"""model/state"""', 'pi.x'], {}), True, 'import tensorflow as tf\n'), (253, 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), True, 'import tensorflow as tf\n'), (255, 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['grads', '(40.0)'], {}), True, 'import tensorflow as tf\n'), (264, 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(0.0001)'], {}), True, 'import tensorflow as tf\n'), (150, 'tensorflow.Summary', 'tf.Summary', ([], {}), True, 'import tensorflow as tf\n'), (206, 'tensorflow.train.replica_device_setter', 'tf.train.replica_device_setter', (['(1)'], {'worker_device': 'worker_device'}), True, 'import tensorflow as tf\n'), (207, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""global"""'], {}), True, 'import tensorflow as tf\n'), (208, 'model.LSTMPolicy', 'LSTMPolicy', (['env.observation_space.shape', 'env.action_space.n'], {}), False, 'from model import LSTMPolicy\n'), (214, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""local"""'], {}), True, 'import tensorflow as tf\n'), (215, 'model.LSTMPolicy', 'LSTMPolicy', (['env.observation_space.shape', 'env.action_space.n'], {}), False, 'from model import LSTMPolicy\n'), (232, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(prob_tf * log_prob_tf)'], {}), True, 'import tensorflow as tf\n'), 
(251, 'tensorflow.global_norm', 'tf.global_norm', (['grads'], {}), True, 'import tensorflow as tf\n'), (252, 'tensorflow.global_norm', 'tf.global_norm', (['pi.var_list'], {}), True, 'import tensorflow as tf\n'), (323, 'tensorflow.Summary.FromString', 'tf.Summary.FromString', (['fetched[0]'], {}), True, 'import tensorflow as tf\n'), (231, 'tensorflow.square', 'tf.square', (['(pi.vf - self.r)'], {}), True, 'import tensorflow as tf\n'), (234, 'tensorflow.shape', 'tf.shape', (['pi.x'], {}), True, 'import tensorflow as tf\n'), (261, 'tensorflow.shape', 'tf.shape', (['pi.x'], {}), True, 'import tensorflow as tf\n'), (169, 'tensorflow.Summary', 'tf.Summary', ([], {}), True, 'import tensorflow as tf\n'), (210, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0)'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (228, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(log_prob_tf * self.ac)', '[1]'], {}), True, 'import tensorflow as tf\n')]
DLPerf/graphics
c42eb846f1a9b2b326c86ec08c2ba10f5903a460
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluator computing metrics over given pairs of predictions and labels."""
import os
import pickle

from absl import logging
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf

from tensorflow_graphics.geometry.representation import grid
from tensorflow_graphics.math.interpolation import trilinear
from tensorflow_graphics.projects.points_to_3Dobjects.models import centernet_utils
from tensorflow_graphics.projects.points_to_3Dobjects.utils import tf_utils

from google3.pyglib import gfile

from google3.third_party.google_research.google_research.tf3d.object_detection.box_utils import np_box_ops


class ShapeAccuracyMetric:
  """Computes the accuracy of shape prediction."""

  def __init__(self, k=1):
    self.metric = tf.keras.metrics.SparseTopKCategoricalAccuracy(k)

  def update(self, sparse_labels, predicted_probabilities, sample_weights=None):
    self.metric.update_state(sparse_labels, predicted_probabilities,
                             sample_weights)

  def evaluate(self):
    return self.metric.result().numpy()

  def reset(self):
    self.metric.reset_states()


def get_2d_bounding_box_iou(box1, box2):
  """Compute IoU between two 2D bounding boxes.

  Args:
    box1: Input tensor with shape [4] [x_min, y_min, x_max, y_max]
    box2: Input tensor with shape [4] [x_min, y_min, x_max, y_max]

  Returns:
    The intersection over union as a float.
  """
  x_min1, y_min1, x_max1, y_max1 = box1
  x_min2, y_min2, x_max2, y_max2 = box2
  ma = np.maximum
  mi = np.minimum
  intersection = ma(0, mi(x_max1, x_max2) - ma(x_min1, x_min2)) * \
                 ma(0, mi(y_max1, y_max2) - ma(y_min1, y_min2))
  area1 = (x_max1 - x_min1) * (y_max1 - y_min1)
  area2 = (x_max2 - x_min2) * (y_max2 - y_min2)
  union = area1 + area2 - intersection
  # The small epsilon guards against division by zero for degenerate boxes.
  return intersection / (union + 1e-5)


def get_3d_bounding_box_iou(box1, box2):
  """Computes IoU between two given 3d bounding boxes.

  Args:
    box1: Input tensor with shape [B, 7] where the inner dimensions are as
      follows: [x, y, z, length, width, height, yaw].
    box2: Input tensor with shape [B, 7] where the inner dimensions are as
      follows: [x, y, z, length, width, height, yaw].

  Returns:
    The IoU between the two bounding boxes.
""" box1 = box1.numpy() if isinstance(box1, tf.Tensor) else box1 box2 = box2.numpy() if isinstance(box2, tf.Tensor) else box2 box1 = box1.astype(np.float32) box2 = box2.astype(np.float32) # rotates around z, while we rotate around y so need to swap center_1 = tf.reshape(box1[0:3][[0, 2, 1]], [1, 3]) center_2 = tf.reshape(box2[0:3][[0, 2, 1]], [1, 3]) rotation_z_1 = tf.reshape(box1[-1], [1]) rotation_z_2 = tf.reshape(box2[-1], [1]) length_1 = tf.reshape(box1[3 + 0], [1]) height_1 = tf.reshape(box1[3 + 2], [1]) width_1 = tf.reshape(box1[3 + 1], [1]) length_2 = tf.reshape(box2[3 + 0], [1]) height_2 = tf.reshape(box2[3 + 2], [1]) width_2 = tf.reshape(box2[3 + 1], [1]) iou = np.squeeze(np_box_ops.iou3d_7dof_box( length_1, height_1, width_1, center_1, rotation_z_1, length_2, height_2, width_2, center_2, rotation_z_2)) return iou class IoUMetric: """IoU metric.""" def __init__(self, max_num_classes=6, resolution=128, tol=0.05, slave=False, path=None): self.max_num_classes = max_num_classes self.iou_per_class = {i: [] for i in range(self.max_num_classes)} self.resolution = resolution self.slave = slave self.path = path self.tol = tol def update(self, labeled_sdfs, labeled_classes, labeled_poses, predicted_sdfs, predicted_classes, predicted_poses): """Update.""" labeled_rotations = labeled_poses[0] labeled_translations = labeled_poses[1] labeled_sizes = labeled_poses[2] status = True if status: box_limits_x = [100, -100] # box_limits_y = [100, -100] box_limits_z = [100, -100] for i in range(labeled_translations.shape[0]): rot = tf.reshape(tf.gather(labeled_rotations[i], [0, 2, 6, 8]), [2, 2]) min_x = tf.cast(0.0 - labeled_sizes[i][0] / 2.0, dtype=tf.float32) max_x = tf.cast(0.0 + labeled_sizes[i][0] / 2.0, dtype=tf.float32) # min_y = tf.cast(0.0 - labeled_sizes[i][1] / 2.0, dtype=tf.float32) # max_y = tf.cast(0.0 + labeled_sizes[i][1] / 2.0, dtype=tf.float32) min_z = tf.cast(0.0 - labeled_sizes[i][2] / 2.0, dtype=tf.float32) max_z = tf.cast(0.0 + labeled_sizes[i][2] / 2.0, dtype=tf.float32) translation = tf.reshape([labeled_translations[i][0], labeled_translations[i][2]], [2, 1]) pt_0 = rot @ tf.reshape([min_x, min_z], [2, 1]) + translation pt_1 = rot @ tf.reshape([min_x, max_z], [2, 1]) + translation pt_2 = rot @ tf.reshape([max_x, min_z], [2, 1]) + translation pt_3 = rot @ tf.reshape([max_x, max_z], [2, 1]) + translation for pt in [pt_0, pt_1, pt_2, pt_3]: if pt[0] < box_limits_x[0]: box_limits_x[0] = pt[0] if pt[0] > box_limits_x[1]: box_limits_x[1] = pt[0] if pt[1] < box_limits_z[0]: box_limits_z[0] = pt[1] if pt[1] > box_limits_z[1]: box_limits_z[1] = pt[1] mean_x = tf.reduce_mean(box_limits_x) mean_z = tf.reduce_mean(box_limits_z) else: mean_x = tf.reduce_mean(labeled_translations[:, 0]) mean_z = tf.reduce_mean(labeled_translations[:, 2]) samples_world = grid.generate( (mean_x - 0.5, 0.0, mean_z - 0.5), (mean_x + 0.5, 1.0, mean_z + 0.5), [self.resolution, self.resolution, self.resolution]) # samples_world = grid.generate( # (box_limits_x[0][0], box_limits_y[0], box_limits_z[0][0]), # (box_limits_x[1][0], box_limits_y[1], box_limits_z[1][0]), # [self.resolution, self.resolution, self.resolution]) # samples_world = grid.generate( # (-5.0, -5.0, -5.0), # (5.0, 5.0, 5.0), # [self.resolution, self.resolution, self.resolution]) samples_world = tf.reshape(samples_world, [-1, 3]) ious = [] status = False if status: _, axs = plt.subplots(labeled_translations.shape[0], 5) fig_obj_count = 0 for class_id in range(self.max_num_classes): # Do the same for the ground truth and predictions sdf_values = 
tf.zeros_like(samples_world)[:, 0:1] for mtype, (classes, sdfs, poses) in enumerate([ (labeled_classes, labeled_sdfs, labeled_poses), (predicted_classes, predicted_sdfs, predicted_poses)]): for i in range(classes.shape[0]): if class_id == classes[i]: sdf = tf.expand_dims(sdfs[i], -1) sdf = sdf * -1.0 # inside positive, outside zero samples_object = centernet_utils.transform_pointcloud( tf.reshape(samples_world, [1, 1, -1, 3]), tf.reshape(poses[2][i], [1, 1, 3]), tf.reshape(poses[0][i], [1, 1, 3, 3]), tf.reshape(poses[1][i], [1, 1, 3]), inverse=True) * 2.0 samples_object = \ (samples_object * (29.0/32.0) / 2.0 + 0.5) * 32.0 - 0.5 samples = tf.squeeze(samples_object) interpolated = trilinear.interpolate(sdf, samples) sdf_values += tf.math.sign(tf.nn.relu(interpolated + self.tol)) status2 = False if status2: a = 2 values = interpolated inter = tf.reshape(values, [self.resolution, self.resolution, self.resolution]) inter = tf.transpose(tf.reduce_max(inter, axis=a)) im = axs[fig_obj_count, mtype * 2 + 0].matshow(inter.numpy()) plt.colorbar(im, ax=axs[fig_obj_count, mtype * 2 + 0]) print(mtype, fig_obj_count, 0) values = tf.math.sign(tf.nn.relu(interpolated + self.tol)) inter = tf.reshape(values, [self.resolution, self.resolution, self.resolution]) inter = tf.transpose(tf.reduce_max(inter, axis=a)) im = axs[fig_obj_count, mtype * 2 + 1].matshow(inter.numpy()) plt.colorbar(im, ax=axs[fig_obj_count, mtype * 2 + 1]) print(mtype, fig_obj_count, 1) if mtype == 1: values = sdf_values inter = tf.reshape(values, [self.resolution, self.resolution, self.resolution]) inter = tf.transpose(tf.reduce_max(inter, axis=a)) im = axs[fig_obj_count, 4].matshow(inter.numpy()) plt.colorbar(im, ax=axs[fig_obj_count, 4]) print(mtype, fig_obj_count, 2) fig_obj_count += 1 intersection = tf.reduce_sum(tf.math.sign(tf.nn.relu(sdf_values - 1))) union = tf.reduce_sum(tf.math.sign(sdf_values)) iou = intersection / union if not tf.math.is_nan(iou): ious.append(iou) status3 = False if status3: _ = plt.figure(figsize=(5, 5)) plt.clf() # mask = (sdf_values.numpy() > 0)[:, 0] # plt.scatter(samples_world.numpy()[mask, 0], # samples_world.numpy()[mask, 1], # marker='.', c=sdf_values.numpy()[mask, 0]) plt.scatter(samples_world.numpy()[:, 0], samples_world.numpy()[:, 1], marker='.', c=sdf_values.numpy()[:, 0]) plt.colorbar() if not tf.math.is_nan(iou): self.iou_per_class[class_id].append(iou) if ious: ious = [0] return np.mean(ious), np.min(ious) def evaluate(self): """Evaluate.""" if self.slave: data = self.iou_per_class with gfile.Open(self.path, 'wb') as file: pickle.dump(data, file) logging.info(file) return else: iou_per_class_means = [] for _, v in self.iou_per_class.items(): if v: iou_per_class_means.append(np.mean(v)) return np.mean(iou_per_class_means) def reset(self): self.iou_per_class = {i: [] for i in range(self.max_num_classes)} class CollisionMetric: """Collision.""" def __init__(self, max_num_classes=6, resolution=128, tol=0.04, slave=False, path=None): self.max_num_classes = max_num_classes self.collisions = [] self.intersections = [] self.ious = [] self.resolution = resolution self.slave = slave self.path = path self.tol = tol def update(self, labeled_sdfs, labeled_classes, labeled_poses, predicted_sdfs, predicted_classes, predicted_poses): """Update.""" if labeled_sdfs or labeled_classes: print(labeled_sdfs) mean_x = tf.reduce_mean(labeled_poses[1][:, 0]) mean_z = tf.reduce_mean(labeled_poses[1][:, 2]) samples_world = grid.generate( (mean_x - 0.5, 0.0, mean_z - 0.5), (mean_x + 0.5, 1.0, mean_z + 0.5), 
[self.resolution, self.resolution, self.resolution]) samples_world = tf.reshape(samples_world, [-1, 3]) status = False if status: _, axs = plt.subplots(3, 3) fig_obj_count = 0 # Do the same for the ground truth and predictions num_collisions = 0 prev_intersection = 0 sdf_values = tf.zeros_like(samples_world)[:, 0:1] for classes, sdfs, poses in [(predicted_classes, predicted_sdfs, predicted_poses)]: for i in range(classes.shape[0]): sdf = tf.expand_dims(sdfs[i], -1) sdf = sdf * -1.0 # inside positive, outside zero samples_object = centernet_utils.transform_pointcloud( tf.reshape(samples_world, [1, 1, -1, 3]), tf.reshape(poses[2][i], [1, 1, 3]), tf.reshape(poses[0][i], [1, 1, 3, 3]), tf.reshape(poses[1][i], [1, 1, 3]), inverse=True) * 2.0 samples_object = (samples_object * (29.0/32.0) / 2.0 + 0.5) * 32.0 - 0.5 samples = tf.squeeze(samples_object) interpolated = trilinear.interpolate(sdf, samples) occupancy_value = tf.math.sign(tf.nn.relu(interpolated + self.tol)) sdf_values += occupancy_value intersection = tf.reduce_sum(tf.math.sign(tf.nn.relu(sdf_values - 1))) if intersection > prev_intersection: prev_intersection = intersection num_collisions += 1 status2 = False if status2: a = 1 values = interpolated inter = tf.reshape(values, [self.resolution, self.resolution, self.resolution]) inter = tf.transpose(tf.reduce_max(inter, axis=a)) im = axs[fig_obj_count, 0].matshow(inter.numpy()) plt.colorbar(im, ax=axs[fig_obj_count, 0]) values = tf.math.sign(tf.nn.relu(interpolated + self.tol)) inter = tf.reshape(values, [self.resolution, self.resolution, self.resolution]) inter = tf.transpose(tf.reduce_max(inter, axis=a)) im = axs[fig_obj_count, 1].matshow(inter.numpy()) plt.colorbar(im, ax=axs[fig_obj_count, 1]) values = sdf_values inter = tf.reshape(values, [self.resolution, self.resolution, self.resolution]) inter = tf.transpose(tf.reduce_max(inter, axis=a)) im = axs[fig_obj_count, 2].matshow(inter.numpy()) plt.colorbar(im, ax=axs[fig_obj_count, 2]) fig_obj_count += 1 intersection = tf.reduce_sum(tf.math.sign(tf.nn.relu(sdf_values - 1))) union = tf.reduce_sum(tf.math.sign(sdf_values)) iou = intersection / union self.collisions.append(num_collisions) self.intersections.append(intersection) self.ious.append(iou) return num_collisions, intersection, iou def evaluate(self): """Evaluate.""" if self.slave: data = {'collisions': self.collisions, 'intersections': self.intersections, 'ious': self.ious} with gfile.Open(self.path, 'wb') as file: pickle.dump(data, file) logging.info(file) return else: # self.collisions = [] # for k, v in self.iou_per_class.items(): # if len(v) > 0: # iou_per_class_means.append(np.mean(v)) return np.sum(self.collisions) def reset(self): self.intersections = [] self.ious = [] self.collisions = [] class BoxIoUMetric: """BoxIOU.""" def __init__(self, t=0.5, threed=False): self.labeled_boxes = {} self.predicted_boxes = {} self.threshold = t self.threed = threed self.get_iou_func = get_2d_bounding_box_iou if self.threed: self.get_iou_func = get_3d_bounding_box_iou def update(self, scene_id, labeled_boxes, labeled_classes, predicted_boxes, predicted_classes, confidences): """For one scene, provide all ground-truth and all predicted detections.""" self.labeled_boxes[scene_id] = (labeled_boxes, labeled_classes) self.predicted_boxes[scene_id] = (predicted_boxes, predicted_classes, confidences) def evaluate(self): """Eval.""" predictions_per_class = {} # map {classname: pred} labels_per_class = {} # map {classname: gt} for scene_id in self.predicted_boxes: bboxes, classnames, scores = 
self.predicted_boxes[scene_id] classnames = classnames.numpy() bboxes = bboxes.numpy() scores = scores.numpy() for i in range(classnames.shape[0]): classname = classnames[i] bbox = bboxes[i] score = scores[i] # for classname, bbox, score in self.predicted_boxes[scene_id]: if classname not in predictions_per_class: predictions_per_class[classname] = {} if scene_id not in predictions_per_class[classname]: predictions_per_class[classname][scene_id] = [] if classname not in labels_per_class: labels_per_class[classname] = {} if scene_id not in labels_per_class[classname]: labels_per_class[classname][scene_id] = [] predictions_per_class[classname][scene_id].append((bbox, score)) for scene_id in self.labeled_boxes: bboxes, classnames = self.labeled_boxes[scene_id] classnames = classnames.numpy() bboxes = bboxes.numpy() for i in range(classnames.shape[0]): classname = classnames[i] bbox = bboxes[i] if classname not in labels_per_class: labels_per_class[classname] = {} if scene_id not in labels_per_class[classname]: labels_per_class[classname][scene_id] = [] labels_per_class[classname][scene_id].append(bbox) recall_per_class = {} precision_per_class = {} ap_per_class = {} for classname in labels_per_class: print('Computing AP for class: ', classname) if classname in predictions_per_class: recall, precision, ap = self._eval_detections_per_class( # this does not work when class was never predicted predictions_per_class[classname], labels_per_class[classname], self.threshold) else: recall, precision, ap = 0.0, 0.0, 0.0 recall_per_class[classname] = recall precision_per_class[classname] = precision ap_per_class[classname] = ap print(classname, ap) # return recall_per_class, precision_per_class, ap_per_class mean = np.mean(np.array([v for k, v in ap_per_class.items()])) print(mean) return mean def _get_iou_main(self, get_iou_func, args): return get_iou_func(*args) def _voc_ap(self, rec, prec): """Compute VOC AP given precision and recall.""" mrec = np.concatenate(([0.], rec, [1.])) mpre = np.concatenate(([0.], prec, [0.])) # compute the precision envelope for i in range(mpre.size - 1, 0, -1): mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i]) # to calculate area under PR curve, look for points # where X axis (recall) changes value i = np.where(mrec[1:] != mrec[:-1])[0] # and sum (\Delta recall) * prec return np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) def _eval_detections_per_class(self, pred, gt, ovthresh=0.25): """Generic functions to compute precision/recall for object detection.""" # construct gt objects class_recs = {} # {img_id: {'bbox': bbox list, 'det': matched list}} npos = 0 for img_id in gt.keys(): bbox = np.array(gt[img_id]) det = [False] * len(bbox) npos += len(bbox) class_recs[img_id] = {'bbox': bbox, 'det': det} # pad empty list to all other imgids for img_id in pred: if img_id not in gt: class_recs[img_id] = {'bbox': np.array([]), 'det': []} # construct dets image_ids = [] confidence = [] bb = [] for img_id in pred: for box, score in pred[img_id]: image_ids.append(img_id) confidence.append(score) bb.append(box) confidence = np.array(confidence) bb = np.array(bb) # (nd,4 or 8,3 or 6) # sort by confidence sorted_ind = np.argsort(-confidence) bb = bb[sorted_ind, ...] 
image_ids = [image_ids[x] for x in sorted_ind] # go down dets and mark TPs and FPs nd = len(image_ids) tp = np.zeros(nd) fp = np.zeros(nd) for d in range(nd): r = class_recs[image_ids[d]] bb = bb[d, ...].astype(float) ovmax = -np.inf bbgt = r['bbox'].astype(float) if bbgt.size > 0: # compute overlaps for j in range(bbgt.shape[0]): iou = self._get_iou_main(self.get_iou_func, (bb, bbgt[j, ...])) if iou > ovmax: ovmax = iou jmax = j if ovmax > ovthresh: if not r['det'][jmax]: tp[d] = 1. r['det'][jmax] = 1 else: fp[d] = 1. else: fp[d] = 1. # compute precision recall fp = np.cumsum(fp) tp = np.cumsum(tp) rec = tp / float(npos + 1e-5) # avoid divide by zero in case the first detection matches a difficult # ground truth prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps) ap = self._voc_ap(rec, prec) return rec, prec, ap def reset(self): self.labeled_boxes = {} self.predicted_boxes = {} class Evaluator: """Evaluator for specified metrics.""" def __init__(self, metrics, split, shapenet_dir): self.metrics = metrics self.split = split self.shapenet_dir = shapenet_dir def add_detections(self, sample, detections): """Add detections to evaluation. Args: sample: the ground truth information detections: the predicted detections Returns: dict of intermediate results. """ result_dict = {'iou_mean': -1, 'iou_min': -1, 'collisions': 0, 'collision_intersection': 0, 'collision_iou': 0} num_boxes = sample['num_boxes'].numpy() labeled_boxes_init = tf.gather( sample['groundtruth_boxes'], axis=1, indices=[1, 0, 3, 2]) * 256.0 for _, metric in self.metrics.items(): if isinstance(metric, ShapeAccuracyMetric): labels = sample['shapes'] weights = tf.math.sign(labels + 1) # -1 is mapped to zero, else 1 metric.update(labels, detections['shapes_logits'], weights) elif isinstance(metric, BoxIoUMetric): scene_id = str(sample['scene_filename'].numpy(), 'utf-8') # Get ground truth boxes labeled_boxes = labeled_boxes_init if metric.threed: rotations_y = tf.concat([tf_utils.euler_from_rotation_matrix( tf.reshape(detections['rotations_3d'][i], [3, 3]), 1) for i in range(num_boxes)], axis=0) rotations_y = tf.reshape(rotations_y, [-1, 1]) labeled_boxes = tf.concat([sample['translations_3d'], sample['sizes_3d'], rotations_y], axis=1) # Get predicted boxes predicted_boxes = detections['detection_boxes'] if metric.threed: rotations_y = tf.concat([tf_utils.euler_from_rotation_matrix( tf.reshape(detections['rotations_3d'][i], [3, 3]), 1) for i in range(num_boxes)], axis=0) rotations_y = tf.reshape(rotations_y, [-1, 1]) predicted_boxes = tf.concat([detections['translations_3d'], detections['sizes_3d'], rotations_y], axis=1) labeled_classes = tf.cast(sample['groundtruth_valid_classes'], tf.int64) predicted_classes = tf.cast(detections['detection_classes'], tf.int64) confidences = detections['detection_scores'] metric.update(scene_id, labeled_boxes, labeled_classes, predicted_boxes, predicted_classes, confidences) elif isinstance(metric, IoUMetric): classes = sample['classes'] mesh_names = sample['mesh_names'] labeled_sdfs = [] for i in range(num_boxes): class_id = str(classes[i].numpy()).zfill(8) model_name = str(mesh_names[i].numpy(), 'utf-8') path_prefix = os.path.join(self.shapenet_dir, class_id, model_name) file_sdf = os.path.join(path_prefix, 'model_normalized_sdf.npy') with gfile.Open(file_sdf, 'rb') as f: labeled_sdfs.append(tf.expand_dims(np.load(f).astype(np.float32), 0)) labeled_sdfs = tf.concat(labeled_sdfs, axis=0) labeled_classes = tf.cast(sample['groundtruth_valid_classes'], tf.int64) labeled_permutation = 
np.argsort(labeled_classes) labeled_sdfs = labeled_sdfs.numpy()[labeled_permutation] labeled_classes = labeled_classes.numpy()[labeled_permutation] labeled_rotations_3d = sample['rotations_3d'].numpy() labeled_rotations_3d = labeled_rotations_3d[labeled_permutation] labeled_translations_3d = sample['translations_3d'].numpy() labeled_translations_3d = labeled_translations_3d[labeled_permutation] labeled_sizes_3d = sample['sizes_3d'].numpy()[labeled_permutation] labeled_poses = (labeled_rotations_3d, labeled_translations_3d, labeled_sizes_3d) # Predictions predicted_classes = tf.cast(detections['detection_classes'], tf.int64) predicted_permutation = np.argsort(predicted_classes) predicted_classes = predicted_classes.numpy()[predicted_permutation] predicted_sdfs = \ detections['predicted_sdfs'].numpy()[predicted_permutation] predicted_rotations_3d = \ detections['rotations_3d'].numpy()[predicted_permutation] predicted_translations_3d = \ detections['translations_3d'].numpy()[predicted_permutation] predicted_sizes_3d = \ detections['sizes_3d'].numpy()[predicted_permutation] predicted_poses = (predicted_rotations_3d, predicted_translations_3d, predicted_sizes_3d) full_oracle = False if full_oracle: predicted_sdfs = detections['groundtruth_sdfs'].numpy() predicted_sdfs = predicted_sdfs[labeled_permutation] predicted_classes = labeled_classes predicted_poses = labeled_poses print('----------------------------') print(predicted_sdfs.shape) print(predicted_classes.shape) print(predicted_poses[0].shape) print(predicted_poses[1].shape) print(predicted_poses[2].shape) pose_oracle = False if pose_oracle: predicted_sdfs = detections['predicted_sdfs'].numpy() predicted_sdfs = predicted_sdfs[predicted_permutation] predicted_poses = (labeled_rotations_3d, labeled_translations_3d, labeled_sizes_3d) class_oracle = True if class_oracle: predicted_classes *= 0 labeled_classes *= 0 iou_mean, iou_min = metric.update( labeled_sdfs, labeled_classes, labeled_poses, predicted_sdfs, predicted_classes, predicted_poses, sample['dot']) result_dict['iou_mean'] = iou_mean result_dict['iou_min'] = iou_min elif isinstance(metric, CollisionMetric): labeled_sdfs = detections['groundtruth_sdfs'] labeled_classes = tf.cast(sample['groundtruth_valid_classes'], tf.int64) labeled_poses = (sample['rotations_3d'], sample['translations_3d'], sample['sizes_3d']) predicted_classes = tf.cast(detections['detection_classes'], tf.int64) predicted_sdfs = detections['predicted_sdfs'] predicted_poses = (detections['rotations_3d'], detections['translations_3d'], detections['sizes_3d']) full_oracle = False if full_oracle: predicted_sdfs = detections['groundtruth_sdfs'].numpy() predicted_classes = labeled_classes predicted_poses = labeled_poses num_collisions, intersection, iou = metric.update( labeled_sdfs, labeled_classes, labeled_poses, predicted_sdfs, predicted_classes, predicted_poses) result_dict['collisions'] = num_collisions result_dict['collision_intersection'] = intersection result_dict['collision_iou'] = iou return result_dict def evaluate(self): """Runs metrics over provided pairs and returns metric dict.""" metrics = {} for name, metric in self.metrics.items(): metrics[name] = metric.evaluate() return metrics def reset_metrics(self): for _, metric in self.metrics.items(): metric.reset()
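# Aside: the precision-envelope computation in _voc_ap above can be
# sanity-checked in isolation with a standalone restatement of the same
# logic; the toy recall/precision arrays below are made up for illustration.
import numpy as np

def voc_ap(recall, precision):
  """VOC-style AP: area under the interpolated precision-recall curve."""
  mrec = np.concatenate(([0.0], recall, [1.0]))
  mpre = np.concatenate(([0.0], precision, [0.0]))
  # Precision envelope: make precision non-increasing, scanning from
  # high recall to low recall.
  for i in range(mpre.size - 1, 0, -1):
    mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
  # Sum rectangle areas at the points where recall changes value.
  idx = np.where(mrec[1:] != mrec[:-1])[0]
  return np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1])

# Two of four ground-truth objects found, followed by one false positive.
print(voc_ap(np.array([0.25, 0.5, 0.5]), np.array([1.0, 1.0, 0.667])))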
[ "tensorflow.concat", "tensorflow.math.sign", "tensorflow.cast", "numpy.cumsum", "numpy.concatenate", "numpy.mean", "numpy.where", "tensorflow.squeeze", "numpy.finfo", "tensorflow.gather", "numpy.load", "numpy.zeros", "matplotlib.pyplot.figure", "tensorflow.keras.metrics.SparseTopKCategoricalAccuracy", "numpy.min", "tensorflow.zeros_like", "numpy.argsort", "numpy.array", "numpy.sum", "tensorflow.nn.relu", "tensorflow.reduce_max", "numpy.maximum", "tensorflow.reduce_mean", "tensorflow.reshape", "matplotlib.pyplot.subplots", "tensorflow.math.is_nan", "tensorflow.expand_dims", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.clf" ]
tensorflow_graphics/projects/points_to_3Dobjects/utils/evaluator.py
[(89, 'tensorflow.reshape', 'tf.reshape', (['box1[0:3][[0, 2, 1]]', '[1, 3]'], {}), True, 'import tensorflow as tf\n'), (90, 'tensorflow.reshape', 'tf.reshape', (['box2[0:3][[0, 2, 1]]', '[1, 3]'], {}), True, 'import tensorflow as tf\n'), (92, 'tensorflow.reshape', 'tf.reshape', (['box1[-1]', '[1]'], {}), True, 'import tensorflow as tf\n'), (93, 'tensorflow.reshape', 'tf.reshape', (['box2[-1]', '[1]'], {}), True, 'import tensorflow as tf\n'), (95, 'tensorflow.reshape', 'tf.reshape', (['box1[3 + 0]', '[1]'], {}), True, 'import tensorflow as tf\n'), (96, 'tensorflow.reshape', 'tf.reshape', (['box1[3 + 2]', '[1]'], {}), True, 'import tensorflow as tf\n'), (97, 'tensorflow.reshape', 'tf.reshape', (['box1[3 + 1]', '[1]'], {}), True, 'import tensorflow as tf\n'), (99, 'tensorflow.reshape', 'tf.reshape', (['box2[3 + 0]', '[1]'], {}), True, 'import tensorflow as tf\n'), (100, 'tensorflow.reshape', 'tf.reshape', (['box2[3 + 2]', '[1]'], {}), True, 'import tensorflow as tf\n'), (101, 'tensorflow.reshape', 'tf.reshape', (['box2[3 + 1]', '[1]'], {}), True, 'import tensorflow as tf\n'), (34, 'tensorflow.keras.metrics.SparseTopKCategoricalAccuracy', 'tf.keras.metrics.SparseTopKCategoricalAccuracy', (['k'], {}), True, 'import tensorflow as tf\n'), (103, 'google3.third_party.google_research.google_research.tf3d.object_detection.box_utils.np_box_ops.iou3d_7dof_box', 'np_box_ops.iou3d_7dof_box', (['length_1', 'height_1', 'width_1', 'center_1', 'rotation_z_1', 'length_2', 'height_2', 'width_2', 'center_2', 'rotation_z_2'], {}), False, 'from google3.third_party.google_research.google_research.tf3d.object_detection.box_utils import np_box_ops\n'), (169, 'tensorflow_graphics.geometry.representation.grid.generate', 'grid.generate', (['(mean_x - 0.5, 0.0, mean_z - 0.5)', '(mean_x + 0.5, 1.0, mean_z + 0.5)', '[self.resolution, self.resolution, self.resolution]'], {}), False, 'from tensorflow_graphics.geometry.representation import grid\n'), (180, 'tensorflow.reshape', 'tf.reshape', (['samples_world', '[-1, 3]'], {}), True, 'import tensorflow as tf\n'), (302, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['labeled_poses[1][:, (0)]'], {}), True, 'import tensorflow as tf\n'), (303, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['labeled_poses[1][:, (2)]'], {}), True, 'import tensorflow as tf\n'), (304, 'tensorflow_graphics.geometry.representation.grid.generate', 'grid.generate', (['(mean_x - 0.5, 0.0, mean_z - 0.5)', '(mean_x + 0.5, 1.0, mean_z + 0.5)', '[self.resolution, self.resolution, self.resolution]'], {}), False, 'from tensorflow_graphics.geometry.representation import grid\n'), (307, 'tensorflow.reshape', 'tf.reshape', (['samples_world', '[-1, 3]'], {}), True, 'import tensorflow as tf\n'), (482, 'numpy.concatenate', 'np.concatenate', (['([0.0], rec, [1.0])'], {}), True, 'import numpy as np\n'), (483, 'numpy.concatenate', 'np.concatenate', (['([0.0], prec, [0.0])'], {}), True, 'import numpy as np\n'), (491, 'numpy.sum', 'np.sum', (['((mrec[i + 1] - mrec[i]) * mpre[i + 1])'], {}), True, 'import numpy as np\n'), (518, 'numpy.array', 'np.array', (['confidence'], {}), True, 'import numpy as np\n'), (519, 'numpy.array', 'np.array', (['bb'], {}), True, 'import numpy as np\n'), (522, 'numpy.argsort', 'np.argsort', (['(-confidence)'], {}), True, 'import numpy as np\n'), (528, 'numpy.zeros', 'np.zeros', (['nd'], {}), True, 'import numpy as np\n'), (529, 'numpy.zeros', 'np.zeros', (['nd'], {}), True, 'import numpy as np\n'), (554, 'numpy.cumsum', 'np.cumsum', (['fp'], {}), True, 'import numpy as np\n'), (555, 
'numpy.cumsum', 'np.cumsum', (['tp'], {}), True, 'import numpy as np\n'), (164, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['box_limits_x'], {}), True, 'import tensorflow as tf\n'), (165, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['box_limits_z'], {}), True, 'import tensorflow as tf\n'), (167, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['labeled_translations[:, (0)]'], {}), True, 'import tensorflow as tf\n'), (168, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['labeled_translations[:, (2)]'], {}), True, 'import tensorflow as tf\n'), (185, 'matplotlib.pyplot.subplots', 'plt.subplots', (['labeled_translations.shape[0]', '(5)'], {}), True, 'import matplotlib.pyplot as plt\n'), (262, 'numpy.mean', 'np.mean', (['ious'], {}), True, 'import numpy as np\n'), (262, 'numpy.min', 'np.min', (['ious'], {}), True, 'import numpy as np\n'), (270, 'absl.logging.info', 'logging.info', (['file'], {}), False, 'from absl import logging\n'), (277, 'numpy.mean', 'np.mean', (['iou_per_class_means'], {}), True, 'import numpy as np\n'), (311, 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(3)'], {}), True, 'import matplotlib.pyplot as plt\n'), (317, 'tensorflow.zeros_like', 'tf.zeros_like', (['samples_world'], {}), True, 'import tensorflow as tf\n'), (368, 'tensorflow.math.sign', 'tf.math.sign', (['sdf_values'], {}), True, 'import tensorflow as tf\n'), (383, 'absl.logging.info', 'logging.info', (['file'], {}), False, 'from absl import logging\n'), (390, 'numpy.sum', 'np.sum', (['self.collisions'], {}), True, 'import numpy as np\n'), (486, 'numpy.maximum', 'np.maximum', (['mpre[i - 1]', 'mpre[i]'], {}), True, 'import numpy as np\n'), (489, 'numpy.where', 'np.where', (['(mrec[1:] != mrec[:-1])'], {}), True, 'import numpy as np\n'), (500, 'numpy.array', 'np.array', (['gt[img_id]'], {}), True, 'import numpy as np\n'), (591, 'tensorflow.gather', 'tf.gather', (["sample['groundtruth_boxes']"], {'axis': '(1)', 'indices': '[1, 0, 3, 2]'}), True, 'import tensorflow as tf\n'), (137, 'tensorflow.cast', 'tf.cast', (['(0.0 - labeled_sizes[i][0] / 2.0)'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (138, 'tensorflow.cast', 'tf.cast', (['(0.0 + labeled_sizes[i][0] / 2.0)'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (141, 'tensorflow.cast', 'tf.cast', (['(0.0 - labeled_sizes[i][2] / 2.0)'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (142, 'tensorflow.cast', 'tf.cast', (['(0.0 + labeled_sizes[i][2] / 2.0)'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (144, 'tensorflow.reshape', 'tf.reshape', (['[labeled_translations[i][0], labeled_translations[i][2]]', '[2, 1]'], {}), True, 'import tensorflow as tf\n'), (189, 'tensorflow.zeros_like', 'tf.zeros_like', (['samples_world'], {}), True, 'import tensorflow as tf\n'), (241, 'tensorflow.math.sign', 'tf.math.sign', (['sdf_values'], {}), True, 'import tensorflow as tf\n'), (243, 'tensorflow.math.is_nan', 'tf.math.is_nan', (['iou'], {}), True, 'import tensorflow as tf\n'), (247, 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), True, 'import matplotlib.pyplot as plt\n'), (248, 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (257, 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (258, 'tensorflow.math.is_nan', 'tf.math.is_nan', (['iou'], {}), True, 'import tensorflow as tf\n'), (268, 'google3.pyglib.gfile.Open', 'gfile.Open', (['self.path', '"""wb"""'], {}), False, 'from google3.pyglib import 
gfile\n'), (269, 'pickle.dump', 'pickle.dump', (['data', 'file'], {}), False, 'import pickle\n'), (322, 'tensorflow.expand_dims', 'tf.expand_dims', (['sdfs[i]', '(-1)'], {}), True, 'import tensorflow as tf\n'), (330, 'tensorflow.squeeze', 'tf.squeeze', (['samples_object'], {}), True, 'import tensorflow as tf\n'), (331, 'tensorflow_graphics.math.interpolation.trilinear.interpolate', 'trilinear.interpolate', (['sdf', 'samples'], {}), False, 'from tensorflow_graphics.math.interpolation import trilinear\n'), (367, 'tensorflow.nn.relu', 'tf.nn.relu', (['(sdf_values - 1)'], {}), True, 'import tensorflow as tf\n'), (381, 'google3.pyglib.gfile.Open', 'gfile.Open', (['self.path', '"""wb"""'], {}), False, 'from google3.pyglib import gfile\n'), (382, 'pickle.dump', 'pickle.dump', (['data', 'file'], {}), False, 'import pickle\n'), (597, 'tensorflow.math.sign', 'tf.math.sign', (['(labels + 1)'], {}), True, 'import tensorflow as tf\n'), (135, 'tensorflow.gather', 'tf.gather', (['labeled_rotations[i]', '[0, 2, 6, 8]'], {}), True, 'import tensorflow as tf\n'), (240, 'tensorflow.nn.relu', 'tf.nn.relu', (['(sdf_values - 1)'], {}), True, 'import tensorflow as tf\n'), (332, 'tensorflow.nn.relu', 'tf.nn.relu', (['(interpolated + self.tol)'], {}), True, 'import tensorflow as tf\n'), (342, 'tensorflow.reshape', 'tf.reshape', (['values', '[self.resolution, self.resolution, self.resolution]'], {}), True, 'import tensorflow as tf\n'), (347, 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'ax': 'axs[fig_obj_count, 0]'}), True, 'import matplotlib.pyplot as plt\n'), (350, 'tensorflow.reshape', 'tf.reshape', (['values', '[self.resolution, self.resolution, self.resolution]'], {}), True, 'import tensorflow as tf\n'), (355, 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'ax': 'axs[fig_obj_count, 1]'}), True, 'import matplotlib.pyplot as plt\n'), (358, 'tensorflow.reshape', 'tf.reshape', (['values', '[self.resolution, self.resolution, self.resolution]'], {}), True, 'import tensorflow as tf\n'), (363, 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'ax': 'axs[fig_obj_count, 2]'}), True, 'import matplotlib.pyplot as plt\n'), (507, 'numpy.array', 'np.array', (['[]'], {}), True, 'import numpy as np\n'), (559, 'numpy.finfo', 'np.finfo', (['np.float64'], {}), True, 'import numpy as np\n'), (624, 'tensorflow.cast', 'tf.cast', (["sample['groundtruth_valid_classes']", 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (625, 'tensorflow.cast', 'tf.cast', (["detections['detection_classes']", 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (147, 'tensorflow.reshape', 'tf.reshape', (['[min_x, min_z]', '[2, 1]'], {}), True, 'import tensorflow as tf\n'), (148, 'tensorflow.reshape', 'tf.reshape', (['[min_x, max_z]', '[2, 1]'], {}), True, 'import tensorflow as tf\n'), (149, 'tensorflow.reshape', 'tf.reshape', (['[max_x, min_z]', '[2, 1]'], {}), True, 'import tensorflow as tf\n'), (150, 'tensorflow.reshape', 'tf.reshape', (['[max_x, max_z]', '[2, 1]'], {}), True, 'import tensorflow as tf\n'), (195, 'tensorflow.expand_dims', 'tf.expand_dims', (['sdfs[i]', '(-1)'], {}), True, 'import tensorflow as tf\n'), (204, 'tensorflow.squeeze', 'tf.squeeze', (['samples_object'], {}), True, 'import tensorflow as tf\n'), (205, 'tensorflow_graphics.math.interpolation.trilinear.interpolate', 'trilinear.interpolate', (['sdf', 'samples'], {}), False, 'from tensorflow_graphics.math.interpolation import trilinear\n'), (276, 'numpy.mean', 'np.mean', (['v'], {}), True, 'import numpy as np\n'), (325, 'tensorflow.reshape', 
'tf.reshape', (['samples_world', '[1, 1, -1, 3]'], {}), True, 'import tensorflow as tf\n'), (326, 'tensorflow.reshape', 'tf.reshape', (['poses[2][i]', '[1, 1, 3]'], {}), True, 'import tensorflow as tf\n'), (327, 'tensorflow.reshape', 'tf.reshape', (['poses[0][i]', '[1, 1, 3, 3]'], {}), True, 'import tensorflow as tf\n'), (328, 'tensorflow.reshape', 'tf.reshape', (['poses[1][i]', '[1, 1, 3]'], {}), True, 'import tensorflow as tf\n'), (334, 'tensorflow.nn.relu', 'tf.nn.relu', (['(sdf_values - 1)'], {}), True, 'import tensorflow as tf\n'), (345, 'tensorflow.reduce_max', 'tf.reduce_max', (['inter'], {'axis': 'a'}), True, 'import tensorflow as tf\n'), (349, 'tensorflow.nn.relu', 'tf.nn.relu', (['(interpolated + self.tol)'], {}), True, 'import tensorflow as tf\n'), (353, 'tensorflow.reduce_max', 'tf.reduce_max', (['inter'], {'axis': 'a'}), True, 'import tensorflow as tf\n'), (361, 'tensorflow.reduce_max', 'tf.reduce_max', (['inter'], {'axis': 'a'}), True, 'import tensorflow as tf\n'), (608, 'tensorflow.reshape', 'tf.reshape', (['rotations_y', '[-1, 1]'], {}), True, 'import tensorflow as tf\n'), (609, 'tensorflow.concat', 'tf.concat', (["[sample['translations_3d'], sample['sizes_3d'], rotations_y]"], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (619, 'tensorflow.reshape', 'tf.reshape', (['rotations_y', '[-1, 1]'], {}), True, 'import tensorflow as tf\n'), (620, 'tensorflow.concat', 'tf.concat', (["[detections['translations_3d'], detections['sizes_3d'], rotations_y]"], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (641, 'tensorflow.concat', 'tf.concat', (['labeled_sdfs'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (643, 'tensorflow.cast', 'tf.cast', (["sample['groundtruth_valid_classes']", 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (644, 'numpy.argsort', 'np.argsort', (['labeled_classes'], {}), True, 'import numpy as np\n'), (657, 'tensorflow.cast', 'tf.cast', (["detections['detection_classes']", 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (658, 'numpy.argsort', 'np.argsort', (['predicted_classes'], {}), True, 'import numpy as np\n'), (207, 'tensorflow.nn.relu', 'tf.nn.relu', (['(interpolated + self.tol)'], {}), True, 'import tensorflow as tf\n'), (212, 'tensorflow.reshape', 'tf.reshape', (['values', '[self.resolution, self.resolution, self.resolution]'], {}), True, 'import tensorflow as tf\n'), (217, 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'ax': 'axs[fig_obj_count, mtype * 2 + 0]'}), True, 'import matplotlib.pyplot as plt\n'), (221, 'tensorflow.reshape', 'tf.reshape', (['values', '[self.resolution, self.resolution, self.resolution]'], {}), True, 'import tensorflow as tf\n'), (226, 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'ax': 'axs[fig_obj_count, mtype * 2 + 1]'}), True, 'import matplotlib.pyplot as plt\n'), (636, 'os.path.join', 'os.path.join', (['self.shapenet_dir', 'class_id', 'model_name'], {}), False, 'import os\n'), (637, 'os.path.join', 'os.path.join', (['path_prefix', '"""model_normalized_sdf.npy"""'], {}), False, 'import os\n'), (706, 'tensorflow.cast', 'tf.cast', (["sample['groundtruth_valid_classes']", 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (711, 'tensorflow.cast', 'tf.cast', (["detections['detection_classes']", 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (198, 'tensorflow.reshape', 'tf.reshape', (['samples_world', '[1, 1, -1, 3]'], {}), True, 'import tensorflow as tf\n'), (199, 'tensorflow.reshape', 'tf.reshape', (['poses[2][i]', '[1, 1, 3]'], {}), True, 'import tensorflow as 
tf\n'), (200, 'tensorflow.reshape', 'tf.reshape', (['poses[0][i]', '[1, 1, 3, 3]'], {}), True, 'import tensorflow as tf\n'), (201, 'tensorflow.reshape', 'tf.reshape', (['poses[1][i]', '[1, 1, 3]'], {}), True, 'import tensorflow as tf\n'), (215, 'tensorflow.reduce_max', 'tf.reduce_max', (['inter'], {'axis': 'a'}), True, 'import tensorflow as tf\n'), (220, 'tensorflow.nn.relu', 'tf.nn.relu', (['(interpolated + self.tol)'], {}), True, 'import tensorflow as tf\n'), (224, 'tensorflow.reduce_max', 'tf.reduce_max', (['inter'], {'axis': 'a'}), True, 'import tensorflow as tf\n'), (231, 'tensorflow.reshape', 'tf.reshape', (['values', '[self.resolution, self.resolution, self.resolution]'], {}), True, 'import tensorflow as tf\n'), (236, 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'ax': 'axs[fig_obj_count, 4]'}), True, 'import matplotlib.pyplot as plt\n'), (638, 'google3.pyglib.gfile.Open', 'gfile.Open', (['file_sdf', '"""rb"""'], {}), False, 'from google3.pyglib import gfile\n'), (234, 'tensorflow.reduce_max', 'tf.reduce_max', (['inter'], {'axis': 'a'}), True, 'import tensorflow as tf\n'), (606, 'tensorflow.reshape', 'tf.reshape', (["detections['rotations_3d'][i]", '[3, 3]'], {}), True, 'import tensorflow as tf\n'), (617, 'tensorflow.reshape', 'tf.reshape', (["detections['rotations_3d'][i]", '[3, 3]'], {}), True, 'import tensorflow as tf\n'), (639, 'numpy.load', 'np.load', (['f'], {}), True, 'import numpy as np\n')]
qimingj/tensor2tensor
a6df48799dc93176df94c36d3a1aea75caa7c594
# coding=utf-8 # Copyright 2018 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Basic models for testing simple tasks.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensor2tensor.layers import common_attention from tensor2tensor.layers import common_layers from tensor2tensor.layers import common_video from tensor2tensor.models.video import base_vae from tensor2tensor.models.video import basic_deterministic from tensor2tensor.models.video import basic_deterministic_params from tensor2tensor.utils import registry import tensorflow as tf @registry.register_model class NextFrameBasicStochastic( basic_deterministic.NextFrameBasicDeterministic, base_vae.NextFrameBaseVae): """Stochastic version of basic next-frame model.""" def inject_latent(self, layer, features, filters): """Inject a VAE-style latent.""" # Latent for stochastic model input_frames = tf.to_float(features["inputs_raw"]) target_frames = tf.to_float(features["targets_raw"]) full_video = tf.concat([input_frames, target_frames], axis=1) latent_mean, latent_std = self.construct_latent_tower( full_video, time_axis=1) latent = common_video.get_gaussian_tensor(latent_mean, latent_std) latent = tf.layers.flatten(latent) latent = tf.expand_dims(latent, axis=1) latent = tf.expand_dims(latent, axis=1) latent_mask = tf.layers.dense(latent, filters, name="latent_mask") zeros_mask = tf.zeros( common_layers.shape_list(layer)[:-1] + [filters], dtype=tf.float32) layer = tf.concat([layer, latent_mask + zeros_mask], axis=-1) extra_loss = self.get_extra_loss(latent_mean, latent_std) return layer, extra_loss @registry.register_model class NextFrameBasicStochasticDiscrete( basic_deterministic.NextFrameBasicDeterministic): """Basic next-frame model with a tiny discrete latent.""" def inject_latent(self, layer, features, filters): """Inject a deterministic latent based on the target frame.""" del filters hparams = self.hparams final_filters = common_layers.shape_list(layer)[-1] filters = hparams.hidden_size kernel = (4, 4) if hparams.mode == tf.estimator.ModeKeys.PREDICT: layer_shape = common_layers.shape_list(layer) if hparams.full_latent_tower: rand = tf.random_uniform(layer_shape[:-1] + [hparams.bottleneck_bits]) else: rand = tf.random_uniform(layer_shape[:-3] + [ 1, 1, hparams.bottleneck_bits]) d = 2.0 * tf.to_float(tf.less(0.5, rand)) - 1.0 z = tf.layers.dense(d, final_filters, name="unbottleneck") return layer + z, 0.0 # Embed. 
x = tf.layers.dense( features["cur_target_frame"], filters, name="latent_embed", bias_initializer=tf.random_normal_initializer(stddev=0.01)) x = common_attention.add_timing_signal_nd(x) if hparams.full_latent_tower: for i in range(hparams.num_compress_steps): with tf.variable_scope("latent_downstride%d" % i): x = common_layers.make_even_size(x) if i < hparams.filter_double_steps: filters *= 2 x = common_attention.add_timing_signal_nd(x) x = tf.layers.conv2d(x, filters, kernel, activation=common_layers.belu, strides=(2, 2), padding="SAME") x = common_layers.layer_norm(x) else: x = common_layers.double_discriminator(x) x = tf.expand_dims(tf.expand_dims(x, axis=1), axis=1) x = tf.tanh(tf.layers.dense(x, hparams.bottleneck_bits, name="bottleneck")) d = x + tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x)) - 1.0 - x) if hparams.mode == tf.estimator.ModeKeys.TRAIN: noise = tf.random_uniform(common_layers.shape_list(x)) noise = 2.0 * tf.to_float(tf.less(hparams.bottleneck_noise, noise)) - 1.0 d *= noise z = tf.layers.dense(d, final_filters, name="unbottleneck") return layer + z, 0.0 @registry.register_hparams def next_frame_basic_stochastic(): """Basic 2-frame conv model with stochastic tower.""" hparams = basic_deterministic_params.next_frame_basic_deterministic() hparams.stochastic_model = True hparams.add_hparam("latent_channels", 1) hparams.add_hparam("latent_std_min", -5.0) hparams.add_hparam("num_iterations_1st_stage", 25000) hparams.add_hparam("num_iterations_2nd_stage", 25000) hparams.add_hparam("latent_loss_multiplier", 1e-3) hparams.add_hparam("latent_loss_multiplier_dynamic", False) hparams.add_hparam("latent_loss_multiplier_alpha", 1e-5) hparams.add_hparam("latent_loss_multiplier_epsilon", 1.0) hparams.add_hparam("latent_loss_multiplier_schedule", "constant") hparams.add_hparam("latent_num_frames", 0) # 0 means use all frames. hparams.add_hparam("anneal_end", 100000) hparams.add_hparam("information_capacity", 0.0) return hparams @registry.register_hparams def next_frame_basic_stochastic_discrete(): """Basic 2-frame conv model with stochastic discrete latent.""" hparams = basic_deterministic_params.next_frame_sampling() hparams.add_hparam("bottleneck_bits", 16) hparams.add_hparam("bottleneck_noise", 0.02) hparams.add_hparam("full_latent_tower", False) return hparams
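# Aside: the discrete bottleneck in inject_latent above relies on the
# straight-through gradient trick: the forward pass emits the binarized
# code, while the backward pass treats the binarization as identity. A
# minimal TF1-style sketch of just that trick (tensor names are
# illustrative, not the model's own):
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 16])       # pre-binarization code
hard = 2.0 * tf.to_float(tf.less(0.0, x)) - 1.0  # sign(x), in {-1, +1}
# Forward value of d is `hard`; tf.stop_gradient hides the
# non-differentiable part, so d(d)/d(x) is exactly 1 in the backward pass.
d = x + tf.stop_gradient(hard - x)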
[ "tensorflow.layers.flatten", "tensorflow.layers.conv2d", "tensorflow.concat", "tensorflow.less", "tensorflow.expand_dims", "tensorflow.layers.dense", "tensorflow.to_float", "tensorflow.variable_scope", "tensorflow.random_normal_initializer", "tensorflow.random_uniform" ]
tensor2tensor/models/video/basic_stochastic.py
[(118, 'tensor2tensor.models.video.basic_deterministic_params.next_frame_basic_deterministic', 'basic_deterministic_params.next_frame_basic_deterministic', ([], {}), False, 'from tensor2tensor.models.video import basic_deterministic_params\n'), (138, 'tensor2tensor.models.video.basic_deterministic_params.next_frame_sampling', 'basic_deterministic_params.next_frame_sampling', ([], {}), False, 'from tensor2tensor.models.video import basic_deterministic_params\n'), (43, 'tensorflow.to_float', 'tf.to_float', (["features['inputs_raw']"], {}), True, 'import tensorflow as tf\n'), (44, 'tensorflow.to_float', 'tf.to_float', (["features['targets_raw']"], {}), True, 'import tensorflow as tf\n'), (45, 'tensorflow.concat', 'tf.concat', (['[input_frames, target_frames]'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (48, 'tensor2tensor.layers.common_video.get_gaussian_tensor', 'common_video.get_gaussian_tensor', (['latent_mean', 'latent_std'], {}), False, 'from tensor2tensor.layers import common_video\n'), (49, 'tensorflow.layers.flatten', 'tf.layers.flatten', (['latent'], {}), True, 'import tensorflow as tf\n'), (50, 'tensorflow.expand_dims', 'tf.expand_dims', (['latent'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (51, 'tensorflow.expand_dims', 'tf.expand_dims', (['latent'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (52, 'tensorflow.layers.dense', 'tf.layers.dense', (['latent', 'filters'], {'name': '"""latent_mask"""'}), True, 'import tensorflow as tf\n'), (55, 'tensorflow.concat', 'tf.concat', (['[layer, latent_mask + zeros_mask]'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (88, 'tensor2tensor.layers.common_attention.add_timing_signal_nd', 'common_attention.add_timing_signal_nd', (['x'], {}), False, 'from tensor2tensor.layers import common_attention\n'), (111, 'tensorflow.layers.dense', 'tf.layers.dense', (['d', 'final_filters'], {'name': '"""unbottleneck"""'}), True, 'import tensorflow as tf\n'), (69, 'tensor2tensor.layers.common_layers.shape_list', 'common_layers.shape_list', (['layer'], {}), False, 'from tensor2tensor.layers import common_layers\n'), (74, 'tensor2tensor.layers.common_layers.shape_list', 'common_layers.shape_list', (['layer'], {}), False, 'from tensor2tensor.layers import common_layers\n'), (81, 'tensorflow.layers.dense', 'tf.layers.dense', (['d', 'final_filters'], {'name': '"""unbottleneck"""'}), True, 'import tensorflow as tf\n'), (102, 'tensor2tensor.layers.common_layers.double_discriminator', 'common_layers.double_discriminator', (['x'], {}), False, 'from tensor2tensor.layers import common_layers\n'), (104, 'tensorflow.layers.dense', 'tf.layers.dense', (['x', 'hparams.bottleneck_bits'], {'name': '"""bottleneck"""'}), True, 'import tensorflow as tf\n'), (76, 'tensorflow.random_uniform', 'tf.random_uniform', (['(layer_shape[:-1] + [hparams.bottleneck_bits])'], {}), True, 'import tensorflow as tf\n'), (78, 'tensorflow.random_uniform', 'tf.random_uniform', (['(layer_shape[:-3] + [1, 1, hparams.bottleneck_bits])'], {}), True, 'import tensorflow as tf\n'), (87, 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.01)'}), True, 'import tensorflow as tf\n'), (103, 'tensorflow.expand_dims', 'tf.expand_dims', (['x'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (107, 'tensor2tensor.layers.common_layers.shape_list', 'common_layers.shape_list', (['x'], {}), False, 'from tensor2tensor.layers import common_layers\n'), (54, 'tensor2tensor.layers.common_layers.shape_list', 'common_layers.shape_list', 
(['layer'], {}), False, 'from tensor2tensor.layers import common_layers\n'), (92, 'tensorflow.variable_scope', 'tf.variable_scope', (["('latent_downstride%d' % i)"], {}), True, 'import tensorflow as tf\n'), (93, 'tensor2tensor.layers.common_layers.make_even_size', 'common_layers.make_even_size', (['x'], {}), False, 'from tensor2tensor.layers import common_layers\n'), (96, 'tensor2tensor.layers.common_attention.add_timing_signal_nd', 'common_attention.add_timing_signal_nd', (['x'], {}), False, 'from tensor2tensor.layers import common_attention\n'), (97, 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['x', 'filters', 'kernel'], {'activation': 'common_layers.belu', 'strides': '(2, 2)', 'padding': '"""SAME"""'}), True, 'import tensorflow as tf\n'), (100, 'tensor2tensor.layers.common_layers.layer_norm', 'common_layers.layer_norm', (['x'], {}), False, 'from tensor2tensor.layers import common_layers\n'), (80, 'tensorflow.less', 'tf.less', (['(0.5)', 'rand'], {}), True, 'import tensorflow as tf\n'), (108, 'tensorflow.less', 'tf.less', (['hparams.bottleneck_noise', 'noise'], {}), True, 'import tensorflow as tf\n'), (105, 'tensorflow.less', 'tf.less', (['(0.0)', 'x'], {}), True, 'import tensorflow as tf\n')]
cankunqiu/tensorlayer2
423283ef96d6db485e431d01e360535d1803f34d
#! /usr/bin/python
# -*- coding: utf-8 -*-

import tensorflow as tf

from tensorlayer.layers.core import Layer
from tensorlayer import logging
from tensorlayer.decorators import deprecated_alias
from tensorlayer.decorators import private_method

__all__ = [
    'SubpixelConv1d',
    'SubpixelConv2d',
]


class SubpixelConv2d(Layer):
    """A 2D sub-pixel up-sampling layer, usually used for super-resolution
    applications; see `SRGAN <https://github.com/tensorlayer/srgan/>`__ for an example.

    Parameters
    ------------
    scale : int
        The up-scaling ratio; a wrong setting will lead to a dimension-size error.
    n_out_channel : int or None
        The number of output channels.
        - If None, it is inferred as (the number of input channels) / (scale x scale).
        - The number of input channels must equal (scale x scale) x (the number of output channels).
    act : activation function
        The activation function of this layer.
    name : str
        A unique layer name.

    Examples
    ---------
    The examples below show how to set `n_out_channel`.

    >>> import numpy as np
    >>> import tensorflow as tf
    >>> import tensorlayer as tl
    >>> x = np.random.rand(2, 16, 16, 4)
    >>> X = tf.placeholder("float32", shape=(2, 16, 16, 4), name="X")
    >>> net = tl.layers.InputLayer(X, name='input')
    >>> net = tl.layers.SubpixelConv2d(net, scale=2, n_out_channel=1, name='subpixel_conv2d')
    >>> sess = tf.Session()
    >>> y = sess.run(net.outputs, feed_dict={X: x})
    >>> print(x.shape, y.shape)
    (2, 16, 16, 4) (2, 32, 32, 1)

    >>> x = np.random.rand(2, 16, 16, 4*10)
    >>> X = tf.placeholder("float32", shape=(2, 16, 16, 4*10), name="X")
    >>> net = tl.layers.InputLayer(X, name='input2')
    >>> net = tl.layers.SubpixelConv2d(net, scale=2, n_out_channel=10, name='subpixel_conv2d2')
    >>> y = sess.run(net.outputs, feed_dict={X: x})
    >>> print(x.shape, y.shape)
    (2, 16, 16, 40) (2, 32, 32, 10)

    >>> x = np.random.rand(2, 16, 16, 25*10)
    >>> X = tf.placeholder("float32", shape=(2, 16, 16, 25*10), name="X")
    >>> net = tl.layers.InputLayer(X, name='input3')
    >>> net = tl.layers.SubpixelConv2d(net, scale=5, n_out_channel=None, name='subpixel_conv2d3')
    >>> y = sess.run(net.outputs, feed_dict={X: x})
    >>> print(x.shape, y.shape)
    (2, 16, 16, 250) (2, 80, 80, 10)

    References
    ------------
    - `Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network <https://arxiv.org/pdf/1609.05158.pdf>`__

    """
    # https://github.com/Tetrachrome/subpixel/blob/master/subpixel.py

    def __init__(self, scale=2, n_out_channel=None, act=None, name=None):
        super().__init__(name)
        self.scale = scale
        self.n_out_channel = n_out_channel
        self.act = act

        logging.info(
            "SubpixelConv2d %s: scale: %d n_out_channel: %s act: %s" % (
                self.name, scale, n_out_channel,
                self.act.__name__ if self.act is not None else 'No Activation'
            )
        )

    def build(self, inputs):
        # Infer the number of output channels from the input shape when it was
        # not given. This has to happen here rather than in __init__, because
        # the input shape is only known at build time.
        if self.n_out_channel is None:
            in_channels = int(inputs.get_shape()[-1])
            if in_channels % (self.scale**2) != 0:
                raise ValueError(
                    "SubpixelConv2d: The number of input channels must equal "
                    "(scale x scale) x the number of output channels"
                )
            self.n_out_channel = in_channels // (self.scale**2)

    def forward(self, inputs):
        """
        Parameters
        ------------
        inputs : tensor
            Input tensor with shape (batch, height, width, channels).
        """
        outputs = self._PS(inputs, r=self.scale, n_out_channels=self.n_out_channel)
        if self.act is not None:
            outputs = self.act(outputs)
        return outputs

    @private_method
    def _PS(self, X, r, n_out_channels):
        _err_log = ("SubpixelConv2d: The number of input channels must equal "
                    "(scale x scale) x the number of output channels")
        if n_out_channels < 1 or int(X.get_shape()[-1]) != (r**2) * n_out_channels:
            raise ValueError(_err_log)
        # Periodic shuffling: (b, h, w, c*r*r) -> (b, h*r, w*r, c).
        return tf.depth_to_space(X, r)


class SubpixelConv1d(Layer):
    """A 1D sub-pixel up-sampling layer.

    Calls a TensorFlow function that directly implements this functionality.
    We assume the input has shape (batch, width, r).

    Parameters
    ------------
    scale : int
        The up-scaling ratio; a wrong setting will lead to a dimension-size error.
    act : activation function
        The activation function of this layer.
    name : str
        A unique layer name.

    Examples
    ----------
    >>> import tensorflow as tf
    >>> import tensorlayer as tl
    >>> t_signal = tf.placeholder('float32', [10, 100, 4], name='x')
    >>> n = tl.layers.InputLayer(t_signal, name='in')
    >>> n = tl.layers.SubpixelConv1d(n, scale=2, name='s')
    >>> print(n.outputs.shape)
    (10, 200, 2)

    References
    -----------
    `Audio Super Resolution Implementation <https://github.com/kuleshov/audio-super-res/blob/master/src/models/layers/subpixel.py>`__.

    """

    def __init__(self, scale=2, act=None, name=None):
        super().__init__(name)
        self.scale = scale
        self.act = act

        logging.info(
            "SubpixelConv1d %s: scale: %d act: %s" % (
                self.name, scale,
                self.act.__name__ if self.act is not None else 'No Activation'
            )
        )

    def build(self, inputs):
        pass

    def forward(self, inputs):
        """
        Parameters
        ------------
        inputs : tensor
            Input tensor with shape (batch, width, r).
        """
        outputs = self._PS(inputs, r=self.scale)
        if self.act is not None:
            outputs = self.act(outputs)
        return outputs

    @private_method
    def _PS(self, I, r):
        X = tf.transpose(I, [2, 1, 0])              # (r, w, b)
        X = tf.batch_to_space_nd(X, [r], [[0, 0]])  # (1, r*w, b)
        X = tf.transpose(X, [2, 1, 0])              # (b, r*w, 1)
        return X
[ "tensorflow.batch_to_space_nd", "tensorflow.depth_to_space", "tensorflow.transpose" ]
tensorlayer/layers/convolution/super_resolution.py
[(90, 'tensorlayer.logging.info', 'logging.info', (["('SubpixelConv2d %s: scale: %d n_out_channel: %s act: %s' % (self.name,\n scale, n_out_channel, self.act.__name__ if self.act is not None else\n 'No Activation'))"], {}), False, 'from tensorlayer import logging\n'), (166, 'tensorlayer.logging.info', 'logging.info', (["('SubpixelConv1d %s: scale: %d act: %s' % (self.name, scale, self.act.\n __name__ if self.act is not None else 'No Activation'))"], {}), False, 'from tensorlayer import logging\n'), (189, 'tensorflow.transpose', 'tf.transpose', (['I', '[2, 1, 0]'], {}), True, 'import tensorflow as tf\n'), (190, 'tensorflow.batch_to_space_nd', 'tf.batch_to_space_nd', (['X', '[r]', '[[0, 0]]'], {}), True, 'import tensorflow as tf\n'), (191, 'tensorflow.transpose', 'tf.transpose', (['X', '[2, 1, 0]'], {}), True, 'import tensorflow as tf\n'), (122, 'tensorflow.depth_to_space', 'tf.depth_to_space', (['X', 'r'], {}), True, 'import tensorflow as tf\n')]
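The layer above ultimately delegates the 2D pixel shuffle to `tf.depth_to_space`. A minimal standalone sketch of that rearrangement, assuming TensorFlow 2.x (where the op lives under `tf.nn.depth_to_space`; the file above targets the 1.x-era `tf.depth_to_space`):

# Pixel shuffle: (batch, h, w, c*r*r) -> (batch, h*r, w*r, c).
import tensorflow as tf

scale = 2
x = tf.random.uniform([1, 16, 16, 4 * scale**2])  # 16 input channels
y = tf.nn.depth_to_space(x, block_size=scale)
print(x.shape, "->", y.shape)                     # (1, 16, 16, 16) -> (1, 32, 32, 4)

# The channel constraint the layer checks: in_channels == r*r * out_channels.
assert x.shape[-1] == scale**2 * y.shape[-1]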
PacktPublishing/TensorFlow-for-Machine-Learning-Solutions-
3f258ee117bffaf18f5420fc4e6eefaab604fa02
import tensorflow as tf
from tensorflow.python.framework import ops

ops.reset_default_graph()
sess = tf.Session()

my_tensor = tf.zeros([1, 20])
sess.run(my_tensor)

my_var = tf.Variable(tf.zeros([1, 20]))
sess.run(my_var.initializer)
sess.run(my_var)

row_dim = 2
col_dim = 3

zero_var = tf.Variable(tf.zeros([row_dim, col_dim]))
ones_var = tf.Variable(tf.ones([row_dim, col_dim]))
sess.run(zero_var.initializer)
sess.run(ones_var.initializer)
print(sess.run(zero_var))
print(sess.run(ones_var))

zero_similar = tf.Variable(tf.zeros_like(zero_var))
ones_similar = tf.Variable(tf.ones_like(ones_var))
sess.run(ones_similar.initializer)
sess.run(zero_similar.initializer)
print(sess.run(ones_similar))
print(sess.run(zero_similar))

fill_var = tf.Variable(tf.fill([row_dim, col_dim], -1))
sess.run(fill_var.initializer)
print(sess.run(fill_var))

const_var = tf.Variable(tf.constant([8, 6, 7, 5, 3, 0, 9]))
const_fill_var = tf.Variable(tf.constant(-1, shape=[row_dim, col_dim]))
sess.run(const_var.initializer)
sess.run(const_fill_var.initializer)
print(sess.run(const_var))
print(sess.run(const_fill_var))

linear_var = tf.Variable(tf.linspace(start=0.0, stop=1.0, num=3))  # Generates [0.0, 0.5, 1.0]; includes the end
sequence_var = tf.Variable(tf.range(start=6, limit=15, delta=3))   # Generates [6, 9, 12]; doesn't include the end
sess.run(linear_var.initializer)
sess.run(sequence_var.initializer)
print(sess.run(linear_var))
print(sess.run(sequence_var))

rnorm_var = tf.random_normal([row_dim, col_dim], mean=0.0, stddev=1.0)
runif_var = tf.random_uniform([row_dim, col_dim], minval=0, maxval=4)
print(sess.run(rnorm_var))
print(sess.run(runif_var))

ops.reset_default_graph()
sess = tf.Session()

my_var = tf.Variable(tf.zeros([1, 20]))
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter("./logs", graph=sess.graph)
initialize_op = tf.global_variables_initializer()
sess.run(initialize_op)
[ "tensorflow.fill", "tensorflow.summary.FileWriter", "tensorflow.constant", "tensorflow.range", "tensorflow.zeros", "tensorflow.ones_like", "tensorflow.ones", "tensorflow.global_variables_initializer", "tensorflow.zeros_like", "tensorflow.summary.merge_all", "tensorflow.Session", "tensorflow.linspace", "tensorflow.random_uniform", "tensorflow.python.framework.ops.reset_default_graph", "tensorflow.random_normal" ]
Section 1/How TensorFlow Works.py
[(3, 'tensorflow.python.framework.ops.reset_default_graph', 'ops.reset_default_graph', ([], {}), False, 'from tensorflow.python.framework import ops\n'), (5, 'tensorflow.Session', 'tf.Session', ([], {}), True, 'import tensorflow as tf\n'), (7, 'tensorflow.zeros', 'tf.zeros', (['[1, 20]'], {}), True, 'import tensorflow as tf\n'), (52, 'tensorflow.random_normal', 'tf.random_normal', (['[row_dim, col_dim]'], {'mean': '(0.0)', 'stddev': '(1.0)'}), True, 'import tensorflow as tf\n'), (53, 'tensorflow.random_uniform', 'tf.random_uniform', (['[row_dim, col_dim]'], {'minval': '(0)', 'maxval': '(4)'}), True, 'import tensorflow as tf\n'), (57, 'tensorflow.python.framework.ops.reset_default_graph', 'ops.reset_default_graph', ([], {}), False, 'from tensorflow.python.framework import ops\n'), (58, 'tensorflow.Session', 'tf.Session', ([], {}), True, 'import tensorflow as tf\n'), (60, 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), True, 'import tensorflow as tf\n'), (61, 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['"""./logs"""'], {'graph': 'sess.graph'}), True, 'import tensorflow as tf\n'), (62, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (11, 'tensorflow.zeros', 'tf.zeros', (['[1, 20]'], {}), True, 'import tensorflow as tf\n'), (19, 'tensorflow.zeros', 'tf.zeros', (['[row_dim, col_dim]'], {}), True, 'import tensorflow as tf\n'), (20, 'tensorflow.ones', 'tf.ones', (['[row_dim, col_dim]'], {}), True, 'import tensorflow as tf\n'), (27, 'tensorflow.zeros_like', 'tf.zeros_like', (['zero_var'], {}), True, 'import tensorflow as tf\n'), (28, 'tensorflow.ones_like', 'tf.ones_like', (['ones_var'], {}), True, 'import tensorflow as tf\n'), (34, 'tensorflow.fill', 'tf.fill', (['[row_dim, col_dim]', '(-1)'], {}), True, 'import tensorflow as tf\n'), (38, 'tensorflow.constant', 'tf.constant', (['[8, 6, 7, 5, 3, 0, 9]'], {}), True, 'import tensorflow as tf\n'), (39, 'tensorflow.constant', 'tf.constant', (['(-1)'], {'shape': '[row_dim, col_dim]'}), True, 'import tensorflow as tf\n'), (45, 'tensorflow.linspace', 'tf.linspace', ([], {'start': '(0.0)', 'stop': '(1.0)', 'num': '(3)'}), True, 'import tensorflow as tf\n'), (46, 'tensorflow.range', 'tf.range', ([], {'start': '(6)', 'limit': '(15)', 'delta': '(3)'}), True, 'import tensorflow as tf\n'), (59, 'tensorflow.zeros', 'tf.zeros', (['[1, 20]'], {}), True, 'import tensorflow as tf\n')]
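The two sequence ops above differ only in endpoint handling, which the inline comments note. A minimal eager-mode sketch verifying it, assuming TensorFlow 2.x (the script above uses the 1.x graph/session API):

# tf.linspace includes its endpoint; tf.range excludes its limit.
import tensorflow as tf

print(tf.linspace(0.0, 1.0, 3).numpy())        # [0.  0.5 1. ]  endpoint included
print(tf.range(6, limit=15, delta=3).numpy())  # [ 6  9 12]     limit excluded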
xhsheng-ustc/Deep-PCAC
3e655fc2df5c4491257f1556ac34e1f0b270e974
import os
import argparse
import numpy as np
import tensorflow as tf
import importlib
import subprocess
tf.enable_eager_execution()
from entropy_model import EntropyBottleneck
from conditional_entropy_model import SymmetricConditional
import open3d as o3d

###################################### Preprocess & Postprocess ######################################

def preprocess(input_file, points_num=2048):
    """Partition.
    Input: .ply file and arguments for pre-processing.
    Output: partitioned point sets and the number of points in the last (partial) set.
    """
    print('===== Partition =====')
    # scaling (optional)
    pcd = o3d.io.read_point_cloud(input_file)
    coordinate = np.asarray(pcd.points)
    color = np.asarray(pcd.colors)
    point_cloud = np.concatenate((coordinate, color), axis=1)
    number_of_points_of_ply = point_cloud.shape[0]
    number_of_feature = point_cloud.shape[1]
    set_num = int(np.ceil(number_of_points_of_ply / points_num))

    point_set = np.zeros((1, points_num, number_of_feature))
    point_cloud = np.expand_dims(point_cloud, 0)
    for i in range(set_num):
        if i < set_num - 1:
            point_set = np.concatenate((point_set, point_cloud[:, i * 2048:(i + 1) * 2048, :]), 0)
        else:
            temp = np.zeros((1, points_num, number_of_feature))
            # number of points in the last set, which holds fewer than 2048 points
            num_less_than_2048 = number_of_points_of_ply - points_num * i
            temp[:, 0:num_less_than_2048, :] = point_cloud[:, i * points_num:, :]
            point_set = np.concatenate((point_set, temp), 0)
    point_set = point_set[1:, :, :]
    print(point_set.shape)
    print("Partition")
    return point_set, num_less_than_2048

def postprocess(output_file, point_set, num_less_than_2048, points_num=2048):
    """Reconstruct the point cloud and write it to a .ply file.
    Input: output_file, point_set
    """
    set_num = point_set.shape[0]
    feature_num = point_set.shape[2]
    number_of_points_of_ply = (set_num - 1) * points_num + num_less_than_2048
    point_cloud = np.zeros((number_of_points_of_ply, feature_num))
    for i in range(set_num):
        if i < set_num - 1:
            point_cloud[i * 2048:(i + 1) * 2048] = point_set[i]
        else:
            point_cloud[i * 2048:] = point_set[i, 0:num_less_than_2048, :]

    pcd = o3d.geometry.PointCloud()
    point_ori_position = point_cloud[:, 0:3]
    point_ori_color = point_cloud[:, 3:6]
    pcd.points = o3d.utility.Vector3dVector(point_ori_position)
    pcd.colors = o3d.utility.Vector3dVector(point_ori_color)
    o3d.io.write_point_cloud(output_file, pcd, write_ascii=False)
    return point_cloud

###################################### Compress & Decompress ######################################

def compress(x_coori, x_color, model, ckpt_dir, latent_points):
    """Compress point sets to a bitstream.
    Input: point sets with shape [batch size, num_points=2048, num_features=6].
    Output: compressed bitstream.
    """
    print('===== Compress =====')
    # load model.
    model = importlib.import_module(model)
    analysis_transform = model.AnalysisTransform(latent_points)
    hyper_encoder = model.HyperEncoder()
    hyper_decoder = model.HyperDecoder()
    entropy_bottleneck = EntropyBottleneck()
    conditional_entropy_model = SymmetricConditional()

    checkpoint = tf.train.Checkpoint(
        analysis_transform=analysis_transform, hyper_encoder=hyper_encoder,
        hyper_decoder=hyper_decoder, estimator=entropy_bottleneck)
    status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir))

    x = tf.convert_to_tensor(x_color, "float32")
    x_coori = tf.convert_to_tensor(x_coori, "float32")

    def loop_analysis(element):
        x = tf.expand_dims(element[0], 0)
        x_coori = tf.expand_dims(element[1], 0)
        y = analysis_transform(x_coori, x)
        return tf.squeeze(y, axis=0)

    element = [x, x_coori]
    ys = tf.map_fn(loop_analysis, element, dtype=tf.float32, parallel_iterations=1, back_prop=False)
    print("Analysis Transform")

    def loop_hyper_encoder(y):
        y = tf.expand_dims(y, 0)
        z = hyper_encoder(y)
        return tf.squeeze(z, axis=0)

    zs = tf.map_fn(loop_hyper_encoder, ys, dtype=tf.float32, parallel_iterations=1, back_prop=False)
    print("Hyper Encoder")

    z_hats, _ = entropy_bottleneck(zs, False)
    print("Quantize hyperprior")

    def loop_hyper_decoder(z):
        z = tf.expand_dims(z, 0)
        loc, scale = hyper_decoder(z)
        return tf.squeeze(loc, [0]), tf.squeeze(scale, [0])

    locs, scales = tf.map_fn(loop_hyper_decoder, z_hats, dtype=(tf.float32, tf.float32),
                             parallel_iterations=1, back_prop=False)
    lower_bound = 1e-9  # TODO
    scales = tf.maximum(scales, lower_bound)
    print("Hyper Decoder")

    z_strings, z_min_v, z_max_v = entropy_bottleneck.compress(zs)
    z_shape = tf.shape(zs)[:]
    print("Entropy Encode (Hyper)")

    y_strings, y_min_v, y_max_v = conditional_entropy_model.compress(ys, locs, scales)
    y_shape = tf.shape(ys)[:]
    print("Entropy Encode")

    return y_strings, y_min_v, y_max_v, y_shape, z_strings, z_min_v, z_max_v, z_shape

def decompress(x_coori, y_strings, y_min_v, y_max_v, y_shape,
               z_strings, z_min_v, z_max_v, z_shape, model, ckpt_dir, latent_points):
    """Decompress a bitstream to point sets.
    Input: compressed bitstream, i.e. the latent representations (y) and the hyper prior (z).
    Output: decoded color attributes for each point set.
    """
    print('===== Decompress =====')
    # load model.
    model = importlib.import_module(model)
    synthesis_transform = model.SynthesisTransform(latent_points)
    hyper_encoder = model.HyperEncoder()
    hyper_decoder = model.HyperDecoder()
    entropy_bottleneck = EntropyBottleneck()
    conditional_entropy_model = SymmetricConditional()

    checkpoint = tf.train.Checkpoint(
        synthesis_transform=synthesis_transform, hyper_encoder=hyper_encoder,
        hyper_decoder=hyper_decoder, estimator=entropy_bottleneck)
    status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir))

    zs = entropy_bottleneck.decompress(z_strings, z_min_v, z_max_v, z_shape, z_shape[-1])
    print("Entropy Decoder (Hyper)")

    def loop_hyper_decoder(z):
        z = tf.expand_dims(z, 0)
        loc, scale = hyper_decoder(z)
        return tf.squeeze(loc, [0]), tf.squeeze(scale, [0])

    locs, scales = tf.map_fn(loop_hyper_decoder, zs, dtype=(tf.float32, tf.float32),
                             parallel_iterations=1, back_prop=False)
    lower_bound = 1e-9  # TODO
    scales = tf.maximum(scales, lower_bound)
    print("Hyper Decoder")

    ys = conditional_entropy_model.decompress(y_strings, locs, scales, y_min_v, y_max_v, y_shape)
    print("Entropy Decoder")

    def loop_synthesis(element):
        y = tf.expand_dims(element[0], 0)
        x_coori = tf.expand_dims(element[1], 0)
        x_coori = tf.cast(x_coori, tf.float32)
        x = synthesis_transform(x_coori, y)
        return tf.squeeze(x, [0])

    element = [ys, x_coori]
    xs = tf.map_fn(loop_synthesis, element, dtype=tf.float32, parallel_iterations=1, back_prop=False)
    print("Synthesis Transform")

    return xs

###################################### write & read binary files. ######################################

def write_binary_files(filename, y_strings, z_strings, points_numbers_less_than2048,
                       y_min_v, y_max_v, y_shape, z_min_v, z_max_v, z_shape, rootdir='/code'):
    """Write compressed binary files:
    1) Compressed latent features.
    2) Compressed hyperprior.
    3) Number of input points.
    """
    if not os.path.exists(rootdir):
        os.makedirs(rootdir)
    print('===== Write binary files =====')
    file_strings = os.path.join(rootdir, filename + '.strings')
    file_strings_hyper = os.path.join(rootdir, filename + '.strings_hyper')
    file_pointnums = os.path.join(rootdir, filename + '.pointnums')

    with open(file_strings, 'wb') as f:
        f.write(np.array(y_shape, dtype=np.int16).tobytes())
        f.write(np.array((y_min_v, y_max_v), dtype=np.int8).tobytes())
        f.write(y_strings)

    with open(file_strings_hyper, 'wb') as f:
        f.write(np.array(z_shape, dtype=np.int16).tobytes())
        f.write(np.array((z_min_v, z_max_v), dtype=np.int8).tobytes())
        f.write(z_strings)

    # TODO: Compress numbers of points.
    with open(file_pointnums, 'wb') as f:
        f.write(np.array(points_numbers_less_than2048, dtype=np.uint16).tobytes())

    bytes_strings = os.path.getsize(file_strings)
    bytes_strings_hyper = os.path.getsize(file_strings_hyper)
    bytes_pointnums = os.path.getsize(file_pointnums)

    print('Total file size (Bytes): {}'.format(bytes_strings + bytes_strings_hyper + bytes_pointnums))
    print('Strings (Bytes): {}'.format(bytes_strings))
    print('Strings hyper (Bytes): {}'.format(bytes_strings_hyper))
    print('Numbers of points (Bytes): {}'.format(bytes_pointnums))

    return bytes_strings, bytes_strings_hyper, bytes_pointnums

def read_binary_files(filename, rootdir='/code'):
    """Read from compressed binary files:
    1) Compressed latent features.
    2) Compressed hyperprior.
    3) Number of input points.
    """
    print('===== Read binary files =====')
    file_strings = os.path.join(rootdir, filename + '.strings')
    file_strings_hyper = os.path.join(rootdir, filename + '.strings_hyper')
    file_pointnums = os.path.join(rootdir, filename + '.pointnums')

    with open(file_strings, 'rb') as f:
        y_shape = np.frombuffer(f.read(2 * 4), dtype=np.int16)
        y_min_v, y_max_v = np.frombuffer(f.read(1 * 2), dtype=np.int8)
        y_strings = f.read()

    with open(file_strings_hyper, 'rb') as f:
        z_shape = np.frombuffer(f.read(2 * 4), dtype=np.int16)
        z_min_v, z_max_v = np.frombuffer(f.read(1 * 2), dtype=np.int8)
        z_strings = f.read()

    with open(file_pointnums, 'rb') as f:
        points_numbers_less_than2048 = np.frombuffer(f.read(2), dtype=np.uint16)

    return y_strings, z_strings, points_numbers_less_than2048, \
        y_min_v, y_max_v, y_shape, z_min_v, z_max_v, z_shape

def parse_args():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "command", choices=["compress", "decompress"],
        help="What to do: 'compress' reads a point cloud (.ply format) "
             "and writes compressed binary files. 'decompress' "
             "reads binary files and reconstructs the point cloud (.ply format). "
             "Input and output filenames need to be provided for the latter.")
    parser.add_argument("--input", default='', dest="input", help="Input filename.")
    parser.add_argument("--output", default='', dest="output", help="Output filename.")
    parser.add_argument(
        "--ckpt_dir", type=str, default='', dest="ckpt_dir",
        help="Checkpoint directory, trained with a given rate-distortion tradeoff.")
    parser.add_argument("--model", default="model", help="model.")
    parser.add_argument("--gpu", type=int, default=1, dest="gpu", help="use gpu (1) or not (0).")
    parser.add_argument("--latent_points", type=int, default=256, dest="latent_points")
    args = parser.parse_args()
    print(args)
    return args

if __name__ == "__main__":
    args = parse_args()
    if args.gpu == 1:
        os.environ['CUDA_VISIBLE_DEVICES'] = "0"
    else:
        os.environ['CUDA_VISIBLE_DEVICES'] = ""
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 1.0
    config.gpu_options.allow_growth = True
    config.log_device_placement = True
    sess = tf.Session(config=config)

    if args.command == "compress":
        rootdir, filename = os.path.split(args.input)
        if not args.output:
            args.output = filename.split('.')[0]
        print(args.output)
        point_set, num_less_than_2048 = preprocess(args.input)
        x_coori = point_set[:, :, 0:3]
        x_color = point_set[:, :, 3:6]
        y_strings, y_min_v, y_max_v, y_shape, z_strings, z_min_v, z_max_v, z_shape = compress(
            x_coori, x_color, args.model, args.ckpt_dir, args.latent_points)
        bytes_strings, bytes_strings_hyper, bytes_pointnums = write_binary_files(
            args.output, y_strings.numpy(), z_strings.numpy(), num_less_than_2048,
            y_min_v.numpy(), y_max_v.numpy(), y_shape.numpy(),
            z_min_v.numpy(), z_max_v.numpy(), z_shape.numpy(),
            rootdir='./compressed')
    elif args.command == "decompress":
        rootdir, filename = os.path.split(args.input)
        if not args.output:
            args.output = filename + "_rec.ply"
        ori_coordinate_path = args.input + ".ply"
        y_strings_d, z_strings_d, num_less_than_2048_d, \
            y_min_v_d, y_max_v_d, y_shape_d, z_min_v_d, z_max_v_d, z_shape_d = read_binary_files(
                filename, './compressed')
        point_set_ori, num_less_than_2048 = preprocess(ori_coordinate_path)
        ori_coori = point_set_ori[:, :, 0:3]
        rec_color = decompress(ori_coori, y_strings_d, y_min_v_d, y_max_v_d, y_shape_d,
                               z_strings_d, z_min_v_d, z_max_v_d, z_shape_d,
                               args.model, args.ckpt_dir, args.latent_points)
        ori_coori = point_set_ori[:, :, 0:3]
        rec_point_cloud = np.concatenate((ori_coori, rec_color), -1)
        postprocess(args.output, rec_point_cloud, int(num_less_than_2048_d), points_num=2048)
[ "tensorflow.convert_to_tensor", "numpy.expand_dims", "tensorflow.enable_eager_execution", "tensorflow.train.latest_checkpoint", "tensorflow.shape", "numpy.asarray", "tensorflow.train.Checkpoint", "tensorflow.maximum", "tensorflow.cast", "tensorflow.expand_dims", "tensorflow.squeeze", "numpy.concatenate", "tensorflow.ConfigProto", "numpy.ceil", "tensorflow.map_fn", "tensorflow.Session", "numpy.array", "numpy.zeros" ]
mycodec.py
[(7, 'tensorflow.enable_eager_execution', 'tf.enable_eager_execution', ([], {}), True, 'import tensorflow as tf\n'), (21, 'open3d.io.read_point_cloud', 'o3d.io.read_point_cloud', (['input_file'], {}), True, 'import open3d as o3d\n'), (22, 'numpy.asarray', 'np.asarray', (['pcd.points'], {}), True, 'import numpy as np\n'), (23, 'numpy.asarray', 'np.asarray', (['pcd.colors'], {}), True, 'import numpy as np\n'), (24, 'numpy.concatenate', 'np.concatenate', (['(coordinate, color)'], {'axis': '(1)'}), True, 'import numpy as np\n'), (28, 'numpy.zeros', 'np.zeros', (['(1, points_num, number_of_feature)'], {}), True, 'import numpy as np\n'), (29, 'numpy.expand_dims', 'np.expand_dims', (['point_cloud', '(0)'], {}), True, 'import numpy as np\n'), (53, 'numpy.zeros', 'np.zeros', (['(number_of_points_of_ply, feature_num)'], {}), True, 'import numpy as np\n'), (59, 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), True, 'import open3d as o3d\n'), (62, 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['point_ori_position'], {}), True, 'import open3d as o3d\n'), (63, 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['point_ori_color'], {}), True, 'import open3d as o3d\n'), (64, 'open3d.io.write_point_cloud', 'o3d.io.write_point_cloud', (['output_file', 'pcd'], {'write_ascii': '(False)'}), True, 'import open3d as o3d\n'), (78, 'importlib.import_module', 'importlib.import_module', (['model'], {}), False, 'import importlib\n'), (82, 'entropy_model.EntropyBottleneck', 'EntropyBottleneck', ([], {}), False, 'from entropy_model import EntropyBottleneck\n'), (83, 'conditional_entropy_model.SymmetricConditional', 'SymmetricConditional', ([], {}), False, 'from conditional_entropy_model import SymmetricConditional\n'), (85, 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'analysis_transform': 'analysis_transform', 'hyper_encoder': 'hyper_encoder', 'hyper_decoder': 'hyper_decoder', 'estimator': 'entropy_bottleneck'}), True, 'import tensorflow as tf\n'), (91, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['x_color', '"""float32"""'], {}), True, 'import tensorflow as tf\n'), (92, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['x_coori', '"""float32"""'], {}), True, 'import tensorflow as tf\n'), (101, 'tensorflow.map_fn', 'tf.map_fn', (['loop_analysis', 'element'], {'dtype': 'tf.float32', 'parallel_iterations': '(1)', 'back_prop': '(False)'}), True, 'import tensorflow as tf\n'), (109, 'tensorflow.map_fn', 'tf.map_fn', (['loop_hyper_encoder', 'ys'], {'dtype': 'tf.float32', 'parallel_iterations': '(1)', 'back_prop': '(False)'}), True, 'import tensorflow as tf\n'), (120, 'tensorflow.map_fn', 'tf.map_fn', (['loop_hyper_deocder', 'z_hats'], {'dtype': '(tf.float32, tf.float32)', 'parallel_iterations': '(1)', 'back_prop': '(False)'}), True, 'import tensorflow as tf\n'), (123, 'tensorflow.maximum', 'tf.maximum', (['scales', 'lower_bound'], {}), True, 'import tensorflow as tf\n'), (144, 'importlib.import_module', 'importlib.import_module', (['model'], {}), False, 'import importlib\n'), (148, 'entropy_model.EntropyBottleneck', 'EntropyBottleneck', ([], {}), False, 'from entropy_model import EntropyBottleneck\n'), (149, 'conditional_entropy_model.SymmetricConditional', 'SymmetricConditional', ([], {}), False, 'from conditional_entropy_model import SymmetricConditional\n'), (151, 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'synthesis_transform': 'synthesis_transform', 'hyper_encoder': 'hyper_encoder', 'hyper_decoder': 'hyper_decoder', 
'estimator': 'entropy_bottleneck'}), True, 'import tensorflow as tf\n'), (165, 'tensorflow.map_fn', 'tf.map_fn', (['loop_hyper_deocder', 'zs'], {'dtype': '(tf.float32, tf.float32)', 'parallel_iterations': '(1)', 'back_prop': '(False)'}), True, 'import tensorflow as tf\n'), (168, 'tensorflow.maximum', 'tf.maximum', (['scales', 'lower_bound'], {}), True, 'import tensorflow as tf\n'), (182, 'tensorflow.map_fn', 'tf.map_fn', (['loop_synthesis', 'element'], {'dtype': 'tf.float32', 'parallel_iterations': '(1)', 'back_prop': '(False)'}), True, 'import tensorflow as tf\n'), (199, 'os.path.join', 'os.path.join', (['rootdir', "(filename + '.strings')"], {}), False, 'import os\n'), (200, 'os.path.join', 'os.path.join', (['rootdir', "(filename + '.strings_hyper')"], {}), False, 'import os\n'), (201, 'os.path.join', 'os.path.join', (['rootdir', "(filename + '.pointnums')"], {}), False, 'import os\n'), (217, 'os.path.getsize', 'os.path.getsize', (['file_strings'], {}), False, 'import os\n'), (218, 'os.path.getsize', 'os.path.getsize', (['file_strings_hyper'], {}), False, 'import os\n'), (219, 'os.path.getsize', 'os.path.getsize', (['file_pointnums'], {}), False, 'import os\n'), (236, 'os.path.join', 'os.path.join', (['rootdir', "(filename + '.strings')"], {}), False, 'import os\n'), (237, 'os.path.join', 'os.path.join', (['rootdir', "(filename + '.strings_hyper')"], {}), False, 'import os\n'), (238, 'os.path.join', 'os.path.join', (['rootdir', "(filename + '.pointnums')"], {}), False, 'import os\n'), (258, 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), False, 'import argparse\n'), (295, 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), True, 'import tensorflow as tf\n'), (299, 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), True, 'import tensorflow as tf\n'), (27, 'numpy.ceil', 'np.ceil', (['(number_of_points_of_ply / points_num)'], {}), True, 'import numpy as np\n'), (89, 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['ckpt_dir'], {}), True, 'import tensorflow as tf\n'), (95, 'tensorflow.expand_dims', 'tf.expand_dims', (['element[0]', '(0)'], {}), True, 'import tensorflow as tf\n'), (96, 'tensorflow.expand_dims', 'tf.expand_dims', (['element[1]', '(0)'], {}), True, 'import tensorflow as tf\n'), (98, 'tensorflow.squeeze', 'tf.squeeze', (['y'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (105, 'tensorflow.expand_dims', 'tf.expand_dims', (['y', '(0)'], {}), True, 'import tensorflow as tf\n'), (107, 'tensorflow.squeeze', 'tf.squeeze', (['z'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (116, 'tensorflow.expand_dims', 'tf.expand_dims', (['z', '(0)'], {}), True, 'import tensorflow as tf\n'), (127, 'tensorflow.shape', 'tf.shape', (['zs'], {}), True, 'import tensorflow as tf\n'), (131, 'tensorflow.shape', 'tf.shape', (['ys'], {}), True, 'import tensorflow as tf\n'), (155, 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['ckpt_dir'], {}), True, 'import tensorflow as tf\n'), (161, 'tensorflow.expand_dims', 'tf.expand_dims', (['z', '(0)'], {}), True, 'import tensorflow as tf\n'), (175, 'tensorflow.expand_dims', 'tf.expand_dims', (['element[0]', '(0)'], {}), True, 'import tensorflow as tf\n'), (176, 'tensorflow.expand_dims', 'tf.expand_dims', (['element[1]', '(0)'], {}), True, 'import tensorflow as tf\n'), (177, 'tensorflow.cast', 'tf.cast', (['x_coori', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (179, 'tensorflow.squeeze', 'tf.squeeze', (['x', 
'[0]'], {}), True, 'import tensorflow as tf\n'), (196, 'os.path.exists', 'os.path.exists', (['rootdir'], {}), False, 'import os\n'), (197, 'os.makedirs', 'os.makedirs', (['rootdir'], {}), False, 'import os\n'), (302, 'os.path.split', 'os.path.split', (['args.input'], {}), False, 'import os\n'), (34, 'numpy.concatenate', 'np.concatenate', (['(point_set, point_cloud[:, i * 2048:(i + 1) * 2048, :])', '(0)'], {}), True, 'import numpy as np\n'), (36, 'numpy.zeros', 'np.zeros', (['(1, points_num, number_of_feature)'], {}), True, 'import numpy as np\n'), (40, 'numpy.concatenate', 'np.concatenate', (['(point_set, temp)', '(0)'], {}), True, 'import numpy as np\n'), (118, 'tensorflow.squeeze', 'tf.squeeze', (['loc', '[0]'], {}), True, 'import tensorflow as tf\n'), (118, 'tensorflow.squeeze', 'tf.squeeze', (['scale', '[0]'], {}), True, 'import tensorflow as tf\n'), (163, 'tensorflow.squeeze', 'tf.squeeze', (['loc', '[0]'], {}), True, 'import tensorflow as tf\n'), (163, 'tensorflow.squeeze', 'tf.squeeze', (['scale', '[0]'], {}), True, 'import tensorflow as tf\n'), (317, 'os.path.split', 'os.path.split', (['args.input'], {}), False, 'import os\n'), (329, 'numpy.concatenate', 'np.concatenate', (['(ori_coori, rec_color)', '(-1)'], {}), True, 'import numpy as np\n'), (204, 'numpy.array', 'np.array', (['y_shape'], {'dtype': 'np.int16'}), True, 'import numpy as np\n'), (205, 'numpy.array', 'np.array', (['(y_min_v, y_max_v)'], {'dtype': 'np.int8'}), True, 'import numpy as np\n'), (209, 'numpy.array', 'np.array', (['z_shape'], {'dtype': 'np.int16'}), True, 'import numpy as np\n'), (210, 'numpy.array', 'np.array', (['(z_min_v, z_max_v)'], {'dtype': 'np.int8'}), True, 'import numpy as np\n'), (215, 'numpy.array', 'np.array', (['points_numbers_less_than2048'], {'dtype': 'np.uint16'}), True, 'import numpy as np\n')]
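The preprocess()/postprocess() pair above partitions a cloud into fixed 2048-point sets, zero-pads the last set, and later reassembles the exact original. A NumPy-only sketch of that round trip (the 5000-point, 6-feature cloud is a made-up example):

# Split N points into fixed-size sets, pad the last, record its true size.
import numpy as np

points_num = 2048
cloud = np.random.rand(5000, 6)                  # hypothetical (xyz + rgb) cloud
set_num = int(np.ceil(len(cloud) / points_num))
last = len(cloud) - points_num * (set_num - 1)   # points in the final set

padded = np.zeros((set_num, points_num, cloud.shape[1]))
padded.reshape(-1, cloud.shape[1])[:len(cloud)] = cloud

# Reassembly: drop the zero padding using the recorded count.
rebuilt = padded.reshape(-1, cloud.shape[1])[: points_num * (set_num - 1) + last]
assert np.allclose(rebuilt, cloud)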
chuyj/saliency
878680dd326f983b051fc33dd6212f28f1d9a7a7
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

import numpy as np
import tensorflow as tf

from . import integrated_gradients
from tensorflow.python.platform import googletest


class IntegratedGradientsTest(googletest.TestCase):
  """To run: "python -m saliency.integrated_gradients_test" from the
  PAIR-code/saliency directory.
  """

  def testIntegratedGradientsGetMask(self):
    with tf.Graph().as_default() as graph:
      x = tf.placeholder(shape=[None, 3], dtype=tf.float32)
      y = 5 * x[:, 0] + x[:, 0] * x[:, 1] + tf.sin(x[:, 2])
      with tf.Session() as sess:
        # Calculate the value of `y` at the baseline.
        x_baseline_val = np.array([[0.5, 0.8, 1.0]], dtype=np.float)
        y_baseline_val = sess.run(y, feed_dict={x: x_baseline_val})

        # Calculate the value of `y` at the input.
        x_input_val = np.array([[1.0, 2.0, 3.0]], dtype=np.float)
        y_input_val = sess.run(y, feed_dict={x: x_input_val})

        # Due to mathematical properties of the integrated gradients,
        # the expected IG value is equal to the difference between
        # the `y` value at the input and the `y` value at the baseline.
        expected_val = y_input_val[0] - y_baseline_val[0]

        # Calculate the integrated gradients attribution of the input.
        ig = integrated_gradients.IntegratedGradients(graph, sess, y[0], x)
        mask = ig.GetMask(x_value=x_input_val[0], feed_dict={},
                          x_baseline=x_baseline_val[0], x_steps=1000)

        # Verify the result.
        self.assertAlmostEqual(expected_val, mask.sum(), places=3)


if __name__ == '__main__':
  googletest.main()
[ "tensorflow.Graph", "tensorflow.sin", "tensorflow.placeholder", "tensorflow.Session", "tensorflow.python.platform.googletest.main", "numpy.array" ]
saliency/integrated_gradients_test.py
[(57, 'tensorflow.python.platform.googletest.main', 'googletest.main', ([], {}), False, 'from tensorflow.python.platform import googletest\n'), (31, 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None, 3]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (32, 'tensorflow.sin', 'tf.sin', (['x[:, (2)]'], {}), True, 'import tensorflow as tf\n'), (33, 'tensorflow.Session', 'tf.Session', ([], {}), True, 'import tensorflow as tf\n'), (35, 'numpy.array', 'np.array', (['[[0.5, 0.8, 1.0]]'], {'dtype': 'np.float'}), True, 'import numpy as np\n'), (39, 'numpy.array', 'np.array', (['[[1.0, 2.0, 3.0]]'], {'dtype': 'np.float'}), True, 'import numpy as np\n'), (30, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n')]
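The test above relies on the completeness property of integrated gradients: the attributions sum to f(input) - f(baseline). A NumPy sketch of that property for the same toy function, approximating the path integral with a midpoint Riemann sum (the analytic gradient and the 1000-step count mirror the test's setup but are our own choices):

# f(x) = 5*x0 + x0*x1 + sin(x2); attributions should sum to f(input) - f(baseline).
import numpy as np

def f(x):
    return 5 * x[0] + x[0] * x[1] + np.sin(x[2])

def grad_f(x):  # analytic gradient of the toy function
    return np.array([5.0 + x[1], x[0], np.cos(x[2])])

baseline = np.array([0.5, 0.8, 1.0])
x_input = np.array([1.0, 2.0, 3.0])
steps = 1000

alphas = (np.arange(steps) + 0.5) / steps                 # midpoint rule
points = baseline + alphas[:, None] * (x_input - baseline)
avg_grad = np.mean([grad_f(p) for p in points], axis=0)
ig = (x_input - baseline) * avg_grad

print(ig.sum(), f(x_input) - f(baseline))                 # nearly equal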
Wu-Zhe/maskgan-local
446688d9317fea0a5cbb4bd8b1cf227df6679dc7
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Asynchronous data producer for the NCF pipeline.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import atexit import functools import os import sys import tempfile import threading import time import timeit import traceback import typing import numpy as np import six from six.moves import queue import tensorflow as tf from tensorflow.contrib.tpu.python.tpu.datasets import StreamingFilesDataset from official.datasets import movielens from official.recommendation import constants as rconst from official.recommendation import popen_helper from official.recommendation import stat_utils SUMMARY_TEMPLATE = """General: {spacer}Num users: {num_users} {spacer}Num items: {num_items} Training: {spacer}Positive count: {train_pos_ct} {spacer}Batch size: {train_batch_size} {multiplier} {spacer}Batch count per epoch: {train_batch_ct} Eval: {spacer}Positive count: {eval_pos_ct} {spacer}Batch size: {eval_batch_size} {multiplier} {spacer}Batch count per epoch: {eval_batch_ct}""" _TRAIN_FEATURE_MAP = { movielens.USER_COLUMN: tf.FixedLenFeature([], dtype=tf.string), movielens.ITEM_COLUMN: tf.FixedLenFeature([], dtype=tf.string), rconst.MASK_START_INDEX: tf.FixedLenFeature([1], dtype=tf.string), "labels": tf.FixedLenFeature([], dtype=tf.string), } _EVAL_FEATURE_MAP = { movielens.USER_COLUMN: tf.FixedLenFeature([], dtype=tf.string), movielens.ITEM_COLUMN: tf.FixedLenFeature([], dtype=tf.string), rconst.DUPLICATE_MASK: tf.FixedLenFeature([], dtype=tf.string) } class DatasetManager(object): """Helper class for handling TensorFlow specific data tasks. This class takes the (relatively) framework agnostic work done by the data constructor classes and handles the TensorFlow specific portions (TFRecord management, tf.Dataset creation, etc.). """ def __init__(self, is_training, stream_files, batches_per_epoch, shard_root=None, deterministic=False): # type: (bool, bool, int, typing.Optional[str], bool) -> None """Constructs a `DatasetManager` instance. Args: is_training: Boolean of whether the data provided is training or evaluation data. This determines whether to reuse the data (if is_training=False) and the exact structure to use when storing and yielding data. stream_files: Boolean indicating whether data should be serialized and written to file shards. batches_per_epoch: The number of batches in a single epoch. shard_root: The base directory to be used when stream_files=True. deterministic: Forgo non-deterministic speedups. (i.e. 
sloppy=True) """ self._is_training = is_training self._deterministic = deterministic self._stream_files = stream_files self._writers = [] self._write_locks = [threading.RLock() for _ in range(rconst.NUM_FILE_SHARDS)] if stream_files else [] self._batches_per_epoch = batches_per_epoch self._epochs_completed = 0 self._epochs_requested = 0 self._shard_root = shard_root self._result_queue = queue.Queue() self._result_reuse = [] @property def current_data_root(self): subdir = (rconst.TRAIN_FOLDER_TEMPLATE.format(self._epochs_completed) if self._is_training else rconst.EVAL_FOLDER) return os.path.join(self._shard_root, subdir) def buffer_reached(self): # Only applicable for training. return (self._epochs_completed - self._epochs_requested >= rconst.CYCLES_TO_BUFFER and self._is_training) @staticmethod def _serialize(data): """Convert NumPy arrays into a TFRecords entry.""" feature_dict = { k: tf.train.Feature(bytes_list=tf.train.BytesList( value=[memoryview(v).tobytes()])) for k, v in data.items()} return tf.train.Example( features=tf.train.Features(feature=feature_dict)).SerializeToString() def _deserialize(self, serialized_data, batch_size): """Convert serialized TFRecords into tensors. Args: serialized_data: A tensor containing serialized records. batch_size: The data arrives pre-batched, so batch size is needed to deserialize the data. """ feature_map = _TRAIN_FEATURE_MAP if self._is_training else _EVAL_FEATURE_MAP features = tf.parse_single_example(serialized_data, feature_map) users = tf.reshape(tf.decode_raw( features[movielens.USER_COLUMN], rconst.USER_DTYPE), (batch_size,)) items = tf.reshape(tf.decode_raw( features[movielens.ITEM_COLUMN], rconst.ITEM_DTYPE), (batch_size,)) def decode_binary(data_bytes): # tf.decode_raw does not support bool as a decode type. As a result it is # necessary to decode to int8 (7 of the bits will be ignored) and then # cast to bool. return tf.reshape(tf.cast(tf.decode_raw(data_bytes, tf.int8), tf.bool), (batch_size,)) if self._is_training: mask_start_index = tf.decode_raw( features[rconst.MASK_START_INDEX], tf.int32)[0] valid_point_mask = tf.less(tf.range(batch_size), mask_start_index) return { movielens.USER_COLUMN: users, movielens.ITEM_COLUMN: items, rconst.VALID_POINT_MASK: valid_point_mask, }, decode_binary(features["labels"]) return { movielens.USER_COLUMN: users, movielens.ITEM_COLUMN: items, rconst.DUPLICATE_MASK: decode_binary(features[rconst.DUPLICATE_MASK]), } def put(self, index, data): # type: (int, dict) -> None """Store data for later consumption. Because there are several paths for storing and yielding data (queues, lists, files) the data producer simply provides the data in a standard format at which point the dataset manager handles storing it in the correct form. Args: index: Used to select shards when writing to files. data: A dict of the data to be stored. This method mutates data, and therefore expects to be the only consumer. 
""" if self._stream_files: example_bytes = self._serialize(data) with self._write_locks[index % rconst.NUM_FILE_SHARDS]: self._writers[index % rconst.NUM_FILE_SHARDS].write(example_bytes) else: if self._is_training: mask_start_index = data.pop(rconst.MASK_START_INDEX) batch_size = data[movielens.ITEM_COLUMN].shape[0] data[rconst.VALID_POINT_MASK] = np.less(np.arange(batch_size), mask_start_index) data = (data, data.pop("labels")) self._result_queue.put(data) def start_construction(self): if self._stream_files: tf.gfile.MakeDirs(self.current_data_root) template = os.path.join(self.current_data_root, rconst.SHARD_TEMPLATE) self._writers = [tf.io.TFRecordWriter(template.format(i)) for i in range(rconst.NUM_FILE_SHARDS)] def end_construction(self): if self._stream_files: [writer.close() for writer in self._writers] self._writers = [] self._result_queue.put(self.current_data_root) self._epochs_completed += 1 def data_generator(self, epochs_between_evals): """Yields examples during local training.""" assert not self._stream_files assert self._is_training or epochs_between_evals == 1 if self._is_training: for _ in range(self._batches_per_epoch * epochs_between_evals): yield self._result_queue.get(timeout=300) else: if self._result_reuse: assert len(self._result_reuse) == self._batches_per_epoch for i in self._result_reuse: yield i else: # First epoch. for _ in range(self._batches_per_epoch * epochs_between_evals): result = self._result_queue.get(timeout=300) self._result_reuse.append(result) yield result def increment_request_epoch(self): self._epochs_requested += 1 def get_dataset(self, batch_size, epochs_between_evals): """Construct the dataset to be used for training and eval. For local training, data is provided through Dataset.from_generator. For remote training (TPUs) the data is first serialized to files and then sent to the TPU through a StreamingFilesDataset. Args: batch_size: The per-device batch size of the dataset. epochs_between_evals: How many epochs worth of data to yield. (Generator mode only.) """ self.increment_request_epoch() if self._stream_files: if epochs_between_evals > 1: raise ValueError("epochs_between_evals > 1 not supported for file " "based dataset.") epoch_data_dir = self._result_queue.get(timeout=300) if not self._is_training: self._result_queue.put(epoch_data_dir) # Eval data is reused. 
file_pattern = os.path.join( epoch_data_dir, rconst.SHARD_TEMPLATE.format("*")) dataset = StreamingFilesDataset( files=file_pattern, worker_job="worker", num_parallel_reads=rconst.NUM_FILE_SHARDS, num_epochs=1, sloppy=not self._deterministic) map_fn = functools.partial(self._deserialize, batch_size=batch_size) dataset = dataset.map(map_fn, num_parallel_calls=16) else: types = {movielens.USER_COLUMN: rconst.USER_DTYPE, movielens.ITEM_COLUMN: rconst.ITEM_DTYPE} shapes = {movielens.USER_COLUMN: tf.TensorShape([batch_size]), movielens.ITEM_COLUMN: tf.TensorShape([batch_size])} if self._is_training: types[rconst.VALID_POINT_MASK] = np.bool shapes[rconst.VALID_POINT_MASK] = tf.TensorShape([batch_size]) types = (types, np.bool) shapes = (shapes, tf.TensorShape([batch_size])) else: types[rconst.DUPLICATE_MASK] = np.bool shapes[rconst.DUPLICATE_MASK] = tf.TensorShape([batch_size]) data_generator = functools.partial( self.data_generator, epochs_between_evals=epochs_between_evals) dataset = tf.data.Dataset.from_generator( generator=data_generator, output_types=types, output_shapes=shapes) return dataset.prefetch(16) def make_input_fn(self, batch_size): """Create an input_fn which checks for batch size consistency.""" def input_fn(params): param_batch_size = (params["batch_size"] if self._is_training else params["eval_batch_size"]) if batch_size != param_batch_size: raise ValueError("producer batch size ({}) differs from params batch " "size ({})".format(batch_size, param_batch_size)) epochs_between_evals = (params.get("epochs_between_evals", 1) if self._is_training else 1) return self.get_dataset(batch_size=batch_size, epochs_between_evals=epochs_between_evals) return input_fn class BaseDataConstructor(threading.Thread): """Data constructor base class. This class manages the control flow for constructing data. 
It is not meant to be used directly, but instead subclasses should implement the following two methods: self.construct_lookup_variables self.lookup_negative_items """ def __init__(self, maximum_number_epochs, # type: int num_users, # type: int num_items, # type: int user_map, # type: dict item_map, # type: dict train_pos_users, # type: np.ndarray train_pos_items, # type: np.ndarray train_batch_size, # type: int batches_per_train_step, # type: int num_train_negatives, # type: int eval_pos_users, # type: np.ndarray eval_pos_items, # type: np.ndarray eval_batch_size, # type: int batches_per_eval_step, # type: int stream_files, # type: bool deterministic=False # type: bool ): # General constants self._maximum_number_epochs = maximum_number_epochs self._num_users = num_users self._num_items = num_items self.user_map = user_map self.item_map = item_map self._train_pos_users = train_pos_users self._train_pos_items = train_pos_items self.train_batch_size = train_batch_size self._num_train_negatives = num_train_negatives self._batches_per_train_step = batches_per_train_step self._eval_pos_users = eval_pos_users self._eval_pos_items = eval_pos_items self.eval_batch_size = eval_batch_size # Training if self._train_pos_users.shape != self._train_pos_items.shape: raise ValueError( "User positives ({}) is different from item positives ({})".format( self._train_pos_users.shape, self._train_pos_items.shape)) (self._train_pos_count,) = self._train_pos_users.shape self._elements_in_epoch = (1 + num_train_negatives) * self._train_pos_count self.train_batches_per_epoch = self._count_batches( self._elements_in_epoch, train_batch_size, batches_per_train_step) # Evaluation if eval_batch_size % (1 + rconst.NUM_EVAL_NEGATIVES): raise ValueError("Eval batch size {} is not divisible by {}".format( eval_batch_size, 1 + rconst.NUM_EVAL_NEGATIVES)) self._eval_users_per_batch = int( eval_batch_size // (1 + rconst.NUM_EVAL_NEGATIVES)) self._eval_elements_in_epoch = num_users * (1 + rconst.NUM_EVAL_NEGATIVES) self.eval_batches_per_epoch = self._count_batches( self._eval_elements_in_epoch, eval_batch_size, batches_per_eval_step) # Intermediate artifacts self._current_epoch_order = np.empty(shape=(0,)) self._shuffle_iterator = None self._shuffle_with_forkpool = not stream_files if stream_files: self._shard_root = tempfile.mkdtemp(prefix="ncf_") atexit.register(tf.gfile.DeleteRecursively, dirname=self._shard_root) else: self._shard_root = None self._train_dataset = DatasetManager( True, stream_files, self.train_batches_per_epoch, self._shard_root, deterministic) self._eval_dataset = DatasetManager( False, stream_files, self.eval_batches_per_epoch, self._shard_root, deterministic) # Threading details super(BaseDataConstructor, self).__init__() self.daemon = True self._stop_loop = False self._fatal_exception = None self.deterministic = deterministic def __str__(self): multiplier = ("(x{} devices)".format(self._batches_per_train_step) if self._batches_per_train_step > 1 else "") summary = SUMMARY_TEMPLATE.format( spacer=" ", num_users=self._num_users, num_items=self._num_items, train_pos_ct=self._train_pos_count, train_batch_size=self.train_batch_size, train_batch_ct=self.train_batches_per_epoch, eval_pos_ct=self._num_users, eval_batch_size=self.eval_batch_size, eval_batch_ct=self.eval_batches_per_epoch, multiplier=multiplier) return super(BaseDataConstructor, self).__str__() + "\n" + summary @staticmethod def _count_batches(example_count, batch_size, batches_per_step): """Determine the number of batches, rounding up to fill 
all devices.""" x = (example_count + batch_size - 1) // batch_size return (x + batches_per_step - 1) // batches_per_step * batches_per_step def stop_loop(self): self._stop_loop = True def construct_lookup_variables(self): """Perform any one time pre-compute work.""" raise NotImplementedError def lookup_negative_items(self, **kwargs): """Randomly sample negative items for given users.""" raise NotImplementedError def _run(self): atexit.register(self.stop_loop) self._start_shuffle_iterator() self.construct_lookup_variables() self._construct_training_epoch() self._construct_eval_epoch() for _ in range(self._maximum_number_epochs - 1): self._construct_training_epoch() self.stop_loop() def run(self): try: self._run() except Exception as e: # The Thread base class swallows stack traces, so unfortunately it is # necessary to catch and re-raise to get debug output traceback.print_exc() self._fatal_exception = e sys.stderr.flush() raise def _start_shuffle_iterator(self): if self._shuffle_with_forkpool: pool = popen_helper.get_forkpool(3, closing=False) else: pool = popen_helper.get_threadpool(1, closing=False) atexit.register(pool.close) args = [(self._elements_in_epoch, stat_utils.random_int32()) for _ in range(self._maximum_number_epochs)] imap = pool.imap if self.deterministic else pool.imap_unordered self._shuffle_iterator = imap(stat_utils.permutation, args) def _get_training_batch(self, i): """Construct a single batch of training data. Args: i: The index of the batch. This is used when stream_files=True to assign data to file shards. """ batch_indices = self._current_epoch_order[i * self.train_batch_size: (i + 1) * self.train_batch_size] (mask_start_index,) = batch_indices.shape batch_ind_mod = np.mod(batch_indices, self._train_pos_count) users = self._train_pos_users[batch_ind_mod] negative_indices = np.greater_equal(batch_indices, self._train_pos_count) negative_users = users[negative_indices] negative_items = self.lookup_negative_items(negative_users=negative_users) items = self._train_pos_items[batch_ind_mod] items[negative_indices] = negative_items labels = np.logical_not(negative_indices) # Pad last partial batch pad_length = self.train_batch_size - mask_start_index if pad_length: # We pad with arange rather than zeros because the network will still # compute logits for padded examples, and padding with zeros would create # a very "hot" embedding key which can have performance implications. 
user_pad = np.arange(pad_length, dtype=users.dtype) % self._num_users item_pad = np.arange(pad_length, dtype=items.dtype) % self._num_items label_pad = np.zeros(shape=(pad_length,), dtype=labels.dtype) users = np.concatenate([users, user_pad]) items = np.concatenate([items, item_pad]) labels = np.concatenate([labels, label_pad]) self._train_dataset.put(i, { movielens.USER_COLUMN: users, movielens.ITEM_COLUMN: items, rconst.MASK_START_INDEX: np.array(mask_start_index, dtype=np.int32), "labels": labels, }) def _wait_to_construct_train_epoch(self): count = 0 while self._train_dataset.buffer_reached() and not self._stop_loop: time.sleep(0.01) count += 1 if count >= 100 and np.log10(count) == np.round(np.log10(count)): tf.logging.info( "Waited {} times for training data to be consumed".format(count)) def _construct_training_epoch(self): """Loop to construct a batch of training data.""" self._wait_to_construct_train_epoch() start_time = timeit.default_timer() if self._stop_loop: return self._train_dataset.start_construction() map_args = list(range(self.train_batches_per_epoch)) self._current_epoch_order = next(self._shuffle_iterator) get_pool = (popen_helper.get_fauxpool if self.deterministic else popen_helper.get_threadpool) with get_pool(6) as pool: pool.map(self._get_training_batch, map_args) self._train_dataset.end_construction() tf.logging.info("Epoch construction complete. Time: {:.1f} seconds".format( timeit.default_timer() - start_time)) @staticmethod def _assemble_eval_batch(users, positive_items, negative_items, users_per_batch): """Construct duplicate_mask and structure data accordingly. The positive items should be last so that they lose ties. However, they should not be masked out if the true eval positive happens to be selected as a negative. So instead, the positive is placed in the first position, and then switched with the last element after the duplicate mask has been computed. Args: users: An array of users in a batch. (should be identical along axis 1) positive_items: An array (batch_size x 1) of positive item indices. negative_items: An array of negative item indices. users_per_batch: How many users should be in the batch. This is passed as an argument so that ncf_test.py can use this method. Returns: User, item, and duplicate_mask arrays. """ items = np.concatenate([positive_items, negative_items], axis=1) # We pad the users and items here so that the duplicate mask calculation # will include padding. The metric function relies on all padded elements # except the positive being marked as duplicate to mask out padded points. if users.shape[0] < users_per_batch: pad_rows = users_per_batch - users.shape[0] padding = np.zeros(shape=(pad_rows, users.shape[1]), dtype=np.int32) users = np.concatenate([users, padding.astype(users.dtype)], axis=0) items = np.concatenate([items, padding.astype(items.dtype)], axis=0) duplicate_mask = stat_utils.mask_duplicates(items, axis=1).astype(np.bool) items[:, (0, -1)] = items[:, (-1, 0)] duplicate_mask[:, (0, -1)] = duplicate_mask[:, (-1, 0)] assert users.shape == items.shape == duplicate_mask.shape return users, items, duplicate_mask def _get_eval_batch(self, i): """Construct a single batch of evaluation data. Args: i: The index of the batch. 
""" low_index = i * self._eval_users_per_batch high_index = (i + 1) * self._eval_users_per_batch users = np.repeat(self._eval_pos_users[low_index:high_index, np.newaxis], 1 + rconst.NUM_EVAL_NEGATIVES, axis=1) positive_items = self._eval_pos_items[low_index:high_index, np.newaxis] negative_items = (self.lookup_negative_items(negative_users=users[:, :-1]) .reshape(-1, rconst.NUM_EVAL_NEGATIVES)) users, items, duplicate_mask = self._assemble_eval_batch( users, positive_items, negative_items, self._eval_users_per_batch) self._eval_dataset.put(i, { movielens.USER_COLUMN: users.flatten(), movielens.ITEM_COLUMN: items.flatten(), rconst.DUPLICATE_MASK: duplicate_mask.flatten(), }) def _construct_eval_epoch(self): """Loop to construct data for evaluation.""" if self._stop_loop: return start_time = timeit.default_timer() self._eval_dataset.start_construction() map_args = [i for i in range(self.eval_batches_per_epoch)] get_pool = (popen_helper.get_fauxpool if self.deterministic else popen_helper.get_threadpool) with get_pool(6) as pool: pool.map(self._get_eval_batch, map_args) self._eval_dataset.end_construction() tf.logging.info("Eval construction complete. Time: {:.1f} seconds".format( timeit.default_timer() - start_time)) def make_input_fn(self, is_training): # It isn't feasible to provide a foolproof check, so this is designed to # catch most failures rather than provide an exhaustive guard. if self._fatal_exception is not None: raise ValueError("Fatal exception in the data production loop: {}" .format(self._fatal_exception)) return ( self._train_dataset.make_input_fn(self.train_batch_size) if is_training else self._eval_dataset.make_input_fn(self.eval_batch_size)) def increment_request_epoch(self): self._train_dataset.increment_request_epoch() class DummyConstructor(threading.Thread): """Class for running with synthetic data.""" def run(self): pass def stop_loop(self): pass def increment_request_epoch(self): pass @staticmethod def make_input_fn(is_training): """Construct training input_fn that uses synthetic data.""" def input_fn(params): """Generated input_fn for the given epoch.""" batch_size = (params["batch_size"] if is_training else params["eval_batch_size"]) num_users = params["num_users"] num_items = params["num_items"] users = tf.random_uniform([batch_size], dtype=tf.int32, minval=0, maxval=num_users) items = tf.random_uniform([batch_size], dtype=tf.int32, minval=0, maxval=num_items) if is_training: valid_point_mask = tf.cast(tf.random_uniform( [batch_size], dtype=tf.int32, minval=0, maxval=2), tf.bool) labels = tf.cast(tf.random_uniform( [batch_size], dtype=tf.int32, minval=0, maxval=2), tf.bool) data = { movielens.USER_COLUMN: users, movielens.ITEM_COLUMN: items, rconst.VALID_POINT_MASK: valid_point_mask, }, labels else: dupe_mask = tf.cast(tf.random_uniform([batch_size], dtype=tf.int32, minval=0, maxval=2), tf.bool) data = { movielens.USER_COLUMN: users, movielens.ITEM_COLUMN: items, rconst.DUPLICATE_MASK: dupe_mask, } dataset = tf.data.Dataset.from_tensors(data).repeat( rconst.SYNTHETIC_BATCHES_PER_EPOCH * params["batches_per_step"]) dataset = dataset.prefetch(32) return dataset return input_fn class MaterializedDataConstructor(BaseDataConstructor): """Materialize a table of negative examples for fast negative generation. This class creates a table (num_users x num_items) containing all of the negative examples for each user. 
This table is conceptually ragged; that is to say the items dimension will have a number of unused elements at the end equal to the number of positive elements for a given user. For instance: num_users = 3 num_items = 5 positives = [[1, 3], [0], [1, 2, 3, 4]] will generate a negative table: [ [0 2 4 int32max int32max], [1 2 3 4 int32max], [0 int32max int32max int32max int32max], ] and a vector of per-user negative counts, which in this case would be: [3, 4, 1] When sampling negatives, integers are (nearly) uniformly selected from the range [0, per_user_neg_count[user]) which gives a column_index, at which point the negative can be selected as: negative_table[user, column_index] This technique will not scale; however MovieLens is small enough that even a pre-compute which is quadratic in problem size will still fit in memory. A more scalable lookup method is in the works. """ def __init__(self, *args, **kwargs): super(MaterializedDataConstructor, self).__init__(*args, **kwargs) self._negative_table = None self._per_user_neg_count = None def construct_lookup_variables(self): # Materialize negatives for fast lookup sampling. start_time = timeit.default_timer() inner_bounds = np.argwhere(self._train_pos_users[1:] - self._train_pos_users[:-1])[:, 0] + 1 (upper_bound,) = self._train_pos_users.shape index_bounds = [0] + inner_bounds.tolist() + [upper_bound] self._negative_table = np.zeros(shape=(self._num_users, self._num_items), dtype=rconst.ITEM_DTYPE) # Set the table to the max value to make sure the embedding lookup will fail # if we go out of bounds, rather than just overloading item zero. self._negative_table += np.iinfo(rconst.ITEM_DTYPE).max assert self._num_items < np.iinfo(rconst.ITEM_DTYPE).max # Reuse arange during generation. np.delete will make a copy. full_set = np.arange(self._num_items, dtype=rconst.ITEM_DTYPE) self._per_user_neg_count = np.zeros( shape=(self._num_users,), dtype=np.int32) # Threading does not improve this loop. For some reason, the np.delete # call does not parallelize well. Multiprocessing incurs too much # serialization overhead to be worthwhile. for i in range(self._num_users): positives = self._train_pos_items[index_bounds[i]:index_bounds[i+1]] negatives = np.delete(full_set, positives) self._per_user_neg_count[i] = self._num_items - positives.shape[0] self._negative_table[i, :self._per_user_neg_count[i]] = negatives tf.logging.info("Negative sample table built. Time: {:.1f} seconds".format( timeit.default_timer() - start_time)) def lookup_negative_items(self, negative_users, **kwargs): negative_item_choice = stat_utils.very_slightly_biased_randint( self._per_user_neg_count[negative_users]) return self._negative_table[negative_users, negative_item_choice] class BisectionDataConstructor(BaseDataConstructor): """Use bisection to index within positive examples. This class tallies the number of negative items which appear before each positive item for a user. This means that in order to select the ith negative item for a user, it only needs to determine which two positive items bound it at which point the item id for the ith negative is a simply algebraic expression. 
""" def __init__(self, *args, **kwargs): super(BisectionDataConstructor, self).__init__(*args, **kwargs) self.index_bounds = None self._sorted_train_pos_items = None self._total_negatives = None def _index_segment(self, user): lower, upper = self.index_bounds[user:user+2] items = self._sorted_train_pos_items[lower:upper] negatives_since_last_positive = np.concatenate( [items[0][np.newaxis], items[1:] - items[:-1] - 1]) return np.cumsum(negatives_since_last_positive) def construct_lookup_variables(self): start_time = timeit.default_timer() inner_bounds = np.argwhere(self._train_pos_users[1:] - self._train_pos_users[:-1])[:, 0] + 1 (upper_bound,) = self._train_pos_users.shape self.index_bounds = np.array([0] + inner_bounds.tolist() + [upper_bound]) # Later logic will assume that the users are in sequential ascending order. assert np.array_equal(self._train_pos_users[self.index_bounds[:-1]], np.arange(self._num_users)) self._sorted_train_pos_items = self._train_pos_items.copy() for i in range(self._num_users): lower, upper = self.index_bounds[i:i+2] self._sorted_train_pos_items[lower:upper].sort() self._total_negatives = np.concatenate([ self._index_segment(i) for i in range(self._num_users)]) tf.logging.info("Negative total vector built. Time: {:.1f} seconds".format( timeit.default_timer() - start_time)) def lookup_negative_items(self, negative_users, **kwargs): output = np.zeros(shape=negative_users.shape, dtype=rconst.ITEM_DTYPE) - 1 left_index = self.index_bounds[negative_users] right_index = self.index_bounds[negative_users + 1] - 1 num_positives = right_index - left_index + 1 num_negatives = self._num_items - num_positives neg_item_choice = stat_utils.very_slightly_biased_randint(num_negatives) # Shortcuts: # For points where the negative is greater than or equal to the tally before # the last positive point there is no need to bisect. Instead the item id # corresponding to the negative item choice is simply: # last_postive_index + 1 + (neg_choice - last_negative_tally) # Similarly, if the selection is less than the tally at the first positive # then the item_id is simply the selection. # # Because MovieLens organizes popular movies into low integers (which is # preserved through the preprocessing), the first shortcut is very # efficient, allowing ~60% of samples to bypass the bisection. For the same # reason, the second shortcut is rarely triggered (<0.02%) and is therefore # not worth implementing. use_shortcut = neg_item_choice >= self._total_negatives[right_index] output[use_shortcut] = ( self._sorted_train_pos_items[right_index] + 1 + (neg_item_choice - self._total_negatives[right_index]) )[use_shortcut] if np.all(use_shortcut): # The bisection code is ill-posed when there are no elements. return output not_use_shortcut = np.logical_not(use_shortcut) left_index = left_index[not_use_shortcut] right_index = right_index[not_use_shortcut] neg_item_choice = neg_item_choice[not_use_shortcut] num_loops = np.max( np.ceil(np.log2(num_positives[not_use_shortcut])).astype(np.int32)) for i in range(num_loops): mid_index = (left_index + right_index) // 2 right_criteria = self._total_negatives[mid_index] > neg_item_choice left_criteria = np.logical_not(right_criteria) right_index[right_criteria] = mid_index[right_criteria] left_index[left_criteria] = mid_index[left_criteria] # Expected state after bisection pass: # The right index is the smallest index whose tally is greater than the # negative item choice index. 
assert np.all((right_index - left_index) <= 1) output[not_use_shortcut] = ( self._sorted_train_pos_items[right_index] - (self._total_negatives[right_index] - neg_item_choice) ) assert np.all(output >= 0) return output def get_constructor(name): if name == "bisection": return BisectionDataConstructor if name == "materialized": return MaterializedDataConstructor raise ValueError("Unrecognized constructor: {}".format(name))
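The bisection scheme above is easier to see on the toy example from the MaterializedDataConstructor docstring. Below is a minimal standalone sketch (not part of data_pipeline.py; "tally" and "ith_negative" are illustrative names) that reproduces the lookup for a single user with positives [1, 3] out of 5 items, including the past-the-last-positive shortcut:

import numpy as np

num_items = 5
positives = np.array([1, 3])  # one user's sorted positive item ids

# Negatives tallied up to each positive: item 0 precedes positive 1, and item 2
# lies between positives 1 and 3, giving tallies [1, 2].
tally = np.cumsum(np.concatenate(
    [positives[:1], positives[1:] - positives[:-1] - 1]))

def ith_negative(i):
  # Item id of the i-th negative, for i in [0, num_items - len(positives)).
  if i >= tally[-1]:  # shortcut: the choice lies past the last positive
    return positives[-1] + 1 + (i - tally[-1])
  j = np.searchsorted(tally, i, side="right")  # smallest j with tally[j] > i
  return positives[j] - (tally[j] - i)

assert [ith_negative(i) for i in range(3)] == [0, 2, 4]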
[ "tensorflow.FixedLenFeature", "numpy.cumsum", "numpy.concatenate", "numpy.all", "tensorflow.gfile.MakeDirs", "tensorflow.contrib.tpu.python.tpu.datasets.StreamingFilesDataset", "tensorflow.data.Dataset.from_generator", "numpy.iinfo", "numpy.arange", "tensorflow.decode_raw", "numpy.greater_equal", "tensorflow.parse_single_example", "numpy.repeat", "numpy.zeros", "numpy.logical_not", "tensorflow.TensorShape", "tensorflow.data.Dataset.from_tensors", "numpy.delete", "numpy.log10", "tensorflow.train.Features", "numpy.array", "numpy.log2", "tensorflow.range", "numpy.argwhere", "numpy.mod", "tensorflow.random_uniform", "numpy.empty" ]
official/recommendation/data_pipeline.py
[(60, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]'], {'dtype': 'tf.string'}), True, 'import tensorflow as tf\n'), (61, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]'], {'dtype': 'tf.string'}), True, 'import tensorflow as tf\n'), (62, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[1]'], {'dtype': 'tf.string'}), True, 'import tensorflow as tf\n'), (63, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]'], {'dtype': 'tf.string'}), True, 'import tensorflow as tf\n'), (68, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]'], {'dtype': 'tf.string'}), True, 'import tensorflow as tf\n'), (69, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]'], {'dtype': 'tf.string'}), True, 'import tensorflow as tf\n'), (70, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]'], {'dtype': 'tf.string'}), True, 'import tensorflow as tf\n'), (107, 'six.moves.queue.Queue', 'queue.Queue', ([], {}), False, 'from six.moves import queue\n'), (114, 'os.path.join', 'os.path.join', (['self._shard_root', 'subdir'], {}), False, 'import os\n'), (141, 'tensorflow.parse_single_example', 'tf.parse_single_example', (['serialized_data', 'feature_map'], {}), True, 'import tensorflow as tf\n'), (380, 'numpy.empty', 'np.empty', ([], {'shape': '(0,)'}), True, 'import numpy as np\n'), (434, 'atexit.register', 'atexit.register', (['self.stop_loop'], {}), False, 'import atexit\n'), (459, 'atexit.register', 'atexit.register', (['pool.close'], {}), False, 'import atexit\n'), (476, 'numpy.mod', 'np.mod', (['batch_indices', 'self._train_pos_count'], {}), True, 'import numpy as np\n'), (479, 'numpy.greater_equal', 'np.greater_equal', (['batch_indices', 'self._train_pos_count'], {}), True, 'import numpy as np\n'), (487, 'numpy.logical_not', 'np.logical_not', (['negative_indices'], {}), True, 'import numpy as np\n'), (521, 'timeit.default_timer', 'timeit.default_timer', ([], {}), False, 'import timeit\n'), (559, 'numpy.concatenate', 'np.concatenate', (['[positive_items, negative_items]'], {'axis': '(1)'}), True, 'import numpy as np\n'), (586, 'numpy.repeat', 'np.repeat', (['self._eval_pos_users[low_index:high_index, (np.newaxis)]', '(1 + rconst.NUM_EVAL_NEGATIVES)'], {'axis': '(1)'}), True, 'import numpy as np\n'), (606, 'timeit.default_timer', 'timeit.default_timer', ([], {}), False, 'import timeit\n'), (727, 'timeit.default_timer', 'timeit.default_timer', ([], {}), False, 'import timeit\n'), (732, 'numpy.zeros', 'np.zeros', ([], {'shape': '(self._num_users, self._num_items)', 'dtype': 'rconst.ITEM_DTYPE'}), True, 'import numpy as np\n'), (741, 'numpy.arange', 'np.arange', (['self._num_items'], {'dtype': 'rconst.ITEM_DTYPE'}), True, 'import numpy as np\n'), (743, 'numpy.zeros', 'np.zeros', ([], {'shape': '(self._num_users,)', 'dtype': 'np.int32'}), True, 'import numpy as np\n'), (759, 'official.recommendation.stat_utils.very_slightly_biased_randint', 'stat_utils.very_slightly_biased_randint', (['self._per_user_neg_count[negative_users]'], {}), False, 'from official.recommendation import stat_utils\n'), (783, 'numpy.concatenate', 'np.concatenate', (['[items[0][np.newaxis], items[1:] - items[:-1] - 1]'], {}), True, 'import numpy as np\n'), (786, 'numpy.cumsum', 'np.cumsum', (['negatives_since_last_positive'], {}), True, 'import numpy as np\n'), (789, 'timeit.default_timer', 'timeit.default_timer', ([], {}), False, 'import timeit\n'), (819, 'official.recommendation.stat_utils.very_slightly_biased_randint', 'stat_utils.very_slightly_biased_randint', (['num_negatives'], {}), False, 
'from official.recommendation import stat_utils\n'), (840, 'numpy.all', 'np.all', (['use_shortcut'], {}), True, 'import numpy as np\n'), (844, 'numpy.logical_not', 'np.logical_not', (['use_shortcut'], {}), True, 'import numpy as np\n'), (864, 'numpy.all', 'np.all', (['(right_index - left_index <= 1)'], {}), True, 'import numpy as np\n'), (871, 'numpy.all', 'np.all', (['(output >= 0)'], {}), True, 'import numpy as np\n'), (112, 'official.recommendation.constants.TRAIN_FOLDER_TEMPLATE.format', 'rconst.TRAIN_FOLDER_TEMPLATE.format', (['self._epochs_completed'], {}), True, 'from official.recommendation import constants as rconst\n'), (143, 'tensorflow.decode_raw', 'tf.decode_raw', (['features[movielens.USER_COLUMN]', 'rconst.USER_DTYPE'], {}), True, 'import tensorflow as tf\n'), (145, 'tensorflow.decode_raw', 'tf.decode_raw', (['features[movielens.ITEM_COLUMN]', 'rconst.ITEM_DTYPE'], {}), True, 'import tensorflow as tf\n'), (203, 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['self.current_data_root'], {}), True, 'import tensorflow as tf\n'), (204, 'os.path.join', 'os.path.join', (['self.current_data_root', 'rconst.SHARD_TEMPLATE'], {}), False, 'import os\n'), (264, 'tensorflow.contrib.tpu.python.tpu.datasets.StreamingFilesDataset', 'StreamingFilesDataset', ([], {'files': 'file_pattern', 'worker_job': '"""worker"""', 'num_parallel_reads': 'rconst.NUM_FILE_SHARDS', 'num_epochs': '(1)', 'sloppy': '(not self._deterministic)'}), False, 'from tensorflow.contrib.tpu.python.tpu.datasets import StreamingFilesDataset\n'), (268, 'functools.partial', 'functools.partial', (['self._deserialize'], {'batch_size': 'batch_size'}), False, 'import functools\n'), (288, 'functools.partial', 'functools.partial', (['self.data_generator'], {'epochs_between_evals': 'epochs_between_evals'}), False, 'import functools\n'), (290, 'tensorflow.data.Dataset.from_generator', 'tf.data.Dataset.from_generator', ([], {'generator': 'data_generator', 'output_types': 'types', 'output_shapes': 'shapes'}), True, 'import tensorflow as tf\n'), (385, 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'prefix': '"""ncf_"""'}), False, 'import tempfile\n'), (386, 'atexit.register', 'atexit.register', (['tf.gfile.DeleteRecursively'], {'dirname': 'self._shard_root'}), False, 'import atexit\n'), (456, 'official.recommendation.popen_helper.get_forkpool', 'popen_helper.get_forkpool', (['(3)'], {'closing': '(False)'}), False, 'from official.recommendation import popen_helper\n'), (458, 'official.recommendation.popen_helper.get_threadpool', 'popen_helper.get_threadpool', (['(1)'], {'closing': '(False)'}), False, 'from official.recommendation import popen_helper\n'), (497, 'numpy.zeros', 'np.zeros', ([], {'shape': '(pad_length,)', 'dtype': 'labels.dtype'}), True, 'import numpy as np\n'), (498, 'numpy.concatenate', 'np.concatenate', (['[users, user_pad]'], {}), True, 'import numpy as np\n'), (499, 'numpy.concatenate', 'np.concatenate', (['[items, item_pad]'], {}), True, 'import numpy as np\n'), (500, 'numpy.concatenate', 'np.concatenate', (['[labels, label_pad]'], {}), True, 'import numpy as np\n'), (512, 'time.sleep', 'time.sleep', (['(0.01)'], {}), False, 'import time\n'), (566, 'numpy.zeros', 'np.zeros', ([], {'shape': '(pad_rows, users.shape[1])', 'dtype': 'np.int32'}), True, 'import numpy as np\n'), (657, 'tensorflow.random_uniform', 'tf.random_uniform', (['[batch_size]'], {'dtype': 'tf.int32', 'minval': '(0)', 'maxval': 'num_users'}), True, 'import tensorflow as tf\n'), (659, 'tensorflow.random_uniform', 'tf.random_uniform', (['[batch_size]'], 
{'dtype': 'tf.int32', 'minval': '(0)', 'maxval': 'num_items'}), True, 'import tensorflow as tf\n'), (737, 'numpy.iinfo', 'np.iinfo', (['rconst.ITEM_DTYPE'], {}), True, 'import numpy as np\n'), (751, 'numpy.delete', 'np.delete', (['full_set', 'positives'], {}), True, 'import numpy as np\n'), (797, 'numpy.arange', 'np.arange', (['self._num_users'], {}), True, 'import numpy as np\n'), (812, 'numpy.zeros', 'np.zeros', ([], {'shape': 'negative_users.shape', 'dtype': 'rconst.ITEM_DTYPE'}), True, 'import numpy as np\n'), (855, 'numpy.logical_not', 'np.logical_not', (['right_criteria'], {}), True, 'import numpy as np\n'), (100, 'threading.RLock', 'threading.RLock', ([], {}), False, 'import threading\n'), (156, 'tensorflow.decode_raw', 'tf.decode_raw', (['features[rconst.MASK_START_INDEX]', 'tf.int32'], {}), True, 'import tensorflow as tf\n'), (158, 'tensorflow.range', 'tf.range', (['batch_size'], {}), True, 'import tensorflow as tf\n'), (263, 'official.recommendation.constants.SHARD_TEMPLATE.format', 'rconst.SHARD_TEMPLATE.format', (['"""*"""'], {}), True, 'from official.recommendation import constants as rconst\n'), (274, 'tensorflow.TensorShape', 'tf.TensorShape', (['[batch_size]'], {}), True, 'import tensorflow as tf\n'), (275, 'tensorflow.TensorShape', 'tf.TensorShape', (['[batch_size]'], {}), True, 'import tensorflow as tf\n'), (279, 'tensorflow.TensorShape', 'tf.TensorShape', (['[batch_size]'], {}), True, 'import tensorflow as tf\n'), (286, 'tensorflow.TensorShape', 'tf.TensorShape', (['[batch_size]'], {}), True, 'import tensorflow as tf\n'), (449, 'traceback.print_exc', 'traceback.print_exc', ([], {}), False, 'import traceback\n'), (451, 'sys.stderr.flush', 'sys.stderr.flush', ([], {}), False, 'import sys\n'), (460, 'official.recommendation.stat_utils.random_int32', 'stat_utils.random_int32', ([], {}), False, 'from official.recommendation import stat_utils\n'), (495, 'numpy.arange', 'np.arange', (['pad_length'], {'dtype': 'users.dtype'}), True, 'import numpy as np\n'), (496, 'numpy.arange', 'np.arange', (['pad_length'], {'dtype': 'items.dtype'}), True, 'import numpy as np\n'), (505, 'numpy.array', 'np.array', (['mask_start_index'], {'dtype': 'np.int32'}), True, 'import numpy as np\n'), (570, 'official.recommendation.stat_utils.mask_duplicates', 'stat_utils.mask_duplicates', (['items'], {'axis': '(1)'}), False, 'from official.recommendation import stat_utils\n'), (728, 'numpy.argwhere', 'np.argwhere', (['(self._train_pos_users[1:] - self._train_pos_users[:-1])'], {}), True, 'import numpy as np\n'), (738, 'numpy.iinfo', 'np.iinfo', (['rconst.ITEM_DTYPE'], {}), True, 'import numpy as np\n'), (790, 'numpy.argwhere', 'np.argwhere', (['(self._train_pos_users[1:] - self._train_pos_users[:-1])'], {}), True, 'import numpy as np\n'), (152, 'tensorflow.decode_raw', 'tf.decode_raw', (['data_bytes', 'tf.int8'], {}), True, 'import tensorflow as tf\n'), (196, 'numpy.arange', 'np.arange', (['batch_size'], {}), True, 'import numpy as np\n'), (282, 'tensorflow.TensorShape', 'tf.TensorShape', (['[batch_size]'], {}), True, 'import tensorflow as tf\n'), (514, 'numpy.log10', 'np.log10', (['count'], {}), True, 'import numpy as np\n'), (536, 'timeit.default_timer', 'timeit.default_timer', ([], {}), False, 'import timeit\n'), (618, 'timeit.default_timer', 'timeit.default_timer', ([], {}), False, 'import timeit\n'), (663, 'tensorflow.random_uniform', 'tf.random_uniform', (['[batch_size]'], {'dtype': 'tf.int32', 'minval': '(0)', 'maxval': '(2)'}), True, 'import tensorflow as tf\n'), (665, 'tensorflow.random_uniform', 
'tf.random_uniform', (['[batch_size]'], {'dtype': 'tf.int32', 'minval': '(0)', 'maxval': '(2)'}), True, 'import tensorflow as tf\n'), (673, 'tensorflow.random_uniform', 'tf.random_uniform', (['[batch_size]'], {'dtype': 'tf.int32', 'minval': '(0)', 'maxval': '(2)'}), True, 'import tensorflow as tf\n'), (681, 'tensorflow.data.Dataset.from_tensors', 'tf.data.Dataset.from_tensors', (['data'], {}), True, 'import tensorflow as tf\n'), (756, 'timeit.default_timer', 'timeit.default_timer', ([], {}), False, 'import timeit\n'), (809, 'timeit.default_timer', 'timeit.default_timer', ([], {}), False, 'import timeit\n'), (130, 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'feature_dict'}), True, 'import tensorflow as tf\n'), (514, 'numpy.log10', 'np.log10', (['count'], {}), True, 'import numpy as np\n'), (850, 'numpy.log2', 'np.log2', (['num_positives[not_use_shortcut]'], {}), True, 'import numpy as np\n')]
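Each tuple in the api_extract field above is a Python literal; judging from the examples it appears to hold the source line number, the fully qualified API name, the call expression as written, a pair of positional-argument and keyword-argument reprs, a flag that seems to mark aliased "import x as y" imports (True for "import tensorflow as tf", False for "import os" and "from x import y"), and the originating import statement. Assuming that layout (the field meanings are inferred here, not documented), a single entry can be decoded with the standard library alone:

import ast

entry_text = ("(60, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', "
              "(['[]'], {'dtype': 'tf.string'}), True, 'import tensorflow as tf\\n')")
line_no, api, call_expr, (args, kwargs), aliased, import_stmt = ast.literal_eval(entry_text)
print(line_no, api, call_expr, args, kwargs, aliased, import_stmt.strip())
# -> 60 tensorflow.FixedLenFeature tf.FixedLenFeature ['[]'] {'dtype': 'tf.string'} True import tensorflow as tf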
jasonplato/RL_for_AutonomousGreenhouse
e814f3dc42a9ae684a1a6198c31dc900a8636d34
import multiprocessing  # multiprocessing module
import threading  # threading module
import queue
import tensorflow as tf
import numpy as np
import gym
import os
import shutil  # used for copying files
import matplotlib.pyplot as plt
from FeudalBatchProcessor import FeudalBatchProcessor
import policy_utils
from LSTMmodel import SingleStepLSTM

Game = 'CartPole-v0'
N_workers = multiprocessing.cpu_count()  # number of independent workers equals the CPU count
# MAX_GLOBAL_EP = 2000  # maximum number of episodes for the global network
MAX_GLOBALE_STEP = 100000  # maximum number of steps for the global network
GLOBAL_NET_SCOPE = 'Global_Net'  # name of the global network
UPDATE_GLOBALE_ITER = 10  # the global network is updated every N steps
GAMMA = 0.9  # discount factor
LR_A = 0.0001  # actor network learning rate
LR_C = 0.001  # critic network learning rate
beta_start = 0.01
beta_end = 0.001
decay_steps = 50000

GLOBALE_RUNNING_R = []  # stores the overall rewards
# GLOBALE_EP = 0  # global episode counter
GLOBALE_STEP = 0  # global step counter
env = gym.make(Game)  # create the game environment
N_S = env.observation_space.shape[0]  # number of observation values
N_A = env.action_space.n  # number of action values


class ACnet(object):  # this class can build both the global net and the worker nets, since they share the same structure
    def __init__(self, scope, globalAC=None, global_step=None):  # scope determines which network gets built
        # global GLOBALE_STEP
        # self.global_step = GLOBALE_STEP
        if scope == GLOBAL_NET_SCOPE:  # build the global network
            with tf.variable_scope(scope):
                self.global_step = tf.get_variable("global_step", [], tf.int32,
                                                   initializer=tf.constant_initializer(0, dtype=tf.int32),
                                                   trainable=False)
                self.obs_space = N_S
                self.act_space = N_A
                self.k = 16
                self.g_dim = 256
                self.c = 10
                self.vf_hidden_size = 128  # for value function network
                self.alpha = 0.5  # for build loss
                self.batch_processor = FeudalBatchProcessor(self.c)
                self.build_model()  # build feudal policy model
        else:  # concrete steps for building a worker's two networks
            with tf.variable_scope(scope):  # the scope passed in here is the worker's name
                self.global_step = globalAC.global_step
                self.obs_space = N_S
                self.act_space = N_A
                self.k = 16
                self.g_dim = 256
                self.c = 10
                self.vf_hidden_size = 128  # for value function network
                self.alpha = 0.5  # for build loss

                self.batch_processor = FeudalBatchProcessor(self.c)
                self.build_model()  # build feudal policy model

                with tf.name_scope('local_grad'):
                    grads = tf.gradients(self.loss, self.var_list)
                    grads, _ = tf.clip_by_global_norm(grads, 40)

                with tf.name_scope('sync'):  # synchronization between the worker and the global net
                    with tf.name_scope('pull'):  # fetch the global parameters and copy them into the local net
                        self.pull_params_op = tf.group(*[v1.assign(v2)
                                                         for v1, v2 in zip(self.var_list, globalAC.var_list)])
                    with tf.name_scope('push'):  # push the parameters to the global net
                        self.update_params_op = OPT.apply_gradients(zip(grads, globalAC.var_list))
                        # what is pushed are the gradients (grads) of the local net's actor and critic
                        # parameters, computed above
                        # apply_gradients is a built-in function of tf.train.Optimizer that applies the
                        # computed gradients to the global net
                    self.inc_step = self.global_step.assign_add(tf.shape(self.obs)[0])
                    self.train_op = tf.group(self.update_params_op, self.inc_step)
                # GLOBALE_STEP += tf.shape(self.obs)[0]

    def build_model(self):
        """
        Builds the manager and worker models.
        """
        with tf.variable_scope('FeUdal'):
            self.build_placeholders()

            self.build_perception()
            self.build_manager()
            self.build_worker()
            self.build_loss()
            self.var_list = tf.get_collection(
                tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)
            # for v in self.var_list:
            #     print v.name

        self.state_in = [self.worker_lstm.state_in[0],
                         self.worker_lstm.state_in[1],
                         self.manager_lstm.state_in[0],
                         self.manager_lstm.state_in[1]
                         ]
        self.state_out = [self.worker_lstm.state_out[0],
                          self.worker_lstm.state_out[1],
                          self.manager_lstm.state_out[0],
                          self.manager_lstm.state_out[1]
                          ]
        # for v in self.var_list:
        #     print v

    def build_placeholders(self):
        # standard for all policies
        self.obs = tf.placeholder(tf.float32, [None, self.obs_space])
        # ! self.obs = tf.placeholder(tf.float32, [None] + list(self.obs_space))
        # !
self.obs_space = env.observation_space.shape self.r = tf.placeholder(tf.float32, (None,1)) self.ac = tf.placeholder(tf.float32, (None, self.act_space)) self.adv = tf.placeholder(tf.float32, [None]) # unused # specific to FeUdal self.prev_g = tf.placeholder(tf.float32, (None, None, self.g_dim)) self.ri = tf.placeholder(tf.float32, (None,)) self.s_diff = tf.placeholder(tf.float32, (None, self.g_dim)) def build_perception(self): self._obs = tf.expand_dims(self.obs, -1) # ! self._obs = tf.expand_dims(self._obs, -1) # ! conv1 = tf.layers.conv2d(inputs=self._obs, filters=16, kernel_size=[2, 1], # ! kernel_size = [8,8] activation=tf.nn.elu, strides=1) # ! strides = 4 conv2 = tf.layers.conv2d(inputs=conv1, filters=32, kernel_size=[2, 1], # ! kernel_size = [4,4] activation=tf.nn.elu, strides=1) # ! strides = 2 flattened_filters = policy_utils.flatten(conv2) self.z = tf.layers.dense(inputs=flattened_filters, units=256, activation=tf.nn.elu) def build_manager(self): with tf.variable_scope('manager'): # Calculate manager internal state self.s = tf.layers.dense(inputs=self.z, units=self.g_dim, activation=tf.nn.elu) # Calculate manager output g x = tf.expand_dims(self.s, [0]) self.manager_lstm = SingleStepLSTM(x, self.g_dim, step_size=tf.shape(self.obs)[:1]) g_hat = self.manager_lstm.output self.g = tf.nn.l2_normalize(g_hat, dim=1) self.manager_vf = self.build_value(g_hat) def build_worker(self): with tf.variable_scope('worker'): num_acts = self.act_space # Calculate U self.worker_lstm = SingleStepLSTM(tf.expand_dims(self.z, [0]), size=num_acts * self.k, step_size=tf.shape(self.obs)[:1]) flat_logits = self.worker_lstm.output self.worker_vf = self.build_value(flat_logits) U = tf.reshape(flat_logits, [-1, num_acts, self.k]) # Calculate w cut_g = tf.stop_gradient(self.g) cut_g = tf.expand_dims(cut_g, [1]) gstack = tf.concat([self.prev_g, cut_g], axis=1) self.last_c_g = gstack[:, 1:] # print self.last_c_g gsum = tf.reduce_sum(gstack, axis=1) phi = tf.get_variable("phi", (self.g_dim, self.k)) w = tf.matmul(gsum, phi) w = tf.expand_dims(w, [2]) # Calculate policy and sample logits = tf.reshape(tf.matmul(U, w), [-1, num_acts]) self.pi = tf.nn.softmax(logits) self.log_pi = tf.nn.log_softmax(logits) self.sample = policy_utils.categorical_sample( tf.reshape(logits, [-1, num_acts]), num_acts)[0, :] def build_value(self, _input): with tf.variable_scope('VF'): hidden = tf.layers.dense(inputs=_input, units=self.vf_hidden_size, activation=tf.nn.elu) w = tf.get_variable("weights", (self.vf_hidden_size, 1)) return tf.matmul(hidden, w) def build_loss(self): cutoff_vf_manager = tf.reshape(tf.stop_gradient(self.manager_vf), [-1]) dot = tf.reduce_sum(tf.multiply(self.s_diff, self.g), axis=1) gcut = tf.stop_gradient(self.g) mag = tf.norm(self.s_diff, axis=1) * tf.norm(gcut, axis=1) + .0001 dcos = dot / mag manager_loss = -tf.reduce_sum((self.r - cutoff_vf_manager) * dcos) cutoff_vf_worker = tf.reshape(tf.stop_gradient(self.worker_vf), [-1]) log_p = tf.reduce_sum(self.log_pi * self.ac, [1]) worker_loss = (self.r + self.alpha * self.ri - cutoff_vf_worker) * log_p worker_loss = -tf.reduce_sum(worker_loss, axis=0) Am = self.r - self.manager_vf manager_vf_loss = .5 * tf.reduce_sum(tf.square(Am)) Aw = (self.r + self.alpha * self.ri) - self.worker_vf worker_vf_loss = .5 * tf.reduce_sum(tf.square(Aw)) entropy = -tf.reduce_sum(self.pi * self.log_pi) beta = tf.train.polynomial_decay(beta_start, self.global_step, end_learning_rate=beta_end, decay_steps=decay_steps, power=1) # worker_loss = 
tf.Print(worker_loss,[manager_loss,worker_loss,manager_vf_loss,worker_vf_loss,entropy])
        self.loss = worker_loss + manager_loss + \
                    worker_vf_loss + manager_vf_loss - \
                    entropy * beta

    def update_global(self, feed_dict):  # update the global parameters
        SESS.run([self.update_params_op], feed_dict)  # update the actor and critic networks

    def pull_global(self):  # pull the global parameters into the local net
        SESS.run([self.pull_params_op])

    def action(self, ob, g, cw, hw, cm, hm):  # choose an action
        # ob = ob[np.newaxis, :]
        ob = ob.reshape([-1, self.obs_space])
        return SESS.run([self.sample, self.manager_vf, self.g, self.s, self.last_c_g] + self.state_out,
                        feed_dict={self.obs: ob,
                                   self.state_in[0]: cw,
                                   self.state_in[1]: hw,
                                   self.state_in[2]: cm,
                                   self.state_in[3]: hm,
                                   self.prev_g: g})
        # return np.random.choice(range(probs.shape[1]), p=probs.ravel())  # sample an action from probs by probability

    def value(self, ob, g, cw, hw, cm, hm):
        sess = tf.get_default_session()
        return sess.run(self.manager_vf, {self.obs: [ob],
                                          self.state_in[0]: cw,
                                          self.state_in[1]: hw,
                                          self.state_in[2]: cm,
                                          self.state_in[3]: hm,
                                          self.prev_g: g})[0]

    def get_initial_features(self):
        return np.zeros((1, 1, self.g_dim), np.float32), self.worker_lstm.state_init + self.manager_lstm.state_init

    def update_batch(self, batch):
        return self.batch_processor.process_batch(batch)


class Worker(object):
    def __init__(self, name, globalAC):  # name is this worker's name; globalAC is the already-built global network GLOBAL_AC
        self.env = gym.make(Game).unwrapped
        self.name = name  # the worker's name
        self.global_AC = globalAC
        self.local_AC = ACnet(scope=name, globalAC=globalAC)
        # when the second argument is the already-built global AC, a local net is created
        # build the worker's AC network
        self.runner = policy_utils.RunnerThread(self.env, self.local_AC, 20, visualise=0)

    def pull_batch_from_queue(self):
        """
        self explanatory: take a rollout from the queue of the thread runner.
        """
        rollout = self.runner.queue.get(timeout=600.0)
        while not rollout.terminal:
            try:
                rollout.extend(self.runner.queue.get_nowait())
            except queue.Empty:
                break
        return rollout

    def start(self, sess, summary_writer):
        self.runner.start_runner(sess, summary_writer)

    def work(self):  # the concrete procedure this worker runs
        global GLOBALE_STEP, MAX_GLOBALE_STEP
        # global GLOBALE_RUNNING_R, GLOBALE_EP  # two globals: R is the total reward of all workers, ep the total episode count
        # total_step = 1  # this worker's total step count
        # buffer_s, buffer_a, buffer_r = [], [], []  # buffers for states, actions and rewards
        SESS.run(self.local_AC.pull_params_op)
        self.start(SESS, summary_writer=0)
        global_step = SESS.run(self.global_AC.global_step)
        # print(type(GLOBALE_STEP < MAX_GLOBALE_STEP))
        while not COORD.should_stop() and global_step < MAX_GLOBALE_STEP:  # stopping condition for this worker
            # each pass through this loop is one episode
            # s = self.env.reset()  # reset the environment
            if self.name == 'W_0':  # only worker 0 renders the animation
                self.env.render()
            ep_r = 0  # total reward for this episode
            SESS.run(self.local_AC.pull_params_op)
            rollout = self.pull_batch_from_queue()
            batch = policy_utils.process_rollout(rollout, gamma=.99)
            batch = self.local_AC.update_batch(batch)
            # batch.ri = [item for sublist in batch.ri for item in sublist]
            # returns = [item for sublist in batch.returns for item in sublist]
            # batch._replace(returns=returns)
            # print("batch.returns.shape:",batch.returns.shape)
            # print("batch.ri.shape:",batch.ri.le)
            fetches = [self.local_AC.train_op]
            feed_dict = {
                self.local_AC.obs: batch.obs,
                self.global_AC.obs: batch.obs,
                self.local_AC.ac: batch.a,
                self.global_AC.ac: batch.a,
                self.local_AC.r: batch.returns,
                self.global_AC.r: batch.returns,
                self.local_AC.s_diff: batch.s_diff,
                self.global_AC.s_diff: batch.s_diff,
                self.local_AC.prev_g: batch.gsum,
                self.global_AC.prev_g: batch.gsum,
                self.local_AC.ri: batch.ri,
                self.global_AC.ri: batch.ri
            }
            for i in
range(len(self.local_AC.state_in)):
                feed_dict[self.local_AC.state_in[i]] = batch.features[i]
                feed_dict[self.global_AC.state_in[i]] = batch.features[i]

            fetched = SESS.run(fetches, feed_dict=feed_dict)

            # while True:  # each pass through this loop is one step
            #     if self.name == 'W_0':  # only worker 0 renders the animation
            #         self.env.render()
            #
            #     fetched = self.AC.action(last_state, *last_features)  # feed the current state into the AC network to choose an action
            #     action, value_, g, s, last_c_g, features = fetched[0], fetched[1], \
            #                                                fetched[2], fetched[3], \
            #                                                fetched[4], fetched[5:]
            #     a = action.argmax()
            #     state, reward, done, info = self.env.step(a)  # act and receive the new state, reward and other info
            #     rollout.add(last_state, action, reward, value_, g, s, done, last_features)
            #     # if done: reward = -5  # if the episode ended, give the reward a penalty value
            #
            #     # ep_r += reward  # accumulate this episode's total reward
            #     # buffer_s.append(s)  # append the current state, action and reward to the buffers
            #     # buffer_a.append(a)
            #     # buffer_r.append(r)
            #     last_state = state
            #     last_features = features
            #     if total_step % UPDATE_GLOBALE_ITER == 0 or done:  # sync every iter steps, or once the episode has ended
            #         if done:
            #             v_s_ = 0  # if the episode ended, set the estimated value of the future to 0
            #         else:
            #             v_s_ = SESS.run(self.AC.v, feed_dict={self.AC.s: s_[np.newaxis, :]})[
            #                 0, 0]  # for an intermediate step, use the AC network to estimate the value of the next state
            #
            #         buffer_v_target = []
            #         for r in buffer_r[::-1]:  # propagate the next state's value estimate backwards with discounting to get each step's value target
            #             v_s_ = r + GAMMA * v_s_
            #             buffer_v_target.append(v_s_)  # append each step's value target to the buffer
            #         buffer_v_target.reverse()  # after reversing, we have the v-target for every step of this trajectory
            #
            #         buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(
            #             buffer_v_target)
            #
            #         feed_dict = {
            #             self.AC.obs: buffer_s,  # all states visited this time, used to compute the value estimate
            #             self.AC.ac: buffer_a,  # all actions taken this time, used to compute the actor loss
            #             self.AC.v: buffer_v_target  # the value target of every visited state, used to compute the TD error
            #         }
            #
            #         self.AC.update_global(feed_dict)  # update_global is defined in the AC class; feed_dict as above
            #
            #         buffer_s, buffer_a, buffer_r = [], [], []  # clear the buffers
            #
            #         self.AC.pull_global()  # pull the parameters from the global net and assign them to the local net
            #
            #     s = s_  # move on to the next state
            #     total_step += 1  # increment this worker's total step count
            #
            #     if done:  # if this episode has ended
            #         if len(GLOBALE_RUNNING_R) == 0:  # if no running reward has been recorded yet
            #             GLOBALE_RUNNING_R.append(ep_r)
            #         else:
            #             GLOBALE_RUNNING_R.append(0.9 * GLOBALE_RUNNING_R[-1] + 0.1 * ep_r)
            #
            #         print(self.name, 'EP:', GLOBALE_EP)
            #         GLOBALE_EP += 1  # one more episode
            #         break  # end this episode
            #
            # global_step = SESS.run(self.global_AC.global_step)


if __name__ == '__main__':
    SESS = tf.Session()

    with tf.device('/cpu:0'):
        OPT = tf.train.AdamOptimizer(1e-4)  # later we mainly use this optimizer's apply_gradients operation
        # OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC')  # define the critic training process
        GLOBAL_AC = ACnet(scope=GLOBAL_NET_SCOPE)  # create the global network GLOBAL_AC; only the structure (actor and critic parameters) is built
        workers = []
        for i in range(N_workers):  # N_workers equals the number of CPUs
            i_name = 'W_%i' % i  # worker name
            workers.append(Worker(name=i_name, globalAC=GLOBAL_AC))  # create an independent worker

    COORD = tf.train.Coordinator()  # multithreading coordinator
    SESS.run(tf.global_variables_initializer())  # initialize all parameters

    worker_threads = []
    for worker in workers:  # run the workers in parallel
        job = lambda: worker.work()  # the worker's job; this calls work in the Worker class
        t = threading.Thread(target=job)  # each thread carries out one worker's job
        t.start()  # start each worker
        worker_threads.append(t)  # add each worker's job to the thread list
    COORD.join(worker_threads)  # join the workers; continue only after every worker has finished

    plt.plot(np.arange(len(GLOBALE_RUNNING_R)), GLOBALE_RUNNING_R)  # plot the reward curve
    plt.xlabel('step')
    plt.ylabel('Total moving reward')
    plt.show()
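The densest part of A3C.py is build_loss. As a reading aid, here is a standalone numpy sketch (made-up numbers, not taken from the file) of the manager's direction loss, mirroring the dot / mag cosine computation between the observed state change s_diff and the emitted goal g:

import numpy as np

g = np.array([[1.0, 0.0], [0.0, 1.0]])       # goals, already l2-normalized
s_diff = np.array([[2.0, 0.0], [1.0, 1.0]])  # s_{t+c} - s_t at each step
adv = np.array([0.5, -0.2])                  # r minus the stopped manager value

dot = np.sum(s_diff * g, axis=1)
mag = np.linalg.norm(s_diff, axis=1) * np.linalg.norm(g, axis=1) + 1e-4
manager_loss = -np.sum(adv * dot / mag)
print(manager_loss)  # lower when high-advantage steps align with their goals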
[ "tensorflow.device", "tensorflow.get_variable", "tensorflow.concat", "tensorflow.nn.log_softmax", "tensorflow.reduce_sum", "tensorflow.train.AdamOptimizer", "tensorflow.group", "tensorflow.gradients", "tensorflow.layers.dense", "tensorflow.stop_gradient", "tensorflow.name_scope", "tensorflow.Session", "tensorflow.square", "numpy.zeros", "tensorflow.nn.l2_normalize", "tensorflow.layers.conv2d", "tensorflow.matmul", "tensorflow.norm", "tensorflow.shape", "tensorflow.train.Coordinator", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "tensorflow.train.polynomial_decay", "tensorflow.get_default_session", "tensorflow.nn.softmax", "tensorflow.multiply", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.constant_initializer", "tensorflow.clip_by_global_norm", "tensorflow.variable_scope", "matplotlib.pyplot.xlabel", "tensorflow.get_variable_scope" ]
A3C.py
[(15, 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), False, 'import multiprocessing\n'), (31, 'gym.make', 'gym.make', (['Game'], {}), False, 'import gym\n'), (403, 'tensorflow.Session', 'tf.Session', ([], {}), True, 'import tensorflow as tf\n'), (115, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.obs_space]'], {}), True, 'import tensorflow as tf\n'), (118, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None, 1)'], {}), True, 'import tensorflow as tf\n'), (119, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None, self.act_space)'], {}), True, 'import tensorflow as tf\n'), (120, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None]'], {}), True, 'import tensorflow as tf\n'), (123, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None, None, self.g_dim)'], {}), True, 'import tensorflow as tf\n'), (124, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None,)'], {}), True, 'import tensorflow as tf\n'), (125, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None, self.g_dim)'], {}), True, 'import tensorflow as tf\n'), (128, 'tensorflow.expand_dims', 'tf.expand_dims', (['self.obs', '(-1)'], {}), True, 'import tensorflow as tf\n'), (129, 'tensorflow.expand_dims', 'tf.expand_dims', (['self._obs', '(-1)'], {}), True, 'import tensorflow as tf\n'), (130, 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'self._obs', 'filters': '(16)', 'kernel_size': '[2, 1]', 'activation': 'tf.nn.elu', 'strides': '(1)'}), True, 'import tensorflow as tf\n'), (135, 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'conv1', 'filters': '(32)', 'kernel_size': '[2, 1]', 'activation': 'tf.nn.elu', 'strides': '(1)'}), True, 'import tensorflow as tf\n'), (141, 'policy_utils.flatten', 'policy_utils.flatten', (['conv2'], {}), False, 'import policy_utils\n'), (142, 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'flattened_filters', 'units': '(256)', 'activation': 'tf.nn.elu'}), True, 'import tensorflow as tf\n'), (207, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['self.g'], {}), True, 'import tensorflow as tf\n'), (213, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(self.log_pi * self.ac)', '[1]'], {}), True, 'import tensorflow as tf\n'), (225, 'tensorflow.train.polynomial_decay', 'tf.train.polynomial_decay', (['beta_start', 'self.global_step'], {'end_learning_rate': 'beta_end', 'decay_steps': 'decay_steps', 'power': '(1)'}), True, 'import tensorflow as tf\n'), (250, 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), True, 'import tensorflow as tf\n'), (270, 'policy_utils.RunnerThread', 'policy_utils.RunnerThread', (['self.env', 'self.local_AC', '(20)'], {'visualise': '(0)'}), False, 'import policy_utils\n'), (405, 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), True, 'import tensorflow as tf\n'), (406, 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(0.0001)'], {}), True, 'import tensorflow as tf\n'), (414, 'tensorflow.train.Coordinator', 'tf.train.Coordinator', ([], {}), True, 'import tensorflow as tf\n'), (426, 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""step"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (427, 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Total moving reward"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (428, 'matplotlib.pyplot.show', 'plt.show', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (82, 'tensorflow.group', 'tf.group', (['self.update_params_op', 
'self.inc_step'], {}), True, 'import tensorflow as tf\n'), (89, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""FeUdal"""'], {}), True, 'import tensorflow as tf\n'), (147, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""manager"""'], {}), True, 'import tensorflow as tf\n'), (149, 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'self.z', 'units': 'self.g_dim', 'activation': 'tf.nn.elu'}), True, 'import tensorflow as tf\n'), (154, 'tensorflow.expand_dims', 'tf.expand_dims', (['self.s', '[0]'], {}), True, 'import tensorflow as tf\n'), (159, 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['g_hat'], {'dim': '(1)'}), True, 'import tensorflow as tf\n'), (164, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""worker"""'], {}), True, 'import tensorflow as tf\n'), (175, 'tensorflow.reshape', 'tf.reshape', (['flat_logits', '[-1, num_acts, self.k]'], {}), True, 'import tensorflow as tf\n'), (178, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['self.g'], {}), True, 'import tensorflow as tf\n'), (179, 'tensorflow.expand_dims', 'tf.expand_dims', (['cut_g', '[1]'], {}), True, 'import tensorflow as tf\n'), (180, 'tensorflow.concat', 'tf.concat', (['[self.prev_g, cut_g]'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (184, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['gstack'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (185, 'tensorflow.get_variable', 'tf.get_variable', (['"""phi"""', '(self.g_dim, self.k)'], {}), True, 'import tensorflow as tf\n'), (186, 'tensorflow.matmul', 'tf.matmul', (['gsum', 'phi'], {}), True, 'import tensorflow as tf\n'), (187, 'tensorflow.expand_dims', 'tf.expand_dims', (['w', '[2]'], {}), True, 'import tensorflow as tf\n'), (190, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), True, 'import tensorflow as tf\n'), (191, 'tensorflow.nn.log_softmax', 'tf.nn.log_softmax', (['logits'], {}), True, 'import tensorflow as tf\n'), (196, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""VF"""'], {}), True, 'import tensorflow as tf\n'), (197, 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': '_input', 'units': 'self.vf_hidden_size', 'activation': 'tf.nn.elu'}), True, 'import tensorflow as tf\n'), (201, 'tensorflow.get_variable', 'tf.get_variable', (['"""weights"""', '(self.vf_hidden_size, 1)'], {}), True, 'import tensorflow as tf\n'), (202, 'tensorflow.matmul', 'tf.matmul', (['hidden', 'w'], {}), True, 'import tensorflow as tf\n'), (205, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['self.manager_vf'], {}), True, 'import tensorflow as tf\n'), (206, 'tensorflow.multiply', 'tf.multiply', (['self.s_diff', 'self.g'], {}), True, 'import tensorflow as tf\n'), (210, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['((self.r - cutoff_vf_manager) * dcos)'], {}), True, 'import tensorflow as tf\n'), (212, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['self.worker_vf'], {}), True, 'import tensorflow as tf\n'), (215, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['worker_loss'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (223, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(self.pi * self.log_pi)'], {}), True, 'import tensorflow as tf\n'), (257, 'numpy.zeros', 'np.zeros', (['(1, 1, self.g_dim)', 'np.float32'], {}), True, 'import numpy as np\n'), (265, 'gym.make', 'gym.make', (['Game'], {}), False, 'import gym\n'), (305, 'policy_utils.process_rollout', 'policy_utils.process_rollout', (['rollout'], {'gamma': '(0.99)'}), False, 'import policy_utils\n'), (415, 'tensorflow.global_variables_initializer', 
'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (420, 'threading.Thread', 'threading.Thread', ([], {'target': 'job'}), False, 'import threading\n'), (42, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (53, 'FeudalBatchProcessor.FeudalBatchProcessor', 'FeudalBatchProcessor', (['self.c'], {}), False, 'from FeudalBatchProcessor import FeudalBatchProcessor\n'), (57, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (66, 'FeudalBatchProcessor.FeudalBatchProcessor', 'FeudalBatchProcessor', (['self.c'], {}), False, 'from FeudalBatchProcessor import FeudalBatchProcessor\n'), (69, 'tensorflow.name_scope', 'tf.name_scope', (['"""local_grad"""'], {}), True, 'import tensorflow as tf\n'), (70, 'tensorflow.gradients', 'tf.gradients', (['self.loss', 'self.var_list'], {}), True, 'import tensorflow as tf\n'), (71, 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['grads', '(40)'], {}), True, 'import tensorflow as tf\n'), (73, 'tensorflow.name_scope', 'tf.name_scope', (['"""sync"""'], {}), True, 'import tensorflow as tf\n'), (168, 'tensorflow.expand_dims', 'tf.expand_dims', (['self.z', '[0]'], {}), True, 'import tensorflow as tf\n'), (189, 'tensorflow.matmul', 'tf.matmul', (['U', 'w'], {}), True, 'import tensorflow as tf\n'), (208, 'tensorflow.norm', 'tf.norm', (['self.s_diff'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (208, 'tensorflow.norm', 'tf.norm', (['gcut'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (218, 'tensorflow.square', 'tf.square', (['Am'], {}), True, 'import tensorflow as tf\n'), (221, 'tensorflow.square', 'tf.square', (['Aw'], {}), True, 'import tensorflow as tf\n'), (74, 'tensorflow.name_scope', 'tf.name_scope', (['"""pull"""'], {}), True, 'import tensorflow as tf\n'), (77, 'tensorflow.name_scope', 'tf.name_scope', (['"""push"""'], {}), True, 'import tensorflow as tf\n'), (81, 'tensorflow.shape', 'tf.shape', (['self.obs'], {}), True, 'import tensorflow as tf\n'), (96, 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), True, 'import tensorflow as tf\n'), (193, 'tensorflow.reshape', 'tf.reshape', (['logits', '[-1, num_acts]'], {}), True, 'import tensorflow as tf\n'), (44, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0)'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (157, 'tensorflow.shape', 'tf.shape', (['self.obs'], {}), True, 'import tensorflow as tf\n'), (170, 'tensorflow.shape', 'tf.shape', (['self.obs'], {}), True, 'import tensorflow as tf\n')]
201419/Optimizer-PyTorch
5db2164fef8d419d4a1486c923f6835f54f0b091
from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import state_ops from tensorflow.python.framework import ops from tensorflow.python.training import optimizer import tensorflow as tf # Adapted from https://raw.githubusercontent.com/openai/iaf/master/tf_utils/adamax.py class OptimisticMirrorDescentOptimizer(optimizer.Optimizer): def __init__(self, learning_rate=0.001, use_locking=False, name="OMD"): super(OptimisticMirrorDescentOptimizer, self).__init__(use_locking, name) self._lr = learning_rate # Tensor versions of the constructor arguments, created in _prepare(). self._lr_t = None def _prepare(self): self._lr_t = ops.convert_to_tensor(self._lr, name="learning_rate") def _create_slots(self, var_list): # Create slots for the first and second moments. for v in var_list: self._zeros_slot(v, "g", self._name) def _apply_dense(self, grad, var): lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype) g_t = grad g_t_1 = self.get_slot(var, "g") g_t = g_t_1.assign(g_t) var_update = state_ops.assign_sub(var, 2. * lr_t * g_t - lr_t * g_t_1) # Adam would be lr_t * g_t return control_flow_ops.group(*[var_update, g_t]) def _apply_sparse(self, grad, var): raise NotImplementedError("Sparse gradient updates are not supported.") class OptimisticAdamOptimizer(optimizer.Optimizer): def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8, use_locking=False, name="Adamirror"): super(OptimisticAdamOptimizer, self).__init__(use_locking, name) self._lr = learning_rate self._beta1 = beta1 self._beta2 = beta2 # Tensor versions of the constructor arguments, created in _prepare(). self._lr_t = None self._beta1_t = None self._beta2_t = None def _prepare(self): self._lr_t = ops.convert_to_tensor(self._lr, name="learning_rate") self._beta1_t = ops.convert_to_tensor(self._beta1, name="beta1") self._beta2_t = ops.convert_to_tensor(self._beta2, name="beta2") def _create_slots(self, var_list): # Create slots for the first and second moments. for v in var_list: self._zeros_slot(v, "m", self._name) self._zeros_slot(v, "v", self._name) self._zeros_slot(v, "g", self._name) def _apply_dense(self, grad, var): lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype) beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype) beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype) if var.dtype.base_dtype == tf.float16: eps = 1e-7 # Can't use 1e-8 due to underflow -- not sure if it makes a big difference. else: eps = 1e-8 v = self.get_slot(var, "v") v_t = v.assign(beta2_t * v + (1. - beta2_t) * tf.square(grad)) m = self.get_slot(var, "m") m_t = m.assign(beta1_t * m + (1. - beta1_t) * grad) v_t_hat = tf.div(v_t, 1. - beta2_t) m_t_hat = tf.div(m_t, 1. - beta1_t) g_t = tf.div(m_t_hat, tf.sqrt(v_t_hat) + eps) g_t_1 = self.get_slot(var, "g") g_t = g_t_1.assign(g_t) var_update = state_ops.assign_sub(var, 2. 
* lr_t * g_t - lr_t * g_t_1) # Adam would be lr_t * g_t return control_flow_ops.group(*[var_update, m_t, v_t, g_t]) def _apply_sparse(self, grad, var): raise NotImplementedError("Sparse gradient updates are not supported.") class RegularizeGradientDescentOptimizer(optimizer.Optimizer): def __init__(self, learning_rate=0.001, lambd=0.5, use_locking=False, name="RGD"): super(RegularizeGradientDescentOptimizer, self).__init__(use_locking, name) self._lr = learning_rate self._lambda = lambd # Tensor versions of the constructor arguments, created in _prepare(). self._lr_t = None self._lambda_t = None def _prepare(self): self._lr_t = ops.convert_to_tensor(self._lr, name="learning_rate") self._lambda_t = ops.convert_to_tensor(self._lambda, name="lambda") def _apply_dense(self, grad, var): lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype) lambda_t = math_ops.cast(self._lambda_t, var.dtype.base_dtype) g_t = grad var_update = state_ops.assign_sub(var, lr_t * (g_t - lambda_t * var) ) return control_flow_ops.group(*[var_update]) def _apply_sparse(self, grad, var): raise NotImplementedError("Sparse gradient updates are not supported.")
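The 2*lr*g_t - lr*g_{t-1} update applied by OptimisticMirrorDescentOptimizer is easiest to appreciate on a bilinear saddle problem, where plain simultaneous gradient descent/ascent spirals away from the equilibrium while the optimistic correction converges. A standalone sketch (not part of optimistic.py):

import numpy as np

# min_x max_y x*y has its saddle point at (0, 0).
lr = 0.1
x, y = 1.0, 1.0
gx_prev, gy_prev = 0.0, 0.0  # the "g" slot starts zero-initialized, as above
for _ in range(2000):
    gx, gy = y, -x           # descent direction for x, ascent for y (negated)
    x -= 2 * lr * gx - lr * gx_prev
    y -= 2 * lr * gy - lr * gy_prev
    gx_prev, gy_prev = gx, gy
print(x, y)  # both end up very close to zero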
[ "tensorflow.python.ops.control_flow_ops.group", "tensorflow.div", "tensorflow.python.ops.state_ops.assign_sub", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.square", "tensorflow.sqrt", "tensorflow.python.ops.math_ops.cast" ]
optimistic.py
[(24, 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['self._lr'], {'name': '"""learning_rate"""'}), False, 'from tensorflow.python.framework import ops\n'), (32, 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['self._lr_t', 'var.dtype.base_dtype'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (38, 'tensorflow.python.ops.state_ops.assign_sub', 'state_ops.assign_sub', (['var', '(2.0 * lr_t * g_t - lr_t * g_t_1)'], {}), False, 'from tensorflow.python.ops import state_ops\n'), (40, 'tensorflow.python.ops.control_flow_ops.group', 'control_flow_ops.group', (['*[var_update, g_t]'], {}), False, 'from tensorflow.python.ops import control_flow_ops\n'), (62, 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['self._lr'], {'name': '"""learning_rate"""'}), False, 'from tensorflow.python.framework import ops\n'), (63, 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['self._beta1'], {'name': '"""beta1"""'}), False, 'from tensorflow.python.framework import ops\n'), (64, 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['self._beta2'], {'name': '"""beta2"""'}), False, 'from tensorflow.python.framework import ops\n'), (74, 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['self._lr_t', 'var.dtype.base_dtype'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (75, 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['self._beta1_t', 'var.dtype.base_dtype'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (76, 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['self._beta2_t', 'var.dtype.base_dtype'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (86, 'tensorflow.div', 'tf.div', (['v_t', '(1.0 - beta2_t)'], {}), True, 'import tensorflow as tf\n'), (87, 'tensorflow.div', 'tf.div', (['m_t', '(1.0 - beta1_t)'], {}), True, 'import tensorflow as tf\n'), (93, 'tensorflow.python.ops.state_ops.assign_sub', 'state_ops.assign_sub', (['var', '(2.0 * lr_t * g_t - lr_t * g_t_1)'], {}), False, 'from tensorflow.python.ops import state_ops\n'), (95, 'tensorflow.python.ops.control_flow_ops.group', 'control_flow_ops.group', (['*[var_update, m_t, v_t, g_t]'], {}), False, 'from tensorflow.python.ops import control_flow_ops\n'), (111, 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['self._lr'], {'name': '"""learning_rate"""'}), False, 'from tensorflow.python.framework import ops\n'), (112, 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['self._lambda'], {'name': '"""lambda"""'}), False, 'from tensorflow.python.framework import ops\n'), (115, 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['self._lr_t', 'var.dtype.base_dtype'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (116, 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['self._lambda_t', 'var.dtype.base_dtype'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (119, 'tensorflow.python.ops.state_ops.assign_sub', 'state_ops.assign_sub', (['var', '(lr_t * (g_t - lambda_t * var))'], {}), False, 'from tensorflow.python.ops import state_ops\n'), (121, 'tensorflow.python.ops.control_flow_ops.group', 'control_flow_ops.group', (['*[var_update]'], {}), False, 'from tensorflow.python.ops import control_flow_ops\n'), (89, 'tensorflow.sqrt', 'tf.sqrt', (['v_t_hat'], {}), True, 'import tensorflow as tf\n'), (83, 'tensorflow.square', 'tf.square', (['grad'], {}), True, 'import 
tensorflow as tf\n')]
ChrisMorter/trieste
229ebb8a308e970b2ad2f4a10429209099e3a4f8
# %% [markdown]
# # Asynchronous Bayesian optimization with Trieste
#
# In this notebook we demonstrate Trieste's ability to perform asynchronous Bayesian optimisation, as is suitable for scenarios where the objective function can be run for several points in parallel but where observations might return at different times. To avoid wasting resources waiting for the evaluation of the whole batch, we immediately request the next point asynchronously, taking into account points that are still being evaluated. Besides saving resources, the asynchronous approach can also potentially [improve sample efficiency](https://arxiv.org/abs/1901.10452) in comparison with synchronous batch strategies, although this is highly dependent on the use case.
#
# To contrast this approach with regular [batch optimization](batch_optimization.ipynb), this notebook also shows how to run the parallel synchronous batch approach.

# %%
# silence TF warnings and info messages, only print errors
# https://stackoverflow.com/questions/35911252/disable-tensorflow-debugging-information
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import tensorflow as tf

tf.get_logger().setLevel("ERROR")
import numpy as np
import queue
import time
import timeit


# %% [markdown]
# First, let's define a simple objective that will emulate evaluations taking variable time. We will be using a classic Bayesian optimisation benchmark function [Branin](https://www.sfu.ca/~ssurjano/branin.html) with a sleep call inserted in the middle of the calculation to emulate delay. Our sleep delay is a scaled sum of all input values, to make sure delays are uneven.

# %%
from trieste.objectives import scaled_branin


def objective(points, sleep=True):
    if points.shape[1] != 2:
        raise ValueError(f"Incorrect input shape, expected (*, 2), got {points.shape}")

    observations = []
    for point in points:
        observation = scaled_branin(point)
        if sleep:
            # insert some artificial delay that
            # increases linearly with the sum of the point's values,
            # which means our evaluations will take different times
            delay = 3 * np.sum(point)
            pid = os.getpid()
            print(
                f"Process {pid}: Objective: pretends it's doing something for {delay:.2}s",
                flush=True,
            )
            time.sleep(delay)
        observations.append(observation)

    return np.array(observations)


# test the defined objective function
objective(np.array([[0.1, 0.5]]), sleep=False)

# %% [markdown]
# As always, we need to prepare the model and some initial data to kick-start the optimization process.
# %%
from trieste.space import Box
from trieste.data import Dataset
from trieste.objectives import SCALED_BRANIN_MINIMUM


search_space = Box([0, 0], [1, 1])
num_initial_points = 3
initial_query_points = search_space.sample(num_initial_points)
initial_observations = objective(initial_query_points.numpy(), sleep=False)
initial_data = Dataset(
    query_points=initial_query_points,
    observations=tf.constant(initial_observations, dtype=tf.float64),
)

import gpflow
from trieste.models.gpflow import GaussianProcessRegression


def build_model(data):
    variance = tf.math.reduce_variance(data.observations)
    kernel = gpflow.kernels.RBF(variance=variance)
    gpr = gpflow.models.GPR(data.astuple(), kernel, noise_variance=1e-5)
    gpflow.set_trainable(gpr.likelihood, False)
    return GaussianProcessRegression(gpr)


# these imports will be used later for optimization
from trieste.acquisition import LocalPenalizationAcquisitionFunction
from trieste.acquisition.rule import AsynchronousGreedy, EfficientGlobalOptimization
from trieste.ask_tell_optimization import AskTellOptimizer

# %% [markdown]
# ## Multiprocessing setup
#
# To keep this notebook as reproducible as possible, we will only be using Python's multiprocessing package here. In this section we will explain our setup and define some common code to be used later.
#
# In both the synchronous and asynchronous scenarios we will have a fixed set of worker processes performing observations. We will also have a main process responsible for the optimization process with Trieste. When Trieste suggests a new point, it is inserted into a points queue. One of the workers picks this point from the queue, performs the observation, and inserts the output into the observations queue. The main process then picks up the observation from the queue, at which point it either waits for the rest of the points in the batch to come back (synchronous scenario) or immediately suggests a new point (asynchronous scenario). This process continues either for a certain number of iterations or until we accumulate the necessary number of observations.
#
# The overall setup is illustrated in this diagram:
# ![multiprocessing setup](figures/async_bo.png)

# %%
# Necessary multiprocessing primitives
from multiprocessing import Manager, Process

# %% [markdown]
# We now define several common functions to implement the described setup. First we define a worker function that runs observations in a separate process, one point at a time. A worker takes both queues as input, reads the next point from the points queue, makes an observation, and inserts the observed data into the observations queue.


# %%
def observer_proc(points_queue, observations_queue):
    pid = os.getpid()

    while True:
        point_to_observe = points_queue.get()
        if point_to_observe is None:
            return

        print(f"Process {pid}: Observer : observing data at point {point_to_observe}", flush=True)
        new_observation = objective(point_to_observe, sleep=enable_sleep_delays)
        new_data = (point_to_observe, new_observation)

        print(f"Process {pid}: Observer : observed data {new_data}", flush=True)

        observations_queue.put(new_data)


# %% [markdown]
# Next we define two helper functions: one to create a certain number of worker processes, and another to terminate them once we are done.
# %%
def create_worker_processes(n_workers, points_queue, observations_queue):
    observer_processes = []
    for i in range(n_workers):
        worker_proc = Process(target=observer_proc, args=(points_queue, observations_queue))
        worker_proc.daemon = True
        worker_proc.start()

        observer_processes.append(worker_proc)

    return observer_processes


def terminate_processes(processes):
    for prc in processes:
        prc.terminate()
        prc.join()
        prc.close()


# %% [markdown]
# Finally we set some common parameters. See the comments below for an explanation of what each one means.

# %%
# Number of worker processes to run simultaneously
# Setting this to 1 will turn both setups into non-batch sequential optimization
num_workers = 3
# Number of iterations to run the synchronous scenario for
num_iterations = 10
# Number of observations to collect in the asynchronous scenario
num_observations = num_workers * num_iterations
# Set this flag to False to disable sleep delays in case you want the notebook to execute quickly
enable_sleep_delays = True

# %% [markdown]
# ## Asynchronous optimization
# This section runs the asynchronous optimization routine. We first set up the [ask/tell optimizer](ask_tell_optimization.ipynb), as we cannot hand over the evaluation of the objective to Trieste. Next we create thread-safe queues for points and observations, and run the optimization loop.
#
# Crucially, even though we are using the batch acquisition function Local Penalization, we specify a batch size of 1. This is because we don't really want a batch: since the number of workers we have is fixed, whenever we see a new observation we only need one point back. However, this can only be done with acquisition functions that implement greedy batch collection strategies, because they are able to take into account points that are currently being observed (in Trieste we call them "pending"). Trieste currently provides two such functions: Local Penalization and GIBBON. Notice that we use the **AsynchronousGreedy** rule, specifically designed for using greedy batch acquisition functions in asynchronous scenarios.
# %%
# setup Ask Tell BO
model = build_model(initial_data)

local_penalization_acq = LocalPenalizationAcquisitionFunction(search_space, num_samples=2000)
local_penalization_rule = AsynchronousGreedy(builder=local_penalization_acq)  # type: ignore

async_bo = AskTellOptimizer(search_space, initial_data, model, local_penalization_rule)

# retrieve process id for nice logging
pid = os.getpid()
# create point and observation queues
m = Manager()
pq = m.Queue()
oq = m.Queue()
# keep track of all workers we have launched
observer_processes = []
# counter to keep track of collected observations
points_observed = 0

start = timeit.default_timer()
try:
    observer_processes = create_worker_processes(num_workers, pq, oq)

    # init the queue with the first batch of points
    for _ in range(num_workers):
        point = async_bo.ask()
        pq.put(np.atleast_2d(point.numpy()))

    while points_observed < num_observations:
        # keep asking the queue for new observations until one arrives
        try:
            new_data = oq.get_nowait()
            print(f"Process {pid}: Main : received data {new_data}", flush=True)
        except queue.Empty:
            continue

        # new_data is a tuple of (point, observation value)
        # here we turn it into a Dataset and tell Trieste about it
        points_observed += 1
        new_data = Dataset(
            query_points=tf.constant(new_data[0], dtype=tf.float64),
            observations=tf.constant(new_data[1], dtype=tf.float64),
        )
        async_bo.tell(new_data)

        # now we can ask Trieste for one more point
        # and feed that back into the points queue
        point = async_bo.ask()
        print(f"Process {pid}: Main : acquired point {point}", flush=True)
        pq.put(np.atleast_2d(point))
finally:
    terminate_processes(observer_processes)
stop = timeit.default_timer()

# Collect the observations, compute the running time
async_lp_observations = async_bo.to_result().try_get_final_dataset().observations - SCALED_BRANIN_MINIMUM
async_lp_time = stop - start
print(f"Got {len(async_lp_observations)} observations in {async_lp_time:.2f}s")

# %% [markdown]
# ## Synchronous parallel optimization
#
# This section runs the synchronous parallel optimization with Trieste. We again use the Local Penalization acquisition function, but this time with a batch size equal to the number of workers we have available. Once Trieste suggests the batch, we add all of its points to the points queue, and the workers immediately pick them up, one point per worker. Therefore all points in the batch are evaluated in parallel.
# %%
# setup Ask Tell BO
model = build_model(initial_data)

local_penalization_acq = LocalPenalizationAcquisitionFunction(search_space, num_samples=2000)
local_penalization_rule = EfficientGlobalOptimization(  # type: ignore
    num_query_points=num_workers, builder=local_penalization_acq
)

sync_bo = AskTellOptimizer(search_space, initial_data, model, local_penalization_rule)

# retrieve process id for nice logging
pid = os.getpid()
# create point and observation queues
m = Manager()
pq = m.Queue()
oq = m.Queue()
# keep track of all workers we have launched
observer_processes = []

start = timeit.default_timer()
try:
    observer_processes = create_worker_processes(num_workers, pq, oq)

    # BO loop starts here
    for i in range(num_iterations):
        print(f"Process {pid}: Main : iteration {i} starts", flush=True)

        # get a batch of points from Trieste, send them to points queue
        # each worker picks up a point and processes it
        points = sync_bo.ask()
        for point in points.numpy():
            pq.put(point.reshape(1, -1))  # reshape is to make point a 2d array

        # now we wait for all workers to finish
        # we create an empty dataset and wait
        # until we have collected as many observations in it
        # as there were points in the batch
        all_new_data = Dataset(
            tf.zeros((0, initial_data.query_points.shape[1]), tf.float64),
            tf.zeros((0, initial_data.observations.shape[1]), tf.float64),
        )
        while len(all_new_data) < num_workers:
            # this line blocks the process until new data is available in the queue
            new_data = oq.get()
            print(f"Process {pid}: Main : received data {new_data}", flush=True)

            new_data = Dataset(
                query_points=tf.constant(new_data[0], dtype=tf.float64),
                observations=tf.constant(new_data[1], dtype=tf.float64),
            )

            all_new_data = all_new_data + new_data

        # tell Trieste of new batch of observations
        sync_bo.tell(all_new_data)
finally:
    terminate_processes(observer_processes)

stop = timeit.default_timer()

# Collect the observations, compute the running time
sync_lp_observations = (
    sync_bo.to_result().try_get_final_dataset().observations - SCALED_BRANIN_MINIMUM
)
sync_lp_time = stop - start
print(f"Got {len(sync_lp_observations)} observations in {sync_lp_time:.2f}s")

# %% [markdown]
# ## Comparison
# To compare the outcomes of the sync and async runs, let's plot their respective regrets side by side and print out the running times. For this toy problem we expect the async scenario to run a little faster on machines with multiple CPUs.

# %%
from util.plotting import plot_regret
import matplotlib.pyplot as plt

fig, ax = plt.subplots(1, 2)

sync_lp_min_idx = tf.squeeze(tf.argmin(sync_lp_observations, axis=0))
async_lp_min_idx = tf.squeeze(tf.argmin(async_lp_observations, axis=0))

plot_regret(
    sync_lp_observations.numpy(), ax[0], num_init=len(initial_data), idx_best=sync_lp_min_idx
)
ax[0].set_yscale("log")
ax[0].set_ylabel("Regret")
ax[0].set_ylim(0.0000001, 100)
ax[0].set_xlabel("# evaluations")
ax[0].set_title(f"Sync LP, {len(sync_lp_observations)} points, time {sync_lp_time:.2f}s")

plot_regret(
    async_lp_observations.numpy(), ax[1], num_init=len(initial_data), idx_best=async_lp_min_idx
)
ax[1].set_yscale("log")
ax[1].set_ylabel("Regret")
ax[1].set_ylim(0.0000001, 100)
ax[1].set_xlabel("# evaluations")
ax[1].set_title(f"Async LP, {len(async_lp_observations)} points, time {async_lp_time:.2f}s")

fig.tight_layout()
[ "tensorflow.constant", "tensorflow.zeros", "matplotlib.pyplot.subplots", "tensorflow.get_logger", "tensorflow.math.reduce_variance", "numpy.atleast_2d", "numpy.array", "numpy.sum", "tensorflow.argmin" ]
docs/notebooks/asynchronous_greedy_multiprocessing.pct.py
[(58, 'trieste.space.Box', 'Box', (['[0, 0]', '[1, 1]'], {}), False, 'from trieste.space import Box\n'), (165, 'trieste.acquisition.LocalPenalizationAcquisitionFunction', 'LocalPenalizationAcquisitionFunction', (['search_space'], {'num_samples': '(2000)'}), False, 'from trieste.acquisition import LocalPenalizationAcquisitionFunction\n'), (166, 'trieste.acquisition.rule.AsynchronousGreedy', 'AsynchronousGreedy', ([], {'builder': 'local_penalization_acq'}), False, 'from trieste.acquisition.rule import AsynchronousGreedy, EfficientGlobalOptimization\n'), (168, 'trieste.ask_tell_optimization.AskTellOptimizer', 'AskTellOptimizer', (['search_space', 'initial_data', 'model', 'local_penalization_rule'], {}), False, 'from trieste.ask_tell_optimization import AskTellOptimizer\n'), (171, 'os.getpid', 'os.getpid', ([], {}), False, 'import os\n'), (173, 'multiprocessing.Manager', 'Manager', ([], {}), False, 'from multiprocessing import Manager, Process\n'), (181, 'timeit.default_timer', 'timeit.default_timer', ([], {}), False, 'import timeit\n'), (214, 'timeit.default_timer', 'timeit.default_timer', ([], {}), False, 'import timeit\n'), (230, 'trieste.acquisition.LocalPenalizationAcquisitionFunction', 'LocalPenalizationAcquisitionFunction', (['search_space'], {'num_samples': '(2000)'}), False, 'from trieste.acquisition import LocalPenalizationAcquisitionFunction\n'), (231, 'trieste.acquisition.rule.EfficientGlobalOptimization', 'EfficientGlobalOptimization', ([], {'num_query_points': 'num_workers', 'builder': 'local_penalization_acq'}), False, 'from trieste.acquisition.rule import AsynchronousGreedy, EfficientGlobalOptimization\n'), (235, 'trieste.ask_tell_optimization.AskTellOptimizer', 'AskTellOptimizer', (['search_space', 'initial_data', 'model', 'local_penalization_rule'], {}), False, 'from trieste.ask_tell_optimization import AskTellOptimizer\n'), (239, 'os.getpid', 'os.getpid', ([], {}), False, 'import os\n'), (241, 'multiprocessing.Manager', 'Manager', ([], {}), False, 'from multiprocessing import Manager, Process\n'), (247, 'timeit.default_timer', 'timeit.default_timer', ([], {}), False, 'import timeit\n'), (286, 'timeit.default_timer', 'timeit.default_timer', ([], {}), False, 'import timeit\n'), (304, 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), True, 'import matplotlib.pyplot as plt\n'), (45, 'numpy.array', 'np.array', (['observations'], {}), True, 'import numpy as np\n'), (48, 'numpy.array', 'np.array', (['[[0.1, 0.5]]'], {}), True, 'import numpy as np\n'), (72, 'tensorflow.math.reduce_variance', 'tf.math.reduce_variance', (['data.observations'], {}), True, 'import tensorflow as tf\n'), (73, 'gpflow.kernels.RBF', 'gpflow.kernels.RBF', ([], {'variance': 'variance'}), False, 'import gpflow\n'), (75, 'gpflow.set_trainable', 'gpflow.set_trainable', (['gpr.likelihood', '(False)'], {}), False, 'import gpflow\n'), (76, 'trieste.models.gpflow.GaussianProcessRegression', 'GaussianProcessRegression', (['gpr'], {}), False, 'from trieste.models.gpflow import GaussianProcessRegression\n'), (104, 'os.getpid', 'os.getpid', ([], {}), False, 'import os\n'), (306, 'tensorflow.argmin', 'tf.argmin', (['sync_lp_observations'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (307, 'tensorflow.argmin', 'tf.argmin', (['async_lp_observations'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (14, 'tensorflow.get_logger', 'tf.get_logger', ([], {}), True, 'import tensorflow as tf\n'), (31, 'trieste.objectives.scaled_branin', 'scaled_branin', (['point'], {}), False, 'from 
trieste.objectives import scaled_branin\n'), (64, 'tensorflow.constant', 'tf.constant', (['initial_observations'], {'dtype': 'tf.float64'}), True, 'import tensorflow as tf\n'), (127, 'multiprocessing.Process', 'Process', ([], {'target': 'observer_proc', 'args': '(points_queue, obseverations_queue)'}), False, 'from multiprocessing import Manager, Process\n'), (37, 'os.getpid', 'os.getpid', ([], {}), False, 'import os\n'), (42, 'time.sleep', 'time.sleep', (['delay'], {}), False, 'import time\n'), (211, 'numpy.atleast_2d', 'np.atleast_2d', (['point'], {}), True, 'import numpy as np\n'), (266, 'tensorflow.zeros', 'tf.zeros', (['(0, initial_data.query_points.shape[1])', 'tf.float64'], {}), True, 'import tensorflow as tf\n'), (267, 'tensorflow.zeros', 'tf.zeros', (['(0, initial_data.observations.shape[1])', 'tf.float64'], {}), True, 'import tensorflow as tf\n'), (36, 'numpy.sum', 'np.sum', (['point'], {}), True, 'import numpy as np\n'), (202, 'tensorflow.constant', 'tf.constant', (['new_data[0]'], {'dtype': 'tf.float64'}), True, 'import tensorflow as tf\n'), (203, 'tensorflow.constant', 'tf.constant', (['new_data[1]'], {'dtype': 'tf.float64'}), True, 'import tensorflow as tf\n'), (275, 'tensorflow.constant', 'tf.constant', (['new_data[0]'], {'dtype': 'tf.float64'}), True, 'import tensorflow as tf\n'), (276, 'tensorflow.constant', 'tf.constant', (['new_data[1]'], {'dtype': 'tf.float64'}), True, 'import tensorflow as tf\n')]
Briggybros/Uni-Deep-Learning
5225130435356f1d7fc4c8bdbb3dcc34f9bef964
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import sys
import os
import os.path

import tensorflow as tf

sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'labsheets', 'CIFAR10'))
import cifar10 as cf

FLAGS = tf.app.flags.FLAGS

tf.app.flags.DEFINE_string('data-dir', os.getcwd() + '/dataset/',
                           'Directory where the dataset and checkpoints will be stored. (default: %(default)s)')
tf.app.flags.DEFINE_integer('max-steps', 10000,
                            'Number of mini-batches to train on. (default: %(default)d)')
tf.app.flags.DEFINE_integer('log-frequency', 10,
                            'Number of steps between logging results to the console and saving summaries (default: %(default)d)')
tf.app.flags.DEFINE_integer('save-model', 1000,
                            'Number of steps between model saves (default: %(default)d)')

# Optimisation hyperparameters
tf.app.flags.DEFINE_integer('batch-size', 256, 'Number of examples per mini-batch (default: %(default)d)')
tf.app.flags.DEFINE_float('learning-rate', 1e-4, 'Learning rate (default: %(default)d)')
tf.app.flags.DEFINE_integer('img-width', 32, 'Image width (default: %(default)d)')
tf.app.flags.DEFINE_integer('img-height', 32, 'Image height (default: %(default)d)')
tf.app.flags.DEFINE_integer('img-channels', 3, 'Image channels (default: %(default)d)')
tf.app.flags.DEFINE_integer('num-classes', 10, 'Number of classes (default: %(default)d)')
tf.app.flags.DEFINE_string('log-dir', '{cwd}/logs/'.format(cwd=os.getcwd()),
                           'Directory where event logs and checkpoints will be written. (default: %(default)s)')


run_log_dir = os.path.join(FLAGS.log_dir,
                           'exp_BN_bs_{bs}_lr_{lr}_aug_flip_brightness'.format(bs=FLAGS.batch_size,
                                                                               lr=FLAGS.learning_rate))


def weight_variable(shape):
    """weight_variable generates a weight variable of a given shape."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial, name='weights')


def bias_variable(shape):
    """bias_variable generates a bias variable of a given shape."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial, name='biases')


def deepnn(x, train):
    """deepnn builds the graph for a deep net for classifying CIFAR10 images.

    Args:
        x: an input tensor with the dimensions (N_examples, 3072), where 3072 is the
          number of pixels in a standard CIFAR10 image.

    Returns:
        y: a tensor of shape (N_examples, 10), with values equal to the logits of
          classifying the object images into one of 10 classes
          (airplane, automobile, bird, cat, deer, dog, frog, horse, ship, truck)
        img_summary: a string tensor containing sampled input images.
    """
    # Reshape to use within a convolutional neural net. Last dimension is for
    # 'features' - it would be 1 for a grayscale image, 3 for an RGB image,
    # 4 for RGBA, etc.
    x_image = tf.reshape(x, [-1, FLAGS.img_width, FLAGS.img_height, FLAGS.img_channels])

    x_image = tf.cond(train, lambda: tf.map_fn(tf.image.random_flip_left_right, x_image), lambda: x_image)
    x_image = tf.cond(train, lambda: tf.map_fn(lambda x: tf.image.random_brightness(x, 0.5), x_image), lambda: x_image)

    img_summary = tf.summary.image('Input_images', x_image)

    # First convolutional layer - maps one image to 32 feature maps.
    with tf.variable_scope('Conv_1'):
        conv1 = tf.layers.conv2d(
            inputs=x_image,
            filters=32,
            kernel_size=[5, 5],
            padding='same',
            use_bias=False,
            name='conv1'
        )
        conv1_bn = tf.nn.relu(tf.layers.batch_normalization(conv1, training=train))
        pool1 = tf.layers.max_pooling2d(
            inputs=conv1_bn,
            pool_size=[2, 2],
            strides=2,
            name='pool1'
        )
        conv2 = tf.layers.conv2d(
            inputs=pool1,
            filters=64,
            kernel_size=[5, 5],
            padding='same',
            use_bias=False,
            name='conv2'
        )
        conv2_bn = tf.nn.relu(tf.layers.batch_normalization(conv2, training=train))
        pool2 = tf.layers.max_pooling2d(
            inputs=conv2_bn,
            pool_size=[2, 2],
            strides=2,
            name='pool2'
        )
        v = tf.reshape(pool2, [-1, 4096])
        fc1 = tf.layers.dense(
            inputs=v,
            units=1024,
            activation=tf.nn.relu,
            use_bias=True,
            name='fc1'
        )
        fc2 = tf.layers.dense(
            inputs=fc1,
            units=1024,
            activation=tf.nn.relu,
            use_bias=True,
            name='fc2'
        )
        out = tf.layers.dense(
            inputs=fc2,
            units=10,
            activation=None,
            use_bias=False,
            name='out'
        )
        return out, img_summary


def main(_):
    tf.reset_default_graph()

    # Import data
    cifar = cf.cifar10(batchSize=FLAGS.batch_size, downloadDir=FLAGS.data_dir)

    with tf.variable_scope('inputs'):
        # Create the model
        x = tf.placeholder(tf.float32, [None, FLAGS.img_width * FLAGS.img_height * FLAGS.img_channels])
        # Define loss and optimizer
        y_ = tf.placeholder(tf.float32, [None, FLAGS.num_classes])
        # Whether model is training
        train = tf.placeholder(tf.bool, [])

    # Build the graph for the deep net
    y_conv, img_summary = deepnn(x, train)

    # Define your loss function - softmax_cross_entropy
    with tf.variable_scope('x_entropy'):
        cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))

    # Define your AdamOptimiser, using FLAGS.learning_rate to minimise the loss function.
    # The decay schedule needs a step counter that is actually incremented, so we create a
    # global_step variable and pass it to minimize() below; otherwise the decay never takes effect.
    global_step = tf.Variable(0, trainable=False)
    decayed_learning_rate = tf.train.exponential_decay(FLAGS.learning_rate, global_step, 1000, 0.8)

    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        optimiser = tf.train.AdamOptimizer(decayed_learning_rate, name="Adam").minimize(cross_entropy,
                                                                                        global_step=global_step)

    # calculate the prediction and the accuracy
    accuracy, acc_op = tf.metrics.accuracy(labels=tf.argmax(y_, axis=1), predictions=tf.argmax(y_conv, axis=1))

    loss_summary = tf.summary.scalar('Loss', cross_entropy)
    acc_summary = tf.summary.scalar('Accuracy', accuracy)

    # summaries for TensorBoard visualisation
    validation_summary = tf.summary.merge([img_summary, acc_summary])
    training_summary = tf.summary.merge([img_summary, loss_summary])
    test_summary = tf.summary.merge([img_summary, acc_summary])

    # saver for checkpoints
    saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)

    with tf.Session() as sess:
        summary_writer = tf.summary.FileWriter(run_log_dir + '_train', sess.graph, flush_secs=5)
        summary_writer_validation = tf.summary.FileWriter(run_log_dir + '_validate', sess.graph, flush_secs=5)

        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        # Training and validation
        for step in range(FLAGS.max_steps):
            # Training: Backpropagation using train set
            (trainImages, trainLabels) = cifar.getTrainBatch()
            (testImages, testLabels) = cifar.getTestBatch()

            _, summary_str = sess.run([optimiser, training_summary],
                                      feed_dict={x: trainImages, y_: trainLabels, train: True})

            # offset by one so training summaries do not always land on validation steps
            if step % (FLAGS.log_frequency + 1) == 0:
                summary_writer.add_summary(summary_str, step)

            ## Validation: Monitoring accuracy using validation set
            if step % FLAGS.log_frequency == 0:
                validation_accuracy, summary_str = sess.run([acc_op, validation_summary],
                                                            feed_dict={x: testImages, y_: testLabels, train: False})
                print('step %d, accuracy on validation batch: %g' % (step, validation_accuracy))
                summary_writer_validation.add_summary(summary_str, step)

            ## Save the model checkpoint periodically.
            if step % FLAGS.save_model == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_path = os.path.join(run_log_dir + '_train', 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

        # Testing

        # resetting the internal batch indexes
        cifar.reset()
        evaluated_images = 0
        test_accuracy = 0
        batch_count = 0

        # don't loop back when we reach the end of the test set
        while evaluated_images != cifar.nTestSamples:
            (testImages, testLabels) = cifar.getTestBatch(allowSmallerBatches=True)
            test_accuracy_temp, _ = sess.run([acc_op, test_summary],
                                             feed_dict={x: testImages, y_: testLabels, train: False})

            batch_count = batch_count + 1
            test_accuracy = test_accuracy + test_accuracy_temp
            evaluated_images = evaluated_images + testLabels.shape[0]

        test_accuracy = test_accuracy / batch_count
        print('accuracy on test set: %0.3f' % test_accuracy)


if __name__ == '__main__':
    tf.app.run(main=main)
[ "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.control_dependencies", "tensorflow.global_variables", "tensorflow.map_fn", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "tensorflow.layers.batch_normalization", "tensorflow.Variable", "tensorflow.get_collection", "tensorflow.summary.image", "tensorflow.app.flags.DEFINE_integer", "tensorflow.layers.dense", "tensorflow.reset_default_graph", "tensorflow.Session", "tensorflow.argmax", "tensorflow.app.run", "tensorflow.layers.conv2d", "tensorflow.image.random_brightness", "tensorflow.truncated_normal", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.summary.merge", "tensorflow.constant", "tensorflow.summary.FileWriter", "tensorflow.local_variables_initializer", "tensorflow.reshape", "tensorflow.layers.max_pooling2d", "tensorflow.app.flags.DEFINE_float", "tensorflow.variable_scope" ]
Lab_4_gs15687/cifar_augment.py
[(19, 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""max-steps"""', '(10000)', '"""Number of mini-batches to train on. (default: %(default)d)"""'], {}), True, 'import tensorflow as tf\n'), (21, 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""log-frequency"""', '(10)', '"""Number of steps between logging results to the console and saving summaries (default: %(default)d)"""'], {}), True, 'import tensorflow as tf\n'), (23, 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""save-model"""', '(1000)', '"""Number of steps between model saves (default: %(default)d)"""'], {}), True, 'import tensorflow as tf\n'), (27, 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""batch-size"""', '(256)', '"""Number of examples per mini-batch (default: %(default)d)"""'], {}), True, 'import tensorflow as tf\n'), (28, 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""learning-rate"""', '(0.0001)', '"""Learning rate (default: %(default)d)"""'], {}), True, 'import tensorflow as tf\n'), (29, 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""img-width"""', '(32)', '"""Image width (default: %(default)d)"""'], {}), True, 'import tensorflow as tf\n'), (30, 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""img-height"""', '(32)', '"""Image height (default: %(default)d)"""'], {}), True, 'import tensorflow as tf\n'), (31, 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""img-channels"""', '(3)', '"""Image channels (default: %(default)d)"""'], {}), True, 'import tensorflow as tf\n'), (32, 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num-classes"""', '(10)', '"""Number of classes (default: %(default)d)"""'], {}), True, 'import tensorflow as tf\n'), (43, 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': '(0.1)'}), True, 'import tensorflow as tf\n'), (44, 'tensorflow.Variable', 'tf.Variable', (['initial'], {'name': '"""weights"""'}), True, 'import tensorflow as tf\n'), (48, 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': 'shape'}), True, 'import tensorflow as tf\n'), (49, 'tensorflow.Variable', 'tf.Variable', (['initial'], {'name': '"""biases"""'}), True, 'import tensorflow as tf\n'), (68, 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, FLAGS.img_width, FLAGS.img_height, FLAGS.img_channels]'], {}), True, 'import tensorflow as tf\n'), (73, 'tensorflow.summary.image', 'tf.summary.image', (['"""Input_images"""', 'x_image'], {}), True, 'import tensorflow as tf\n'), (139, 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), True, 'import tensorflow as tf\n'), (142, 'cifar10.cifar10', 'cf.cifar10', ([], {'batchSize': 'FLAGS.batch_size', 'downloadDir': 'FLAGS.data_dir'}), True, 'import cifar10 as cf\n'), (161, 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS'], {}), True, 'import tensorflow as tf\n'), (168, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Loss"""', 'cross_entropy'], {}), True, 'import tensorflow as tf\n'), (169, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Accuracy"""', 'accuracy'], {}), True, 'import tensorflow as tf\n'), (172, 'tensorflow.summary.merge', 'tf.summary.merge', (['[img_summary, acc_summary]'], {}), True, 'import tensorflow as tf\n'), (173, 'tensorflow.summary.merge', 'tf.summary.merge', (['[img_summary, loss_summary]'], {}), True, 'import tensorflow as tf\n'), (174, 
'tensorflow.summary.merge', 'tf.summary.merge', (['[img_summary, acc_summary]'], {}), True, 'import tensorflow as tf\n'), (232, 'tensorflow.app.run', 'tf.app.run', ([], {'main': 'main'}), True, 'import tensorflow as tf\n'), (12, 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), False, 'import os\n'), (17, 'os.getcwd', 'os.getcwd', ([], {}), False, 'import os\n'), (76, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Conv_1"""'], {}), True, 'import tensorflow as tf\n'), (77, 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'x_image', 'filters': '(32)', 'kernel_size': '[5, 5]', 'padding': '"""same"""', 'use_bias': '(False)', 'name': '"""conv1"""'}), True, 'import tensorflow as tf\n'), (86, 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', ([], {'inputs': 'conv1_bn', 'pool_size': '[2, 2]', 'strides': '(2)', 'name': '"""pool1"""'}), True, 'import tensorflow as tf\n'), (93, 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'pool1', 'filters': '(64)', 'kernel_size': '[5, 5]', 'padding': '"""same"""', 'use_bias': '(False)', 'name': '"""conv2"""'}), True, 'import tensorflow as tf\n'), (102, 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', ([], {'inputs': 'conv2_bn', 'pool_size': '[2, 2]', 'strides': '(2)', 'name': '"""pool2"""'}), True, 'import tensorflow as tf\n'), (109, 'tensorflow.reshape', 'tf.reshape', (['pool2', '[-1, 4096]'], {}), True, 'import tensorflow as tf\n'), (111, 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'v', 'units': '(1024)', 'activation': 'tf.nn.relu', 'use_bias': '(True)', 'name': '"""fc1"""'}), True, 'import tensorflow as tf\n'), (119, 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'fc1', 'units': '(1024)', 'activation': 'tf.nn.relu', 'use_bias': '(True)', 'name': '"""fc2"""'}), True, 'import tensorflow as tf\n'), (127, 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'fc2', 'units': '(10)', 'activation': 'None', 'use_bias': '(False)', 'name': '"""out"""'}), True, 'import tensorflow as tf\n'), (144, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""inputs"""'], {}), True, 'import tensorflow as tf\n'), (146, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, FLAGS.img_width * FLAGS.img_height * FLAGS.img_channels]'], {}), True, 'import tensorflow as tf\n'), (148, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, FLAGS.num_classes]'], {}), True, 'import tensorflow as tf\n'), (150, 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool', '[]'], {}), True, 'import tensorflow as tf\n'), (156, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""x_entropy"""'], {}), True, 'import tensorflow as tf\n'), (160, 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), True, 'import tensorflow as tf\n'), (162, 'tensorflow.control_dependencies', 'tf.control_dependencies', (['update_ops'], {}), True, 'import tensorflow as tf\n'), (177, 'tensorflow.global_variables', 'tf.global_variables', ([], {}), True, 'import tensorflow as tf\n'), (179, 'tensorflow.Session', 'tf.Session', ([], {}), True, 'import tensorflow as tf\n'), (180, 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["(run_log_dir + '_train')", 'sess.graph'], {'flush_secs': '(5)'}), True, 'import tensorflow as tf\n'), (181, 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["(run_log_dir + '_validate')", 'sess.graph'], {'flush_secs': '(5)'}), True, 'import tensorflow as tf\n'), (33, 'os.getcwd', 'os.getcwd', ([], {}), False, 'import 
os\n'), (70, 'tensorflow.map_fn', 'tf.map_fn', (['tf.image.random_flip_left_right', 'x_image'], {}), True, 'import tensorflow as tf\n'), (85, 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['conv1'], {'training': 'train'}), True, 'import tensorflow as tf\n'), (101, 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['conv2'], {'training': 'train'}), True, 'import tensorflow as tf\n'), (157, 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'labels': 'y_', 'logits': 'y_conv'}), True, 'import tensorflow as tf\n'), (166, 'tensorflow.argmax', 'tf.argmax', (['y_'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (166, 'tensorflow.argmax', 'tf.argmax', (['y_conv'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (183, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (184, 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (163, 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['decayed_learning_rate'], {'name': '"""Adam"""'}), True, 'import tensorflow as tf\n'), (206, 'os.path.join', 'os.path.join', (["(run_log_dir + '_train')", '"""model.ckpt"""'], {}), False, 'import os\n'), (71, 'tensorflow.image.random_brightness', 'tf.image.random_brightness', (['x', '(0.5)'], {}), True, 'import tensorflow as tf\n')]
KennyCandy/HAR
739ede1907374215cfc1dd6bd525d8d5b5f4606e
# Note that the dataset must be already downloaded for this script to work, do:
#   $ cd data/
#   $ python download_dataset.py
# quoc_trinh

import tensorflow as tf
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn import metrics

import os
import sys
import datetime

# get current file_name as [0] of array
file_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
print(" File Name:")
print(file_name)
print("")

# FLAG indicating whether this is the training process or not.
FLAG = 'train'
POOL_X = 16
POOL_Y = 18
N_HIDDEN_CONFIG = 32

save_path_name = file_name + "/model.ckpt"

print(datetime.datetime.now())
# Write to file: time to start, type, time to end
f = open(file_name + '/time.txt', 'a+')
f.write("------------- \n")
f.write("This is time \n")
f.write("Started at \n")
f.write(str(datetime.datetime.now()) + '\n')

if __name__ == "__main__":

    # -----------------------------
    # step1: load and prepare data
    # -----------------------------

    # Those are separate normalised input features for the neural network
    INPUT_SIGNAL_TYPES = [
        "body_acc_x_",
        "body_acc_y_",
        "body_acc_z_",
        "body_gyro_x_",
        "body_gyro_y_",
        "body_gyro_z_",
        "total_acc_x_",
        "total_acc_y_",
        "total_acc_z_"
    ]

    # Output classes to learn how to classify
    LABELS = [
        "WALKING",
        "WALKING_UPSTAIRS",
        "WALKING_DOWNSTAIRS",
        "SITTING",
        "STANDING",
        "LAYING"
    ]

    DATA_PATH = "../data/"
    DATASET_PATH = DATA_PATH + "UCI HAR Dataset/"
    print("\n" + "Dataset is now located at: " + DATASET_PATH)

    # Preparing data set:
    TRAIN = "train/"
    TEST = "test/"

    # Load "X" (the neural network's training and testing inputs)
    def load_X(X_signals_paths):
        X_signals = []
        for signal_type_path in X_signals_paths:
            file = open(signal_type_path, 'rb')
            # Read dataset from disk, dealing with text files' syntax
            X_signals.append(
                [np.array(serie, dtype=np.float32) for serie in [
                    row.replace('  ', ' ').strip().split(' ') for row in file
                ]]
            )
            file.close()
        """Examples
        --------
        >>> x = np.arange(4).reshape((2, 2))
        >>> x
        array([[0, 1],
               [2, 3]])
        >>> np.transpose(x)
        array([[0, 2],
               [1, 3]])
        >>> x = np.ones((1, 2, 3))
        >>> np.transpose(x, (1, 0, 2)).shape
        (2, 1, 3)
        """
        return np.transpose(np.array(X_signals), (1, 2, 0))

    X_train_signals_paths = [
        DATASET_PATH + TRAIN + "Inertial Signals/" + signal + "train.txt" for signal in INPUT_SIGNAL_TYPES
    ]
    X_test_signals_paths = [
        DATASET_PATH + TEST + "Inertial Signals/" + signal + "test.txt" for signal in INPUT_SIGNAL_TYPES
    ]
    X_train = load_X(X_train_signals_paths)  # [7352, 128, 9]
    X_test = load_X(X_test_signals_paths)  # [2947, 128, 9]

    # print(X_train)
    print(len(X_train))  # 7352
    print(len(X_train[0]))  # 128
    print(len(X_train[0][0]))  # 9
    print(type(X_train))

    X_train = np.reshape(X_train, [-1, 32, 36])
    X_test = np.reshape(X_test, [-1, 32, 36])
    print("-----------------X_train---------------")
    # print(X_train)
    print(len(X_train))  # 7352
    print(len(X_train[0]))  # 32
    print(len(X_train[0][0]))  # 36
    print(type(X_train))
    # exit()

    y_train_path = DATASET_PATH + TRAIN + "y_train.txt"
    y_test_path = DATASET_PATH + TEST + "y_test.txt"

    def one_hot(label):
        """convert label from dense to one hot
          argument:
            label: ndarray dense label, shape: [sample_num, 1]
          return:
            one_hot_label: ndarray one hot, shape: [sample_num, n_class]
        """
        label_num = len(label)
        new_label = label.reshape(label_num)  # shape: [sample_num]
        # because max is 5, and we will create 6 columns
        n_values = np.max(new_label) + 1
        return np.eye(n_values)[np.array(new_label, dtype=np.int32)]

    # Load "y" (the neural network's training and testing outputs)
    def load_y(y_path):
        file = open(y_path, 'rb')
        # Read dataset from disk, dealing with text file's syntax
        y_ = np.array(
            [elem for elem in [
                row.replace('  ', ' ').strip().split(' ') for row in file
            ]],
            dtype=np.int32
        )
        file.close()
        # Subtract 1 from each output class for friendly 0-based indexing
        return y_ - 1

    y_train = one_hot(load_y(y_train_path))
    y_test = one_hot(load_y(y_test_path))
    print("---------y_train----------")
    # print(y_train)
    print(len(y_train))  # 7352
    print(len(y_train[0]))  # 6

    # -----------------------------------
    # step2: define parameters for model
    # -----------------------------------
    class Config(object):
        """
        define a class to store parameters,
        the input should be feature mat of training and testing
        """

        def __init__(self, X_train, X_test):
            # Input data
            self.train_count = len(X_train)  # 7352 training series
            self.test_data_count = len(X_test)  # 2947 testing series
            self.n_steps = len(X_train[0])  # 32 time steps per series (after reshaping to [-1, 32, 36])

            # Training
            self.learning_rate = 0.0025
            self.lambda_loss_amount = 0.0015
            self.training_epochs = 300
            self.batch_size = 1000

            # LSTM structure
            self.n_inputs = len(X_train[0][0])  # features per time step (36 after reshaping; originally 9 from three 3D sensors)
            self.n_hidden = N_HIDDEN_CONFIG  # nb of neurons inside the neural network
            self.n_classes = 6  # Final output classes
            self.W = {
                'hidden': tf.Variable(tf.random_normal([self.n_inputs, self.n_hidden])),  # [36, 32]
                'output': tf.Variable(tf.random_normal([self.n_hidden, self.n_classes]))  # [32, 6]
            }
            self.biases = {
                'hidden': tf.Variable(tf.random_normal([self.n_hidden], mean=1.0)),  # [32]
                'output': tf.Variable(tf.random_normal([self.n_classes]))  # [6]
            }

    config = Config(X_train, X_test)
    # print("Some useful info to get an insight on dataset's shape and normalisation:")
    # print("features shape, labels shape, each features mean, each features standard deviation")
    # print(X_test.shape, y_test.shape,
    #       np.mean(X_test), np.std(X_test))
    # print("the dataset is therefore properly normalised, as expected.")
    #
    # ------------------------------------------------------
    # step3: Let's get serious and build the neural network
    # ------------------------------------------------------
    # [None, 32, 36]
    X = tf.placeholder(tf.float32, [None, config.n_steps, config.n_inputs])
    # [None, 6]
    Y = tf.placeholder(tf.float32, [None, config.n_classes])

    print("-------X Y----------")
    print(X)
    X = tf.reshape(X, shape=[-1, 32, 36])
    print(X)
    print(Y)
    Y = tf.reshape(Y, shape=[-1, 6])
    print(Y)

    # Weight Initialization
    def weight_variable(shape):
        # returns a random value drawn from a truncated normal distribution
        initial = tf.truncated_normal(shape, mean=0.0, stddev=0.1, dtype=tf.float32)
        return tf.Variable(initial)

    def bias_variable(shape):
        initial = tf.constant(0.1, shape=shape, name='Bias')
        return tf.Variable(initial)

    # Convolution and Pooling
    def conv2d(x, W):
        # Must have `strides[0] = strides[3] = 1`.
        # For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
        return tf.nn.conv2d(input=x, filter=W, strides=[1, 1, 1, 1], padding='SAME', name='conv_2d')

    def max_pool_2x2(x):
        return tf.nn.max_pool(value=x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='max_pool')

    def LSTM_Network(feature_mat, config):
        """model a LSTM Network,
          it stacks 2 LSTM layers, each layer has n_hidden=32 cells
          and 1 output layer, which is a fully connected layer
          argument:
            feature_mat: ndarray  feature matrix, shape=[batch_size, time_steps, n_inputs]
            config: class containing config of network
          return:
            : matrix  output shape [batch_size, n_classes]
        """
        W_conv1 = weight_variable([3, 3, 1, 32])
        b_conv1 = bias_variable([32])
        # x_image = tf.reshape(x, shape=[-1, 28, 28, 1])
        feature_mat_image = tf.reshape(feature_mat, shape=[-1, 32, 36, 1])
        print("----feature_mat_image-----")
        print(feature_mat_image.get_shape())

        h_conv1 = tf.nn.relu(conv2d(feature_mat_image, W_conv1) + b_conv1)
        h_pool1 = h_conv1

        # Second Convolutional Layer
        W_conv2 = weight_variable([3, 3, 32, 32])
        b_conv2 = weight_variable([32])
        h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
        h_pool2 = h_conv2

        # Third Convolutional Layer
        W_conv3 = weight_variable([3, 3, 32, 32])
        b_conv3 = weight_variable([32])
        h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)
        h_pool3 = max_pool_2x2(h_conv3)

        # Fourth Convolutional Layer
        W_conv4 = weight_variable([3, 3, 32, 128])
        b_conv4 = weight_variable([128])
        h_conv4 = tf.nn.relu(conv2d(h_pool3, W_conv4) + b_conv4)
        h_pool4 = h_conv4

        # Fifth Convolutional Layer
        W_conv5 = weight_variable([3, 3, 128, 1])
        b_conv5 = weight_variable([1])
        h_conv5 = tf.nn.relu(conv2d(h_pool4, W_conv5) + b_conv5)
        h_pool5 = h_conv5

        h_pool5 = tf.reshape(h_pool5, shape=[-1, POOL_X, POOL_Y])
        feature_mat = h_pool5
        print("----feature_mat-----")
        print(feature_mat)
        # exit()

        # W_fc1 = weight_variable([8 * 9 * 1, 1024])
        # b_fc1 = bias_variable([1024])
        # h_pool2_flat = tf.reshape(h_pool2, [-1, 8 * 9 * 1])
        # h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
        # print("----h_fc1_drop-----")
        # print(h_fc1)
        # exit()
        #
        # # keep_prob = tf.placeholder(tf.float32)
        # keep_prob = tf.placeholder(1.0)
        # h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob=keep_prob)
        # print("----h_fc1_drop-----")
        # print(h_fc1_drop)
        # exit()
        #
        # W_fc2 = weight_variable([1024, 10])
        # b_fc2 = bias_variable([10])
        #
        # y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
        # print("----y_conv-----")
        # print(y_conv)
        # exit()

        # Exchange dim 1 and dim 0
        # Start at: [0,1,2] = [batch_size, POOL_X, POOL_Y] = [batch_size, 16, 18]
        feature_mat = tf.transpose(feature_mat, [1, 0, 2])
        # New feature_mat's shape: [POOL_X, batch_size, POOL_Y] = [16, batch_size, 18]
        print("----feature_mat-----")
        print(feature_mat)
        # exit()

        # Temporarily crush the feature_mat's dimensions
        feature_mat = tf.reshape(feature_mat, [-1, config.n_inputs])  # n_inputs = 36
        # New feature_mat's shape: [(n_steps/4)*batch_size, n_inputs] = [8*batch_size, 36]

        # Linear activation, reshaping inputs to the LSTM's number of hidden:
        hidden = tf.nn.relu(tf.matmul(
            feature_mat, config.W['hidden']
        ) + config.biases['hidden'])
        # New shape of hidden: [(n_steps/4)*batch_size, n_hidden] = [8*batch_size, 32]
        print("--n_steps--")
        print(config.n_steps)
        print("--hidden--")
        print(hidden)

        # Split the series because the rnn cell needs time_steps features, each of shape:
        hidden = tf.split(0, config.n_steps // 4, hidden)  # integer division: 32 // 4 = 8 tensors of shape [batch_size, 32]
        # New hidden's shape: a list of length "time_step" containing tensors of shape [batch_size, n_hidden]

        # Define LSTM cell of first hidden layer:
        lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(config.n_hidden, forget_bias=1.0)

        # Stack two LSTM layers, both layers have the same shape
        lstm_layers = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * 2)

        # Get LSTM outputs; the states are internal to the LSTM cells, they are not our concern here
        outputs, _ = tf.nn.rnn(lstm_layers, hidden, dtype=tf.float32)
        # outputs' shape: a list of length "time_step" containing tensors of shape [batch_size, n_hidden]
        print("------------------list-------------------")
        print(outputs)

        # Get last time step's output feature for a "many to one" style classifier,
        # as in the image describing RNNs at the top of this page
        lstm_last_output = outputs[-1]  # Get the last element of the array: [?, 32]
        print("------------------last outputs-------------------")
        print(lstm_last_output)

        # Linear activation
        return tf.matmul(lstm_last_output, config.W['output']) + config.biases['output']

    pred_Y = LSTM_Network(X, config)  # shape [?, 6]
    print("------------------pred_Y-------------------")
    print(pred_Y)

    # Loss, train_step, evaluation
    l2 = config.lambda_loss_amount * \
        sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables())
    # Softmax loss and L2
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(pred_Y, Y)) + l2
    train_step = tf.train.AdamOptimizer(
        learning_rate=config.learning_rate).minimize(cost)

    correct_prediction = tf.equal(tf.argmax(pred_Y, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, dtype=tf.float32))

    # --------------------------------------------
    # step4: Hooray, now train the neural network
    # --------------------------------------------
    # Note that log_device_placement can be turned ON but will cause console spam.

    # Initializing the variables
    init = tf.initialize_all_variables()

    # Add ops to save and restore all the variables.
    saver = tf.train.Saver()
    best_accuracy = 0.0
    # sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=False))

    if FLAG == 'train':  # If it is the training mode
        with tf.Session() as sess:
            # tf.initialize_all_variables().run()
            sess.run(init)  # .run()
            f.write("---Save model \n")
            # Start training for each batch and loop epochs
            for i in range(config.training_epochs):
                for start, end in zip(range(0, config.train_count, config.batch_size),  # (0, 7352, 1000)
                                      range(config.batch_size, config.train_count + 1, config.batch_size)):  # (1000, 7353, 1000)
                    print(start)
                    print(end)
                    sess.run(train_step, feed_dict={X: X_train[start:end],
                                                    Y: y_train[start:end]})

                # Test completely at every epoch: calculate accuracy
                pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost], feed_dict={
                    X: X_test, Y: y_test})
                print("training iter: {},".format(i) +
                      " test accuracy : {},".format(accuracy_out) +
                      " loss : {}".format(loss_out))
                best_accuracy = max(best_accuracy, accuracy_out)

            # Save the model in this session
            save_path = saver.save(sess, file_name + "/model.ckpt")
            print("Model saved in file: %s" % save_path)

            print("")
            print("final loss: {}".format(loss_out))
            print("final test accuracy: {}".format(accuracy_out))
            print("best epoch's test accuracy: {}".format(best_accuracy))
            print("")

            # Write all output to file
            f.write("final loss:" + str(format(loss_out)) + " \n")
            f.write("final test accuracy:" + str(format(accuracy_out)) + " \n")
            f.write("best epoch's test accuracy:" + str(format(best_accuracy)) + " \n")

    else:
        # Running a new session
        print("Starting 2nd session...")
        with tf.Session() as sess:
            # Initialize variables
            sess.run(init)
            f.write("---Restore model \n")

            # Restore model weights from previously saved model
            saver.restore(sess, file_name + "/model.ckpt")
            print("Model restored from file: %s" % save_path_name)

            # Test completely at every epoch: calculate accuracy
            pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost], feed_dict={
                X: X_test, Y: y_test})
            # print("training iter: {}," +
            #       " test accuracy : {},".format(accuracy_out) +
            #       " loss : {}".format(loss_out))
            best_accuracy = max(best_accuracy, accuracy_out)

            print("")
            print("final loss: {}".format(loss_out))
            print("final test accuracy: {}".format(accuracy_out))
            print("best epoch's test accuracy: {}".format(best_accuracy))
            print("")

            # Write all output to file
            f.write("final loss:" + str(format(loss_out)) + " \n")
            f.write("final test accuracy:" + str(format(accuracy_out)) + " \n")
            f.write("best epoch's test accuracy: " + str(format(best_accuracy)) + " \n")

    # #
    # #------------------------------------------------------------------
    # # step5: Training is good, but having visual insight is even better
    # #------------------------------------------------------------------
    # # The code is in the .ipynb
    # #
    # #------------------------------------------------------------------
    # # step6: And finally, the multi-class confusion matrix and metrics!
    # #------------------------------------------------------------------
    # # The code is in the .ipynb

    f.write("Ended at \n")
    f.write(str(datetime.datetime.now()) + '\n')
    f.write("------------- \n")
    f.close()
[ "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.nn.max_pool", "tensorflow.cast", "numpy.max", "tensorflow.nn.l2_loss", "tensorflow.train.AdamOptimizer", "tensorflow.nn.conv2d", "tensorflow.Variable", "numpy.reshape", "numpy.eye", "tensorflow.initialize_all_variables", "tensorflow.nn.rnn_cell.MultiRNNCell", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.trainable_variables", "tensorflow.argmax", "tensorflow.nn.rnn_cell.BasicLSTMCell", "tensorflow.matmul", "tensorflow.truncated_normal", "tensorflow.placeholder", "tensorflow.split", "numpy.array", "tensorflow.constant", "tensorflow.transpose", "tensorflow.nn.rnn", "tensorflow.reshape", "tensorflow.random_normal" ]
module45/CCCPC_32_32.py
[(30, 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), False, 'import datetime\n'), (121, 'numpy.reshape', 'np.reshape', (['X_train', '[-1, 32, 36]'], {}), True, 'import numpy as np\n'), (122, 'numpy.reshape', 'np.reshape', (['X_test', '[-1, 32, 36]'], {}), True, 'import numpy as np\n'), (218, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, config.n_steps, config.n_inputs]'], {}), True, 'import tensorflow as tf\n'), (220, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, config.n_classes]'], {}), True, 'import tensorflow as tf\n'), (224, 'tensorflow.reshape', 'tf.reshape', (['X'], {'shape': '[-1, 32, 36]'}), True, 'import tensorflow as tf\n'), (228, 'tensorflow.reshape', 'tf.reshape', (['Y'], {'shape': '[-1, 6]'}), True, 'import tensorflow as tf\n'), (398, 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), True, 'import tensorflow as tf\n'), (400, 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), True, 'import tensorflow as tf\n'), (18, 'os.path.basename', 'os.path.basename', (['sys.argv[0]'], {}), False, 'import os\n'), (234, 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'mean': '(0.0)', 'stddev': '(0.1)', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (235, 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), True, 'import tensorflow as tf\n'), (238, 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': 'shape', 'name': '"""Bias"""'}), True, 'import tensorflow as tf\n'), (239, 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), True, 'import tensorflow as tf\n'), (245, 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'x', 'filter': 'W', 'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""', 'name': '"""conv_2d"""'}), True, 'import tensorflow as tf\n'), (248, 'tensorflow.nn.max_pool', 'tf.nn.max_pool', ([], {'value': 'x', 'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""', 'name': '"""max_pool"""'}), True, 'import tensorflow as tf\n'), (265, 'tensorflow.reshape', 'tf.reshape', (['feature_mat'], {'shape': '[-1, 32, 36, 1]'}), True, 'import tensorflow as tf\n'), (298, 'tensorflow.reshape', 'tf.reshape', (['h_pool5'], {'shape': '[-1, POOL_X, POOL_Y]'}), True, 'import tensorflow as tf\n'), (329, 'tensorflow.transpose', 'tf.transpose', (['feature_mat', '[1, 0, 2]'], {}), True, 'import tensorflow as tf\n'), (336, 'tensorflow.reshape', 'tf.reshape', (['feature_mat', '[-1, config.n_inputs]'], {}), True, 'import tensorflow as tf\n'), (351, 'tensorflow.split', 'tf.split', (['(0)', '(config.n_steps / 4)', 'hidden'], {}), True, 'import tensorflow as tf\n'), (355, 'tensorflow.nn.rnn_cell.BasicLSTMCell', 'tf.nn.rnn_cell.BasicLSTMCell', (['config.n_hidden'], {'forget_bias': '(1.0)'}), True, 'import tensorflow as tf\n'), (358, 'tensorflow.nn.rnn_cell.MultiRNNCell', 'tf.nn.rnn_cell.MultiRNNCell', (['([lstm_cell] * 2)'], {}), True, 'import tensorflow as tf\n'), (361, 'tensorflow.nn.rnn', 'tf.nn.rnn', (['lsmt_layers', 'hidden'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (389, 'tensorflow.argmax', 'tf.argmax', (['pred_Y', '(1)'], {}), True, 'import tensorflow as tf\n'), (389, 'tensorflow.argmax', 'tf.argmax', (['Y', '(1)'], {}), True, 'import tensorflow as tf\n'), (390, 'tensorflow.cast', 'tf.cast', (['correct_prediction'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (405, 'tensorflow.Session', 'tf.Session', ([], {}), True, 'import tensorflow as tf\n'), (444, 'tensorflow.Session', 'tf.Session', ([], {}), True, 
'import tensorflow as tf\n'), (36, 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), False, 'import datetime\n'), (103, 'numpy.array', 'np.array', (['X_signals'], {}), True, 'import numpy as np\n'), (146, 'numpy.max', 'np.max', (['new_label'], {}), True, 'import numpy as np\n'), (147, 'numpy.eye', 'np.eye', (['n_values'], {}), True, 'import numpy as np\n'), (374, 'tensorflow.matmul', 'tf.matmul', (['lstm_last_output', "config.W['output']"], {}), True, 'import tensorflow as tf\n'), (385, 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', (['pred_Y', 'Y'], {}), True, 'import tensorflow as tf\n'), (386, 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'config.learning_rate'}), True, 'import tensorflow as tf\n'), (482, 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), False, 'import datetime\n'), (147, 'numpy.array', 'np.array', (['new_label'], {'dtype': 'np.int32'}), True, 'import numpy as np\n'), (340, 'tensorflow.matmul', 'tf.matmul', (['feature_mat', "config.W['hidden']"], {}), True, 'import tensorflow as tf\n'), (382, 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['tf_var'], {}), True, 'import tensorflow as tf\n'), (81, 'numpy.array', 'np.array', (['serie'], {'dtype': 'np.float32'}), True, 'import numpy as np\n'), (198, 'tensorflow.random_normal', 'tf.random_normal', (['[self.n_inputs, self.n_hidden]'], {}), True, 'import tensorflow as tf\n'), (199, 'tensorflow.random_normal', 'tf.random_normal', (['[self.n_hidden, self.n_classes]'], {}), True, 'import tensorflow as tf\n'), (202, 'tensorflow.random_normal', 'tf.random_normal', (['[self.n_hidden]'], {'mean': '(1.0)'}), True, 'import tensorflow as tf\n'), (203, 'tensorflow.random_normal', 'tf.random_normal', (['[self.n_classes]'], {}), True, 'import tensorflow as tf\n'), (382, 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), True, 'import tensorflow as tf\n')]
jacke121/X-Detector
a24e370a5acb6f5c29cd5db81fa4270f2697b8c1
# Copyright 2018 Changan Wang

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import sys
#from scipy.misc import imread, imsave, imshow, imresize
import tensorflow as tf

from net import xdet_body_v3
from utility import train_helper
from dataset import dataset_factory
from preprocessing import preprocessing_factory
from preprocessing import anchor_manipulator

# hardware related configuration
tf.app.flags.DEFINE_integer(
    'num_readers', 16,
    'The number of parallel readers that read data from the dataset.')
tf.app.flags.DEFINE_integer(
    'num_preprocessing_threads', 48,
    'The number of threads used to create the batches.')
tf.app.flags.DEFINE_integer(
    'num_cpu_threads', 0,
    'The number of CPU cores used to train.')
tf.app.flags.DEFINE_float(
    'gpu_memory_fraction', 1., 'GPU memory fraction to use.')
# scaffold related configuration
tf.app.flags.DEFINE_string(
    'data_dir', '../PASCAL/VOC_TF/VOC0712TF/',
    'The directory where the dataset input data is stored.')
tf.app.flags.DEFINE_string(
    'dataset_name', 'pascalvoc_0712', 'The name of the dataset to load.')
tf.app.flags.DEFINE_integer(
    'num_classes', 21, 'Number of classes to use in the dataset.')
tf.app.flags.DEFINE_string(
    'dataset_split_name', 'train', 'The name of the train/test split.')
tf.app.flags.DEFINE_string(
    'model_dir', './logs_v3/',
    'The directory where the model will be stored.')
tf.app.flags.DEFINE_integer(
    'log_every_n_steps', 10,
    'The frequency with which logs are printed.')
tf.app.flags.DEFINE_integer(
    'save_summary_steps', 500,
    'The frequency, in steps, with which summaries are saved.')
tf.app.flags.DEFINE_integer(
    'save_checkpoints_secs', 7200,
    'The frequency with which the model is saved, in seconds.')
# model related configuration
tf.app.flags.DEFINE_integer(
    'train_image_size', 352,
    'The size of the input image for the model to use.')
tf.app.flags.DEFINE_integer(
    'resnet_size', 50,
    'The size of the ResNet model to use.')
tf.app.flags.DEFINE_integer(
    'train_epochs', None,
    'The number of epochs to use for training.')
tf.app.flags.DEFINE_integer(
    'batch_size', 12,
    'Batch size for training and evaluation.')
tf.app.flags.DEFINE_string(
    'data_format', 'channels_first',  # 'channels_first' or 'channels_last'
    'A flag to override the data format used in the model. channels_first '
    'provides a performance boost on GPU but is not always compatible '
    'with CPU. If left unspecified, the data format will be chosen '
    'automatically based on whether TensorFlow was built for CPU or GPU.')
tf.app.flags.DEFINE_float(
    'negative_ratio', 3., 'Negative ratio in the loss function.')
tf.app.flags.DEFINE_float(
    'match_threshold', 0.56, 'Matching threshold in the loss function.')
tf.app.flags.DEFINE_float(
    'neg_threshold', 0.4,
    'Matching threshold for the negative examples in the loss function.')
# optimizer related configuration
tf.app.flags.DEFINE_float(
    'weight_decay', 0.0005, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_float(
    'momentum', 0.9,
    'The momentum for the MomentumOptimizer and RMSPropOptimizer.')
tf.app.flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.')
tf.app.flags.DEFINE_float(
    'end_learning_rate', 0.00005,
    'The minimal end learning rate used by a polynomial decay learning rate.')
# for learning rate exponential_decay
tf.app.flags.DEFINE_float(
    'learning_rate_decay_factor', 0.96, 'Learning rate decay factor.')
tf.app.flags.DEFINE_float(
    'decay_steps', 1000,
    'Number of steps after which the learning rate decays.')
# for learning rate piecewise_constant decay
tf.app.flags.DEFINE_string(
    'decay_boundaries', '60000, 800000',
    'Learning rate decay boundaries by global_step (comma-separated list).')
tf.app.flags.DEFINE_string(
    'lr_decay_factors', '1, 0.6, 0.1',
    'The values of learning_rate decay factor for each segment between boundaries (comma-separated list).')
# checkpoint related configuration
tf.app.flags.DEFINE_string(
    'checkpoint_path', './model/resnet50',#None,
    'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_string(
    'checkpoint_model_scope', '',
    'Model scope in the checkpoint. None if the same as the trained model.')
tf.app.flags.DEFINE_string(
    'model_scope', 'xdet_resnet',
    'Model scope name used to replace the name_scope in checkpoint.')
tf.app.flags.DEFINE_string(
    'checkpoint_exclude_scopes', 'xdet_resnet/xdet_head, xdet_resnet/xdet_multi_path, xdet_resnet/xdet_additional_conv',#None
    'Comma-separated list of scopes of variables to exclude when restoring from a checkpoint.')
tf.app.flags.DEFINE_boolean(
    'ignore_missing_vars', True,
    'Whether to ignore missing variables when restoring a checkpoint.')
tf.app.flags.DEFINE_boolean(
    'run_on_cloud', True,
    'Whether we will train on the cloud (the pre-trained model will be placed in the "data_dir/cloud_checkpoint_path").')
tf.app.flags.DEFINE_string(
    'cloud_checkpoint_path', 'resnet50/model.ckpt',
    'The path to a checkpoint from which to fine-tune.')

FLAGS = tf.app.flags.FLAGS

def input_pipeline():
    image_preprocessing_fn = lambda image_, shape_, glabels_, gbboxes_ : preprocessing_factory.get_preprocessing(
        'xdet_resnet', is_training=True)(image_, glabels_, gbboxes_, out_shape=[FLAGS.train_image_size] * 2, data_format=('NCHW' if FLAGS.data_format=='channels_first' else 'NHWC'))

    anchor_creator = anchor_manipulator.AnchorCreator([FLAGS.train_image_size] * 2,
                                                      layers_shapes = [(22, 22)],
                                                      anchor_scales = [[0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]],
                                                      extra_anchor_scales = [[0.1]],
                                                      anchor_ratios = [[1., 2., 3., .5, 0.3333]],
                                                      layer_steps = [16])

    def input_fn():
        all_anchors, num_anchors_list = anchor_creator.get_all_anchors()

        anchor_encoder_decoder = anchor_manipulator.AnchorEncoder(all_anchors,
                                                                  num_classes = FLAGS.num_classes,
                                                                  allowed_borders = [0.05],
                                                                  positive_threshold = FLAGS.match_threshold,
                                                                  ignore_threshold = FLAGS.neg_threshold,
                                                                  prior_scaling=[0.1, 0.1, 0.2, 0.2])

        list_from_batch, _ = dataset_factory.get_dataset(FLAGS.dataset_name, FLAGS.dataset_split_name,
                                                         FLAGS.data_dir,
                                                         image_preprocessing_fn,
                                                         file_pattern = None,
                                                         reader = None,
                                                         batch_size = FLAGS.batch_size,
                                                         num_readers = FLAGS.num_readers,
                                                         num_preprocessing_threads = FLAGS.num_preprocessing_threads,
                                                         num_epochs = FLAGS.train_epochs,
                                                         anchor_encoder = anchor_encoder_decoder.encode_all_anchors)

        return list_from_batch[-1], {'targets': list_from_batch[:-1],
                                     'decode_fn': lambda pred : anchor_encoder_decoder.decode_all_anchors([pred])[0],
                                     'num_anchors_list': num_anchors_list}

    return input_fn

def modified_smooth_l1(bbox_pred, bbox_targets, bbox_inside_weights = 1., bbox_outside_weights = 1., sigma = 1.):
    """
        ResultLoss = outside_weights * SmoothL1(inside_weights * (bbox_pred - bbox_targets))
        SmoothL1(x) = 0.5 * (sigma * x)^2,    if |x| < 1 / sigma^2
                      |x| - 0.5 / sigma^2,    otherwise
    """
    sigma2 = sigma * sigma

    inside_mul = tf.multiply(bbox_inside_weights, tf.subtract(bbox_pred, bbox_targets))

    smooth_l1_sign = tf.cast(tf.less(tf.abs(inside_mul), 1.0 / sigma2), tf.float32)
    smooth_l1_option1 = tf.multiply(tf.multiply(inside_mul, inside_mul), 0.5 * sigma2)
    smooth_l1_option2 = tf.subtract(tf.abs(inside_mul), 0.5 / sigma2)
    smooth_l1_result = tf.add(tf.multiply(smooth_l1_option1, smooth_l1_sign),
                              tf.multiply(smooth_l1_option2, tf.abs(tf.subtract(smooth_l1_sign, 1.0))))

    outside_mul = tf.multiply(bbox_outside_weights, smooth_l1_result)

    return outside_mul

def xdet_model_fn(features, labels, mode, params):
    """Our model_fn for ResNet to be used with our Estimator."""
    num_anchors_list = labels['num_anchors_list']
    num_feature_layers = len(num_anchors_list)

    shape = labels['targets'][-1]
    glabels = labels['targets'][:num_feature_layers][0]
    gtargets = labels['targets'][num_feature_layers : 2 * num_feature_layers][0]
    gscores = labels['targets'][2 * num_feature_layers : 3 * num_feature_layers][0]

    with tf.variable_scope(params['model_scope'], default_name = None, values = [features], reuse=tf.AUTO_REUSE):
        backbone = xdet_body_v3.xdet_resnet_v3(params['resnet_size'], params['data_format'])
        body_cls_output, body_regress_output = backbone(inputs=features, is_training=(mode == tf.estimator.ModeKeys.TRAIN))

        cls_pred, location_pred = xdet_body_v3.xdet_head(body_cls_output, body_regress_output,
                                                         params['num_classes'], num_anchors_list[0],
                                                         (mode == tf.estimator.ModeKeys.TRAIN),
                                                         data_format=params['data_format'])

    if params['data_format'] == 'channels_first':
        cls_pred = tf.transpose(cls_pred, [0, 2, 3, 1])
        location_pred = tf.transpose(location_pred, [0, 2, 3, 1])

    bboxes_pred = labels['decode_fn'](location_pred)#(tf.reshape(location_pred, tf.shape(location_pred).as_list()[0:-1] + [-1, 4]))
    cls_pred = tf.reshape(cls_pred, [-1, params['num_classes']])
    location_pred = tf.reshape(location_pred, [-1, 4])
    glabels = tf.reshape(glabels, [-1])
    gscores = tf.reshape(gscores, [-1])
    gtargets = tf.reshape(gtargets, [-1, 4])

    # raw mask for positives > 0.5 and for negatives < 0.3
    # each positive example has one label
    positive_mask = glabels > 0#tf.logical_and(glabels > 0, gscores > params['match_threshold'])
    fpositive_mask = tf.cast(positive_mask, tf.float32)
    n_positives = tf.reduce_sum(fpositive_mask)
    # negative examples are those whose max overlap is still lower than neg_threshold; note that some positives may also have a lower Jaccard index
    # note that anchors whose gscores are 0 were either ignored during anchor encoding or have zero overlap with all ground truth boxes
    #negtive_mask = tf.logical_and(tf.logical_and(tf.logical_not(tf.logical_or(positive_mask, glabels < 0)), gscores < params['neg_threshold']), gscores > 0.)
    negtive_mask = tf.logical_and(tf.equal(glabels, 0), gscores > 0.)
    #negtive_mask = tf.logical_and(tf.logical_and(tf.logical_not(positive_mask), gscores < params['neg_threshold']), gscores > 0.)
    #negtive_mask = tf.logical_and(gscores < params['neg_threshold'], tf.logical_not(positive_mask))
    fnegtive_mask = tf.cast(negtive_mask, tf.float32)
    n_negtives = tf.reduce_sum(fnegtive_mask)

    n_neg_to_select = tf.cast(params['negative_ratio'] * n_positives, tf.int32)
    n_neg_to_select = tf.minimum(n_neg_to_select, tf.cast(n_negtives, tf.int32))

    # hard negative mining for classification
    predictions_for_bg = tf.nn.softmax(cls_pred)[:, 0]
    prob_for_negtives = tf.where(negtive_mask,
                                 0. - predictions_for_bg,
                                 # ignore all the positives
                                 0. - tf.ones_like(predictions_for_bg))
    topk_prob_for_bg, _ = tf.nn.top_k(prob_for_negtives, k=n_neg_to_select)

    selected_neg_mask = prob_for_negtives > topk_prob_for_bg[-1]

    # # randomly select negative examples for classification
    # selected_neg_mask = tf.random_uniform(tf.shape(gscores), minval=0, maxval=1.) < tf.where(
    #                                           tf.greater(n_negtives, 0),
    #                                           tf.divide(tf.cast(n_neg_to_select, tf.float32), n_negtives),
    #                                           tf.zeros_like(tf.cast(n_neg_to_select, tf.float32)),
    #                                           name='rand_select_negtive')

    # include both the selected negatives and all positive examples
    final_mask = tf.stop_gradient(tf.logical_or(tf.logical_and(negtive_mask, selected_neg_mask), positive_mask))
    total_examples = tf.reduce_sum(tf.cast(final_mask, tf.float32))

    # add mask for glabels and cls_pred here
    glabels = tf.boolean_mask(tf.clip_by_value(glabels, 0, FLAGS.num_classes), tf.stop_gradient(final_mask))
    cls_pred = tf.boolean_mask(cls_pred, tf.stop_gradient(final_mask))
    location_pred = tf.boolean_mask(location_pred, tf.stop_gradient(positive_mask))
    gtargets = tf.boolean_mask(gtargets, tf.stop_gradient(positive_mask))

    predictions = {
        'classes': tf.argmax(cls_pred, axis=-1),
        'probabilities': tf.reduce_max(tf.nn.softmax(cls_pred, name='softmax_tensor'), axis=-1),
        'bboxes_predict': tf.reshape(bboxes_pred, [-1, 4]) }

    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

    # Calculate loss, which includes softmax cross entropy and L2 regularization.
    cross_entropy = tf.cond(n_positives > 0.,
                            lambda: tf.losses.sparse_softmax_cross_entropy(labels=glabels, logits=cls_pred),
                            lambda: 0.)
    #cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=glabels, logits=cls_pred)

    # Create a tensor named cross_entropy for logging purposes.
    tf.identity(cross_entropy, name='cross_entropy_loss')
    tf.summary.scalar('cross_entropy_loss', cross_entropy)

    loc_loss = tf.cond(n_positives > 0.,
                       lambda: modified_smooth_l1(location_pred, tf.stop_gradient(gtargets), sigma=1.),
                       lambda: tf.zeros_like(location_pred))
    #loc_loss = modified_smooth_l1(location_pred, tf.stop_gradient(gtargets))
    loc_loss = tf.reduce_mean(tf.reduce_sum(loc_loss, axis=-1))
    loc_loss = tf.identity(loc_loss, name='location_loss')
    tf.summary.scalar('location_loss', loc_loss)
    tf.losses.add_loss(loc_loss)

    # Add weight decay to the loss. We exclude the batch norm variables because
    # doing so leads to a small improvement in accuracy.
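    # Overall objective assembled below:
    #   loss = cross_entropy + loc_loss + weight_decay * sum_v tf.nn.l2_loss(v)
    # with the sum over all trainable variables except batch-norm parameters.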
loss = cross_entropy + loc_loss + params['weight_decay'] * tf.add_n( [tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'batch_normalization' not in v.name]) total_loss = tf.identity(loss, name='total_loss') if mode == tf.estimator.ModeKeys.TRAIN: global_step = tf.train.get_or_create_global_step() lr_values = [params['learning_rate'] * decay for decay in params['lr_decay_factors']] learning_rate = tf.train.piecewise_constant(tf.cast(global_step, tf.int32), [int(_) for _ in params['decay_boundaries']], lr_values) truncated_learning_rate = tf.maximum(learning_rate, tf.constant(params['end_learning_rate'], dtype=learning_rate.dtype)) # Create a tensor named learning_rate for logging purposes. tf.identity(truncated_learning_rate, name='learning_rate') tf.summary.scalar('learning_rate', truncated_learning_rate) optimizer = tf.train.MomentumOptimizer(learning_rate=truncated_learning_rate, momentum=params['momentum']) # Batch norm requires update_ops to be added as a train_op dependency. update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) with tf.control_dependencies(update_ops): train_op = optimizer.minimize(loss, global_step) else: train_op = None cls_accuracy = tf.metrics.accuracy(glabels, predictions['classes']) metrics = {'cls_accuracy': cls_accuracy} # Create a tensor named train_accuracy for logging purposes. tf.identity(cls_accuracy[1], name='cls_accuracy') tf.summary.scalar('cls_accuracy', cls_accuracy[1]) return tf.estimator.EstimatorSpec( mode=mode, predictions=predictions, loss=loss, train_op=train_op, eval_metric_ops=metrics, scaffold = tf.train.Scaffold(init_fn=train_helper.get_init_fn_for_scaffold(FLAGS))) def parse_comma_list(args): return [float(s.strip()) for s in args.split(',')] def main(_): # Using the Winograd non-fused algorithms provides a small performance boost. os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1' gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction = FLAGS.gpu_memory_fraction) config = tf.ConfigProto(allow_soft_placement = True, log_device_placement = False, intra_op_parallelism_threads = FLAGS.num_cpu_threads, inter_op_parallelism_threads = FLAGS.num_cpu_threads, gpu_options = gpu_options) # Set up a RunConfig to only save checkpoints once per training cycle. 
run_config = tf.estimator.RunConfig().replace( save_checkpoints_secs=FLAGS.save_checkpoints_secs).replace( save_checkpoints_steps=None).replace( save_summary_steps=FLAGS.save_summary_steps).replace( keep_checkpoint_max=5).replace( log_step_count_steps=FLAGS.log_every_n_steps).replace( session_config=config) xdetector = tf.estimator.Estimator( model_fn=xdet_model_fn, model_dir=FLAGS.model_dir, config=run_config, params={ 'resnet_size': FLAGS.resnet_size, 'data_format': FLAGS.data_format, 'model_scope': FLAGS.model_scope, 'num_classes': FLAGS.num_classes, 'negative_ratio': FLAGS.negative_ratio, 'match_threshold': FLAGS.match_threshold, 'neg_threshold': FLAGS.neg_threshold, 'weight_decay': FLAGS.weight_decay, 'momentum': FLAGS.momentum, 'learning_rate': FLAGS.learning_rate, 'end_learning_rate': FLAGS.end_learning_rate, 'learning_rate_decay_factor': FLAGS.learning_rate_decay_factor, 'decay_steps': FLAGS.decay_steps, 'decay_boundaries': parse_comma_list(FLAGS.decay_boundaries), 'lr_decay_factors': parse_comma_list(FLAGS.lr_decay_factors), }) tensors_to_log = { 'lr': 'learning_rate', 'ce_loss': 'cross_entropy_loss', 'loc_loss': 'location_loss', 'total_loss': 'total_loss', 'cls_acc': 'cls_accuracy', } logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=FLAGS.log_every_n_steps) print('Starting a training cycle.') xdetector.train(input_fn=input_pipeline(), hooks=[logging_hook]) if __name__ == '__main__': tf.logging.set_verbosity(tf.logging.INFO) tf.app.run()
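A quick numeric aside (not part of the original script) on the piecewise definition in `modified_smooth_l1` above, assuming TF 1.x graph mode with that function in scope: with sigma = 1 the loss is 0.5 * x^2 for |x| < 1 and |x| - 0.5 otherwise, so residuals of 0.2 and 2.0 should give 0.02 and 1.5.

import tensorflow as tf

pred = tf.constant([[0.2, 0.0, 2.0, 0.0]])
target = tf.zeros_like(pred)
loss = modified_smooth_l1(pred, target, sigma=1.)
with tf.Session() as sess:
    print(sess.run(loss))  # [[0.02, 0., 1.5, 0.]] -- 0.5 * 0.2^2 and |2.0| - 0.5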
[ "tensorflow.train.LoggingTensorHook", "tensorflow.metrics.accuracy", "tensorflow.control_dependencies", "tensorflow.reduce_sum", "tensorflow.cast", "tensorflow.equal", "tensorflow.nn.l2_loss", "tensorflow.app.flags.DEFINE_string", "tensorflow.GPUOptions", "tensorflow.app.flags.DEFINE_boolean", "tensorflow.estimator.RunConfig", "tensorflow.summary.scalar", "tensorflow.get_collection", "tensorflow.app.flags.DEFINE_integer", "tensorflow.train.get_or_create_global_step", "tensorflow.subtract", "tensorflow.ConfigProto", "tensorflow.nn.top_k", "tensorflow.stop_gradient", "tensorflow.train.MomentumOptimizer", "tensorflow.logging.set_verbosity", "tensorflow.trainable_variables", "tensorflow.argmax", "tensorflow.app.run", "tensorflow.losses.sparse_softmax_cross_entropy", "tensorflow.identity", "tensorflow.zeros_like", "tensorflow.losses.add_loss", "tensorflow.clip_by_value", "tensorflow.multiply", "tensorflow.transpose", "tensorflow.nn.softmax", "tensorflow.constant", "tensorflow.reshape", "tensorflow.ones_like", "tensorflow.app.flags.DEFINE_float", "tensorflow.estimator.EstimatorSpec", "tensorflow.variable_scope", "tensorflow.abs", "tensorflow.logical_and" ]
xdet_v3_resnet_train.py
[(34, 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_readers"""', '(16)', '"""The number of parallel readers that read data from the dataset."""'], {}), True, 'import tensorflow as tf\n'), (37, 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_preprocessing_threads"""', '(48)', '"""The number of threads used to create the batches."""'], {}), True, 'import tensorflow as tf\n'), (40, 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_cpu_threads"""', '(0)', '"""The number of cpu cores used to train."""'], {}), True, 'import tensorflow as tf\n'), (43, 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""gpu_memory_fraction"""', '(1.0)', '"""GPU memory fraction to use."""'], {}), True, 'import tensorflow as tf\n'), (46, 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""data_dir"""', '"""../PASCAL/VOC_TF/VOC0712TF/"""', '"""The directory where the dataset input data is stored."""'], {}), True, 'import tensorflow as tf\n'), (49, 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""dataset_name"""', '"""pascalvoc_0712"""', '"""The name of the dataset to load."""'], {}), True, 'import tensorflow as tf\n'), (51, 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_classes"""', '(21)', '"""Number of classes to use in the dataset."""'], {}), True, 'import tensorflow as tf\n'), (53, 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""dataset_split_name"""', '"""train"""', '"""The name of the train/test split."""'], {}), True, 'import tensorflow as tf\n'), (55, 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""model_dir"""', '"""./logs_v3/"""', '"""The directory where the model will be stored."""'], {}), True, 'import tensorflow as tf\n'), (58, 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""log_every_n_steps"""', '(10)', '"""The frequency with which logs are print."""'], {}), True, 'import tensorflow as tf\n'), (61, 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""save_summary_steps"""', '(500)', '"""The frequency with which summaries are saved, in seconds."""'], {}), True, 'import tensorflow as tf\n'), (64, 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""save_checkpoints_secs"""', '(7200)', '"""The frequency with which the model is saved, in seconds."""'], {}), True, 'import tensorflow as tf\n'), (68, 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""train_image_size"""', '(352)', '"""The size of the input image for the model to use."""'], {}), True, 'import tensorflow as tf\n'), (71, 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""resnet_size"""', '(50)', '"""The size of the ResNet model to use."""'], {}), True, 'import tensorflow as tf\n'), (74, 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""train_epochs"""', 'None', '"""The number of epochs to use for training."""'], {}), True, 'import tensorflow as tf\n'), (77, 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""batch_size"""', '(12)', '"""Batch size for training and evaluation."""'], {}), True, 'import tensorflow as tf\n'), (80, 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""data_format"""', '"""channels_first"""', '"""A flag to override the data format used in the model. 
channels_first provides a performance boost on GPU but is not always compatible with CPU. If left unspecified, the data format will be chosen automatically based on whether TensorFlow was built for CPU or GPU."""'], {}), True, 'import tensorflow as tf\n'), (86, 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""negative_ratio"""', '(3.0)', '"""Negative ratio in the loss function."""'], {}), True, 'import tensorflow as tf\n'), (88, 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""match_threshold"""', '(0.56)', '"""Matching threshold in the loss function."""'], {}), True, 'import tensorflow as tf\n'), (90, 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""neg_threshold"""', '(0.4)', '"""Matching threshold for the negtive examples in the loss function."""'], {}), True, 'import tensorflow as tf\n'), (93, 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""weight_decay"""', '(0.0005)', '"""The weight decay on the model weights."""'], {}), True, 'import tensorflow as tf\n'), (95, 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""momentum"""', '(0.9)', '"""The momentum for the MomentumOptimizer and RMSPropOptimizer."""'], {}), True, 'import tensorflow as tf\n'), (98, 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""learning_rate"""', '(0.001)', '"""Initial learning rate."""'], {}), True, 'import tensorflow as tf\n'), (99, 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""end_learning_rate"""', '(5e-05)', '"""The minimal end learning rate used by a polynomial decay learning rate."""'], {}), True, 'import tensorflow as tf\n'), (103, 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""learning_rate_decay_factor"""', '(0.96)', '"""Learning rate decay factor."""'], {}), True, 'import tensorflow as tf\n'), (105, 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""decay_steps"""', '(1000)', '"""Number of epochs after which learning rate decays."""'], {}), True, 'import tensorflow as tf\n'), (109, 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""decay_boundaries"""', '"""60000, 800000"""', '"""Learning rate decay boundaries by global_step (comma-separated list)."""'], {}), True, 'import tensorflow as tf\n'), (112, 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""lr_decay_factors"""', '"""1, 0.6, 0.1"""', '"""The values of learning_rate decay factor for each segment between boundaries (comma-separated list)."""'], {}), True, 'import tensorflow as tf\n'), (116, 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""checkpoint_path"""', '"""./model/resnet50"""', '"""The path to a checkpoint from which to fine-tune."""'], {}), True, 'import tensorflow as tf\n'), (119, 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""checkpoint_model_scope"""', '""""""', '"""Model scope in the checkpoint. 
None if the same as the trained model."""'], {}), True, 'import tensorflow as tf\n'), (122, 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""model_scope"""', '"""xdet_resnet"""', '"""Model scope name used to replace the name_scope in checkpoint."""'], {}), True, 'import tensorflow as tf\n'), (125, 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""checkpoint_exclude_scopes"""', '"""xdet_resnet/xdet_head, xdet_resnet/xdet_multi_path, xdet_resnet/xdet_additional_conv"""', '"""Comma-separated list of scopes of variables to exclude when restoring from a checkpoint."""'], {}), True, 'import tensorflow as tf\n'), (128, 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""ignore_missing_vars"""', '(True)', '"""When restoring a checkpoint would ignore missing variables."""'], {}), True, 'import tensorflow as tf\n'), (131, 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""run_on_cloud"""', '(True)', '"""Wether we will train on cloud (pre-trained model will be placed in the "data_dir/cloud_checkpoint_path")."""'], {}), True, 'import tensorflow as tf\n'), (134, 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""cloud_checkpoint_path"""', '"""resnet50/model.ckpt"""', '"""The path to a checkpoint from which to fine-tune."""'], {}), True, 'import tensorflow as tf\n'), (144, 'preprocessing.anchor_manipulator.AnchorCreator', 'anchor_manipulator.AnchorCreator', (['([FLAGS.train_image_size] * 2)'], {'layers_shapes': '[(22, 22)]', 'anchor_scales': '[[0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]]', 'extra_anchor_scales': '[[0.1]]', 'anchor_ratios': '[[1.0, 2.0, 3.0, 0.5, 0.3333]]', 'layer_steps': '[16]'}), False, 'from preprocessing import anchor_manipulator\n'), (193, 'tensorflow.multiply', 'tf.multiply', (['bbox_outside_weights', 'smooth_l1_result'], {}), True, 'import tensorflow as tf\n'), (219, 'tensorflow.reshape', 'tf.reshape', (['cls_pred', "[-1, params['num_classes']]"], {}), True, 'import tensorflow as tf\n'), (220, 'tensorflow.reshape', 'tf.reshape', (['location_pred', '[-1, 4]'], {}), True, 'import tensorflow as tf\n'), (221, 'tensorflow.reshape', 'tf.reshape', (['glabels', '[-1]'], {}), True, 'import tensorflow as tf\n'), (222, 'tensorflow.reshape', 'tf.reshape', (['gscores', '[-1]'], {}), True, 'import tensorflow as tf\n'), (223, 'tensorflow.reshape', 'tf.reshape', (['gtargets', '[-1, 4]'], {}), True, 'import tensorflow as tf\n'), (228, 'tensorflow.cast', 'tf.cast', (['positive_mask', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (229, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['fpositive_mask'], {}), True, 'import tensorflow as tf\n'), (236, 'tensorflow.cast', 'tf.cast', (['negtive_mask', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (237, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['fnegtive_mask'], {}), True, 'import tensorflow as tf\n'), (239, 'tensorflow.cast', 'tf.cast', (["(params['negative_ratio'] * n_positives)", 'tf.int32'], {}), True, 'import tensorflow as tf\n'), (248, 'tensorflow.nn.top_k', 'tf.nn.top_k', (['prob_for_negtives'], {'k': 'n_neg_to_select'}), True, 'import tensorflow as tf\n'), (280, 'tensorflow.identity', 'tf.identity', (['cross_entropy'], {'name': '"""cross_entropy_loss"""'}), True, 'import tensorflow as tf\n'), (281, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""cross_entropy_loss"""', 'cross_entropy'], {}), True, 'import tensorflow as tf\n'), (286, 'tensorflow.identity', 'tf.identity', (['loc_loss'], {'name': 
'"""location_loss"""'}), True, 'import tensorflow as tf\n'), (287, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""location_loss"""', 'loc_loss'], {}), True, 'import tensorflow as tf\n'), (288, 'tensorflow.losses.add_loss', 'tf.losses.add_loss', (['loc_loss'], {}), True, 'import tensorflow as tf\n'), (295, 'tensorflow.identity', 'tf.identity', (['loss'], {'name': '"""total_loss"""'}), True, 'import tensorflow as tf\n'), (319, 'tensorflow.metrics.accuracy', 'tf.metrics.accuracy', (['glabels', "predictions['classes']"], {}), True, 'import tensorflow as tf\n'), (323, 'tensorflow.identity', 'tf.identity', (['cls_accuracy[1]'], {'name': '"""cls_accuracy"""'}), True, 'import tensorflow as tf\n'), (324, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""cls_accuracy"""', 'cls_accuracy[1]'], {}), True, 'import tensorflow as tf\n'), (341, 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': 'FLAGS.gpu_memory_fraction'}), True, 'import tensorflow as tf\n'), (342, 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'log_device_placement': '(False)', 'intra_op_parallelism_threads': 'FLAGS.num_cpu_threads', 'inter_op_parallelism_threads': 'FLAGS.num_cpu_threads', 'gpu_options': 'gpu_options'}), True, 'import tensorflow as tf\n'), (381, 'tensorflow.train.LoggingTensorHook', 'tf.train.LoggingTensorHook', ([], {'tensors': 'tensors_to_log', 'every_n_iter': 'FLAGS.log_every_n_steps'}), True, 'import tensorflow as tf\n'), (387, 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), True, 'import tensorflow as tf\n'), (388, 'tensorflow.app.run', 'tf.app.run', ([], {}), True, 'import tensorflow as tf\n'), (154, 'preprocessing.anchor_manipulator.AnchorEncoder', 'anchor_manipulator.AnchorEncoder', (['all_anchors'], {'num_classes': 'FLAGS.num_classes', 'allowed_borders': '[0.05]', 'positive_threshold': 'FLAGS.match_threshold', 'ignore_threshold': 'FLAGS.neg_threshold', 'prior_scaling': '[0.1, 0.1, 0.2, 0.2]'}), False, 'from preprocessing import anchor_manipulator\n'), (160, 'dataset.dataset_factory.get_dataset', 'dataset_factory.get_dataset', (['FLAGS.dataset_name', 'FLAGS.dataset_split_name', 'FLAGS.data_dir', 'image_preprocessing_fn'], {'file_pattern': 'None', 'reader': 'None', 'batch_size': 'FLAGS.batch_size', 'num_readers': 'FLAGS.num_readers', 'num_preprocessing_threads': 'FLAGS.num_preprocessing_threads', 'num_epochs': 'FLAGS.train_epochs', 'anchor_encoder': 'anchor_encoder_decoder.encode_all_anchors'}), False, 'from dataset import dataset_factory\n'), (185, 'tensorflow.subtract', 'tf.subtract', (['bbox_pred', 'bbox_targets'], {}), True, 'import tensorflow as tf\n'), (188, 'tensorflow.multiply', 'tf.multiply', (['inside_mul', 'inside_mul'], {}), True, 'import tensorflow as tf\n'), (189, 'tensorflow.abs', 'tf.abs', (['inside_mul'], {}), True, 'import tensorflow as tf\n'), (190, 'tensorflow.multiply', 'tf.multiply', (['smooth_l1_option1', 'smooth_l1_sign'], {}), True, 'import tensorflow as tf\n'), (207, 'tensorflow.variable_scope', 'tf.variable_scope', (["params['model_scope']"], {'default_name': 'None', 'values': '[features]', 'reuse': 'tf.AUTO_REUSE'}), True, 'import tensorflow as tf\n'), (208, 'net.xdet_body_v3.xdet_resnet_v3', 'xdet_body_v3.xdet_resnet_v3', (["params['resnet_size']", "params['data_format']"], {}), False, 'from net import xdet_body_v3\n'), (211, 'net.xdet_body_v3.xdet_head', 'xdet_body_v3.xdet_head', (['body_cls_output', 'body_regress_output', "params['num_classes']", 
'num_anchors_list[0]', '(mode == tf.estimator.ModeKeys.TRAIN)'], {'data_format': "params['data_format']"}), False, 'from net import xdet_body_v3\n'), (214, 'tensorflow.transpose', 'tf.transpose', (['cls_pred', '[0, 2, 3, 1]'], {}), True, 'import tensorflow as tf\n'), (215, 'tensorflow.transpose', 'tf.transpose', (['location_pred', '[0, 2, 3, 1]'], {}), True, 'import tensorflow as tf\n'), (233, 'tensorflow.equal', 'tf.equal', (['glabels', '(0)'], {}), True, 'import tensorflow as tf\n'), (240, 'tensorflow.cast', 'tf.cast', (['n_negtives', 'tf.int32'], {}), True, 'import tensorflow as tf\n'), (243, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['cls_pred'], {}), True, 'import tensorflow as tf\n'), (260, 'tensorflow.cast', 'tf.cast', (['final_mask', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (263, 'tensorflow.clip_by_value', 'tf.clip_by_value', (['glabels', '(0)', 'FLAGS.num_classes'], {}), True, 'import tensorflow as tf\n'), (263, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['final_mask'], {}), True, 'import tensorflow as tf\n'), (264, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['final_mask'], {}), True, 'import tensorflow as tf\n'), (265, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['positive_mask'], {}), True, 'import tensorflow as tf\n'), (266, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['positive_mask'], {}), True, 'import tensorflow as tf\n'), (268, 'tensorflow.argmax', 'tf.argmax', (['cls_pred'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (270, 'tensorflow.reshape', 'tf.reshape', (['bboxes_pred', '[-1, 4]'], {}), True, 'import tensorflow as tf\n'), (273, 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'predictions': 'predictions'}), True, 'import tensorflow as tf\n'), (285, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['loc_loss'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (298, 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), True, 'import tensorflow as tf\n'), (306, 'tensorflow.identity', 'tf.identity', (['truncated_learning_rate'], {'name': '"""learning_rate"""'}), True, 'import tensorflow as tf\n'), (307, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""learning_rate"""', 'truncated_learning_rate'], {}), True, 'import tensorflow as tf\n'), (309, 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', ([], {'learning_rate': 'truncated_learning_rate', 'momentum': "params['momentum']"}), True, 'import tensorflow as tf\n'), (313, 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS'], {}), True, 'import tensorflow as tf\n'), (141, 'preprocessing.preprocessing_factory.get_preprocessing', 'preprocessing_factory.get_preprocessing', (['"""xdet_resnet"""'], {'is_training': '(True)'}), False, 'from preprocessing import preprocessing_factory\n'), (187, 'tensorflow.abs', 'tf.abs', (['inside_mul'], {}), True, 'import tensorflow as tf\n'), (247, 'tensorflow.ones_like', 'tf.ones_like', (['predictions_for_bg'], {}), True, 'import tensorflow as tf\n'), (259, 'tensorflow.logical_and', 'tf.logical_and', (['negtive_mask', 'selected_neg_mask'], {}), True, 'import tensorflow as tf\n'), (269, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['cls_pred'], {'name': '"""softmax_tensor"""'}), True, 'import tensorflow as tf\n'), (276, 'tensorflow.losses.sparse_softmax_cross_entropy', 'tf.losses.sparse_softmax_cross_entropy', ([], {'labels': 'glabels', 'logits': 'cls_pred'}), True, 'import tensorflow as tf\n'), (283, 
'tensorflow.zeros_like', 'tf.zeros_like', (['location_pred'], {}), True, 'import tensorflow as tf\n'), (301, 'tensorflow.cast', 'tf.cast', (['global_step', 'tf.int32'], {}), True, 'import tensorflow as tf\n'), (304, 'tensorflow.constant', 'tf.constant', (["params['end_learning_rate']"], {'dtype': 'learning_rate.dtype'}), True, 'import tensorflow as tf\n'), (314, 'tensorflow.control_dependencies', 'tf.control_dependencies', (['update_ops'], {}), True, 'import tensorflow as tf\n'), (191, 'tensorflow.subtract', 'tf.subtract', (['smooth_l1_sign', '(1.0)'], {}), True, 'import tensorflow as tf\n'), (283, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['gtargets'], {}), True, 'import tensorflow as tf\n'), (293, 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['v'], {}), True, 'import tensorflow as tf\n'), (332, 'utility.train_helper.get_init_fn_for_scaffold', 'train_helper.get_init_fn_for_scaffold', (['FLAGS'], {}), False, 'from utility import train_helper\n'), (293, 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), True, 'import tensorflow as tf\n'), (345, 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {}), True, 'import tensorflow as tf\n')]
YusukeNagasaka/Batched-SpMM
bb7d1989bbf57fc3a22dfa1483749c4c6a1acad3
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops


class BatchedSpMM:
    def __init__(self):
        self.b_module = tf.load_op_library('./batched.so')

    def call(self, sp_matrices, dense_matrices, adjoint_a=False, adjoint_b=False):
        sp_indices = [sp_m.indices for sp_m in sp_matrices]
        sp_values = [sp_m.values for sp_m in sp_matrices]
        sp_shape = [sp_m.dense_shape for sp_m in sp_matrices]
        return self.b_module.bspmm(sp_ids=sp_indices, sp_values=sp_values, sp_shape=sp_shape, rhs=dense_matrices, adjoint_a=adjoint_a, adjoint_b=adjoint_b)


class BatchedSpMDT:
    def __init__(self):
        self.b_module = tf.load_op_library('./batched.so')

    def call(self, sp_matrices, dense_matrices, adjoint_a=False, adjoint_b=False):
        sp_indices = [sp_m.indices for sp_m in sp_matrices]
        sp_values = [sp_m.values for sp_m in sp_matrices]
        sp_shape = [sp_m.dense_shape for sp_m in sp_matrices]
        return self.b_module.bspmdt(sp_ids=sp_indices, sp_values=sp_values, sp_shape=sp_shape, rhs=dense_matrices, adjoint_a=adjoint_a, adjoint_b=adjoint_b)


b_module = tf.load_op_library('./batched.so')

@ops.RegisterGradient("Bspmdt")
def _bspmdt_grad(op, *grad):
    """Gradients for the Bspmdt (batched sparse-dense matmul) op.

    Args:
      op: the Bspmdt op.
      *grad: the incoming gradients, one per output tensor.

    Returns:
      Gradients for each group of input tensors
      (sparse_indices, sparse_values, sparse_shape, dense_tensor);
      the gradients for the indices and shapes are None.
    """
    numTensors = (len(op.inputs) - 1) // 3
    a_indices = op.inputs[0:numTensors]
    a_values = op.inputs[numTensors:numTensors * 2]
    a_shape = op.inputs[numTensors * 2:numTensors * 3]
    b = op.inputs[numTensors * 3]
    adj_a = op.get_attr("adjoint_a")
    adj_b = op.get_attr("adjoint_b")

    # gradient w.r.t. dense
    a_values_grads = []
    b_list = [b[i] for i in range(numTensors)]
    b_grads = b_module.bspmm(a_indices, a_values, a_shape, grad, adjoint_a=True, adjoint_b=False)

    bg_row = tf.shape(b_grads[0])[0]
    bg_col = tf.shape(b_grads[0])[1]
    b_grads = tf.reshape(b_grads, (numTensors * bg_row, bg_col))

    if adj_b:
        b_grads = [array_ops.transpose(b_g) for b_g in b_grads]

    for t in range(numTensors):
        rows = a_indices[t][:, 0]
        cols = a_indices[t][:, 1]
        parts_a = array_ops.gather(grad[t], rows if not adj_a else cols)
        parts_b = array_ops.gather(b_list[t] if not adj_b else array_ops.transpose(b_list[t]), cols if not adj_a else rows)
        a_values_grads.append(math_ops.reduce_sum(parts_a * parts_b, reduction_indices=1))

    return_val = [None for _ in range(numTensors)] + a_values_grads + [None for _ in range(numTensors)] + [b_grads]
    return tuple(return_val)
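A minimal usage sketch (not from this repo) for the wrappers above. It assumes the compiled `./batched.so` kernel is present in the working directory; the exact packing expected for `rhs` (a list of dense matrices versus one stacked tensor) depends on the op registration inside `batched.so` and is an assumption here.

import tensorflow as tf

# two hypothetical 3x3 sparse matrices and matching 3x2 dense right-hand sides
sp_a = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1.0, 2.0], dense_shape=[3, 3])
sp_b = tf.SparseTensor(indices=[[2, 1]], values=[3.0], dense_shape=[3, 3])
rhs = [tf.ones([3, 2]), tf.ones([3, 2])]

spmm = BatchedSpMM()
products = spmm.call([sp_a, sp_b], rhs)  # one product per (sparse, dense) pair
with tf.Session() as sess:
    print(sess.run(products))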
[ "tensorflow.python.ops.array_ops.transpose", "tensorflow.python.framework.ops.RegisterGradient", "tensorflow.shape", "tensorflow.reshape", "tensorflow.python.ops.array_ops.gather", "tensorflow.load_op_library", "tensorflow.python.ops.math_ops.reduce_sum" ]
batched_call.py
[(31, 'tensorflow.load_op_library', 'tf.load_op_library', (['"""./batched.so"""'], {}), True, 'import tensorflow as tf\n'), (33, 'tensorflow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', (['"""Bspmdt"""'], {}), False, 'from tensorflow.python.framework import ops\n'), (64, 'tensorflow.reshape', 'tf.reshape', (['b_grads', '(numTensors * bg_row, bg_col)'], {}), True, 'import tensorflow as tf\n'), (12, 'tensorflow.load_op_library', 'tf.load_op_library', (['"""./batched.so"""'], {}), True, 'import tensorflow as tf\n'), (22, 'tensorflow.load_op_library', 'tf.load_op_library', (['"""./batched.so"""'], {}), True, 'import tensorflow as tf\n'), (62, 'tensorflow.shape', 'tf.shape', (['b_grads[0]'], {}), True, 'import tensorflow as tf\n'), (63, 'tensorflow.shape', 'tf.shape', (['b_grads[0]'], {}), True, 'import tensorflow as tf\n'), (72, 'tensorflow.python.ops.array_ops.gather', 'array_ops.gather', (['grad[t]', '(rows if not adj_a else cols)'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (67, 'tensorflow.python.ops.array_ops.transpose', 'array_ops.transpose', (['b_g'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (74, 'tensorflow.python.ops.math_ops.reduce_sum', 'math_ops.reduce_sum', (['(parts_a * parts_b)'], {'reduction_indices': '(1)'}), False, 'from tensorflow.python.ops import math_ops\n'), (73, 'tensorflow.python.ops.array_ops.transpose', 'array_ops.transpose', (['b_list[t]'], {}), False, 'from tensorflow.python.ops import array_ops\n')]
Pankajchandan/chatbot
6e2daf1b8aac0259d8e1b1793202d9760ee6a91b
import os, argparse
import pandas as pd
import tensorflow as tf
import numpy as np
from gensim.models import Word2Vec
from sklearn.model_selection import train_test_split
from preprocessing import preprocess_data

# parse hyperparameter file
hfile = open("hyperparameters.txt","r+")
params = hfile.read().split()

param_filt = params[14]
param_filt = param_filt.split(",")
for i in range(len(param_filt)):
    param_filt[i] = int(param_filt[i])

# define learning rate
learning_rate = float(params[2])
# l2 regularization params
l2_reg_lambda = float(params[5])
# no of epochs
epoch = int(params[8])
# batch size
batch_size = int(params[11])
# define the size of filters
filter_list = param_filt
# define number of filters of each filter size
num_filter = int(params[17])
# keep probability for dropout layer
keep_prob = float(params[20])

print("parameters being used: learning_rate, l2_reg_lambda, epoch, batch_size, filter_list, num_filter, keep_prob")
print("values: ",(learning_rate, l2_reg_lambda, epoch, batch_size, filter_list, num_filter, keep_prob))
print("***********************************************************************************************************")

## batch generator
def next_batch(X, Y, batch_size=100):
    """Batch generator with randomization.

    Parameters
    ----------
    batch_size : int, optional
        Size of each minibatch.

    Returns
    -------
    Xs, ys : np.ndarray, np.ndarray
        Next batch of inputs and labels (if no labels, then None).
    """
    # Shuffle each epoch
    current_permutation = np.random.permutation(range(len(X)))
    epoch_text = X[current_permutation, ...]
    if Y is not None:
        epoch_labels = Y[current_permutation, ...]

    # Then iterate over the epoch
    current_batch_idx = 0
    while current_batch_idx < len(X):
        end_idx = min(current_batch_idx + batch_size, len(X))
        this_batch = {
            'text': epoch_text[current_batch_idx:end_idx],
            'labels': epoch_labels[current_batch_idx:end_idx] if Y is not None else None
        }
        current_batch_idx += batch_size
        yield this_batch['text'], this_batch['labels']

## convert into labels and store in dict
with open("intent.txt") as file:
    intent = file.read().strip().split("\n")
intent_dict = {}
for i, word in enumerate(intent):
    intent_dict[word] = i

# read data from datafile
df = pd.read_csv("datafile.csv", header=0, delimiter="\t", quoting=3)

# load word2vec model
model = Word2Vec.load("trainedWord2vecmodel")

# preprocess data_X
data_x = preprocess_data(df,model)
print("*************")

# onehot encode data_y
data_y = np.array(df["intent"])
for i, word in enumerate(data_y):
    data_y[i] = intent_dict[word]
data_y = np.array(data_y, dtype=np.int8)
nb_classes = len(intent_dict)
data_y = np.eye(nb_classes)[data_y]

# split into train and test
train_x, test_x, train_y, test_y = train_test_split(data_x, data_y, test_size=0.3, random_state=42)

# define other non user input params
# initialize l2_loss as zero
l2_loss = tf.constant(0.0)
# define sequence length
sequence_length = data_x.shape[1]
# define num_features
num_feature = data_x.shape[2]
# store the weights
pooled_outputs = []

# Create the input to the network. This is a 4-dimensional tensor!
X = tf.placeholder(name='X', shape=[None, data_x.shape[1], data_x.shape[2], data_x.shape[3]], dtype=tf.float32)
# Create the output of the network: one-hot intent labels over nb_classes possible values.
Y = tf.placeholder(name='Y', shape=[None, data_y.shape[1]], dtype=tf.float32)

print("building network ")

for i, filter_size in enumerate(filter_list):
    with tf.variable_scope("conv/stack/{}".format(i), reuse=None):
        # initialize filter
        W = tf.get_variable(
            name='W',
            shape=[filter_size, num_feature, 1, num_filter],
            initializer=tf.contrib.layers.xavier_initializer_conv2d())

        # convolve w and input
        conv = tf.nn.conv2d(
            name='conv',
            input=X,
            filter=W,
            strides=[1, 1, 1, 1],
            padding='VALID')

        # add bias of size = out channels
        b = tf.get_variable(
            name='b',
            shape=[num_filter],
            initializer=tf.constant_initializer(0.0))
        H = tf.nn.bias_add(
            name='H',
            value=conv,
            bias=b)

        # Apply nonlinearity
        H = tf.nn.relu(H, name="relu")

        # max pool
        pooled = tf.nn.max_pool(H,
                                ksize=[1, sequence_length - filter_size + 1, 1, 1],
                                strides=[1, 1, 1, 1],
                                padding='VALID',
                                name="pool")
        pooled_outputs.append(pooled)

with tf.name_scope("preFc"):
    # combine all pooled outputs
    total_filters = num_filter * len(filter_list)
    # concat all the pooled weights
    H_pool = tf.concat(pooled_outputs, 3)
    # flatten it for fully connected layer
    H_pool_flat = tf.reshape(H_pool, [-1, total_filters])

with tf.name_scope("dropout"):
    H_drop = tf.nn.dropout(H_pool_flat, keep_prob=keep_prob)

# Final (unnormalized) layer
with tf.name_scope("output"):
    W = tf.get_variable("W",
                        shape=[total_filters, nb_classes],
                        initializer=tf.contrib.layers.xavier_initializer())
    # add final layer bias
    b = tf.Variable(tf.constant(0.1, shape=[nb_classes]), name="b")
    # calc l2 losses
    l2_loss += tf.nn.l2_loss(W)
    l2_loss += tf.nn.l2_loss(b)
    # do logit = W*X+b
    logit = tf.nn.xw_plus_b(H_drop, W, b, name="scores")
    predictions = tf.nn.softmax(logit, name="predictions")

# calculate loss and optimizer
with tf.variable_scope("FCoptimize", reuse=None):
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logit, labels=Y)
                          + l2_reg_lambda * l2_loss)
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)

# calculate accuracy
correct_predictions = tf.equal(tf.argmax(predictions, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")

print("done...")
print("************")

path = 'save/'
ckpt_name = 'save/model.ckpt'
fname = 'model.tf'
dst_nodes = ['output/predictions']
saver = tf.train.Saver()

# Create a session and init
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print("training started!!")
    print("******************")

    # Now iterate over our dataset n_epoch times
    for epoch_i in range(epoch):
        this_loss = 0
        its = 0

        # mini batches:
        for Xs_i, ys_i in next_batch(train_x, train_y, 1):
            # Note here: we are running the optimizer so
            # that the network parameters train!
            this_loss += sess.run([loss, optimizer], feed_dict={X: Xs_i, Y: ys_i})[0]
            its += 1
            # print(this_loss / its)
        print('Training loss: ', this_loss / its)

        # Validation (see how the network does on unseen data).
this_accuracy = 0 its = 0 # Do our mini batches: for Xs_i, ys_i in next_batch(test_x,test_y,1): # we measure the accuracy #pred = sess.run(predictions, feed_dict={X:Xs_i, Y:ys_i}) this_accuracy += sess.run(accuracy, feed_dict={X:Xs_i, Y:ys_i}) its += 1 #print ("prediction ",tf.argmax(pred,1).eval(session=sess)) #print ("actual ", tf.argmax(ys_i,1).eval(session=sess)) print('Validation accuracy for epoch {}: '.format(epoch_i+1), this_accuracy / its) print("---------------------------------------") print("***************") print("Training done!!") save_path = saver.save(sess, ckpt_name) print("Model saved in file: %s" % save_path) print ("creating protobuf...") g_1 = tf.get_default_graph() with tf.Session(graph = g_1) as sess: saver = tf.train.import_meta_graph('save/model.ckpt.meta', clear_devices=True) saver.restore(sess, ckpt_name) graph_def = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, dst_nodes) tf.train.write_graph(tf.graph_util.extract_sub_graph(graph_def, dst_nodes), path, fname, as_text=False)
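A small aside on the one-hot trick used in the preprocessing above (`np.eye(nb_classes)[data_y]`): indexing the identity matrix with an integer label array selects one row per label, which is exactly its one-hot encoding.

import numpy as np

labels = np.array([0, 2, 1], dtype=np.int8)
one_hot = np.eye(3)[labels]
# one_hot -> [[1., 0., 0.],
#             [0., 0., 1.],
#             [0., 1., 0.]]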
[ "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.concat", "tensorflow.nn.max_pool", "tensorflow.cast", "tensorflow.nn.l2_loss", "tensorflow.train.AdamOptimizer", "tensorflow.get_default_graph", "tensorflow.nn.conv2d", "tensorflow.graph_util.convert_variables_to_constants", "pandas.read_csv", "numpy.eye", "tensorflow.train.import_meta_graph", "tensorflow.name_scope", "tensorflow.Session", "tensorflow.contrib.layers.xavier_initializer", "tensorflow.train.Saver", "tensorflow.argmax", "tensorflow.nn.dropout", "tensorflow.nn.xw_plus_b", "sklearn.model_selection.train_test_split", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.contrib.layers.xavier_initializer_conv2d", "numpy.array", "tensorflow.nn.bias_add", "tensorflow.nn.relu", "tensorflow.nn.softmax", "tensorflow.constant", "tensorflow.graph_util.extract_sub_graph", "tensorflow.reshape", "tensorflow.constant_initializer", "tensorflow.variable_scope" ]
W2V/model.py
[(83, 'pandas.read_csv', 'pd.read_csv', (['"""datafile.csv"""'], {'header': '(0)', 'delimiter': '"""\t"""', 'quoting': '(3)'}), True, 'import pandas as pd\n'), (87, 'gensim.models.Word2Vec.load', 'Word2Vec.load', (['"""trainedWord2vecmodel"""'], {}), False, 'from gensim.models import Word2Vec\n'), (91, 'preprocessing.preprocess_data', 'preprocess_data', (['df', 'model'], {}), False, 'from preprocessing import preprocess_data\n'), (96, 'numpy.array', 'np.array', (["df['intent']"], {}), True, 'import numpy as np\n'), (99, 'numpy.array', 'np.array', (['data_y'], {'dtype': 'np.int8'}), True, 'import numpy as np\n'), (105, 'sklearn.model_selection.train_test_split', 'train_test_split', (['data_x', 'data_y'], {'test_size': '(0.3)', 'random_state': '(42)'}), False, 'from sklearn.model_selection import train_test_split\n'), (110, 'tensorflow.constant', 'tf.constant', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (123, 'tensorflow.placeholder', 'tf.placeholder', ([], {'name': '"""X"""', 'shape': '[None, data_x.shape[1], data_x.shape[2], data_x.shape[3]]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (126, 'tensorflow.placeholder', 'tf.placeholder', ([], {'name': '"""Y"""', 'shape': '[None, data_y.shape[1]]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (214, 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), True, 'import tensorflow as tf\n'), (259, 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), True, 'import tensorflow as tf\n'), (101, 'numpy.eye', 'np.eye', (['nb_classes'], {}), True, 'import numpy as np\n'), (169, 'tensorflow.name_scope', 'tf.name_scope', (['"""preFc"""'], {}), True, 'import tensorflow as tf\n'), (173, 'tensorflow.concat', 'tf.concat', (['pooled_outputs', '(3)'], {}), True, 'import tensorflow as tf\n'), (175, 'tensorflow.reshape', 'tf.reshape', (['H_pool', '[-1, total_filters]'], {}), True, 'import tensorflow as tf\n'), (177, 'tensorflow.name_scope', 'tf.name_scope', (['"""dropout"""'], {}), True, 'import tensorflow as tf\n'), (178, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['H_pool_flat'], {'keep_prob': 'keep_prob'}), True, 'import tensorflow as tf\n'), (181, 'tensorflow.name_scope', 'tf.name_scope', (['"""output"""'], {}), True, 'import tensorflow as tf\n'), (188, 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['W'], {}), True, 'import tensorflow as tf\n'), (189, 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['b'], {}), True, 'import tensorflow as tf\n'), (192, 'tensorflow.nn.xw_plus_b', 'tf.nn.xw_plus_b', (['H_drop', 'W', 'b'], {'name': '"""scores"""'}), True, 'import tensorflow as tf\n'), (193, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logit'], {'name': '"""predictions"""'}), True, 'import tensorflow as tf\n'), (197, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""FCoptimize"""'], {'reuse': 'None'}), True, 'import tensorflow as tf\n'), (204, 'tensorflow.argmax', 'tf.argmax', (['predictions', '(1)'], {}), True, 'import tensorflow as tf\n'), (204, 'tensorflow.argmax', 'tf.argmax', (['Y', '(1)'], {}), True, 'import tensorflow as tf\n'), (205, 'tensorflow.cast', 'tf.cast', (['correct_predictions', '"""float"""'], {}), True, 'import tensorflow as tf\n'), (217, 'tensorflow.Session', 'tf.Session', ([], {}), True, 'import tensorflow as tf\n'), (260, 'tensorflow.Session', 'tf.Session', ([], {'graph': 'g_1'}), True, 'import tensorflow as tf\n'), (261, 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (['"""save/model.ckpt.meta"""'], {'clear_devices': '(True)'}), True, 'import tensorflow as tf\n'), 
(263, 'tensorflow.graph_util.convert_variables_to_constants', 'tf.graph_util.convert_variables_to_constants', (['sess', 'sess.graph_def', 'dst_nodes'], {}), True, 'import tensorflow as tf\n'), (139, 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'name': '"""conv"""', 'input': 'X', 'filter': 'W', 'strides': '[1, 1, 1, 1]', 'padding': '"""VALID"""'}), True, 'import tensorflow as tf\n'), (152, 'tensorflow.nn.bias_add', 'tf.nn.bias_add', ([], {'name': '"""H"""', 'value': 'conv', 'bias': 'b'}), True, 'import tensorflow as tf\n'), (158, 'tensorflow.nn.relu', 'tf.nn.relu', (['H'], {'name': '"""relu"""'}), True, 'import tensorflow as tf\n'), (161, 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['H'], {'ksize': '[1, sequence_length - filter_size + 1, 1, 1]', 'strides': '[1, 1, 1, 1]', 'padding': '"""VALID"""', 'name': '"""pool"""'}), True, 'import tensorflow as tf\n'), (186, 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': '[nb_classes]'}), True, 'import tensorflow as tf\n'), (218, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (264, 'tensorflow.graph_util.extract_sub_graph', 'tf.graph_util.extract_sub_graph', (['graph_def', 'dst_nodes'], {}), True, 'import tensorflow as tf\n'), (184, 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), True, 'import tensorflow as tf\n'), (198, 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'logit', 'labels': 'Y'}), True, 'import tensorflow as tf\n'), (200, 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {}), True, 'import tensorflow as tf\n'), (136, 'tensorflow.contrib.layers.xavier_initializer_conv2d', 'tf.contrib.layers.xavier_initializer_conv2d', ([], {}), True, 'import tensorflow as tf\n'), (150, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n')]
zouguojian/Traffic-demand-prediction
17f034efec51391a0febcddf2dbf6924eb1c8a1c
# -- coding: utf-8 -- import numpy as np import tensorflow as tf import scipy.sparse as sp from scipy.sparse import linalg def calculate_normalized_laplacian(adj): """ # L = D^-1/2 (D-A) D^-1/2 = I - D^-1/2 A D^-1/2 # D = diag(A 1) :param adj: :return: """ adj = sp.coo_matrix(adj) d = np.array(adj.sum(1)) d_inv_sqrt = np.power(d, -0.5).flatten() d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0. d_mat_inv_sqrt = sp.diags(d_inv_sqrt) normalized_laplacian = sp.eye(adj.shape[0]) - adj.dot( d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo() return normalized_laplacian def calculate_scaled_laplacian(adj_mx, lambda_max=2, undirected=True): if undirected: adj_mx = np.maximum(adj_mx, adj_mx.T) L = calculate_normalized_laplacian(adj_mx) if lambda_max is None: lambda_max, _ = linalg.eigsh(L, 1, which='LM') lambda_max = lambda_max[0] L = sp.csr_matrix(L) M, _ = L.shape I = sp.identity(M, format='csr', dtype=L.dtype) L = (2 / lambda_max * L) - I return L.astype(np.float32) def calculate_random_walk_matrix(adj_mx): adj_mx = sp.coo_matrix(adj_mx) d = np.array(adj_mx.sum(1)) d_inv = np.power(d, -1).flatten() d_inv[np.isinf(d_inv)] = 0. d_mat_inv = sp.diags(d_inv) random_walk_mx = d_mat_inv.dot(adj_mx).tocoo() return random_walk_mx def calculate_reverse_random_walk_matrix(adj_mx): return calculate_random_walk_matrix(np.transpose(adj_mx)) class gconv(tf.keras.Model): def __init__(self, num_units, adj_mx, max_diffusion_step, num_nodes, num_proj=None, activation=tf.nn.tanh, reuse=None, filter_type="laplacian"): """ :param num_units: :param adj_mx: :param max_diffusion_step: :param num_nodes: :param input_size: :param num_proj: :param activation: :param reuse: :param filter_type: "laplacian", "random_walk", "dual_random_walk". """ super(gconv, self).__init__() self._activation = activation self._num_nodes = num_nodes self._num_proj = num_proj self._num_units = num_units self._max_diffusion_step = max_diffusion_step self._supports = [] supports = [] if filter_type == "laplacian": supports.append(calculate_scaled_laplacian(adj_mx, lambda_max=None)) for support in supports: self._supports.append(self._build_sparse_matrix(support)) @staticmethod def _build_sparse_matrix(L): L = L.tocoo() indices = np.column_stack((L.row, L.col)) L = tf.SparseTensor(indices, L.data, L.shape) return tf.sparse_reorder(L) @property def output_size(self): output_size = self._num_nodes * self._num_units if self._num_proj is not None: output_size = self._num_nodes * self._num_proj return output_size @staticmethod def _concat(x, x_): x_ = tf.expand_dims(x_, 0) return tf.concat([x, x_], axis=0) def __call__(self, inputs, bias_start=0.0): """Graph convolution between input and the graph matrix. :param args: a 2D Tensor or a list of 2D, batch x n, Tensors. 
        :param bias_start: starting value for the bias term.
        :return: a 3D Tensor of shape (batch_size, num_nodes, output_size).
        """
        # Reshape input to (batch_size, num_nodes, input_dim)
        output_size = self._num_units
        batch_size = inputs.get_shape()[0].value
        inputs = tf.reshape(inputs, [batch_size, self._num_nodes, -1])
        input_size = inputs.get_shape()[2].value
        dtype = inputs.dtype

        x = inputs
        x0 = tf.transpose(x, perm=[1, 2, 0])  # (num_nodes, total_arg_size, batch_size)
        x0 = tf.reshape(x0, shape=[self._num_nodes, input_size * batch_size])
        x = tf.expand_dims(x0, axis=0)

        scope = tf.get_variable_scope()
        with tf.variable_scope(scope):
            if self._max_diffusion_step == 0:
                pass
            else:
                for support in self._supports:
                    x1 = tf.sparse_tensor_dense_matmul(support, x0)
                    x = self._concat(x, x1)
                    for _ in range(2, self._max_diffusion_step + 1):
                        x2 = 2 * tf.sparse_tensor_dense_matmul(support, x1) - x0
                        x = self._concat(x, x2)
                        x1, x0 = x2, x1

            num_matrices = len(self._supports) * self._max_diffusion_step + 1  # Adds for x itself.
            x = tf.reshape(x, shape=[num_matrices, self._num_nodes, input_size, batch_size])
            x = tf.transpose(x, perm=[3, 1, 2, 0])  # (batch_size, num_nodes, input_size, order)
            x = tf.reshape(x, shape=[batch_size * self._num_nodes, input_size * num_matrices])

            weights = tf.get_variable(
                'weights', [input_size * num_matrices, output_size],
                dtype=dtype,
                initializer=tf.contrib.layers.xavier_initializer())
            x = tf.matmul(x, weights)  # (batch_size * self._num_nodes, output_size)

            biases = tf.get_variable("biases", [output_size],
                                     dtype=dtype,
                                     initializer=tf.constant_initializer(bias_start, dtype=dtype))
            x = tf.nn.bias_add(x, biases)
        # Reshape res back to: (batch_size, num_nodes, output_size)
        return tf.reshape(x, [batch_size, self._num_nodes, output_size])
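A minimal sketch (not from the repo) of the Laplacian preprocessing defined at the top of this file, applied to a 3-node path graph; with the default lambda_max=2 the eigenvalue estimation is skipped and the result is simply the normalized Laplacian minus the identity:

import numpy as np

adj = np.array([[0., 1., 0.],
                [1., 0., 1.],
                [0., 1., 0.]], dtype=np.float32)
L = calculate_scaled_laplacian(adj)  # scipy CSR matrix with spectrum rescaled into [-1, 1]
print(L.toarray())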
[ "tensorflow.concat", "tensorflow.sparse_tensor_dense_matmul", "scipy.sparse.linalg.eigsh", "scipy.sparse.coo_matrix", "scipy.sparse.diags", "tensorflow.contrib.layers.xavier_initializer", "numpy.column_stack", "tensorflow.matmul", "numpy.power", "scipy.sparse.csr_matrix", "numpy.transpose", "tensorflow.sparse_reorder", "tensorflow.nn.bias_add", "numpy.maximum", "tensorflow.transpose", "scipy.sparse.eye", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.SparseTensor", "tensorflow.constant_initializer", "scipy.sparse.identity", "tensorflow.variable_scope", "tensorflow.get_variable_scope", "numpy.isinf" ]
OD/comparison_model/gconv.py
[(15, 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['adj'], {}), True, 'import scipy.sparse as sp\n'), (19, 'scipy.sparse.diags', 'sp.diags', (['d_inv_sqrt'], {}), True, 'import scipy.sparse as sp\n'), (32, 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['L'], {}), True, 'import scipy.sparse as sp\n'), (34, 'scipy.sparse.identity', 'sp.identity', (['M'], {'format': '"""csr"""', 'dtype': 'L.dtype'}), True, 'import scipy.sparse as sp\n'), (40, 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['adj_mx'], {}), True, 'import scipy.sparse as sp\n'), (44, 'scipy.sparse.diags', 'sp.diags', (['d_inv'], {}), True, 'import scipy.sparse as sp\n'), (20, 'scipy.sparse.eye', 'sp.eye', (['adj.shape[0]'], {}), True, 'import scipy.sparse as sp\n'), (27, 'numpy.maximum', 'np.maximum', (['adj_mx', 'adj_mx.T'], {}), True, 'import numpy as np\n'), (30, 'scipy.sparse.linalg.eigsh', 'linalg.eigsh', (['L', '(1)'], {'which': '"""LM"""'}), False, 'from scipy.sparse import linalg\n'), (50, 'numpy.transpose', 'np.transpose', (['adj_mx'], {}), True, 'import numpy as np\n'), (91, 'numpy.column_stack', 'np.column_stack', (['(L.row, L.col)'], {}), True, 'import numpy as np\n'), (92, 'tensorflow.SparseTensor', 'tf.SparseTensor', (['indices', 'L.data', 'L.shape'], {}), True, 'import tensorflow as tf\n'), (93, 'tensorflow.sparse_reorder', 'tf.sparse_reorder', (['L'], {}), True, 'import tensorflow as tf\n'), (104, 'tensorflow.expand_dims', 'tf.expand_dims', (['x_', '(0)'], {}), True, 'import tensorflow as tf\n'), (105, 'tensorflow.concat', 'tf.concat', (['[x, x_]'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (119, 'tensorflow.reshape', 'tf.reshape', (['inputs', '[batch_size, self._num_nodes, -1]'], {}), True, 'import tensorflow as tf\n'), (124, 'tensorflow.transpose', 'tf.transpose', (['x'], {'perm': '[1, 2, 0]'}), True, 'import tensorflow as tf\n'), (125, 'tensorflow.reshape', 'tf.reshape', (['x0'], {'shape': '[self._num_nodes, input_size * batch_size]'}), True, 'import tensorflow as tf\n'), (126, 'tensorflow.expand_dims', 'tf.expand_dims', (['x0'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (128, 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), True, 'import tensorflow as tf\n'), (159, 'tensorflow.reshape', 'tf.reshape', (['x', '[batch_size, self._num_nodes, output_size]'], {}), True, 'import tensorflow as tf\n'), (17, 'numpy.power', 'np.power', (['d', '(-0.5)'], {}), True, 'import numpy as np\n'), (18, 'numpy.isinf', 'np.isinf', (['d_inv_sqrt'], {}), True, 'import numpy as np\n'), (42, 'numpy.power', 'np.power', (['d', '(-1)'], {}), True, 'import numpy as np\n'), (43, 'numpy.isinf', 'np.isinf', (['d_inv'], {}), True, 'import numpy as np\n'), (129, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (143, 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '[num_matrices, self._num_nodes, input_size, batch_size]'}), True, 'import tensorflow as tf\n'), (144, 'tensorflow.transpose', 'tf.transpose', (['x'], {'perm': '[3, 1, 2, 0]'}), True, 'import tensorflow as tf\n'), (145, 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '[batch_size * self._num_nodes, input_size * num_matrices]'}), True, 'import tensorflow as tf\n'), (150, 'tensorflow.matmul', 'tf.matmul', (['x', 'weights'], {}), True, 'import tensorflow as tf\n'), (157, 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['x', 'biases'], {}), True, 'import tensorflow as tf\n'), (134, 'tensorflow.sparse_tensor_dense_matmul', 'tf.sparse_tensor_dense_matmul', (['support', 'x0'], {}), True, 
'import tensorflow as tf\n'), (149, 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), True, 'import tensorflow as tf\n'), (155, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['bias_start'], {'dtype': 'dtype'}), True, 'import tensorflow as tf\n'), (138, 'tensorflow.sparse_tensor_dense_matmul', 'tf.sparse_tensor_dense_matmul', (['support', 'x1'], {}), True, 'import tensorflow as tf\n')]
Creskendoll/null-pointer-acik-hack
e73f75b891392607e241fe4e14b884cbced0dbe1
from flask import Flask, request from flask_restful import Api from os import environ import json from flask_cors import cross_origin import logging import tensorflow as tf import os from MyModel import MyModel from MyHTMLParser import MyHTMLParser from keras_preprocessing.text import Tokenizer import io import requests from OpenSSL import SSL context = SSL.Context(SSL.TLSv1_2_METHOD) context.use_privatekey_file('./keyac.pem') context.use_certificate_file('./certac.pem') # log = logging.getLogger('werkzeug') # log.setLevel(logging.ERROR) app = Flask(__name__, static_url_path='', static_folder='public') file_path = "./res/out.txt" # text = io.open(file_path, "r", encoding="ISO8859-9").read() text = io.open(file_path, "r", encoding="ISO8859-9").read() tokenizer = Tokenizer() tokenizer.fit_on_texts([text]) encoded = tokenizer.texts_to_sequences([text])[0] word2idx = tokenizer.word_index idx2word = tokenizer.index_word BATCH_SIZE = 256 embedding_dim = 100 units = 512 vocab_size = len(tokenizer.word_index) + 1 model = MyModel(vocab_size, embedding_dim, units, BATCH_SIZE) optimizer = tf.optimizers.Adam() checkpoint_dir = "./models/new_out" checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt") checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model) checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir)).expect_partial() @app.route("/summary", methods=["POST"]) @cross_origin(headers=['Content-Type']) def summary(): res = requests.post("https://turkcemetinozetleme.teaddict.net/ozetle/api/new", data={ "contextOfText":request.data.decode() }, headers={ "content-type": "application/x-www-form-urlencoded; charset=UTF-8;" }) print(res.text) response = app.response_class( response=json.dumps({"summary" : res.json()}), status=200, mimetype='application/json' ) return response, 200 @app.route("/paraphrase", methods=["POST"]) @cross_origin(headers=['Content-Type']) def paraphrase(): base_url = "https://tr.m.wikiquote.org/w/index.php?search=" query = request.data.decode().replace(" ", "+") res = requests.post(base_url+query+"&ns0=1", data={ "contextOfText":request.data.decode() }, headers={ "content-type": "application/x-www-form-urlencoded; charset=UTF-8;" }) parser = MyHTMLParser() # print(res.text) parser.feed(res.text) response = app.response_class( response=json.dumps({"paraphrase" : parser.found}, ensure_ascii=False), status=200, mimetype='application/json' ) return response, 200 @app.route("/", methods=["GET"]) def homepage(): return app.send_static_file("homepage.html") # return "Ne baktın yarram." 
@app.route("/suggest", methods=["POST"]) @cross_origin(headers=['Content-Type']) def predict(): try: out_string = "" start_string = request.data.decode().lower() n_words = 5 hidden = [tf.zeros((1, units))] for i in range(n_words): start_words = start_string.split() input_eval = [word2idx[i] for i in start_words] input_eval = tf.expand_dims(input_eval, 0) predictions, hidden = model(input_eval, hidden) predicted_id = tf.argmax(predictions[-1]).numpy() start_string += " " + idx2word[predicted_id] out_string += " " + idx2word[predicted_id] print(out_string) response = app.response_class( response=json.dumps({"prediction" : out_string}, ensure_ascii=False), status=200, mimetype='application/json' ) return response, 200 except Exception as e: print(e) print(e.with_traceback()) port = int(environ.get("PORT", 5000)) # app.run(host="0.0.0.0", debug=True, port=port) # app.run(host="0.0.0.0", debug=True, port=port, ssl_context=("certac.pem", "keyac.pem")) app.run(host="0.0.0.0", debug=True, port=port) # app.run(host="0.0.0.0", port=port)
[ "tensorflow.train.latest_checkpoint", "tensorflow.zeros", "tensorflow.train.Checkpoint", "tensorflow.expand_dims", "tensorflow.optimizers.Adam", "tensorflow.argmax" ]
backend/main.py
[(15, 'OpenSSL.SSL.Context', 'SSL.Context', (['SSL.TLSv1_2_METHOD'], {}), False, 'from OpenSSL import SSL\n'), (21, 'flask.Flask', 'Flask', (['__name__'], {'static_url_path': '""""""', 'static_folder': '"""public"""'}), False, 'from flask import Flask, request\n'), (28, 'keras_preprocessing.text.Tokenizer', 'Tokenizer', ([], {}), False, 'from keras_preprocessing.text import Tokenizer\n'), (41, 'MyModel.MyModel', 'MyModel', (['vocab_size', 'embedding_dim', 'units', 'BATCH_SIZE'], {}), False, 'from MyModel import MyModel\n'), (42, 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', ([], {}), True, 'import tensorflow as tf\n'), (44, 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""ckpt"""'], {}), False, 'import os\n'), (45, 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'optimizer': 'optimizer', 'model': 'model'}), True, 'import tensorflow as tf\n'), (50, 'flask_cors.cross_origin', 'cross_origin', ([], {'headers': "['Content-Type']"}), False, 'from flask_cors import cross_origin\n'), (66, 'flask_cors.cross_origin', 'cross_origin', ([], {'headers': "['Content-Type']"}), False, 'from flask_cors import cross_origin\n'), (91, 'flask_cors.cross_origin', 'cross_origin', ([], {'headers': "['Content-Type']"}), False, 'from flask_cors import cross_origin\n'), (75, 'MyHTMLParser.MyHTMLParser', 'MyHTMLParser', ([], {}), False, 'from MyHTMLParser import MyHTMLParser\n'), (122, 'os.environ.get', 'environ.get', (['"""PORT"""', '(5000)'], {}), False, 'from os import environ\n'), (26, 'io.open', 'io.open', (['file_path', '"""r"""'], {'encoding': '"""ISO8859-9"""'}), False, 'import io\n'), (47, 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['checkpoint_dir'], {}), True, 'import tensorflow as tf\n'), (69, 'flask.request.data.decode', 'request.data.decode', ([], {}), False, 'from flask import Flask, request\n'), (79, 'json.dumps', 'json.dumps', (["{'paraphrase': parser.found}"], {'ensure_ascii': '(False)'}), False, 'import json\n'), (97, 'tensorflow.zeros', 'tf.zeros', (['(1, units)'], {}), True, 'import tensorflow as tf\n'), (102, 'tensorflow.expand_dims', 'tf.expand_dims', (['input_eval', '(0)'], {}), True, 'import tensorflow as tf\n'), (53, 'flask.request.data.decode', 'request.data.decode', ([], {}), False, 'from flask import Flask, request\n'), (71, 'flask.request.data.decode', 'request.data.decode', ([], {}), False, 'from flask import Flask, request\n'), (95, 'flask.request.data.decode', 'request.data.decode', ([], {}), False, 'from flask import Flask, request\n'), (113, 'json.dumps', 'json.dumps', (["{'prediction': out_string}"], {'ensure_ascii': '(False)'}), False, 'import json\n'), (106, 'tensorflow.argmax', 'tf.argmax', (['predictions[-1]'], {}), True, 'import tensorflow as tf\n')]
Pearl-UTexas/ICML2019-TREX
fb63afd13a558061bb537b87388ad3ca22eb96d1
import tensorflow as tf class Conv2d(object) : def __init__(self,name,input_dim,output_dim,k_h=4,k_w=4,d_h=2,d_w=2, stddev=0.02, data_format='NCHW',padding='SAME') : with tf.variable_scope(name) : assert(data_format == 'NCHW' or data_format == 'NHWC') self.w = tf.get_variable('w', [k_h, k_w, input_dim, output_dim], initializer=tf.truncated_normal_initializer(stddev=stddev)) self.b = tf.get_variable('b',[output_dim], initializer=tf.constant_initializer(0.0)) if( data_format == 'NCHW' ) : self.strides = [1, 1, d_h, d_w] else : self.strides = [1, d_h, d_w, 1] self.data_format = data_format self.padding = padding def __call__(self,input_var,name=None,w=None,b=None,**kwargs) : w = w if w is not None else self.w b = b if b is not None else self.b if( self.data_format =='NCHW' ) : return tf.nn.bias_add( tf.nn.conv2d(input_var, w, use_cudnn_on_gpu=True,data_format='NCHW', strides=self.strides, padding=self.padding), b,data_format='NCHW',name=name) else : return tf.nn.bias_add( tf.nn.conv2d(input_var, w,data_format='NHWC', strides=self.strides, padding=self.padding), b,data_format='NHWC',name=name) def get_variables(self): return {'w':self.w,'b':self.b} class WeightNormConv2d(object): def __init__(self,name,input_dim,output_dim,k_h=4,k_w=4,d_h=2,d_w=2, stddev=0.02, data_format='NHWC',padding='SAME',epsilon=1e-9) : with tf.variable_scope(name) : assert data_format == 'NHWC' self.v = tf.get_variable('v', [k_h, k_w, input_dim, output_dim], initializer=tf.truncated_normal_initializer(stddev=stddev)) self.g = tf.get_variable('g',[output_dim], initializer=tf.constant_initializer(float('nan'))) self.b = tf.get_variable('b',[output_dim], initializer=tf.constant_initializer(float('nan'))) self.strides = [1, d_h, d_w, 1] self.padding = padding self.epsilon = epsilon def __call__(self,input_var,name=None,**kwargs) : def _init(): v_norm = tf.nn.l2_normalize(self.v,axis=[0,1,2]) t = tf.nn.conv2d(input_var,v_norm,self.strides,self.padding,data_format='NHWC') mu,var = tf.nn.moments(t,axes=[0,1,2]) std = tf.sqrt(var+self.epsilon) return [tf.assign(self.g,1/std),tf.assign(self.b,-1.*mu/std)] require_init = tf.reduce_any(tf.is_nan(self.g)) init_ops = tf.cond(require_init,_init,lambda : [self.g,self.b]) with tf.control_dependencies(init_ops): w = tf.reshape(self.g,[1,1,1,tf.shape(self.v)[-1]]) * tf.nn.l2_normalize(self.v,axis=[0,1,2]) return tf.nn.bias_add( tf.nn.conv2d(input_var, w,data_format='NHWC', strides=self.strides, padding=self.padding), self.b,data_format='NHWC',name=name) def get_variables(self): #TODO: self.v should be l2-normalized or not? / currently not. 
return {'v':self.v,'b':self.b,'g':self.g} class DepthConv2d(object) : def __init__(self,name,input_dim,channel_multiplier,k_h=4,k_w=4,d_h=2,d_w=2, stddev=0.02, data_format='NCHW', padding='SAME') : with tf.variable_scope(name) : assert(data_format == 'NCHW' or data_format == 'NHWC') self.w = tf.get_variable('w', [k_h, k_w, input_dim, channel_multiplier], initializer=tf.truncated_normal_initializer(stddev=stddev)) self.b = tf.get_variable('b',[input_dim*channel_multiplier], initializer=tf.constant_initializer(0.0)) if( data_format == 'NCHW' ) : self.strides = [1, 1, d_h, d_w] else : self.strides = [1, d_h, d_w, 1] self.data_format = data_format self.padding = padding def __call__(self,input_var,name=None,**xargs) : return tf.nn.bias_add( tf.nn.depthwise_conv2d(input_var, self.w, data_format=self.data_format, strides=self.strides, padding=self.padding), self.b,data_format=self.data_format,name=name) class Conv3d(object) : def __init__(self,name,input_dim,output_dim,k_t=2,k_h=4,k_w=4,d_t=1,d_h=1,d_w=1, stddev=0.02, data_format='NDHWC') : with tf.variable_scope(name) : assert(data_format == 'NDHWC') self.w = tf.get_variable('w', [k_t, k_h, k_w, input_dim, output_dim], initializer=tf.truncated_normal_initializer(stddev=stddev)) self.b = tf.get_variable('b',[output_dim], initializer=tf.constant_initializer(0.0)) self.strides = [d_t,d_h,d_w] def __call__(self,input_var,name=None,w=None,b=None,**kwargs) : w = w if w is not None else self.w b = b if b is not None else self.b #k_t,k_h,k_w,_,_ = self.w.get_shape().as_list() #_t = tf.pad(input_var, [[0,0],[0,0],[k_h//2,k_h//2],[k_w//2,k_w//2],[0,0]], "SYMMETRIC") return tf.nn.bias_add( tf.nn.convolution(input_var, w, strides=self.strides, data_format='NDHWC', padding='SAME'), b,name=name) def get_variables(self): return {'w':self.w,'b':self.b} class DilatedConv3D(object) : def __init__(self,name,input_dim,output_dim,k_t=2,k_h=3,k_w=3,d_t=2,d_h=1,d_w=1, stddev=0.02, data_format='NDHWC') : with tf.variable_scope(name) : assert(data_format == 'NDHWC') self.w = tf.get_variable('w', [k_t, k_h, k_w, input_dim, output_dim], initializer=tf.truncated_normal_initializer(stddev=stddev)) self.b = tf.get_variable('b',[output_dim], initializer=tf.constant_initializer(0.0)) self.strides = [1,1,1] self.dilates = [d_t, d_h, d_w] def __call__(self,input_var,name=None) : k_t,k_h,k_w,_,_ = self.w.get_shape().as_list() _t = tf.pad(input_var, [[0,0],[0,0],[k_h//2,k_h//2],[k_w//2,k_w//2],[0,0]], "SYMMETRIC") return tf.nn.bias_add( tf.nn.convolution(_t, self.w, strides=self.strides, dilation_rate=self.dilates, padding='VALID'), self.b,name=name) class Linear(object) : def __init__(self,name,input_dim,output_dim,stddev=0.02) : with tf.variable_scope(name) : self.w = tf.get_variable('w',[input_dim, output_dim], initializer=tf.random_normal_initializer(stddev=stddev)) self.b = tf.get_variable('b',[output_dim], initializer=tf.constant_initializer(0.0)) def __call__(self,input_var,name=None,w=None,b=None,**kwargs) : w = w if w is not None else self.w b = b if b is not None else self.b if( input_var.shape.ndims > 2 ) : dims = tf.reduce_prod(tf.shape(input_var)[1:]) return tf.matmul(tf.reshape(input_var,[-1,dims]),w) + b else : return tf.matmul(input_var,w)+b def get_variables(self): return {'w':self.w,'b':self.b} class WeightNormLinear(object): def __init__(self,name,input_dim,output_dim,stddev=0.02,epsilon=1e-10) : with tf.variable_scope(name) : self.v = tf.get_variable('v',[input_dim, output_dim], initializer=tf.random_normal_initializer(stddev=stddev)) self.g = 
tf.get_variable('g',[output_dim],
                                     initializer=tf.constant_initializer(float('nan')))
            self.b = tf.get_variable('b',[output_dim],
                                     initializer=tf.constant_initializer(float('nan')))
            self.epsilon = epsilon

    def __call__(self,input_var,name=None,**kwargs) :
        if( input_var.shape.ndims > 2 ) :
            dims = tf.reduce_prod(tf.shape(input_var)[1:])
            input_var = tf.reshape(input_var,[-1,dims])

        def _init():
            v_norm = tf.nn.l2_normalize(self.v,axis=0)
            t = tf.matmul(input_var,v_norm)
            mu,var = tf.nn.moments(t,axes=[0])
            std = tf.sqrt(var+self.epsilon)
            return [tf.assign(self.g,1/std),tf.assign(self.b,-1.*mu/std)]

        require_init = tf.reduce_any(tf.is_nan(self.g))
        init_ops = tf.cond(require_init,_init,lambda : [self.g,self.b])

        with tf.control_dependencies(init_ops):
            w = tf.expand_dims(self.g,axis=0) * tf.nn.l2_normalize(self.v,axis=0)
            return tf.matmul(input_var,w)+self.b

    def get_variables(self):
        #TODO: self.v should be l2-normalized or not? / currently not.
        return {'v':self.v,'b':self.b,'g':self.g}

class SymPadConv2d(object): #Resize and convolution (upscale by 2)
    def __init__(self,name,input_dim,output_dim,
                 k_h=3,k_w=3,stddev=0.02) :
        assert k_h%2==1 and k_w%2==1, 'kernel size should be odd numbers to ensure exact size'
        with tf.variable_scope(name) :
            self.w = tf.get_variable('w', [k_h, k_w, input_dim, output_dim],
                                initializer=tf.random_normal_initializer(stddev=stddev))
            self.b = tf.get_variable('b',[output_dim],
                                initializer=tf.constant_initializer(0.0))

            self.padding = [ [0,0],[k_h//2,k_h//2],[k_w//2,k_w//2],[0,0] ]

    def __call__(self,input_var,name=None,**kwargs):
        _,h,w,c = input_var.shape.as_list()
        _t = tf.image.resize_nearest_neighbor(input_var, [h*2, w*2])
        _t = tf.pad(_t,self.padding, mode='SYMMETRIC')

        return tf.nn.bias_add(
            tf.nn.conv2d(_t, self.w,
                         data_format='NHWC', #we can't use cudnn due to resize method...
                         strides=[1,1,1,1], padding="VALID"),
            self.b,data_format='NHWC',name=name)

    def get_variables(self):
        return {'w':self.w,'b':self.b}

class WeightNormSymPadConv2d(object): #Resize and convolution (upscale by 2)
    def __init__(self,name,input_dim,output_dim,
                 k_h=3,k_w=3,stddev=0.02) :
        assert k_h%2==1 and k_w%2==1, 'kernel size should be odd numbers to ensure exact size'

        with tf.variable_scope(name) :
            self.conv2d = WeightNormConv2d('conv',input_dim,output_dim,k_h,k_w,1,1,data_format='NHWC',padding='VALID')
            self.padding = [ [0,0],[k_h//2,k_h//2],[k_w//2,k_w//2],[0,0] ]

    def __call__(self,input_var,name=None,**kwargs):
        _,h,w,c = input_var.shape.as_list()
        _t = tf.image.resize_nearest_neighbor(input_var, [h*2, w*2])
        _t = tf.pad(_t,self.padding, mode='SYMMETRIC')

        return self.conv2d(_t)

    def get_variables(self):
        return self.conv2d.get_variables()

class TransposedConv2d(object):
    def __init__(self,name,input_dim,out_dim,
                 k_h=4,k_w=4,d_h=2,d_w=2,stddev=0.02,data_format='NCHW') :
        with tf.variable_scope(name) :
            self.w = tf.get_variable('w', [k_h, k_w, out_dim, input_dim],
                                initializer=tf.random_normal_initializer(stddev=stddev))
            self.b = tf.get_variable('b',[out_dim],
                                initializer=tf.constant_initializer(0.0))

        self.data_format = data_format
        if( data_format =='NCHW' ):
            self.strides = [1, 1, d_h, d_w]
        else:
            self.strides = [1, d_h, d_w, 1]

    def __call__(self,input_var,name=None,**xargs):
        shapes = tf.shape(input_var)
        if( self.data_format == 'NCHW' ):
            shapes = tf.stack([shapes[0],tf.shape(self.b)[0],shapes[2]*self.strides[2],shapes[3]*self.strides[3]])
        else:
            shapes = tf.stack([shapes[0],shapes[1]*self.strides[1],shapes[2]*self.strides[2],tf.shape(self.b)[0]])

        return tf.nn.bias_add(
            tf.nn.conv2d_transpose(input_var,self.w,output_shape=shapes,
                                   data_format=self.data_format,
                                   strides=self.strides,padding='SAME'),
            self.b,data_format=self.data_format,name=name)

    def get_variables(self):
        return {'w':self.w,'b':self.b}

class WeightNormTransposedConv2d(object):
    def __init__(self,name,input_dim,out_dim,
                 k_h=4,k_w=4,d_h=2,d_w=2,stddev=0.02,data_format='NHWC',epsilon=1e-9) :
        with tf.variable_scope(name) :
            assert data_format == 'NHWC'
            self.v = tf.get_variable('v', [k_h, k_w, out_dim, input_dim],
                                initializer=tf.truncated_normal_initializer(stddev=stddev))
            self.g = tf.get_variable('g',[out_dim],
                                initializer=tf.constant_initializer(float('nan')))
            self.b = tf.get_variable('b',[out_dim],
                                initializer=tf.constant_initializer(float('nan')))

            self.strides = [1, d_h, d_w, 1]
            self.epsilon = epsilon

    def __call__(self,input_var,name=None,**kwargs) :
        shapes = tf.shape(input_var)
        shapes = tf.stack([shapes[0],shapes[1]*self.strides[1],shapes[2]*self.strides[2],tf.shape(self.b)[0]])

        def _init():
            v_norm = tf.nn.l2_normalize(self.v,axis=[0,1,3])
            t = tf.nn.conv2d_transpose(input_var,v_norm,
                                       output_shape=shapes,
                                       strides=self.strides,
                                       padding='SAME',
                                       data_format='NHWC')
            mu,var = tf.nn.moments(t,axes=[0,1,2])
            std = tf.sqrt(var+self.epsilon)
            return [tf.assign(self.g,1/std),tf.assign(self.b,-1.*mu/std)]

        require_init = tf.reduce_any(tf.is_nan(self.g))
        init_ops = tf.cond(require_init,_init,lambda : [self.g,self.b])

        with tf.control_dependencies(init_ops):
            w = tf.reshape(self.g,[1,1,tf.shape(self.v)[2],1]) * tf.nn.l2_normalize(self.v,axis=[0,1,3])
            return tf.nn.bias_add(
                tf.nn.conv2d_transpose(input_var,w,
                                       output_shape=shapes,
                                       strides=self.strides,
                                       padding='SAME',
                                       data_format='NHWC'),
                self.b,data_format='NHWC',name=name)

    def get_variables(self):
        #TODO: self.v should be l2-normalized or not? / currently not.
        return {'v':self.v,'b':self.b,'g':self.g}

class LayerNorm():
    def __init__(self,name,axis,out_dim=None,epsilon=1e-7,data_format='NHWC') :
        """
        out_dim: Recentering by adding the bias again.
                 The previous bias can be ignored during normalization
                 (when you normalize over the channel axis only).
        """
        assert data_format=='NCHW' or data_format=='NHWC'
        assert len(axis) != 1 or (len(axis) == 1 and out_dim is not None)
        """
        TODO: Track moving mean and variance, and use these statistics.
        with tf.variable_scope(name):
            self.moving_mean = tf.get_variable('moving_mean',[dims], initializer=tf.constant_initializer(0.0), trainable=False)
            self.moving_variance = tf.get_variable('moving_variance',[dims], initializer=tf.constant_initializer(1.0), trainable=False)
        """
        if out_dim is not None:
            with tf.variable_scope(name) :
                self.gamma= tf.get_variable('gamma',[1,1,1,out_dim], initializer=tf.constant_initializer(1.0))
                self.beta = tf.get_variable('beta',[out_dim], initializer=tf.constant_initializer(0.0))
        else:
            self.gamma = None
            self.beta = None
        self.axis = axis
        self.epsilon = epsilon
        self.data_format = data_format
        self.name = name

    def __call__(self,input_var,**kwargs) :
        mean, var = tf.nn.moments(input_var, self.axis, keep_dims=True)
        ret = (input_var - mean) / tf.sqrt(var+self.epsilon)
        if self.gamma is None :
            return ret
        else:
            return tf.nn.bias_add(ret*self.gamma, self.beta,data_format=self.data_format)

    def get_variables(self):
        return {'gamma':self.gamma,'beta':self.beta} if self.gamma is not None else {}

class InstanceNorm():
    def __init__(self,name,format='NCHW',epsilon=1e-5) :
        assert(format=='NCHW' or format=='NHWC')
        self.axis = [2,3] if format == 'NCHW' else [1,2]
        self.epsilon = epsilon
        self.name = name

    def __call__(self,input_var) :
        mean, var = tf.nn.moments(input_var, self.axis, keep_dims=True)
        return (input_var - mean) / tf.sqrt(var+self.epsilon)

class BatchNorm(object):
    def __init__(self,name,dims,axis=1,epsilon=1e-3,momentum=0.999,center=True,scale=True) :
        self.momentum = momentum
        self.epsilon = epsilon
        self.axis = axis
        self.center=center
        self.scale=scale
        with tf.variable_scope(name) as scope:
            with tf.variable_scope('bn') :
                self.gamma= tf.get_variable('gamma',[dims], initializer=tf.constant_initializer(1.0))
                self.beta = tf.get_variable('beta',[dims], initializer=tf.constant_initializer(0.0))
                self.moving_mean = tf.get_variable('moving_mean',[dims], initializer=tf.constant_initializer(0.0), trainable=False)
                self.moving_variance = tf.get_variable('moving_variance',[dims], initializer=tf.constant_initializer(1.0), trainable=False)
        self.scope = scope

    def __call__(self,input_var,is_training,**xargs) :
        with tf.variable_scope(self.scope) :
            return tf.layers.batch_normalization(
                input_var,
                axis=self.axis,
                momentum=self.momentum,
                epsilon=self.epsilon,
                center=self.center,
                scale=self.scale,
                training=is_training,
                reuse=True,
                name='bn')
        """
        ---Do NOT forget to add update_ops dependencies for your loss function.---
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,tf.get_default_graph().get_name_scope())
        #And, do not make any scope inside map_fn, since scope.name will not work (it is corrupted by map_fn).
print(update_ops) with tf.control_dependencies(update_ops): """ def get_variables(self): return {} class Lrelu(object): def __init__(self,leak=0.2,name='lrelu') : self.leak = leak self.name = name def __call__(self, x, **kwargs) : return tf.maximum(x, self.leak*x, name=self.name) def get_variables(self): return {} class ResidualBlock() : def __init__(self,name,filters,filter_size=3,non_linearity=Lrelu,normal_method=InstanceNorm) : self.conv_1 = Conv2d(name+'_1',filters,filters,filter_size,filter_size,1,1) self.normal = normal_method(name+'_norm') self.nl = non_linearity() self.conv_2 = Conv2d(name+'_2',filters,filters,filter_size,filter_size,1,1) def __call__(self,input_var) : _t = self.conv_1(input_var) _t = self.normal(_t) _t = self.nl(_t) _t = self.conv_2(_t) return input_var + _t
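A short usage sketch for the layer wrappers defined above. This is an illustration under assumptions, not code from the repo: TF 1.x graph mode, an NCHW input batch, and the classes being importable from this module.

import tensorflow as tf
# from tf_commons.ops import Conv2d, Lrelu, ResidualBlock  # assumed import path

x = tf.placeholder(tf.float32, [None, 3, 64, 64])   # NCHW image batch
conv = Conv2d('conv1', input_dim=3, output_dim=32)  # 4x4 kernel, stride 2 by default
h = Lrelu()(conv(x))                                # -> [None, 32, 32, 32]
h = ResidualBlock('res1', filters=32)(h)            # shape-preserving residual block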
[ "tensorflow.is_nan", "tensorflow.cond", "tensorflow.control_dependencies", "tensorflow.nn.conv2d_transpose", "tensorflow.pad", "tensorflow.nn.depthwise_conv2d", "tensorflow.nn.conv2d", "tensorflow.layers.batch_normalization", "tensorflow.nn.moments", "tensorflow.truncated_normal_initializer", "tensorflow.random_normal_initializer", "tensorflow.nn.l2_normalize", "tensorflow.nn.convolution", "tensorflow.matmul", "tensorflow.image.resize_nearest_neighbor", "tensorflow.shape", "tensorflow.nn.bias_add", "tensorflow.maximum", "tensorflow.reshape", "tensorflow.assign", "tensorflow.expand_dims", "tensorflow.constant_initializer", "tensorflow.variable_scope", "tensorflow.sqrt" ]
mujoco/tf_commons/ops.py
[(61, 'tensorflow.cond', 'tf.cond', (['require_init', '_init', '(lambda : [self.g, self.b])'], {}), True, 'import tensorflow as tf\n'), (130, 'tensorflow.pad', 'tf.pad', (['input_var', '[[0, 0], [0, 0], [k_h // 2, k_h // 2], [k_w // 2, k_w // 2], [0, 0]]', '"""SYMMETRIC"""'], {}), True, 'import tensorflow as tf\n'), (181, 'tensorflow.cond', 'tf.cond', (['require_init', '_init', '(lambda : [self.g, self.b])'], {}), True, 'import tensorflow as tf\n'), (204, 'tensorflow.image.resize_nearest_neighbor', 'tf.image.resize_nearest_neighbor', (['input_var', '[h * 2, w * 2]'], {}), True, 'import tensorflow as tf\n'), (205, 'tensorflow.pad', 'tf.pad', (['_t', 'self.padding'], {'mode': '"""SYMMETRIC"""'}), True, 'import tensorflow as tf\n'), (224, 'tensorflow.image.resize_nearest_neighbor', 'tf.image.resize_nearest_neighbor', (['input_var', '[h * 2, w * 2]'], {}), True, 'import tensorflow as tf\n'), (225, 'tensorflow.pad', 'tf.pad', (['_t', 'self.padding'], {'mode': '"""SYMMETRIC"""'}), True, 'import tensorflow as tf\n'), (246, 'tensorflow.shape', 'tf.shape', (['input_var'], {}), True, 'import tensorflow as tf\n'), (277, 'tensorflow.shape', 'tf.shape', (['input_var'], {}), True, 'import tensorflow as tf\n'), (292, 'tensorflow.cond', 'tf.cond', (['require_init', '_init', '(lambda : [self.g, self.b])'], {}), True, 'import tensorflow as tf\n'), (340, 'tensorflow.nn.moments', 'tf.nn.moments', (['input_var', 'self.axis'], {'keep_dims': '(True)'}), True, 'import tensorflow as tf\n'), (361, 'tensorflow.nn.moments', 'tf.nn.moments', (['input_var', 'self.axis'], {'keep_dims': '(True)'}), True, 'import tensorflow as tf\n'), (406, 'tensorflow.maximum', 'tf.maximum', (['x', '(self.leak * x)'], {'name': 'self.name'}), True, 'import tensorflow as tf\n'), (6, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (38, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (54, 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['self.v'], {'axis': '[0, 1, 2]'}), True, 'import tensorflow as tf\n'), (55, 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['input_var', 'v_norm', 'self.strides', 'self.padding'], {'data_format': '"""NHWC"""'}), True, 'import tensorflow as tf\n'), (56, 'tensorflow.nn.moments', 'tf.nn.moments', (['t'], {'axes': '[0, 1, 2]'}), True, 'import tensorflow as tf\n'), (57, 'tensorflow.sqrt', 'tf.sqrt', (['(var + self.epsilon)'], {}), True, 'import tensorflow as tf\n'), (60, 'tensorflow.is_nan', 'tf.is_nan', (['self.g'], {}), True, 'import tensorflow as tf\n'), (63, 'tensorflow.control_dependencies', 'tf.control_dependencies', (['init_ops'], {}), True, 'import tensorflow as tf\n'), (77, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (90, 'tensorflow.nn.depthwise_conv2d', 'tf.nn.depthwise_conv2d', (['input_var', 'self.w'], {'data_format': 'self.data_format', 'strides': 'self.strides', 'padding': 'self.padding'}), True, 'import tensorflow as tf\n'), (98, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (110, 'tensorflow.nn.convolution', 'tf.nn.convolution', (['input_var', 'w'], {'strides': 'self.strides', 'data_format': '"""NDHWC"""', 'padding': '"""SAME"""'}), True, 'import tensorflow as tf\n'), (121, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (132, 'tensorflow.nn.convolution', 'tf.nn.convolution', (['_t', 'self.w'], {'strides': 'self.strides', 
'dilation_rate': 'self.dilates', 'padding': '"""VALID"""'}), True, 'import tensorflow as tf\n'), (139, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (159, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (171, 'tensorflow.reshape', 'tf.reshape', (['input_var', '[-1, dims]'], {}), True, 'import tensorflow as tf\n'), (174, 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['self.v'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (175, 'tensorflow.matmul', 'tf.matmul', (['input_var', 'v_norm'], {}), True, 'import tensorflow as tf\n'), (176, 'tensorflow.nn.moments', 'tf.nn.moments', (['t'], {'axes': '[0]'}), True, 'import tensorflow as tf\n'), (177, 'tensorflow.sqrt', 'tf.sqrt', (['(var + self.epsilon)'], {}), True, 'import tensorflow as tf\n'), (180, 'tensorflow.is_nan', 'tf.is_nan', (['self.g'], {}), True, 'import tensorflow as tf\n'), (183, 'tensorflow.control_dependencies', 'tf.control_dependencies', (['init_ops'], {}), True, 'import tensorflow as tf\n'), (195, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (207, 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['_t', 'self.w'], {'data_format': '"""NHWC"""', 'strides': '[1, 1, 1, 1]', 'padding': '"""VALID"""'}), True, 'import tensorflow as tf\n'), (218, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (234, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (253, 'tensorflow.nn.conv2d_transpose', 'tf.nn.conv2d_transpose', (['input_var', 'self.w'], {'output_shape': 'shapes', 'data_format': 'self.data_format', 'strides': 'self.strides', 'padding': '"""SAME"""'}), True, 'import tensorflow as tf\n'), (263, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (281, 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['self.v'], {'axis': '[0, 1, 3]'}), True, 'import tensorflow as tf\n'), (282, 'tensorflow.nn.conv2d_transpose', 'tf.nn.conv2d_transpose', (['input_var', 'v_norm'], {'output_shape': 'shapes', 'strides': 'self.strides', 'padding': '"""SAME"""', 'data_format': '"""NHWC"""'}), True, 'import tensorflow as tf\n'), (287, 'tensorflow.nn.moments', 'tf.nn.moments', (['t'], {'axes': '[0, 1, 2]'}), True, 'import tensorflow as tf\n'), (288, 'tensorflow.sqrt', 'tf.sqrt', (['(var + self.epsilon)'], {}), True, 'import tensorflow as tf\n'), (291, 'tensorflow.is_nan', 'tf.is_nan', (['self.g'], {}), True, 'import tensorflow as tf\n'), (294, 'tensorflow.control_dependencies', 'tf.control_dependencies', (['init_ops'], {}), True, 'import tensorflow as tf\n'), (341, 'tensorflow.sqrt', 'tf.sqrt', (['(var + self.epsilon)'], {}), True, 'import tensorflow as tf\n'), (346, 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['(ret * self.gamma)', 'self.beta'], {'data_format': 'self.data_format'}), True, 'import tensorflow as tf\n'), (362, 'tensorflow.sqrt', 'tf.sqrt', (['(var + self.epsilon)'], {}), True, 'import tensorflow as tf\n'), (371, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (380, 'tensorflow.variable_scope', 'tf.variable_scope', (['self.scope'], {}), True, 'import tensorflow as tf\n'), (381, 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['input_var'], {'axis': 'self.axis', 'momentum': 'self.momentum', 'epsilon': 'self.epsilon', 'center': 'self.center', 
'scale': 'self.scale', 'training': 'is_training', 'reuse': '(True)', 'name': '"""bn"""'}), True, 'import tensorflow as tf\n'), (23, 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['input_var', 'w'], {'use_cudnn_on_gpu': '(True)', 'data_format': '"""NCHW"""', 'strides': 'self.strides', 'padding': 'self.padding'}), True, 'import tensorflow as tf\n'), (29, 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['input_var', 'w'], {'data_format': '"""NHWC"""', 'strides': 'self.strides', 'padding': 'self.padding'}), True, 'import tensorflow as tf\n'), (58, 'tensorflow.assign', 'tf.assign', (['self.g', '(1 / std)'], {}), True, 'import tensorflow as tf\n'), (58, 'tensorflow.assign', 'tf.assign', (['self.b', '(-1.0 * mu / std)'], {}), True, 'import tensorflow as tf\n'), (64, 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['self.v'], {'axis': '[0, 1, 2]'}), True, 'import tensorflow as tf\n'), (66, 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['input_var', 'w'], {'data_format': '"""NHWC"""', 'strides': 'self.strides', 'padding': 'self.padding'}), True, 'import tensorflow as tf\n'), (153, 'tensorflow.matmul', 'tf.matmul', (['input_var', 'w'], {}), True, 'import tensorflow as tf\n'), (178, 'tensorflow.assign', 'tf.assign', (['self.g', '(1 / std)'], {}), True, 'import tensorflow as tf\n'), (178, 'tensorflow.assign', 'tf.assign', (['self.b', '(-1.0 * mu / std)'], {}), True, 'import tensorflow as tf\n'), (184, 'tensorflow.expand_dims', 'tf.expand_dims', (['self.g'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (184, 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['self.v'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (185, 'tensorflow.matmul', 'tf.matmul', (['input_var', 'w'], {}), True, 'import tensorflow as tf\n'), (289, 'tensorflow.assign', 'tf.assign', (['self.g', '(1 / std)'], {}), True, 'import tensorflow as tf\n'), (289, 'tensorflow.assign', 'tf.assign', (['self.b', '(-1.0 * mu / std)'], {}), True, 'import tensorflow as tf\n'), (296, 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['self.v'], {'axis': '[0, 1, 3]'}), True, 'import tensorflow as tf\n'), (298, 'tensorflow.nn.conv2d_transpose', 'tf.nn.conv2d_transpose', (['input_var', 'w'], {'output_shape': 'shapes', 'strides': 'self.strides', 'padding': '"""SAME"""', 'data_format': '"""NHWC"""'}), True, 'import tensorflow as tf\n'), (327, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (372, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""bn"""'], {}), True, 'import tensorflow as tf\n'), (9, 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': 'stddev'}), True, 'import tensorflow as tf\n'), (10, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (41, 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': 'stddev'}), True, 'import tensorflow as tf\n'), (80, 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': 'stddev'}), True, 'import tensorflow as tf\n'), (81, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (101, 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': 'stddev'}), True, 'import tensorflow as tf\n'), (102, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (124, 'tensorflow.truncated_normal_initializer', 
'tf.truncated_normal_initializer', ([], {'stddev': 'stddev'}), True, 'import tensorflow as tf\n'), (125, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (141, 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'stddev'}), True, 'import tensorflow as tf\n'), (143, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (150, 'tensorflow.shape', 'tf.shape', (['input_var'], {}), True, 'import tensorflow as tf\n'), (151, 'tensorflow.reshape', 'tf.reshape', (['input_var', '[-1, dims]'], {}), True, 'import tensorflow as tf\n'), (161, 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'stddev'}), True, 'import tensorflow as tf\n'), (170, 'tensorflow.shape', 'tf.shape', (['input_var'], {}), True, 'import tensorflow as tf\n'), (197, 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'stddev'}), True, 'import tensorflow as tf\n'), (198, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (236, 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'stddev'}), True, 'import tensorflow as tf\n'), (237, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (266, 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': 'stddev'}), True, 'import tensorflow as tf\n'), (278, 'tensorflow.shape', 'tf.shape', (['self.b'], {}), True, 'import tensorflow as tf\n'), (248, 'tensorflow.shape', 'tf.shape', (['self.b'], {}), True, 'import tensorflow as tf\n'), (250, 'tensorflow.shape', 'tf.shape', (['self.b'], {}), True, 'import tensorflow as tf\n'), (328, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(1.0)'], {}), True, 'import tensorflow as tf\n'), (329, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (373, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(1.0)'], {}), True, 'import tensorflow as tf\n'), (374, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (375, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (376, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(1.0)'], {}), True, 'import tensorflow as tf\n'), (64, 'tensorflow.shape', 'tf.shape', (['self.v'], {}), True, 'import tensorflow as tf\n'), (296, 'tensorflow.shape', 'tf.shape', (['self.v'], {}), True, 'import tensorflow as tf\n')]
Abdumaleek/infinity-mirror
b493c5602d9e4bcf374b748e9b80e7c85be54a88
from __future__ import division from __future__ import print_function from src.autoencoders.evaluation import get_roc_score, clustering_latent_space, get_prob_mat_from_emb from src.autoencoders.input_data import load_data, load_label from src.autoencoders.kcore import compute_kcore, expand_embedding from src.autoencoders.model import * from src.autoencoders.optimizer import OptimizerAE, OptimizerVAE from src.autoencoders.preprocessing import * import networkx as nx import numpy as np from collections import namedtuple import os import scipy.sparse as sp import tensorflow as tf import time os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) flags = namedtuple('FLAGS', ['dataset', 'task', 'model', 'dropout', 'epochs', 'features', 'learning_rate', 'hidden', 'dimension', 'nb_run', 'prop_val', 'prop_test', 'validation', 'verbose', 'kcore', 'k', 'nb_iterations']) FLAGS = flags('custom', 'link_prediction', 'gcn_ae', 0., 200, False, 0.01, 32, 16, 1, 5., 10., False, True, False, 2, 10) def fit_model(g, model_name): # Lists to collect average results mean_roc = [] mean_ap = [] mean_time = [] # Load graph dataset adj_init = nx.adjacency_matrix(g) features_init = sp.eye(g.order(), g.size()) print(f"Loading data... {g.name} n: {g.order()}, m: {g.size()}") # The entire training+test process is repeated FLAGS.nb_run times for i in range(FLAGS.nb_run): if FLAGS.task == 'link_prediction' : print("Masking test edges...") # Edge Masking for Link Prediction: compute Train/Validation/Test set while True: try: adj, val_edges, val_edges_false, test_edges, test_edges_false = \ mask_test_edges(adj_init, FLAGS.prop_test, FLAGS.prop_val) except Exception: continue else: break else: raise ValueError('Undefined task!') # Start computation of running times t_start = time.time() # Preprocessing and initialization print("Preprocessing and Initializing...") # Compute number of nodes num_nodes = adj.shape[0] # If features are not used, replace feature matrix by identity matrix if not FLAGS.features: features = sp.identity(adj.shape[0]) # Preprocessing on node features features = sparse_to_tuple(features) num_features = features[2][1] features_nonzero = features[1].shape[0] # Define placeholders placeholders = { 'features': tf.sparse_placeholder(tf.float32), 'adj': tf.sparse_placeholder(tf.float32), 'adj_orig': tf.sparse_placeholder(tf.float32), 'dropout': tf.placeholder_with_default(0., shape = ()) } # Create model model = None if model_name == 'gcn_ae': # Standard Graph Autoencoder model = GCNModelAE(placeholders, num_features, features_nonzero) elif model_name == 'gcn_vae': # Standard Graph Variational Autoencoder model = GCNModelVAE(placeholders, num_features, num_nodes, features_nonzero) elif model_name == 'linear_ae': # Linear Graph Autoencoder model = LinearModelAE(placeholders, num_features, features_nonzero) elif model_name == 'linear_vae': # Linear Graph Variational Autoencoder model = LinearModelVAE(placeholders, num_features, num_nodes, features_nonzero) elif model_name == 'deep_gcn_ae': # Deep (3-layer GCN) Graph Autoencoder model = DeepGCNModelAE(placeholders, num_features, features_nonzero) elif model_name == 'deep_gcn_vae': # Deep (3-layer GCN) Graph Variational Autoencoder model = DeepGCNModelVAE(placeholders, num_features, num_nodes, features_nonzero) else: raise ValueError('Undefined model!') # Optimizer pos_weight = float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum() norm = adj.shape[0] * adj.shape[0] / float((adj.shape[0] * adj.shape[0] - 
                                                    adj.sum()) * 2)
        with tf.name_scope('optimizer'):
            # Optimizer for Non-Variational Autoencoders
            if model_name in ('gcn_ae', 'linear_ae', 'deep_gcn_ae'):
                opt = OptimizerAE(preds = model.reconstructions,
                                  labels = tf.reshape(tf.sparse_tensor_to_dense(placeholders['adj_orig'],
                                                                                 validate_indices = False), [-1]),
                                  pos_weight = pos_weight,
                                  norm = norm)
            # Optimizer for Variational Autoencoders
            elif model_name in ('gcn_vae', 'linear_vae', 'deep_gcn_vae'):
                opt = OptimizerVAE(preds = model.reconstructions,
                                   labels = tf.reshape(tf.sparse_tensor_to_dense(placeholders['adj_orig'],
                                                                                  validate_indices = False), [-1]),
                                   model = model,
                                   num_nodes = num_nodes,
                                   pos_weight = pos_weight,
                                   norm = norm)

        # Normalization and preprocessing on adjacency matrix
        adj_norm = preprocess_graph(adj)
        adj_label = sparse_to_tuple(adj + sp.eye(adj.shape[0]))

        # Initialize TF session
        sess = tf.Session()
        sess.run(tf.global_variables_initializer())

        # Model training
        print(f"Training {model_name}...")

        t = time.time()
        print_every = 50
        for epoch in range(FLAGS.epochs):
            # Construct feed dictionary
            feed_dict = construct_feed_dict(adj_norm, adj_label, features, placeholders)
            feed_dict.update({placeholders['dropout']: FLAGS.dropout})
            # Weights update
            outs = sess.run([opt.opt_op, opt.cost, opt.accuracy],
                            feed_dict = feed_dict)
            # Compute average loss
            avg_cost = outs[1]
            if epoch > 0 and epoch % print_every == 0 and FLAGS.verbose:
                # Display epoch information
                print("Epoch:", '%04d' % (epoch), "train_loss=", "{:.5f}".format(avg_cost),
                      "time/epoch: {:.5f}s".format((time.time() - t) / print_every))
                t = time.time()  # reset the clock
                if not FLAGS.kcore and FLAGS.validation and FLAGS.task == 'link_prediction':
                    feed_dict.update({placeholders['dropout']: 0})
                    emb = sess.run(model.z_mean, feed_dict = feed_dict)
                    feed_dict.update({placeholders['dropout']: FLAGS.dropout})
                    val_roc, val_ap = get_roc_score(val_edges, val_edges_false, emb)
                    print("val_roc=", "{:.5f}".format(val_roc), "val_ap=", "{:.5f}".format(val_ap))

        # Flag to compute Graph AE/VAE training time
        t_model = time.time()

        # Get embedding from model
        emb = sess.run(model.z_mean, feed_dict = feed_dict)
        mean_time.append(time.time() - t_start)

        # Test model
        print("Testing model...")
        # Link Prediction: classification edges/non-edges
        if FLAGS.task == 'link_prediction':
            # Get ROC and AP scores
            roc_score, ap_score = get_roc_score(test_edges, test_edges_false, emb)
            # Report scores
            mean_roc.append(roc_score)
            mean_ap.append(ap_score)

        sess.close()  # close the session and free up resources

    ### SS: compute final graph
    prob_mat, thresh_mat = get_prob_mat_from_emb(emb)
    return prob_mat, thresh_mat


if __name__ == '__main__':
    g = nx.karate_club_graph()
    g.name = 'karate'
    model_name = 'gcn_ae'
    prob_mat, thresh_mat = fit_model(g, model_name)  # fit_model returns a tuple; unpack it
    gen_g = nx.from_numpy_matrix(prob_mat, create_using=nx.Graph())
    print(f'{g.name} orig: n={g.order()} m={g.size()} | gen: n={gen_g.order()} m={gen_g.size()}')
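One way to turn the returned probability matrix into a concrete graph is to draw each edge as an independent Bernoulli trial instead of feeding raw probabilities straight to nx.from_numpy_matrix as the __main__ block does. A hedged sketch; the helper name and the seeding are chosen here and are not part of the repo:

import numpy as np
import networkx as nx

def sample_graph(prob_mat, seed=None):
    # Bernoulli draw per potential edge, then keep the upper triangle and
    # symmetrize, so the result is a simple undirected graph with no self-loops.
    rng = np.random.default_rng(seed)
    draws = rng.binomial(1, np.clip(prob_mat, 0.0, 1.0))
    upper = np.triu(draws, k=1)
    return nx.from_numpy_array(upper + upper.T)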
[ "tensorflow.sparse_placeholder", "scipy.sparse.eye", "tensorflow.placeholder_with_default", "tensorflow.sparse_tensor_to_dense", "tensorflow.compat.v1.logging.set_verbosity", "tensorflow.global_variables_initializer", "scipy.sparse.identity", "tensorflow.name_scope", "tensorflow.Session" ]
src/autoencoders/fit.py
[(18, 'tensorflow.compat.v1.logging.set_verbosity', 'tf.compat.v1.logging.set_verbosity', (['tf.compat.v1.logging.ERROR'], {}), True, 'import tensorflow as tf\n'), (20, 'collections.namedtuple', 'namedtuple', (['"""FLAGS"""', "['dataset', 'task', 'model', 'dropout', 'epochs', 'features',\n 'learning_rate', 'hidden', 'dimension', 'nb_run', 'prop_val',\n 'prop_test', 'validation', 'verbose', 'kcore', 'k', 'nb_iterations']"], {}), False, 'from collections import namedtuple\n'), (32, 'networkx.adjacency_matrix', 'nx.adjacency_matrix', (['g'], {}), True, 'import networkx as nx\n'), (179, 'src.autoencoders.evaluation.get_prob_mat_from_emb', 'get_prob_mat_from_emb', (['emb'], {}), False, 'from src.autoencoders.evaluation import get_roc_score, clustering_latent_space, get_prob_mat_from_emb\n'), (183, 'networkx.karate_club_graph', 'nx.karate_club_graph', ([], {}), True, 'import networkx as nx\n'), (54, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (129, 'tensorflow.Session', 'tf.Session', ([], {}), True, 'import tensorflow as tf\n'), (135, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (160, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (62, 'scipy.sparse.identity', 'sp.identity', (['adj.shape[0]'], {}), True, 'import scipy.sparse as sp\n'), (70, 'tensorflow.sparse_placeholder', 'tf.sparse_placeholder', (['tf.float32'], {}), True, 'import tensorflow as tf\n'), (71, 'tensorflow.sparse_placeholder', 'tf.sparse_placeholder', (['tf.float32'], {}), True, 'import tensorflow as tf\n'), (72, 'tensorflow.sparse_placeholder', 'tf.sparse_placeholder', (['tf.float32'], {}), True, 'import tensorflow as tf\n'), (73, 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['(0.0)'], {'shape': '()'}), True, 'import tensorflow as tf\n'), (106, 'tensorflow.name_scope', 'tf.name_scope', (['"""optimizer"""'], {}), True, 'import tensorflow as tf\n'), (130, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (172, 'src.autoencoders.evaluation.get_roc_score', 'get_roc_score', (['test_edges', 'test_edges_false', 'emb'], {}), False, 'from src.autoencoders.evaluation import get_roc_score, clustering_latent_space, get_prob_mat_from_emb\n'), (187, 'networkx.Graph', 'nx.Graph', ([], {}), True, 'import networkx as nx\n'), (126, 'scipy.sparse.eye', 'sp.eye', (['adj.shape[0]'], {}), True, 'import scipy.sparse as sp\n'), (152, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (165, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (157, 'src.autoencoders.evaluation.get_roc_score', 'get_roc_score', (['val_edges', 'val_edges_false', 'emb'], {}), False, 'from src.autoencoders.evaluation import get_roc_score, clustering_latent_space, get_prob_mat_from_emb\n'), (110, 'tensorflow.sparse_tensor_to_dense', 'tf.sparse_tensor_to_dense', (["placeholders['adj_orig']"], {'validate_indices': '(False)'}), True, 'import tensorflow as tf\n'), (117, 'tensorflow.sparse_tensor_to_dense', 'tf.sparse_tensor_to_dense', (["placeholders['adj_orig']"], {'validate_indices': '(False)'}), True, 'import tensorflow as tf\n'), (151, 'time.time', 'time.time', ([], {}), False, 'import time\n')]
LiquidInkCo/ImageClassifier
c0d471a55a70b3118178488db3c005a9277baade
"""Python wrappers around TensorFlow ops. This file is MACHINE GENERATED! Do not edit. """ import collections as _collections from google.protobuf import text_format as _text_format from tensorflow.core.framework import op_def_pb2 as _op_def_pb2 # Needed to trigger the call to _set_call_cpp_shape_fn. from tensorflow.python.framework import common_shapes as _common_shapes from tensorflow.python.framework import op_def_registry as _op_def_registry from tensorflow.python.framework import ops as _ops from tensorflow.python.framework import op_def_library as _op_def_library _hard_routing_function_outputs = ["path_probability", "path"] _HardRoutingFunctionOutput = _collections.namedtuple( "HardRoutingFunction", _hard_routing_function_outputs) def hard_routing_function(input_data, tree_parameters, tree_biases, max_nodes, tree_depth, name=None): r""" Chooses a single path for each instance in `input_data` and returns the leaf the probability of the path and the path taken. tree_depth: The depth of the decision tree. input_data: The training batch's features as a 2-d tensor; `input_data[i][j]` gives the j-th feature of the i-th input. tree_parameters: `tree_parameters[i]` gives the weight of the logistic regression model that translates from node features to probabilities. tree_biases: `tree_biases[i]` gives the bias of the logistic regression model that translates from node features to probabilities. path_probility: `path_probability[i]` gives the probability of reaching each node in `path[i]`. path: `path[i][j]` gives the jth node in the path taken by the ith data instance. Args: input_data: A `Tensor` of type `float32`. tree_parameters: A `Tensor` of type `float32`. tree_biases: A `Tensor` of type `float32`. max_nodes: An `int`. tree_depth: An `int`. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (path_probability, path). path_probability: A `Tensor` of type `float32`. path: A `Tensor` of type `int32`. """ result = _op_def_lib.apply_op("HardRoutingFunction", input_data=input_data, tree_parameters=tree_parameters, tree_biases=tree_biases, max_nodes=max_nodes, tree_depth=tree_depth, name=name) return _HardRoutingFunctionOutput._make(result) _k_feature_gradient_outputs = ["routing_gradient", "data_gradient", "weight_gradient"] _KFeatureGradientOutput = _collections.namedtuple( "KFeatureGradient", _k_feature_gradient_outputs) def k_feature_gradient(input_data, tree_parameters, tree_biases, routes, layer_num, random_seed, name=None): r""" Computes the derivative of the routing loss with respect to each decision node. Each decision node is constrained to make a decision based on only k features. layer_num: The layer number of this tree. random_seed: The base random seed. input_data: The training batch's features as a 2-d tensor; `input_data[i][j]` gives the j-th feature of the i-th input. tree_parameters: `tree_parameters[i]` gives the weight of the logistic regression model that translates from node features to probabilities. tree_biases: `tree_biases[i]` gives the bias of the logistic regression model that translates from node features to probabilities. routes: The routes computed by routing_function_op. routing_gradient: `routing_gradient` provides du / df, where u is the routing function and f is the (vector of) decision functions. A decision function f_i computes the routing decision at node i. data_gradient: `data_gradient` provides df / dx, where f is the (vector of) decision functions and x is a batch of data. 
weights_gradient: `weights_gradient` provides df / dw, where f is the (vector of) decision functions and w is the matrix of parameters that determine how instances are routed through a tree. f_i, the decision function at node i, is parameterized by t_i (parameters) and b_i (bias) and takes data x as input. This op is called in training_ops.py to compute du / df, and we use that to compute du / dx = du / df * df / dx, du / dt = du / df * df / dt, and du / db = du / df * df / db. Args: input_data: A `Tensor` of type `float32`. tree_parameters: A `Tensor` of type `float32`. tree_biases: A `Tensor` of type `float32`. routes: A `Tensor` of type `float32`. layer_num: An `int`. random_seed: An `int`. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (routing_gradient, data_gradient, weight_gradient). routing_gradient: A `Tensor` of type `float32`. data_gradient: A `Tensor` of type `float32`. weight_gradient: A `Tensor` of type `float32`. """ result = _op_def_lib.apply_op("KFeatureGradient", input_data=input_data, tree_parameters=tree_parameters, tree_biases=tree_biases, routes=routes, layer_num=layer_num, random_seed=random_seed, name=name) return _KFeatureGradientOutput._make(result) def k_feature_routing_function(input_data, tree_parameters, tree_biases, layer_num, max_nodes, num_features_per_node, random_seed, name=None): r""" Returns the probability that each input will reach each leaf node. Each decision is made based on k features. layer_num: The layer number of this tree. max_nodes: The number of nodes in the tree. num_features_per_node: The number of features each node can use to make a decision. random_seed: The base random seed. input_data: The training batch's features as a 2-d tensor; `input_data[i][j]` gives the j-th feature of the i-th input. tree_parameters: `tree_parameters[i]` gives the weight of the logistic regression model that translates from node features to probabilities. tree_biases: `tree_biases[i]` gives the bias of the logistic regression model that translates from node features to probabilities. tree_features: `tree_features[i]` gives the decision feature for node i. probabilities: `probabilities[i][j]` is the probability that input i will reach node j. Args: input_data: A `Tensor` of type `float32`. tree_parameters: A `Tensor` of type `float32`. tree_biases: A `Tensor` of type `float32`. layer_num: An `int`. max_nodes: An `int`. num_features_per_node: An `int`. random_seed: An `int`. name: A name for the operation (optional). Returns: A `Tensor` of type `float32`. """ result = _op_def_lib.apply_op("KFeatureRoutingFunction", input_data=input_data, tree_parameters=tree_parameters, tree_biases=tree_biases, layer_num=layer_num, max_nodes=max_nodes, num_features_per_node=num_features_per_node, random_seed=random_seed, name=name) return result def routing_function(input_data, tree_parameters, tree_biases, max_nodes, name=None): r""" Returns the probability that each input will reach each leaf node. max_nodes: The number of nodes in the tree. input_data: The training batch's features as a 2-d tensor; `input_data[i][j]` gives the j-th feature of the i-th input. tree_parameters: `tree_parameters[i]` gives the weight of the logistic regression model that translates from node features to probabilities. tree_biases: `tree_biases[i]` gives the bias of the logistic regression model that translates from node features to probabilities. probabilities: `probabilities[i][j]` is the probability that input i will reach node j. 
  Args:
    input_data: A `Tensor` of type `float32`.
    tree_parameters: A `Tensor` of type `float32`.
    tree_biases: A `Tensor` of type `float32`.
    max_nodes: An `int`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  result = _op_def_lib.apply_op("RoutingFunction", input_data=input_data,
                                tree_parameters=tree_parameters,
                                tree_biases=tree_biases, max_nodes=max_nodes,
                                name=name)
  return result


def routing_gradient(input_data, tree_parameters, tree_biases, routes,
                     max_nodes, name=None):
  r"""
  Computes the derivative of the routing loss with respect to each decision
  node.

  max_nodes: The number of nodes in the tree.

  tree_parameters: `tree_parameters[i]` gives the weight of the logistic
   regression model that translates from node features to probabilities.
  tree_biases: `tree_biases[i]` gives the bias of the logistic
   regression model that translates from node features to probabilities.
  routes: The routes computed by routing_function_op.

  routing_gradient: `routing_gradient` provides du / df, where u is the routing
   function and f is the (vector of) decision functions. A decision function
   f_i computes the routing decision at node i.

  f_i is parameterized by t_i (parameters) and b_i (bias) and takes data x as
  input. This op is called in training_ops.py to compute du / df, and we use
  that to compute

    du / dx = du / df * df / dx,
    du / dt = du / df * df / dt, and
    du / db = du / df * df / db.

  Args:
    input_data: A `Tensor` of type `float32`.
    tree_parameters: A `Tensor` of type `float32`.
    tree_biases: A `Tensor` of type `float32`.
    routes: A `Tensor` of type `float32`.
    max_nodes: An `int`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  result = _op_def_lib.apply_op("RoutingGradient", input_data=input_data,
                                tree_parameters=tree_parameters,
                                tree_biases=tree_biases, routes=routes,
                                max_nodes=max_nodes, name=name)
  return result


_stochastic_hard_routing_function_outputs = ["path_probability", "path"]

_StochasticHardRoutingFunctionOutput = _collections.namedtuple(
    "StochasticHardRoutingFunction",
    _stochastic_hard_routing_function_outputs)


def stochastic_hard_routing_function(input_data, tree_parameters, tree_biases,
                                     tree_depth, random_seed, name=None):
  r"""
  Samples a path for each instance in `input_data` and returns the
  probability of the path and the path taken.

  tree_depth: The depth of the decision tree.
  random_seed: The base random seed.

  input_data: The training batch's features as a 2-d tensor;
   `input_data[i][j]` gives the j-th feature of the i-th input.
  tree_parameters: `tree_parameters[i]` gives the weight of the logistic
   regression model that translates from node features to probabilities.
  tree_biases: `tree_biases[i]` gives the bias of the logistic
   regression model that translates from node features to probabilities.
  path_probability: `path_probability[i]` gives the probability of reaching each
   node in `path[i]`.
  path: `path[i][j]` gives the jth node in the path taken by the ith data
   instance.

  Args:
    input_data: A `Tensor` of type `float32`.
    tree_parameters: A `Tensor` of type `float32`.
    tree_biases: A `Tensor` of type `float32`.
    tree_depth: An `int`.
    random_seed: An `int`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (path_probability, path).

    path_probability: A `Tensor` of type `float32`.
    path: A `Tensor` of type `int32`.
""" result = _op_def_lib.apply_op("StochasticHardRoutingFunction", input_data=input_data, tree_parameters=tree_parameters, tree_biases=tree_biases, tree_depth=tree_depth, random_seed=random_seed, name=name) return _StochasticHardRoutingFunctionOutput._make(result) _stochastic_hard_routing_gradient_outputs = ["routing_gradient", "data_gradient", "parameter_gradient", "bias_gradient"] _StochasticHardRoutingGradientOutput = _collections.namedtuple( "StochasticHardRoutingGradient", _stochastic_hard_routing_gradient_outputs) def stochastic_hard_routing_gradient(input_data, tree_parameters, tree_biases, path_probability, path, tree_depth, name=None): r""" Computes the derivative of the routing loss with respect to each decision node. tree_depth: The depth of the decision tree. input_data: The training batch's features as a 2-d tensor; `input_data[i][j]` gives the j-th feature of the i-th input tree_parameters: `tree_parameters[i]` gives the weight of the logistic regression model that translates from node features to probabilities. tree_biases: `tree_biases[i]` gives the bias of the logistic regression model that translates from node features to probabilities. path_probility: `path_probability[i]` gives the probability of reaching each node in `path[i]`. path: `path[i][j]` gives the jth node in the path taken by the ith data instance. routing_gradient: `routing_gradient` provides du / df, where u is the routing function and f is the (vector of) decision functions. A decision function f_i computes the routing decision at node i. data_gradient: `data_gradient` provides df / dx, where f is the (vector of) decision functions and x is a batch of data. parameter_gradient: `parameter_gradient` provides df / dw, where f is the (vector of) decision functions and w is the matrix of parameters that determine how instances are routed through a tree. bias_gradient: `bias_gradient` provides df / db, where f is the (vector of) decision functions and b is the vector of bias parameters that determine how instances are routed through a tree. f_i is parameterized by t_i (parameters) and b_i (bias) and takes data x as input. This op is called in training_ops.py to compute du / df, and we use that to compute du / dx = du / df * df / dx, du / dt = du / df * df / dt, and du / db = du / df * df / db. Args: input_data: A `Tensor` of type `float32`. tree_parameters: A `Tensor` of type `float32`. tree_biases: A `Tensor` of type `float32`. path_probability: A `Tensor` of type `float32`. path: A `Tensor` of type `int32`. tree_depth: An `int`. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (routing_gradient, data_gradient, parameter_gradient, bias_gradient). routing_gradient: A `Tensor` of type `float32`. data_gradient: A `Tensor` of type `float32`. parameter_gradient: A `Tensor` of type `float32`. bias_gradient: A `Tensor` of type `float32`. """ result = _op_def_lib.apply_op("StochasticHardRoutingGradient", input_data=input_data, tree_parameters=tree_parameters, tree_biases=tree_biases, path_probability=path_probability, path=path, tree_depth=tree_depth, name=name) return _StochasticHardRoutingGradientOutput._make(result) def unpack_path(path, path_values, name=None): r""" Takes a batch of paths through a tree and a batch of values along those paths and returns a batch_size by num_nodes encoding of the path values. path: `path[i][j]` gives the jth node in the path taken by the ith data instance. 
    path_values: `path_values[i][j]` gives the value associated with node j in
      the path defined by the ith instance
    unpacked_paths: `unpacked_paths[i][path[i][k]]` is path_values[i][k] for k
      in [0, tree_depth).  All other elements of unpacked_paths are zero.

  Args:
    path: A `Tensor` of type `int32`.
    path_values: A `Tensor` of type `float32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  result = _op_def_lib.apply_op("UnpackPath", path=path,
                                path_values=path_values, name=name)
  return result


def _InitOpDefLibrary():
  op_list = _op_def_pb2.OpList()
  _text_format.Merge(_InitOpDefLibrary.op_list_ascii, op_list)
  _op_def_registry.register_op_list(op_list)
  op_def_lib = _op_def_library.OpDefLibrary()
  op_def_lib.add_op_list(op_list)
  return op_def_lib


_InitOpDefLibrary.op_list_ascii = """op {
  name: "HardRoutingFunction"
  input_arg {
    name: "input_data"
    type: DT_FLOAT
  }
  input_arg {
    name: "tree_parameters"
    type: DT_FLOAT
  }
  input_arg {
    name: "tree_biases"
    type: DT_FLOAT
  }
  output_arg {
    name: "path_probability"
    type: DT_FLOAT
  }
  output_arg {
    name: "path"
    type: DT_INT32
  }
  attr {
    name: "max_nodes"
    type: "int"
  }
  attr {
    name: "tree_depth"
    type: "int"
  }
}
op {
  name: "KFeatureGradient"
  input_arg {
    name: "input_data"
    type: DT_FLOAT
  }
  input_arg {
    name: "tree_parameters"
    type: DT_FLOAT
  }
  input_arg {
    name: "tree_biases"
    type: DT_FLOAT
  }
  input_arg {
    name: "routes"
    type: DT_FLOAT
  }
  output_arg {
    name: "routing_gradient"
    type: DT_FLOAT
  }
  output_arg {
    name: "data_gradient"
    type: DT_FLOAT
  }
  output_arg {
    name: "weight_gradient"
    type: DT_FLOAT
  }
  attr {
    name: "layer_num"
    type: "int"
  }
  attr {
    name: "random_seed"
    type: "int"
  }
}
op {
  name: "KFeatureRoutingFunction"
  input_arg {
    name: "input_data"
    type: DT_FLOAT
  }
  input_arg {
    name: "tree_parameters"
    type: DT_FLOAT
  }
  input_arg {
    name: "tree_biases"
    type: DT_FLOAT
  }
  output_arg {
    name: "probabilities"
    type: DT_FLOAT
  }
  attr {
    name: "layer_num"
    type: "int"
  }
  attr {
    name: "max_nodes"
    type: "int"
  }
  attr {
    name: "num_features_per_node"
    type: "int"
  }
  attr {
    name: "random_seed"
    type: "int"
  }
}
op {
  name: "RoutingFunction"
  input_arg {
    name: "input_data"
    type: DT_FLOAT
  }
  input_arg {
    name: "tree_parameters"
    type: DT_FLOAT
  }
  input_arg {
    name: "tree_biases"
    type: DT_FLOAT
  }
  output_arg {
    name: "probabilities"
    type: DT_FLOAT
  }
  attr {
    name: "max_nodes"
    type: "int"
  }
}
op {
  name: "RoutingGradient"
  input_arg {
    name: "input_data"
    type: DT_FLOAT
  }
  input_arg {
    name: "tree_parameters"
    type: DT_FLOAT
  }
  input_arg {
    name: "tree_biases"
    type: DT_FLOAT
  }
  input_arg {
    name: "routes"
    type: DT_FLOAT
  }
  output_arg {
    name: "routing_gradient"
    type: DT_FLOAT
  }
  attr {
    name: "max_nodes"
    type: "int"
  }
}
op {
  name: "StochasticHardRoutingFunction"
  input_arg {
    name: "input_data"
    type: DT_FLOAT
  }
  input_arg {
    name: "tree_parameters"
    type: DT_FLOAT
  }
  input_arg {
    name: "tree_biases"
    type: DT_FLOAT
  }
  output_arg {
    name: "path_probability"
    type: DT_FLOAT
  }
  output_arg {
    name: "path"
    type: DT_INT32
  }
  attr {
    name: "tree_depth"
    type: "int"
  }
  attr {
    name: "random_seed"
    type: "int"
  }
}
op {
  name: "StochasticHardRoutingGradient"
  input_arg {
    name: "input_data"
    type: DT_FLOAT
  }
  input_arg {
    name: "tree_parameters"
    type: DT_FLOAT
  }
  input_arg {
    name: "tree_biases"
    type: DT_FLOAT
  }
  input_arg {
    name: "path_probability"
    type: DT_FLOAT
  }
  input_arg {
    name: "path"
    type: DT_INT32
  }
  output_arg {
    name: "routing_gradient"
    type: DT_FLOAT
  }
  output_arg {
    name: "data_gradient"
    type: DT_FLOAT
  }
  output_arg {
    name: "parameter_gradient"
    type: DT_FLOAT
  }
  output_arg {
    name: "bias_gradient"
    type: DT_FLOAT
  }
  attr {
    name: "tree_depth"
    type: "int"
  }
}
op {
  name: "UnpackPath"
  input_arg {
    name: "path"
    type: DT_INT32
  }
  input_arg {
    name: "path_values"
    type: DT_FLOAT
  }
  output_arg {
    name: "unpacked_path"
    type: DT_FLOAT
  }
}
"""


_op_def_lib = _InitOpDefLibrary()
[ "tensorflow.core.framework.op_def_pb2.OpList", "tensorflow.python.framework.op_def_registry.register_op_list", "tensorflow.python.framework.op_def_library.OpDefLibrary" ]
python/Lib/site-packages/tensorflow/contrib/tensor_forest/hybrid/ops/gen_training_ops.py
[(20, 'collections.namedtuple', '_collections.namedtuple', (['"""HardRoutingFunction"""', '_hard_routing_function_outputs'], {}), True, 'import collections as _collections\n'), (70, 'collections.namedtuple', '_collections.namedtuple', (['"""KFeatureGradient"""', '_k_feature_gradient_outputs'], {}), True, 'import collections as _collections\n'), (272, 'collections.namedtuple', '_collections.namedtuple', (['"""StochasticHardRoutingFunction"""', '_stochastic_hard_routing_function_outputs'], {}), True, 'import collections as _collections\n'), (328, 'collections.namedtuple', '_collections.namedtuple', (['"""StochasticHardRoutingGradient"""', '_stochastic_hard_routing_gradient_outputs'], {}), True, 'import collections as _collections\n'), (429, 'tensorflow.core.framework.op_def_pb2.OpList', '_op_def_pb2.OpList', ([], {}), True, 'from tensorflow.core.framework import op_def_pb2 as _op_def_pb2\n'), (430, 'google.protobuf.text_format.Merge', '_text_format.Merge', (['_InitOpDefLibrary.op_list_ascii', 'op_list'], {}), True, 'from google.protobuf import text_format as _text_format\n'), (431, 'tensorflow.python.framework.op_def_registry.register_op_list', '_op_def_registry.register_op_list', (['op_list'], {}), True, 'from tensorflow.python.framework import op_def_registry as _op_def_registry\n'), (432, 'tensorflow.python.framework.op_def_library.OpDefLibrary', '_op_def_library.OpDefLibrary', ([], {}), True, 'from tensorflow.python.framework import op_def_library as _op_def_library\n')]
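The UnpackPath docstring in the sample above fully specifies the op's behavior, so a small NumPy reference implementation can make it concrete. This is a hedged sketch of the documented semantics only, not part of the generated wrapper; the function name `unpack_path_reference` and the explicit `num_nodes` argument are illustrative assumptions.

# Reference implementation of the documented UnpackPath semantics:
# unpacked[i][path[i][k]] = path_values[i][k] for k in [0, tree_depth);
# all other entries of unpacked are zero. `num_nodes` is an assumed
# extra argument for illustration (the real op infers output size).
import numpy as np

def unpack_path_reference(path, path_values, num_nodes):
    batch_size, tree_depth = path.shape
    unpacked = np.zeros((batch_size, num_nodes), dtype=np.float32)
    for i in range(batch_size):
        for k in range(tree_depth):
            unpacked[i, path[i, k]] = path_values[i, k]
    return unpacked

# Example: two instances routed through a depth-3 tree with 7 nodes.
path = np.array([[0, 1, 3], [0, 2, 6]], dtype=np.int32)
path_values = np.array([[1.0, 0.7, 0.4], [1.0, 0.3, 0.2]], dtype=np.float32)
print(unpack_path_reference(path, path_values, num_nodes=7))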
wyzh98/BipedalWalker_NUS
7958c1d6c78566211651931369f35668bfbb944e
import tensorflow as tf
import numpy as np
import os
from time import time


class Base:
    def choose_state(self, state, training=True):
        if training:
            a, v = self.sess.run([self.sample_action, self.vf_eval], {self.state: [state]})
        else:
            a, v = self.sess.run([self.eval_action, self.vf_eval], {self.state: [state]})
        return a[0], np.squeeze(v)

    def save_model(self, model_path, step=None):
        save_path = self.saver.save(self.sess, os.path.join(model_path, 'model.ckpt'), global_step=step)
        return save_path

    def restore_model(self, model_path):
        self.saver.restore(self.sess, model_path)
        print('Model restored from', model_path)


class PPO(Base):
    def __init__(self, env, summary_dir='./', gpu=False):
        self.LR = 1e-4
        self.MINIBATCH = 64
        self.EPOCHS = 8
        self.EPSILON = 0.2
        self.EPS_LEN = 100000
        # GPU setup
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False,
                                device_count={'GPU': gpu})
        config.gpu_options.allow_growth = True
        config.gpu_options.per_process_gpu_memory_fraction = 0.5
        # Placeholders
        self.sess = tf.Session(config=config)
        self.s_dim, self.a_dim = env.observation_space.shape, env.action_space.shape[0]
        self.a_bound = (env.action_space.high - env.action_space.low) / 2
        self.actions = tf.placeholder(tf.float32, [None, self.a_dim], 'action')
        self.state = tf.placeholder(tf.float32, [None, self.s_dim[0]], 'state')
        self.advantage = tf.placeholder(tf.float32, [None, 1], 'advantage')
        self.rewards = tf.placeholder(tf.float32, [None, 1], 'discounted_r')
        # Dataset with experience replay
        self.dataset = tf.data.Dataset.from_tensor_slices({'state': self.state, 'actions': self.actions,
                                                           'rewards': self.rewards, 'advantage': self.advantage})
        self.dataset = self.dataset.shuffle(buffer_size=10000)
        self.dataset = self.dataset.batch(self.MINIBATCH)
        self.dataset = self.dataset.cache()
        self.dataset = self.dataset.repeat(self.EPOCHS)
        self.data_iter = self.dataset.make_initializable_iterator()
        batch = self.data_iter.get_next()
        # Call ppo net
        pi_old, pi_old_params = self.build_anet(batch['state'], 'oldpi')
        pi, pi_params = self.build_anet(batch['state'], 'pi')
        pi_eval, _ = self.build_anet(self.state, 'pi', reuse=True)
        vf_old, vf_old_params = self.build_cnet(batch['state'], 'oldvf')
        self.vf, vf_params = self.build_cnet(batch['state'], 'vf')
        self.vf_eval, _ = self.build_cnet(self.state, 'vf', reuse=True)
        self.sample_action = tf.squeeze(pi_eval.sample(1), axis=0)
        self.eval_action = pi_eval.mode()
        self.global_step = tf.train.get_or_create_global_step()
        self.saver = tf.train.Saver()
        # Loss functions and training
        epsilon_decay = tf.train.polynomial_decay(self.EPSILON, self.global_step, self.EPS_LEN, 0.1, power=0)
        ratio = tf.maximum(pi.prob(batch['actions']), 1e-6) / tf.maximum(pi_old.prob(batch['actions']), 1e-6)
        ratio = tf.clip_by_value(ratio, 0, 10)
        surr1 = batch['advantage'] * ratio
        surr2 = batch['advantage'] * tf.clip_by_value(ratio, 1 - epsilon_decay, 1 + epsilon_decay)
        loss_pg = - 2.0 * tf.reduce_mean(tf.minimum(surr1, surr2))
        loss_vf = 0.5 * tf.reduce_mean(tf.square(batch['rewards'] - self.vf))
        loss_entropy = - 0.01 * tf.reduce_mean(pi.entropy())
        loss = loss_pg + loss_vf + loss_entropy
        opt = tf.train.AdamOptimizer(self.LR)
        self.train_op = opt.minimize(loss, global_step=self.global_step, var_list=pi_params + vf_params)
        self.pi_new_params = [oldp.assign(p) for p, oldp in zip(pi_params, pi_old_params)]
        self.vf_new_params = [oldp.assign(p) for p, oldp in zip(vf_params, vf_old_params)]
        self.sess.run(tf.global_variables_initializer())
        # Tensorboard
        if summary_dir is not None:
            self.writer = tf.summary.FileWriter(summary_dir)
            tf.summary.scalar('Loss/Policy', loss_pg)
            tf.summary.scalar('Loss/Value', loss_vf)
            tf.summary.scalar('Loss/Entropy', loss_entropy)
            tf.summary.scalar('Loss/Total', loss)
            tf.summary.scalar('Var/Epsilon', epsilon_decay)
            tf.summary.scalar('Var/Policy Mode', tf.reduce_mean(pi.mode()))
            tf.summary.scalar('Var/Policy Sigma', tf.reduce_mean(pi.stddev()))
            tf.summary.scalar('Var/Value', tf.reduce_mean(self.vf))
            self.summarise = tf.summary.merge(tf.get_collection(tf.GraphKeys.SUMMARIES))

    # AC net
    def build_anet(self, state_in, name, reuse=False):
        reg = tf.contrib.layers.l2_regularizer(1e-3)
        with tf.variable_scope(name, reuse=reuse):
            layer_a1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
            layer_a2 = tf.layers.dense(layer_a1, 256, tf.nn.relu, kernel_regularizer=reg)
            mu = tf.layers.dense(layer_a2, self.a_dim, tf.nn.tanh, kernel_regularizer=reg)
            # sigma = tf.layers.dense(layer_a2, self.a_dim, tf.nn.softplus, kernel_regularizer=reg)
            sigma = tf.get_variable(name='pi_sigma', shape=self.a_dim, initializer=tf.constant_initializer(0.5))
            sigma = tf.clip_by_value(sigma, 0.0, 1.0)
            norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma)
            params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
        return norm_dist, params

    def build_cnet(self, state_in, name, reuse=False):
        reg = tf.contrib.layers.l2_regularizer(1e-3)
        with tf.variable_scope(name, reuse=reuse):
            layer_c1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
            layer_c2 = tf.layers.dense(layer_c1, 256, tf.nn.relu, kernel_regularizer=reg)
            vf = tf.layers.dense(layer_c2, 1, kernel_regularizer=reg)
            params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
        return vf, params

    # Update the network
    def train(self, s, a, r, adv):
        start = time()
        self.sess.run([self.pi_new_params, self.vf_new_params, self.data_iter.initializer],
                      feed_dict={self.state: s, self.actions: a, self.rewards: r, self.advantage: adv})
        while True:
            try:
                summary, step, _ = self.sess.run([self.summarise, self.global_step, self.train_op])
            except tf.errors.OutOfRangeError:
                break
        print('\rTrained in %.3fs. Global step %i' % (time() - start, step+1))
        return summary


class PPO_HC(PPO):
    def build_anet(self, state_in, name, reuse=False):
        reg = tf.contrib.layers.l2_regularizer(1e-3)
        with tf.variable_scope(name, reuse=reuse):
            layer_a1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
            layer_a2 = tf.layers.dense(layer_a1, 256, tf.nn.relu, kernel_regularizer=reg)
            mu = tf.layers.dense(layer_a2, self.a_dim, tf.nn.tanh, kernel_regularizer=reg)
            sigma = tf.layers.dense(layer_a2, self.a_dim, tf.nn.softplus, kernel_regularizer=reg)
            # sigma = tf.get_variable(name='pi_sigma', shape=self.a_dim, initializer=tf.constant_initializer(0.5))
            sigma = tf.clip_by_value(sigma, 0.0, 1.0)
            norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma)
            params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
        return norm_dist, params


class PPO_LSTM(Base):
    def __init__(self, env, summary_dir='./', gpu=False):
        self.LR = 1e-4
        self.MINIBATCH = 64
        self.EPOCHS = 8
        self.EPSILON = 0.2
        self.EPS_LEN = 100000
        # GPU setup
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False,
                                device_count={'GPU': gpu})
        config.gpu_options.allow_growth = True
        config.gpu_options.per_process_gpu_memory_fraction = 0.5
        # Placeholders
        self.sess = tf.Session(config=config)
        self.s_dim, self.a_dim = env.observation_space.shape, env.action_space.shape[0]
        self.a_bound = (env.action_space.high - env.action_space.low) / 2
        self.actions = tf.placeholder(tf.float32, [None, self.a_dim], 'action')
        self.state = tf.placeholder(tf.float32, [None, self.s_dim[0]], 'state')
        self.advantage = tf.placeholder(tf.float32, [None, 1], 'advantage')
        self.rewards = tf.placeholder(tf.float32, [None, 1], 'rewards')
        self.keep_prob = tf.placeholder(tf.float32, name='dropout_keep_prob')
        # Dataset with experience replay
        self.dataset = tf.data.Dataset.from_tensor_slices({'state': self.state, 'actions': self.actions,
                                                           'rewards': self.rewards, 'advantage': self.advantage})
        self.dataset = self.dataset.batch(self.MINIBATCH, drop_remainder=True)
        self.data_iter = self.dataset.make_initializable_iterator()
        batch = self.data_iter.get_next()
        # Call ppo net
        pi_old, pi_old_params, _, _ = self.build_anet(batch['state'], 'oldpi')
        pi, pi_params, self.pi_state_init, self.pi_state_final = self.build_anet(batch['state'], 'pi')
        pi_eval, _, self.pi_eval_state_init, self.pi_eval_state_final = self.build_anet(self.state, 'pi',
                                                                                        reuse=True, batch_size=1)
        vf_old, vf_old_params, _, _ = self.build_cnet(batch['state'], 'oldvf')
        self.vf, vf_params, self.vf_state_init, self.vf_state_final = self.build_cnet(batch['state'], 'vf')
        self.vf_eval, _, self.vf_eval_state_init, self.vf_eval_state_final = self.build_cnet(self.state, 'vf',
                                                                                             reuse=True, batch_size=1)
        self.sample_action = tf.squeeze(pi_eval.sample(1), axis=0)
        self.eval_action = pi_eval.mode()
        self.global_step = tf.train.get_or_create_global_step()
        self.saver = tf.train.Saver()
        # Loss functions and training
        epsilon_decay = tf.train.polynomial_decay(self.EPSILON, self.global_step, self.EPS_LEN, 0.1, power=1)
        ratio = tf.maximum(pi.prob(batch['actions']), 1e-6) / tf.maximum(pi_old.prob(batch['actions']), 1e-6)
        ratio = tf.clip_by_value(ratio, 0, 10)
        surr1 = batch['advantage'] * ratio
        surr2 = batch['advantage'] * tf.clip_by_value(ratio, 1 - epsilon_decay, 1 + epsilon_decay)
        loss_pg = - 2.0 * tf.reduce_mean(tf.minimum(surr1, surr2))
        loss_vf = 0.5 * tf.reduce_mean(tf.square(batch['rewards'] - self.vf))
        loss_entropy = - 0.01 * tf.reduce_mean(pi.entropy())
        loss = loss_pg + loss_vf + loss_entropy
        opt = tf.train.AdamOptimizer(self.LR)
        self.train_op = opt.minimize(loss, global_step=self.global_step, var_list=pi_params + vf_params)
        self.pi_new_params = [oldp.assign(p) for p, oldp in zip(pi_params, pi_old_params)]
        self.vf_new_params = [oldp.assign(p) for p, oldp in zip(vf_params, vf_old_params)]
        self.sess.run(tf.global_variables_initializer())
        # Tensorboard
        if summary_dir is not None:
            self.writer = tf.summary.FileWriter(summary_dir)
            tf.summary.scalar('Loss/Policy', loss_pg)
            tf.summary.scalar('Loss/Value', loss_vf)
            tf.summary.scalar('Loss/Entropy', loss_entropy)
            tf.summary.scalar('Loss/Total', loss)
            tf.summary.scalar('Var/Epsilon', epsilon_decay)
            tf.summary.scalar('Var/Policy Mode', tf.reduce_mean(pi.mode()))
            tf.summary.scalar('Var/Policy Sigma', tf.reduce_mean(pi.stddev()))
            tf.summary.scalar('Var/Value', tf.reduce_mean(self.vf))
            self.summarise = tf.summary.merge(tf.get_collection(tf.GraphKeys.SUMMARIES))

    # AC net
    def build_anet(self, state_in, name, reuse=False, batch_size=64):
        reg = None
        with tf.variable_scope(name, reuse=reuse):
            layer_a1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
            layer_a2 = tf.layers.dense(layer_a1, 256, tf.nn.relu, kernel_regularizer=reg)
            lstm_a = tf.nn.rnn_cell.LSTMCell(num_units=256)
            lstm_a = tf.nn.rnn_cell.DropoutWrapper(lstm_a, output_keep_prob=self.keep_prob)
            state_init_a = lstm_a.zero_state(batch_size=batch_size, dtype=tf.float32)
            lstm_ain = tf.expand_dims(layer_a2, axis=1)
            out_a, state_final_a = tf.nn.dynamic_rnn(cell=lstm_a, inputs=lstm_ain, initial_state=state_init_a)
            cell_out_a = tf.reshape(out_a, [-1, 256])
            mu = tf.layers.dense(cell_out_a, self.a_dim, tf.nn.tanh, kernel_regularizer=reg)
            sigma = tf.layers.dense(cell_out_a, self.a_dim, tf.nn.softplus, kernel_regularizer=reg)
            # sigma = tf.get_variable(name='pi_sigma', shape=self.a_dim, initializer=tf.constant_initializer(0.5))
            sigma = tf.clip_by_value(sigma, 0.0, 1.0)
            norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma)
            params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
        return norm_dist, params, state_init_a, state_final_a

    def build_cnet(self, state_in, name, reuse=False, batch_size=64):
        reg = tf.contrib.layers.l2_regularizer(1e-3)
        with tf.variable_scope(name, reuse=reuse):
            layer_c1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
            layer_c2 = tf.layers.dense(layer_c1, 256, tf.nn.relu, kernel_regularizer=reg)
            lstm_c = tf.nn.rnn_cell.LSTMCell(num_units=256)
            lstm_c = tf.nn.rnn_cell.DropoutWrapper(lstm_c, output_keep_prob=self.keep_prob)
            state_init_c = lstm_c.zero_state(batch_size=batch_size, dtype=tf.float32)
            lstm_cin = tf.expand_dims(layer_c2, axis=1)
            out_c, state_final_c = tf.nn.dynamic_rnn(cell=lstm_c, inputs=lstm_cin, initial_state=state_init_c)
            cell_out_c = tf.reshape(out_c, [-1, 256])
            vf = tf.layers.dense(cell_out_c, 1, kernel_regularizer=reg)
            params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
        return vf, params, state_init_c, state_final_c

    # Update the network
    def train(self, rollout):
        start = time()
        self.sess.run([self.pi_new_params, self.vf_new_params])
        for _ in range(self.EPOCHS):
            np.random.shuffle(rollout)
            for s, a, r, adv in rollout:
                self.sess.run(self.data_iter.initializer,
                              feed_dict={self.state: s, self.actions: a, self.rewards: r, self.advantage: adv})
                state_a, state_c = self.sess.run([self.pi_state_init, self.vf_state_init])
                ops = [self.summarise, self.global_step, self.pi_state_final, self.vf_state_final, self.train_op]
                while True:
                    try:
                        summary, step, state_a, state_c, _ = self.sess.run(ops,
                                                                           feed_dict={self.pi_state_init: state_a,
                                                                                      self.vf_state_init: state_c,
                                                                                      self.keep_prob: 0.8})
                    except tf.errors.OutOfRangeError:
                        break
        print('\rTrained in %.3fs. Global step %i' % (time() - start, step+1))
        return summary

    def choose_state(self, state, state_lstm, training=True):
        if training:
            op = [self.sample_action, self.vf_eval, self.pi_eval_state_final, self.vf_eval_state_final]
        else:
            op = [self.eval_action, self.vf_eval, self.pi_eval_state_final, self.vf_eval_state_final]
        a, v, state_a, state_c = self.sess.run(op, feed_dict={self.state: [state],
                                                              self.pi_eval_state_init: state_lstm[0],
                                                              self.vf_eval_state_init: state_lstm[1],
                                                              self.keep_prob: 1.0})
        return a[0], np.squeeze(v), (state_a, state_c)


class A2C(Base):
    def __init__(self, env, summary_dir='./', gpu=False):
        self.LR = 1e-4
        self.MINIBATCH = 32
        self.EPOCHS = 8
        self.EPSILON = 0.2
        self.EPS_LEN = 100000
        # GPU setup
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False,
                                device_count={'GPU': gpu})
        config.gpu_options.allow_growth = True
        config.gpu_options.per_process_gpu_memory_fraction = 0.5
        # Placeholders
        self.sess = tf.Session(config=config)
        self.s_dim, self.a_dim = env.observation_space.shape, env.action_space.shape[0]
        self.a_bound = (env.action_space.high - env.action_space.low) / 2
        self.actions = tf.placeholder(tf.float32, [None, self.a_dim], 'action')
        self.state = tf.placeholder(tf.float32, [None, self.s_dim[0]], 'state')
        self.advantage = tf.placeholder(tf.float32, [None, 1], 'advantage')
        self.rewards = tf.placeholder(tf.float32, [None, 1], 'discounted_r')
        # Dataset with experience replay
        self.dataset = tf.data.Dataset.from_tensor_slices({'state': self.state, 'actions': self.actions,
                                                           'rewards': self.rewards, 'advantage': self.advantage})
        self.dataset = self.dataset.shuffle(buffer_size=10000)
        self.dataset = self.dataset.batch(self.MINIBATCH)
        self.dataset = self.dataset.cache()
        self.dataset = self.dataset.repeat(self.EPOCHS)
        self.data_iter = self.dataset.make_initializable_iterator()
        batch = self.data_iter.get_next()
        # Call A2C net
        pi, self.pi_params = self.build_anet(batch['state'], 'pi')
        pi_eval, _ = self.build_anet(self.state, 'pi', reuse=True)
        self.vf, self.vf_params = self.build_cnet(batch['state'], 'vf')
        self.vf_eval, _ = self.build_cnet(self.state, 'vf', reuse=True)
        self.sample_action = tf.squeeze(pi_eval.sample(1), axis=0)
        self.eval_action = pi_eval.mode()
        self.global_step = tf.train.get_or_create_global_step()
        self.saver = tf.train.Saver()
        # Loss functions and training
        loss_pg = - tf.reduce_mean(pi.log_prob(batch['actions']) * batch['advantage']) - 0.01 * tf.reduce_mean(pi.entropy())
        loss_vf = 0.5 * tf.reduce_mean(tf.square(batch['rewards'] - self.vf))
        self.a_grads = tf.gradients(loss_pg, self.pi_params)
        self.c_grads = tf.gradients(loss_vf, self.vf_params)
        self.a_grads, _ = tf.clip_by_global_norm(self.a_grads, 20.0)
        self.c_grads, _ = tf.clip_by_global_norm(self.c_grads, 20.0)
        opt = tf.train.AdamOptimizer(self.LR)
        self.update_a_op = opt.apply_gradients(zip(self.a_grads, self.pi_params))
        self.update_c_op = opt.apply_gradients(zip(self.c_grads, self.vf_params))
        self.sess.run(tf.global_variables_initializer())
        # Tensorboard
        if summary_dir is not None:
            self.writer = tf.summary.FileWriter(summary_dir)
            tf.summary.scalar('Loss/Policy', loss_pg)
            tf.summary.scalar('Loss/Value', loss_vf)
            tf.summary.scalar('Loss/Entropy', - 0.01 * tf.reduce_mean(pi.entropy()))
            tf.summary.scalar('Var/Policy Mode', tf.reduce_mean(pi.mode()))
            tf.summary.scalar('Var/Policy Sigma', tf.reduce_mean(pi.stddev()))
            tf.summary.scalar('Var/Value', tf.reduce_mean(self.vf))
            self.summarise = tf.summary.merge(tf.get_collection(tf.GraphKeys.SUMMARIES))

    # AC net
    def build_anet(self, state_in, name, reuse=False):
        reg = tf.contrib.layers.l2_regularizer(1e-3)
        with tf.variable_scope(name, reuse=reuse):
            layer_a1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
            layer_a2 = tf.layers.dense(layer_a1, 256, tf.nn.relu, kernel_regularizer=reg)
            mu = tf.layers.dense(layer_a2, self.a_dim, tf.nn.tanh, kernel_regularizer=reg)
            # sigma = tf.layers.dense(layer_a2, self.a_dim, tf.nn.softplus, kernel_regularizer=reg)
            sigma = tf.get_variable(name='pi_sigma', shape=self.a_dim, initializer=tf.constant_initializer(0.5))
            sigma = tf.clip_by_value(sigma, 0.0, 1.0)
            norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma)
            params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
        return norm_dist, params

    def build_cnet(self, state_in, name, reuse=False):
        reg = tf.contrib.layers.l2_regularizer(1e-3)
        with tf.variable_scope(name, reuse=reuse):
            layer_c1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg)
            layer_c2 = tf.layers.dense(layer_c1, 256, tf.nn.relu, kernel_regularizer=reg)
            vf = tf.layers.dense(layer_c2, 1, kernel_regularizer=reg)
            params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
        return vf, params

    # Update the network
    def train(self, s, a, r, adv):
        start = time()
        self.sess.run([self.pi_params, self.vf_params, self.data_iter.initializer],
                      feed_dict={self.state: s, self.actions: a, self.rewards: r, self.advantage: adv})
        while True:
            try:
                summary, step, _, _ = self.sess.run([self.summarise, self.global_step,
                                                     self.update_a_op, self.update_c_op])
            except tf.errors.OutOfRangeError:
                break
        print('\rTrained in %.3fs. Global step %i' % (time() - start, step+1))
        return summary
[ "tensorflow.nn.dynamic_rnn", "numpy.squeeze", "tensorflow.minimum", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "tensorflow.get_collection", "tensorflow.gradients", "tensorflow.layers.dense", "tensorflow.train.get_or_create_global_step", "tensorflow.ConfigProto", "tensorflow.Session", "tensorflow.square", "tensorflow.train.Saver", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.train.polynomial_decay", "tensorflow.clip_by_value", "tensorflow.nn.rnn_cell.DropoutWrapper", "tensorflow.summary.FileWriter", "tensorflow.distributions.Normal", "tensorflow.reduce_mean", "tensorflow.nn.rnn_cell.LSTMCell", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.reshape", "tensorflow.expand_dims", "numpy.random.shuffle", "tensorflow.constant_initializer", "tensorflow.clip_by_global_norm", "tensorflow.contrib.layers.l2_regularizer", "tensorflow.variable_scope" ]
models.py
[(33, 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'log_device_placement': '(False)', 'device_count': "{'GPU': gpu}"}), True, 'import tensorflow as tf\n'), (37, 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), True, 'import tensorflow as tf\n'), (40, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.a_dim]', '"""action"""'], {}), True, 'import tensorflow as tf\n'), (41, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.s_dim[0]]', '"""state"""'], {}), True, 'import tensorflow as tf\n'), (42, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 1]', '"""advantage"""'], {}), True, 'import tensorflow as tf\n'), (43, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 1]', '"""discounted_r"""'], {}), True, 'import tensorflow as tf\n'), (45, 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (["{'state': self.state, 'actions': self.actions, 'rewards': self.rewards,\n 'advantage': self.advantage}"], {}), True, 'import tensorflow as tf\n'), (64, 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), True, 'import tensorflow as tf\n'), (65, 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), True, 'import tensorflow as tf\n'), (67, 'tensorflow.train.polynomial_decay', 'tf.train.polynomial_decay', (['self.EPSILON', 'self.global_step', 'self.EPS_LEN', '(0.1)'], {'power': '(0)'}), True, 'import tensorflow as tf\n'), (69, 'tensorflow.clip_by_value', 'tf.clip_by_value', (['ratio', '(0)', '(10)'], {}), True, 'import tensorflow as tf\n'), (76, 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.LR'], {}), True, 'import tensorflow as tf\n'), (86, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Loss/Policy"""', 'loss_pg'], {}), True, 'import tensorflow as tf\n'), (87, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Loss/Value"""', 'loss_vf'], {}), True, 'import tensorflow as tf\n'), (88, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Loss/Entropy"""', 'loss_entropy'], {}), True, 'import tensorflow as tf\n'), (89, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Loss/Total"""', 'loss'], {}), True, 'import tensorflow as tf\n'), (90, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Var/Epsilon"""', 'epsilon_decay'], {}), True, 'import tensorflow as tf\n'), (98, 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.001)'], {}), True, 'import tensorflow as tf\n'), (107, 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': 'name'}), True, 'import tensorflow as tf\n'), (111, 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.001)'], {}), True, 'import tensorflow as tf\n'), (116, 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': 'name'}), True, 'import tensorflow as tf\n'), (121, 'time.time', 'time', ([], {}), False, 'from time import time\n'), (135, 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.001)'], {}), True, 'import tensorflow as tf\n'), (144, 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': 'name'}), True, 'import tensorflow as tf\n'), (158, 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'log_device_placement': '(False)', 'device_count': "{'GPU': gpu}"}), True, 'import 
tensorflow as tf\n'), (162, 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), True, 'import tensorflow as tf\n'), (165, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.a_dim]', '"""action"""'], {}), True, 'import tensorflow as tf\n'), (166, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.s_dim[0]]', '"""state"""'], {}), True, 'import tensorflow as tf\n'), (167, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 1]', '"""advantage"""'], {}), True, 'import tensorflow as tf\n'), (168, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 1]', '"""rewards"""'], {}), True, 'import tensorflow as tf\n'), (169, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""dropout_keep_prob"""'}), True, 'import tensorflow as tf\n'), (171, 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (["{'state': self.state, 'actions': self.actions, 'rewards': self.rewards,\n 'advantage': self.advantage}"], {}), True, 'import tensorflow as tf\n'), (187, 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), True, 'import tensorflow as tf\n'), (188, 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), True, 'import tensorflow as tf\n'), (190, 'tensorflow.train.polynomial_decay', 'tf.train.polynomial_decay', (['self.EPSILON', 'self.global_step', 'self.EPS_LEN', '(0.1)'], {'power': '(1)'}), True, 'import tensorflow as tf\n'), (192, 'tensorflow.clip_by_value', 'tf.clip_by_value', (['ratio', '(0)', '(10)'], {}), True, 'import tensorflow as tf\n'), (199, 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.LR'], {}), True, 'import tensorflow as tf\n'), (209, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Loss/Policy"""', 'loss_pg'], {}), True, 'import tensorflow as tf\n'), (210, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Loss/Value"""', 'loss_vf'], {}), True, 'import tensorflow as tf\n'), (211, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Loss/Entropy"""', 'loss_entropy'], {}), True, 'import tensorflow as tf\n'), (212, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Loss/Total"""', 'loss'], {}), True, 'import tensorflow as tf\n'), (213, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Var/Epsilon"""', 'epsilon_decay'], {}), True, 'import tensorflow as tf\n'), (236, 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': 'name'}), True, 'import tensorflow as tf\n'), (240, 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.001)'], {}), True, 'import tensorflow as tf\n'), (251, 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': 'name'}), True, 'import tensorflow as tf\n'), (256, 'time.time', 'time', ([], {}), False, 'from time import time\n'), (296, 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'log_device_placement': '(False)', 'device_count': "{'GPU': gpu}"}), True, 'import tensorflow as tf\n'), (300, 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), True, 'import tensorflow as tf\n'), (303, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.a_dim]', '"""action"""'], {}), True, 'import tensorflow as tf\n'), (304, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.s_dim[0]]', '"""state"""'], {}), True, 'import tensorflow as tf\n'), (305, 'tensorflow.placeholder', 
'tf.placeholder', (['tf.float32', '[None, 1]', '"""advantage"""'], {}), True, 'import tensorflow as tf\n'), (306, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 1]', '"""discounted_r"""'], {}), True, 'import tensorflow as tf\n'), (308, 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (["{'state': self.state, 'actions': self.actions, 'rewards': self.rewards,\n 'advantage': self.advantage}"], {}), True, 'import tensorflow as tf\n'), (325, 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), True, 'import tensorflow as tf\n'), (326, 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), True, 'import tensorflow as tf\n'), (330, 'tensorflow.gradients', 'tf.gradients', (['loss_pg', 'self.pi_params'], {}), True, 'import tensorflow as tf\n'), (331, 'tensorflow.gradients', 'tf.gradients', (['loss_vf', 'self.vf_params'], {}), True, 'import tensorflow as tf\n'), (332, 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['self.a_grads', '(20.0)'], {}), True, 'import tensorflow as tf\n'), (333, 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['self.c_grads', '(20.0)'], {}), True, 'import tensorflow as tf\n'), (334, 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.LR'], {}), True, 'import tensorflow as tf\n'), (342, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Loss/Policy"""', 'loss_pg'], {}), True, 'import tensorflow as tf\n'), (343, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Loss/Value"""', 'loss_vf'], {}), True, 'import tensorflow as tf\n'), (352, 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.001)'], {}), True, 'import tensorflow as tf\n'), (361, 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': 'name'}), True, 'import tensorflow as tf\n'), (365, 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.001)'], {}), True, 'import tensorflow as tf\n'), (370, 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': 'name'}), True, 'import tensorflow as tf\n'), (375, 'time.time', 'time', ([], {}), False, 'from time import time\n'), (12, 'numpy.squeeze', 'np.squeeze', (['v'], {}), True, 'import numpy as np\n'), (15, 'os.path.join', 'os.path.join', (['model_path', '"""model.ckpt"""'], {}), False, 'import os\n'), (71, 'tensorflow.clip_by_value', 'tf.clip_by_value', (['ratio', '(1 - epsilon_decay)', '(1 + epsilon_decay)'], {}), True, 'import tensorflow as tf\n'), (81, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (85, 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['summary_dir'], {}), True, 'import tensorflow as tf\n'), (93, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.vf'], {}), True, 'import tensorflow as tf\n'), (94, 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.SUMMARIES'], {}), True, 'import tensorflow as tf\n'), (99, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {'reuse': 'reuse'}), True, 'import tensorflow as tf\n'), (100, 'tensorflow.layers.dense', 'tf.layers.dense', (['state_in', '(512)', 'tf.nn.relu'], {'kernel_regularizer': 'reg'}), True, 'import tensorflow as tf\n'), (101, 'tensorflow.layers.dense', 'tf.layers.dense', (['layer_a1', '(256)', 'tf.nn.relu'], {'kernel_regularizer': 'reg'}), True, 'import tensorflow as tf\n'), (102, 'tensorflow.layers.dense', 
'tf.layers.dense', (['layer_a2', 'self.a_dim', 'tf.nn.tanh'], {'kernel_regularizer': 'reg'}), True, 'import tensorflow as tf\n'), (105, 'tensorflow.clip_by_value', 'tf.clip_by_value', (['sigma', '(0.0)', '(1.0)'], {}), True, 'import tensorflow as tf\n'), (106, 'tensorflow.distributions.Normal', 'tf.distributions.Normal', ([], {'loc': '(mu * self.a_bound)', 'scale': 'sigma'}), True, 'import tensorflow as tf\n'), (112, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {'reuse': 'reuse'}), True, 'import tensorflow as tf\n'), (113, 'tensorflow.layers.dense', 'tf.layers.dense', (['state_in', '(512)', 'tf.nn.relu'], {'kernel_regularizer': 'reg'}), True, 'import tensorflow as tf\n'), (114, 'tensorflow.layers.dense', 'tf.layers.dense', (['layer_c1', '(256)', 'tf.nn.relu'], {'kernel_regularizer': 'reg'}), True, 'import tensorflow as tf\n'), (115, 'tensorflow.layers.dense', 'tf.layers.dense', (['layer_c2', '(1)'], {'kernel_regularizer': 'reg'}), True, 'import tensorflow as tf\n'), (136, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {'reuse': 'reuse'}), True, 'import tensorflow as tf\n'), (137, 'tensorflow.layers.dense', 'tf.layers.dense', (['state_in', '(512)', 'tf.nn.relu'], {'kernel_regularizer': 'reg'}), True, 'import tensorflow as tf\n'), (138, 'tensorflow.layers.dense', 'tf.layers.dense', (['layer_a1', '(256)', 'tf.nn.relu'], {'kernel_regularizer': 'reg'}), True, 'import tensorflow as tf\n'), (139, 'tensorflow.layers.dense', 'tf.layers.dense', (['layer_a2', 'self.a_dim', 'tf.nn.tanh'], {'kernel_regularizer': 'reg'}), True, 'import tensorflow as tf\n'), (140, 'tensorflow.layers.dense', 'tf.layers.dense', (['layer_a2', 'self.a_dim', 'tf.nn.softplus'], {'kernel_regularizer': 'reg'}), True, 'import tensorflow as tf\n'), (142, 'tensorflow.clip_by_value', 'tf.clip_by_value', (['sigma', '(0.0)', '(1.0)'], {}), True, 'import tensorflow as tf\n'), (143, 'tensorflow.distributions.Normal', 'tf.distributions.Normal', ([], {'loc': '(mu * self.a_bound)', 'scale': 'sigma'}), True, 'import tensorflow as tf\n'), (194, 'tensorflow.clip_by_value', 'tf.clip_by_value', (['ratio', '(1 - epsilon_decay)', '(1 + epsilon_decay)'], {}), True, 'import tensorflow as tf\n'), (204, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (208, 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['summary_dir'], {}), True, 'import tensorflow as tf\n'), (216, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.vf'], {}), True, 'import tensorflow as tf\n'), (217, 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.SUMMARIES'], {}), True, 'import tensorflow as tf\n'), (222, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {'reuse': 'reuse'}), True, 'import tensorflow as tf\n'), (223, 'tensorflow.layers.dense', 'tf.layers.dense', (['state_in', '(512)', 'tf.nn.relu'], {'kernel_regularizer': 'reg'}), True, 'import tensorflow as tf\n'), (224, 'tensorflow.layers.dense', 'tf.layers.dense', (['layer_a1', '(256)', 'tf.nn.relu'], {'kernel_regularizer': 'reg'}), True, 'import tensorflow as tf\n'), (225, 'tensorflow.nn.rnn_cell.LSTMCell', 'tf.nn.rnn_cell.LSTMCell', ([], {'num_units': '(256)'}), True, 'import tensorflow as tf\n'), (226, 'tensorflow.nn.rnn_cell.DropoutWrapper', 'tf.nn.rnn_cell.DropoutWrapper', (['lstm_a'], {'output_keep_prob': 'self.keep_prob'}), True, 'import tensorflow as tf\n'), (228, 'tensorflow.expand_dims', 'tf.expand_dims', (['layer_a2'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), 
(229, 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', ([], {'cell': 'lstm_a', 'inputs': 'lstm_ain', 'initial_state': 'state_init_a'}), True, 'import tensorflow as tf\n'), (230, 'tensorflow.reshape', 'tf.reshape', (['out_a', '[-1, 256]'], {}), True, 'import tensorflow as tf\n'), (231, 'tensorflow.layers.dense', 'tf.layers.dense', (['cell_out_a', 'self.a_dim', 'tf.nn.tanh'], {'kernel_regularizer': 'reg'}), True, 'import tensorflow as tf\n'), (232, 'tensorflow.layers.dense', 'tf.layers.dense', (['cell_out_a', 'self.a_dim', 'tf.nn.softplus'], {'kernel_regularizer': 'reg'}), True, 'import tensorflow as tf\n'), (234, 'tensorflow.clip_by_value', 'tf.clip_by_value', (['sigma', '(0.0)', '(1.0)'], {}), True, 'import tensorflow as tf\n'), (235, 'tensorflow.distributions.Normal', 'tf.distributions.Normal', ([], {'loc': '(mu * self.a_bound)', 'scale': 'sigma'}), True, 'import tensorflow as tf\n'), (241, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {'reuse': 'reuse'}), True, 'import tensorflow as tf\n'), (242, 'tensorflow.layers.dense', 'tf.layers.dense', (['state_in', '(512)', 'tf.nn.relu'], {'kernel_regularizer': 'reg'}), True, 'import tensorflow as tf\n'), (243, 'tensorflow.layers.dense', 'tf.layers.dense', (['layer_c1', '(256)', 'tf.nn.relu'], {'kernel_regularizer': 'reg'}), True, 'import tensorflow as tf\n'), (244, 'tensorflow.nn.rnn_cell.LSTMCell', 'tf.nn.rnn_cell.LSTMCell', ([], {'num_units': '(256)'}), True, 'import tensorflow as tf\n'), (245, 'tensorflow.nn.rnn_cell.DropoutWrapper', 'tf.nn.rnn_cell.DropoutWrapper', (['lstm_c'], {'output_keep_prob': 'self.keep_prob'}), True, 'import tensorflow as tf\n'), (247, 'tensorflow.expand_dims', 'tf.expand_dims', (['layer_c2'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (248, 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', ([], {'cell': 'lstm_c', 'inputs': 'lstm_cin', 'initial_state': 'state_init_c'}), True, 'import tensorflow as tf\n'), (249, 'tensorflow.reshape', 'tf.reshape', (['out_c', '[-1, 256]'], {}), True, 'import tensorflow as tf\n'), (250, 'tensorflow.layers.dense', 'tf.layers.dense', (['cell_out_c', '(1)'], {'kernel_regularizer': 'reg'}), True, 'import tensorflow as tf\n'), (259, 'numpy.random.shuffle', 'np.random.shuffle', (['rollout'], {}), True, 'import numpy as np\n'), (282, 'numpy.squeeze', 'np.squeeze', (['v'], {}), True, 'import numpy as np\n'), (337, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (341, 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['summary_dir'], {}), True, 'import tensorflow as tf\n'), (347, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.vf'], {}), True, 'import tensorflow as tf\n'), (348, 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.SUMMARIES'], {}), True, 'import tensorflow as tf\n'), (353, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {'reuse': 'reuse'}), True, 'import tensorflow as tf\n'), (354, 'tensorflow.layers.dense', 'tf.layers.dense', (['state_in', '(512)', 'tf.nn.relu'], {'kernel_regularizer': 'reg'}), True, 'import tensorflow as tf\n'), (355, 'tensorflow.layers.dense', 'tf.layers.dense', (['layer_a1', '(256)', 'tf.nn.relu'], {'kernel_regularizer': 'reg'}), True, 'import tensorflow as tf\n'), (356, 'tensorflow.layers.dense', 'tf.layers.dense', (['layer_a2', 'self.a_dim', 'tf.nn.tanh'], {'kernel_regularizer': 'reg'}), True, 'import tensorflow as tf\n'), (359, 'tensorflow.clip_by_value', 'tf.clip_by_value', (['sigma', '(0.0)', '(1.0)'], {}), True, 
'import tensorflow as tf\n'), (360, 'tensorflow.distributions.Normal', 'tf.distributions.Normal', ([], {'loc': '(mu * self.a_bound)', 'scale': 'sigma'}), True, 'import tensorflow as tf\n'), (366, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {'reuse': 'reuse'}), True, 'import tensorflow as tf\n'), (367, 'tensorflow.layers.dense', 'tf.layers.dense', (['state_in', '(512)', 'tf.nn.relu'], {'kernel_regularizer': 'reg'}), True, 'import tensorflow as tf\n'), (368, 'tensorflow.layers.dense', 'tf.layers.dense', (['layer_c1', '(256)', 'tf.nn.relu'], {'kernel_regularizer': 'reg'}), True, 'import tensorflow as tf\n'), (369, 'tensorflow.layers.dense', 'tf.layers.dense', (['layer_c2', '(1)'], {'kernel_regularizer': 'reg'}), True, 'import tensorflow as tf\n'), (72, 'tensorflow.minimum', 'tf.minimum', (['surr1', 'surr2'], {}), True, 'import tensorflow as tf\n'), (73, 'tensorflow.square', 'tf.square', (["(batch['rewards'] - self.vf)"], {}), True, 'import tensorflow as tf\n'), (195, 'tensorflow.minimum', 'tf.minimum', (['surr1', 'surr2'], {}), True, 'import tensorflow as tf\n'), (196, 'tensorflow.square', 'tf.square', (["(batch['rewards'] - self.vf)"], {}), True, 'import tensorflow as tf\n'), (329, 'tensorflow.square', 'tf.square', (["(batch['rewards'] - self.vf)"], {}), True, 'import tensorflow as tf\n'), (104, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.5)'], {}), True, 'import tensorflow as tf\n'), (358, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.5)'], {}), True, 'import tensorflow as tf\n'), (129, 'time.time', 'time', ([], {}), False, 'from time import time\n'), (271, 'time.time', 'time', ([], {}), False, 'from time import time\n'), (383, 'time.time', 'time', ([], {}), False, 'from time import time\n')]
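To make the models.py sample above easier to follow, here is a minimal usage sketch of its PPO class. It assumes the classic OpenAI Gym API ('BipedalWalker-v2', with reset/step returning 4 values) that this TF1-era repo targets; the rollout horizon, GAMMA, and the one-step advantage estimate are illustrative assumptions, since the repo's own training loop is not part of this sample.

# Hypothetical driver for the PPO class defined in models.py above.
# The caller is responsible for computing discounted returns and
# advantages, because PPO.train(s, a, r, adv) takes them as inputs.
import gym
import numpy as np

env = gym.make('BipedalWalker-v2')          # assumed env id for this repo
agent = PPO(env, summary_dir='./log', gpu=False)

GAMMA = 0.99                                 # assumed discount factor
s = env.reset()
states, actions, rewards, values = [], [], [], []
for t in range(200):                         # assumed rollout horizon
    a, v = agent.choose_state(s)             # sampled action + value estimate
    s_next, r, done, _ = env.step(a)
    states.append(s)
    actions.append(a)
    rewards.append(r)
    values.append(v)
    s = s_next
    if done:
        break

# Discounted returns, then a simple one-step advantage estimate.
returns, R = [], 0.0
for r in reversed(rewards):
    R = r + GAMMA * R
    returns.insert(0, R)
returns = np.array(returns, dtype=np.float32).reshape(-1, 1)
adv = returns - np.array(values, dtype=np.float32).reshape(-1, 1)

agent.train(np.array(states), np.array(actions), returns, adv)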
zhrlove/seq2seq_attention_1
6535820c9381467508ba8dfeb8971173b3998510
import tensorflow as tf import math from tensorflow.contrib.rnn import BasicLSTMCell, RNNCell, DropoutWrapper, MultiRNNCell from translate.rnn import stack_bidirectional_dynamic_rnn, CellInitializer, GRUCell, DropoutGRUCell, PLSTM from translate.rnn import get_state_size from translate.beam_search import get_weights from translate import utils, beam_search from translate.conv_lstm import BasicConvLSTMCell def auto_reuse(fun): """ Wrapper that automatically handles the `reuse' parameter. This is rather risky, as it can lead to reusing variables by mistake. """ def fun_(*args, **kwargs): try: return fun(*args, **kwargs) except ValueError as e: if 'reuse' in str(e): with tf.variable_scope(tf.get_variable_scope(), reuse=True): return fun(*args, **kwargs) else: raise e return fun_ get_variable = auto_reuse(tf.get_variable) dense = auto_reuse(tf.layers.dense) class CellWrapper(RNNCell): """ Wrapper around LayerNormBasicLSTMCell, BasicLSTMCell and MultiRNNCell, to keep the state_is_tuple=False behavior (soon to be deprecated). """ def __init__(self, cell): super(CellWrapper, self).__init__() self.cell = cell self.num_splits = len(cell.state_size) if isinstance(cell.state_size, tuple) else 1 @property def state_size(self): return sum(self.cell.state_size) @property def output_size(self): return self.cell.output_size def __call__(self, inputs, state, scope=None): state = tf.split(value=state, num_or_size_splits=self.num_splits, axis=1) new_h, new_state = self.cell(inputs, state, scope=scope) return new_h, tf.concat(new_state, 1) def multi_encoder(encoder_inputs, encoders, encoder_input_length, other_inputs=None, training=True, **kwargs): """ Build multiple encoders according to the configuration in `encoders`, reading from `encoder_inputs`. The result is a list of the outputs produced by those encoders (for each time-step), and their final state. :param encoder_inputs: list of tensors of shape (batch_size, input_length), one tensor for each encoder. :param encoders: list of encoder configurations :param encoder_input_length: list of tensors of shape (batch_size,) (one tensor for each encoder) :return: encoder outputs: a list of tensors of shape (batch_size, input_length, encoder_cell_size), hidden states of the encoders. encoder state: concatenation of the final states of all encoders, tensor of shape (batch_size, sum_of_state_sizes) new_encoder_input_length: list of tensors of shape (batch_size,) with the true length of the encoder outputs. May be different than `encoder_input_length` because of maxout strides, and time pooling. 
""" encoder_states = [] encoder_outputs = [] new_encoder_input_length = [] for i, encoder in enumerate(encoders): # create embeddings in the global scope (allows sharing between encoder and decoder) weight_scale = encoder.embedding_weight_scale or encoder.weight_scale if weight_scale is None: initializer = None # FIXME elif encoder.embedding_initializer == 'uniform' or (encoder.embedding_initializer is None and encoder.initializer == 'uniform'): initializer = tf.random_uniform_initializer(minval=-weight_scale, maxval=weight_scale) else: initializer = tf.random_normal_initializer(stddev=weight_scale) with tf.device('/cpu:0'): # embeddings can take a very large amount of memory, so # storing them in GPU memory can be impractical if encoder.binary: embeddings = None # inputs are token ids, which need to be mapped to vectors (embeddings) else: embedding_shape = [encoder.vocab_size, encoder.embedding_size] embeddings = get_variable('embedding_{}'.format(encoder.name), shape=embedding_shape, initializer=initializer) if encoder.pos_embedding_size: pos_embedding_shape = [encoder.max_len + 1, encoder.pos_embedding_size] pos_embeddings = get_variable('pos_embedding_{}'.format(encoder.name), shape=pos_embedding_shape, initializer=initializer) else: pos_embeddings = None if encoder.use_lstm is False: encoder.cell_type = 'GRU' cell_output_size, cell_state_size = get_state_size(encoder.cell_type, encoder.cell_size, encoder.lstm_proj_size) with tf.variable_scope('encoder_{}'.format(encoder.name)): encoder_inputs_ = encoder_inputs[i] initial_inputs = encoder_inputs_ encoder_input_length_ = encoder_input_length[i] def get_cell(input_size=None, reuse=False): if encoder.cell_type.lower() == 'lstm': cell = CellWrapper(BasicLSTMCell(encoder.cell_size, reuse=reuse)) elif encoder.cell_type.lower() == 'plstm': cell = PLSTM(encoder.cell_size, reuse=reuse, fact_size=encoder.lstm_fact_size, proj_size=encoder.lstm_proj_size) elif encoder.cell_type.lower() == 'dropoutgru': cell = DropoutGRUCell(encoder.cell_size, reuse=reuse, layer_norm=encoder.layer_norm, input_size=input_size, input_keep_prob=encoder.rnn_input_keep_prob, state_keep_prob=encoder.rnn_state_keep_prob) else: cell = GRUCell(encoder.cell_size, reuse=reuse, layer_norm=encoder.layer_norm) if encoder.use_dropout and encoder.cell_type.lower() != 'dropoutgru': cell = DropoutWrapper(cell, input_keep_prob=encoder.rnn_input_keep_prob, output_keep_prob=encoder.rnn_output_keep_prob, state_keep_prob=encoder.rnn_state_keep_prob, variational_recurrent=encoder.pervasive_dropout, dtype=tf.float32, input_size=input_size) return cell batch_size = tf.shape(encoder_inputs_)[0] time_steps = tf.shape(encoder_inputs_)[1] if embeddings is not None: flat_inputs = tf.reshape(encoder_inputs_, [tf.multiply(batch_size, time_steps)]) flat_inputs = tf.nn.embedding_lookup(embeddings, flat_inputs) encoder_inputs_ = tf.reshape(flat_inputs, tf.stack([batch_size, time_steps, flat_inputs.get_shape()[1].value])) if pos_embeddings is not None: pos_inputs_ = tf.range(time_steps, dtype=tf.int32) pos_inputs_ = tf.nn.embedding_lookup(pos_embeddings, pos_inputs_) pos_inputs_ = tf.tile(tf.expand_dims(pos_inputs_, axis=0), [batch_size, 1, 1]) encoder_inputs_ = tf.concat([encoder_inputs_, pos_inputs_], axis=2) if other_inputs is not None: encoder_inputs_ = tf.concat([encoder_inputs_, other_inputs], axis=2) if encoder.use_dropout: noise_shape = [1, time_steps, 1] if encoder.pervasive_dropout else [batch_size, time_steps, 1] encoder_inputs_ = tf.nn.dropout(encoder_inputs_, 
keep_prob=encoder.word_keep_prob, noise_shape=noise_shape) size = tf.shape(encoder_inputs_)[2] noise_shape = [1, 1, size] if encoder.pervasive_dropout else [batch_size, time_steps, size] encoder_inputs_ = tf.nn.dropout(encoder_inputs_, keep_prob=encoder.embedding_keep_prob, noise_shape=noise_shape) if encoder.input_layers: for j, layer_size in enumerate(encoder.input_layers): if encoder.input_layer_activation is not None and encoder.input_layer_activation.lower() == 'relu': activation = tf.nn.relu else: activation = tf.tanh if encoder.batch_norm: encoder_inputs_ = tf.layers.batch_normalization(encoder_inputs_, training=training, name='input_batch_norm_{}'.format(j + 1)) encoder_inputs_ = dense(encoder_inputs_, layer_size, activation=activation, use_bias=True, name='layer_{}'.format(j)) if encoder.use_dropout: encoder_inputs_ = tf.nn.dropout(encoder_inputs_, keep_prob=encoder.input_layer_keep_prob) if encoder.conv_filters: encoder_inputs_ = tf.expand_dims(encoder_inputs_, axis=3) for k, out_channels in enumerate(encoder.conv_filters, 1): in_channels = encoder_inputs_.get_shape()[-1].value filter_height, filter_width = encoder.conv_size strides = encoder.conv_strides or [1, 1] strides = [1] + strides + [1] filter_ = get_variable('filter_{}'.format(k), [filter_height, filter_width, in_channels, out_channels]) encoder_inputs_ = tf.nn.conv2d(encoder_inputs_, filter_, strides, padding='SAME') if encoder.batch_norm: encoder_inputs_ = tf.layers.batch_normalization(encoder_inputs_, training=training, name='conv_batch_norm_{}'.format(k)) if encoder.conv_activation is not None and encoder.conv_activation.lower() == 'relu': encoder_inputs_ = tf.nn.relu(encoder_inputs_) encoder_input_length_ = tf.to_int32(tf.ceil(encoder_input_length_ / strides[1])) feature_size = encoder_inputs_.shape[2].value channels = encoder_inputs_.shape[3].value time_steps = tf.shape(encoder_inputs_)[1] encoder_inputs_ = tf.reshape(encoder_inputs_, [batch_size, time_steps, feature_size * channels]) conv_outputs_ = encoder_inputs_ if encoder.conv_lstm_size: cell = BasicConvLSTMCell([feature_size, channels], encoder.conv_lstm_size, 1) encoder_inputs_, _ = tf.nn.bidirectional_dynamic_rnn( cell, cell, encoder_inputs_, dtype=tf.float32 ) encoder_inputs_ = tf.concat(encoder_inputs_, axis=2) if encoder.convolutions: if encoder.binary: raise NotImplementedError pad = tf.nn.embedding_lookup(embeddings, utils.BOS_ID) pad = tf.expand_dims(tf.expand_dims(pad, axis=0), axis=1) pad = tf.tile(pad, [batch_size, 1, 1]) # Fully Character-Level NMT without Explicit Segmentation, Lee et al. 
2016 inputs = [] for w, filter_size in enumerate(encoder.convolutions, 1): filter_ = get_variable('filter_{}'.format(w), [w, encoder.embedding_size, filter_size]) if w > 1: right = (w - 1) // 2 left = (w - 1) - right pad_right = tf.tile(pad, [1, right, 1]) pad_left = tf.tile(pad, [1, left, 1]) inputs_ = tf.concat([pad_left, encoder_inputs_, pad_right], axis=1) else: inputs_ = encoder_inputs_ inputs_ = tf.nn.convolution(inputs_, filter=filter_, padding='VALID') inputs.append(inputs_) encoder_inputs_ = tf.concat(inputs, axis=2) # if encoder.convolution_activation.lower() == 'relu': encoder_inputs_ = tf.nn.relu(encoder_inputs_) if encoder.maxout_stride: if encoder.binary: raise NotImplementedError stride = encoder.maxout_stride k = tf.to_int32(tf.ceil(time_steps / stride) * stride) - time_steps # TODO: simpler pad = tf.zeros([batch_size, k, tf.shape(encoder_inputs_)[2]]) encoder_inputs_ = tf.concat([encoder_inputs_, pad], axis=1) encoder_inputs_ = tf.nn.pool(encoder_inputs_, window_shape=[stride], pooling_type='MAX', padding='VALID', strides=[stride]) encoder_input_length_ = tf.to_int32(tf.ceil(encoder_input_length_ / stride)) if encoder.highway_layers: x = encoder_inputs_ for j in range(encoder.highway_layers): size = x.shape[2].value with tf.variable_scope('highway_{}'.format(j + 1)): g = tf.layers.dense(x, size, activation=tf.nn.sigmoid, use_bias=True, name='g') y = tf.layers.dense(x, size, activation=tf.nn.relu, use_bias=True, name='y') x = g * y + (1 - g) * x encoder_inputs_ = x # Contrary to Theano's RNN implementation, states after the sequence length are zero # (while Theano repeats last state) inter_layer_keep_prob = None if not encoder.use_dropout else encoder.inter_layer_keep_prob parameters = dict( inputs=encoder_inputs_, sequence_length=encoder_input_length_, dtype=tf.float32, parallel_iterations=encoder.parallel_iterations, inter_layers=encoder.inter_layers, inter_layer_activation=encoder.inter_layer_activation, batch_norm=encoder.batch_norm, inter_layer_keep_prob=inter_layer_keep_prob, pervasive_dropout=encoder.pervasive_dropout, training=training ) input_size = encoder_inputs_.get_shape()[2].value def get_initial_state(name='initial_state'): if encoder.train_initial_states: initial_state = get_variable(name, initializer=tf.zeros(cell_state_size)) return tf.tile(tf.expand_dims(initial_state, axis=0), [batch_size, 1]) else: return None if encoder.bidir: rnn = lambda reuse: stack_bidirectional_dynamic_rnn( cells_fw=[get_cell(input_size if j == 0 else 2 * cell_output_size, reuse=reuse) for j in range(encoder.layers)], cells_bw=[get_cell(input_size if j == 0 else 2 * cell_output_size, reuse=reuse) for j in range(encoder.layers)], initial_states_fw=[get_initial_state('initial_state_fw')] * encoder.layers, initial_states_bw=[get_initial_state('initial_state_bw')] * encoder.layers, time_pooling=encoder.time_pooling, pooling_avg=encoder.pooling_avg, **parameters) initializer = CellInitializer(encoder.cell_size) if encoder.orthogonal_init else None with tf.variable_scope(tf.get_variable_scope(), initializer=initializer): try: encoder_outputs_, _, encoder_states_ = rnn(reuse=False) except ValueError: # Multi-task scenario where we're reusing the same RNN parameters encoder_outputs_, _, encoder_states_ = rnn(reuse=True) else: if encoder.time_pooling or encoder.final_state == 'concat_last': raise NotImplementedError if encoder.layers > 1: cell = MultiRNNCell([get_cell(input_size if j == 0 else cell_output_size) for j in range(encoder.layers)]) initial_state = (get_initial_state(),) * 
encoder.layers else: cell = get_cell(input_size) initial_state = get_initial_state() encoder_outputs_, encoder_states_ = auto_reuse(tf.nn.dynamic_rnn)(cell=cell, initial_state=initial_state, **parameters) if encoder.time_pooling: for stride in encoder.time_pooling[:encoder.layers - 1]: encoder_input_length_ = (encoder_input_length_ + stride - 1) // stride # rounding up last_backward = encoder_outputs_[:, 0, cell_output_size:] indices = tf.stack([tf.range(batch_size), encoder_input_length_ - 1], axis=1) last_forward = tf.gather_nd(encoder_outputs_[:, :, :cell_output_size], indices) last_forward.set_shape([None, cell_output_size]) if encoder.final_state == 'concat_last': # concats last states of all backward layers (full LSTM states) encoder_state_ = tf.concat(encoder_states_, axis=1) elif encoder.final_state == 'average': mask = tf.sequence_mask(encoder_input_length_, maxlen=tf.shape(encoder_outputs_)[1], dtype=tf.float32) mask = tf.expand_dims(mask, axis=2) encoder_state_ = tf.reduce_sum(mask * encoder_outputs_, axis=1) / tf.reduce_sum(mask, axis=1) elif encoder.final_state == 'average_inputs': mask = tf.sequence_mask(encoder_input_length_, maxlen=tf.shape(encoder_inputs_)[1], dtype=tf.float32) mask = tf.expand_dims(mask, axis=2) encoder_state_ = tf.reduce_sum(mask * encoder_inputs_, axis=1) / tf.reduce_sum(mask, axis=1) elif encoder.bidir and encoder.final_state == 'last_both': encoder_state_ = tf.concat([last_forward, last_backward], axis=1) elif encoder.final_state == 'none': encoder_state_ = tf.zeros(shape=[batch_size, 0]) elif encoder.bidir and not encoder.final_state == 'last_forward': # last backward hidden state encoder_state_ = last_backward else: # last forward hidden state encoder_state_ = last_forward if encoder.bidir and encoder.bidir_projection: encoder_outputs_ = dense(encoder_outputs_, cell_output_size, use_bias=False, name='bidir_projection') if encoder.attend_inputs: encoder_outputs.append(encoder_inputs_) elif encoder.attend_both: encoder_outputs.append(tf.concat([encoder_inputs_, encoder_outputs_], axis=2)) else: encoder_outputs.append(encoder_outputs_) encoder_states.append(encoder_state_) new_encoder_input_length.append(encoder_input_length_) encoder_state = tf.concat(encoder_states, 1) return encoder_outputs, encoder_state, new_encoder_input_length def compute_energy(hidden, state, encoder, time=None, input_length=None, prev_weights=None, **kwargs): batch_size = tf.shape(hidden)[0] time_steps = tf.shape(hidden)[1] if encoder.attn_keep_prob is not None: state_noise_shape = [1, tf.shape(state)[1]] if encoder.pervasive_dropout else None state = tf.nn.dropout(state, keep_prob=encoder.attn_keep_prob, noise_shape=state_noise_shape) hidden_noise_shape = [1, 1, tf.shape(hidden)[2]] if encoder.pervasive_dropout else None hidden = tf.nn.dropout(hidden, keep_prob=encoder.attn_keep_prob, noise_shape=hidden_noise_shape) if encoder.mult_attn: state = dense(state, encoder.attn_size, use_bias=False, name='state') hidden = dense(hidden, encoder.attn_size, use_bias=False, name='hidden') return tf.einsum('ijk,ik->ij', hidden, state) y = dense(state, encoder.attn_size, use_bias=not encoder.layer_norm, name='W_a') y = tf.expand_dims(y, axis=1) if encoder.layer_norm: y = tf.contrib.layers.layer_norm(y, scope='layer_norm_state') hidden = tf.contrib.layers.layer_norm(hidden, center=False, scope='layer_norm_hidden') y += dense(hidden, encoder.attn_size, use_bias=False, name='U_a') if encoder.position_bias and input_length is not None and time is not None: src_pos = 
tf.tile(tf.expand_dims(tf.range(time_steps), axis=0), [batch_size, 1]) trg_pos = tf.tile(tf.reshape(time, [1, 1]), [batch_size, time_steps]) src_len = tf.tile(tf.expand_dims(input_length, axis=1), [1, time_steps]) # - 1 pos_feats = tf.to_float(tf.stack([src_pos, trg_pos, src_len], axis=2)) pos_feats = tf.log(1 + pos_feats) y += dense(pos_feats, encoder.attn_size, use_bias=False, name='P_a') if encoder.attn_filters: filter_shape = [encoder.attn_filter_length * 2 + 1, 1, 1, encoder.attn_filters] filter_ = get_variable('filter', filter_shape) prev_weights = tf.reshape(prev_weights, tf.stack([batch_size, time_steps, 1, 1])) conv = tf.nn.conv2d(prev_weights, filter_, [1, 1, 1, 1], 'SAME') conv = tf.squeeze(conv, axis=2) y += dense(conv, encoder.attn_size, use_bias=False, name='C_a') v = get_variable('v_a', [encoder.attn_size]) return tf.reduce_sum(v * tf.tanh(y), axis=2) def global_attention(state, hidden_states, encoder, encoder_input_length, scope=None, context=None, **kwargs): with tf.variable_scope(scope or 'attention_{}'.format(encoder.name)): if context is not None and encoder.use_context: state = tf.concat([state, context], axis=1) e = compute_energy(hidden_states, state, encoder, input_length=encoder_input_length, **kwargs) mask = tf.sequence_mask(encoder_input_length, maxlen=tf.shape(hidden_states)[1], dtype=tf.float32) e *= mask if encoder.attn_norm_fun == 'none': weights = e elif encoder.attn_norm_fun == 'sigmoid': weights = tf.nn.sigmoid(e) elif encoder.attn_norm_fun == 'max': weights = tf.one_hot(tf.argmax(e, -1), depth=tf.shape(e)[1]) else: e -= tf.reduce_max(e, axis=1, keep_dims=True) T = encoder.attn_temperature or 1.0 exp = tf.exp(e / T) * mask weights = exp / tf.reduce_sum(exp, axis=-1, keep_dims=True) weighted_average = tf.reduce_sum(tf.expand_dims(weights, 2) * hidden_states, axis=1) return weighted_average, weights def no_attention(state, hidden_states, *args, **kwargs): batch_size = tf.shape(state)[0] weighted_average = tf.zeros(shape=tf.stack([batch_size, 0])) weights = tf.zeros(shape=[batch_size, tf.shape(hidden_states)[1]]) return weighted_average, weights def average_attention(hidden_states, encoder_input_length, *args, **kwargs): # attention with fixed weights (average of all hidden states) lengths = tf.to_float(tf.expand_dims(encoder_input_length, axis=1)) mask = tf.sequence_mask(encoder_input_length, maxlen=tf.shape(hidden_states)[1]) weights = tf.to_float(mask) / lengths weighted_average = tf.reduce_sum(hidden_states * tf.expand_dims(weights, axis=2), axis=1) return weighted_average, weights def last_state_attention(hidden_states, encoder_input_length, *args, **kwargs): weights = tf.one_hot(encoder_input_length - 1, tf.shape(hidden_states)[1]) weights = tf.to_float(weights) weighted_average = tf.reduce_sum(hidden_states * tf.expand_dims(weights, axis=2), axis=1) return weighted_average, weights def local_attention(state, hidden_states, encoder, encoder_input_length, pos=None, scope=None, context=None, **kwargs): batch_size = tf.shape(state)[0] attn_length = tf.shape(hidden_states)[1] if context is not None and encoder.use_context: state = tf.concat([state, context], axis=1) state_size = state.get_shape()[1].value with tf.variable_scope(scope or 'attention_{}'.format(encoder.name)): encoder_input_length = tf.to_float(tf.expand_dims(encoder_input_length, axis=1)) if pos is not None: pos = tf.reshape(pos, [-1, 1]) pos = tf.minimum(pos, encoder_input_length - 1) if pos is not None and encoder.attn_window_size > 0: # `pred_edits` scenario, where we know the aligned pos 
            # when the window size is non-zero, we concatenate consecutive encoder states
            # and map them to the right attention vector size.
            weights = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos, axis=1)), depth=attn_length))

            weighted_average = []
            for offset in range(-encoder.attn_window_size, encoder.attn_window_size + 1):
                pos_ = pos + offset
                pos_ = tf.minimum(pos_, encoder_input_length - 1)
                pos_ = tf.maximum(pos_, 0)  # TODO: when pos is < 0, use <S> or </S>
                weights_ = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos_, axis=1)), depth=attn_length))
                weighted_average_ = tf.reduce_sum(tf.expand_dims(weights_, axis=2) * hidden_states, axis=1)
                weighted_average.append(weighted_average_)

            weighted_average = tf.concat(weighted_average, axis=1)
            weighted_average = dense(weighted_average, encoder.attn_size)
        elif pos is not None:
            weights = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos, axis=1)), depth=attn_length))
            weighted_average = tf.reduce_sum(tf.expand_dims(weights, axis=2) * hidden_states, axis=1)
        else:
            # Local attention of Luong et al. (http://arxiv.org/abs/1508.04025)
            wp = get_variable('Wp', [state_size, state_size])
            vp = get_variable('vp', [state_size, 1])

            pos = tf.nn.sigmoid(tf.matmul(tf.nn.tanh(tf.matmul(state, wp)), vp))
            pos = tf.floor(encoder_input_length * pos)
            pos = tf.reshape(pos, [-1, 1])
            pos = tf.minimum(pos, encoder_input_length - 1)

            idx = tf.tile(tf.to_float(tf.range(attn_length)), tf.stack([batch_size]))
            idx = tf.reshape(idx, [-1, attn_length])

            low = pos - encoder.attn_window_size
            high = pos + encoder.attn_window_size

            mlow = tf.to_float(idx < low)
            mhigh = tf.to_float(idx > high)
            m = mlow + mhigh
            m += tf.to_float(idx >= encoder_input_length)

            mask = tf.to_float(tf.equal(m, 0.0))

            e = compute_energy(hidden_states, state, encoder, input_length=encoder_input_length, **kwargs)

            weights = softmax(e, mask=mask)

            if encoder.attn_window_size > 0:
                sigma = encoder.attn_window_size / 2
                numerator = -tf.pow((idx - pos), tf.convert_to_tensor(2, dtype=tf.float32))
                div = tf.truediv(numerator, 2 * sigma ** 2)
                weights *= tf.exp(div)  # result of the truncated normal distribution
                # normalize to keep a probability distribution
                # weights /= (tf.reduce_sum(weights, axis=1, keep_dims=True) + 10e-12)

            weighted_average = tf.reduce_sum(tf.expand_dims(weights, axis=2) * hidden_states, axis=1)

        return weighted_average, weights


def attention(encoder, scope=None, **kwargs):
    attention_functions = {
        'global': global_attention,
        'local': local_attention,
        'none': no_attention,
        'average': average_attention,
        'last_state': last_state_attention
    }

    attention_function = attention_functions.get(encoder.attention_type, global_attention)

    context_vectors = []
    weights = []

    attn_heads = encoder.attn_heads or 1
    scope = scope or 'attention_{}'.format(encoder.name)

    for i in range(attn_heads):
        scope_ = scope if i == 0 else scope + '_{}'.format(i + 1)
        context_vector, weights_ = attention_function(encoder=encoder, scope=scope_, **kwargs)
        context_vectors.append(context_vector)
        weights.append(weights_)

    context_vector = tf.concat(context_vectors, axis=-1)
    weights = sum(weights) / len(weights)

    if encoder.attn_mapping:
        with tf.variable_scope(scope):
            context_vector = dense(context_vector, encoder.attn_mapping, use_bias=False, name='output')

    return context_vector, weights


def multi_attention(state, hidden_states, encoders, encoder_input_length, pos=None, aggregation_method='sum',
                    prev_weights=None, **kwargs):
    attns = []
    weights = []

    context_vector = None
    for i, (hidden, encoder, input_length) in enumerate(zip(hidden_states, encoders, encoder_input_length)):
        pos_ = pos[i] if pos is not None else None
        prev_weights_ = prev_weights[i] if prev_weights is not None else None

        hidden = beam_search.resize_like(hidden, state)
        input_length = beam_search.resize_like(input_length, state)

        context_vector, weights_ = attention(state=state, hidden_states=hidden, encoder=encoder,
                                             encoder_input_length=input_length, pos=pos_, context=context_vector,
                                             prev_weights=prev_weights_, **kwargs)
        attns.append(context_vector)
        weights.append(weights_)

    if aggregation_method == 'sum':
        context_vector = tf.reduce_sum(tf.stack(attns, axis=2), axis=2)
    else:
        context_vector = tf.concat(attns, axis=1)

    return context_vector, weights


def attention_decoder(decoder_inputs, initial_state, attention_states, encoders, decoder, encoder_input_length,
                      feed_previous=0.0, align_encoder_id=0, feed_argmax=True, training=True, **kwargs):
    """
    :param decoder_inputs: int32 tensor of shape (batch_size, output_length)
    :param initial_state: initial state of the decoder (usually the final state of the encoder), as a float32
      tensor of shape (batch_size, initial_state_size). This state is mapped to the correct state size for the
      decoder.
    :param attention_states: list of tensors of shape (batch_size, input_length, encoder_cell_size), the hidden
      states of the encoder(s) (one tensor for each encoder).
    :param encoders: configuration of the encoders
    :param decoder: configuration of the decoder
    :param encoder_input_length: list of int32 tensors of shape (batch_size,), which gives, for each encoder,
      the true length of each sequence in the batch (sequences in the same batch are padded to all have the
      same length).
    :param feed_previous: scalar tensor giving the probability of using the previous decoder output instead of
      the ground truth as input for the decoder (1 when decoding, between 0 and 1 when training)
    :param feed_argmax: boolean tensor, when True the greedy decoder outputs the word with the highest
      probability (argmax). When False, it samples a word from the probability distribution (softmax).
    :param align_encoder_id: outputs attention weights for this encoder. Also used when predicting edit
      operations (pred_edits), to specify which encoder reads the sequence to post-edit (MT).
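    :param training: boolean, true when the graph is built for training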
    :return: outputs of the decoder as a tensor of shape (batch_size, output_length, vocab_size),
      attention weights as a tensor of shape (batch_size, output_length, input_length), for the encoder
      given by align_encoder_id
    """
    cell_output_size, cell_state_size = get_state_size(decoder.cell_type, decoder.cell_size,
                                                       decoder.lstm_proj_size, decoder.layers)

    assert not decoder.pred_maxout_layer or cell_output_size % 2 == 0, 'cell size must be a multiple of 2'

    if decoder.use_lstm is False:
        decoder.cell_type = 'GRU'

    embedding_shape = [decoder.vocab_size, decoder.embedding_size]

    weight_scale = decoder.embedding_weight_scale or decoder.weight_scale
    if weight_scale is None:
        initializer = None  # FIXME
    elif decoder.embedding_initializer == 'uniform' or (decoder.embedding_initializer is None
                                                        and decoder.initializer == 'uniform'):
        initializer = tf.random_uniform_initializer(minval=-weight_scale, maxval=weight_scale)
    else:
        initializer = tf.random_normal_initializer(stddev=weight_scale)

    with tf.device('/cpu:0'):
        embedding = get_variable('embedding_{}'.format(decoder.name), shape=embedding_shape,
                                 initializer=initializer)

    input_shape = tf.shape(decoder_inputs)
    batch_size = input_shape[0]
    time_steps = input_shape[1]

    scope_name = 'decoder_{}'.format(decoder.name)
    scope_name += '/' + '_'.join(encoder.name for encoder in encoders)

    def embed(input_):
        embedded_input = tf.nn.embedding_lookup(embedding, input_)

        if decoder.use_dropout and decoder.word_keep_prob is not None:
            noise_shape = [1, 1] if decoder.pervasive_dropout else [tf.shape(input_)[0], 1]
            embedded_input = tf.nn.dropout(embedded_input, keep_prob=decoder.word_keep_prob,
                                           noise_shape=noise_shape)
        if decoder.use_dropout and decoder.embedding_keep_prob is not None:
            size = tf.shape(embedded_input)[1]
            noise_shape = [1, size] if decoder.pervasive_dropout else [tf.shape(input_)[0], size]
            embedded_input = tf.nn.dropout(embedded_input, keep_prob=decoder.embedding_keep_prob,
                                           noise_shape=noise_shape)

        return embedded_input

    def get_cell(input_size=None, reuse=False):
        cells = []

        for j in range(decoder.layers):
            input_size_ = input_size if j == 0 else cell_output_size

            if decoder.cell_type.lower() == 'lstm':
                cell = CellWrapper(BasicLSTMCell(decoder.cell_size, reuse=reuse))
            elif decoder.cell_type.lower() == 'plstm':
                cell = PLSTM(decoder.cell_size, reuse=reuse, fact_size=decoder.lstm_fact_size,
                             proj_size=decoder.lstm_proj_size)
            elif decoder.cell_type.lower() == 'dropoutgru':
                cell = DropoutGRUCell(decoder.cell_size, reuse=reuse, layer_norm=decoder.layer_norm,
                                      input_size=input_size_, input_keep_prob=decoder.rnn_input_keep_prob,
                                      state_keep_prob=decoder.rnn_state_keep_prob)
            else:
                cell = GRUCell(decoder.cell_size, reuse=reuse, layer_norm=decoder.layer_norm)

            if decoder.use_dropout and decoder.cell_type.lower() != 'dropoutgru':
                cell = DropoutWrapper(cell, input_keep_prob=decoder.rnn_input_keep_prob,
                                      output_keep_prob=decoder.rnn_output_keep_prob,
                                      state_keep_prob=decoder.rnn_state_keep_prob,
                                      variational_recurrent=decoder.pervasive_dropout,
                                      dtype=tf.float32, input_size=input_size_)
            cells.append(cell)

        if len(cells) == 1:
            return cells[0]
        else:
            return CellWrapper(MultiRNNCell(cells))

    def look(time, state, input_, prev_weights=None, pos=None, context=None):
        prev_weights_ = [prev_weights if i == align_encoder_id else None for i in range(len(encoders))]
        pos_ = None
        if decoder.pred_edits:
            pos_ = [pos if i == align_encoder_id else None for i in range(len(encoders))]
        if decoder.attn_prev_word:
            state = tf.concat([state, input_], axis=1)

        if decoder.attn_prev_attn and context is not None:
            state = tf.concat([state, context], axis=1)

        if decoder.hidden_state_scaling:
            attention_states_ = [states * decoder.hidden_state_scaling for states in attention_states]
        else:
            attention_states_ = attention_states

        parameters = dict(hidden_states=attention_states_, encoder_input_length=encoder_input_length,
                          encoders=encoders, aggregation_method=decoder.aggregation_method)
        context, new_weights = multi_attention(state, time=time, pos=pos_, prev_weights=prev_weights_,
                                               **parameters)

        if decoder.context_mapping:
            with tf.variable_scope(scope_name):
                activation = tf.nn.tanh if decoder.context_mapping_activation == 'tanh' else None
                use_bias = not decoder.context_mapping_no_bias
                context = dense(context, decoder.context_mapping, use_bias=use_bias, activation=activation,
                                name='context_mapping')

        return context, new_weights[align_encoder_id]

    def update(state, input_, context=None, symbol=None):
        if context is not None and decoder.rnn_feed_attn:
            input_ = tf.concat([input_, context], axis=1)
        input_size = input_.get_shape()[1].value

        initializer = CellInitializer(decoder.cell_size) if decoder.orthogonal_init else None
        with tf.variable_scope(tf.get_variable_scope(), initializer=initializer):
            try:
                output, new_state = get_cell(input_size)(input_, state)
            except ValueError:  # auto_reuse doesn't work with LSTM cells
                output, new_state = get_cell(input_size, reuse=True)(input_, state)

        if decoder.skip_update and decoder.pred_edits and symbol is not None:
            is_del = tf.equal(symbol, utils.DEL_ID)
            new_state = tf.where(is_del, state, new_state)

        if decoder.cell_type.lower() == 'lstm' and decoder.use_lstm_full_state:
            output = new_state

        return output, new_state

    def update_pos(pos, symbol, max_pos=None):
        if not decoder.pred_edits:
            return pos

        is_keep = tf.equal(symbol, utils.KEEP_ID)
        is_del = tf.equal(symbol, utils.DEL_ID)
        is_not_ins = tf.logical_or(is_keep, is_del)

        pos = beam_search.resize_like(pos, symbol)
        max_pos = beam_search.resize_like(max_pos, symbol)

        pos += tf.to_float(is_not_ins)
        if max_pos is not None:
            pos = tf.minimum(pos, tf.to_float(max_pos))
        return pos

    def generate(state, input_, context):
        if decoder.pred_use_lstm_state is False:  # for back-compatibility
            state = state[:, -cell_output_size:]

        projection_input = [state, context]

        if decoder.use_previous_word:
            projection_input.insert(1, input_)  # for back-compatibility

        output_ = tf.concat(projection_input, axis=1)

        if decoder.pred_deep_layer:
            deep_layer_size = decoder.pred_deep_layer_size or decoder.embedding_size
            if decoder.layer_norm:
                output_ = dense(output_, deep_layer_size, use_bias=False, name='deep_output')
                output_ = tf.contrib.layers.layer_norm(output_, activation_fn=tf.nn.tanh,
                                                       scope='output_layer_norm')
            else:
                output_ = dense(output_, deep_layer_size, activation=tf.tanh, use_bias=True, name='deep_output')

            if decoder.use_dropout:
                size = tf.shape(output_)[1]
                noise_shape = [1, size] if decoder.pervasive_dropout else None
                output_ = tf.nn.dropout(output_, keep_prob=decoder.deep_layer_keep_prob, noise_shape=noise_shape)
        else:
            if decoder.pred_maxout_layer:
                maxout_size = decoder.maxout_size or cell_output_size
                output_ = dense(output_, maxout_size, use_bias=True, name='maxout')
                if decoder.old_maxout:  # for back-compatibility with old models
                    output_ = tf.nn.pool(tf.expand_dims(output_, axis=2), window_shape=[2], pooling_type='MAX',
                                         padding='SAME', strides=[2])
                    output_ = tf.squeeze(output_, axis=2)
                else:
                    output_ = tf.maximum(*tf.split(output_, num_or_size_splits=2, axis=1))

            if decoder.pred_embed_proj:
                # intermediate projection to embedding size (before projecting to vocabulary size)
                # this is useful to reduce the number of parameters, and
                # to use the output embeddings for output projection (tie_embeddings parameter)
                output_ = dense(output_, decoder.embedding_size, use_bias=False, name='softmax0')

        if decoder.tie_embeddings and (decoder.pred_embed_proj or decoder.pred_deep_layer):
            bias = get_variable('softmax1/bias', shape=[decoder.vocab_size])
            output_ = tf.matmul(output_, tf.transpose(embedding)) + bias
        else:
            output_ = dense(output_, decoder.vocab_size, use_bias=True, name='softmax1')

        return output_

    if decoder.use_dropout:  # FIXME: why no pervasive dropout here?
        initial_state = tf.nn.dropout(initial_state, keep_prob=decoder.initial_state_keep_prob)

    with tf.variable_scope(scope_name):
        activation_fn = None if decoder.initial_state == 'linear' else tf.nn.tanh
        if decoder.initial_state == 'trained':
            initial_state = get_variable(shape=[cell_state_size], name='initial_state')
            initial_state = tf.tile(tf.expand_dims(initial_state, axis=0), [batch_size, 1])
        elif decoder.initial_state == 'zero':
            initial_state = tf.zeros(shape=[batch_size, cell_state_size])
        elif decoder.layer_norm:
            initial_state = dense(initial_state, cell_state_size, use_bias=False,
                                  name='initial_state_projection')
            initial_state = tf.contrib.layers.layer_norm(initial_state, activation_fn=activation_fn,
                                                         scope='initial_state_layer_norm')
        else:
            initial_state = dense(initial_state, cell_state_size, use_bias=True,
                                  name='initial_state_projection', activation=activation_fn)

    if decoder.cell_type.lower() == 'lstm' and decoder.use_lstm_full_state:
        initial_output = initial_state
    else:
        # The output is the right-most part of the state: the last layer comes last in the
        # concatenation, and for an LSTM the h part is stored after the c part.
        initial_output = initial_state[:, -cell_output_size:]

    time = tf.constant(0, dtype=tf.int32, name='time')
    outputs = tf.TensorArray(dtype=tf.float32, size=time_steps)
    samples = tf.TensorArray(dtype=tf.int64, size=time_steps)
    inputs = tf.TensorArray(dtype=tf.int64, size=time_steps).unstack(tf.to_int64(tf.transpose(decoder_inputs)))
    states = tf.TensorArray(dtype=tf.float32, size=time_steps)
    weights = tf.TensorArray(dtype=tf.float32, size=time_steps)
    attns = tf.TensorArray(dtype=tf.float32, size=time_steps)

    initial_symbol = inputs.read(0)  # first symbol is BOS
    initial_input = embed(initial_symbol)
    initial_pos = tf.zeros([batch_size], tf.float32)
    initial_weights = tf.zeros(tf.shape(attention_states[align_encoder_id])[:2])
    zero_context = tf.zeros(shape=tf.shape(attention_states[align_encoder_id][:, 0]))  # FIXME

    with tf.variable_scope('decoder_{}'.format(decoder.name)):
        initial_context, _ = look(0, initial_output, initial_input, pos=initial_pos,
                                  prev_weights=initial_weights, context=zero_context)
    initial_data = tf.concat([initial_state, initial_context, tf.expand_dims(initial_pos, axis=1),
                              initial_weights], axis=1)
    context_size = initial_context.shape[1].value

    def get_logits(state, ids, time):  # for beam-search decoding
        with tf.variable_scope('decoder_{}'.format(decoder.name)):
            state, context, pos, prev_weights = tf.split(state, [cell_state_size, context_size, 1, -1],
                                                         axis=1)
            input_ = embed(ids)

            pos = tf.squeeze(pos, axis=1)
            pos = tf.cond(tf.equal(time, 0),
                          lambda: pos,
                          lambda: update_pos(pos, ids, encoder_input_length[align_encoder_id]))

            if decoder.cell_type.lower() == 'lstm' and decoder.use_lstm_full_state:
                output = state
            else:
                # Output is always the right-most part of the state (even with multi-layer RNNs)
                # However, this only works at test time, because different dropout operations can be used
                # on state and output.
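                # The packed beam-search state is the flat concatenation
                # [cell state | attention context | source position | attention weights]; it is unpacked
                # with tf.split above and re-packed with tf.concat before returning.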
                output = state[:, -cell_output_size:]

            if decoder.conditional_rnn:
                with tf.variable_scope('conditional_1'):
                    output, state = update(state, input_)
            elif decoder.update_first:
                output, state = update(state, input_, None, ids)
            elif decoder.generate_first:
                output, state = tf.cond(tf.equal(time, 0),
                                        lambda: (output, state),
                                        lambda: update(state, input_, context, ids))

            context, new_weights = look(time, output, input_, pos=pos, prev_weights=prev_weights,
                                        context=context)

            if decoder.conditional_rnn:
                with tf.variable_scope('conditional_2'):
                    output, state = update(state, context)
            elif not decoder.generate_first:
                output, state = update(state, input_, context, ids)

            logits = generate(output, input_, context)

            pos = tf.expand_dims(pos, axis=1)
            state = tf.concat([state, context, pos, new_weights], axis=1)
            return state, logits

    def _time_step(time, input_, input_symbol, pos, state, output, outputs, states, weights, attns,
                   prev_weights, samples, context):
        if decoder.conditional_rnn:
            with tf.variable_scope('conditional_1'):
                output, state = update(state, input_)
        elif decoder.update_first:
            output, state = update(state, input_, None, input_symbol)

        context, new_weights = look(time, output, input_, pos=pos, prev_weights=prev_weights, context=context)

        if decoder.conditional_rnn:
            with tf.variable_scope('conditional_2'):
                output, state = update(state, context)
        elif not decoder.generate_first:
            output, state = update(state, input_, context, input_symbol)

        output_ = generate(output, input_, context)

        argmax = lambda: tf.argmax(output_, 1)
        target = lambda: inputs.read(time + 1)
        softmax = lambda: tf.squeeze(tf.multinomial(tf.log(tf.nn.softmax(output_)), num_samples=1),
                                     axis=1)

        use_target = tf.logical_and(time < time_steps - 1, tf.random_uniform([]) >= feed_previous)
        predicted_symbol = tf.case([
            (use_target, target),
            (tf.logical_not(feed_argmax), softmax)],
            default=argmax)  # default case is useful for beam-search

        predicted_symbol.set_shape([None])
        predicted_symbol = tf.stop_gradient(predicted_symbol)

        input_ = embed(predicted_symbol)
        pos = update_pos(pos, predicted_symbol, encoder_input_length[align_encoder_id])

        samples = samples.write(time, predicted_symbol)
        attns = attns.write(time, context)
        weights = weights.write(time, new_weights)
        states = states.write(time, state)
        outputs = outputs.write(time, output_)

        if not decoder.conditional_rnn and not decoder.update_first and decoder.generate_first:
            output, state = update(state, input_, context, predicted_symbol)

        return (time + 1, input_, predicted_symbol, pos, state, output, outputs, states, weights, attns,
                new_weights, samples, context)

    with tf.variable_scope('decoder_{}'.format(decoder.name)):
        # loop_vars must be in the same order as _time_step's arguments
        _, _, _, new_pos, new_state, _, outputs, states, weights, attns, new_weights, samples, _ = tf.while_loop(
            cond=lambda time, *_: time < time_steps,
            body=_time_step,
            loop_vars=(time, initial_input, initial_symbol, initial_pos, initial_state, initial_output,
                       outputs, states, weights, attns, initial_weights, samples, initial_context),
            parallel_iterations=decoder.parallel_iterations,
            swap_memory=decoder.swap_memory)

    outputs = outputs.stack()
    weights = weights.stack()  # output time, batch size, input time
    states = states.stack()
    attns = attns.stack()
    samples = samples.stack()

    # put batch_size as first dimension
    outputs = tf.transpose(outputs, perm=(1, 0, 2))
    weights = tf.transpose(weights, perm=(1, 0, 2))
    states = tf.transpose(states, perm=(1, 0, 2))
    attns = tf.transpose(attns, perm=(1, 0, 2))
    samples = tf.transpose(samples)

    return outputs, weights, states, attns, samples, get_logits, initial_data


def encoder_decoder(encoders, decoders, encoder_inputs, targets, feed_previous, align_encoder_id=0,
                    encoder_input_length=None, feed_argmax=True, rewards=None, use_baseline=True,
                    training=True, global_step=None, monotonicity_weight=None, monotonicity_dist=None,
                    monotonicity_decay=None, **kwargs):
    decoder = decoders[0]
    targets = targets[0]  # single decoder

    if encoder_input_length is None:
        encoder_input_length = []
        for encoder_inputs_ in encoder_inputs:
            mask = get_weights(encoder_inputs_, utils.EOS_ID, include_first_eos=True)
            encoder_input_length.append(tf.to_int32(tf.reduce_sum(mask, axis=1)))

    parameters = dict(encoders=encoders, decoder=decoder, encoder_inputs=encoder_inputs,
                      feed_argmax=feed_argmax, training=training)

    attention_states, encoder_state, encoder_input_length = multi_encoder(
        encoder_input_length=encoder_input_length, **parameters)

    outputs, attention_weights, _, _, samples, beam_fun, initial_data = attention_decoder(
        attention_states=attention_states, initial_state=encoder_state, feed_previous=feed_previous,
        decoder_inputs=targets[:, :-1], align_encoder_id=align_encoder_id,
        encoder_input_length=encoder_input_length, **parameters
    )

    if use_baseline:
        baseline_rewards = reinforce_baseline(outputs, rewards)  # FIXME: use logits or decoder outputs?
        baseline_weights = get_weights(samples, utils.EOS_ID, include_first_eos=False)
        baseline_loss_ = baseline_loss(rewards=baseline_rewards, weights=baseline_weights)
    else:
        baseline_rewards = rewards
        baseline_loss_ = tf.constant(0.0)

    reinforce_weights = get_weights(samples, utils.EOS_ID, include_first_eos=True)
    reinforce_loss = sequence_loss(logits=outputs, targets=samples, weights=reinforce_weights,
                                   rewards=baseline_rewards)

    trg_mask = get_weights(targets[:, 1:], utils.EOS_ID, include_first_eos=True)
    xent_loss = sequence_loss(logits=outputs, targets=targets[:, 1:], weights=trg_mask)

    if monotonicity_weight:
        monotonicity_dist = monotonicity_dist or 1.0

        batch_size = tf.shape(attention_weights)[0]
        src_len = tf.shape(attention_weights)[2]
        trg_len = tf.shape(attention_weights)[1]

        src_indices = tf.tile(tf.reshape(tf.range(src_len), shape=[1, 1, src_len]), [batch_size, trg_len, 1])
        trg_indices = tf.tile(tf.reshape(tf.range(trg_len), shape=[1, trg_len, 1]), [batch_size, 1, src_len])

        source_length = encoder_input_length[0]
        target_length = tf.to_int32(tf.reduce_sum(trg_mask, axis=1))

        true_src_len = tf.reshape(source_length, shape=[batch_size, 1, 1]) - 1
        true_trg_len = tf.reshape(target_length, shape=[batch_size, 1, 1]) - 1

        src_mask = tf.to_float(tf.sequence_mask(source_length, maxlen=src_len))
        mask = tf.matmul(tf.expand_dims(trg_mask, axis=2), tf.expand_dims(src_mask, axis=1))

        monotonous = tf.sqrt(((true_trg_len * src_indices - true_src_len * trg_indices) ** 2)
                             / (true_trg_len**2 + true_src_len**2))
        monotonous = tf.to_float(monotonous < monotonicity_dist)
        non_monotonous = (1 - monotonous) * mask
        attn_loss = tf.reduce_sum(attention_weights * tf.stop_gradient(non_monotonous)) / tf.to_float(batch_size)

        if monotonicity_decay:
            decay = tf.stop_gradient(0.5 ** (tf.to_float(global_step) / monotonicity_decay))
        else:
            decay = 1.0

        xent_loss += monotonicity_weight * decay * attn_loss

    losses = [xent_loss, reinforce_loss, baseline_loss_]
    return losses, [outputs], encoder_state, attention_states, attention_weights, samples, beam_fun, initial_data


def reconstruction_encoder_decoder(encoders, decoders, encoder_inputs, targets, feed_previous,
                                   encoder_input_length=None, training=True, reconstruction_weight=1.0,
                                   reconstruction_attn_weight=0.05, **kwargs):
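    # Translate-then-reconstruct: a first decoder generates the target sequence from a single encoder,
    # then a second decoder tries to re-generate the source sentence from the first decoder's states.
    # The reconstruction cross-entropy and an attention agreement penalty (the product of the two
    # attention matrices should be close to the identity on non-padding positions) are added to the loss.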
    encoders = encoders[:1]

    if encoder_input_length is None:
        weights = get_weights(encoder_inputs[0], utils.EOS_ID, include_first_eos=True)
        encoder_input_length = [tf.to_int32(tf.reduce_sum(weights, axis=1))]

    attention_states, encoder_state, encoder_input_length = multi_encoder(
        encoder_input_length=encoder_input_length, encoders=encoders, encoder_inputs=encoder_inputs,
        training=training)

    outputs, attention_weights, states, _, samples, beam_fun, initial_data = attention_decoder(
        attention_states=attention_states, initial_state=encoder_state, feed_previous=feed_previous,
        decoder_inputs=targets[0][:, :-1], encoder_input_length=encoder_input_length, decoder=decoders[0],
        training=training, encoders=encoders
    )

    target_weights = get_weights(targets[0][:, 1:], utils.EOS_ID, include_first_eos=True)
    target_length = [tf.to_int32(tf.reduce_sum(target_weights, axis=1))]

    xent_loss = sequence_loss(logits=outputs, targets=targets[0][:, 1:], weights=target_weights)

    reconstructed_outputs, reconstructed_weights, _, _, _, _, _ = attention_decoder(
        attention_states=[states], initial_state=states[:, -1, :], feed_previous=feed_previous,
        decoder_inputs=targets[1][:, :-1], encoder_input_length=target_length, decoder=decoders[1],
        training=training, encoders=decoders[:1]
    )

    target_weights = get_weights(targets[1][:, 1:], utils.EOS_ID, include_first_eos=True)
    xent_loss += reconstruction_weight * sequence_loss(logits=reconstructed_outputs, targets=targets[1][:, 1:],
                                                       weights=target_weights)

    max_src_len = tf.shape(reconstructed_weights)[1]
    batch_size = tf.shape(reconstructed_weights)[0]

    attn_loss = tf.matmul(reconstructed_weights, attention_weights) - tf.eye(max_src_len)

    src_mask = tf.sequence_mask(encoder_input_length[0], maxlen=max_src_len, dtype=tf.float32)
    src_mask = tf.einsum('ij,ik->ijk', src_mask, src_mask)
    attn_loss *= tf.to_float(src_mask)  # don't take padding words into account

    attn_loss = tf.norm(attn_loss) / tf.to_float(batch_size)
    xent_loss += reconstruction_attn_weight * attn_loss

    attention_weights = [attention_weights, reconstructed_weights]

    losses = [xent_loss, None, None]
    return losses, [outputs], encoder_state, attention_states, attention_weights, samples, beam_fun, initial_data


def chained_encoder_decoder(encoders, decoders, encoder_inputs, targets, feed_previous,
                            chaining_strategy=None, align_encoder_id=0, chaining_non_linearity=False,
                            chaining_loss_ratio=1.0, chaining_stop_gradient=False, training=True, **kwargs):
    decoder = decoders[0]
    targets = targets[0]  # single decoder

    assert len(encoders) == 2

    encoder_input_length = []
    input_weights = []
    for encoder_inputs_ in encoder_inputs:
        weights = get_weights(encoder_inputs_, utils.EOS_ID, include_first_eos=True)
        input_weights.append(weights)
        encoder_input_length.append(tf.to_int32(tf.reduce_sum(weights, axis=1)))

    target_weights = get_weights(targets[:, 1:], utils.EOS_ID, include_first_eos=True)

    parameters = dict(encoders=encoders[1:], decoder=encoders[0], training=training)

    attention_states, encoder_state, encoder_input_length[1:] = multi_encoder(
        encoder_inputs[1:], encoder_input_length=encoder_input_length[1:], **parameters)

    decoder_inputs = encoder_inputs[0][:, :-1]
    batch_size = tf.shape(decoder_inputs)[0]

    pad = tf.ones(shape=tf.stack([batch_size, 1]), dtype=tf.int32) * utils.BOS_ID
    decoder_inputs = tf.concat([pad, decoder_inputs], axis=1)

    outputs, _, states, attns, _, _, _ = attention_decoder(
        attention_states=attention_states, initial_state=encoder_state, decoder_inputs=decoder_inputs,
        encoder_input_length=encoder_input_length[1:], **parameters
    )
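    # The decoder pass above generates the intermediate sequence (encoder_inputs[0], e.g. the
    # machine-translated sentence to post-edit) from the remaining encoder(s). Its cross-entropy below
    # is the auxiliary chaining loss, and its states and attention vectors can be injected into the
    # second pass, depending on chaining_strategy.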
    chaining_loss = sequence_loss(logits=outputs, targets=encoder_inputs[0], weights=input_weights[0])

    if 'lstm' in decoder.cell_type.lower():
        size = states.get_shape()[2].value
        decoder_outputs = states[:, :, size // 2:]
    else:
        decoder_outputs = states

    if chaining_strategy == 'share_states':
        other_inputs = states
    elif chaining_strategy == 'share_outputs':
        other_inputs = decoder_outputs
    else:
        other_inputs = None

    if other_inputs is not None and chaining_stop_gradient:
        other_inputs = tf.stop_gradient(other_inputs)

    parameters = dict(encoders=encoders[:1], decoder=decoder, encoder_inputs=encoder_inputs[:1],
                      other_inputs=other_inputs, training=training)

    attention_states, encoder_state, encoder_input_length[:1] = multi_encoder(
        encoder_input_length=encoder_input_length[:1], **parameters)

    if chaining_stop_gradient:
        attns = tf.stop_gradient(attns)
        states = tf.stop_gradient(states)
        decoder_outputs = tf.stop_gradient(decoder_outputs)

    if chaining_strategy == 'concat_attns':
        attention_states[0] = tf.concat([attention_states[0], attns], axis=2)
    elif chaining_strategy == 'concat_states':
        attention_states[0] = tf.concat([attention_states[0], states], axis=2)
    elif chaining_strategy == 'sum_attns':
        attention_states[0] += attns
    elif chaining_strategy in ('map_attns', 'map_states', 'map_outputs'):
        if chaining_strategy == 'map_attns':
            x = attns
        elif chaining_strategy == 'map_outputs':
            x = decoder_outputs
        else:
            x = states

        shape = [x.get_shape()[-1], attention_states[0].get_shape()[-1]]

        w = tf.get_variable("map_attns/matrix", shape=shape)
        b = tf.get_variable("map_attns/bias", shape=shape[-1:])

        x = tf.einsum('ijk,kl->ijl', x, w) + b

        if chaining_non_linearity:
            x = tf.nn.tanh(x)

        attention_states[0] += x

    outputs, attention_weights, _, _, samples, beam_fun, initial_data = attention_decoder(
        attention_states=attention_states, initial_state=encoder_state, feed_previous=feed_previous,
        decoder_inputs=targets[:, :-1], align_encoder_id=align_encoder_id,
        encoder_input_length=encoder_input_length[:1], **parameters
    )

    xent_loss = sequence_loss(logits=outputs, targets=targets[:, 1:], weights=target_weights)

    if chaining_loss is not None and chaining_loss_ratio:
        xent_loss += chaining_loss_ratio * chaining_loss

    losses = [xent_loss, None, None]
    return losses, [outputs], encoder_state, attention_states, attention_weights, samples, beam_fun, initial_data


def softmax(logits, dim=-1, mask=None):
    e = tf.exp(logits)
    if mask is not None:
        e *= mask
    return e / tf.clip_by_value(tf.reduce_sum(e, axis=dim, keep_dims=True), 10e-37, 10e+37)


def sequence_loss(logits, targets, weights, average_across_timesteps=False, average_across_batch=True,
                  rewards=None):
    batch_size = tf.shape(targets)[0]
    time_steps = tf.shape(targets)[1]

    logits_ = tf.reshape(logits, tf.stack([time_steps * batch_size, logits.get_shape()[2].value]))
    targets_ = tf.reshape(targets, tf.stack([time_steps * batch_size]))

    crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_, labels=targets_)
    crossent = tf.reshape(crossent, tf.stack([batch_size, time_steps]))

    if rewards is not None:
        crossent *= tf.stop_gradient(rewards)

    log_perp = tf.reduce_sum(crossent * weights, axis=1)

    if average_across_timesteps:
        total_size = tf.reduce_sum(weights, axis=1)
        total_size += 1e-12  # just to avoid division by 0 for all-0 weights
        log_perp /= total_size

    cost = tf.reduce_sum(log_perp)

    if average_across_batch:
        return cost / tf.to_float(batch_size)
    else:
        return cost


def reinforce_baseline(decoder_states, reward):
    """
    Center the reward by computing a baseline reward over decoder states.
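
    The baseline is a learned scalar projection of the (gradient-stopped) decoder states; subtracting it
    from the raw reward reduces the variance of the REINFORCE gradient estimate.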
    :param decoder_states: internal states of the decoder, tensor of shape (batch_size, time_steps, state_size)
    :param reward: reward for each time step, tensor of shape (batch_size, time_steps)
    :return: reward - computed baseline, tensor of shape (batch_size, time_steps)
    """
    # batch_size = tf.shape(decoder_states)[0]
    # time_steps = tf.shape(decoder_states)[1]
    # state_size = decoder_states.get_shape()[2]
    # states = tf.reshape(decoder_states, shape=tf.stack([batch_size * time_steps, state_size]))

    baseline = dense(tf.stop_gradient(decoder_states), units=1, activation=None, name='reward_baseline',
                     kernel_initializer=tf.constant_initializer(0.01))
    baseline = tf.squeeze(baseline, axis=2)

    # baseline = tf.reshape(baseline, shape=tf.stack([batch_size, time_steps]))
    return reward - baseline


def baseline_loss(rewards, weights, average_across_timesteps=False, average_across_batch=True):
    """
    :param rewards: tensor of shape (batch_size, time_steps)
    :param weights: tensor of shape (batch_size, time_steps)
    """
    batch_size = tf.shape(rewards)[0]

    cost = rewards ** 2
    cost = tf.reduce_sum(cost * weights, axis=1)

    if average_across_timesteps:
        total_size = tf.reduce_sum(weights, axis=1)
        total_size += 1e-12  # just to avoid division by 0 for all-0 weights
        cost /= total_size

    cost = tf.reduce_sum(cost)

    if average_across_batch:
        cost /= tf.to_float(batch_size)

    return cost
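

# A minimal smoke test for the `softmax` and `sequence_loss` helpers above -- a sketch only, assuming
# a TF 1.x runtime; the demo name and the toy shapes/values are illustrative:
def _demo_masked_softmax_and_loss():
    import numpy as np

    # toy logits for a batch of 2 sequences of 5 steps over a vocabulary of 7 symbols
    logits = tf.constant(np.random.rand(2, 5, 7), dtype=tf.float32)
    targets = tf.constant(np.random.randint(0, 7, size=(2, 5)), dtype=tf.int64)
    # zero weights mark padding: the first sequence only has 3 real tokens
    weights = tf.constant([[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]], dtype=tf.float32)
    loss = sequence_loss(logits=logits, targets=targets, weights=weights)

    # masked softmax: padded positions get (near-)zero probability, the rest sums to ~1
    mask = tf.constant([[1, 1, 1, 0, 0]], dtype=tf.float32)
    probs = softmax(tf.constant(np.random.rand(1, 5), dtype=tf.float32), mask=mask)

    with tf.Session() as sess:
        loss_, probs_ = sess.run([loss, probs])
        print(loss_, probs_.sum(axis=-1))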
[ "tensorflow.convert_to_tensor", "tensorflow.device", "tensorflow.get_variable", "tensorflow.concat", "tensorflow.zeros", "tensorflow.reduce_sum", "tensorflow.stack", "tensorflow.equal", "tensorflow.minimum", "tensorflow.nn.bidirectional_dynamic_rnn", "tensorflow.tanh", "tensorflow.where", "tensorflow.nn.conv2d", "tensorflow.while_loop", "tensorflow.random_uniform_initializer", "tensorflow.logical_or", "tensorflow.floor", "tensorflow.squeeze", "tensorflow.layers.dense", "tensorflow.stop_gradient", "tensorflow.contrib.rnn.MultiRNNCell", "tensorflow.to_float", "tensorflow.ceil", "tensorflow.argmax", "tensorflow.random_normal_initializer", "tensorflow.logical_not", "tensorflow.tile", "tensorflow.nn.dropout", "tensorflow.matmul", "tensorflow.norm", "tensorflow.nn.sigmoid", "tensorflow.gather_nd", "tensorflow.nn.convolution", "tensorflow.shape", "tensorflow.TensorArray", "tensorflow.nn.softmax", "tensorflow.exp", "tensorflow.nn.tanh", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.split", "tensorflow.sequence_mask", "tensorflow.nn.embedding_lookup", "tensorflow.nn.relu", "tensorflow.reduce_max", "tensorflow.truediv", "tensorflow.constant", "tensorflow.transpose", "tensorflow.range", "tensorflow.nn.pool", "tensorflow.contrib.rnn.DropoutWrapper", "tensorflow.maximum", "tensorflow.contrib.rnn.BasicLSTMCell", "tensorflow.reshape", "tensorflow.multiply", "tensorflow.expand_dims", "tensorflow.eye", "tensorflow.einsum", "tensorflow.constant_initializer", "tensorflow.contrib.layers.layer_norm", "tensorflow.log", "tensorflow.variable_scope", "tensorflow.sqrt", "tensorflow.get_variable_scope", "tensorflow.random_uniform" ]
translate/models.py
[(364, 'tensorflow.concat', 'tf.concat', (['encoder_states', '(1)'], {}), True, 'import tensorflow as tf\n'), (384, 'tensorflow.expand_dims', 'tf.expand_dims', (['y'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (458, 'tensorflow.to_float', 'tf.to_float', (['weights'], {}), True, 'import tensorflow as tf\n'), (562, 'tensorflow.concat', 'tf.concat', (['context_vectors'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (625, 'translate.rnn.get_state_size', 'get_state_size', (['decoder.cell_type', 'decoder.cell_size', 'decoder.lstm_proj_size', 'decoder.layers'], {}), False, 'from translate.rnn import get_state_size\n'), (646, 'tensorflow.shape', 'tf.shape', (['decoder_inputs'], {}), True, 'import tensorflow as tf\n'), (834, 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'tf.int32', 'name': '"""time"""'}), True, 'import tensorflow as tf\n'), (835, 'tensorflow.TensorArray', 'tf.TensorArray', ([], {'dtype': 'tf.float32', 'size': 'time_steps'}), True, 'import tensorflow as tf\n'), (836, 'tensorflow.TensorArray', 'tf.TensorArray', ([], {'dtype': 'tf.int64', 'size': 'time_steps'}), True, 'import tensorflow as tf\n'), (839, 'tensorflow.TensorArray', 'tf.TensorArray', ([], {'dtype': 'tf.float32', 'size': 'time_steps'}), True, 'import tensorflow as tf\n'), (840, 'tensorflow.TensorArray', 'tf.TensorArray', ([], {'dtype': 'tf.float32', 'size': 'time_steps'}), True, 'import tensorflow as tf\n'), (841, 'tensorflow.TensorArray', 'tf.TensorArray', ([], {'dtype': 'tf.float32', 'size': 'time_steps'}), True, 'import tensorflow as tf\n'), (845, 'tensorflow.zeros', 'tf.zeros', (['[batch_size]', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (961, 'tensorflow.transpose', 'tf.transpose', (['outputs'], {'perm': '(1, 0, 2)'}), True, 'import tensorflow as tf\n'), (962, 'tensorflow.transpose', 'tf.transpose', (['weights'], {'perm': '(1, 0, 2)'}), True, 'import tensorflow as tf\n'), (963, 'tensorflow.transpose', 'tf.transpose', (['states'], {'perm': '(1, 0, 2)'}), True, 'import tensorflow as tf\n'), (964, 'tensorflow.transpose', 'tf.transpose', (['attns'], {'perm': '(1, 0, 2)'}), True, 'import tensorflow as tf\n'), (965, 'tensorflow.transpose', 'tf.transpose', (['samples'], {}), True, 'import tensorflow as tf\n'), (1003, 'translate.beam_search.get_weights', 'get_weights', (['samples', 'utils.EOS_ID'], {'include_first_eos': '(True)'}), False, 'from translate.beam_search import get_weights\n'), (1007, 'translate.beam_search.get_weights', 'get_weights', (['targets[:, 1:]', 'utils.EOS_ID'], {'include_first_eos': '(True)'}), False, 'from translate.beam_search import get_weights\n'), (1065, 'translate.beam_search.get_weights', 'get_weights', (['targets[0][:, 1:]', 'utils.EOS_ID'], {'include_first_eos': '(True)'}), False, 'from translate.beam_search import get_weights\n'), (1076, 'translate.beam_search.get_weights', 'get_weights', (['targets[1][:, 1:]', 'utils.EOS_ID'], {'include_first_eos': '(True)'}), False, 'from translate.beam_search import get_weights\n'), (1085, 'tensorflow.sequence_mask', 'tf.sequence_mask', (['encoder_input_length[0]'], {'maxlen': 'max_src_len', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (1086, 'tensorflow.einsum', 'tf.einsum', (['"""ij,ik->ijk"""', 'src_mask', 'src_mask'], {}), True, 'import tensorflow as tf\n'), (1087, 'tensorflow.to_float', 'tf.to_float', (['src_mask'], {}), True, 'import tensorflow as tf\n'), (1112, 'translate.beam_search.get_weights', 'get_weights', (['targets[:, 1:]', 'utils.EOS_ID'], {'include_first_eos': '(True)'}), False, 
'from translate.beam_search import get_weights\n'), (1123, 'tensorflow.concat', 'tf.concat', (['[pad, decoder_inputs]'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (1203, 'tensorflow.exp', 'tf.exp', (['logits'], {}), True, 'import tensorflow as tf\n'), (1217, 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'logits': 'logits_', 'labels': 'targets_'}), True, 'import tensorflow as tf\n'), (1223, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(crossent * weights)'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (1230, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['log_perp'], {}), True, 'import tensorflow as tf\n'), (1253, 'tensorflow.squeeze', 'tf.squeeze', (['baseline'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (1267, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(cost * weights)'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (1274, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['cost'], {}), True, 'import tensorflow as tf\n'), (52, 'tensorflow.split', 'tf.split', ([], {'value': 'state', 'num_or_size_splits': 'self.num_splits', 'axis': '(1)'}), True, 'import tensorflow as tf\n'), (106, 'translate.rnn.get_state_size', 'get_state_size', (['encoder.cell_type', 'encoder.cell_size', 'encoder.lstm_proj_size'], {}), False, 'from translate.rnn import get_state_size\n'), (369, 'tensorflow.shape', 'tf.shape', (['hidden'], {}), True, 'import tensorflow as tf\n'), (370, 'tensorflow.shape', 'tf.shape', (['hidden'], {}), True, 'import tensorflow as tf\n'), (374, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['state'], {'keep_prob': 'encoder.attn_keep_prob', 'noise_shape': 'state_noise_shape'}), True, 'import tensorflow as tf\n'), (376, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['hidden'], {'keep_prob': 'encoder.attn_keep_prob', 'noise_shape': 'hidden_noise_shape'}), True, 'import tensorflow as tf\n'), (381, 'tensorflow.einsum', 'tf.einsum', (['"""ijk,ik->ij"""', 'hidden', 'state'], {}), True, 'import tensorflow as tf\n'), (387, 'tensorflow.contrib.layers.layer_norm', 'tf.contrib.layers.layer_norm', (['y'], {'scope': '"""layer_norm_state"""'}), True, 'import tensorflow as tf\n'), (388, 'tensorflow.contrib.layers.layer_norm', 'tf.contrib.layers.layer_norm', (['hidden'], {'center': '(False)', 'scope': '"""layer_norm_hidden"""'}), True, 'import tensorflow as tf\n'), (397, 'tensorflow.log', 'tf.log', (['(1 + pos_feats)'], {}), True, 'import tensorflow as tf\n'), (405, 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['prev_weights', 'filter_', '[1, 1, 1, 1]', '"""SAME"""'], {}), True, 'import tensorflow as tf\n'), (406, 'tensorflow.squeeze', 'tf.squeeze', (['conv'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (441, 'tensorflow.shape', 'tf.shape', (['state'], {}), True, 'import tensorflow as tf\n'), (449, 'tensorflow.expand_dims', 'tf.expand_dims', (['encoder_input_length'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (451, 'tensorflow.to_float', 'tf.to_float', (['mask'], {}), True, 'import tensorflow as tf\n'), (465, 'tensorflow.shape', 'tf.shape', (['state'], {}), True, 'import tensorflow as tf\n'), (466, 'tensorflow.shape', 'tf.shape', (['hidden_states'], {}), True, 'import tensorflow as tf\n'), (469, 'tensorflow.concat', 'tf.concat', (['[state, context]'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (582, 'translate.beam_search.resize_like', 'beam_search.resize_like', (['hidden', 'state'], {}), False, 'from translate import utils, beam_search\n'), (583, 'translate.beam_search.resize_like', 
'beam_search.resize_like', (['input_length', 'state'], {}), False, 'from translate import utils, beam_search\n'), (594, 'tensorflow.concat', 'tf.concat', (['attns'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (643, 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), True, 'import tensorflow as tf\n'), (654, 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embedding', 'input_'], {}), True, 'import tensorflow as tf\n'), (752, 'tensorflow.equal', 'tf.equal', (['symbol', 'utils.KEEP_ID'], {}), True, 'import tensorflow as tf\n'), (753, 'tensorflow.equal', 'tf.equal', (['symbol', 'utils.DEL_ID'], {}), True, 'import tensorflow as tf\n'), (754, 'tensorflow.logical_or', 'tf.logical_or', (['is_keep', 'is_del'], {}), True, 'import tensorflow as tf\n'), (756, 'translate.beam_search.resize_like', 'beam_search.resize_like', (['pos', 'symbol'], {}), False, 'from translate import utils, beam_search\n'), (757, 'translate.beam_search.resize_like', 'beam_search.resize_like', (['max_pos', 'symbol'], {}), False, 'from translate import utils, beam_search\n'), (759, 'tensorflow.to_float', 'tf.to_float', (['is_not_ins'], {}), True, 'import tensorflow as tf\n'), (772, 'tensorflow.concat', 'tf.concat', (['projection_input'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (811, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['initial_state'], {'keep_prob': 'decoder.initial_state_keep_prob'}), True, 'import tensorflow as tf\n'), (813, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope_name'], {}), True, 'import tensorflow as tf\n'), (928, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['predicted_symbol'], {}), True, 'import tensorflow as tf\n'), (946, 'tensorflow.while_loop', 'tf.while_loop', ([], {'cond': '(lambda time, *_: time < time_steps)', 'body': '_time_step', 'loop_vars': '(time, initial_input, initial_symbol, initial_pos, initial_state,\n initial_output, outputs, weights, states, attns, initial_weights,\n samples, initial_context)', 'parallel_iterations': 'decoder.parallel_iterations', 'swap_memory': 'decoder.swap_memory'}), True, 'import tensorflow as tf\n'), (997, 'translate.beam_search.get_weights', 'get_weights', (['samples', 'utils.EOS_ID'], {'include_first_eos': '(False)'}), False, 'from translate.beam_search import get_weights\n'), (1001, 'tensorflow.constant', 'tf.constant', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (1028, 'tensorflow.sqrt', 'tf.sqrt', (['((true_trg_len * src_indices - true_src_len * trg_indices) ** 2 / (\n true_trg_len ** 2 + true_src_len ** 2))'], {}), True, 'import tensorflow as tf\n'), (1030, 'tensorflow.to_float', 'tf.to_float', (['(monotonous < monotonicity_dist)'], {}), True, 'import tensorflow as tf\n'), (1052, 'translate.beam_search.get_weights', 'get_weights', (['encoder_inputs[0]', 'utils.EOS_ID'], {'include_first_eos': '(True)'}), False, 'from translate.beam_search import get_weights\n'), (1080, 'tensorflow.shape', 'tf.shape', (['reconstructed_weights'], {}), True, 'import tensorflow as tf\n'), (1081, 'tensorflow.shape', 'tf.shape', (['reconstructed_weights'], {}), True, 'import tensorflow as tf\n'), (1083, 'tensorflow.matmul', 'tf.matmul', (['reconstructed_weights', 'attention_weights'], {}), True, 'import tensorflow as tf\n'), (1083, 'tensorflow.eye', 'tf.eye', (['max_src_len'], {}), True, 'import tensorflow as tf\n'), (1089, 'tensorflow.norm', 'tf.norm', (['attn_loss'], {}), True, 'import tensorflow as tf\n'), (1089, 'tensorflow.to_float', 'tf.to_float', (['batch_size'], {}), True, 'import tensorflow as tf\n'), 
(1108, 'translate.beam_search.get_weights', 'get_weights', (['encoder_inputs_', 'utils.EOS_ID'], {'include_first_eos': '(True)'}), False, 'from translate.beam_search import get_weights\n'), (1120, 'tensorflow.shape', 'tf.shape', (['decoder_inputs'], {}), True, 'import tensorflow as tf\n'), (1146, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['other_inputs'], {}), True, 'import tensorflow as tf\n'), (1155, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['attns'], {}), True, 'import tensorflow as tf\n'), (1156, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['states'], {}), True, 'import tensorflow as tf\n'), (1157, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['decoder_outputs'], {}), True, 'import tensorflow as tf\n'), (1160, 'tensorflow.concat', 'tf.concat', (['[attention_states[0], attns]'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (1211, 'tensorflow.shape', 'tf.shape', (['targets'], {}), True, 'import tensorflow as tf\n'), (1212, 'tensorflow.shape', 'tf.shape', (['targets'], {}), True, 'import tensorflow as tf\n'), (1215, 'tensorflow.stack', 'tf.stack', (['[time_steps * batch_size]'], {}), True, 'import tensorflow as tf\n'), (1218, 'tensorflow.stack', 'tf.stack', (['[batch_size, time_steps]'], {}), True, 'import tensorflow as tf\n'), (1221, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['rewards'], {}), True, 'import tensorflow as tf\n'), (1226, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['weights'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (1251, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['decoder_states'], {}), True, 'import tensorflow as tf\n'), (1264, 'tensorflow.shape', 'tf.shape', (['rewards'], {}), True, 'import tensorflow as tf\n'), (1270, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['weights'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (1277, 'tensorflow.to_float', 'tf.to_float', (['batch_size'], {}), True, 'import tensorflow as tf\n'), (54, 'tensorflow.concat', 'tf.concat', (['new_state', '(1)'], {}), True, 'import tensorflow as tf\n'), (88, 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), True, 'import tensorflow as tf\n'), (329, 'tensorflow.gather_nd', 'tf.gather_nd', (['encoder_outputs_[:, :, :cell_output_size]', 'indices'], {}), True, 'import tensorflow as tf\n'), (394, 'tensorflow.reshape', 'tf.reshape', (['time', '[1, 1]'], {}), True, 'import tensorflow as tf\n'), (395, 'tensorflow.expand_dims', 'tf.expand_dims', (['input_length'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (396, 'tensorflow.stack', 'tf.stack', (['[src_pos, trg_pos, src_len]'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (404, 'tensorflow.stack', 'tf.stack', (['[batch_size, time_steps, 1, 1]'], {}), True, 'import tensorflow as tf\n'), (411, 'tensorflow.tanh', 'tf.tanh', (['y'], {}), True, 'import tensorflow as tf\n'), (417, 'tensorflow.concat', 'tf.concat', (['[state, context]'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (442, 'tensorflow.stack', 'tf.stack', (['[batch_size, 0]'], {}), True, 'import tensorflow as tf\n'), (452, 'tensorflow.expand_dims', 'tf.expand_dims', (['weights'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (457, 'tensorflow.shape', 'tf.shape', (['hidden_states'], {}), True, 'import tensorflow as tf\n'), (460, 'tensorflow.expand_dims', 'tf.expand_dims', (['weights'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (474, 'tensorflow.expand_dims', 'tf.expand_dims', (['encoder_input_length'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (477, 'tensorflow.reshape', 
'tf.reshape', (['pos', '[-1, 1]'], {}), True, 'import tensorflow as tf\n'), (478, 'tensorflow.minimum', 'tf.minimum', (['pos', '(encoder_input_length - 1)'], {}), True, 'import tensorflow as tf\n'), (495, 'tensorflow.concat', 'tf.concat', (['weighted_average'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (566, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (592, 'tensorflow.stack', 'tf.stack', (['attns'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (639, 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', ([], {'minval': '(-weight_scale)', 'maxval': 'weight_scale'}), True, 'import tensorflow as tf\n'), (641, 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'weight_scale'}), True, 'import tensorflow as tf\n'), (658, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['embedded_input'], {'keep_prob': 'decoder.word_keep_prob', 'noise_shape': 'noise_shape'}), True, 'import tensorflow as tf\n'), (662, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['embedded_input'], {'keep_prob': 'decoder.embedding_keep_prob', 'noise_shape': 'noise_shape'}), True, 'import tensorflow as tf\n'), (704, 'tensorflow.concat', 'tf.concat', (['[state, input_]'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (707, 'tensorflow.concat', 'tf.concat', (['[state, context]'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (729, 'tensorflow.concat', 'tf.concat', (['[input_, context]'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (732, 'translate.rnn.CellInitializer', 'CellInitializer', (['decoder.cell_size'], {}), False, 'from translate.rnn import stack_bidirectional_dynamic_rnn, CellInitializer, GRUCell, DropoutGRUCell, PLSTM\n'), (740, 'tensorflow.equal', 'tf.equal', (['symbol', 'utils.DEL_ID'], {}), True, 'import tensorflow as tf\n'), (741, 'tensorflow.where', 'tf.where', (['is_del', 'state', 'new_state'], {}), True, 'import tensorflow as tf\n'), (837, 'tensorflow.TensorArray', 'tf.TensorArray', ([], {'dtype': 'tf.int64', 'size': 'time_steps'}), True, 'import tensorflow as tf\n'), (837, 'tensorflow.transpose', 'tf.transpose', (['decoder_inputs'], {}), True, 'import tensorflow as tf\n'), (846, 'tensorflow.shape', 'tf.shape', (['attention_states[align_encoder_id]'], {}), True, 'import tensorflow as tf\n'), (847, 'tensorflow.shape', 'tf.shape', (['attention_states[align_encoder_id][:, (0)]'], {}), True, 'import tensorflow as tf\n'), (852, 'tensorflow.expand_dims', 'tf.expand_dims', (['initial_pos'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (858, 'tensorflow.split', 'tf.split', (['state', '[cell_state_size, context_size, 1, -1]'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (861, 'tensorflow.squeeze', 'tf.squeeze', (['pos'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (894, 'tensorflow.expand_dims', 'tf.expand_dims', (['pos'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (895, 'tensorflow.concat', 'tf.concat', (['[state, context, pos, new_weights]'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (916, 'tensorflow.argmax', 'tf.argmax', (['output_', '(1)'], {}), True, 'import tensorflow as tf\n'), (980, 'translate.beam_search.get_weights', 'get_weights', (['encoder_inputs_', 'utils.EOS_ID'], {'include_first_eos': '(True)'}), False, 'from translate.beam_search import get_weights\n'), (1013, 'tensorflow.shape', 'tf.shape', (['attention_weights'], {}), True, 'import tensorflow as tf\n'), (1014, 'tensorflow.shape', 'tf.shape', 
(['attention_weights'], {}), True, 'import tensorflow as tf\n'), (1015, 'tensorflow.shape', 'tf.shape', (['attention_weights'], {}), True, 'import tensorflow as tf\n'), (1021, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['trg_mask'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (1022, 'tensorflow.reshape', 'tf.reshape', (['source_length'], {'shape': '[batch_size, 1, 1]'}), True, 'import tensorflow as tf\n'), (1023, 'tensorflow.reshape', 'tf.reshape', (['target_length'], {'shape': '[batch_size, 1, 1]'}), True, 'import tensorflow as tf\n'), (1025, 'tensorflow.sequence_mask', 'tf.sequence_mask', (['source_length'], {'maxlen': 'src_len'}), True, 'import tensorflow as tf\n'), (1026, 'tensorflow.expand_dims', 'tf.expand_dims', (['trg_mask'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (1026, 'tensorflow.expand_dims', 'tf.expand_dims', (['src_mask'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (1032, 'tensorflow.to_float', 'tf.to_float', (['batch_size'], {}), True, 'import tensorflow as tf\n'), (1066, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['target_weights'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (1162, 'tensorflow.concat', 'tf.concat', (['[attention_states[0], states]'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (1207, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['e'], {'axis': 'dim', 'keep_dims': '(True)'}), True, 'import tensorflow as tf\n'), (1233, 'tensorflow.to_float', 'tf.to_float', (['batch_size'], {}), True, 'import tensorflow as tf\n'), (1252, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.01)'], {}), True, 'import tensorflow as tf\n'), (84, 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', ([], {'minval': '(-weight_scale)', 'maxval': 'weight_scale'}), True, 'import tensorflow as tf\n'), (86, 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'weight_scale'}), True, 'import tensorflow as tf\n'), (135, 'tensorflow.shape', 'tf.shape', (['encoder_inputs_'], {}), True, 'import tensorflow as tf\n'), (136, 'tensorflow.shape', 'tf.shape', (['encoder_inputs_'], {}), True, 'import tensorflow as tf\n'), (140, 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embeddings', 'flat_inputs'], {}), True, 'import tensorflow as tf\n'), (144, 'tensorflow.range', 'tf.range', (['time_steps'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (145, 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['pos_embeddings', 'pos_inputs_'], {}), True, 'import tensorflow as tf\n'), (147, 'tensorflow.concat', 'tf.concat', (['[encoder_inputs_, pos_inputs_]'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (150, 'tensorflow.concat', 'tf.concat', (['[encoder_inputs_, other_inputs]'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (154, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['encoder_inputs_'], {'keep_prob': 'encoder.word_keep_prob', 'noise_shape': 'noise_shape'}), True, 'import tensorflow as tf\n'), (159, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['encoder_inputs_'], {'keep_prob': 'encoder.embedding_keep_prob', 'noise_shape': 'noise_shape'}), True, 'import tensorflow as tf\n'), (179, 'tensorflow.expand_dims', 'tf.expand_dims', (['encoder_inputs_'], {'axis': '(3)'}), True, 'import tensorflow as tf\n'), (204, 'tensorflow.reshape', 'tf.reshape', (['encoder_inputs_', '[batch_size, time_steps, feature_size * channels]'], {}), True, 'import tensorflow as tf\n'), (219, 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', 
(['embeddings', 'utils.BOS_ID'], {}), True, 'import tensorflow as tf\n'), (221, 'tensorflow.tile', 'tf.tile', (['pad', '[batch_size, 1, 1]'], {}), True, 'import tensorflow as tf\n'), (241, 'tensorflow.concat', 'tf.concat', (['inputs'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (243, 'tensorflow.nn.relu', 'tf.nn.relu', (['encoder_inputs_'], {}), True, 'import tensorflow as tf\n'), (252, 'tensorflow.concat', 'tf.concat', (['[encoder_inputs_, pad]'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (253, 'tensorflow.nn.pool', 'tf.nn.pool', (['encoder_inputs_'], {'window_shape': '[stride]', 'pooling_type': '"""MAX"""', 'padding': '"""VALID"""', 'strides': '[stride]'}), True, 'import tensorflow as tf\n'), (333, 'tensorflow.concat', 'tf.concat', (['encoder_states_'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (393, 'tensorflow.range', 'tf.range', (['time_steps'], {}), True, 'import tensorflow as tf\n'), (426, 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['e'], {}), True, 'import tensorflow as tf\n'), (435, 'tensorflow.expand_dims', 'tf.expand_dims', (['weights', '(2)'], {}), True, 'import tensorflow as tf\n'), (450, 'tensorflow.shape', 'tf.shape', (['hidden_states'], {}), True, 'import tensorflow as tf\n'), (489, 'tensorflow.minimum', 'tf.minimum', (['pos_', '(encoder_input_length - 1)'], {}), True, 'import tensorflow as tf\n'), (490, 'tensorflow.maximum', 'tf.maximum', (['pos_', '(0)'], {}), True, 'import tensorflow as tf\n'), (506, 'tensorflow.floor', 'tf.floor', (['(encoder_input_length * pos)'], {}), True, 'import tensorflow as tf\n'), (507, 'tensorflow.reshape', 'tf.reshape', (['pos', '[-1, 1]'], {}), True, 'import tensorflow as tf\n'), (508, 'tensorflow.minimum', 'tf.minimum', (['pos', '(encoder_input_length - 1)'], {}), True, 'import tensorflow as tf\n'), (511, 'tensorflow.reshape', 'tf.reshape', (['idx', '[-1, attn_length]'], {}), True, 'import tensorflow as tf\n'), (516, 'tensorflow.to_float', 'tf.to_float', (['(idx < low)'], {}), True, 'import tensorflow as tf\n'), (517, 'tensorflow.to_float', 'tf.to_float', (['(idx > high)'], {}), True, 'import tensorflow as tf\n'), (519, 'tensorflow.to_float', 'tf.to_float', (['(idx >= encoder_input_length)'], {}), True, 'import tensorflow as tf\n'), (660, 'tensorflow.shape', 'tf.shape', (['embedded_input'], {}), True, 'import tensorflow as tf\n'), (686, 'tensorflow.contrib.rnn.DropoutWrapper', 'DropoutWrapper', (['cell'], {'input_keep_prob': 'decoder.rnn_input_keep_prob', 'output_keep_prob': 'decoder.rnn_output_keep_prob', 'state_keep_prob': 'decoder.rnn_state_keep_prob', 'variational_recurrent': 'decoder.pervasive_dropout', 'dtype': 'tf.float32', 'input_size': 'input_size_'}), False, 'from tensorflow.contrib.rnn import BasicLSTMCell, RNNCell, DropoutWrapper, MultiRNNCell\n'), (696, 'tensorflow.contrib.rnn.MultiRNNCell', 'MultiRNNCell', (['cells'], {}), False, 'from tensorflow.contrib.rnn import BasicLSTMCell, RNNCell, DropoutWrapper, MultiRNNCell\n'), (719, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope_name'], {}), True, 'import tensorflow as tf\n'), (733, 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), True, 'import tensorflow as tf\n'), (761, 'tensorflow.to_float', 'tf.to_float', (['max_pos'], {}), True, 'import tensorflow as tf\n'), (778, 'tensorflow.contrib.layers.layer_norm', 'tf.contrib.layers.layer_norm', (['output_'], {'activation_fn': 'tf.nn.tanh', 'scope': '"""output_layer_norm"""'}), True, 'import tensorflow as tf\n'), (785, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['output_'], 
{'keep_prob': 'decoder.deep_layer_keep_prob', 'noise_shape': 'noise_shape'}), True, 'import tensorflow as tf\n'), (817, 'tensorflow.expand_dims', 'tf.expand_dims', (['initial_state'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (819, 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[batch_size, cell_state_size]'}), True, 'import tensorflow as tf\n'), (862, 'tensorflow.equal', 'tf.equal', (['time', '(0)'], {}), True, 'import tensorflow as tf\n'), (901, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conditional_1"""'], {}), True, 'import tensorflow as tf\n'), (909, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conditional_2"""'], {}), True, 'import tensorflow as tf\n'), (921, 'tensorflow.random_uniform', 'tf.random_uniform', (['[]'], {}), True, 'import tensorflow as tf\n'), (1017, 'tensorflow.range', 'tf.range', (['src_len'], {}), True, 'import tensorflow as tf\n'), (1018, 'tensorflow.range', 'tf.range', (['trg_len'], {}), True, 'import tensorflow as tf\n'), (1053, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['weights'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (1110, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['weights'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (1122, 'tensorflow.stack', 'tf.stack', (['[batch_size, 1]'], {}), True, 'import tensorflow as tf\n'), (128, 'tensorflow.contrib.rnn.DropoutWrapper', 'DropoutWrapper', (['cell'], {'input_keep_prob': 'encoder.rnn_input_keep_prob', 'output_keep_prob': 'encoder.rnn_output_keep_prob', 'state_keep_prob': 'encoder.rnn_state_keep_prob', 'variational_recurrent': 'encoder.pervasive_dropout', 'dtype': 'tf.float32', 'input_size': 'input_size'}), False, 'from tensorflow.contrib.rnn import BasicLSTMCell, RNNCell, DropoutWrapper, MultiRNNCell\n'), (146, 'tensorflow.expand_dims', 'tf.expand_dims', (['pos_inputs_'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (157, 'tensorflow.shape', 'tf.shape', (['encoder_inputs_'], {}), True, 'import tensorflow as tf\n'), (190, 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['encoder_inputs_', 'filter_', 'strides'], {'padding': '"""SAME"""'}), True, 'import tensorflow as tf\n'), (202, 'tensorflow.shape', 'tf.shape', (['encoder_inputs_'], {}), True, 'import tensorflow as tf\n'), (208, 'translate.conv_lstm.BasicConvLSTMCell', 'BasicConvLSTMCell', (['[feature_size, channels]', 'encoder.conv_lstm_size', '(1)'], {}), False, 'from translate.conv_lstm import BasicConvLSTMCell\n'), (209, 'tensorflow.nn.bidirectional_dynamic_rnn', 'tf.nn.bidirectional_dynamic_rnn', (['cell', 'cell', 'encoder_inputs_'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (213, 'tensorflow.concat', 'tf.concat', (['encoder_inputs_'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (220, 'tensorflow.expand_dims', 'tf.expand_dims', (['pad'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (238, 'tensorflow.nn.convolution', 'tf.nn.convolution', (['inputs_'], {'filter': 'filter_', 'padding': '"""VALID"""'}), True, 'import tensorflow as tf\n'), (255, 'tensorflow.ceil', 'tf.ceil', (['(encoder_input_length_ / stride)'], {}), True, 'import tensorflow as tf\n'), (301, 'translate.rnn.CellInitializer', 'CellInitializer', (['encoder.cell_size'], {}), False, 'from translate.rnn import stack_bidirectional_dynamic_rnn, CellInitializer, GRUCell, DropoutGRUCell, PLSTM\n'), (328, 'tensorflow.range', 'tf.range', (['batch_size'], {}), True, 'import tensorflow as tf\n'), (336, 'tensorflow.expand_dims', 'tf.expand_dims', (['mask'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), 
(373, 'tensorflow.shape', 'tf.shape', (['state'], {}), True, 'import tensorflow as tf\n'), (375, 'tensorflow.shape', 'tf.shape', (['hidden'], {}), True, 'import tensorflow as tf\n'), (420, 'tensorflow.shape', 'tf.shape', (['hidden_states'], {}), True, 'import tensorflow as tf\n'), (430, 'tensorflow.reduce_max', 'tf.reduce_max', (['e'], {'axis': '(1)', 'keep_dims': '(True)'}), True, 'import tensorflow as tf\n'), (443, 'tensorflow.shape', 'tf.shape', (['hidden_states'], {}), True, 'import tensorflow as tf\n'), (510, 'tensorflow.stack', 'tf.stack', (['[batch_size]'], {}), True, 'import tensorflow as tf\n'), (521, 'tensorflow.equal', 'tf.equal', (['m', '(0.0)'], {}), True, 'import tensorflow as tf\n'), (529, 'tensorflow.truediv', 'tf.truediv', (['numerator', '(2 * sigma ** 2)'], {}), True, 'import tensorflow as tf\n'), (531, 'tensorflow.exp', 'tf.exp', (['div'], {}), True, 'import tensorflow as tf\n'), (674, 'tensorflow.contrib.rnn.BasicLSTMCell', 'BasicLSTMCell', (['decoder.cell_size'], {'reuse': 'reuse'}), False, 'from tensorflow.contrib.rnn import BasicLSTMCell, RNNCell, DropoutWrapper, MultiRNNCell\n'), (676, 'translate.rnn.PLSTM', 'PLSTM', (['decoder.cell_size'], {'reuse': 'reuse', 'fact_size': 'decoder.lstm_fact_size', 'proj_size': 'decoder.lstm_proj_size'}), False, 'from translate.rnn import stack_bidirectional_dynamic_rnn, CellInitializer, GRUCell, DropoutGRUCell, PLSTM\n'), (783, 'tensorflow.shape', 'tf.shape', (['output_'], {}), True, 'import tensorflow as tf\n'), (793, 'tensorflow.squeeze', 'tf.squeeze', (['output_'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (805, 'tensorflow.transpose', 'tf.transpose', (['embedding'], {}), True, 'import tensorflow as tf\n'), (822, 'tensorflow.contrib.layers.layer_norm', 'tf.contrib.layers.layer_norm', (['initial_state'], {'activation_fn': 'activation_fn', 'scope': '"""initial_state_layer_norm"""'}), True, 'import tensorflow as tf\n'), (875, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conditional_1"""'], {}), True, 'import tensorflow as tf\n'), (887, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conditional_2"""'], {}), True, 'import tensorflow as tf\n'), (924, 'tensorflow.logical_not', 'tf.logical_not', (['feed_argmax'], {}), True, 'import tensorflow as tf\n'), (981, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['mask'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (1032, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['non_monotonous'], {}), True, 'import tensorflow as tf\n'), (1175, 'tensorflow.get_variable', 'tf.get_variable', (['"""map_attns/matrix"""'], {'shape': 'shape'}), True, 'import tensorflow as tf\n'), (1176, 'tensorflow.get_variable', 'tf.get_variable', (['"""map_attns/bias"""'], {'shape': 'shape[-1:]'}), True, 'import tensorflow as tf\n'), (116, 'tensorflow.contrib.rnn.BasicLSTMCell', 'BasicLSTMCell', (['encoder.cell_size'], {'reuse': 'reuse'}), False, 'from tensorflow.contrib.rnn import BasicLSTMCell, RNNCell, DropoutWrapper, MultiRNNCell\n'), (118, 'translate.rnn.PLSTM', 'PLSTM', (['encoder.cell_size'], {'reuse': 'reuse', 'fact_size': 'encoder.lstm_fact_size', 'proj_size': 'encoder.lstm_proj_size'}), False, 'from translate.rnn import stack_bidirectional_dynamic_rnn, CellInitializer, GRUCell, DropoutGRUCell, PLSTM\n'), (139, 'tensorflow.multiply', 'tf.multiply', (['batch_size', 'time_steps'], {}), True, 'import tensorflow as tf\n'), (176, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['encoder_inputs_'], {'keep_prob': 'encoder.input_layer_keep_prob'}), True, 'import tensorflow as tf\n'), (196, 
'tensorflow.nn.relu', 'tf.nn.relu', (['encoder_inputs_'], {}), True, 'import tensorflow as tf\n'), (198, 'tensorflow.ceil', 'tf.ceil', (['(encoder_input_length_ / strides[1])'], {}), True, 'import tensorflow as tf\n'), (232, 'tensorflow.tile', 'tf.tile', (['pad', '[1, right, 1]'], {}), True, 'import tensorflow as tf\n'), (233, 'tensorflow.tile', 'tf.tile', (['pad', '[1, left, 1]'], {}), True, 'import tensorflow as tf\n'), (234, 'tensorflow.concat', 'tf.concat', (['[pad_left, encoder_inputs_, pad_right]'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (263, 'tensorflow.layers.dense', 'tf.layers.dense', (['x', 'size'], {'activation': 'tf.nn.sigmoid', 'use_bias': '(True)', 'name': '"""g"""'}), True, 'import tensorflow as tf\n'), (264, 'tensorflow.layers.dense', 'tf.layers.dense', (['x', 'size'], {'activation': 'tf.nn.relu', 'use_bias': '(True)', 'name': '"""y"""'}), True, 'import tensorflow as tf\n'), (286, 'tensorflow.expand_dims', 'tf.expand_dims', (['initial_state'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (302, 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), True, 'import tensorflow as tf\n'), (337, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(mask * encoder_outputs_)'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (337, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['mask'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (340, 'tensorflow.expand_dims', 'tf.expand_dims', (['mask'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (357, 'tensorflow.concat', 'tf.concat', (['[encoder_inputs_, encoder_outputs_]'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (428, 'tensorflow.argmax', 'tf.argmax', (['e', '(-1)'], {}), True, 'import tensorflow as tf\n'), (432, 'tensorflow.exp', 'tf.exp', (['(e / T)'], {}), True, 'import tensorflow as tf\n'), (433, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['exp'], {'axis': '(-1)', 'keep_dims': '(True)'}), True, 'import tensorflow as tf\n'), (484, 'tensorflow.squeeze', 'tf.squeeze', (['pos'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (492, 'tensorflow.expand_dims', 'tf.expand_dims', (['weights_'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (499, 'tensorflow.expand_dims', 'tf.expand_dims', (['weights'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (510, 'tensorflow.range', 'tf.range', (['attn_length'], {}), True, 'import tensorflow as tf\n'), (535, 'tensorflow.expand_dims', 'tf.expand_dims', (['weights'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (657, 'tensorflow.shape', 'tf.shape', (['input_'], {}), True, 'import tensorflow as tf\n'), (661, 'tensorflow.shape', 'tf.shape', (['input_'], {}), True, 'import tensorflow as tf\n'), (679, 'translate.rnn.DropoutGRUCell', 'DropoutGRUCell', (['decoder.cell_size'], {'reuse': 'reuse', 'layer_norm': 'decoder.layer_norm', 'input_size': 'input_size_', 'input_keep_prob': 'decoder.rnn_input_keep_prob', 'state_keep_prob': 'decoder.rnn_state_keep_prob'}), False, 'from translate.rnn import stack_bidirectional_dynamic_rnn, CellInitializer, GRUCell, DropoutGRUCell, PLSTM\n'), (683, 'translate.rnn.GRUCell', 'GRUCell', (['decoder.cell_size'], {'reuse': 'reuse', 'layer_norm': 'decoder.layer_norm'}), False, 'from translate.rnn import stack_bidirectional_dynamic_rnn, CellInitializer, GRUCell, DropoutGRUCell, PLSTM\n'), (791, 'tensorflow.expand_dims', 'tf.expand_dims', (['output_'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (918, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['output_'], {}), True, 'import tensorflow 
as tf\n'), (1035, 'tensorflow.to_float', 'tf.to_float', (['global_step'], {}), True, 'import tensorflow as tf\n'), (1178, 'tensorflow.einsum', 'tf.einsum', (['"""ijk,kl->ijl"""', 'x', 'w'], {}), True, 'import tensorflow as tf\n'), (1180, 'tensorflow.nn.tanh', 'tf.nn.tanh', (['x'], {}), True, 'import tensorflow as tf\n'), (22, 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), True, 'import tensorflow as tf\n'), (121, 'translate.rnn.DropoutGRUCell', 'DropoutGRUCell', (['encoder.cell_size'], {'reuse': 'reuse', 'layer_norm': 'encoder.layer_norm', 'input_size': 'input_size', 'input_keep_prob': 'encoder.rnn_input_keep_prob', 'state_keep_prob': 'encoder.rnn_state_keep_prob'}), False, 'from translate.rnn import stack_bidirectional_dynamic_rnn, CellInitializer, GRUCell, DropoutGRUCell, PLSTM\n'), (125, 'translate.rnn.GRUCell', 'GRUCell', (['encoder.cell_size'], {'reuse': 'reuse', 'layer_norm': 'encoder.layer_norm'}), False, 'from translate.rnn import stack_bidirectional_dynamic_rnn, CellInitializer, GRUCell, DropoutGRUCell, PLSTM\n'), (250, 'tensorflow.ceil', 'tf.ceil', (['(time_steps / stride)'], {}), True, 'import tensorflow as tf\n'), (251, 'tensorflow.shape', 'tf.shape', (['encoder_inputs_'], {}), True, 'import tensorflow as tf\n'), (285, 'tensorflow.zeros', 'tf.zeros', (['cell_state_size'], {}), True, 'import tensorflow as tf\n'), (341, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(mask * encoder_inputs_)'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (341, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['mask'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (343, 'tensorflow.concat', 'tf.concat', (['[last_forward, last_backward]'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (491, 'tensorflow.squeeze', 'tf.squeeze', (['pos_'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (498, 'tensorflow.squeeze', 'tf.squeeze', (['pos'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (505, 'tensorflow.matmul', 'tf.matmul', (['state', 'wp'], {}), True, 'import tensorflow as tf\n'), (528, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['(2)'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (795, 'tensorflow.split', 'tf.split', (['output_'], {'num_or_size_splits': '(2)', 'axis': '(1)'}), True, 'import tensorflow as tf\n'), (880, 'tensorflow.equal', 'tf.equal', (['time', '(0)'], {}), True, 'import tensorflow as tf\n'), (335, 'tensorflow.shape', 'tf.shape', (['encoder_outputs_'], {}), True, 'import tensorflow as tf\n'), (345, 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[batch_size, 0]'}), True, 'import tensorflow as tf\n'), (428, 'tensorflow.shape', 'tf.shape', (['e'], {}), True, 'import tensorflow as tf\n'), (339, 'tensorflow.shape', 'tf.shape', (['encoder_inputs_'], {}), True, 'import tensorflow as tf\n')]
lenhattan86/tf_bench
8b2b363fb6a819d84b3b11552c2ea97886188a18
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Benchmark script for TensorFlow. See the README for more information. """ from __future__ import print_function import argparse from collections import defaultdict import os import threading import time import numpy as np import six from six.moves import xrange # pylint: disable=redefined-builtin import tensorflow as tf from tensorflow.python.client import timeline from tensorflow.python.layers import convolutional as conv_layers from tensorflow.python.layers import core as core_layers from tensorflow.python.layers import pooling as pooling_layers from tensorflow.python.ops import data_flow_ops from tensorflow.python.platform import gfile import benchmark_storage import cnn_util import datasets import model_config import preprocessing import variable_mgr tf.flags.DEFINE_string('model', 'trivial', 'name of the model to run') # The code will first check if it's running in benchmarking mode # or evaluation mode, depending on FLAGS.eval: # In evaluation mode, this script will read a saved model, # and compute the accuracy of the model against a validation dataset. # Additional ops for accuracy and top_k predictors are only used in this # mode. # In benchmarking mode, the user can specify whether or not to use # the forward-only option, which will only compute the loss function. # forward-only cannot be enabled with eval at the same time. tf.flags.DEFINE_boolean('eval', False, 'whether to run evaluation or benchmarking') tf.flags.DEFINE_boolean('forward_only', False, """whether to use forward-only or training for benchmarking""") tf.flags.DEFINE_integer('batch_size', 0, 'batch size per compute device') tf.flags.DEFINE_integer('num_batches', 100, 'number of batches to run, excluding warmup') tf.flags.DEFINE_integer('num_warmup_batches', None, 'number of batches to run before timing') tf.flags.DEFINE_integer('autotune_threshold', None, 'The autotune threshold for the models') tf.flags.DEFINE_integer('num_gpus', 1, 'the number of GPUs to run on') tf.flags.DEFINE_integer('display_every', 10, """Number of local steps after which progress is printed out""") tf.flags.DEFINE_string('data_dir', None, """Path to dataset in TFRecord format (aka Example protobufs). If not specified, synthetic data will be used.""") tf.flags.DEFINE_string('data_name', None, """Name of dataset: imagenet or flowers. If not specified, it is automatically guessed based on --data_dir.""") tf.flags.DEFINE_string('resize_method', 'bilinear', """Method for resizing input images: crop, nearest, bilinear, bicubic or area. The 'crop' mode requires source images to be at least as large as the network input size, while the other modes support any sizes and apply random bbox distortions before resizing (even with --nodistortions).""") tf.flags.DEFINE_boolean('distortions', True, """Enable/disable distortions during image preprocessing.
These include bbox and color distortions.""") tf.flags.DEFINE_string('local_parameter_device', 'gpu', """Device to use as parameter server: cpu or gpu. For distributed training, it can affect where caching of variables happens.""") tf.flags.DEFINE_string('device', 'gpu', """Device to use for computation: cpu or gpu""") #tf.flags.DEFINE_string('data_format', 'NCHW', tf.flags.DEFINE_string('data_format', 'NHWC', """Data layout to use: NHWC (TF native) or NCHW (cuDNN native).""") tf.flags.DEFINE_integer('num_intra_threads', 1, """Number of threads to use for intra-op parallelism. If set to 0, the system will pick an appropriate number.""") tf.flags.DEFINE_integer('num_inter_threads', 0, """Number of threads to use for inter-op parallelism. If set to 0, the system will pick an appropriate number.""") tf.flags.DEFINE_string('trace_file', None, """Enable TensorFlow tracing and write trace to this file.""") tf.flags.DEFINE_string('graph_file', None, """Write the model's graph definition to this file. Defaults to binary format unless filename ends in 'txt'.""") tf.flags.DEFINE_string('optimizer', 'sgd', 'Optimizer to use: momentum or sgd or rmsprop') tf.flags.DEFINE_float('learning_rate', None, """Initial learning rate for training.""") tf.flags.DEFINE_float('num_epochs_per_decay', 0, """Steps after which learning rate decays.""") tf.flags.DEFINE_float('learning_rate_decay_factor', 0.94, """Learning rate decay factor.""") tf.flags.DEFINE_float('momentum', 0.9, """Momentum for training.""") tf.flags.DEFINE_float('rmsprop_decay', 0.9, """Decay term for RMSProp.""") tf.flags.DEFINE_float('rmsprop_momentum', 0.9, """Momentum in RMSProp.""") tf.flags.DEFINE_float('rmsprop_epsilon', 1.0, """Epsilon term for RMSProp.""") tf.flags.DEFINE_float('gradient_clip', None, """Gradient clipping magnitude. Disabled by default.""") tf.flags.DEFINE_float('weight_decay', 0.00004, """Weight decay factor for training.""") # Performance tuning flags. tf.flags.DEFINE_boolean('winograd_nonfused', True, """Enable/disable using the Winograd non-fused algorithms.""") tf.flags.DEFINE_boolean('sync_on_finish', False, """Enable/disable whether the devices are synced after each step.""") tf.flags.DEFINE_boolean('staged_vars', False, """whether the variables are staged from the main computation""") tf.flags.DEFINE_boolean('force_gpu_compatible', True, """whether to enable force_gpu_compatible in GPU_Options""") # The method for managing variables: # parameter_server: variables are stored on a parameter server that holds # the master copy of the variable. In local execution, a local device # acts as the parameter server for each variable; in distributed # execution, the parameter servers are separate processes in the cluster. # For each step, each tower gets a copy of the variables from the # parameter server, and sends its gradients to the param server. # replicated: each GPU has its own copy of the variables. To apply gradients, # nccl all-reduce or regular cross-device aggregation is used to replicate # the combined gradients to all towers (depending on --use_nccl option). # independent: each GPU has its own copy of the variables, and gradients are # not shared between towers. This can be used to check performance when no # data is moved between GPUs. # distributed_replicated: Distributed training only. Each GPU has a copy of # the variables, and updates its copy after the parameter servers are all # updated with the gradients from all servers. Only works with # cross_replica_sync=true. 
Unlike 'replicated', currently never uses # nccl all-reduce for replicating within a server. tf.flags.DEFINE_string( 'variable_update', 'parameter_server', ('The method for managing variables: ' 'parameter_server, replicated, distributed_replicated, independent')) tf.flags.DEFINE_boolean( 'use_nccl', True, 'Whether to use nccl all-reduce primitives where possible') # Distributed training flags. tf.flags.DEFINE_string('job_name', '', 'One of "ps", "worker", "". Empty for local training') tf.flags.DEFINE_string('ps_hosts', '', 'Comma-separated list of target hosts') tf.flags.DEFINE_string('worker_hosts', '', 'Comma-separated list of target hosts') tf.flags.DEFINE_integer('task_index', 0, 'Index of task within the job') tf.flags.DEFINE_string('server_protocol', 'grpc', 'protocol for servers') tf.flags.DEFINE_boolean('cross_replica_sync', True, '') # Summary and checkpoint saving/loading flags. tf.flags.DEFINE_integer('summary_verbosity', 0, """Verbosity level for summary ops. Pass 0 to disable both summaries and checkpoints.""") tf.flags.DEFINE_integer('save_summaries_steps', 0, """How often to save summaries for trained models. Pass 0 to disable summaries.""") tf.flags.DEFINE_integer('save_model_secs', 0, """How often to save trained models. Pass 0 to disable checkpoints.""") tf.flags.DEFINE_string('train_dir', None, """Path to session checkpoints.""") tf.flags.DEFINE_string('eval_dir', '/tmp/tf_cnn_benchmarks/eval', """Directory where to write eval event logs.""") tf.flags.DEFINE_string('pretrain_dir', None, """Path to pretrained session checkpoints.""") tf.flags.DEFINE_string('result_storage', None, """Specifies storage option for benchmark results. None means results won't be stored. 'cbuild_benchmark_datastore' means results will be stored in cbuild datastore (note: this option requires special permissions and is meant to be used from cbuilds).""") FLAGS = tf.flags.FLAGS log_fn = print # tf.logging.info class GlobalStepWatcher(threading.Thread): """A helper class for global_step. Polls for changes in the global_step of the model, and finishes when the global run has completed its required number of steps.
""" def __init__(self, sess, global_step_op, start_at_global_step, end_at_global_step): threading.Thread.__init__(self) self.sess = sess self.global_step_op = global_step_op self.start_at_global_step = start_at_global_step self.end_at_global_step = end_at_global_step self.start_time = 0 self.start_step = 0 self.finish_time = 0 self.finish_step = 0 def run(self): while self.finish_time == 0: time.sleep(.25) global_step_val, = self.sess.run([self.global_step_op]) if self.start_time == 0 and global_step_val >= self.start_at_global_step: log_fn('Starting real work at step %s at time %s' % ( global_step_val, time.ctime())) self.start_time = time.time() self.start_step = global_step_val if self.finish_time == 0 and global_step_val >= self.end_at_global_step: log_fn('Finishing real work at step %s at time %s' % ( global_step_val, time.ctime())) self.finish_time = time.time() self.finish_step = global_step_val log_fn('total time %s' % (self.finish_time - self.start_time)) def done(self): return self.finish_time > 0 def steps_per_second(self): return ((self.finish_step - self.start_step) / (self.finish_time - self.start_time)) class ConvNetBuilder(object): """Builder of cnn net.""" def __init__(self, input_op, input_nchan, phase_train, # data_format='NCHW', data_format='NHWC', data_type=tf.float32): self.top_layer = input_op self.top_size = input_nchan self.phase_train = phase_train self.data_format = data_format self.data_type = data_type self.counts = defaultdict(lambda: 0) self.use_batch_norm = False self.batch_norm_config = {} # 'decay': 0.997, 'scale': True} self.channel_pos = ( 'channels_last' if data_format == 'NHWC' else 'channels_first') def conv(self, num_out_channels, k_height, k_width, d_height=1, d_width=1, mode='SAME', input_layer=None, num_channels_in=None, batch_norm=None, activation='relu'): if input_layer is None: input_layer = self.top_layer if num_channels_in is None: num_channels_in = self.top_size name = 'conv' + str(self.counts['conv']) self.counts['conv'] += 1 with tf.variable_scope(name): strides = [1, d_height, d_width, 1] if self.data_format == 'NCHW': strides = [strides[0], strides[3], strides[1], strides[2]] if mode != 'SAME_RESNET': conv = conv_layers.conv2d( input_layer, num_out_channels, [k_height, k_width], strides=[d_height, d_width], padding=mode, data_format=self.channel_pos, use_bias=False) else: # Special padding mode for ResNet models if d_height == 1 and d_width == 1: conv = conv_layers.conv2d( input_layer, num_out_channels, [k_height, k_width], strides=[d_height, d_width], padding='SAME', data_format=self.channel_pos, use_bias=False) else: rate = 1 # Unused (for 'a trous' convolutions) kernel_size_effective = k_height + (k_width - 1) * (rate - 1) pad_total = kernel_size_effective - 1 pad_beg = pad_total // 2 pad_end = pad_total - pad_beg padding = [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]] if self.data_format == 'NCHW': padding = [padding[0], padding[3], padding[1], padding[2]] input_layer = tf.pad(input_layer, padding) conv = conv_layers.conv2d( input_layer, num_out_channels, [k_height, k_width], strides=[d_height, d_width], padding='VALID', data_format=self.channel_pos, use_bias=False) if batch_norm is None: batch_norm = self.use_batch_norm if not batch_norm: biases = tf.get_variable( 'biases', [num_out_channels], self.data_type, tf.constant_initializer(0.0)) biased = tf.reshape( tf.nn.bias_add( conv, biases, data_format=self.data_format), conv.get_shape()) else: self.top_layer = conv self.top_size = num_out_channels biased = 
self.batch_norm(**self.batch_norm_config) if activation == 'relu': conv1 = tf.nn.relu(biased) elif activation == 'linear' or activation is None: conv1 = biased elif activation == 'tanh': conv1 = tf.nn.tanh(biased) else: raise KeyError('Invalid activation type \'%s\'' % activation) self.top_layer = conv1 self.top_size = num_out_channels return conv1 def mpool(self, k_height, k_width, d_height=2, d_width=2, mode='VALID', input_layer=None, num_channels_in=None): """Construct a max pooling layer.""" if input_layer is None: input_layer = self.top_layer else: self.top_size = num_channels_in name = 'mpool' + str(self.counts['mpool']) self.counts['mpool'] += 1 pool = pooling_layers.max_pooling2d( input_layer, [k_height, k_width], [d_height, d_width], padding=mode, data_format=self.channel_pos, name=name) self.top_layer = pool return pool def apool(self, k_height, k_width, d_height=2, d_width=2, mode='VALID', input_layer=None, num_channels_in=None): """Construct an average pooling layer.""" if input_layer is None: input_layer = self.top_layer else: self.top_size = num_channels_in name = 'apool' + str(self.counts['apool']) self.counts['apool'] += 1 pool = pooling_layers.average_pooling2d( input_layer, [k_height, k_width], [d_height, d_width], padding=mode, data_format=self.channel_pos, name=name) self.top_layer = pool return pool def reshape(self, shape, input_layer=None): if input_layer is None: input_layer = self.top_layer self.top_layer = tf.reshape(input_layer, shape) self.top_size = shape[-1] # HACK This may not always work return self.top_layer def affine(self, num_out_channels, input_layer=None, num_channels_in=None, activation='relu'): if input_layer is None: input_layer = self.top_layer if num_channels_in is None: num_channels_in = self.top_size name = 'affine' + str(self.counts['affine']) self.counts['affine'] += 1 with tf.variable_scope(name): init_factor = 2. if activation == 'relu' else 1. 
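      # Note: the 'weights' initializer below draws from a normal
      # distribution with stddev = sqrt(init_factor / fan_in): a factor of
      # 2 corresponds to He initialization (suited to the ReLU branch),
      # while a factor of 1 is a LeCun-style 1/fan_in scaling for linear
      # activations.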
kernel = tf.get_variable( 'weights', [num_channels_in, num_out_channels], self.data_type, tf.random_normal_initializer(stddev=np.sqrt(init_factor / (num_channels_in)))) biases = tf.get_variable('biases', [num_out_channels], self.data_type, tf.constant_initializer(0.0)) logits = tf.matmul(input_layer, kernel) + biases if activation == 'relu': affine1 = tf.nn.relu(logits, name=name) elif activation == 'linear' or activation is None: affine1 = logits else: raise KeyError('Invalid activation type \'%s\'' % activation) self.top_layer = affine1 self.top_size = num_out_channels return affine1 def resnet_bottleneck_v1(self, depth, depth_bottleneck, stride, input_layer=None, in_size=None): if input_layer is None: input_layer = self.top_layer if in_size is None: in_size = self.top_size name = 'resnet_v1' + str(self.counts['resnet_v1']) self.counts['resnet_v1'] += 1 with tf.variable_scope(name): if depth == in_size: if stride == 1: shortcut = input_layer else: shortcut = self.mpool( 1, 1, stride, stride, input_layer=input_layer, num_channels_in=in_size) else: shortcut = self.conv( depth, 1, 1, stride, stride, activation=None, input_layer=input_layer, num_channels_in=in_size) self.conv( depth_bottleneck, 1, 1, stride, stride, input_layer=input_layer, num_channels_in=in_size) self.conv(depth_bottleneck, 3, 3, 1, 1, mode='SAME_RESNET') res = self.conv(depth, 1, 1, 1, 1, activation=None) output = tf.nn.relu(shortcut + res) self.top_layer = output self.top_size = depth return output def inception_module(self, name, cols, input_layer=None, in_size=None): if input_layer is None: input_layer = self.top_layer if in_size is None: in_size = self.top_size name += str(self.counts[name]) self.counts[name] += 1 with tf.variable_scope(name): col_layers = [] col_layer_sizes = [] for c, col in enumerate(cols): col_layers.append([]) col_layer_sizes.append([]) for l, layer in enumerate(col): ltype, args = layer[0], layer[1:] kwargs = { 'input_layer': input_layer, 'num_channels_in': in_size } if l == 0 else {} if ltype == 'conv': self.conv(*args, **kwargs) elif ltype == 'mpool': self.mpool(*args, **kwargs) elif ltype == 'apool': self.apool(*args, **kwargs) elif ltype == 'share': # Share matching layer from previous column self.top_layer = col_layers[c - 1][l] self.top_size = col_layer_sizes[c - 1][l] else: raise KeyError('Invalid layer type for inception module: \'%s\'' % ltype) col_layers[c].append(self.top_layer) col_layer_sizes[c].append(self.top_size) catdim = 3 if self.data_format == 'NHWC' else 1 self.top_layer = tf.concat([layers[-1] for layers in col_layers], catdim) self.top_size = sum([sizes[-1] for sizes in col_layer_sizes]) return self.top_layer def residual(self, nout, net, scale=1.0): inlayer = self.top_layer net(self) self.conv(nout, 1, 1, activation=None) self.top_layer = tf.nn.relu(inlayer + scale * self.top_layer) def spatial_mean(self, keep_dims=False): name = 'spatial_mean' + str(self.counts['spatial_mean']) self.counts['spatial_mean'] += 1 axes = [1, 2] if self.data_format == 'NHWC' else [2, 3] self.top_layer = tf.reduce_mean( self.top_layer, axes, keep_dims=keep_dims, name=name) return self.top_layer def dropout(self, keep_prob=0.5, input_layer=None): if input_layer is None: input_layer = self.top_layer else: self.top_size = None name = 'dropout' + str(self.counts['dropout']) with tf.variable_scope(name): if not self.phase_train: keep_prob = 1.0 dropout = core_layers.dropout(input_layer, keep_prob) self.top_layer = dropout return dropout def batch_norm(self, input_layer=None, **kwargs): """Adds a 
Batch Normalization layer.""" if input_layer is None: input_layer = self.top_layer else: self.top_size = None name = 'batchnorm' + str(self.counts['batchnorm']) self.counts['batchnorm'] += 1 with tf.variable_scope(name) as scope: bn = tf.contrib.layers.batch_norm( input_layer, is_training=self.phase_train, fused=True, data_format=self.data_format, scope=scope, **kwargs) self.top_layer = bn return bn def loss_function(logits, labels): # global cross_entropy # HACK TESTING cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits( logits=logits, labels=labels, name='xentropy') loss = tf.reduce_mean(cross_entropy, name='xentropy_mean') return loss def add_image_preprocessing(dataset, input_nchan, image_size, batch_size, num_compute_devices, input_data_type, resize_method, train): """Add image Preprocessing ops to tf graph.""" if dataset is not None: preproc_train = preprocessing.ImagePreprocessor( image_size, image_size, batch_size, num_compute_devices, input_data_type, train=train, resize_method=resize_method) if train: subset = 'train' else: subset = 'validation' images, labels = preproc_train.minibatch(dataset, subset=subset) images_splits = images labels_splits = labels # Note: We force all datasets to 1000 to ensure even comparison # This works because we use sparse_softmax_cross_entropy nclass = 1001 else: nclass = 1001 input_shape = [batch_size, image_size, image_size, input_nchan] images = tf.truncated_normal( input_shape, dtype=input_data_type, stddev=1e-1, name='synthetic_images') labels = tf.random_uniform( [batch_size], minval=1, maxval=nclass, dtype=tf.int32, name='synthetic_labels') # Note: This results in a H2D copy, but no computation # Note: This avoids recomputation of the random values, but still # results in a H2D copy. images = tf.contrib.framework.local_variable(images, name='images') labels = tf.contrib.framework.local_variable(labels, name='labels') # Change to 0-based (don't use background class like Inception does) labels -= 1 if num_compute_devices == 1: images_splits = [images] labels_splits = [labels] else: images_splits = tf.split(images, num_compute_devices, 0) labels_splits = tf.split(labels, num_compute_devices, 0) return nclass, images_splits, labels_splits def create_config_proto(): config = tf.ConfigProto() config.allow_soft_placement = True config.intra_op_parallelism_threads = FLAGS.num_intra_threads config.inter_op_parallelism_threads = FLAGS.num_inter_threads config.gpu_options.force_gpu_compatible = FLAGS.force_gpu_compatible return config def get_mode_from_flags(): """Determine which mode this script is running.""" if FLAGS.forward_only and FLAGS.eval: raise ValueError('Only one of forward_only and eval flags is true') if FLAGS.eval: return 'evaluation' if FLAGS.forward_only: return 'forward-only' return 'training' def benchmark_one_step(sess, fetches, step, batch_size, step_train_times, trace_filename, summary_op=None): """Advance one step of benchmarking.""" if trace_filename is not None and step == -1: run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE) run_metadata = tf.RunMetadata() else: run_options = None run_metadata = None summary_str = None start_time = time.time() if summary_op is None: results = sess.run(fetches, options=run_options, run_metadata=run_metadata) else: (results, summary_str) = sess.run( [fetches, summary_op], options=run_options, run_metadata=run_metadata) if not FLAGS.forward_only: lossval = results[1] else: lossval = 0. 
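  # Note: in training mode, 'fetches' is assembled in _build_model() as
  # [train_op, total_loss] + enqueue_ops, so results[1] above is the scalar
  # training loss; in forward-only mode no loss is computed and 0. is logged.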
train_time = time.time() - start_time step_train_times.append(train_time) if step >= 0 and (step == 0 or (step + 1) % FLAGS.display_every == 0): log_fn('%i\t%s\t%.3f' % ( step + 1, get_perf_timing_str(batch_size, step_train_times), lossval)) if trace_filename is not None and step == -1: log_fn('Dumping trace to', trace_filename) trace = timeline.Timeline(step_stats=run_metadata.step_stats) with open(trace_filename, 'w') as trace_file: trace_file.write(trace.generate_chrome_trace_format(show_memory=True)) return summary_str def get_perf_timing_str(batch_size, step_train_times, scale=1): times = np.array(step_train_times) speeds = batch_size / times speed_mean = scale * batch_size / np.mean(times) if scale == 1: speed_uncertainty = np.std(speeds) / np.sqrt(float(len(speeds))) speed_madstd = 1.4826 * np.median(np.abs(speeds - np.median(speeds))) speed_jitter = speed_madstd return 'images/sec: %.1f +/- %.1f (jitter = %.1f)' % ( speed_mean, speed_uncertainty, speed_jitter) else: return 'images/sec: %.1f' % speed_mean def load_checkpoint(saver, sess, ckpt_dir): ckpt = tf.train.get_checkpoint_state(ckpt_dir) if ckpt and ckpt.model_checkpoint_path: if os.path.isabs(ckpt.model_checkpoint_path): # Restores from checkpoint with absolute path. model_checkpoint_path = ckpt.model_checkpoint_path else: # Restores from checkpoint with relative path. model_checkpoint_path = os.path.join(ckpt_dir, ckpt.model_checkpoint_path) # Assuming model_checkpoint_path looks something like: # /my-favorite-path/imagenet_train/model.ckpt-0, # extract global_step from it. global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1] if not global_step.isdigit(): global_step = 0 else: global_step = int(global_step) saver.restore(sess, model_checkpoint_path) log_fn('Successfully loaded model from %s.' % ckpt.model_checkpoint_path) return global_step else: raise RuntimeError('No checkpoint file found.') class BenchmarkCNN(object): """Class for benchmarking a cnn network.""" def __init__(self): self.model = FLAGS.model self.model_conf = model_config.get_model_config(self.model) self.trace_filename = FLAGS.trace_file self.data_format = FLAGS.data_format self.num_batches = FLAGS.num_batches autotune_threshold = FLAGS.autotune_threshold if ( FLAGS.autotune_threshold) else 1 min_autotune_warmup = 5 * autotune_threshold * autotune_threshold self.num_warmup_batches = FLAGS.num_warmup_batches if ( FLAGS.num_warmup_batches) else max(10, min_autotune_warmup) self.graph_file = FLAGS.graph_file self.resize_method = FLAGS.resize_method self.sync_queue_counter = 0 self.num_gpus = FLAGS.num_gpus # Use the batch size from the command line if specified, otherwise use the # model's default batch size. Scale the benchmark's batch size by the # number of GPUs. if FLAGS.batch_size > 0: self.model_conf.set_batch_size(FLAGS.batch_size) self.batch_size = self.model_conf.get_batch_size() * FLAGS.num_gpus # Use the learning rate from the command line if specified, otherwise use # the model's default learning rate, which must always be set. 
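    # Note (illustrative numbers): a model whose default batch size is 32,
    # run with --num_gpus=4 and no --batch_size override, gives
    # self.batch_size = 128 globally (32 per device); likewise
    # --learning_rate, when given, replaces the model's default below.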
assert self.model_conf.get_learning_rate() > 0.0 if FLAGS.learning_rate is not None: self.model_conf.set_learning_rate(FLAGS.learning_rate) self.job_name = FLAGS.job_name # "" for local training self.ps_hosts = FLAGS.ps_hosts.split(',') self.worker_hosts = FLAGS.worker_hosts.split(',') self.dataset = None self.data_name = FLAGS.data_name if FLAGS.data_dir is not None: if self.data_name is None: if 'imagenet' in FLAGS.data_dir: self.data_name = 'imagenet' elif 'flowers' in FLAGS.data_dir: self.data_name = 'flowers' else: raise ValueError('Could not identify name of dataset. ' 'Please specify with --data_name option.') if self.data_name == 'imagenet': self.dataset = datasets.ImagenetData(FLAGS.data_dir) elif self.data_name == 'flowers': self.dataset = datasets.FlowersData(FLAGS.data_dir) else: raise ValueError('Unknown dataset. Must be one of imagenet or flowers.') self.local_parameter_device_flag = FLAGS.local_parameter_device if self.job_name: self.task_index = FLAGS.task_index self.cluster = tf.train.ClusterSpec({'ps': self.ps_hosts, 'worker': self.worker_hosts}) self.server = None if not self.server: self.server = tf.train.Server(self.cluster, job_name=self.job_name, task_index=self.task_index, config=create_config_proto(), protocol=FLAGS.server_protocol) worker_prefix = '/job:worker/task:%s' % self.task_index self.param_server_device = tf.train.replica_device_setter( worker_device=worker_prefix + '/cpu:0', cluster=self.cluster) # This device on which the queues for managing synchronization between # servers should be stored. num_ps = len(self.ps_hosts) self.sync_queue_devices = ['/job:ps/task:%s/cpu:0' % i for i in range(num_ps)] else: self.task_index = 0 self.cluster = None self.server = None worker_prefix = '' self.param_server_device = '/%s:0' % FLAGS.local_parameter_device self.sync_queue_devices = [self.param_server_device] # Device to use for ops that need to always run on the local worker's CPU. self.cpu_device = '%s/cpu:0' % worker_prefix # Device to use for ops that need to always run on the local worker's # compute device, and never on a parameter server device. 
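    # Note: in local mode worker_prefix is empty, so with --num_gpus=2 this
    # yields ['/gpu:0', '/gpu:1']; in distributed mode each entry is
    # prefixed with '/job:worker/task:<task_index>'.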
self.raw_devices = ['%s/%s:%i' % (worker_prefix, FLAGS.device, i) for i in xrange(FLAGS.num_gpus)] if FLAGS.staged_vars and FLAGS.variable_update != 'parameter_server': raise ValueError('staged_vars for now is only supported with ' '--variable_update=parameter_server') if FLAGS.variable_update == 'parameter_server': if self.job_name: if not FLAGS.staged_vars: self.variable_mgr = variable_mgr.VariableMgrDistributedFetchFromPS( self) else: self.variable_mgr = ( variable_mgr.VariableMgrDistributedFetchFromStagedPS(self)) else: if not FLAGS.staged_vars: self.variable_mgr = variable_mgr.VariableMgrLocalFetchFromPS(self) else: self.variable_mgr = variable_mgr.VariableMgrLocalFetchFromStagedPS( self) elif FLAGS.variable_update == 'replicated': if self.job_name: raise ValueError('Invalid --variable_update in distributed mode: %s' % FLAGS.variable_update) self.variable_mgr = variable_mgr.VariableMgrLocalReplicated( self, FLAGS.use_nccl) elif FLAGS.variable_update == 'distributed_replicated': if not self.job_name: raise ValueError('Invalid --variable_update in local mode: %s' % FLAGS.variable_update) self.variable_mgr = variable_mgr.VariableMgrDistributedReplicated(self) elif FLAGS.variable_update == 'independent': if self.job_name: raise ValueError('Invalid --variable_update in distributed mode: %s' % FLAGS.variable_update) self.variable_mgr = variable_mgr.VariableMgrIndependent(self) else: raise ValueError('Invalid --variable_update: %s' % FLAGS.variable_update) # Device to use for running on the local worker's compute device, but # with variables assigned to parameter server devices. self.devices = self.variable_mgr.get_devices() if self.job_name: self.global_step_device = self.param_server_device else: self.global_step_device = self.cpu_device def print_info(self): """Print basic information.""" log_fn('Model: %s' % self.model) log_fn('Mode: %s' % get_mode_from_flags()) log_fn('Batch size: %s global' % self.batch_size) log_fn(' %s per device' % (self.batch_size / len(self.devices))) log_fn('Devices: %s' % self.raw_devices) log_fn('Data format: %s' % self.data_format) log_fn('Optimizer: %s' % FLAGS.optimizer) log_fn('Variables: %s' % FLAGS.variable_update) if FLAGS.variable_update == 'replicated': log_fn('Use NCCL: %s' % FLAGS.use_nccl) if self.job_name: log_fn('Sync: %s' % FLAGS.cross_replica_sync) if FLAGS.staged_vars: log_fn('Staged vars: %s' % FLAGS.staged_vars) log_fn('==========') def run(self): if FLAGS.job_name == 'ps': log_fn('Running parameter server %s' % self.task_index) self.server.join() return with tf.Graph().as_default(): if FLAGS.eval: self._eval_cnn() else: self._benchmark_cnn() def _eval_cnn(self): """Evaluate the model from a checkpoint using validation dataset.""" (enqueue_ops, fetches) = self._build_model() saver = tf.train.Saver(tf.global_variables()) summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, tf.get_default_graph()) target = '' with tf.Session(target=target, config=create_config_proto()) as sess: for i in xrange(len(enqueue_ops)): sess.run(enqueue_ops[:(i+1)]) if FLAGS.train_dir is None: raise ValueError('Trained model directory not specified') global_step = load_checkpoint(saver, sess, FLAGS.train_dir) start_time = time.time() count_top_1 = 0.0 count_top_5 = 0.0 total_eval_count = self.num_batches * self.batch_size for step in xrange(self.num_batches): results = sess.run(fetches) count_top_1 += results[0] count_top_5 += results[1] if (step + 1) % FLAGS.display_every == 0: duration = time.time() - start_time examples_per_sec = self.batch_size * self.num_batches 
/ duration log_fn('%i\t%.1f examples/sec' % (step + 1, examples_per_sec)) start_time = time.time() precision_at_1 = count_top_1 / total_eval_count recall_at_5 = count_top_5 / total_eval_count summary = tf.Summary() summary.value.add(tag='eval/Accuracy@1', simple_value=precision_at_1) summary.value.add(tag='eval/Recall@5', simple_value=recall_at_5) summary_writer.add_summary(summary, global_step) log_fn('Precision @ 1 = %.4f recall @ 5 = %.4f [%d examples]' % (precision_at_1, recall_at_5, total_eval_count)) def _benchmark_cnn(self): """Run cnn in benchmark mode. When forward_only on, it forwards CNN.""" (enqueue_ops, fetches) = self._build_model() main_fetch_group = tf.group(*fetches) execution_barrier = None if self.job_name and not FLAGS.cross_replica_sync: execution_barrier = self.add_sync_queues_and_barrier( 'execution_barrier_', []) global_step = tf.contrib.framework.get_global_step() with tf.device(self.global_step_device): with tf.control_dependencies([main_fetch_group]): inc_global_step = global_step.assign_add(1) fetches.append(inc_global_step) if self.job_name and FLAGS.cross_replica_sync: # Block all replicas until all replicas are ready for next step. fetches.append(self.add_sync_queues_and_barrier( 'sync_queues_step_end_', [main_fetch_group])) variable_mgr_post_init_ops = self.variable_mgr.get_post_init_ops() if variable_mgr_post_init_ops: post_init_op_group = tf.group(*variable_mgr_post_init_ops) else: post_init_op_group = None local_var_init_op = tf.local_variables_initializer() summary_op = tf.summary.merge_all() is_chief = (not self.job_name or self.task_index == 0) summary_writer = None if (is_chief and FLAGS.summary_verbosity and FLAGS.train_dir and FLAGS.save_summaries_steps > 0): summary_writer = tf.summary.FileWriter(FLAGS.train_dir, tf.get_default_graph()) # We run the summaries in the same thread as the training operations by # passing in None for summary_op to avoid a summary_thread being started. # Running summaries and training operations in parallel could run out of # GPU memory. sv = tf.train.Supervisor( is_chief=is_chief, logdir=FLAGS.train_dir, saver=tf.train.Saver(tf.global_variables()), global_step=global_step, summary_op=None, save_model_secs=FLAGS.save_model_secs, summary_writer=summary_writer) step_train_times = [] with sv.managed_session( master=self.server.target if self.server else '', config=create_config_proto(), start_standard_services=FLAGS.summary_verbosity > 0) as sess: for i in xrange(len(enqueue_ops)): sess.run(enqueue_ops[:(i+1)]) sess.run(local_var_init_op) if post_init_op_group: sess.run(post_init_op_group) init_global_step = 0 if FLAGS.pretrain_dir is not None: init_global_step = load_checkpoint(sv.saver, sess, FLAGS.pretrain_dir) global_step_watcher = GlobalStepWatcher( sess, global_step, len(self.worker_hosts) * self.num_warmup_batches + init_global_step, len(self.worker_hosts) * ( self.num_warmup_batches + self.num_batches) - 1) global_step_watcher.start() if self.graph_file is not None: path, filename = os.path.split(self.graph_file) as_text = filename.endswith('txt') log_fn('Writing GraphDef as %s to %s' % ( 'text' if as_text else 'binary', self.graph_file)) tf.train.write_graph(sess.graph_def, path, filename, as_text) log_fn('Running warm up') local_step = -1 * self.num_warmup_batches if FLAGS.cross_replica_sync and FLAGS.job_name: # In cross-replica sync mode, all workers must run the same number of # local steps, or else the workers running the extra step will block. 
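        # Note: local_step starts at -self.num_warmup_batches, so the
        # comparison below stops every worker after exactly
        # num_warmup_batches + num_batches local iterations, keeping
        # synchronized replicas in lock step.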
done_fn = lambda: local_step == self.num_batches else: done_fn = lambda: global_step_watcher.done() while not done_fn(): if local_step == 0: log_fn('Done warm up') if execution_barrier: log_fn('Waiting for other replicas to finish warm up') assert global_step_watcher.start_time == 0 sess.run([execution_barrier]) log_fn('Step\tImg/sec\tloss') assert len(step_train_times) == self.num_warmup_batches step_train_times = [] # reset to ignore warm up batches if (summary_writer and (local_step + 1) % FLAGS.save_summaries_steps == 0): fetch_summary = summary_op else: fetch_summary = None summary_str = benchmark_one_step( sess, fetches, local_step, self.batch_size, step_train_times, self.trace_filename, fetch_summary) if summary_str is not None and is_chief: sv.summary_computed(sess, summary_str) local_step += 1 # Waits for the global step to be done, regardless of done_fn. while not global_step_watcher.done(): time.sleep(.25) images_per_sec = global_step_watcher.steps_per_second() * self.batch_size log_fn('-' * 64) log_fn('total images/sec: %.2f' % images_per_sec) log_fn('-' * 64) if is_chief: store_benchmarks({'total_images_per_sec': images_per_sec}) # Save the model checkpoint. if FLAGS.train_dir is not None and is_chief: checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt') if not gfile.Exists(FLAGS.train_dir): gfile.MakeDirs(FLAGS.train_dir) sv.saver.save(sess, checkpoint_path, global_step) if execution_barrier: # Wait for other workers to reach the end, so this worker doesn't # go away underneath them. sess.run([execution_barrier]) sv.stop() def _build_model(self): """Build the TensorFlow graph.""" image_size = self.model_conf.get_image_size() data_type = tf.float32 input_data_type = tf.float32 input_nchan = 3 tf.set_random_seed(1234) np.random.seed(4321) phase_train = not (FLAGS.eval or FLAGS.forward_only) log_fn('Generating model') losses = [] device_grads = [] all_logits = [] all_top_1_ops = [] all_top_5_ops = [] enqueue_ops = [] gpu_copy_stage_ops = [] gpu_compute_stage_ops = [] gpu_grad_stage_ops = [] use_synthetic_gpu_images = (self.dataset is None) with tf.device(self.global_step_device): global_step = tf.contrib.framework.get_or_create_global_step() # Build the processing and model for the worker. with tf.device(self.cpu_device): nclass, images_splits, labels_splits = add_image_preprocessing( self.dataset, input_nchan, image_size, self.batch_size, len(self.devices), input_data_type, self.resize_method, not FLAGS.eval) update_ops = None staging_delta_ops = [] for device_num in range(len(self.devices)): with self.variable_mgr.create_outer_variable_scope( device_num), tf.name_scope('tower_%i' % device_num) as name_scope: results = self.add_forward_pass_and_gradients( images_splits[device_num], labels_splits[device_num], nclass, phase_train, device_num, input_data_type, data_type, input_nchan, use_synthetic_gpu_images, gpu_copy_stage_ops, gpu_compute_stage_ops, gpu_grad_stage_ops) if phase_train: losses.append(results[0]) device_grads.append(results[1]) else: all_logits.append(results[0]) all_top_1_ops.append(results[1]) all_top_5_ops.append(results[2]) if self.variable_mgr.retain_tower_updates(device_num): # Retain the Batch Normalization updates operations only from the # first tower. Ideally, we should grab the updates from all towers but # these stats accumulate extremely fast so we can ignore the other # stats from the other towers without significant detriment. 
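          # Note: these UPDATE_OPS are the moving mean/variance updates
          # registered by tf.contrib.layers.batch_norm; they are later
          # folded into train_op via tf.group(*(training_ops + update_ops +
          # extra_nccl_ops)) so that batch-norm statistics advance with
          # every training step.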
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, name_scope) staging_delta_ops = list(self.variable_mgr.staging_delta_ops) if not update_ops: update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, name_scope) enqueue_ops.append(tf.group(*gpu_copy_stage_ops)) if self.variable_mgr.supports_staged_vars(): for staging_ops in self.variable_mgr.staging_vars_on_devices: gpu_compute_stage_ops.extend( [put_op for _, (put_op, _) in six.iteritems(staging_ops)]) enqueue_ops.append(tf.group(*gpu_compute_stage_ops)) if gpu_grad_stage_ops: staging_delta_ops += gpu_grad_stage_ops if staging_delta_ops: enqueue_ops.append(tf.group(*(staging_delta_ops))) if not phase_train: if FLAGS.forward_only: all_logits = tf.concat(all_logits, 0) fetches = [all_logits] + enqueue_ops else: all_top_1_ops = tf.reduce_sum(all_top_1_ops) all_top_5_ops = tf.reduce_sum(all_top_5_ops) fetches = [all_top_1_ops, all_top_5_ops] + enqueue_ops return (enqueue_ops, fetches) extra_nccl_ops = [] apply_gradient_devices, gradient_state = ( self.variable_mgr.preprocess_device_grads(device_grads)) training_ops = [] for d, device in enumerate(apply_gradient_devices): with tf.device(device): total_loss = tf.reduce_mean(losses) avg_grads = self.variable_mgr.get_gradients_to_apply(d, gradient_state) gradient_clip = FLAGS.gradient_clip learning_rate = self.model_conf.get_learning_rate() if self.dataset and FLAGS.num_epochs_per_decay > 0: num_batches_per_epoch = ( self.dataset.num_examples_per_epoch() / self.batch_size) decay_steps = int(num_batches_per_epoch * FLAGS.num_epochs_per_decay) # Decay the learning rate exponentially based on the number of steps. learning_rate = tf.train.exponential_decay( FLAGS.learning_rate, global_step, decay_steps, FLAGS.learning_rate_decay_factor, staircase=True) if gradient_clip is not None: clipped_grads = [ (tf.clip_by_value(grad, -gradient_clip, +gradient_clip), var) for grad, var in avg_grads ] else: clipped_grads = avg_grads if FLAGS.optimizer == 'momentum': opt = tf.train.MomentumOptimizer( learning_rate, FLAGS.momentum, use_nesterov=True) elif FLAGS.optimizer == 'sgd': opt = tf.train.GradientDescentOptimizer(learning_rate) elif FLAGS.optimizer == 'rmsprop': opt = tf.train.RMSPropOptimizer(learning_rate, FLAGS.rmsprop_decay, momentum=FLAGS.rmsprop_momentum, epsilon=FLAGS.rmsprop_epsilon) else: raise ValueError('Optimizer "%s" was not recognized', FLAGS.optimizer) self.variable_mgr.append_apply_gradients_ops( gradient_state, opt, clipped_grads, training_ops) train_op = tf.group(*(training_ops + update_ops + extra_nccl_ops)) with tf.device(self.cpu_device): if self.task_index == 0 and FLAGS.summary_verbosity > 0: tf.summary.scalar('learning_rate', learning_rate) tf.summary.scalar('total_loss', total_loss) for grad, var in avg_grads: if grad is not None: tf.summary.histogram(var.op.name + '/gradients', grad) for var in tf.trainable_variables(): tf.summary.histogram(var.op.name, var) fetches = [train_op, total_loss] + enqueue_ops return (enqueue_ops, fetches) def add_forward_pass_and_gradients( self, host_images, host_labels, nclass, phase_train, device_num, input_data_type, data_type, input_nchan, use_synthetic_gpu_images, gpu_copy_stage_ops, gpu_compute_stage_ops, gpu_grad_stage_ops): """Add ops for forward-pass and gradient computations.""" if not use_synthetic_gpu_images: with tf.device(self.cpu_device): images_shape = host_images.get_shape() labels_shape = host_labels.get_shape() gpu_copy_stage = data_flow_ops.StagingArea( [tf.float32, tf.int32], shapes=[images_shape, labels_shape]) 
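        # Note (illustrative sketch, hypothetical tensor names): a
        # StagingArea is a staging buffer whose put() and get() can be run
        # in different session steps, decoupling producer from consumer:
        #
        #   stage = data_flow_ops.StagingArea([tf.float32, tf.int32],
        #                                     shapes=[[8, 32], [8]])
        #   put_op = stage.put([features, labels])  # copy for step N+1
        #   features_s, labels_s = stage.get()      # consumed at step N
        #
        # Running the put ops alongside the model overlaps input transfer
        # with compute; this file chains a CPU copy stage into a GPU
        # compute stage in exactly this way.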
gpu_copy_stage_op = gpu_copy_stage.put( [host_images, host_labels]) gpu_copy_stage_ops.append(gpu_copy_stage_op) host_images, host_labels = gpu_copy_stage.get() with tf.device(self.raw_devices[device_num]): if not use_synthetic_gpu_images: gpu_compute_stage = data_flow_ops.StagingArea( [tf.float32, tf.int32], shapes=[images_shape, labels_shape] ) # The CPU-to-GPU copy is triggered here. gpu_compute_stage_op = gpu_compute_stage.put( [host_images, host_labels]) images, labels = gpu_compute_stage.get() images = tf.reshape(images, shape=images_shape) gpu_compute_stage_ops.append(gpu_compute_stage_op) else: # Minor hack to avoid H2D copy when using synthetic data images = tf.truncated_normal( host_images.get_shape(), dtype=input_data_type, stddev=1e-1, name='synthetic_images') images = tf.contrib.framework.local_variable( images, name='gpu_cached_images') labels = host_labels with tf.device(self.devices[device_num]): # Rescale to [0, 1) images *= 1. / 256 # Rescale to [-1,1] instead of [0, 1) images = tf.subtract(images, 0.5) images = tf.multiply(images, 2.0) if self.data_format == 'NCHW': images = tf.transpose(images, [0, 3, 1, 2]) if input_data_type != data_type: images = tf.cast(images, data_type) network = ConvNetBuilder( images, input_nchan, phase_train, self.data_format, data_type) self.model_conf.add_inference(network) # Add the final fully-connected class layer logits = network.affine(nclass, activation='linear') if not phase_train: top_1_op = tf.reduce_sum( tf.cast(tf.nn.in_top_k(logits, labels, 1), data_type)) top_5_op = tf.reduce_sum( tf.cast(tf.nn.in_top_k(logits, labels, 5), data_type)) return (logits, top_1_op, top_5_op) loss = loss_function(logits, labels) params = self.variable_mgr.trainable_variables_on_device(device_num) l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in params]) weight_decay = FLAGS.weight_decay if weight_decay is not None and weight_decay != 0.: loss += weight_decay * l2_loss aggmeth = tf.AggregationMethod.DEFAULT grads = tf.gradients(loss, params, aggregation_method=aggmeth) if FLAGS.staged_vars: grad_dtypes = [grad.dtype for grad in grads] grad_shapes = [grad.shape for grad in grads] grad_stage = data_flow_ops.StagingArea(grad_dtypes, grad_shapes) grad_stage_op = grad_stage.put(grads) # In general, this decouples the computation of the gradients and # the updates of the weights. # During the pipeline warm up, this runs enough training to produce # the first set of gradients. gpu_grad_stage_ops.append(grad_stage_op) grads = grad_stage.get() param_refs = self.variable_mgr.trainable_variables_on_device( device_num, writable=True) gradvars = list(zip(grads, param_refs)) return (loss, gradvars) def add_sync_queues_and_barrier(self, name_prefix, enqueue_after_list): """Adds ops to enqueue on all worker queues. Args: name_prefix: prefixed for the shared_name of ops. enqueue_after_list: control dependency from ops. Returns: an op that should be used as control dependency before starting next step. """ self.sync_queue_counter += 1 num_workers = self.cluster.num_tasks('worker') with tf.device(self.sync_queue_devices[ self.sync_queue_counter % len(self.sync_queue_devices)]): sync_queues = [ tf.FIFOQueue(num_workers, [tf.bool], shapes=[[]], shared_name='%s%s' % (name_prefix, i)) for i in range(num_workers)] queue_ops = [] # For each other worker, add an entry in a queue, signaling that it can # finish this step. 
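      # Note: the barrier works by cross-enqueueing: each worker enqueues
      # one token into every *other* worker's queue, then dequeues
      # num_workers - 1 tokens from its own queue, so the op returned below
      # completes only once every worker has reached this point.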
      token = tf.constant(False)
      with tf.control_dependencies(enqueue_after_list):
        for i, q in enumerate(sync_queues):
          if i == self.task_index:
            queue_ops.append(tf.no_op())
          else:
            queue_ops.append(q.enqueue(token))

      # Drain tokens off the queue for this worker, one for each other worker.
      queue_ops.append(
          sync_queues[self.task_index].dequeue_many(len(sync_queues) - 1))

      return tf.group(*queue_ops)


def store_benchmarks(names_to_values):
  if FLAGS.result_storage:
    benchmark_storage.store_benchmark(names_to_values, FLAGS.result_storage)


def main(_):
  if FLAGS.winograd_nonfused:
    os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
  else:
    os.environ.pop('TF_ENABLE_WINOGRAD_NONFUSED', None)
  if FLAGS.autotune_threshold:
    os.environ['TF_AUTOTUNE_THRESHOLD'] = str(FLAGS.autotune_threshold)
  os.environ['TF_SYNC_ON_FINISH'] = str(int(FLAGS.sync_on_finish))
  # NOTE: this parser is constructed but never used; all flags above are
  # defined through tf.flags.
  argparse.ArgumentParser(
      formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  bench = BenchmarkCNN()
  tfversion = cnn_util.tensorflow_version_tuple()
  log_fn('TensorFlow: %i.%i' % (tfversion[0], tfversion[1]))
  bench.print_info()
  bench.run()


if __name__ == '__main__':
  tf.app.run()
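A brief editorial aside on the schedule built in the training graph above: with staircase=True, tf.train.exponential_decay multiplies the base rate by the decay factor once per decay_steps interval. The following standalone numpy sketch (not part of the benchmark file; the values are hypothetical, not the flag defaults) reproduces that behavior:

import numpy as np

def staircase_decay(lr_init, global_step, decay_steps, decay_factor):
    # staircase=True floors the exponent, so the rate drops in discrete steps.
    return lr_init * decay_factor ** np.floor(global_step / decay_steps)

lr_init, decay_steps, decay_factor = 0.1, 1000, 0.94  # hypothetical values
for step in (0, 999, 1000, 2500):
    print(step, staircase_decay(lr_init, step, decay_steps, decay_factor))
# 0 -> 0.1, 999 -> 0.1, 1000 -> 0.094, 2500 -> 0.08836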
[ "tensorflow.device", "tensorflow.concat", "numpy.sqrt", "tensorflow.control_dependencies", "tensorflow.reduce_sum", "tensorflow.RunMetadata", "tensorflow.global_variables", "tensorflow.cast", "tensorflow.python.layers.convolutional.conv2d", "tensorflow.python.ops.data_flow_ops.StagingArea", "tensorflow.python.platform.gfile.Exists", "tensorflow.nn.l2_loss", "numpy.mean", "tensorflow.pad", "tensorflow.flags.DEFINE_float", "tensorflow.get_default_graph", "tensorflow.group", "tensorflow.summary.scalar", "tensorflow.python.client.timeline.Timeline", "tensorflow.Graph", "tensorflow.get_collection", "tensorflow.gradients", "tensorflow.contrib.framework.local_variable", "tensorflow.ConfigProto", "tensorflow.subtract", "numpy.std", "tensorflow.python.platform.gfile.MakeDirs", "tensorflow.train.exponential_decay", "tensorflow.name_scope", "tensorflow.python.layers.pooling.max_pooling2d", "tensorflow.train.MomentumOptimizer", "tensorflow.trainable_variables", "tensorflow.Summary", "tensorflow.nn.in_top_k", "tensorflow.train.write_graph", "tensorflow.app.run", "tensorflow.flags.DEFINE_boolean", "tensorflow.matmul", "tensorflow.truncated_normal", "tensorflow.FIFOQueue", "tensorflow.python.layers.pooling.average_pooling2d", "tensorflow.train.RMSPropOptimizer", "tensorflow.RunOptions", "numpy.median", "tensorflow.clip_by_value", "tensorflow.python.layers.core.dropout", "tensorflow.nn.tanh", "tensorflow.train.GradientDescentOptimizer", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.summary.merge_all", "tensorflow.no_op", "tensorflow.set_random_seed", "tensorflow.contrib.layers.batch_norm", "tensorflow.split", "numpy.array", "tensorflow.flags.DEFINE_integer", "tensorflow.summary.histogram", "tensorflow.train.get_checkpoint_state", "tensorflow.nn.relu", "tensorflow.nn.bias_add", "tensorflow.multiply", "tensorflow.local_variables_initializer", "tensorflow.constant", "numpy.random.seed", "tensorflow.reduce_mean", "tensorflow.flags.DEFINE_string", "tensorflow.transpose", "tensorflow.reshape", "tensorflow.contrib.framework.get_global_step", "tensorflow.train.ClusterSpec", "tensorflow.train.replica_device_setter", "tensorflow.constant_initializer", "tensorflow.variable_scope", "tensorflow.contrib.framework.get_or_create_global_step", "tensorflow.random_uniform" ]
scripts/tf_cnn_benchmarks/tf_cnn_benchmarks.py
[(48, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""model"""', '"""trivial"""', '"""name of the model to run"""'], {}), True, 'import tensorflow as tf\n'), (59, 'tensorflow.flags.DEFINE_boolean', 'tf.flags.DEFINE_boolean', (['"""eval"""', '(False)', '"""whether use eval or benchmarking"""'], {}), True, 'import tensorflow as tf\n'), (60, 'tensorflow.flags.DEFINE_boolean', 'tf.flags.DEFINE_boolean', (['"""forward_only"""', '(False)', '"""whether use forward-only or\n training for benchmarking"""'], {}), True, 'import tensorflow as tf\n'), (62, 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""batch_size"""', '(0)', '"""batch size per compute device"""'], {}), True, 'import tensorflow as tf\n'), (63, 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""num_batches"""', '(100)', '"""number of batches to run, excluding warmup"""'], {}), True, 'import tensorflow as tf\n'), (65, 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""num_warmup_batches"""', 'None', '"""number of batches to run before timing"""'], {}), True, 'import tensorflow as tf\n'), (67, 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""autotune_threshold"""', 'None', '"""The autotune threshold for the models"""'], {}), True, 'import tensorflow as tf\n'), (69, 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""num_gpus"""', '(1)', '"""the number of GPUs to run on"""'], {}), True, 'import tensorflow as tf\n'), (70, 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""display_every"""', '(10)', '"""Number of local steps after which progress is printed\n out"""'], {}), True, 'import tensorflow as tf\n'), (73, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""data_dir"""', 'None', '"""Path to dataset in TFRecord format\n (aka Example protobufs). If not specified,\n synthetic data will be used."""'], {}), True, 'import tensorflow as tf\n'), (76, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""data_name"""', 'None', '"""Name of dataset: imagenet or flowers.\n If not specified, it is automatically guessed\n based on --data_dir."""'], {}), True, 'import tensorflow as tf\n'), (80, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""resize_method"""', '"""bilinear"""', '"""Method for resizing input images:\n crop,nearest,bilinear,bicubic or area.\n The \'crop\' mode requires source images to be at least\n as large as the network input size,\n while the other modes support any sizes and apply\n random bbox distortions\n before resizing (even with --nodistortions)."""'], {}), True, 'import tensorflow as tf\n'), (88, 'tensorflow.flags.DEFINE_boolean', 'tf.flags.DEFINE_boolean', (['"""distortions"""', '(True)', '"""Enable/disable distortions during\n image preprocessing. 
These include bbox and color\n distortions."""'], {}), True, 'import tensorflow as tf\n'), (92, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""local_parameter_device"""', '"""gpu"""', '"""Device to use as parameter server: cpu or gpu.\n For distributed training, it can affect where caching\n of variables happens."""'], {}), True, 'import tensorflow as tf\n'), (96, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""device"""', '"""gpu"""', '"""Device to use for computation: cpu or gpu"""'], {}), True, 'import tensorflow as tf\n'), (99, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""data_format"""', '"""NHWC"""', '"""Data layout to use: NHWC (TF native)\n or NCHW (cuDNN native)."""'], {}), True, 'import tensorflow as tf\n'), (102, 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""num_intra_threads"""', '(1)', '"""Number of threads to use for intra-op\n parallelism. If set to 0, the system will pick\n an appropriate number."""'], {}), True, 'import tensorflow as tf\n'), (106, 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""num_inter_threads"""', '(0)', '"""Number of threads to use for inter-op\n parallelism. If set to 0, the system will pick\n an appropriate number."""'], {}), True, 'import tensorflow as tf\n'), (110, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""trace_file"""', 'None', '"""Enable TensorFlow tracing and write trace to\n this file."""'], {}), True, 'import tensorflow as tf\n'), (113, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""graph_file"""', 'None', '"""Write the model\'s graph definition to this\n file. Defaults to binary format unless filename ends\n in \'txt\'."""'], {}), True, 'import tensorflow as tf\n'), (117, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""optimizer"""', '"""sgd"""', '"""Optimizer to use: momentum or sgd or rmsprop"""'], {}), True, 'import tensorflow as tf\n'), (119, 'tensorflow.flags.DEFINE_float', 'tf.flags.DEFINE_float', (['"""learning_rate"""', 'None', '"""Initial learning rate for training."""'], {}), True, 'import tensorflow as tf\n'), (121, 'tensorflow.flags.DEFINE_float', 'tf.flags.DEFINE_float', (['"""num_epochs_per_decay"""', '(0)', '"""Steps after which learning rate decays."""'], {}), True, 'import tensorflow as tf\n'), (123, 'tensorflow.flags.DEFINE_float', 'tf.flags.DEFINE_float', (['"""learning_rate_decay_factor"""', '(0.94)', '"""Learning rate decay factor."""'], {}), True, 'import tensorflow as tf\n'), (125, 'tensorflow.flags.DEFINE_float', 'tf.flags.DEFINE_float', (['"""momentum"""', '(0.9)', '"""Momentum for training."""'], {}), True, 'import tensorflow as tf\n'), (126, 'tensorflow.flags.DEFINE_float', 'tf.flags.DEFINE_float', (['"""rmsprop_decay"""', '(0.9)', '"""Decay term for RMSProp."""'], {}), True, 'import tensorflow as tf\n'), (127, 'tensorflow.flags.DEFINE_float', 'tf.flags.DEFINE_float', (['"""rmsprop_momentum"""', '(0.9)', '"""Momentum in RMSProp."""'], {}), True, 'import tensorflow as tf\n'), (128, 'tensorflow.flags.DEFINE_float', 'tf.flags.DEFINE_float', (['"""rmsprop_epsilon"""', '(1.0)', '"""Epsilon term for RMSProp."""'], {}), True, 'import tensorflow as tf\n'), (129, 'tensorflow.flags.DEFINE_float', 'tf.flags.DEFINE_float', (['"""gradient_clip"""', 'None', '"""Gradient clipping magnitude.\n Disabled by default."""'], {}), True, 'import tensorflow as tf\n'), (131, 'tensorflow.flags.DEFINE_float', 'tf.flags.DEFINE_float', (['"""weight_decay"""', '(4e-05)', '"""Weight 
decay factor for training."""'], {}), True, 'import tensorflow as tf\n'), (135, 'tensorflow.flags.DEFINE_boolean', 'tf.flags.DEFINE_boolean', (['"""winograd_nonfused"""', '(True)', '"""Enable/disable using the Winograd non-fused\n algorithms."""'], {}), True, 'import tensorflow as tf\n'), (138, 'tensorflow.flags.DEFINE_boolean', 'tf.flags.DEFINE_boolean', (['"""sync_on_finish"""', '(False)', '"""Enable/disable whether the devices are synced after\n each step."""'], {}), True, 'import tensorflow as tf\n'), (141, 'tensorflow.flags.DEFINE_boolean', 'tf.flags.DEFINE_boolean', (['"""staged_vars"""', '(False)', '"""whether the variables are staged from the main\n computation"""'], {}), True, 'import tensorflow as tf\n'), (144, 'tensorflow.flags.DEFINE_boolean', 'tf.flags.DEFINE_boolean', (['"""force_gpu_compatible"""', '(True)', '"""whether to enable force_gpu_compatible in\n GPU_Options"""'], {}), True, 'import tensorflow as tf\n'), (165, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""variable_update"""', '"""parameter_server"""', '"""The method for managing variables: parameter_server, replicated, distributed_replicated, independent"""'], {}), True, 'import tensorflow as tf\n'), (169, 'tensorflow.flags.DEFINE_boolean', 'tf.flags.DEFINE_boolean', (['"""use_nccl"""', '(True)', '"""Whether to use nccl all-reduce primitives where possible"""'], {}), True, 'import tensorflow as tf\n'), (174, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""job_name"""', '""""""', '"""One of "ps", "worker", "". Empty for local training"""'], {}), True, 'import tensorflow as tf\n'), (176, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""ps_hosts"""', '""""""', '"""Comma-separated list of target hosts"""'], {}), True, 'import tensorflow as tf\n'), (177, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""worker_hosts"""', '""""""', '"""Comma-separated list of target hosts"""'], {}), True, 'import tensorflow as tf\n'), (179, 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""task_index"""', '(0)', '"""Index of task within the job"""'], {}), True, 'import tensorflow as tf\n'), (180, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""server_protocol"""', '"""grpc"""', '"""protocol for servers"""'], {}), True, 'import tensorflow as tf\n'), (181, 'tensorflow.flags.DEFINE_boolean', 'tf.flags.DEFINE_boolean', (['"""cross_replica_sync"""', '(True)', '""""""'], {}), True, 'import tensorflow as tf\n'), (184, 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""summary_verbosity"""', '(0)', '"""Verbosity level for summary ops. Pass 0 to disable\n both summaries and checkpoints."""'], {}), True, 'import tensorflow as tf\n'), (187, 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""save_summaries_steps"""', '(0)', '"""How often to save summaries for trained models.\n Pass 0 to disable summaries."""'], {}), True, 'import tensorflow as tf\n'), (190, 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""save_model_secs"""', '(0)', '"""How often to save trained models. 
Pass 0 to disable\n checkpoints"""'], {}), True, 'import tensorflow as tf\n'), (193, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""train_dir"""', 'None', '"""Path to session checkpoints."""'], {}), True, 'import tensorflow as tf\n'), (195, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""eval_dir"""', '"""/tmp/tf_cnn_benchmarks/eval"""', '"""Directory where to write eval event logs."""'], {}), True, 'import tensorflow as tf\n'), (197, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""pretrain_dir"""', 'None', '"""Path to pretrained session checkpoints."""'], {}), True, 'import tensorflow as tf\n'), (199, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""result_storage"""', 'None', '"""Specifies storage option for benchmark results.\n None means results won\'t be stored.\n \'cbuild_benchmark_datastore\' means results will be stored\n in cbuild datastore (note: this option requires special\n pemissions and meant to be used from cbuilds)."""'], {}), True, 'import tensorflow as tf\n'), (575, 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'logits': 'logits', 'labels': 'labels', 'name': '"""xentropy"""'}), True, 'import tensorflow as tf\n'), (577, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cross_entropy'], {'name': '"""xentropy_mean"""'}), True, 'import tensorflow as tf\n'), (631, 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), True, 'import tensorflow as tf\n'), (661, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (688, 'numpy.array', 'np.array', (['step_train_times'], {}), True, 'import numpy as np\n'), (702, 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['ckpt_dir'], {}), True, 'import tensorflow as tf\n'), (1334, 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), False, 'import argparse\n'), (1340, 'cnn_util.tensorflow_version_tuple', 'cnn_util.tensorflow_version_tuple', ([], {}), False, 'import cnn_util\n'), (1348, 'tensorflow.app.run', 'tf.app.run', ([], {}), True, 'import tensorflow as tf\n'), (219, 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), False, 'import threading\n'), (269, 'collections.defaultdict', 'defaultdict', (['(lambda : 0)'], {}), False, 'from collections import defaultdict\n'), (371, 'tensorflow.python.layers.pooling.max_pooling2d', 'pooling_layers.max_pooling2d', (['input_layer', '[k_height, k_width]', '[d_height, d_width]'], {'padding': 'mode', 'data_format': 'self.channel_pos', 'name': 'name'}), True, 'from tensorflow.python.layers import pooling as pooling_layers\n'), (394, 'tensorflow.python.layers.pooling.average_pooling2d', 'pooling_layers.average_pooling2d', (['input_layer', '[k_height, k_width]', '[d_height, d_width]'], {'padding': 'mode', 'data_format': 'self.channel_pos', 'name': 'name'}), True, 'from tensorflow.python.layers import pooling as pooling_layers\n'), (405, 'tensorflow.reshape', 'tf.reshape', (['input_layer', 'shape'], {}), True, 'import tensorflow as tf\n'), (532, 'tensorflow.nn.relu', 'tf.nn.relu', (['(inlayer + scale * self.top_layer)'], {}), True, 'import tensorflow as tf\n'), (538, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.top_layer', 'axes'], {'keep_dims': 'keep_dims', 'name': 'name'}), True, 'import tensorflow as tf\n'), (586, 'preprocessing.ImagePreprocessor', 'preprocessing.ImagePreprocessor', (['image_size', 'image_size', 'batch_size', 
'num_compute_devices', 'input_data_type'], {'train': 'train', 'resize_method': 'resize_method'}), False, 'import preprocessing\n'), (603, 'tensorflow.truncated_normal', 'tf.truncated_normal', (['input_shape'], {'dtype': 'input_data_type', 'stddev': '(0.1)', 'name': '"""synthetic_images"""'}), True, 'import tensorflow as tf\n'), (608, 'tensorflow.random_uniform', 'tf.random_uniform', (['[batch_size]'], {'minval': '(1)', 'maxval': 'nclass', 'dtype': 'tf.int32', 'name': '"""synthetic_labels"""'}), True, 'import tensorflow as tf\n'), (617, 'tensorflow.contrib.framework.local_variable', 'tf.contrib.framework.local_variable', (['images'], {'name': '"""images"""'}), True, 'import tensorflow as tf\n'), (618, 'tensorflow.contrib.framework.local_variable', 'tf.contrib.framework.local_variable', (['labels'], {'name': '"""labels"""'}), True, 'import tensorflow as tf\n'), (655, 'tensorflow.RunOptions', 'tf.RunOptions', ([], {'trace_level': 'tf.RunOptions.FULL_TRACE'}), True, 'import tensorflow as tf\n'), (656, 'tensorflow.RunMetadata', 'tf.RunMetadata', ([], {}), True, 'import tensorflow as tf\n'), (673, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (681, 'tensorflow.python.client.timeline.Timeline', 'timeline.Timeline', ([], {'step_stats': 'run_metadata.step_stats'}), False, 'from tensorflow.python.client import timeline\n'), (690, 'numpy.mean', 'np.mean', (['times'], {}), True, 'import numpy as np\n'), (704, 'os.path.isabs', 'os.path.isabs', (['ckpt.model_checkpoint_path'], {}), False, 'import os\n'), (730, 'model_config.get_model_config', 'model_config.get_model_config', (['self.model'], {}), False, 'import model_config\n'), (928, 'tensorflow.group', 'tf.group', (['*fetches'], {}), True, 'import tensorflow as tf\n'), (934, 'tensorflow.contrib.framework.get_global_step', 'tf.contrib.framework.get_global_step', ([], {}), True, 'import tensorflow as tf\n'), (951, 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (952, 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), True, 'import tensorflow as tf\n'), (1061, 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(1234)'], {}), True, 'import tensorflow as tf\n'), (1062, 'numpy.random.seed', 'np.random.seed', (['(4321)'], {}), True, 'import numpy as np\n'), (1181, 'tensorflow.group', 'tf.group', (['*(training_ops + update_ops + extra_nccl_ops)'], {}), True, 'import tensorflow as tf\n'), (1323, 'benchmark_storage.store_benchmark', 'benchmark_storage.store_benchmark', (['names_to_values', 'FLAGS.result_storage'], {}), False, 'import benchmark_storage\n'), (1330, 'os.environ.pop', 'os.environ.pop', (['"""TF_ENABLE_WINOGRAD_NONFUSED"""', 'None'], {}), False, 'import os\n'), (232, 'time.sleep', 'time.sleep', (['(0.25)'], {}), False, 'import time\n'), (292, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (420, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (453, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (485, 'tensorflow.nn.relu', 'tf.nn.relu', (['(shortcut + res)'], {}), True, 'import tensorflow as tf\n'), (497, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (524, 'tensorflow.concat', 'tf.concat', (['[layers[-1] for layers in col_layers]', 'catdim'], {}), True, 'import tensorflow as tf\n'), (548, 'tensorflow.variable_scope', 
'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (551, 'tensorflow.python.layers.core.dropout', 'core_layers.dropout', (['input_layer', 'keep_prob'], {}), True, 'from tensorflow.python.layers import core as core_layers\n'), (564, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (565, 'tensorflow.contrib.layers.batch_norm', 'tf.contrib.layers.batch_norm', (['input_layer'], {'is_training': 'self.phase_train', 'fused': '(True)', 'data_format': 'self.data_format', 'scope': 'scope'}), True, 'import tensorflow as tf\n'), (625, 'tensorflow.split', 'tf.split', (['images', 'num_compute_devices', '(0)'], {}), True, 'import tensorflow as tf\n'), (626, 'tensorflow.split', 'tf.split', (['labels', 'num_compute_devices', '(0)'], {}), True, 'import tensorflow as tf\n'), (692, 'numpy.std', 'np.std', (['speeds'], {}), True, 'import numpy as np\n'), (709, 'os.path.join', 'os.path.join', (['ckpt_dir', 'ckpt.model_checkpoint_path'], {}), False, 'import os\n'), (781, 'tensorflow.train.ClusterSpec', 'tf.train.ClusterSpec', (["{'ps': self.ps_hosts, 'worker': self.worker_hosts}"], {}), True, 'import tensorflow as tf\n'), (791, 'tensorflow.train.replica_device_setter', 'tf.train.replica_device_setter', ([], {'worker_device': "(worker_prefix + '/cpu:0')", 'cluster': 'self.cluster'}), True, 'import tensorflow as tf\n'), (892, 'tensorflow.global_variables', 'tf.global_variables', ([], {}), True, 'import tensorflow as tf\n'), (894, 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), True, 'import tensorflow as tf\n'), (903, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (907, 'six.moves.xrange', 'xrange', (['self.num_batches'], {}), False, 'from six.moves import xrange\n'), (918, 'tensorflow.Summary', 'tf.Summary', ([], {}), True, 'import tensorflow as tf\n'), (935, 'tensorflow.device', 'tf.device', (['self.global_step_device'], {}), True, 'import tensorflow as tf\n'), (947, 'tensorflow.group', 'tf.group', (['*variable_mgr_post_init_ops'], {}), True, 'import tensorflow as tf\n'), (1078, 'tensorflow.device', 'tf.device', (['self.global_step_device'], {}), True, 'import tensorflow as tf\n'), (1079, 'tensorflow.contrib.framework.get_or_create_global_step', 'tf.contrib.framework.get_or_create_global_step', ([], {}), True, 'import tensorflow as tf\n'), (1082, 'tensorflow.device', 'tf.device', (['self.cpu_device'], {}), True, 'import tensorflow as tf\n'), (1116, 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS', 'name_scope'], {}), True, 'import tensorflow as tf\n'), (1117, 'tensorflow.group', 'tf.group', (['*gpu_copy_stage_ops'], {}), True, 'import tensorflow as tf\n'), (1122, 'tensorflow.group', 'tf.group', (['*gpu_compute_stage_ops'], {}), True, 'import tensorflow as tf\n'), (1183, 'tensorflow.device', 'tf.device', (['self.cpu_device'], {}), True, 'import tensorflow as tf\n'), (1212, 'tensorflow.device', 'tf.device', (['self.raw_devices[device_num]'], {}), True, 'import tensorflow as tf\n'), (1235, 'tensorflow.device', 'tf.device', (['self.devices[device_num]'], {}), True, 'import tensorflow as tf\n'), (1239, 'tensorflow.subtract', 'tf.subtract', (['images', '(0.5)'], {}), True, 'import tensorflow as tf\n'), (1240, 'tensorflow.multiply', 'tf.multiply', (['images', '(2.0)'], {}), True, 'import tensorflow as tf\n'), (1265, 'tensorflow.gradients', 'tf.gradients', (['loss', 'params'], {'aggregation_method': 'aggmeth'}), True, 'import tensorflow as tf\n'), (1306, 'tensorflow.constant', 
'tf.constant', (['(False)'], {}), True, 'import tensorflow as tf\n'), (1318, 'tensorflow.group', 'tf.group', (['*queue_ops'], {}), True, 'import tensorflow as tf\n'), (237, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (242, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (297, 'tensorflow.python.layers.convolutional.conv2d', 'conv_layers.conv2d', (['input_layer', 'num_out_channels', '[k_height, k_width]'], {'strides': '[d_height, d_width]', 'padding': 'mode', 'data_format': 'self.channel_pos', 'use_bias': '(False)'}), True, 'from tensorflow.python.layers import convolutional as conv_layers\n'), (345, 'tensorflow.nn.relu', 'tf.nn.relu', (['biased'], {}), True, 'import tensorflow as tf\n'), (429, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (430, 'tensorflow.matmul', 'tf.matmul', (['input_layer', 'kernel'], {}), True, 'import tensorflow as tf\n'), (432, 'tensorflow.nn.relu', 'tf.nn.relu', (['logits'], {'name': 'name'}), True, 'import tensorflow as tf\n'), (772, 'datasets.ImagenetData', 'datasets.ImagenetData', (['FLAGS.data_dir'], {}), False, 'import datasets\n'), (812, 'six.moves.xrange', 'xrange', (['FLAGS.num_gpus'], {}), False, 'from six.moves import xrange\n'), (836, 'variable_mgr.VariableMgrLocalReplicated', 'variable_mgr.VariableMgrLocalReplicated', (['self', 'FLAGS.use_nccl'], {}), False, 'import variable_mgr\n'), (936, 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[main_fetch_group]'], {}), True, 'import tensorflow as tf\n'), (959, 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), True, 'import tensorflow as tf\n'), (996, 'os.path.split', 'os.path.split', (['self.graph_file'], {}), False, 'import os\n'), (1000, 'tensorflow.train.write_graph', 'tf.train.write_graph', (['sess.graph_def', 'path', 'filename', 'as_text'], {}), True, 'import tensorflow as tf\n'), (1035, 'time.sleep', 'time.sleep', (['(0.25)'], {}), False, 'import time\n'), (1044, 'os.path.join', 'os.path.join', (['FLAGS.train_dir', '"""model.ckpt"""'], {}), False, 'import os\n'), (1093, 'tensorflow.name_scope', 'tf.name_scope', (["('tower_%i' % device_num)"], {}), True, 'import tensorflow as tf\n'), (1126, 'tensorflow.group', 'tf.group', (['*staging_delta_ops'], {}), True, 'import tensorflow as tf\n'), (1130, 'tensorflow.concat', 'tf.concat', (['all_logits', '(0)'], {}), True, 'import tensorflow as tf\n'), (1133, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['all_top_1_ops'], {}), True, 'import tensorflow as tf\n'), (1134, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['all_top_5_ops'], {}), True, 'import tensorflow as tf\n'), (1143, 'tensorflow.device', 'tf.device', (['device'], {}), True, 'import tensorflow as tf\n'), (1144, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['losses'], {}), True, 'import tensorflow as tf\n'), (1185, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""learning_rate"""', 'learning_rate'], {}), True, 'import tensorflow as tf\n'), (1186, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""total_loss"""', 'total_loss'], {}), True, 'import tensorflow as tf\n'), (1190, 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), True, 'import tensorflow as tf\n'), (1201, 'tensorflow.device', 'tf.device', (['self.cpu_device'], {}), True, 'import tensorflow as tf\n'), (1204, 'tensorflow.python.ops.data_flow_ops.StagingArea', 'data_flow_ops.StagingArea', (['[tf.float32, tf.int32]'], {'shapes': '[images_shape, labels_shape]'}), False, 'from 
tensorflow.python.ops import data_flow_ops\n'), (1214, 'tensorflow.python.ops.data_flow_ops.StagingArea', 'data_flow_ops.StagingArea', (['[tf.float32, tf.int32]'], {'shapes': '[images_shape, labels_shape]'}), False, 'from tensorflow.python.ops import data_flow_ops\n'), (1222, 'tensorflow.reshape', 'tf.reshape', (['images'], {'shape': 'images_shape'}), True, 'import tensorflow as tf\n'), (1231, 'tensorflow.contrib.framework.local_variable', 'tf.contrib.framework.local_variable', (['images'], {'name': '"""gpu_cached_images"""'}), True, 'import tensorflow as tf\n'), (1243, 'tensorflow.transpose', 'tf.transpose', (['images', '[0, 3, 1, 2]'], {}), True, 'import tensorflow as tf\n'), (1245, 'tensorflow.cast', 'tf.cast', (['images', 'data_type'], {}), True, 'import tensorflow as tf\n'), (1270, 'tensorflow.python.ops.data_flow_ops.StagingArea', 'data_flow_ops.StagingArea', (['grad_dtypes', 'grad_shapes'], {}), False, 'from tensorflow.python.ops import data_flow_ops\n'), (1300, 'tensorflow.FIFOQueue', 'tf.FIFOQueue', (['num_workers', '[tf.bool]'], {'shapes': '[[]]', 'shared_name': "('%s%s' % (name_prefix, i))"}), True, 'import tensorflow as tf\n'), (1307, 'tensorflow.control_dependencies', 'tf.control_dependencies', (['enqueue_after_list'], {}), True, 'import tensorflow as tf\n'), (306, 'tensorflow.python.layers.convolutional.conv2d', 'conv_layers.conv2d', (['input_layer', 'num_out_channels', '[k_height, k_width]'], {'strides': '[d_height, d_width]', 'padding': '"""SAME"""', 'data_format': 'self.channel_pos', 'use_bias': '(False)'}), True, 'from tensorflow.python.layers import convolutional as conv_layers\n'), (322, 'tensorflow.pad', 'tf.pad', (['input_layer', 'padding'], {}), True, 'import tensorflow as tf\n'), (323, 'tensorflow.python.layers.convolutional.conv2d', 'conv_layers.conv2d', (['input_layer', 'num_out_channels', '[k_height, k_width]'], {'strides': '[d_height, d_width]', 'padding': '"""VALID"""', 'data_format': 'self.channel_pos', 'use_bias': '(False)'}), True, 'from tensorflow.python.layers import convolutional as conv_layers\n'), (335, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (337, 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['conv', 'biases'], {'data_format': 'self.data_format'}), True, 'import tensorflow as tf\n'), (774, 'datasets.FlowersData', 'datasets.FlowersData', (['FLAGS.data_dir'], {}), False, 'import datasets\n'), (821, 'variable_mgr.VariableMgrDistributedFetchFromPS', 'variable_mgr.VariableMgrDistributedFetchFromPS', (['self'], {}), False, 'import variable_mgr\n'), (825, 'variable_mgr.VariableMgrDistributedFetchFromStagedPS', 'variable_mgr.VariableMgrDistributedFetchFromStagedPS', (['self'], {}), False, 'import variable_mgr\n'), (828, 'variable_mgr.VariableMgrLocalFetchFromPS', 'variable_mgr.VariableMgrLocalFetchFromPS', (['self'], {}), False, 'import variable_mgr\n'), (830, 'variable_mgr.VariableMgrLocalFetchFromStagedPS', 'variable_mgr.VariableMgrLocalFetchFromStagedPS', (['self'], {}), False, 'import variable_mgr\n'), (842, 'variable_mgr.VariableMgrDistributedReplicated', 'variable_mgr.VariableMgrDistributedReplicated', (['self'], {}), False, 'import variable_mgr\n'), (883, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (915, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (968, 'tensorflow.global_variables', 'tf.global_variables', ([], {}), True, 'import tensorflow as tf\n'), (1045, 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', 
(['FLAGS.train_dir'], {}), False, 'from tensorflow.python.platform import gfile\n'), (1046, 'tensorflow.python.platform.gfile.MakeDirs', 'gfile.MakeDirs', (['FLAGS.train_dir'], {}), False, 'from tensorflow.python.platform import gfile\n'), (1112, 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS', 'name_scope'], {}), True, 'import tensorflow as tf\n'), (1155, 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['FLAGS.learning_rate', 'global_step', 'decay_steps', 'FLAGS.learning_rate_decay_factor'], {'staircase': '(True)'}), True, 'import tensorflow as tf\n'), (1168, 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', (['learning_rate', 'FLAGS.momentum'], {'use_nesterov': '(True)'}), True, 'import tensorflow as tf\n'), (1191, 'tensorflow.summary.histogram', 'tf.summary.histogram', (['var.op.name', 'var'], {}), True, 'import tensorflow as tf\n'), (1259, 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['v'], {}), True, 'import tensorflow as tf\n'), (349, 'tensorflow.nn.tanh', 'tf.nn.tanh', (['biased'], {}), True, 'import tensorflow as tf\n'), (425, 'numpy.sqrt', 'np.sqrt', (['(init_factor / num_channels_in)'], {}), True, 'import numpy as np\n'), (693, 'numpy.median', 'np.median', (['speeds'], {}), True, 'import numpy as np\n'), (847, 'variable_mgr.VariableMgrIndependent', 'variable_mgr.VariableMgrIndependent', (['self'], {}), False, 'import variable_mgr\n'), (912, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (1171, 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['learning_rate'], {}), True, 'import tensorflow as tf\n'), (1189, 'tensorflow.summary.histogram', 'tf.summary.histogram', (["(var.op.name + '/gradients')", 'grad'], {}), True, 'import tensorflow as tf\n'), (1253, 'tensorflow.nn.in_top_k', 'tf.nn.in_top_k', (['logits', 'labels', '(1)'], {}), True, 'import tensorflow as tf\n'), (1255, 'tensorflow.nn.in_top_k', 'tf.nn.in_top_k', (['logits', 'labels', '(5)'], {}), True, 'import tensorflow as tf\n'), (236, 'time.ctime', 'time.ctime', ([], {}), False, 'import time\n'), (241, 'time.ctime', 'time.ctime', ([], {}), False, 'import time\n'), (1121, 'six.iteritems', 'six.iteritems', (['staging_ops'], {}), False, 'import six\n'), (1161, 'tensorflow.clip_by_value', 'tf.clip_by_value', (['grad', '(-gradient_clip)', '(+gradient_clip)'], {}), True, 'import tensorflow as tf\n'), (1173, 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (['learning_rate', 'FLAGS.rmsprop_decay'], {'momentum': 'FLAGS.rmsprop_momentum', 'epsilon': 'FLAGS.rmsprop_epsilon'}), True, 'import tensorflow as tf\n'), (1310, 'tensorflow.no_op', 'tf.no_op', ([], {}), True, 'import tensorflow as tf\n')]
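For readers skimming the metadata: each api_extract entry above appears to be a 7-tuple. The field names in this sketch are guesses from inspecting the rows, not a documented schema:

row = (48, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string',
       ['"""model"""', '"""trivial"""', '"""name of the model to run"""'], {},
       True, 'import tensorflow as tf\n')
line_no, full_name, call_expr, args, kwargs, via_alias, import_stmt = row
print('line %d: %s resolves to %s (%s)'
      % (line_no, call_expr, full_name, import_stmt.strip()))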
Bibbidi-Babbidi-Boo/Domain-Decomposition
5c33a84929e084cf458974f95310f997c6c5b1ec
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten

# parameters for training
GRAD_CLIP = 32.
KEEP_PROB1 = 1  # was 0.5
KEEP_PROB2 = 1  # was 0.7
RNN_SIZE = 512
# GOAL_SIZE = 2
loc_layer_size = 2
# glimpse_size1 = 11
# glimpse_size2 = 22
# glimpse_size3 = 32

'''
CHANGES
- changed num_channels = 1
'''
num_channels = 3
# fov_size = 3
# loc_std = 0.8


# Used to initialize weights for the policy and value output layers.
# (Do we need to use that? Maybe not now.)
def normalized_columns_initializer(std=1.0):
    def _initializer(shape, dtype=None, partition_info=None):
        out = np.random.randn(*shape).astype(np.float32)
        out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
        return tf.constant(out)
    return _initializer


'''
                 1          {-(1/2)*[(x-u)/sigma]^2}
G(x) = -------------------- e
        sigma*(2*pi)^(1/2)
'''
def gaussian_pdf(mean, loc_std, sample):
    Z = 1.0 / (loc_std * tf.sqrt(2.0 * np.pi))
    a = - tf.square(sample - mean) / (2.0 * tf.square(loc_std))
    return Z * tf.exp(a)


class ACNet:
    def __init__(self, scope, GRID_SIZE, a_size, trainer, TRAINING, GLOBAL_NET_SCOPE):
        with tf.variable_scope(str(scope) + '/qvalues'):
            # The input size may require more work to fit the interface.
            self.inputs = tf.placeholder(
                shape=[None, GRID_SIZE, GRID_SIZE, num_channels],
                dtype=tf.float32)  # input state
            # self.goal_pos = tf.placeholder(shape=[None,2],dtype=tf.float32)
            self.prev_loc = tf.placeholder(shape=[None, 2], dtype=tf.float32)
            # self.policy, self.next_loc, self.value, self.state_out, self.state_in, self.state_init, self.valids, self.blocking, self.mypos, self.goalpos, self.next_loc_mean = self._build_net(self.inputs, self.inputs_primal, self.prev_loc, RNN_SIZE, TRAINING, a_size)

            '''
            CHANGES
            - removed target_blocking, blocking layers, blocking_loss
            - removed imitation gradients and loss
            - removed valid_loss
            - removed train_valid
            - commented out policy loss (since it is discrete)
            - next_loc_loss is now the new policy loss
            - responsible_next_loc is NOW the policy
            '''
            self.value, self.next_loc_mean, self.loc_std, self.next_loc, \
                self.state_out, self.state_in, self.state_init = \
                self._build_net(self.inputs, self.prev_loc, RNN_SIZE,
                                TRAINING, a_size)  # self.goal_pos

            if TRAINING:
                self.target_v = tf.placeholder(tf.float32, [None], 'Vtarget')
                self.advantages = tf.placeholder(shape=[None], dtype=tf.float32)
                # The sampled action is stored here.
                self.sampled_next_locs = tf.placeholder(tf.float32, [None, 2])
                # Distribution == Policy
                self.policy = gaussian_pdf(self.next_loc_mean, self.loc_std,
                                           self.sampled_next_locs)

                # Loss functions
                self.value_loss = 0.5 * tf.reduce_sum(
                    tf.square(self.target_v - tf.reshape(self.value, shape=[-1])))
                # H(x) = Sum[p(x)*log(p(x))]
                self.entropy = - 0.01 * tf.reduce_sum(
                    self.policy * tf.log(tf.clip_by_value(self.policy, 1e-10, 1.0)))
                self.policy_loss = - 0.2 * tf.reduce_sum(
                    tf.log(tf.clip_by_value(self.policy[:, 0], 1e-15, 1.0)) * self.advantages
                    + tf.log(tf.clip_by_value(self.policy[:, 1], 1e-15, 1.0)) * self.advantages)

                # For the normal RL part
                self.loss = self.value_loss + self.policy_loss - self.entropy
                # removed self.blocking_loss, valid_loss, discrete_policy_loss
                # + 0.5*self.mypos_loss + 0.5*self.goalpos_loss

                # For the imitation learning part
                # self.bc_loss = 0.5 * tf.reduce_mean(tf.contrib.keras.backend.categorical_crossentropy(self.optimal_actions_onehot, self.policy))
                # self.next_loc_loss_il = 0.2 * tf.reduce_sum(tf.sqrt(tf.square(self.next_loc_mean[:-1,:] - self.il_nextloc)))
                # self.imitation_loss = self.bc_loss  # + self.next_loc_loss_il

                # Get gradients from the local network using local losses and
                # normalize the gradients using clipping.
                local_vars = tf.get_collection(
                    tf.GraphKeys.TRAINABLE_VARIABLES, scope + '/qvalues')
                self.gradients = tf.gradients(self.loss, local_vars)
                self.var_norms = tf.global_norm(local_vars)
                grads, self.grad_norms = tf.clip_by_global_norm(
                    self.gradients, GRAD_CLIP)

                # Apply local gradients to the global network.
                global_vars = tf.get_collection(
                    tf.GraphKeys.TRAINABLE_VARIABLES,
                    GLOBAL_NET_SCOPE + '/qvalues')
                self.apply_grads = trainer.apply_gradients(
                    zip(grads, global_vars))

                # Now the gradients for the imitation loss.
                # self.i_gradients = tf.gradients(self.imitation_loss, local_vars)
                # self.i_var_norms = tf.global_norm(local_vars)
                # i_grads, self.i_grad_norms = tf.clip_by_global_norm(self.i_gradients, GRAD_CLIP)
                # Apply local gradients to the global network.
                # self.apply_imitation_grads = trainer.apply_gradients(zip(i_grads, global_vars))

        print("Hello World... From " + str(scope))  # :)

    def _build_net(self, inputs, prev_loc, RNN_SIZE, TRAINING, a_size):  # goal_pos
        '''
        CHANGES
        - Added one more block consisting of 3 conv layers and 1 max pool layer
        - kernel size was changed (3,3) -> (8,8), strides from 1 to 4, to get 1 x 1 in the last layer
        - removed policy layers
        '''
        w_init = tf.contrib.layers.variance_scaling_initializer()

        # glimpse1 = tf.image.extract_glimpse(inputs, [glimpse_size1,glimpse_size1], self.prev_loc, centered=True, normalized=True)
        # glimpse2 = tf.image.extract_glimpse(inputs, [glimpse_size2,glimpse_size2], self.prev_loc, centered=True, normalized=True)
        # glimpse2 = tf.image.resize(glimpse2, [glimpse_size1,glimpse_size1])
        # glimpse3 = tf.image.extract_glimpse(inputs, [glimpse_size3,glimpse_size3], self.prev_loc, centered=True, normalized=True)
        # glimpse3 = tf.image.resize(glimpse3, [glimpse_size1,glimpse_size1])
        # self.glimpses = tf.concat([glimpse1,glimpse2,glimpse3], axis=-1)

        # Block 1
        conv1a = Conv2D(padding="same", filters=RNN_SIZE//8, kernel_size=[8, 8], strides=4, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(inputs)
        conv1b = Conv2D(padding="same", filters=RNN_SIZE//8, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(conv1a)
        conv1c = Conv2D(padding="same", filters=RNN_SIZE//8, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(conv1b)
        pool1 = MaxPool2D(pool_size=[2, 2])(conv1c)

        # Block 2
        conv2a = Conv2D(padding="same", filters=RNN_SIZE//4, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(pool1)
        conv2b = Conv2D(padding="same", filters=RNN_SIZE//4, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(conv2a)
        conv2c = Conv2D(padding="same", filters=RNN_SIZE//4, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(conv2b)
        pool2 = MaxPool2D(pool_size=[2, 2])(conv2c)

        # Block 3
        conv3a = Conv2D(padding="same", filters=RNN_SIZE//2, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(pool2)
        conv3b = Conv2D(padding="same", filters=RNN_SIZE//2, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(conv3a)
        conv3c = Conv2D(padding="same", filters=RNN_SIZE//2, kernel_size=[3, 3], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=tf.nn.relu)(conv3b)
        pool3 = MaxPool2D(pool_size=[2, 2])(conv3c)

        # Final convolutional layer (removed GOAL_SIZE)
        conv4 = Conv2D(padding="valid", filters=RNN_SIZE - loc_layer_size, kernel_size=[2, 2], strides=1, data_format='channels_last', kernel_initializer=w_init, activation=None)(pool3)

        # FC layers (removed GOAL_SIZE)
        flat1a = Flatten(data_format='channels_last')(conv4)
        flat1b = Dense(units=RNN_SIZE - loc_layer_size)(flat1a)

        # FC layers for the goal_pos input
        # goal_layer1 = Dense(units=GOAL_SIZE)(goal_pos)
        # goal_layer2 = Dense(units=GOAL_SIZE)(goal_layer1)

        # FC layers to find the next location
        loc_layer1 = Dense(units=loc_layer_size)(prev_loc)
        loc_layer2 = Dense(units=loc_layer_size)(loc_layer1)

        # Concatenation of the above layers, followed by FC layers
        concat = tf.concat([flat1b, loc_layer2], 1)  # goal_layer2
        h1 = Dense(units=RNN_SIZE)(concat)
        h2 = Dense(units=RNN_SIZE)(h1)
        self.h3 = tf.nn.relu(h2 + concat)

        # Recurrent network for temporal dependencies
        lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(RNN_SIZE, state_is_tuple=True)
        c_init = np.zeros((1, lstm_cell.state_size.c), np.float32)
        h_init = np.zeros((1, lstm_cell.state_size.h), np.float32)
        state_init = [c_init, h_init]
        c_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.c])
        h_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.h])
        rnn_in = tf.expand_dims(self.h3, [0])
        step_size = tf.shape(inputs)[:1]
        state_in = tf.nn.rnn_cell.LSTMStateTuple(c_in, h_in)
        lstm_outputs, lstm_state = tf.nn.dynamic_rnn(
            lstm_cell, rnn_in, initial_state=state_in,
            sequence_length=step_size, time_major=False)
        lstm_c, lstm_h = lstm_state
        state_out = (lstm_c[:1, :], lstm_h[:1, :])
        self.rnn_out = tf.reshape(lstm_outputs, [-1, RNN_SIZE])

        '''
        CHANGES
        - removed blocking layer
        - edited out stop_gradient lines (don't need them)
        '''
        # Value FC
        value = Dense(units=1, kernel_initializer=normalized_columns_initializer(1.0), bias_initializer=None, activation=None)(inputs=self.rnn_out)

        # rnn_out_frozen = tf.stop_gradient(self.rnn_out)
        next_loc_mean = Dense(units=2, kernel_initializer=normalized_columns_initializer(1.0), bias_initializer=None, activation=tf.math.tanh)(inputs=self.rnn_out)  # was rnn_out_frozen
        loc_std = Dense(units=1, kernel_initializer=normalized_columns_initializer(1.0), activation=tf.nn.softplus)(inputs=self.rnn_out)

        # Policy FC
        next_loc = tf.clip_by_value(next_loc_mean + tf.random_normal([1, 2], 0, loc_std), -1, 1)
        # next_loc = tf.stop_gradient(next_loc)

        return value, next_loc_mean, loc_std, next_loc, state_out, state_in, state_init
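As a sanity check on the continuous policy above, the numpy sketch below (standalone, with made-up densities and advantages) evaluates the same per-coordinate Gaussian density as gaussian_pdf and then forms the clipped-log surrogate that policy_loss sums:

import numpy as np

def gaussian_pdf_np(mean, loc_std, sample):
    # Same normal density as gaussian_pdf above, evaluated in numpy.
    z = 1.0 / (loc_std * np.sqrt(2.0 * np.pi))
    return z * np.exp(-np.square(sample - mean) / (2.0 * np.square(loc_std)))

# Density of the sampled (x, y) locations under the policy; numbers are made up.
mean = np.array([[0.1, -0.3], [0.0, 0.2]])
std = 0.2
sampled = np.array([[0.15, -0.25], [-0.1, 0.3]])
pdf = np.clip(gaussian_pdf_np(mean, std, sampled), 1e-15, 1.0)  # same clip as the TF graph

adv = np.array([1.5, -0.5])  # made-up advantages
policy_loss = -0.2 * np.sum(np.log(pdf[:, 0]) * adv + np.log(pdf[:, 1]) * adv)
print(policy_loss)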
[ "tensorflow.nn.dynamic_rnn", "tensorflow.concat", "tensorflow.nn.rnn_cell.LSTMStateTuple", "numpy.random.randn", "numpy.square", "tensorflow.contrib.layers.variance_scaling_initializer", "tensorflow.get_collection", "tensorflow.keras.layers.Conv2D", "tensorflow.gradients", "tensorflow.square", "numpy.zeros", "tensorflow.keras.layers.Flatten", "tensorflow.nn.rnn_cell.BasicLSTMCell", "tensorflow.shape", "tensorflow.keras.layers.Dense", "tensorflow.exp", "tensorflow.placeholder", "tensorflow.global_norm", "tensorflow.clip_by_value", "tensorflow.nn.relu", "tensorflow.constant", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.keras.layers.MaxPool2D", "tensorflow.clip_by_global_norm", "tensorflow.sqrt", "tensorflow.random_normal" ]
ACNet.py
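A small standalone check (not part of ACNet.py) of the property that normalized_columns_initializer above enforces: every output column is rescaled to have L2 norm equal to std.

import numpy as np

std = 1.0
out = np.random.randn(512, 2).astype(np.float32)  # e.g. the shape of the next_loc_mean head
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
print(np.linalg.norm(out, axis=0))  # -> [1. 1.]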
[(34, 'tensorflow.constant', 'tf.constant', (['out'], {}), True, 'import tensorflow as tf\n'), (45, 'tensorflow.exp', 'tf.exp', (['a'], {}), True, 'import tensorflow as tf\n'), (124, 'tensorflow.contrib.layers.variance_scaling_initializer', 'tf.contrib.layers.variance_scaling_initializer', ([], {}), True, 'import tensorflow as tf\n'), (172, 'tensorflow.concat', 'tf.concat', (['[flat1b, loc_layer2]', '(1)'], {}), True, 'import tensorflow as tf\n'), (175, 'tensorflow.nn.relu', 'tf.nn.relu', (['(h2 + concat)'], {}), True, 'import tensorflow as tf\n'), (178, 'tensorflow.nn.rnn_cell.BasicLSTMCell', 'tf.nn.rnn_cell.BasicLSTMCell', (['RNN_SIZE'], {'state_is_tuple': '(True)'}), True, 'import tensorflow as tf\n'), (179, 'numpy.zeros', 'np.zeros', (['(1, lstm_cell.state_size.c)', 'np.float32'], {}), True, 'import numpy as np\n'), (180, 'numpy.zeros', 'np.zeros', (['(1, lstm_cell.state_size.h)', 'np.float32'], {}), True, 'import numpy as np\n'), (182, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[1, lstm_cell.state_size.c]'], {}), True, 'import tensorflow as tf\n'), (183, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[1, lstm_cell.state_size.h]'], {}), True, 'import tensorflow as tf\n'), (185, 'tensorflow.expand_dims', 'tf.expand_dims', (['self.h3', '[0]'], {}), True, 'import tensorflow as tf\n'), (187, 'tensorflow.nn.rnn_cell.LSTMStateTuple', 'tf.nn.rnn_cell.LSTMStateTuple', (['c_in', 'h_in'], {}), True, 'import tensorflow as tf\n'), (188, 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['lstm_cell', 'rnn_in'], {'initial_state': 'state_in', 'sequence_length': 'step_size', 'time_major': '(False)'}), True, 'import tensorflow as tf\n'), (193, 'tensorflow.reshape', 'tf.reshape', (['lstm_outputs', '[-1, RNN_SIZE]'], {}), True, 'import tensorflow as tf\n'), (43, 'tensorflow.sqrt', 'tf.sqrt', (['(2.0 * np.pi)'], {}), True, 'import tensorflow as tf\n'), (44, 'tensorflow.square', 'tf.square', (['(sample - mean)'], {}), True, 'import tensorflow as tf\n'), (44, 'tensorflow.square', 'tf.square', (['loc_std'], {}), True, 'import tensorflow as tf\n'), (51, 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None, GRID_SIZE, GRID_SIZE, num_channels]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (53, 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None, 2]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (72, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None]', '"""Vtarget"""'], {}), True, 'import tensorflow as tf\n'), (73, 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None]', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (75, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 2]'], {}), True, 'import tensorflow as tf\n'), (98, 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES', "(scope + '/qvalues')"], {}), True, 'import tensorflow as tf\n'), (99, 'tensorflow.gradients', 'tf.gradients', (['self.loss', 'local_vars'], {}), True, 'import tensorflow as tf\n'), (100, 'tensorflow.global_norm', 'tf.global_norm', (['local_vars'], {}), True, 'import tensorflow as tf\n'), (101, 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['self.gradients', 'GRAD_CLIP'], {}), True, 'import tensorflow as tf\n'), (104, 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES', "(GLOBAL_NET_SCOPE + '/qvalues')"], {}), True, 'import tensorflow as tf\n'), (137, 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], 
{'padding': '"""same"""', 'filters': '(RNN_SIZE // 8)', 'kernel_size': '[8, 8]', 'strides': '(4)', 'data_format': '"""channels_last"""', 'kernel_initializer': 'w_init', 'activation': 'tf.nn.relu'}), False, 'from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten\n'), (138, 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'padding': '"""same"""', 'filters': '(RNN_SIZE // 8)', 'kernel_size': '[3, 3]', 'strides': '(1)', 'data_format': '"""channels_last"""', 'kernel_initializer': 'w_init', 'activation': 'tf.nn.relu'}), False, 'from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten\n'), (139, 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'padding': '"""same"""', 'filters': '(RNN_SIZE // 8)', 'kernel_size': '[3, 3]', 'strides': '(1)', 'data_format': '"""channels_last"""', 'kernel_initializer': 'w_init', 'activation': 'tf.nn.relu'}), False, 'from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten\n'), (140, 'tensorflow.keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '[2, 2]'}), False, 'from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten\n'), (143, 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'padding': '"""same"""', 'filters': '(RNN_SIZE // 4)', 'kernel_size': '[3, 3]', 'strides': '(1)', 'data_format': '"""channels_last"""', 'kernel_initializer': 'w_init', 'activation': 'tf.nn.relu'}), False, 'from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten\n'), (144, 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'padding': '"""same"""', 'filters': '(RNN_SIZE // 4)', 'kernel_size': '[3, 3]', 'strides': '(1)', 'data_format': '"""channels_last"""', 'kernel_initializer': 'w_init', 'activation': 'tf.nn.relu'}), False, 'from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten\n'), (145, 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'padding': '"""same"""', 'filters': '(RNN_SIZE // 4)', 'kernel_size': '[3, 3]', 'strides': '(1)', 'data_format': '"""channels_last"""', 'kernel_initializer': 'w_init', 'activation': 'tf.nn.relu'}), False, 'from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten\n'), (146, 'tensorflow.keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '[2, 2]'}), False, 'from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten\n'), (149, 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'padding': '"""same"""', 'filters': '(RNN_SIZE // 2)', 'kernel_size': '[3, 3]', 'strides': '(1)', 'data_format': '"""channels_last"""', 'kernel_initializer': 'w_init', 'activation': 'tf.nn.relu'}), False, 'from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten\n'), (150, 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'padding': '"""same"""', 'filters': '(RNN_SIZE // 2)', 'kernel_size': '[3, 3]', 'strides': '(1)', 'data_format': '"""channels_last"""', 'kernel_initializer': 'w_init', 'activation': 'tf.nn.relu'}), False, 'from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten\n'), (151, 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'padding': '"""same"""', 'filters': '(RNN_SIZE // 2)', 'kernel_size': '[3, 3]', 'strides': '(1)', 'data_format': '"""channels_last"""', 'kernel_initializer': 'w_init', 'activation': 'tf.nn.relu'}), False, 'from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten\n'), (152, 'tensorflow.keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '[2, 2]'}), False, 'from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten\n'), (156, 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'padding': 
'"""valid"""', 'filters': '(RNN_SIZE - loc_layer_size)', 'kernel_size': '[2, 2]', 'strides': '(1)', 'data_format': '"""channels_last"""', 'kernel_initializer': 'w_init', 'activation': 'None'}), False, 'from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten\n'), (159, 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {'data_format': '"""channels_last"""'}), False, 'from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten\n'), (161, 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': '(RNN_SIZE - loc_layer_size)'}), False, 'from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten\n'), (168, 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'loc_layer_size'}), False, 'from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten\n'), (169, 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'loc_layer_size'}), False, 'from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten\n'), (173, 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'RNN_SIZE'}), False, 'from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten\n'), (174, 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'RNN_SIZE'}), False, 'from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten\n'), (186, 'tensorflow.shape', 'tf.shape', (['inputs'], {}), True, 'import tensorflow as tf\n'), (32, 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), True, 'import numpy as np\n'), (208, 'tensorflow.random_normal', 'tf.random_normal', (['[1, 2]', '(0)', 'loc_std'], {}), True, 'import tensorflow as tf\n'), (33, 'numpy.square', 'np.square', (['out'], {}), True, 'import numpy as np\n'), (80, 'tensorflow.reshape', 'tf.reshape', (['self.value'], {'shape': '[-1]'}), True, 'import tensorflow as tf\n'), (83, 'tensorflow.clip_by_value', 'tf.clip_by_value', (['self.policy', '(1e-10)', '(1.0)'], {}), True, 'import tensorflow as tf\n'), (85, 'tensorflow.clip_by_value', 'tf.clip_by_value', (['self.policy[:, (0)]', '(1e-15)', '(1.0)'], {}), True, 'import tensorflow as tf\n'), (85, 'tensorflow.clip_by_value', 'tf.clip_by_value', (['self.policy[:, (1)]', '(1e-15)', '(1.0)'], {}), True, 'import tensorflow as tf\n')]
siddsax/PocketFlow
909808b8344f03cd9d41cb1bba6daa3b0201184a
# Tencent is pleased to support the open source community by making PocketFlow available. # # Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved. # # Licensed under the BSD 3-Clause License (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://opensource.org/licenses/BSD-3-Clause # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Model helper for creating a ResNet model for the CIFAR-10 dataset.""" import tensorflow as tf from nets.abstract_model_helper import AbstractModelHelper from datasets.cifar10_dataset import Cifar10Dataset from utils.external import resnet_model as ResNet from utils.lrn_rate_utils import setup_lrn_rate_piecewise_constant from utils.multi_gpu_wrapper import MultiGpuWrapper as mgw FLAGS = tf.app.flags.FLAGS tf.app.flags.DEFINE_integer('resnet_size', 20, '# of layers in the ResNet model') tf.app.flags.DEFINE_float('nb_epochs_rat', 1.0, '# of training epochs\'s ratio') tf.app.flags.DEFINE_float('lrn_rate_init', 1e-1, 'initial learning rate') tf.app.flags.DEFINE_float('batch_size_norm', 128, 'normalization factor of batch size') tf.app.flags.DEFINE_float('momentum', 0.9, 'momentum coefficient') tf.app.flags.DEFINE_float('loss_w_dcy', 2e-4, 'weight decaying loss\'s coefficient') def forward_fn(inputs, is_train, data_format): """Forward pass function. Args: * inputs: inputs to the network's forward pass * is_train: whether to use the forward pass with training operations inserted * data_format: data format ('channels_last' OR 'channels_first') Returns: * inputs: outputs from the network's forward pass """ # setup hyper-parameters nb_blocks = (FLAGS.resnet_size - 2) // 6 bottleneck = False nb_classes = FLAGS.nb_classes nb_filters = 16 kernel_size = 3 conv_stride = 1 first_pool_size = None first_pool_stride = None block_sizes = [nb_blocks] * 3 block_strides = [1, 2, 2] # model definition model = ResNet.Model( FLAGS.resnet_size, bottleneck, nb_classes, nb_filters, kernel_size, conv_stride, first_pool_size, first_pool_stride, block_sizes, block_strides, data_format=data_format) inputs = model(inputs, is_train) return inputs class ModelHelper(AbstractModelHelper): """Model helper for creating a ResNet model for the CIFAR-10 dataset.""" def __init__(self, data_format='channels_last'): """Constructor function.""" # class-independent initialization super(ModelHelper, self).__init__(data_format) # initialize training & evaluation subsets self.dataset_train = Cifar10Dataset(is_train=True) self.dataset_eval = Cifar10Dataset(is_train=False) def build_dataset_train(self, enbl_trn_val_split=False): """Build the data subset for training, usually with data augmentation.""" return self.dataset_train.build(enbl_trn_val_split) def build_dataset_eval(self): """Build the data subset for evaluation, usually without data augmentation.""" return self.dataset_eval.build() def forward_train(self, inputs): """Forward computation at training.""" return forward_fn(inputs, is_train=True, data_format=self.data_format) def forward_eval(self, inputs): """Forward computation at evaluation.""" return forward_fn(inputs, is_train=False, 
data_format=self.data_format) def calc_loss(self, labels, outputs, trainable_vars): """Calculate loss (and some extra evaluation metrics).""" loss = tf.losses.softmax_cross_entropy(labels, outputs) loss_filter = lambda var: 'batch_normalization' not in var.name loss += FLAGS.loss_w_dcy \ * tf.add_n([tf.nn.l2_loss(var) for var in trainable_vars if loss_filter(var)]) accuracy = tf.reduce_mean( tf.cast(tf.equal(tf.argmax(labels, axis=1), tf.argmax(outputs, axis=1)), tf.float32)) metrics = {'accuracy': accuracy} return loss, metrics def setup_lrn_rate(self, global_step): """Setup the learning rate (and number of training iterations).""" nb_epochs = 250 idxs_epoch = [100, 150, 200] decay_rates = [1.0, 0.1, 0.01, 0.001] batch_size = FLAGS.batch_size * (1 if not FLAGS.enbl_multi_gpu else mgw.size()) lrn_rate = setup_lrn_rate_piecewise_constant(global_step, batch_size, idxs_epoch, decay_rates) nb_iters = int(FLAGS.nb_smpls_train * nb_epochs * FLAGS.nb_epochs_rat / batch_size) return lrn_rate, nb_iters @property def model_name(self): """Model's name.""" return 'resnet_%d' % FLAGS.resnet_size @property def dataset_name(self): """Dataset's name.""" return 'cifar_10'
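The body of setup_lrn_rate_piecewise_constant is not included in this dump; presumably it converts the epoch indices above into iteration boundaries along these lines. A hedged standalone sketch, using CIFAR-10's 50000 training samples and the flag defaults from this file:

nb_smpls_train = 50000           # CIFAR-10 training-set size
batch_size = 128                 # assumed effective batch size
idxs_epoch = [100, 150, 200]
decay_rates = [1.0, 0.1, 0.01, 0.001]
lrn_rate_init = 0.1              # FLAGS.lrn_rate_init default

boundaries = [int(nb_smpls_train * e / batch_size) for e in idxs_epoch]
values = [lrn_rate_init * r for r in decay_rates]
print(boundaries)  # [39062, 58593, 78125]
print(values)      # [0.1, 0.01, 0.001, 0.0001]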
[ "tensorflow.app.flags.DEFINE_integer", "tensorflow.losses.softmax_cross_entropy", "tensorflow.nn.l2_loss", "tensorflow.app.flags.DEFINE_float", "tensorflow.argmax" ]
nets/resnet_at_cifar10.py
[(29, 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""resnet_size"""', '(20)', '"""# of layers in the ResNet model"""'], {}), True, 'import tensorflow as tf\n'), (30, 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""nb_epochs_rat"""', '(1.0)', '"""# of training epochs\'s ratio"""'], {}), True, 'import tensorflow as tf\n'), (31, 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""lrn_rate_init"""', '(0.1)', '"""initial learning rate"""'], {}), True, 'import tensorflow as tf\n'), (32, 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""batch_size_norm"""', '(128)', '"""normalization factor of batch size"""'], {}), True, 'import tensorflow as tf\n'), (33, 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""momentum"""', '(0.9)', '"""momentum coefficient"""'], {}), True, 'import tensorflow as tf\n'), (34, 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""loss_w_dcy"""', '(0.0002)', '"""weight decaying loss\'s coefficient"""'], {}), True, 'import tensorflow as tf\n'), (61, 'utils.external.resnet_model.Model', 'ResNet.Model', (['FLAGS.resnet_size', 'bottleneck', 'nb_classes', 'nb_filters', 'kernel_size', 'conv_stride', 'first_pool_size', 'first_pool_stride', 'block_sizes', 'block_strides'], {'data_format': 'data_format'}), True, 'from utils.external import resnet_model as ResNet\n'), (78, 'datasets.cifar10_dataset.Cifar10Dataset', 'Cifar10Dataset', ([], {'is_train': '(True)'}), False, 'from datasets.cifar10_dataset import Cifar10Dataset\n'), (79, 'datasets.cifar10_dataset.Cifar10Dataset', 'Cifar10Dataset', ([], {'is_train': '(False)'}), False, 'from datasets.cifar10_dataset import Cifar10Dataset\n'), (104, 'tensorflow.losses.softmax_cross_entropy', 'tf.losses.softmax_cross_entropy', (['labels', 'outputs'], {}), True, 'import tensorflow as tf\n'), (121, 'utils.lrn_rate_utils.setup_lrn_rate_piecewise_constant', 'setup_lrn_rate_piecewise_constant', (['global_step', 'batch_size', 'idxs_epoch', 'decay_rates'], {}), False, 'from utils.lrn_rate_utils import setup_lrn_rate_piecewise_constant\n'), (120, 'utils.multi_gpu_wrapper.MultiGpuWrapper.size', 'mgw.size', ([], {}), True, 'from utils.multi_gpu_wrapper import MultiGpuWrapper as mgw\n'), (107, 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['var'], {}), True, 'import tensorflow as tf\n'), (109, 'tensorflow.argmax', 'tf.argmax', (['labels'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (109, 'tensorflow.argmax', 'tf.argmax', (['outputs'], {'axis': '(1)'}), True, 'import tensorflow as tf\n')]
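setup_lrn_rate above delegates to setup_lrn_rate_piecewise_constant, whose body is not part of this record. The stand-in below sketches what such a schedule plausibly computes, assuming the helper converts epoch indices to global-step boundaries through samples-per-epoch / batch-size; the function is illustrative, not PocketFlow's implementation:

def piecewise_lrn_rate(global_step, lrn_rate_init, batch_size,
                       nb_smpls_train, idxs_epoch, decay_rates):
  # Assumed semantics: decay_rates[i] applies until the boundary derived
  # from idxs_epoch[i]; the last rate applies afterwards.
  iters_per_epoch = nb_smpls_train / batch_size
  boundaries = [int(idx * iters_per_epoch) for idx in idxs_epoch]
  for bound, rate in zip(boundaries, decay_rates):
    if global_step < bound:
      return lrn_rate_init * rate
  return lrn_rate_init * decay_rates[-1]

# CIFAR-10 numbers from the record: 50000 training samples, batch size 128,
# decay boundaries at epochs 100 / 150 / 200.
for step in (0, 50000, 70000, 90000):
  print(step, piecewise_lrn_rate(step, 1e-1, 128, 50000,
                                 [100, 150, 200], [1.0, 0.1, 0.01, 0.001]))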
micmelesse/tensor2tensor
93d34d69092f86b203f0f0a8230fcd9ecbe9086f
# coding=utf-8 # Copyright 2019 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Clean discrete bottleneck as in https://arxiv.org/abs/1805.11063.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from functools import partial from tensor2tensor.layers import common_layers import tensorflow as tf from tensorflow.python.training import moving_averages class DiscreteBottleneck(object): """Discrete bottleneck class.""" def __init__(self, hparams): self.hparams = hparams print ("self.hparams.z_size", self.hparams.z_size) # Set the discretization bottleneck specific things here self.hparams.z_size_per_residual = self.hparams.z_size // \ self.hparams.num_residuals print ("self.hparams.num_residuals", self.hparams.num_residuals) self.hparams.block_dim = int( self.hparams.hidden_size // self.hparams.num_blocks) self.hparams.block_v_size = 2**( self.hparams.z_size_per_residual / self.hparams.num_blocks) self.hparams.block_v_size = int(self.hparams.block_v_size) self.means = tf.get_variable( name="means", shape=[ self.hparams.num_blocks, self.hparams.block_v_size, self.hparams.block_dim ], initializer=tf.initializers.variance_scaling(distribution="uniform")) # Create the shadow variables if we are using EMA if self.hparams.ema: self.ema_count = tf.get_variable( "ema_count", [self.hparams.num_blocks, self.hparams.block_v_size], initializer=tf.constant_initializer(0), trainable=False) with tf.colocate_with(self.means): self.ema_means = tf.get_variable( "ema_means", initializer=self.means.initialized_value(), trainable=False) def slice_hidden(self, x): """Slice encoder hidden state into block_dim. Args: x: Encoder hidden state of shape [-1, hidden_size]. Returns: Sliced states of shape [-1, num_blocks, block_dim]. """ x_sliced = tf.reshape( x, shape=[-1, self.hparams.num_blocks, self.hparams.block_dim]) return x_sliced def nearest_neighbor(self, x, means): """Find the nearest element in means to elements in x. Args: x: Batch of encoder continuous latent states sliced/projected into shape [-1, num_blocks, block_dim]. means: Embedding means of shape. Returns: Tensor with nearest element in mean encoded in one-hot notation. 
""" x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keep_dims=True) means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keep_dims=True) scalar_prod = tf.matmul( tf.transpose(x, perm=[1, 0, 2]), tf.transpose(means, perm=[0, 2, 1])) scalar_prod = tf.transpose(scalar_prod, perm=[1, 0, 2]) dist = x_norm_sq + tf.transpose( means_norm_sq, perm=[2, 0, 1]) - 2 * scalar_prod if self.hparams.soft_em: nearest_idx = tf.stack( [ tf.multinomial( -dist[:, i, :], num_samples=self.hparams.num_samples) for i in range(self.hparams.num_blocks) ], axis=1) nearest_hot = tf.one_hot(nearest_idx, depth=self.hparams.block_v_size) nearest_hot = tf.reduce_mean(nearest_hot, axis=-2) else: if self.hparams.random_top_k > 1: _, top_k_idx = tf.nn.top_k(-dist, k=self.hparams.random_top_k) nearest_idx = tf.gather( top_k_idx, tf.random_uniform( [1], minval=0, maxval=self.hparams.random_top_k - 1, dtype=tf.int32), axis=-1) else: if self.hparams.use_scales: dist /= tf.reshape(self.hparams.scales, [1, 1, self.hparams.moe_num_experts]) nearest_idx = tf.argmax(-dist, axis=-1) nearest_hot = tf.one_hot(nearest_idx, self.hparams.block_v_size) return nearest_hot def embedding_lookup(self, x, means): """Compute nearest neighbors and loss for training the embeddings. Args: x: Batch of encoder continuous latent states sliced/projected into shape [-1, num_blocks, block_dim]. means: Embedding means. Returns: The nearest neighbor in one hot form, the nearest neighbor itself, the commitment loss, embedding training loss. """ x_means_hot = self.nearest_neighbor(x, means) x_means_hot_flat = tf.reshape( x_means_hot, [-1, self.hparams.num_blocks, self.hparams.block_v_size]) x_means = tf.matmul(tf.transpose(x_means_hot_flat, perm=[1, 0, 2]), means) x_means = tf.transpose(x_means, [1, 0, 2]) q_loss = tf.reduce_mean( tf.squared_difference(tf.stop_gradient(x), x_means)) e_loss = tf.reduce_mean( tf.squared_difference(x, tf.stop_gradient(x_means))) return x_means_hot, x_means, q_loss, e_loss def bit_to_int(self, x_bit, num_bits, base=2): """Turn x_bit representing numbers bitwise (lower-endian) to int tensor. Args: x_bit: Tensor containing numbers in a particular base to be converted to int. num_bits: Number of bits in the representation. base: Base of the representation. Returns: Integer representation of this number. """ x_l = tf.stop_gradient(tf.to_int32(tf.reshape(x_bit, [-1, num_bits]))) x_labels = [] for i in range(num_bits): x_labels.append(x_l[:, i] * tf.to_int32(base)**tf.to_int32(i)) res = sum(x_labels) return tf.to_int32(tf.reshape(res, common_layers.shape_list(x_bit)[:-1])) def int_to_bit(self, x_int, num_bits, base=2): """Turn x_int representing numbers into a bitwise (lower-endian) tensor. Args: x_int: Tensor containing integer to be converted into base notation. num_bits: Number of bits in the representation. base: Base of the representation. Returns: Corresponding number expressed in base. """ x_l = tf.to_int32(tf.expand_dims(x_int, axis=-1)) x_labels = [] for i in range(num_bits): x_labels.append( tf.floormod( tf.floordiv(tf.to_int32(x_l), tf.to_int32(base)**i), tf.to_int32(base))) res = tf.concat(x_labels, axis=-1) return tf.to_float(res) def embed(self, x): """Embedding function that takes discrete latent and returns embedding. Args: x: Input to the discretization bottleneck. Returns: Continuous embedding to be passed on to the decoder. Raises: ValueError: For unknown or missing arguments. 
""" shape_x = common_layers.shape_list(x) x_flat = tf.reshape(x, [-1, 1]) c = self.int_to_bit(x_flat, num_bits=self.hparams.z_size, base=2) shape = common_layers.shape_list(c) new_shape = shape new_shape.append(self.hparams.num_blocks) new_shape.append(int(self.hparams.z_size / self.hparams.num_blocks)) c = tf.to_int32(tf.reshape(c, shape=new_shape)) h1_shape = shape_x h1_shape.append(self.hparams.hidden_size) h1 = tf.zeros(dtype=tf.float32, shape=h1_shape) c_int = self.bit_to_int( c, num_bits=int(self.hparams.z_size / self.hparams.num_blocks), base=2) c_hot = tf.one_hot(c_int, depth=self.hparams.block_v_size, axis=-1) c_hot_flat = tf.reshape( c_hot, shape=[-1, self.hparams.num_blocks, self.hparams.block_v_size]) h1 = tf.matmul(tf.transpose(c_hot_flat, perm=[1, 0, 2]), self.means) h1 = tf.transpose(h1, perm=[1, 0, 2]) h1 = tf.reshape(h1, shape=h1_shape) h1_shape[0] = self.hparams.batch_size h2 = tf.layers.dense(tf.nn.relu(h1), self.hparams.filter_size, name="vch2") res = tf.layers.dense( tf.nn.relu(h2), self.hparams.hidden_size, name="vcfin") return res def discrete_bottleneck(self, x): """Discretization bottleneck for latent variables. Args: x: Input to the discretization bottleneck. Returns: Embedding to pass to the decoder, discrete latent, loss, and the embedding function. Raises: ValueError: If projection_tensors is None for reshape_method project, or ema_count or ema_means is None if we are using ema, or unknown args. """ x_reshaped = self.slice_hidden(x) x_means_hot = [] x_means = 0 loss = 0 x_means_hot, x_means, q_loss, e_loss = self.embedding_lookup( x_reshaped, self.means) if self.hparams.ema: tf.logging.info("Using EMA with beta = {}".format(self.hparams.beta)) updated_ema_count = \ moving_averages.assign_moving_average( self.ema_count, tf.reduce_sum( tf.reshape( x_means_hot, shape=[-1, self.hparams.num_blocks, self.hparams.block_v_size]), axis=0), self.hparams.decay, zero_debias=False) dw = tf.matmul( tf.transpose(x_means_hot, perm=[1, 2, 0]), tf.transpose(x_reshaped, perm=[1, 0, 2])) updated_ema_means = \ moving_averages.assign_moving_average( self.ema_means, dw, self.hparams.decay, zero_debias=False) n = tf.reduce_sum(updated_ema_count, axis=-1, keep_dims=True) updated_ema_count = ((updated_ema_count + self.hparams.epsilon) / ( n + 2**self.hparams.z_size * self.hparams.epsilon) * n) updated_ema_means = updated_ema_means / tf.expand_dims( updated_ema_count, axis=-1) with tf.control_dependencies([e_loss]): update_means = tf.assign(self.means, updated_ema_means) with tf.control_dependencies([update_means]): loss += self.hparams.beta * e_loss else: # Use a gradient based loss for learning the cluster centers loss += q_loss + self.hparams.beta * e_loss # Get the discrete latent representation x_means_idx = tf.argmax(x_means_hot, axis=-1) # Get the binary representation num_bits = int(self.hparams.z_size // self.hparams.num_blocks) x_means_bits = self.int_to_bit(x_means_idx, num_bits=num_bits, base=2) x_discrete = self.bit_to_int( tf.to_int32(x_means_bits), num_bits=self.hparams.z_size, base=2) # Reshape x_discrete shape_x = common_layers.shape_list(x) shape_discrete = shape_x[:-1] x_discrete = tf.reshape(x_discrete, shape_discrete) x_means = tf.reshape(x_means, shape=shape_x) h1 = x + tf.stop_gradient(x_means - x) h2 = tf.layers.dense(tf.nn.relu(h1), self.hparams.filter_size, name="vch2") res = tf.layers.dense( tf.nn.relu(h2), self.hparams.hidden_size, name="vcfin") embed_fn = partial(self.embed) return { "dense": res, "discrete": x_discrete, "loss": loss, "embed": embed_fn }
[ "tensorflow.random_uniform", "tensorflow.concat", "tensorflow.control_dependencies", "tensorflow.zeros", "tensorflow.colocate_with", "tensorflow.reduce_sum", "tensorflow.to_int32", "tensorflow.python.training.moving_averages.assign_moving_average", "tensorflow.stop_gradient", "tensorflow.nn.top_k", "tensorflow.to_float", "tensorflow.square", "tensorflow.argmax", "tensorflow.one_hot", "tensorflow.nn.relu", "tensorflow.transpose", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.assign", "tensorflow.expand_dims", "tensorflow.constant_initializer", "tensorflow.initializers.variance_scaling", "tensorflow.multinomial" ]
tensor2tensor/layers/vq_discrete.py
[(70, 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '[-1, self.hparams.num_blocks, self.hparams.block_dim]'}), True, 'import tensorflow as tf\n'), (89, 'tensorflow.transpose', 'tf.transpose', (['scalar_prod'], {'perm': '[1, 0, 2]'}), True, 'import tensorflow as tf\n'), (137, 'tensorflow.reshape', 'tf.reshape', (['x_means_hot', '[-1, self.hparams.num_blocks, self.hparams.block_v_size]'], {}), True, 'import tensorflow as tf\n'), (140, 'tensorflow.transpose', 'tf.transpose', (['x_means', '[1, 0, 2]'], {}), True, 'import tensorflow as tf\n'), (186, 'tensorflow.concat', 'tf.concat', (['x_labels'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (187, 'tensorflow.to_float', 'tf.to_float', (['res'], {}), True, 'import tensorflow as tf\n'), (200, 'tensor2tensor.layers.common_layers.shape_list', 'common_layers.shape_list', (['x'], {}), False, 'from tensor2tensor.layers import common_layers\n'), (201, 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, 1]'], {}), True, 'import tensorflow as tf\n'), (203, 'tensor2tensor.layers.common_layers.shape_list', 'common_layers.shape_list', (['c'], {}), False, 'from tensor2tensor.layers import common_layers\n'), (210, 'tensorflow.zeros', 'tf.zeros', ([], {'dtype': 'tf.float32', 'shape': 'h1_shape'}), True, 'import tensorflow as tf\n'), (213, 'tensorflow.one_hot', 'tf.one_hot', (['c_int'], {'depth': 'self.hparams.block_v_size', 'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (214, 'tensorflow.reshape', 'tf.reshape', (['c_hot'], {'shape': '[-1, self.hparams.num_blocks, self.hparams.block_v_size]'}), True, 'import tensorflow as tf\n'), (217, 'tensorflow.transpose', 'tf.transpose', (['h1'], {'perm': '[1, 0, 2]'}), True, 'import tensorflow as tf\n'), (218, 'tensorflow.reshape', 'tf.reshape', (['h1'], {'shape': 'h1_shape'}), True, 'import tensorflow as tf\n'), (286, 'tensorflow.argmax', 'tf.argmax', (['x_means_hot'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (295, 'tensor2tensor.layers.common_layers.shape_list', 'common_layers.shape_list', (['x'], {}), False, 'from tensor2tensor.layers import common_layers\n'), (297, 'tensorflow.reshape', 'tf.reshape', (['x_discrete', 'shape_discrete'], {}), True, 'import tensorflow as tf\n'), (298, 'tensorflow.reshape', 'tf.reshape', (['x_means'], {'shape': 'shape_x'}), True, 'import tensorflow as tf\n'), (304, 'functools.partial', 'partial', (['self.embed'], {}), False, 'from functools import partial\n'), (85, 'tensorflow.square', 'tf.square', (['x'], {}), True, 'import tensorflow as tf\n'), (86, 'tensorflow.square', 'tf.square', (['means'], {}), True, 'import tensorflow as tf\n'), (88, 'tensorflow.transpose', 'tf.transpose', (['x'], {'perm': '[1, 0, 2]'}), True, 'import tensorflow as tf\n'), (88, 'tensorflow.transpose', 'tf.transpose', (['means'], {'perm': '[0, 2, 1]'}), True, 'import tensorflow as tf\n'), (101, 'tensorflow.one_hot', 'tf.one_hot', (['nearest_idx'], {'depth': 'self.hparams.block_v_size'}), True, 'import tensorflow as tf\n'), (102, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['nearest_hot'], {'axis': '(-2)'}), True, 'import tensorflow as tf\n'), (119, 'tensorflow.one_hot', 'tf.one_hot', (['nearest_idx', 'self.hparams.block_v_size'], {}), True, 'import tensorflow as tf\n'), (139, 'tensorflow.transpose', 'tf.transpose', (['x_means_hot_flat'], {'perm': '[1, 0, 2]'}), True, 'import tensorflow as tf\n'), (179, 'tensorflow.expand_dims', 'tf.expand_dims', (['x_int'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (207, 'tensorflow.reshape', 'tf.reshape', (['c'], {'shape': 
'new_shape'}), True, 'import tensorflow as tf\n'), (216, 'tensorflow.transpose', 'tf.transpose', (['c_hot_flat'], {'perm': '[1, 0, 2]'}), True, 'import tensorflow as tf\n'), (220, 'tensorflow.nn.relu', 'tf.nn.relu', (['h1'], {}), True, 'import tensorflow as tf\n'), (222, 'tensorflow.nn.relu', 'tf.nn.relu', (['h2'], {}), True, 'import tensorflow as tf\n'), (268, 'tensorflow.python.training.moving_averages.assign_moving_average', 'moving_averages.assign_moving_average', (['self.ema_means', 'dw', 'self.hparams.decay'], {'zero_debias': '(False)'}), False, 'from tensorflow.python.training import moving_averages\n'), (271, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['updated_ema_count'], {'axis': '(-1)', 'keep_dims': '(True)'}), True, 'import tensorflow as tf\n'), (292, 'tensorflow.to_int32', 'tf.to_int32', (['x_means_bits'], {}), True, 'import tensorflow as tf\n'), (299, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['(x_means - x)'], {}), True, 'import tensorflow as tf\n'), (301, 'tensorflow.nn.relu', 'tf.nn.relu', (['h1'], {}), True, 'import tensorflow as tf\n'), (303, 'tensorflow.nn.relu', 'tf.nn.relu', (['h2'], {}), True, 'import tensorflow as tf\n'), (47, 'tensorflow.initializers.variance_scaling', 'tf.initializers.variance_scaling', ([], {'distribution': '"""uniform"""'}), True, 'import tensorflow as tf\n'), (55, 'tensorflow.colocate_with', 'tf.colocate_with', (['self.means'], {}), True, 'import tensorflow as tf\n'), (90, 'tensorflow.transpose', 'tf.transpose', (['means_norm_sq'], {'perm': '[2, 0, 1]'}), True, 'import tensorflow as tf\n'), (105, 'tensorflow.nn.top_k', 'tf.nn.top_k', (['(-dist)'], {'k': 'self.hparams.random_top_k'}), True, 'import tensorflow as tf\n'), (118, 'tensorflow.argmax', 'tf.argmax', (['(-dist)'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (142, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['x'], {}), True, 'import tensorflow as tf\n'), (144, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['x_means'], {}), True, 'import tensorflow as tf\n'), (160, 'tensorflow.reshape', 'tf.reshape', (['x_bit', '[-1, num_bits]'], {}), True, 'import tensorflow as tf\n'), (264, 'tensorflow.transpose', 'tf.transpose', (['x_means_hot'], {'perm': '[1, 2, 0]'}), True, 'import tensorflow as tf\n'), (265, 'tensorflow.transpose', 'tf.transpose', (['x_reshaped'], {'perm': '[1, 0, 2]'}), True, 'import tensorflow as tf\n'), (274, 'tensorflow.expand_dims', 'tf.expand_dims', (['updated_ema_count'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (277, 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[e_loss]'], {}), True, 'import tensorflow as tf\n'), (278, 'tensorflow.assign', 'tf.assign', (['self.means', 'updated_ema_means'], {}), True, 'import tensorflow as tf\n'), (53, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0)'], {}), True, 'import tensorflow as tf\n'), (96, 'tensorflow.multinomial', 'tf.multinomial', (['(-dist[:, (i), :])'], {'num_samples': 'self.hparams.num_samples'}), True, 'import tensorflow as tf\n'), (108, 'tensorflow.random_uniform', 'tf.random_uniform', (['[1]'], {'minval': '(0)', 'maxval': '(self.hparams.random_top_k - 1)', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (116, 'tensorflow.reshape', 'tf.reshape', (['self.hparams.scales', '[1, 1, self.hparams.moe_num_experts]'], {}), True, 'import tensorflow as tf\n'), (165, 'tensor2tensor.layers.common_layers.shape_list', 'common_layers.shape_list', (['x_bit'], {}), False, 'from tensor2tensor.layers import common_layers\n'), (185, 'tensorflow.to_int32', 
'tf.to_int32', (['base'], {}), True, 'import tensorflow as tf\n'), (255, 'tensorflow.reshape', 'tf.reshape', (['x_means_hot'], {'shape': '[-1, self.hparams.num_blocks, self.hparams.block_v_size]'}), True, 'import tensorflow as tf\n'), (279, 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[update_means]'], {}), True, 'import tensorflow as tf\n'), (163, 'tensorflow.to_int32', 'tf.to_int32', (['base'], {}), True, 'import tensorflow as tf\n'), (163, 'tensorflow.to_int32', 'tf.to_int32', (['i'], {}), True, 'import tensorflow as tf\n'), (184, 'tensorflow.to_int32', 'tf.to_int32', (['x_l'], {}), True, 'import tensorflow as tf\n'), (185, 'tensorflow.to_int32', 'tf.to_int32', (['base'], {}), True, 'import tensorflow as tf\n')]
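nearest_neighbor in the record above expands ||x - m||^2 into ||x||^2 + ||m||^2 - 2*x.m so the codebook search becomes two norms plus a matrix product. A single-block NumPy sketch of the same expansion, sanity-checked against the direct pairwise distance:

import numpy as np

def nearest_codes(x, means):
  # x: [n, d] latent vectors; means: [v, d] codebook entries.
  x_sq = np.sum(np.square(x), axis=1, keepdims=True)       # [n, 1]
  m_sq = np.sum(np.square(means), axis=1, keepdims=True).T  # [1, v]
  dist = x_sq + m_sq - 2.0 * (x @ means.T)                  # [n, v]
  return np.argmin(dist, axis=1)

rng = np.random.default_rng(0)
x, means = rng.normal(size=(8, 4)), rng.normal(size=(16, 4))
ref = np.argmin(((x[:, None, :] - means[None, :, :]) ** 2).sum(-1), axis=1)
assert np.array_equal(nearest_codes(x, means), ref)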
narutowang/indrnn
434e1200b5e742a0eac92bed661c69e97b8b8711
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Example / benchmark for building a PTB LSTM model.

Trains the model described in:
(Zaremba, et al.) Recurrent Neural Network Regularization
http://arxiv.org/abs/1409.2329

There are 3 supported model configurations:
===========================================
| config | epochs | train | valid  | test
===========================================
| small  | 13     | 37.99 | 121.39 | 115.91
| medium | 39     | 48.45 |  86.16 |  82.07
| large  | 55     | 37.87 |  82.62 |  78.29
The exact results may vary depending on the random initialization.

The hyperparameters used in the model:
- init_scale - the initial scale of the weights
- learning_rate - the initial value of the learning rate
- max_grad_norm - the maximum permissible norm of the gradient
- num_layers - the number of LSTM layers
- num_steps - the number of unrolled steps of LSTM
- hidden_size - the number of LSTM units
- max_epoch - the number of epochs trained with the initial learning rate
- max_max_epoch - the total number of epochs for training
- keep_prob - the probability of keeping weights in the dropout layer
- lr_decay - the decay of the learning rate for each epoch after "max_epoch"
- batch_size - the batch size
- rnn_mode - the low level implementation of lstm cell: one of CUDNN,
             BASIC, or BLOCK, representing cudnn_lstm, basic_lstm,
             and lstm_block_cell classes.

The data required for this example is in the data/ dir of the
PTB dataset from Tomas Mikolov's webpage:

$ wget http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
$ tar xvf simple-examples.tgz

To run:

$ python ptb_word_lm.py --data_path=simple-examples/data/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import time

import numpy as np
import tensorflow as tf

import reader
import util

from tensorflow.python.client import device_lib
from ind_rnn_cell import IndRNNCell

TIME_STEPS = 50
RECURRENT_MAX = pow(2, 1 / TIME_STEPS)

flags = tf.flags
logging = tf.logging

flags.DEFINE_string(
    "model", "small",
    "A type of model. 
Possible options are: small, medium, large.") flags.DEFINE_string("data_path", None, "Where the training/test data is stored.") flags.DEFINE_string("save_path", None, "Model output directory.") flags.DEFINE_bool("use_fp16", False, "Train using 16-bit floats instead of 32bit floats") flags.DEFINE_integer("num_gpus", 1, "If larger than 1, Grappler AutoParallel optimizer " "will create multiple training replicas with each GPU " "running one replica.") flags.DEFINE_string("rnn_mode", None, "The low level implementation of lstm cell: one of CUDNN, " "BASIC, and BLOCK, representing cudnn_lstm, basic_lstm, " "and lstm_block_cell classes.") FLAGS = flags.FLAGS BASIC = "basic" CUDNN = "cudnn" BLOCK = "block" def data_type(): return tf.float16 if FLAGS.use_fp16 else tf.float32 class PTBInput(object): """The input data.""" def __init__(self, config, data, name=None): self.batch_size = batch_size = config.batch_size self.num_steps = num_steps = config.num_steps self.epoch_size = ((len(data) // batch_size) - 1) // num_steps self.input_data, self.targets = reader.ptb_producer( data, batch_size, num_steps, name=name) class PTBModel(object): """The PTB model.""" def __init__(self, is_training, config, input_): self._is_training = is_training self._input = input_ self._rnn_params = None self._cell = None self.batch_size = input_.batch_size self.num_steps = input_.num_steps size = config.hidden_size vocab_size = config.vocab_size with tf.device("/cpu:0"): embedding = tf.get_variable( "embedding", [vocab_size, size], dtype=data_type()) inputs = tf.nn.embedding_lookup(embedding, input_.input_data) if is_training and config.keep_prob < 1: inputs = tf.nn.dropout(inputs, config.keep_prob) output, state = self._build_rnn_graph(inputs, config, is_training) softmax_w = tf.get_variable( "softmax_w", [size, vocab_size], dtype=data_type()) softmax_b = tf.get_variable("softmax_b", [vocab_size], dtype=data_type()) logits = tf.nn.xw_plus_b(output, softmax_w, softmax_b) # Reshape logits to be a 3-D tensor for sequence loss logits = tf.reshape(logits, [self.batch_size, self.num_steps, vocab_size]) # Use the contrib sequence loss and average over the batches loss = tf.contrib.seq2seq.sequence_loss( logits, input_.targets, tf.ones([self.batch_size, self.num_steps], dtype=data_type()), average_across_timesteps=False, average_across_batch=True) # Update the cost self._cost = tf.reduce_sum(loss) self._final_state = state if not is_training: return self._lr = tf.Variable(0.0, trainable=False) tvars = tf.trainable_variables() grads, _ = tf.clip_by_global_norm(tf.gradients(self._cost, tvars), config.max_grad_norm) optimizer = tf.train.GradientDescentOptimizer(self._lr) self._train_op = optimizer.apply_gradients( zip(grads, tvars), global_step=tf.train.get_or_create_global_step()) self._new_lr = tf.placeholder( tf.float32, shape=[], name="new_learning_rate") self._lr_update = tf.assign(self._lr, self._new_lr) def _build_rnn_graph(self, inputs, config, is_training): if config.rnn_mode == CUDNN: return self._build_rnn_graph_cudnn(inputs, config, is_training) else: return self._build_rnn_graph_lstm(inputs, config, is_training) def _build_rnn_graph_cudnn(self, inputs, config, is_training): """Build the inference graph using CUDNN cell.""" inputs = tf.transpose(inputs, [1, 0, 2]) self._cell = tf.contrib.cudnn_rnn.CudnnLSTM( num_layers=config.num_layers, num_units=config.hidden_size, input_size=config.hidden_size, dropout=1 - config.keep_prob if is_training else 0) params_size_t = self._cell.params_size() self._rnn_params = 
tf.get_variable( "lstm_params", initializer=tf.random_uniform( [params_size_t], -config.init_scale, config.init_scale), validate_shape=False) c = tf.zeros([config.num_layers, self.batch_size, config.hidden_size], tf.float32) h = tf.zeros([config.num_layers, self.batch_size, config.hidden_size], tf.float32) self._initial_state = (tf.contrib.rnn.LSTMStateTuple(h=h, c=c),) outputs, h, c = self._cell(inputs, h, c, self._rnn_params, is_training) outputs = tf.transpose(outputs, [1, 0, 2]) outputs = tf.reshape(outputs, [-1, config.hidden_size]) return outputs, (tf.contrib.rnn.LSTMStateTuple(h=h, c=c),) def _get_lstm_cell(self, config, is_training): #if config.rnn_mode == BASIC: # return tf.contrib.rnn.BasicLSTMCell( # config.hidden_size, forget_bias=0.0, state_is_tuple=True, # reuse=not is_training) #if config.rnn_mode == BLOCK: # return tf.contrib.rnn.LSTMBlockCell( # config.hidden_size, forget_bias=0.0) #if config.rnn_mode == INDRNN: return IndRNNCell(config.hidden_size, recurrent_max_abs=RECURRENT_MAX) raise ValueError("rnn_mode %s not supported" % config.rnn_mode) def _build_rnn_graph_lstm(self, inputs, config, is_training): """Build the inference graph using canonical LSTM cells.""" # Slightly better results can be obtained with forget gate biases # initialized to 1 but the hyperparameters of the model would need to be # different than reported in the paper. def make_cell(): cell = self._get_lstm_cell(config, is_training) if is_training and config.keep_prob < 1: cell = tf.contrib.rnn.DropoutWrapper( cell, output_keep_prob=config.keep_prob) return cell cell = tf.contrib.rnn.MultiRNNCell( [make_cell() for _ in range(config.num_layers)], state_is_tuple=True) self._initial_state = cell.zero_state(config.batch_size, data_type()) state = self._initial_state # Simplified version of tf.nn.static_rnn(). # This builds an unrolled LSTM for tutorial purposes only. # In general, use tf.nn.static_rnn() or tf.nn.static_state_saving_rnn(). 
# # The alternative version of the code below is: # # inputs = tf.unstack(inputs, num=self.num_steps, axis=1) # outputs, state = tf.nn.static_rnn(cell, inputs, # initial_state=self._initial_state) outputs = [] with tf.variable_scope("RNN"): for time_step in range(self.num_steps): if time_step > 0: tf.get_variable_scope().reuse_variables() (cell_output, state) = cell(inputs[:, time_step, :], state) outputs.append(cell_output) output = tf.reshape(tf.concat(outputs, 1), [-1, config.hidden_size]) return output, state def assign_lr(self, session, lr_value): session.run(self._lr_update, feed_dict={self._new_lr: lr_value}) def export_ops(self, name): """Exports ops to collections.""" self._name = name ops = {util.with_prefix(self._name, "cost"): self._cost} if self._is_training: ops.update(lr=self._lr, new_lr=self._new_lr, lr_update=self._lr_update) if self._rnn_params: ops.update(rnn_params=self._rnn_params) for name, op in ops.items(): tf.add_to_collection(name, op) self._initial_state_name = util.with_prefix(self._name, "initial") self._final_state_name = util.with_prefix(self._name, "final") util.export_state_tuples(self._initial_state, self._initial_state_name) util.export_state_tuples(self._final_state, self._final_state_name) def import_ops(self): """Imports ops from collections.""" if self._is_training: self._train_op = tf.get_collection_ref("train_op")[0] self._lr = tf.get_collection_ref("lr")[0] self._new_lr = tf.get_collection_ref("new_lr")[0] self._lr_update = tf.get_collection_ref("lr_update")[0] rnn_params = tf.get_collection_ref("rnn_params") if self._cell and rnn_params: params_saveable = tf.contrib.cudnn_rnn.RNNParamsSaveable( self._cell, self._cell.params_to_canonical, self._cell.canonical_to_params, rnn_params, base_variable_scope="Model/RNN") tf.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, params_saveable) self._cost = tf.get_collection_ref(util.with_prefix(self._name, "cost"))[0] num_replicas = FLAGS.num_gpus if self._name == "Train" else 1 self._initial_state = util.import_state_tuples( self._initial_state, self._initial_state_name, num_replicas) self._final_state = util.import_state_tuples( self._final_state, self._final_state_name, num_replicas) @property def input(self): return self._input @property def initial_state(self): return self._initial_state @property def cost(self): return self._cost @property def final_state(self): return self._final_state @property def lr(self): return self._lr @property def train_op(self): return self._train_op @property def initial_state_name(self): return self._initial_state_name @property def final_state_name(self): return self._final_state_name class SmallConfig(object): """Small config.""" init_scale = 0.1 learning_rate = 1.0 max_grad_norm = 5 num_layers = 2 num_steps = 20 hidden_size = 200 max_epoch = 4 max_max_epoch = 13 keep_prob = 1.0 lr_decay = 0.5 batch_size = 20 vocab_size = 10000 rnn_mode = BLOCK class MediumConfig(object): """Medium config.""" init_scale = 0.05 learning_rate = 1.0 max_grad_norm = 5 num_layers = 2 num_steps = 35 hidden_size = 650 max_epoch = 6 max_max_epoch = 39 keep_prob = 0.5 lr_decay = 0.8 batch_size = 20 vocab_size = 10000 rnn_mode = BLOCK class LargeConfig(object): """Large config.""" init_scale = 0.04 learning_rate = 1.0 max_grad_norm = 10 num_layers = 2 num_steps = 35 hidden_size = 1500 max_epoch = 14 max_max_epoch = 55 keep_prob = 0.35 lr_decay = 1 / 1.15 batch_size = 20 vocab_size = 10000 rnn_mode = BLOCK class TestConfig(object): """Tiny config, for testing.""" init_scale = 0.1 learning_rate = 1.0 
  max_grad_norm = 1
  num_layers = 1
  num_steps = 2
  hidden_size = 2
  max_epoch = 1
  max_max_epoch = 1
  keep_prob = 1.0
  lr_decay = 0.5
  batch_size = 20
  vocab_size = 10000
  rnn_mode = BLOCK


def run_epoch(session, model, eval_op=None, verbose=False):
  """Runs the model on the given data."""
  start_time = time.time()
  costs = 0.0
  iters = 0
  state = session.run(model.initial_state)

  fetches = {
      "cost": model.cost,
      "final_state": model.final_state,
  }
  if eval_op is not None:
    fetches["eval_op"] = eval_op

  for step in range(model.input.epoch_size):
    feed_dict = {}
    for i, (c, h) in enumerate(model.initial_state):
      feed_dict[c] = state[i].c
      feed_dict[h] = state[i].h

    vals = session.run(fetches, feed_dict)
    cost = vals["cost"]
    state = vals["final_state"]

    costs += cost
    iters += model.input.num_steps

    if verbose and step % (model.input.epoch_size // 10) == 10:
      print("%.3f perplexity: %.3f speed: %.0f wps" %
            (step * 1.0 / model.input.epoch_size, np.exp(costs / iters),
             iters * model.input.batch_size * max(1, FLAGS.num_gpus) /
             (time.time() - start_time)))

  return np.exp(costs / iters)


def get_config():
  """Get model config."""
  config = None
  if FLAGS.model == "small":
    config = SmallConfig()
  elif FLAGS.model == "medium":
    config = MediumConfig()
  elif FLAGS.model == "large":
    config = LargeConfig()
  elif FLAGS.model == "test":
    config = TestConfig()
  else:
    raise ValueError("Invalid model: %s" % FLAGS.model)
  if FLAGS.rnn_mode:
    config.rnn_mode = FLAGS.rnn_mode
  if FLAGS.num_gpus != 1 or tf.__version__ < "1.3.0":
    config.rnn_mode = BASIC
  return config


def main(_):
  if not FLAGS.data_path:
    raise ValueError("Must set --data_path to PTB data directory")
  gpus = [
      x.name for x in device_lib.list_local_devices() if x.device_type == "GPU"
  ]
  if FLAGS.num_gpus > len(gpus):
    raise ValueError(
        "Your machine has only %d gpus "
        "which is less than the requested --num_gpus=%d."
% (len(gpus), FLAGS.num_gpus)) raw_data = reader.ptb_raw_data(FLAGS.data_path) train_data, valid_data, test_data, _ = raw_data config = get_config() eval_config = get_config() eval_config.batch_size = 1 eval_config.num_steps = 1 with tf.Graph().as_default(): initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale) with tf.name_scope("Train"): train_input = PTBInput(config=config, data=train_data, name="TrainInput") with tf.variable_scope("Model", reuse=None, initializer=initializer): m = PTBModel(is_training=True, config=config, input_=train_input) tf.summary.scalar("Training Loss", m.cost) tf.summary.scalar("Learning Rate", m.lr) with tf.name_scope("Valid"): valid_input = PTBInput(config=config, data=valid_data, name="ValidInput") with tf.variable_scope("Model", reuse=True, initializer=initializer): mvalid = PTBModel(is_training=False, config=config, input_=valid_input) tf.summary.scalar("Validation Loss", mvalid.cost) with tf.name_scope("Test"): test_input = PTBInput( config=eval_config, data=test_data, name="TestInput") with tf.variable_scope("Model", reuse=True, initializer=initializer): mtest = PTBModel(is_training=False, config=eval_config, input_=test_input) models = {"Train": m, "Valid": mvalid, "Test": mtest} for name, model in models.items(): model.export_ops(name) metagraph = tf.train.export_meta_graph() if tf.__version__ < "1.1.0" and FLAGS.num_gpus > 1: raise ValueError("num_gpus > 1 is not supported for TensorFlow versions " "below 1.1.0") soft_placement = False if FLAGS.num_gpus > 1: soft_placement = True util.auto_parallel(metagraph, m) with tf.Graph().as_default(): tf.train.import_meta_graph(metagraph) for model in models.values(): model.import_ops() sv = tf.train.Supervisor(logdir=FLAGS.save_path) config_proto = tf.ConfigProto(allow_soft_placement=soft_placement) with sv.managed_session(config=config_proto) as session: for i in range(config.max_max_epoch): lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0) m.assign_lr(session, config.learning_rate * lr_decay) print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr))) train_perplexity = run_epoch(session, m, eval_op=m.train_op, verbose=True) print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity)) valid_perplexity = run_epoch(session, mvalid) print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity)) test_perplexity = run_epoch(session, mtest) print("Test Perplexity: %.3f" % test_perplexity) if FLAGS.save_path: print("Saving model to %s." % FLAGS.save_path) sv.saver.save(session, FLAGS.save_path, global_step=sv.global_step) if __name__ == "__main__": tf.app.run()
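run_epoch above reports word-level perplexity as exp of the accumulated per-step cross-entropy divided by the number of unrolled steps. A tiny NumPy illustration with made-up per-step losses:

import numpy as np

step_losses = np.array([5.2, 4.9, 4.7, 4.6])  # hypothetical mean loss per step
perplexity = np.exp(step_losses.sum() / len(step_losses))
print(perplexity)  # exp(4.85) ~= 128: as uncertain as a uniform
                   # choice over about 128 words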
[ "tensorflow.device", "tensorflow.concat", "tensorflow.python.client.device_lib.list_local_devices", "tensorflow.zeros", "tensorflow.reduce_sum", "numpy.exp", "tensorflow.summary.scalar", "tensorflow.Graph", "tensorflow.Variable", "tensorflow.random_uniform_initializer", "tensorflow.train.export_meta_graph", "tensorflow.gradients", "tensorflow.train.import_meta_graph", "tensorflow.train.get_or_create_global_step", "tensorflow.ConfigProto", "tensorflow.name_scope", "tensorflow.trainable_variables", "tensorflow.app.run", "tensorflow.nn.dropout", "tensorflow.nn.xw_plus_b", "tensorflow.contrib.cudnn_rnn.CudnnLSTM", "tensorflow.placeholder", "tensorflow.train.GradientDescentOptimizer", "tensorflow.contrib.rnn.LSTMStateTuple", "tensorflow.add_to_collection", "tensorflow.nn.embedding_lookup", "tensorflow.transpose", "tensorflow.get_collection_ref", "tensorflow.contrib.rnn.DropoutWrapper", "tensorflow.reshape", "tensorflow.assign", "tensorflow.train.Supervisor", "tensorflow.contrib.cudnn_rnn.RNNParamsSaveable", "tensorflow.variable_scope", "tensorflow.get_variable_scope", "tensorflow.random_uniform" ]
ptb/ptb_word_lm.py
[(396, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (427, 'numpy.exp', 'np.exp', (['(costs / iters)'], {}), True, 'import numpy as np\n'), (462, 'reader.ptb_raw_data', 'reader.ptb_raw_data', (['FLAGS.data_path'], {}), False, 'import reader\n'), (533, 'tensorflow.app.run', 'tf.app.run', ([], {}), True, 'import tensorflow as tf\n'), (113, 'reader.ptb_producer', 'reader.ptb_producer', (['data', 'batch_size', 'num_steps'], {'name': 'name'}), False, 'import reader\n'), (143, 'tensorflow.nn.xw_plus_b', 'tf.nn.xw_plus_b', (['output', 'softmax_w', 'softmax_b'], {}), True, 'import tensorflow as tf\n'), (145, 'tensorflow.reshape', 'tf.reshape', (['logits', '[self.batch_size, self.num_steps, vocab_size]'], {}), True, 'import tensorflow as tf\n'), (156, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['loss'], {}), True, 'import tensorflow as tf\n'), (162, 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'trainable': '(False)'}), True, 'import tensorflow as tf\n'), (163, 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), True, 'import tensorflow as tf\n'), (166, 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['self._lr'], {}), True, 'import tensorflow as tf\n'), (171, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[]', 'name': '"""new_learning_rate"""'}), True, 'import tensorflow as tf\n'), (173, 'tensorflow.assign', 'tf.assign', (['self._lr', 'self._new_lr'], {}), True, 'import tensorflow as tf\n'), (183, 'tensorflow.transpose', 'tf.transpose', (['inputs', '[1, 0, 2]'], {}), True, 'import tensorflow as tf\n'), (184, 'tensorflow.contrib.cudnn_rnn.CudnnLSTM', 'tf.contrib.cudnn_rnn.CudnnLSTM', ([], {'num_layers': 'config.num_layers', 'num_units': 'config.hidden_size', 'input_size': 'config.hidden_size', 'dropout': '(1 - config.keep_prob if is_training else 0)'}), True, 'import tensorflow as tf\n'), (195, 'tensorflow.zeros', 'tf.zeros', (['[config.num_layers, self.batch_size, config.hidden_size]', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (197, 'tensorflow.zeros', 'tf.zeros', (['[config.num_layers, self.batch_size, config.hidden_size]', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (201, 'tensorflow.transpose', 'tf.transpose', (['outputs', '[1, 0, 2]'], {}), True, 'import tensorflow as tf\n'), (202, 'tensorflow.reshape', 'tf.reshape', (['outputs', '[-1, config.hidden_size]'], {}), True, 'import tensorflow as tf\n'), (214, 'ind_rnn_cell.IndRNNCell', 'IndRNNCell', (['config.hidden_size'], {'recurrent_max_abs': 'RECURRENT_MAX'}), False, 'from ind_rnn_cell import IndRNNCell\n'), (265, 'util.with_prefix', 'util.with_prefix', (['self._name', '"""initial"""'], {}), False, 'import util\n'), (266, 'util.with_prefix', 'util.with_prefix', (['self._name', '"""final"""'], {}), False, 'import util\n'), (267, 'util.export_state_tuples', 'util.export_state_tuples', (['self._initial_state', 'self._initial_state_name'], {}), False, 'import util\n'), (268, 'util.export_state_tuples', 'util.export_state_tuples', (['self._final_state', 'self._final_state_name'], {}), False, 'import util\n'), (288, 'util.import_state_tuples', 'util.import_state_tuples', (['self._initial_state', 'self._initial_state_name', 'num_replicas'], {}), False, 'import util\n'), (290, 'util.import_state_tuples', 'util.import_state_tuples', (['self._final_state', 'self._final_state_name', 'num_replicas'], {}), False, 'import util\n'), (471, 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', (['(-config.init_scale)', 
'config.init_scale'], {}), True, 'import tensorflow as tf\n'), (497, 'tensorflow.train.export_meta_graph', 'tf.train.export_meta_graph', ([], {}), True, 'import tensorflow as tf\n'), (507, 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (['metagraph'], {}), True, 'import tensorflow as tf\n'), (510, 'tensorflow.train.Supervisor', 'tf.train.Supervisor', ([], {'logdir': 'FLAGS.save_path'}), True, 'import tensorflow as tf\n'), (511, 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': 'soft_placement'}), True, 'import tensorflow as tf\n'), (130, 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), True, 'import tensorflow as tf\n'), (133, 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embedding', 'input_.input_data'], {}), True, 'import tensorflow as tf\n'), (136, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['inputs', 'config.keep_prob'], {}), True, 'import tensorflow as tf\n'), (164, 'tensorflow.gradients', 'tf.gradients', (['self._cost', 'tvars'], {}), True, 'import tensorflow as tf\n'), (199, 'tensorflow.contrib.rnn.LSTMStateTuple', 'tf.contrib.rnn.LSTMStateTuple', ([], {'h': 'h', 'c': 'c'}), True, 'import tensorflow as tf\n'), (244, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""RNN"""'], {}), True, 'import tensorflow as tf\n'), (249, 'tensorflow.concat', 'tf.concat', (['outputs', '(1)'], {}), True, 'import tensorflow as tf\n'), (258, 'util.with_prefix', 'util.with_prefix', (['self._name', '"""cost"""'], {}), False, 'import util\n'), (264, 'tensorflow.add_to_collection', 'tf.add_to_collection', (['name', 'op'], {}), True, 'import tensorflow as tf\n'), (277, 'tensorflow.get_collection_ref', 'tf.get_collection_ref', (['"""rnn_params"""'], {}), True, 'import tensorflow as tf\n'), (454, 'tensorflow.python.client.device_lib.list_local_devices', 'device_lib.list_local_devices', ([], {}), False, 'from tensorflow.python.client import device_lib\n'), (474, 'tensorflow.name_scope', 'tf.name_scope', (['"""Train"""'], {}), True, 'import tensorflow as tf\n'), (478, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Training Loss"""', 'm.cost'], {}), True, 'import tensorflow as tf\n'), (479, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Learning Rate"""', 'm.lr'], {}), True, 'import tensorflow as tf\n'), (481, 'tensorflow.name_scope', 'tf.name_scope', (['"""Valid"""'], {}), True, 'import tensorflow as tf\n'), (485, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Validation Loss"""', 'mvalid.cost'], {}), True, 'import tensorflow as tf\n'), (487, 'tensorflow.name_scope', 'tf.name_scope', (['"""Test"""'], {}), True, 'import tensorflow as tf\n'), (504, 'util.auto_parallel', 'util.auto_parallel', (['metagraph', 'm'], {}), False, 'import util\n'), (169, 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), True, 'import tensorflow as tf\n'), (192, 'tensorflow.random_uniform', 'tf.random_uniform', (['[params_size_t]', '(-config.init_scale)', 'config.init_scale'], {}), True, 'import tensorflow as tf\n'), (203, 'tensorflow.contrib.rnn.LSTMStateTuple', 'tf.contrib.rnn.LSTMStateTuple', ([], {'h': 'h', 'c': 'c'}), True, 'import tensorflow as tf\n'), (225, 'tensorflow.contrib.rnn.DropoutWrapper', 'tf.contrib.rnn.DropoutWrapper', (['cell'], {'output_keep_prob': 'config.keep_prob'}), True, 'import tensorflow as tf\n'), (273, 'tensorflow.get_collection_ref', 'tf.get_collection_ref', (['"""train_op"""'], {}), True, 'import tensorflow as tf\n'), (274, 'tensorflow.get_collection_ref', 
'tf.get_collection_ref', (['"""lr"""'], {}), True, 'import tensorflow as tf\n'), (275, 'tensorflow.get_collection_ref', 'tf.get_collection_ref', (['"""new_lr"""'], {}), True, 'import tensorflow as tf\n'), (276, 'tensorflow.get_collection_ref', 'tf.get_collection_ref', (['"""lr_update"""'], {}), True, 'import tensorflow as tf\n'), (279, 'tensorflow.contrib.cudnn_rnn.RNNParamsSaveable', 'tf.contrib.cudnn_rnn.RNNParamsSaveable', (['self._cell', 'self._cell.params_to_canonical', 'self._cell.canonical_to_params', 'rnn_params'], {'base_variable_scope': '"""Model/RNN"""'}), True, 'import tensorflow as tf\n'), (285, 'tensorflow.add_to_collection', 'tf.add_to_collection', (['tf.GraphKeys.SAVEABLE_OBJECTS', 'params_saveable'], {}), True, 'import tensorflow as tf\n'), (286, 'util.with_prefix', 'util.with_prefix', (['self._name', '"""cost"""'], {}), False, 'import util\n'), (470, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (476, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Model"""'], {'reuse': 'None', 'initializer': 'initializer'}), True, 'import tensorflow as tf\n'), (483, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Model"""'], {'reuse': '(True)', 'initializer': 'initializer'}), True, 'import tensorflow as tf\n'), (490, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Model"""'], {'reuse': '(True)', 'initializer': 'initializer'}), True, 'import tensorflow as tf\n'), (506, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (423, 'numpy.exp', 'np.exp', (['(costs / iters)'], {}), True, 'import numpy as np\n'), (246, 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), True, 'import tensorflow as tf\n'), (425, 'time.time', 'time.time', ([], {}), False, 'import time\n')]
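This record replaces the tutorial's LSTM cells with IndRNNCell, clamping each unit's scalar recurrent weight to RECURRENT_MAX = 2**(1/50) so its product over the 50 unrolled steps stays below 2. A minimal NumPy sketch of that per-unit recurrence under clipping; it illustrates the recurrence only, not the ind_rnn_cell module:

import numpy as np

TIME_STEPS = 50
RECURRENT_MAX = 2 ** (1 / TIME_STEPS)

def ind_rnn(x, W, u, h0):
  # x: [T, input_dim]; W: [input_dim, units]; u: [units] per-unit weights.
  u = np.clip(u, -RECURRENT_MAX, RECURRENT_MAX)
  h = h0
  for t in range(x.shape[0]):
    h = np.maximum(0.0, x[t] @ W + u * h)  # relu(W x_t + u * h_{t-1}), element-wise
  return h

rng = np.random.default_rng(0)
h = ind_rnn(rng.normal(size=(TIME_STEPS, 8)), rng.normal(size=(8, 4)),
            rng.normal(size=4), np.zeros(4))
print(h.shape)  # (4,)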
whj363636/CamDrop
f8af8c200665145f112b59348f60fc4cf80f04ec
# -*- coding: utf-8 -*- # File: imagenet_utils.py import multiprocessing import numpy as np import os from abc import abstractmethod import cv2 import tensorflow as tf import tqdm from tensorpack import ModelDesc from tensorpack.dataflow import AugmentImageComponent, BatchData, MultiThreadMapData, PrefetchDataZMQ, dataset, imgaug from tensorpack.input_source import QueueInput, StagingInput from tensorpack.models import regularize_cost from tensorpack.predict import FeedfreePredictor, PredictConfig from tensorpack.tfutils.summary import add_moving_summary from tensorpack.utils import logger from tensorpack.utils.stats import RatioCounter """ ====== DataFlow ======= """ def fbresnet_augmentor(isTrain): """ Augmentor used in fb.resnet.torch, for BGR images in range [0,255]. """ interpolation = cv2.INTER_CUBIC # linear seems to have more stable performance. # but we keep cubic for compatibility with old models if isTrain: augmentors = [ imgaug.GoogleNetRandomCropAndResize(interp=interpolation), # It's OK to remove the following augs if your CPU is not fast enough. # Removing brightness/contrast/saturation does not have a significant effect on accuracy. # Removing lighting leads to a tiny drop in accuracy. imgaug.RandomOrderAug( [imgaug.BrightnessScale((0.6, 1.4), clip=False), imgaug.Contrast((0.6, 1.4), rgb=False, clip=False), imgaug.Saturation(0.4, rgb=False), # rgb-bgr conversion for the constants copied from fb.resnet.torch imgaug.Lighting(0.1, eigval=np.asarray( [0.2175, 0.0188, 0.0045][::-1]) * 255.0, eigvec=np.array( [[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], [-0.5836, -0.6948, 0.4203]], dtype='float32')[::-1, ::-1] )]), imgaug.Flip(horiz=True), ] else: augmentors = [ imgaug.ResizeShortestEdge(256, interp=interpolation), imgaug.CenterCrop((224, 224)), ] return augmentors def get_imagenet_dataflow( datadir, name, batch_size, augmentors=None, parallel=None): """ Args: augmentors (list[imgaug.Augmentor]): Defaults to `fbresnet_augmentor(isTrain)` Returns: A DataFlow which produces BGR images and labels. See explanations in the tutorial: http://tensorpack.readthedocs.io/tutorial/efficient-dataflow.html """ assert name in ['train', 'val', 'test'] isTrain = name == 'train' assert datadir is not None if augmentors is None: augmentors = fbresnet_augmentor(isTrain) assert isinstance(augmentors, list) if parallel is None: parallel = min(40, multiprocessing.cpu_count() // 2) # assuming hyperthreading if isTrain: ds = dataset.ILSVRC12(datadir, name, shuffle=True) ds = AugmentImageComponent(ds, augmentors, copy=False) if parallel < 16: logger.warn("DataFlow may become the bottleneck when too few processes are used.") ds = PrefetchDataZMQ(ds, parallel) ds = BatchData(ds, batch_size, remainder=False) else: ds = dataset.ILSVRC12Files(datadir, name, shuffle=False) aug = imgaug.AugmentorList(augmentors) def mapf(dp): fname, cls = dp im = cv2.imread(fname, cv2.IMREAD_COLOR) im = aug.augment(im) return im, cls ds = MultiThreadMapData(ds, parallel, mapf, buffer_size=2000, strict=True) ds = BatchData(ds, batch_size, remainder=True) ds = PrefetchDataZMQ(ds, 1) return ds """ ====== tf.data ======= """ def get_imagenet_tfdata(datadir, name, batch_size, mapper=None, parallel=None): """ Args: mapper: a symbolic function that takes a tf.string (the raw bytes read from file) and produces a BGR image. Defaults to `fbresnet_mapper(isTrain)`. Returns: A `tf.data.Dataset`. If training, the dataset is infinite. The dataset contains BGR images and labels. 
""" def get_imglist(dir, name): """ Returns: [(full filename, label)] """ dir = os.path.join(dir, name) meta = dataset.ILSVRCMeta() imglist = meta.get_image_list( name, dataset.ILSVRCMeta.guess_dir_structure(dir)) def _filter(fname): # png return 'n02105855_2933.JPEG' in fname ret = [] for fname, label in imglist: if _filter(fname): logger.info("Image {} was filtered out.".format(fname)) continue fname = os.path.join(dir, fname) ret.append((fname, label)) return ret assert name in ['train', 'val', 'test'] assert datadir is not None isTrain = name == 'train' if mapper is None: mapper = fbresnet_mapper(isTrain) if parallel is None: parallel = min(40, multiprocessing.cpu_count() // 2) # assuming hyperthreading imglist = get_imglist(datadir, name) N = len(imglist) filenames = tf.constant([k[0] for k in imglist], name='filenames') labels = tf.constant([k[1] for k in imglist], dtype=tf.int32, name='labels') ds = tf.data.Dataset.from_tensor_slices((filenames, labels)) if isTrain: ds = ds.shuffle(N, reshuffle_each_iteration=True).repeat() ds = ds.apply( tf.data.experimental.map_and_batch( lambda fname, label: (mapper(tf.read_file(fname)), label), batch_size=batch_size, num_parallel_batches=parallel)) ds = ds.prefetch(100) return ds def fbresnet_mapper(isTrain): """ Note: compared to fbresnet_augmentor, it lacks some photometric augmentation that may have a small effect (0.1~0.2%) on accuracy. """ JPEG_OPT = {'fancy_upscaling': True, 'dct_method': 'INTEGER_ACCURATE'} def uint8_resize_bicubic(image, shape): ret = tf.image.resize_bicubic([image], shape) return tf.cast(tf.clip_by_value(ret, 0, 255), tf.uint8)[0] def resize_shortest_edge(image, image_shape, size): shape = tf.cast(image_shape, tf.float32) w_greater = tf.greater(image_shape[0], image_shape[1]) shape = tf.cond(w_greater, lambda: tf.cast([shape[0] / shape[1] * size, size], tf.int32), lambda: tf.cast([size, shape[1] / shape[0] * size], tf.int32)) return uint8_resize_bicubic(image, shape) def center_crop(image, size): image_height = tf.shape(image)[0] image_width = tf.shape(image)[1] offset_height = (image_height - size) // 2 offset_width = (image_width - size) // 2 image = tf.slice(image, [offset_height, offset_width, 0], [size, size, -1]) return image def lighting(image, std, eigval, eigvec): v = tf.random_normal(shape=[3], stddev=std) * eigval inc = tf.matmul(eigvec, tf.reshape(v, [3, 1])) image = tf.cast(tf.cast(image, tf.float32) + tf.reshape(inc, [3]), image.dtype) return image def validation_mapper(byte): image = tf.image.decode_jpeg( tf.reshape(byte, shape=[]), 3, **JPEG_OPT) image = resize_shortest_edge(image, tf.shape(image), 256) image = center_crop(image, 224) image = tf.reverse(image, axis=[2]) # to BGR return image def training_mapper(byte): jpeg_shape = tf.image.extract_jpeg_shape(byte) # hwc bbox_begin, bbox_size, distort_bbox = tf.image.sample_distorted_bounding_box( jpeg_shape, bounding_boxes=tf.zeros(shape=[0, 0, 4]), min_object_covered=0, aspect_ratio_range=[0.75, 1.33], area_range=[0.08, 1.0], max_attempts=10, use_image_if_no_bounding_boxes=True) is_bad = tf.reduce_sum(tf.cast(tf.equal(bbox_size, jpeg_shape), tf.int32)) >= 2 def good(): offset_y, offset_x, _ = tf.unstack(bbox_begin) target_height, target_width, _ = tf.unstack(bbox_size) crop_window = tf.stack([offset_y, offset_x, target_height, target_width]) image = tf.image.decode_and_crop_jpeg( byte, crop_window, channels=3, **JPEG_OPT) image = uint8_resize_bicubic(image, [224, 224]) return image def bad(): image = tf.image.decode_jpeg( tf.reshape(byte, shape=[]), 3, 
**JPEG_OPT) image = resize_shortest_edge(image, jpeg_shape, 224) image = center_crop(image, 224) return image image = tf.cond(is_bad, bad, good) # TODO other imgproc image = lighting(image, 0.1, eigval=np.array([0.2175, 0.0188, 0.0045], dtype='float32') * 255.0, eigvec=np.array([[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], [-0.5836, -0.6948, 0.4203]], dtype='float32')) image = tf.image.random_flip_left_right(image) image = tf.reverse(image, axis=[2]) # to BGR return image return training_mapper if isTrain else validation_mapper """ ====== Model & Evaluation ======= """ def eval_on_ILSVRC12(model, sessinit, dataflow): pred_config = PredictConfig( model=model, session_init=sessinit, input_names=['input', 'label'], output_names=['wrong-top1', 'wrong-top5'] ) acc1, acc5 = RatioCounter(), RatioCounter() # This does not have a visible improvement over naive predictor, # but will have an improvement if image_dtype is set to float32. pred = FeedfreePredictor(pred_config, StagingInput(QueueInput(dataflow), device='/gpu:0')) for _ in tqdm.trange(dataflow.size()): top1, top5 = pred() batch_size = top1.shape[0] acc1.feed(top1.sum(), batch_size) acc5.feed(top5.sum(), batch_size) print("Top1 Error: {}".format(acc1.ratio)) print("Top5 Error: {}".format(acc5.ratio)) class ImageNetModel(ModelDesc): image_shape = 224 """ uint8 instead of float32 is used as input type to reduce copy overhead. It might hurt the performance a liiiitle bit. The pretrained models were trained with float32. """ image_dtype = tf.uint8 """ Either 'NCHW' or 'NHWC' """ data_format = 'NCHW' """ Whether the image is BGR or RGB. If using DataFlow, then it should be BGR. """ image_bgr = True weight_decay = 1e-4 """ To apply on normalization parameters, use '.*/W|.*/gamma|.*/beta' """ weight_decay_pattern = '.*/W' """ Scale the loss, for whatever reasons (e.g., gradient averaging, fp16 training, etc) """ loss_scale = 1. """ Label smoothing (See tf.losses.softmax_cross_entropy) """ label_smoothing = 0. 
def inputs(self): return [tf.TensorSpec([None, self.image_shape, self.image_shape, 3], self.image_dtype, 'input'), tf.TensorSpec([None], tf.int32, 'label')] def build_graph(self, image, label): image = self.image_preprocess(image) assert self.data_format in ['NCHW', 'NHWC'] if self.data_format == 'NCHW': image = tf.transpose(image, [0, 3, 1, 2]) logits = self.get_logits(image, label) loss = ImageNetModel.compute_loss_and_error( logits, label, label_smoothing=self.label_smoothing) if self.weight_decay > 0: wd_loss = regularize_cost(self.weight_decay_pattern, tf.contrib.layers.l2_regularizer(self.weight_decay), name='l2_regularize_loss') add_moving_summary(loss, wd_loss) total_cost = tf.add_n([loss, wd_loss], name='cost') else: total_cost = tf.identity(loss, name='cost') add_moving_summary(total_cost) if self.loss_scale != 1.: logger.info("Scaling the total loss by {} ...".format(self.loss_scale)) return total_cost * self.loss_scale else: return total_cost @abstractmethod def get_logits(self, image, label): """ Args: image: 4D tensor of ``self.input_shape`` in ``self.data_format`` Returns: Nx#class logits """ def optimizer(self): lr = tf.get_variable('learning_rate', initializer=0.1, trainable=False) tf.summary.scalar('learning_rate-summary', lr) return tf.train.MomentumOptimizer(lr, 0.9, use_nesterov=True) def image_preprocess(self, image): with tf.name_scope('image_preprocess'): if image.dtype.base_dtype != tf.float32: image = tf.cast(image, tf.float32) mean = [0.485, 0.456, 0.406] # rgb std = [0.229, 0.224, 0.225] if self.image_bgr: mean = mean[::-1] std = std[::-1] image_mean = tf.constant(mean, dtype=tf.float32) * 255. image_std = tf.constant(std, dtype=tf.float32) * 255. image = (image - image_mean) / image_std return image @staticmethod def compute_loss_and_error(logits, label, label_smoothing=0.): if label_smoothing != 0.: nclass = logits.shape[-1] label = tf.one_hot(label, nclass) if label.shape.ndims == 1 else label if label.shape.ndims == 1: loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label) else: loss = tf.losses.softmax_cross_entropy( label, logits, label_smoothing=label_smoothing, reduction=tf.losses.Reduction.NONE) loss = tf.reduce_mean(loss, name='xentropy-loss') def prediction_incorrect(logits, label, topk=1, name='incorrect_vector'): with tf.name_scope('prediction_incorrect'): x = tf.logical_not(tf.nn.in_top_k(logits, label, topk)) return tf.cast(x, tf.float32, name=name) wrong = prediction_incorrect(logits, label, 1, name='wrong-top1') add_moving_summary(tf.reduce_mean(wrong, name='train-error-top1')) wrong = prediction_incorrect(logits, label, 5, name='wrong-top5') add_moving_summary(tf.reduce_mean(wrong, name='train-error-top5')) return loss if __name__ == '__main__': import argparse from tensorpack.dataflow import TestDataSpeed from tensorpack.tfutils import get_default_sess_config parser = argparse.ArgumentParser() parser.add_argument('--data', required=True) parser.add_argument('--batch', type=int, default=32) parser.add_argument('--aug', choices=['train', 'val'], default='val') parser.add_argument('--symbolic', action='store_true') args = parser.parse_args() if not args.symbolic: augs = fbresnet_augmentor(args.aug == 'train') df = get_imagenet_dataflow( args.data, 'train', args.batch, augs) # For val augmentor, Should get >100 it/s (i.e. 3k im/s) here on a decent E5 server. 
        TestDataSpeed(df).start()
    else:
        assert args.aug == 'train'
        data = get_imagenet_tfdata(args.data, 'train', args.batch)
        itr = data.make_initializable_iterator()
        dp = itr.get_next()
        dpop = tf.group(*dp)
        with tf.Session(config=get_default_sess_config()) as sess:
            sess.run(itr.initializer)
            for _ in tqdm.trange(200):
                sess.run(dpop)
            for _ in tqdm.trange(5000, smoothing=0.1):
                sess.run(dpop)
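# Illustrative sketch (not from the original repo): eval_on_ILSVRC12 above
# accumulates the per-batch 'wrong-top1'/'wrong-top5' indicator vectors with
# RatioCounter and reads back `.ratio`. The class below is a hypothetical
# stand-in for tensorpack.utils.stats.RatioCounter, reproducing only the
# feed()/ratio surface this file actually uses, to make the error
# aggregation concrete.
import numpy as np


class RatioCounterSketch:
    def __init__(self):
        self._count, self._total = 0, 0

    def feed(self, count, total):
        # accumulate the number of wrong predictions and the number of samples
        self._count += count
        self._total += total

    @property
    def ratio(self):
        return self._count / max(self._total, 1)


acc1 = RatioCounterSketch()
for wrong in [np.array([0, 1, 0, 0]), np.array([1, 1, 0, 1])]:  # two fake batches
    acc1.feed(wrong.sum(), wrong.shape[0])
print(acc1.ratio)  # 0.5 -> top-1 error rate over all 8 samples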
[ "tensorflow.cond", "tensorflow.get_variable", "tensorflow.zeros", "numpy.asarray", "tensorflow.stack", "tensorflow.cast", "tensorflow.equal", "tensorflow.image.resize_bicubic", "tensorflow.image.decode_and_crop_jpeg", "tensorflow.group", "tensorflow.summary.scalar", "tensorflow.add_n", "tensorflow.image.random_flip_left_right", "tensorflow.greater", "tensorflow.read_file", "tensorflow.losses.softmax_cross_entropy", "tensorflow.train.MomentumOptimizer", "tensorflow.name_scope", "tensorflow.nn.in_top_k", "tensorflow.reverse", "tensorflow.unstack", "tensorflow.shape", "tensorflow.identity", "tensorflow.image.extract_jpeg_shape", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.one_hot", "numpy.array", "tensorflow.clip_by_value", "tensorflow.constant", "tensorflow.transpose", "tensorflow.reduce_mean", "tensorflow.slice", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.reshape", "tensorflow.contrib.layers.l2_regularizer", "tensorflow.TensorSpec", "tensorflow.random_normal" ]
ResNet/imagenet_utils.py
[(158, 'tensorflow.constant', 'tf.constant', (['[k[0] for k in imglist]'], {'name': '"""filenames"""'}), True, 'import tensorflow as tf\n'), (159, 'tensorflow.constant', 'tf.constant', (['[k[1] for k in imglist]'], {'dtype': 'tf.int32', 'name': '"""labels"""'}), True, 'import tensorflow as tf\n'), (161, 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(filenames, labels)'], {}), True, 'import tensorflow as tf\n'), (268, 'tensorpack.predict.PredictConfig', 'PredictConfig', ([], {'model': 'model', 'session_init': 'sessinit', 'input_names': "['input', 'label']", 'output_names': "['wrong-top1', 'wrong-top5']"}), False, 'from tensorpack.predict import FeedfreePredictor, PredictConfig\n'), (416, 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), False, 'import argparse\n'), (87, 'tensorpack.dataflow.dataset.ILSVRC12', 'dataset.ILSVRC12', (['datadir', 'name'], {'shuffle': '(True)'}), False, 'from tensorpack.dataflow import AugmentImageComponent, BatchData, MultiThreadMapData, PrefetchDataZMQ, dataset, imgaug\n'), (88, 'tensorpack.dataflow.AugmentImageComponent', 'AugmentImageComponent', (['ds', 'augmentors'], {'copy': '(False)'}), False, 'from tensorpack.dataflow import AugmentImageComponent, BatchData, MultiThreadMapData, PrefetchDataZMQ, dataset, imgaug\n'), (91, 'tensorpack.dataflow.PrefetchDataZMQ', 'PrefetchDataZMQ', (['ds', 'parallel'], {}), False, 'from tensorpack.dataflow import AugmentImageComponent, BatchData, MultiThreadMapData, PrefetchDataZMQ, dataset, imgaug\n'), (92, 'tensorpack.dataflow.BatchData', 'BatchData', (['ds', 'batch_size'], {'remainder': '(False)'}), False, 'from tensorpack.dataflow import AugmentImageComponent, BatchData, MultiThreadMapData, PrefetchDataZMQ, dataset, imgaug\n'), (94, 'tensorpack.dataflow.dataset.ILSVRC12Files', 'dataset.ILSVRC12Files', (['datadir', 'name'], {'shuffle': '(False)'}), False, 'from tensorpack.dataflow import AugmentImageComponent, BatchData, MultiThreadMapData, PrefetchDataZMQ, dataset, imgaug\n'), (95, 'tensorpack.dataflow.imgaug.AugmentorList', 'imgaug.AugmentorList', (['augmentors'], {}), False, 'from tensorpack.dataflow import AugmentImageComponent, BatchData, MultiThreadMapData, PrefetchDataZMQ, dataset, imgaug\n'), (102, 'tensorpack.dataflow.MultiThreadMapData', 'MultiThreadMapData', (['ds', 'parallel', 'mapf'], {'buffer_size': '(2000)', 'strict': '(True)'}), False, 'from tensorpack.dataflow import AugmentImageComponent, BatchData, MultiThreadMapData, PrefetchDataZMQ, dataset, imgaug\n'), (103, 'tensorpack.dataflow.BatchData', 'BatchData', (['ds', 'batch_size'], {'remainder': '(True)'}), False, 'from tensorpack.dataflow import AugmentImageComponent, BatchData, MultiThreadMapData, PrefetchDataZMQ, dataset, imgaug\n'), (104, 'tensorpack.dataflow.PrefetchDataZMQ', 'PrefetchDataZMQ', (['ds', '(1)'], {}), False, 'from tensorpack.dataflow import AugmentImageComponent, BatchData, MultiThreadMapData, PrefetchDataZMQ, dataset, imgaug\n'), (129, 'os.path.join', 'os.path.join', (['dir', 'name'], {}), False, 'import os\n'), (130, 'tensorpack.dataflow.dataset.ILSVRCMeta', 'dataset.ILSVRCMeta', ([], {}), False, 'from tensorpack.dataflow import AugmentImageComponent, BatchData, MultiThreadMapData, PrefetchDataZMQ, dataset, imgaug\n'), (183, 'tensorflow.image.resize_bicubic', 'tf.image.resize_bicubic', (['[image]', 'shape'], {}), True, 'import tensorflow as tf\n'), (187, 'tensorflow.cast', 'tf.cast', (['image_shape', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (188, 'tensorflow.greater', 
'tf.greater', (['image_shape[0]', 'image_shape[1]'], {}), True, 'import tensorflow as tf\n'), (201, 'tensorflow.slice', 'tf.slice', (['image', '[offset_height, offset_width, 0]', '[size, size, -1]'], {}), True, 'import tensorflow as tf\n'), (215, 'tensorflow.reverse', 'tf.reverse', (['image'], {'axis': '[2]'}), True, 'import tensorflow as tf\n'), (219, 'tensorflow.image.extract_jpeg_shape', 'tf.image.extract_jpeg_shape', (['byte'], {}), True, 'import tensorflow as tf\n'), (248, 'tensorflow.cond', 'tf.cond', (['is_bad', 'bad', 'good'], {}), True, 'import tensorflow as tf\n'), (255, 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['image'], {}), True, 'import tensorflow as tf\n'), (256, 'tensorflow.reverse', 'tf.reverse', (['image'], {'axis': '[2]'}), True, 'import tensorflow as tf\n'), (274, 'tensorpack.utils.stats.RatioCounter', 'RatioCounter', ([], {}), False, 'from tensorpack.utils.stats import RatioCounter\n'), (274, 'tensorpack.utils.stats.RatioCounter', 'RatioCounter', ([], {}), False, 'from tensorpack.utils.stats import RatioCounter\n'), (367, 'tensorflow.get_variable', 'tf.get_variable', (['"""learning_rate"""'], {'initializer': '(0.1)', 'trainable': '(False)'}), True, 'import tensorflow as tf\n'), (368, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""learning_rate-summary"""', 'lr'], {}), True, 'import tensorflow as tf\n'), (369, 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', (['lr', '(0.9)'], {'use_nesterov': '(True)'}), True, 'import tensorflow as tf\n'), (397, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {'name': '"""xentropy-loss"""'}), True, 'import tensorflow as tf\n'), (435, 'tensorflow.group', 'tf.group', (['*dp'], {}), True, 'import tensorflow as tf\n'), (37, 'tensorpack.dataflow.imgaug.GoogleNetRandomCropAndResize', 'imgaug.GoogleNetRandomCropAndResize', ([], {'interp': 'interpolation'}), False, 'from tensorpack.dataflow import AugmentImageComponent, BatchData, MultiThreadMapData, PrefetchDataZMQ, dataset, imgaug\n'), (55, 'tensorpack.dataflow.imgaug.Flip', 'imgaug.Flip', ([], {'horiz': '(True)'}), False, 'from tensorpack.dataflow import AugmentImageComponent, BatchData, MultiThreadMapData, PrefetchDataZMQ, dataset, imgaug\n'), (59, 'tensorpack.dataflow.imgaug.ResizeShortestEdge', 'imgaug.ResizeShortestEdge', (['(256)'], {'interp': 'interpolation'}), False, 'from tensorpack.dataflow import AugmentImageComponent, BatchData, MultiThreadMapData, PrefetchDataZMQ, dataset, imgaug\n'), (60, 'tensorpack.dataflow.imgaug.CenterCrop', 'imgaug.CenterCrop', (['(224, 224)'], {}), False, 'from tensorpack.dataflow import AugmentImageComponent, BatchData, MultiThreadMapData, PrefetchDataZMQ, dataset, imgaug\n'), (90, 'tensorpack.utils.logger.warn', 'logger.warn', (['"""DataFlow may become the bottleneck when too few processes are used."""'], {}), False, 'from tensorpack.utils import logger\n'), (99, 'cv2.imread', 'cv2.imread', (['fname', 'cv2.IMREAD_COLOR'], {}), False, 'import cv2\n'), (133, 'tensorpack.dataflow.dataset.ILSVRCMeta.guess_dir_structure', 'dataset.ILSVRCMeta.guess_dir_structure', (['dir'], {}), False, 'from tensorpack.dataflow import AugmentImageComponent, BatchData, MultiThreadMapData, PrefetchDataZMQ, dataset, imgaug\n'), (144, 'os.path.join', 'os.path.join', (['dir', 'fname'], {}), False, 'import os\n'), (196, 'tensorflow.shape', 'tf.shape', (['image'], {}), True, 'import tensorflow as tf\n'), (197, 'tensorflow.shape', 'tf.shape', (['image'], {}), True, 'import tensorflow as tf\n'), (205, 
'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': '[3]', 'stddev': 'std'}), True, 'import tensorflow as tf\n'), (206, 'tensorflow.reshape', 'tf.reshape', (['v', '[3, 1]'], {}), True, 'import tensorflow as tf\n'), (212, 'tensorflow.reshape', 'tf.reshape', (['byte'], {'shape': '[]'}), True, 'import tensorflow as tf\n'), (213, 'tensorflow.shape', 'tf.shape', (['image'], {}), True, 'import tensorflow as tf\n'), (232, 'tensorflow.unstack', 'tf.unstack', (['bbox_begin'], {}), True, 'import tensorflow as tf\n'), (233, 'tensorflow.unstack', 'tf.unstack', (['bbox_size'], {}), True, 'import tensorflow as tf\n'), (234, 'tensorflow.stack', 'tf.stack', (['[offset_y, offset_x, target_height, target_width]'], {}), True, 'import tensorflow as tf\n'), (236, 'tensorflow.image.decode_and_crop_jpeg', 'tf.image.decode_and_crop_jpeg', (['byte', 'crop_window'], {'channels': '(3)'}), True, 'import tensorflow as tf\n'), (278, 'tensorpack.input_source.QueueInput', 'QueueInput', (['dataflow'], {}), False, 'from tensorpack.input_source import QueueInput, StagingInput\n'), (327, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['[None, self.image_shape, self.image_shape, 3]', 'self.image_dtype', '"""input"""'], {}), True, 'import tensorflow as tf\n'), (328, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['[None]', 'tf.int32', '"""label"""'], {}), True, 'import tensorflow as tf\n'), (334, 'tensorflow.transpose', 'tf.transpose', (['image', '[0, 3, 1, 2]'], {}), True, 'import tensorflow as tf\n'), (344, 'tensorpack.tfutils.summary.add_moving_summary', 'add_moving_summary', (['loss', 'wd_loss'], {}), False, 'from tensorpack.tfutils.summary import add_moving_summary\n'), (345, 'tensorflow.add_n', 'tf.add_n', (['[loss, wd_loss]'], {'name': '"""cost"""'}), True, 'import tensorflow as tf\n'), (347, 'tensorflow.identity', 'tf.identity', (['loss'], {'name': '"""cost"""'}), True, 'import tensorflow as tf\n'), (348, 'tensorpack.tfutils.summary.add_moving_summary', 'add_moving_summary', (['total_cost'], {}), False, 'from tensorpack.tfutils.summary import add_moving_summary\n'), (372, 'tensorflow.name_scope', 'tf.name_scope', (['"""image_preprocess"""'], {}), True, 'import tensorflow as tf\n'), (392, 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'logits': 'logits', 'labels': 'label'}), True, 'import tensorflow as tf\n'), (394, 'tensorflow.losses.softmax_cross_entropy', 'tf.losses.softmax_cross_entropy', (['label', 'logits'], {'label_smoothing': 'label_smoothing', 'reduction': 'tf.losses.Reduction.NONE'}), True, 'import tensorflow as tf\n'), (402, 'tensorflow.cast', 'tf.cast', (['x', 'tf.float32'], {'name': 'name'}), True, 'import tensorflow as tf\n'), (405, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['wrong'], {'name': '"""train-error-top1"""'}), True, 'import tensorflow as tf\n'), (408, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['wrong'], {'name': '"""train-error-top5"""'}), True, 'import tensorflow as tf\n'), (438, 'tqdm.trange', 'tqdm.trange', (['(200)'], {}), False, 'import tqdm\n'), (440, 'tqdm.trange', 'tqdm.trange', (['(5000)'], {'smoothing': '(0.1)'}), False, 'import tqdm\n'), (84, 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), False, 'import multiprocessing\n'), (154, 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), False, 'import multiprocessing\n'), (184, 'tensorflow.clip_by_value', 'tf.clip_by_value', (['ret', '(0)', '(255)'], {}), True, 'import tensorflow as tf\n'), (190, 'tensorflow.cast', 'tf.cast', 
(['[shape[0] / shape[1] * size, size]', 'tf.int32'], {}), True, 'import tensorflow as tf\n'), (191, 'tensorflow.cast', 'tf.cast', (['[size, shape[1] / shape[0] * size]', 'tf.int32'], {}), True, 'import tensorflow as tf\n'), (207, 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (207, 'tensorflow.reshape', 'tf.reshape', (['inc', '[3]'], {}), True, 'import tensorflow as tf\n'), (222, 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[0, 0, 4]'}), True, 'import tensorflow as tf\n'), (243, 'tensorflow.reshape', 'tf.reshape', (['byte'], {'shape': '[]'}), True, 'import tensorflow as tf\n'), (252, 'numpy.array', 'np.array', (['[[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.814], [-0.5836, -0.6948, \n 0.4203]]'], {'dtype': '"""float32"""'}), True, 'import numpy as np\n'), (342, 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['self.weight_decay'], {}), True, 'import tensorflow as tf\n'), (374, 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (380, 'tensorflow.constant', 'tf.constant', (['mean'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (381, 'tensorflow.constant', 'tf.constant', (['std'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (389, 'tensorflow.one_hot', 'tf.one_hot', (['label', 'nclass'], {}), True, 'import tensorflow as tf\n'), (400, 'tensorflow.name_scope', 'tf.name_scope', (['"""prediction_incorrect"""'], {}), True, 'import tensorflow as tf\n'), (428, 'tensorpack.dataflow.TestDataSpeed', 'TestDataSpeed', (['df'], {}), False, 'from tensorpack.dataflow import TestDataSpeed\n'), (42, 'tensorpack.dataflow.imgaug.BrightnessScale', 'imgaug.BrightnessScale', (['(0.6, 1.4)'], {'clip': '(False)'}), False, 'from tensorpack.dataflow import AugmentImageComponent, BatchData, MultiThreadMapData, PrefetchDataZMQ, dataset, imgaug\n'), (43, 'tensorpack.dataflow.imgaug.Contrast', 'imgaug.Contrast', (['(0.6, 1.4)'], {'rgb': '(False)', 'clip': '(False)'}), False, 'from tensorpack.dataflow import AugmentImageComponent, BatchData, MultiThreadMapData, PrefetchDataZMQ, dataset, imgaug\n'), (44, 'tensorpack.dataflow.imgaug.Saturation', 'imgaug.Saturation', (['(0.4)'], {'rgb': '(False)'}), False, 'from tensorpack.dataflow import AugmentImageComponent, BatchData, MultiThreadMapData, PrefetchDataZMQ, dataset, imgaug\n'), (229, 'tensorflow.equal', 'tf.equal', (['bbox_size', 'jpeg_shape'], {}), True, 'import tensorflow as tf\n'), (251, 'numpy.array', 'np.array', (['[0.2175, 0.0188, 0.0045]'], {'dtype': '"""float32"""'}), True, 'import numpy as np\n'), (401, 'tensorflow.nn.in_top_k', 'tf.nn.in_top_k', (['logits', 'label', 'topk'], {}), True, 'import tensorflow as tf\n'), (436, 'tensorpack.tfutils.get_default_sess_config', 'get_default_sess_config', ([], {}), False, 'from tensorpack.tfutils import get_default_sess_config\n'), (168, 'tensorflow.read_file', 'tf.read_file', (['fname'], {}), True, 'import tensorflow as tf\n'), (47, 'numpy.asarray', 'np.asarray', (['[0.2175, 0.0188, 0.0045][::-1]'], {}), True, 'import numpy as np\n'), (49, 'numpy.array', 'np.array', (['[[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.814], [-0.5836, -0.6948, \n 0.4203]]'], {'dtype': '"""float32"""'}), True, 'import numpy as np\n')]
Rita-ritally/Tacotron2-CN-TTS
f67f060750fe12a014e35857d6ff6e279d41d68a
from synthesizer.utils.symbols import symbols
from synthesizer.utils.text import sequence_to_text
from synthesizer.hparams import hparams_debug_string
from synthesizer.feeder import Feeder
from synthesizer.models import create_model
from synthesizer.utils import ValueWindow, plot
from synthesizer import infolog, audio
from datetime import datetime
from tqdm import tqdm
import tensorflow as tf
import numpy as np
import traceback
import time
import os

log = infolog.log


def add_embedding_stats(summary_writer, embedding_names, paths_to_meta, checkpoint_path):
    # Create tensorboard projector
    config = tf.contrib.tensorboard.plugins.projector.ProjectorConfig()
    config.model_checkpoint_path = checkpoint_path

    for embedding_name, path_to_meta in zip(embedding_names, paths_to_meta):
        # Initialize config
        embedding = config.embeddings.add()
        # Specify the embedding variable and the metadata
        embedding.tensor_name = embedding_name
        embedding.metadata_path = path_to_meta

    # Project the embeddings to space dimensions for visualization
    tf.contrib.tensorboard.plugins.projector.visualize_embeddings(summary_writer, config)


def add_train_stats(model, hparams):
    with tf.variable_scope("stats") as scope:
        for i in range(hparams.tacotron_num_gpus):
            tf.summary.histogram("mel_outputs %d" % i, model.tower_mel_outputs[i])
            tf.summary.histogram("mel_targets %d" % i, model.tower_mel_targets[i])
        tf.summary.scalar("before_loss", model.before_loss)
        tf.summary.scalar("after_loss", model.after_loss)

        if hparams.predict_linear:
            tf.summary.scalar("linear_loss", model.linear_loss)
            for i in range(hparams.tacotron_num_gpus):
                tf.summary.histogram("mel_outputs %d" % i, model.tower_linear_outputs[i])
                tf.summary.histogram("mel_targets %d" % i, model.tower_linear_targets[i])

        tf.summary.scalar("regularization_loss", model.regularization_loss)
        tf.summary.scalar("stop_token_loss", model.stop_token_loss)
        tf.summary.scalar("loss", model.loss)
        tf.summary.scalar("learning_rate", model.learning_rate)  # Control learning rate decay speed
        if hparams.tacotron_teacher_forcing_mode == "scheduled":
            tf.summary.scalar("teacher_forcing_ratio", model.ratio)  # Control teacher forcing
            # ratio decay when mode = "scheduled"
        gradient_norms = [tf.norm(grad) for grad in model.gradients]
        tf.summary.histogram("gradient_norm", gradient_norms)
        tf.summary.scalar("max_gradient_norm", tf.reduce_max(gradient_norms))  # visualize
        # gradients (in case of explosion)
        return tf.summary.merge_all()


def add_eval_stats(summary_writer, step, linear_loss, before_loss, after_loss, stop_token_loss, loss):
    values = [
        tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_before_loss", simple_value=before_loss),
        tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_after_loss", simple_value=after_loss),
        tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/stop_token_loss", simple_value=stop_token_loss),
        tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_loss", simple_value=loss),
    ]
    if linear_loss is not None:
        values.append(tf.Summary.Value(tag="Tacotron_eval_model/eval_stats/eval_linear_loss",
                                       simple_value=linear_loss))
    test_summary = tf.Summary(value=values)
    summary_writer.add_summary(test_summary, step)


def time_string():
    return datetime.now().strftime("%Y-%m-%d %H:%M")


def model_train_mode(args, feeder, hparams, global_step):
    with tf.variable_scope("Tacotron_model", reuse=tf.AUTO_REUSE) as scope:
        model = create_model("Tacotron", hparams)
        model.initialize(feeder.inputs, feeder.input_lengths, feeder.speaker_embeddings,
                         feeder.mel_targets, feeder.token_targets,
                         targets_lengths=feeder.targets_lengths, global_step=global_step,
                         is_training=True, split_infos=feeder.split_infos)
        model.add_loss()
        model.add_optimizer(global_step)
        stats = add_train_stats(model, hparams)
        return model, stats


def model_test_mode(args, feeder, hparams, global_step):
    with tf.variable_scope("Tacotron_model", reuse=tf.AUTO_REUSE) as scope:
        model = create_model("Tacotron", hparams)
        model.initialize(feeder.eval_inputs, feeder.eval_input_lengths,
                         feeder.eval_speaker_embeddings, feeder.eval_mel_targets,
                         feeder.eval_token_targets, targets_lengths=feeder.eval_targets_lengths,
                         global_step=global_step, is_training=False, is_evaluating=True,
                         split_infos=feeder.eval_split_infos)
        model.add_loss()
        return model


def train(log_dir, args, hparams):
    save_dir = os.path.join(log_dir, "taco_pretrained")
    plot_dir = os.path.join(log_dir, "plots")
    wav_dir = os.path.join(log_dir, "wavs")
    mel_dir = os.path.join(log_dir, "mel-spectrograms")
    eval_dir = os.path.join(log_dir, "eval-dir")
    eval_plot_dir = os.path.join(eval_dir, "plots")
    eval_wav_dir = os.path.join(eval_dir, "wavs")
    tensorboard_dir = os.path.join(log_dir, "tacotron_events")
    meta_folder = os.path.join(log_dir, "metas")
    os.makedirs(save_dir, exist_ok=True)
    os.makedirs(plot_dir, exist_ok=True)
    os.makedirs(wav_dir, exist_ok=True)
    os.makedirs(mel_dir, exist_ok=True)
    os.makedirs(eval_dir, exist_ok=True)
    os.makedirs(eval_plot_dir, exist_ok=True)
    os.makedirs(eval_wav_dir, exist_ok=True)
    os.makedirs(tensorboard_dir, exist_ok=True)
    os.makedirs(meta_folder, exist_ok=True)

    checkpoint_fpath = os.path.join(save_dir, "tacotron_model.ckpt")
    metadat_fpath = os.path.join(args.synthesizer_root, "train.txt")

    log("Checkpoint path: {}".format(checkpoint_fpath))
    log("Loading training data from: {}".format(metadat_fpath))
    log("Using model: Tacotron")
    log(hparams_debug_string())

    # Start by setting a seed for repeatability
    tf.set_random_seed(hparams.tacotron_random_seed)

    # Set up data feeder
    coord = tf.train.Coordinator()
    with tf.variable_scope("datafeeder") as scope:
        feeder = Feeder(coord, metadat_fpath, hparams)

    # Set up model:
    global_step = tf.Variable(0, name="global_step", trainable=False)
    model, stats = model_train_mode(args, feeder, hparams, global_step)
    eval_model = model_test_mode(args, feeder, hparams, global_step)

    # Embeddings metadata
    char_embedding_meta = os.path.join(meta_folder, "CharacterEmbeddings.tsv")
    if not os.path.isfile(char_embedding_meta):
        with open(char_embedding_meta, "w", encoding="utf-8") as f:
            for symbol in symbols:
                if symbol == " ":
                    symbol = "\\s"  # For visual purposes, swap space with \s
                f.write("{}\n".format(symbol))

    char_embedding_meta = char_embedding_meta.replace(log_dir, "..")

    # Book keeping
    step = 0
    time_window = ValueWindow(100)
    loss_window = ValueWindow(100)
    saver = tf.train.Saver(max_to_keep=5)

    log("Tacotron training set to a maximum of {} steps".format(args.tacotron_train_steps))

    # Memory allocation on the GPU as needed
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True

    # Train
    with tf.Session(config=config) as sess:
        try:
            summary_writer = tf.summary.FileWriter(tensorboard_dir, sess.graph)

            sess.run(tf.global_variables_initializer())

            # saved model restoring
            if args.restore:
                # Restore saved model if the user requested it, default = True
                try:
                    checkpoint_state = tf.train.get_checkpoint_state(save_dir)

                    if checkpoint_state and checkpoint_state.model_checkpoint_path:
                        log("Loading checkpoint {}".format(checkpoint_state.model_checkpoint_path),
                            slack=True)
                        saver.restore(sess, checkpoint_state.model_checkpoint_path)
                    else:
                        log("No model to load at {}".format(save_dir), slack=True)
                        saver.save(sess, checkpoint_fpath, global_step=global_step)
                except tf.errors.OutOfRangeError as e:
                    log("Cannot restore checkpoint: {}".format(e), slack=True)
            else:
                log("Starting new training!", slack=True)
                saver.save(sess, checkpoint_fpath, global_step=global_step)

            # initializing feeder
            feeder.start_threads(sess)

            # Training loop
            while not coord.should_stop() and step < args.tacotron_train_steps:
                start_time = time.time()
                step, loss, opt = sess.run([global_step, model.loss, model.optimize])
                time_window.append(time.time() - start_time)
                loss_window.append(loss)

                message = "Step {:7d} [{:.3f} sec/step, loss={:.5f}, avg_loss={:.5f}]".format(
                    step, time_window.average, loss, loss_window.average)
                log(message, end="\r", slack=(step % args.checkpoint_interval == 0))
                print(message)

                if loss > 100 or np.isnan(loss):
                    log("Loss exploded to {:.5f} at step {}".format(loss, step))
                    raise Exception("Loss exploded")

                if step % args.summary_interval == 0:
                    log("\nWriting summary at step {}".format(step))
                    summary_writer.add_summary(sess.run(stats), step)

                if step % args.eval_interval == 0:
                    # Run eval and save eval stats
                    log("\nRunning evaluation at step {}".format(step))

                    eval_losses = []
                    before_losses = []
                    after_losses = []
                    stop_token_losses = []
                    linear_losses = []
                    linear_loss = None

                    if hparams.predict_linear:
                        for i in tqdm(range(feeder.test_steps)):
                            eloss, before_loss, after_loss, stop_token_loss, linear_loss, mel_p, \
                                mel_t, t_len, align, lin_p, lin_t = sess.run(
                                    [
                                        eval_model.tower_loss[0],
                                        eval_model.tower_before_loss[0],
                                        eval_model.tower_after_loss[0],
                                        eval_model.tower_stop_token_loss[0],
                                        eval_model.tower_linear_loss[0],
                                        eval_model.tower_mel_outputs[0][0],
                                        eval_model.tower_mel_targets[0][0],
                                        eval_model.tower_targets_lengths[0][0],
                                        eval_model.tower_alignments[0][0],
                                        eval_model.tower_linear_outputs[0][0],
                                        eval_model.tower_linear_targets[0][0],
                                    ])
                            eval_losses.append(eloss)
                            before_losses.append(before_loss)
                            after_losses.append(after_loss)
                            stop_token_losses.append(stop_token_loss)
                            linear_losses.append(linear_loss)
                        linear_loss = sum(linear_losses) / len(linear_losses)

                        wav = audio.inv_linear_spectrogram(lin_p.T, hparams)
                        audio.save_wav(wav, os.path.join(eval_wav_dir, "step-{}-eval-wave-from-linear.wav".format(step)),
                                       sr=hparams.sample_rate)
                    else:
                        for i in tqdm(range(feeder.test_steps)):
                            eloss, before_loss, after_loss, stop_token_loss, mel_p, mel_t, t_len, \
                                align = sess.run(
                                    [
                                        eval_model.tower_loss[0],
                                        eval_model.tower_before_loss[0],
                                        eval_model.tower_after_loss[0],
                                        eval_model.tower_stop_token_loss[0],
                                        eval_model.tower_mel_outputs[0][0],
                                        eval_model.tower_mel_targets[0][0],
                                        eval_model.tower_targets_lengths[0][0],
                                        eval_model.tower_alignments[0][0]
                                    ])
                            eval_losses.append(eloss)
                            before_losses.append(before_loss)
                            after_losses.append(after_loss)
                            stop_token_losses.append(stop_token_loss)

                    eval_loss = sum(eval_losses) / len(eval_losses)
                    before_loss = sum(before_losses) / len(before_losses)
                    after_loss = sum(after_losses) / len(after_losses)
                    stop_token_loss = sum(stop_token_losses) / len(stop_token_losses)

                    log("Saving eval log to {}..".format(eval_dir))
                    # Save some log to monitor model improvement on same unseen sequence
                    wav = audio.inv_mel_spectrogram(mel_p.T, hparams)
                    audio.save_wav(wav, os.path.join(eval_wav_dir, "step-{}-eval-wave-from-mel.wav".format(step)),
                                   sr=hparams.sample_rate)

                    plot.plot_alignment(align, os.path.join(eval_plot_dir, "step-{}-eval-align.png".format(step)),
                                        title="{}, {}, step={}, loss={:.5f}".format("Tacotron", time_string(), step, eval_loss),
                                        max_len=t_len // hparams.outputs_per_step)
                    plot.plot_spectrogram(mel_p, os.path.join(eval_plot_dir, "step-{" "}-eval-mel-spectrogram.png".format(step)),
                                          title="{}, {}, step={}, loss={:.5f}".format("Tacotron", time_string(), step, eval_loss),
                                          target_spectrogram=mel_t, max_len=t_len)

                    if hparams.predict_linear:
                        plot.plot_spectrogram(lin_p, os.path.join(eval_plot_dir, "step-{}-eval-linear-spectrogram.png".format(step)),
                                              title="{}, {}, step={}, loss={:.5f}".format("Tacotron", time_string(), step, eval_loss),
                                              target_spectrogram=lin_t, max_len=t_len, auto_aspect=True)

                    log("Eval loss for global step {}: {:.3f}".format(step, eval_loss))
                    log("Writing eval summary!")
                    add_eval_stats(summary_writer, step, linear_loss, before_loss, after_loss,
                                   stop_token_loss, eval_loss)

                if step % args.checkpoint_interval == 0 or step == args.tacotron_train_steps or \
                        step == 300:
                    # Save model and current global step
                    saver.save(sess, checkpoint_fpath, global_step=global_step)

                    log("\nSaving alignment, Mel-Spectrograms and griffin-lim inverted waveform..")
                    input_seq, mel_prediction, alignment, target, target_length = sess.run([
                        model.tower_inputs[0][0],
                        model.tower_mel_outputs[0][0],
                        model.tower_alignments[0][0],
                        model.tower_mel_targets[0][0],
                        model.tower_targets_lengths[0][0],
                    ])

                    # save predicted mel spectrogram to disk (debug)
                    mel_filename = "mel-prediction-step-{}.npy".format(step)
                    np.save(os.path.join(mel_dir, mel_filename), mel_prediction.T,
                            allow_pickle=False)

                    # save griffin lim inverted wav for debug (mel -> wav)
                    wav = audio.inv_mel_spectrogram(mel_prediction.T, hparams)
                    audio.save_wav(wav, os.path.join(wav_dir, "step-{}-wave-from-mel.wav".format(step)),
                                   sr=hparams.sample_rate)

                    # save alignment plot to disk (control purposes)
                    plot.plot_alignment(alignment, os.path.join(plot_dir, "step-{}-align.png".format(step)),
                                        title="{}, {}, step={}, loss={:.5f}".format("Tacotron", time_string(), step, loss),
                                        max_len=target_length // hparams.outputs_per_step)
                    # save real and predicted mel-spectrogram plot to disk (control purposes)
                    plot.plot_spectrogram(mel_prediction, os.path.join(plot_dir, "step-{}-mel-spectrogram.png".format(step)),
                                          title="{}, {}, step={}, loss={:.5f}".format("Tacotron", time_string(), step, loss),
                                          target_spectrogram=target, max_len=target_length)
                    log("Input at step {}: {}".format(step, sequence_to_text(input_seq)))

                if step % args.embedding_interval == 0 or step == args.tacotron_train_steps or step == 1:
                    # Get current checkpoint state
                    checkpoint_state = tf.train.get_checkpoint_state(save_dir)

                    # Update Projector
                    log("\nSaving Model Character Embeddings visualization..")
                    add_embedding_stats(summary_writer, [model.embedding_table.name],
                                        [char_embedding_meta],
                                        checkpoint_state.model_checkpoint_path)
                    log("Tacotron Character embeddings have been updated on tensorboard!")

            log("Tacotron training complete after {} global steps!".format(
                args.tacotron_train_steps), slack=True)
            return save_dir

        except Exception as e:
            log("Exiting due to exception: {}".format(e), slack=True)
            traceback.print_exc()
            coord.request_stop(e)


def tacotron_train(args, log_dir, hparams):
    return train(log_dir, args, hparams)
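# Illustrative sketch (not from the original repo): the training loop above
# smooths sec/step and loss with ValueWindow(100) via append()/average. The
# real class lives in synthesizer/utils; the stand-in below is written purely
# from the usage visible here, under the assumption that it keeps a
# fixed-size window of the most recent N values.
from collections import deque


class ValueWindowSketch:
    def __init__(self, window_size=100):
        self._values = deque(maxlen=window_size)  # oldest entries fall off

    def append(self, x):
        self._values.append(x)

    @property
    def average(self):
        return sum(self._values) / max(len(self._values), 1)


w = ValueWindowSketch(window_size=3)
for v in [1.0, 2.0, 3.0, 4.0]:
    w.append(v)
print(w.average)  # 3.0 -> mean of only the last 3 values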
[ "tensorflow.summary.scalar", "tensorflow.Variable", "tensorflow.ConfigProto", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.Summary", "tensorflow.norm", "numpy.isnan", "tensorflow.train.Coordinator", "tensorflow.global_variables_initializer", "tensorflow.summary.merge_all", "tensorflow.set_random_seed", "tensorflow.contrib.tensorboard.plugins.projector.ProjectorConfig", "tensorflow.summary.histogram", "tensorflow.train.get_checkpoint_state", "tensorflow.reduce_max", "tensorflow.summary.FileWriter", "tensorflow.contrib.tensorboard.plugins.projector.visualize_embeddings", "tensorflow.Summary.Value", "tensorflow.variable_scope" ]
synthesizer/train.py
[(21, 'tensorflow.contrib.tensorboard.plugins.projector.ProjectorConfig', 'tf.contrib.tensorboard.plugins.projector.ProjectorConfig', ([], {}), True, 'import tensorflow as tf\n'), (32, 'tensorflow.contrib.tensorboard.plugins.projector.visualize_embeddings', 'tf.contrib.tensorboard.plugins.projector.visualize_embeddings', (['summary_writer', 'config'], {}), True, 'import tensorflow as tf\n'), (77, 'tensorflow.Summary', 'tf.Summary', ([], {'value': 'values'}), True, 'import tensorflow as tf\n'), (111, 'os.path.join', 'os.path.join', (['log_dir', '"""taco_pretrained"""'], {}), False, 'import os\n'), (112, 'os.path.join', 'os.path.join', (['log_dir', '"""plots"""'], {}), False, 'import os\n'), (113, 'os.path.join', 'os.path.join', (['log_dir', '"""wavs"""'], {}), False, 'import os\n'), (114, 'os.path.join', 'os.path.join', (['log_dir', '"""mel-spectrograms"""'], {}), False, 'import os\n'), (115, 'os.path.join', 'os.path.join', (['log_dir', '"""eval-dir"""'], {}), False, 'import os\n'), (116, 'os.path.join', 'os.path.join', (['eval_dir', '"""plots"""'], {}), False, 'import os\n'), (117, 'os.path.join', 'os.path.join', (['eval_dir', '"""wavs"""'], {}), False, 'import os\n'), (118, 'os.path.join', 'os.path.join', (['log_dir', '"""tacotron_events"""'], {}), False, 'import os\n'), (119, 'os.path.join', 'os.path.join', (['log_dir', '"""metas"""'], {}), False, 'import os\n'), (120, 'os.makedirs', 'os.makedirs', (['save_dir'], {'exist_ok': '(True)'}), False, 'import os\n'), (121, 'os.makedirs', 'os.makedirs', (['plot_dir'], {'exist_ok': '(True)'}), False, 'import os\n'), (122, 'os.makedirs', 'os.makedirs', (['wav_dir'], {'exist_ok': '(True)'}), False, 'import os\n'), (123, 'os.makedirs', 'os.makedirs', (['mel_dir'], {'exist_ok': '(True)'}), False, 'import os\n'), (124, 'os.makedirs', 'os.makedirs', (['eval_dir'], {'exist_ok': '(True)'}), False, 'import os\n'), (125, 'os.makedirs', 'os.makedirs', (['eval_plot_dir'], {'exist_ok': '(True)'}), False, 'import os\n'), (126, 'os.makedirs', 'os.makedirs', (['eval_wav_dir'], {'exist_ok': '(True)'}), False, 'import os\n'), (127, 'os.makedirs', 'os.makedirs', (['tensorboard_dir'], {'exist_ok': '(True)'}), False, 'import os\n'), (128, 'os.makedirs', 'os.makedirs', (['meta_folder'], {'exist_ok': '(True)'}), False, 'import os\n'), (131, 'os.path.join', 'os.path.join', (['save_dir', '"""tacotron_model.ckpt"""'], {}), False, 'import os\n'), (132, 'os.path.join', 'os.path.join', (['args.synthesizer_root', '"""train.txt"""'], {}), False, 'import os\n'), (140, 'tensorflow.set_random_seed', 'tf.set_random_seed', (['hparams.tacotron_random_seed'], {}), True, 'import tensorflow as tf\n'), (143, 'tensorflow.train.Coordinator', 'tf.train.Coordinator', ([], {}), True, 'import tensorflow as tf\n'), (148, 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'name': '"""global_step"""', 'trainable': '(False)'}), True, 'import tensorflow as tf\n'), (153, 'os.path.join', 'os.path.join', (['meta_folder', '"""CharacterEmbeddings.tsv"""'], {}), False, 'import os\n'), (166, 'synthesizer.utils.ValueWindow', 'ValueWindow', (['(100)'], {}), False, 'from synthesizer.utils import ValueWindow, plot\n'), (167, 'synthesizer.utils.ValueWindow', 'ValueWindow', (['(100)'], {}), False, 'from synthesizer.utils import ValueWindow, plot\n'), (168, 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(5)'}), True, 'import tensorflow as tf\n'), (173, 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), True, 'import tensorflow as tf\n'), (36, 'tensorflow.variable_scope', 'tf.variable_scope', 
(['"""stats"""'], {}), True, 'import tensorflow as tf\n'), (40, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""before_loss"""', 'model.before_loss'], {}), True, 'import tensorflow as tf\n'), (41, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""after_loss"""', 'model.after_loss'], {}), True, 'import tensorflow as tf\n'), (49, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""regularization_loss"""', 'model.regularization_loss'], {}), True, 'import tensorflow as tf\n'), (50, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""stop_token_loss"""', 'model.stop_token_loss'], {}), True, 'import tensorflow as tf\n'), (51, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'model.loss'], {}), True, 'import tensorflow as tf\n'), (52, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""learning_rate"""', 'model.learning_rate'], {}), True, 'import tensorflow as tf\n'), (57, 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""gradient_norm"""', 'gradient_norms'], {}), True, 'import tensorflow as tf\n'), (60, 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), True, 'import tensorflow as tf\n'), (66, 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""Tacotron_eval_model/eval_stats/eval_before_loss"""', 'simple_value': 'before_loss'}), True, 'import tensorflow as tf\n'), (68, 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""Tacotron_eval_model/eval_stats/eval_after_loss"""', 'simple_value': 'after_loss'}), True, 'import tensorflow as tf\n'), (70, 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""Tacotron_eval_model/eval_stats/stop_token_loss"""', 'simple_value': 'stop_token_loss'}), True, 'import tensorflow as tf\n'), (72, 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""Tacotron_eval_model/eval_stats/eval_loss"""', 'simple_value': 'loss'}), True, 'import tensorflow as tf\n'), (86, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Tacotron_model"""'], {'reuse': 'tf.AUTO_REUSE'}), True, 'import tensorflow as tf\n'), (87, 'synthesizer.models.create_model', 'create_model', (['"""Tacotron"""', 'hparams'], {}), False, 'from synthesizer.models import create_model\n'), (99, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Tacotron_model"""'], {'reuse': 'tf.AUTO_REUSE'}), True, 'import tensorflow as tf\n'), (100, 'synthesizer.models.create_model', 'create_model', (['"""Tacotron"""', 'hparams'], {}), False, 'from synthesizer.models import create_model\n'), (137, 'synthesizer.hparams.hparams_debug_string', 'hparams_debug_string', ([], {}), False, 'from synthesizer.hparams import hparams_debug_string\n'), (144, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""datafeeder"""'], {}), True, 'import tensorflow as tf\n'), (145, 'synthesizer.feeder.Feeder', 'Feeder', (['coord', 'metadat_fpath', 'hparams'], {}), False, 'from synthesizer.feeder import Feeder\n'), (154, 'os.path.isfile', 'os.path.isfile', (['char_embedding_meta'], {}), False, 'import os\n'), (178, 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), True, 'import tensorflow as tf\n'), (38, 'tensorflow.summary.histogram', 'tf.summary.histogram', (["('mel_outputs %d' % i)", 'model.tower_mel_outputs[i]'], {}), True, 'import tensorflow as tf\n'), (39, 'tensorflow.summary.histogram', 'tf.summary.histogram', (["('mel_targets %d' % i)", 'model.tower_mel_targets[i]'], {}), True, 'import tensorflow as tf\n'), (44, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""linear_loss"""', 'model.linear_loss'], 
{}), True, 'import tensorflow as tf\n'), (54, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""teacher_forcing_ratio"""', 'model.ratio'], {}), True, 'import tensorflow as tf\n'), (56, 'tensorflow.norm', 'tf.norm', (['grad'], {}), True, 'import tensorflow as tf\n'), (58, 'tensorflow.reduce_max', 'tf.reduce_max', (['gradient_norms'], {}), True, 'import tensorflow as tf\n'), (75, 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""Tacotron_eval_model/eval_stats/eval_linear_loss"""', 'simple_value': 'linear_loss'}), True, 'import tensorflow as tf\n'), (82, 'datetime.datetime.now', 'datetime.now', ([], {}), False, 'from datetime import datetime\n'), (180, 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['tensorboard_dir', 'sess.graph'], {}), True, 'import tensorflow as tf\n'), (46, 'tensorflow.summary.histogram', 'tf.summary.histogram', (["('mel_outputs %d' % i)", 'model.tower_linear_outputs[i]'], {}), True, 'import tensorflow as tf\n'), (47, 'tensorflow.summary.histogram', 'tf.summary.histogram', (["('mel_targets %d' % i)", 'model.tower_linear_targets[i]'], {}), True, 'import tensorflow as tf\n'), (182, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (210, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (388, 'traceback.print_exc', 'traceback.print_exc', ([], {}), False, 'import traceback\n'), (188, 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['save_dir'], {}), True, 'import tensorflow as tf\n'), (219, 'numpy.isnan', 'np.isnan', (['loss'], {}), True, 'import numpy as np\n'), (291, 'synthesizer.audio.inv_mel_spectrogram', 'audio.inv_mel_spectrogram', (['mel_p.T', 'hparams'], {}), False, 'from synthesizer import infolog, audio\n'), (348, 'synthesizer.audio.inv_mel_spectrogram', 'audio.inv_mel_spectrogram', (['mel_prediction.T', 'hparams'], {}), False, 'from synthesizer import infolog, audio\n'), (373, 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['save_dir'], {}), True, 'import tensorflow as tf\n'), (212, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (261, 'synthesizer.audio.inv_linear_spectrogram', 'audio.inv_linear_spectrogram', (['lin_p.T', 'hparams'], {}), False, 'from synthesizer import infolog, audio\n'), (344, 'os.path.join', 'os.path.join', (['mel_dir', 'mel_filename'], {}), False, 'import os\n'), (369, 'synthesizer.utils.text.sequence_to_text', 'sequence_to_text', (['input_seq'], {}), False, 'from synthesizer.utils.text import sequence_to_text\n')]
yscoder-github/Language_Intelligent_Competition
23325173a6e6b228da575e8be55a538ce1dbcae6
# -*- coding: utf-8 -*- import json import logging import os import time import tensorflow as tf from layers import regularizer, residual_block, highway, conv, mask_logits, trilinear, total_params, position_embedding from optimizer import AdamWOptimizer from tensorflow.python.ops import array_ops from utils.dureader_eval import compute_bleu_rouge from utils.dureader_eval import normalize class Model(object): def __init__(self, vocab, config, demo=False): # logging self.logger = logging.getLogger("QANet") self.config = config self.demo = demo # basic config self.optim_type = config.optim self.learning_rate = config.learning_rate self.weight_decay = config.weight_decay self.use_dropout = config.dropout < 1 # length limit if not self.demo: self.max_p_num = config.max_p_num self.logger.info("numbers of passages %s" % self.max_p_num) else: self.max_p_num = 1 self.max_p_len = config.max_p_len self.max_q_len = config.max_q_len self.max_a_len = config.max_a_len # the vocab self.vocab = vocab # session info sess_config = tf.ConfigProto() sess_config.gpu_options.allow_growth = False self.sess = tf.Session(config=sess_config) self._build_graph() # save info self.saver = tf.train.Saver() # initialize the model self.sess.run(tf.global_variables_initializer()) def _build_graph(self): """ Builds the computation graph with Tensorflow """ start_t = time.time() self._setup_placeholders() self._embed() self._encode() self._fuse() self._decode() self._compute_loss() self._create_train_op() self.logger.info('Time to build graph: {} s'.format(time.time() - start_t)) param_num = total_params(tf.trainable_variables()) self.logger.info('There are {} parameters in the model'.format(param_num)) """ :description: Placeholders """ def _setup_placeholders(self): if self.demo: self.c = tf.placeholder(tf.int32, [None, self.config.max_p_len], "context") self.q = tf.placeholder(tf.int32, [None, self.config.max_q_len], "question") self.ch = tf.placeholder(tf.int32, [None, self.config.max_p_len, self.config.max_ch_len], "context_char") self.qh = tf.placeholder(tf.int32, [None, self.config.max_q_len, self.config.max_ch_len], "question_char") self.start_label = tf.placeholder(tf.int32, [None], "answer_label1") self.end_label = tf.placeholder(tf.int32, [None], "answer_label2") else: self.c = tf.placeholder(tf.int32, [self.config.batch_size * self.max_p_num, self.config.max_p_len], "context") self.q = tf.placeholder(tf.int32, [self.config.batch_size * self.max_p_num, self.config.max_q_len], "question") self.ch = tf.placeholder(tf.int32, [self.config.batch_size * self.max_p_num, self.config.max_p_len, self.config.max_ch_len], "context_char") self.qh = tf.placeholder(tf.int32, [self.config.batch_size * self.max_p_num, self.config.max_q_len, self.config.max_ch_len], "question_char") self.start_label = tf.placeholder(tf.int32, [self.config.batch_size], "answer_label1") self.end_label = tf.placeholder(tf.int32, [self.config.batch_size], "answer_label2") self.position_emb = position_embedding(self.c, 2 * self.config.hidden_size) self.c_mask = tf.cast(self.c, tf.bool) # index 0 is padding symbol N x self.max_p_num, max_p_len self.q_mask = tf.cast(self.q, tf.bool) self.c_len = tf.reduce_sum(tf.cast(self.c_mask, tf.int32), axis=1) self.q_len = tf.reduce_sum(tf.cast(self.q_mask, tf.int32), axis=1) self.dropout = tf.placeholder(tf.float32, name="dropout") self.global_step = tf.Variable(0, name="global_step", trainable=False) """ :descrition: The embedding layer, question and passage share embeddings """ def _embed(self): with 
tf.variable_scope('word_char_embedding'): if self.config.fix_pretrained_vector: self.pretrained_word_mat = tf.get_variable("word_emb_mat", [self.vocab.word_size() - 2, self.vocab.word_embed_dim], dtype=tf.float32, initializer=tf.constant_initializer( self.vocab.word_embeddings[2:], dtype=tf.float32), trainable=False) self.word_pad_unk_mat = tf.get_variable("word_unk_pad", [2, self.pretrained_word_mat.get_shape()[1]], dtype=tf.float32, initializer=tf.constant_initializer( self.vocab.word_embeddings[:2], dtype=tf.float32), trainable=True) self.word_mat = tf.concat([self.word_pad_unk_mat, self.pretrained_word_mat], axis=0) self.pretrained_char_mat = tf.get_variable("char_emb_mat", [self.vocab.char_size() - 2, self.vocab.char_embed_dim], dtype=tf.float32, initializer=tf.constant_initializer( self.vocab.char_embeddings[2:], dtype=tf.float32), trainable=False) self.char_pad_unk_mat = tf.get_variable("char_unk_pad", [2, self.pretrained_char_mat.get_shape()[1]], dtype=tf.float32, initializer=tf.constant_initializer( self.vocab.char_embeddings[:2], dtype=tf.float32), trainable=True) self.char_mat = tf.concat([self.char_pad_unk_mat, self.pretrained_char_mat], axis=0) else: self.word_mat = tf.get_variable( 'word_embeddings', shape=[self.vocab.word_size(), self.vocab.word_embed_dim], initializer=tf.constant_initializer(self.vocab.word_embeddings), trainable=True ) self.char_mat = tf.get_variable( 'char_embeddings', shape=[self.vocab.char_size(), self.vocab.char_embed_dim], initializer=tf.constant_initializer(self.vocab.char_embeddings), trainable=True ) self.ch_len = tf.reshape(tf.reduce_sum( tf.cast(tf.cast(self.ch, tf.bool), tf.int32), axis=2), [-1]) self.qh_len = tf.reshape(tf.reduce_sum( tf.cast(tf.cast(self.qh, tf.bool), tf.int32), axis=2), [-1]) N, PL, QL, CL, d, dc, nh = self._params() if self.config.fix_pretrained_vector: dc = self.char_mat.get_shape()[-1] with tf.variable_scope("Input_Embedding_Layer"): ch_emb = tf.reshape(tf.nn.embedding_lookup( self.char_mat, self.ch), [N * PL * self.max_p_num, CL, dc]) qh_emb = tf.reshape(tf.nn.embedding_lookup( self.char_mat, self.qh), [N * QL * self.max_p_num, CL, dc]) ch_emb = tf.nn.dropout(ch_emb, 1.0 - 0.5 * self.dropout) qh_emb = tf.nn.dropout(qh_emb, 1.0 - 0.5 * self.dropout) ch_emb = conv(ch_emb, d, bias=True, activation=tf.nn.relu, kernel_size=5, name="char_conv", reuse=None) qh_emb = conv(qh_emb, d, bias=True, activation=tf.nn.relu, kernel_size=5, name="char_conv", reuse=True) ch_emb = tf.reduce_max(ch_emb, axis=1) qh_emb = tf.reduce_max(qh_emb, axis=1) ch_emb = tf.reshape(ch_emb, [N * self.max_p_num, PL, -1]) qh_emb = tf.reshape(qh_emb, [N * self.max_p_num, QL, -1]) c_emb = tf.nn.dropout(tf.nn.embedding_lookup(self.word_mat, self.c), 1.0 - self.dropout) q_emb = tf.nn.dropout(tf.nn.embedding_lookup(self.word_mat, self.q), 1.0 - self.dropout) c_emb = tf.concat([c_emb, ch_emb], axis=2) q_emb = tf.concat([q_emb, qh_emb], axis=2) self.c_emb = highway(c_emb, size=d, scope="highway", dropout=self.dropout, reuse=None) self.q_emb = highway(q_emb, size=d, scope="highway", dropout=self.dropout, reuse=True) def _encode(self): N, PL, QL, CL, d, dc, nh = self._params() if self.config.fix_pretrained_vector: dc = self.char_mat.get_shape()[-1] with tf.variable_scope("Embedding_Encoder_Layer"): self.c_embed_encoding = residual_block(self.c_emb, num_blocks=1, num_conv_layers=2, kernel_size=7, mask=self.c_mask, num_filters=d, num_heads=nh, seq_len=self.c_len, scope="Encoder_Residual_Block", bias=False, dropout=self.dropout) self.q_embed_encoding = 
residual_block(self.q_emb, num_blocks=1, num_conv_layers=2, kernel_size=7, mask=self.q_mask, num_filters=d, num_heads=nh, seq_len=self.q_len, scope="Encoder_Residual_Block", reuse=True, # Share the weights between passage and question bias=False, dropout=self.dropout) def _fuse(self): with tf.variable_scope("Context_to_Query_Attention_Layer"): C = tf.tile(tf.expand_dims(self.c_embed_encoding, 2), [1, 1, self.max_q_len, 1]) Q = tf.tile(tf.expand_dims(self.q_embed_encoding, 1), [1, self.max_p_len, 1, 1]) S = trilinear([C, Q, C * Q], input_keep_prob=1.0 - self.dropout) mask_q = tf.expand_dims(self.q_mask, 1) S_ = tf.nn.softmax(mask_logits(S, mask=mask_q)) mask_c = tf.expand_dims(self.c_mask, 2) S_T = tf.transpose(tf.nn.softmax(mask_logits(S, mask=mask_c), dim=1), (0, 2, 1)) self.c2q = tf.matmul(S_, self.q_embed_encoding) self.q2c = tf.matmul(tf.matmul(S_, S_T), self.c_embed_encoding) self.attention_outputs = [self.c_embed_encoding, self.c2q, self.c_embed_encoding * self.c2q, self.c_embed_encoding * self.q2c] N, PL, QL, CL, d, dc, nh = self._params() if self.config.fix_pretrained_vector: dc = self.char_mat.get_shape()[-1] with tf.variable_scope("Model_Encoder_Layer"): inputs = tf.concat(self.attention_outputs, axis=-1) self.enc = [conv(inputs, d, name="input_projection")] for i in range(3): if i % 2 == 0: self.enc[i] = tf.nn.dropout(self.enc[i], 1.0 - self.dropout) self.enc.append( residual_block(self.enc[i], num_blocks=1, num_conv_layers=2, kernel_size=5, mask=self.c_mask, num_filters=d, num_heads=nh, seq_len=self.c_len, scope="Model_Encoder", bias=False, reuse=True if i > 0 else None, dropout=self.dropout) ) for i, item in enumerate(self.enc): self.enc[i] = tf.reshape(self.enc[i], [N, -1, self.enc[i].get_shape()[-1]]) def _decode(self): N, PL, QL, CL, d, dc, nh = self._params() if self.config.use_position_attn: start_logits = tf.squeeze( conv(self._attention(tf.concat([self.enc[1], self.enc[2]], axis=-1), name="attn1"), 1, bias=False, name="start_pointer"), -1) end_logits = tf.squeeze( conv(self._attention(tf.concat([self.enc[1], self.enc[3]], axis=-1), name="attn2"), 1, bias=False, name="end_pointer"), -1) else: start_logits = tf.squeeze( conv(tf.concat([self.enc[1], self.enc[2]], axis=-1), 1, bias=False, name="start_pointer"), -1) end_logits = tf.squeeze( conv(tf.concat([self.enc[1], self.enc[3]], axis=-1), 1, bias=False, name="end_pointer"), -1) self.logits = [mask_logits(start_logits, mask=tf.reshape(self.c_mask, [N, -1])), mask_logits(end_logits, mask=tf.reshape(self.c_mask, [N, -1]))] self.logits1, self.logits2 = [l for l in self.logits] outer = tf.matmul(tf.expand_dims(tf.nn.softmax(self.logits1), axis=2), tf.expand_dims(tf.nn.softmax(self.logits2), axis=1)) outer = tf.matrix_band_part(outer, 0, self.max_a_len) self.yp1 = tf.argmax(tf.reduce_max(outer, axis=2), axis=1) self.yp2 = tf.argmax(tf.reduce_max(outer, axis=1), axis=1) def _compute_loss(self): def focal_loss(logits, labels, weights=None, alpha=0.25, gamma=2): logits = tf.nn.sigmoid(logits) zeros = array_ops.zeros_like(logits, dtype=logits.dtype) pos_p_sub = array_ops.where(labels > zeros, labels - logits, zeros) neg_p_sub = array_ops.where(labels > zeros, zeros, logits) cross_ent = - alpha * (pos_p_sub ** gamma) * tf.log(tf.clip_by_value(logits, 1e-8, 1.0)) \ - (1 - alpha) * (neg_p_sub ** gamma) * tf.log(tf.clip_by_value(1.0 - logits, 1e-8, 1.0)) return tf.reduce_sum(cross_ent, 1) start_label = tf.one_hot(self.start_label, tf.shape(self.logits1)[1], axis=1) end_label = tf.one_hot(self.end_label, tf.shape(self.logits2)[1], 
axis=1)
        if self.config.loss_type == 'cross_entropy':
            start_loss = tf.nn.softmax_cross_entropy_with_logits(
                logits=self.logits1, labels=start_label)
            end_loss = tf.nn.softmax_cross_entropy_with_logits(
                logits=self.logits2, labels=end_label)
            self.loss = tf.reduce_mean(start_loss + end_loss)
        else:
            start_loss = focal_loss(tf.nn.softmax(self.logits1, -1), start_label)
            end_loss = focal_loss(tf.nn.softmax(self.logits2, -1), end_label)
            self.loss = tf.reduce_mean(start_loss + end_loss)
        self.logger.info("loss type %s" % self.config.loss_type)

        self.all_params = tf.trainable_variables()

        if self.config.l2_norm is not None:
            self.logger.info("applying l2 loss")
            variables = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
            l2_loss = tf.contrib.layers.apply_regularization(regularizer, variables)
            self.loss += l2_loss

        if self.config.decay is not None:
            self.var_ema = tf.train.ExponentialMovingAverage(self.config.decay)
            ema_op = self.var_ema.apply(tf.trainable_variables())
            with tf.control_dependencies([ema_op]):
                self.loss = tf.identity(self.loss)

                self.shadow_vars = []
                self.global_vars = []
                for var in tf.global_variables():
                    v = self.var_ema.average(var)
                    if v:
                        self.shadow_vars.append(v)
                        self.global_vars.append(var)
                self.assign_vars = []
                for g, v in zip(self.global_vars, self.shadow_vars):
                    self.assign_vars.append(tf.assign(g, v))

    def _create_train_op(self):
        # self.lr = tf.minimum(self.learning_rate, self.learning_rate / tf.log(999.) * tf.log(tf.cast(self.global_step, tf.float32) + 1))
        self.lr = self.learning_rate

        if self.optim_type == 'adagrad':
            self.optimizer = tf.train.AdagradOptimizer(self.lr)
        elif self.optim_type == 'adam':
            self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
        elif self.optim_type == 'rprop':
            self.optimizer = tf.train.RMSPropOptimizer(self.lr)
        elif self.optim_type == 'sgd':
            self.optimizer = tf.train.GradientDescentOptimizer(self.lr)
        elif self.optim_type == 'adamW':
            self.optimizer = AdamWOptimizer(self.config.weight_decay, learning_rate=self.lr)
        else:
            raise NotImplementedError('Unsupported optimizer: {}'.format(self.optim_type))
        self.logger.info("applying optimizer %s" % self.optim_type)
        if self.config.clip_weight:
            # clip gradients by global norm (the config flag is named clip_weight)
            tvars = tf.trainable_variables()
            grads = tf.gradients(self.loss, tvars)
            grads, _ = tf.clip_by_global_norm(grads, clip_norm=self.config.max_norm_grad)
            grad_var_pairs = zip(grads, tvars)
            self.train_op = self.optimizer.apply_gradients(grad_var_pairs, name='apply_grad')
        else:
            self.train_op = self.optimizer.minimize(self.loss)

    def _attention(self, output, name='attn', reuse=None):
        with tf.variable_scope(name, reuse=reuse):
            W = tf.get_variable(name="attn_W",
                                shape=[2 * self.config.hidden_size, 2 * self.config.hidden_size],
                                initializer=tf.contrib.layers.xavier_initializer(),
                                # initializer=tf.truncated_normal_initializer(),
                                # initializer=tf.keras.initializers.lecun_normal(),
                                dtype=tf.float32)
            V = tf.get_variable(name="attn_V", shape=[2 * self.config.hidden_size, 1],
                                initializer=tf.contrib.layers.xavier_initializer(),
                                # initializer=tf.truncated_normal_initializer(),
                                # initializer=tf.keras.initializers.lecun_normal(),
                                dtype=tf.float32)
            U = tf.get_variable(name="attn_U",
                                shape=[2 * self.config.hidden_size, 2 * self.config.hidden_size],
                                initializer=tf.contrib.layers.xavier_initializer(),
                                # initializer=tf.truncated_normal_initializer(),
                                # initializer=tf.keras.initializers.lecun_normal(),
                                dtype=tf.float32)

            self.position_emb = tf.reshape(self.position_emb, [-1, 2 * self.config.hidden_size])
            shape = tf.shape(output)
            output = tf.reshape(output, [-1, 2 * self.config.hidden_size])

            atten_hidden = tf.tanh(
                tf.add(
                    tf.matmul(self.position_emb, W),
                    tf.matmul(output, U)))
            alpha = tf.nn.softmax(
                tf.reshape(tf.matmul(atten_hidden, V), [-1, shape[1], 1]), axis=1)
            output = tf.reshape(output, [-1, shape[1], 2 * self.config.hidden_size])
            C = tf.multiply(alpha, output)
            return tf.concat([output, C], axis=-1)

    def _train_epoch(self, train_batches, dropout):
        """
        Trains the model for a single epoch.
        :param train_batches: iterable of training batches
        :param dropout: dropout rate fed to the dropout placeholder
        :return: average loss per sample over the epoch
        """
        total_num, total_loss = 0, 0
        log_every_n_batch, n_batch_loss = 1000, 0
        for bitx, batch in enumerate(train_batches, 1):
            feed_dict = {self.c: batch['passage_token_ids'],
                         self.q: batch['question_token_ids'],
                         self.qh: batch['question_char_ids'],
                         self.ch: batch["passage_char_ids"],
                         self.start_label: batch['start_id'],
                         self.end_label: batch['end_id'],
                         self.dropout: dropout}
            try:
                _, loss, global_step = self.sess.run([self.train_op, self.loss, self.global_step], feed_dict)
                total_loss += loss * len(batch['raw_data'])
                total_num += len(batch['raw_data'])
                n_batch_loss += loss
            except Exception as e:
                self.logger.warning('Skipping batch {} due to error: {}'.format(bitx, e))
                continue

            if log_every_n_batch > 0 and bitx % log_every_n_batch == 0:
                self.logger.info('Average loss from batch {} to {} is {}'.format(
                    bitx - log_every_n_batch + 1, bitx, n_batch_loss / log_every_n_batch))
                n_batch_loss = 0
        self.logger.info('Trained on {} samples in this epoch'.format(total_num))
        return 1.0 * total_loss / total_num

    def _params(self):
        return (self.config.batch_size if not self.demo else 1,
                self.max_p_len, self.max_q_len, self.config.max_ch_len,
                self.config.hidden_size, self.config.char_embed_size, self.config.head_size)

    def train(self, data, epochs, batch_size, save_dir, save_prefix, dropout=0.0, evaluate=True):
        pad_id = self.vocab.get_word_id(self.vocab.pad_token)
        pad_char_id = self.vocab.get_char_id(self.vocab.pad_token)
        max_rouge_l = 0
        for epoch in range(1, epochs + 1):
            self.logger.info('Training the model for epoch {}'.format(epoch))
            train_batches = data.next_batch('train', batch_size, pad_id, pad_char_id, shuffle=True)
            train_loss = self._train_epoch(train_batches, dropout)
            self.logger.info('Average train loss for epoch {} is {}'.format(epoch, train_loss))

            if evaluate:
                self.logger.info('Evaluating the model after epoch {}'.format(epoch))
                if data.dev_set is not None:
                    eval_batches = data.next_batch('dev', batch_size, pad_id, pad_char_id, shuffle=False)
                    eval_loss, bleu_rouge = self.evaluate(eval_batches)
                    self.logger.info('Dev eval loss {}'.format(eval_loss))
                    self.logger.info('Dev eval result: {}'.format(bleu_rouge))
                    if bleu_rouge['Rouge-L'] > max_rouge_l:
                        self.save(save_dir, save_prefix)
                        max_rouge_l = bleu_rouge['Rouge-L']
                else:
                    self.logger.warning('No dev set is loaded for evaluation in the dataset!')
            else:
                self.save(save_dir, save_prefix + '_' + str(epoch))

    def evaluate(self, eval_batches, result_dir=None, result_prefix=None, save_full_info=False):
        pred_answers, ref_answers = [], []
        total_loss, total_num = 0, 0
        for b_itx, batch in enumerate(eval_batches):
            feed_dict = {self.c: batch['passage_token_ids'],
                         self.q: batch['question_token_ids'],
                         self.qh: batch['question_char_ids'],
                         self.ch: batch["passage_char_ids"],
                         self.start_label: batch['start_id'],
                         self.end_label: batch['end_id'],
                         self.dropout: 0.0}
            try:
                start_probs, end_probs, loss = self.sess.run([self.logits1, self.logits2, self.loss], feed_dict)
                total_loss += loss * len(batch['raw_data'])
                total_num += len(batch['raw_data'])

                padded_p_len = len(batch['passage_token_ids'][0])
                for sample, start_prob, end_prob in zip(batch['raw_data'], start_probs, end_probs):
                    best_answer = self.find_best_answer(sample, start_prob, end_prob, padded_p_len)
                    if save_full_info:
                        sample['pred_answers'] = [best_answer]
                        pred_answers.append(sample)
                    else:
                        pred_answers.append({'question_id': sample['question_id'],
                                             'question_type': sample['question_type'],
                                             'answers': [best_answer],
                                             'entity_answers': [[]],
                                             'yesno_answers': []})
                    if 'answers' in sample:
                        ref_answers.append({'question_id': sample['question_id'],
                                            'question_type': sample['question_type'],
                                            'answers': sample['answers'],
                                            'entity_answers': [[]],
                                            'yesno_answers': []})
            except Exception as e:
                self.logger.warning('Skipping eval batch {} due to error: {}'.format(b_itx, e))
                continue

        if result_dir is not None and result_prefix is not None:
            result_file = os.path.join(result_dir, result_prefix + '.json')
            with open(result_file, 'w') as fout:
                for pred_answer in pred_answers:
                    fout.write(json.dumps(pred_answer, ensure_ascii=False) + '\n')
            self.logger.info('Saving {} results to {}'.format(result_prefix, result_file))

        # this average loss is invalid on the test set, since we don't have true start_id and end_id
        ave_loss = 1.0 * total_loss / total_num
        # compute the bleu and rouge scores if reference answers are provided
        if len(ref_answers) > 0:
            pred_dict, ref_dict = {}, {}
            for pred, ref in zip(pred_answers, ref_answers):
                question_id = ref['question_id']
                if len(ref['answers']) > 0:
                    pred_dict[question_id] = normalize(pred['answers'])
                    ref_dict[question_id] = normalize(ref['answers'])
            bleu_rouge = compute_bleu_rouge(pred_dict, ref_dict)
        else:
            bleu_rouge = None
        return ave_loss, bleu_rouge

    def find_best_answer(self, sample, start_prob, end_prob, padded_p_len):
        """
        Finds the best answer for a sample given start_prob and end_prob for each position.
        A sample contains multiple passages, so this calls find_best_answer_for_passage
        for each passage and keeps the highest-scoring span.
        """
        best_p_idx, best_span, best_score = None, None, 0
        for p_idx, passage in enumerate(sample['passages']):
            if p_idx >= self.max_p_num:
                continue
            passage_len = min(self.max_p_len, len(passage['passage_tokens']))
            answer_span, score = self.find_best_answer_for_passage(
                start_prob[p_idx * padded_p_len: (p_idx + 1) * padded_p_len],
                end_prob[p_idx * padded_p_len: (p_idx + 1) * padded_p_len],
                passage_len)
            if score > best_score:
                best_score = score
                best_p_idx = p_idx
                best_span = answer_span
        if best_p_idx is None or best_span is None:
            best_answer = ''
        else:
            best_answer = ''.join(
                sample['passages'][best_p_idx]['passage_tokens'][best_span[0]: best_span[1] + 1])
        return best_answer

    def find_best_answer_for_passage(self, start_probs, end_probs, passage_len=None):
        """
        Finds the best answer with the maximum start_prob * end_prob from a single passage
        """
        if passage_len is None:
            passage_len = len(start_probs)
        else:
            passage_len = min(len(start_probs), passage_len)
        best_start, best_end, max_prob = -1, -1, 0
        for start_idx in range(passage_len):
            for ans_len in range(self.max_a_len):
                end_idx = start_idx + ans_len
                if end_idx >= passage_len:
                    # longer answers would also run past the passage
                    break
                prob = start_probs[start_idx] * end_probs[end_idx]
                if prob > max_prob:
                    best_start = start_idx
                    best_end = end_idx
                    max_prob = prob
        return (best_start, best_end), max_prob

    def save(self, model_dir, model_prefix):
        """
        Saves the model into model_dir with model_prefix as the model indicator
        """
        self.saver.save(self.sess, os.path.join(model_dir, model_prefix))
        self.logger.info('Model saved in {}, with prefix {}.'.format(model_dir, model_prefix))

    def restore(self, model_dir, model_prefix):
        """
        Restores the model from model_dir with model_prefix as the model indicator
        """
        self.saver.restore(self.sess, os.path.join(model_dir, model_prefix))
        self.logger.info('Model restored from {}, with prefix {}.'.format(model_dir, model_prefix))
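find_best_answer_for_passage above scores every candidate span with start_prob * end_prob, keeping only spans no longer than max_a_len. The graph-side decoding recorded in the API list below does the same search with an outer product banded by tf.matrix_band_part. A minimal NumPy sketch of that vectorized version; the array values and max_a_len are illustrative, not taken from the repo:

# Vectorized sketch of the span search in find_best_answer_for_passage:
# maximize start_probs[i] * end_probs[j] subject to 0 <= j - i < max_a_len.
import numpy as np

def best_span(start_probs, end_probs, max_a_len):
    # outer[i, j] = start_probs[i] * end_probs[j]
    outer = np.outer(start_probs, end_probs)
    # Zero out everything outside the valid upper band of span lengths.
    i, j = np.indices(outer.shape)
    outer[(j < i) | (j - i >= max_a_len)] = 0.0
    start, end = np.unravel_index(np.argmax(outer), outer.shape)
    return (int(start), int(end)), float(outer[start, end])

start_probs = np.array([0.1, 0.6, 0.2, 0.1])
end_probs = np.array([0.1, 0.2, 0.5, 0.2])
print(best_span(start_probs, end_probs, max_a_len=3))  # -> ((1, 2), ~0.30)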
[ "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.concat", "tensorflow.matrix_band_part", "tensorflow.contrib.layers.apply_regularization", "tensorflow.control_dependencies", "tensorflow.reduce_sum", "tensorflow.cast", "tensorflow.global_variables", "tensorflow.train.ExponentialMovingAverage", "tensorflow.train.AdamOptimizer", "tensorflow.Variable", "tensorflow.get_collection", "tensorflow.python.ops.array_ops.where", "tensorflow.gradients", "tensorflow.ConfigProto", "tensorflow.contrib.layers.xavier_initializer", "tensorflow.Session", "tensorflow.trainable_variables", "tensorflow.train.Saver", "tensorflow.nn.dropout", "tensorflow.matmul", "tensorflow.train.AdagradOptimizer", "tensorflow.nn.sigmoid", "tensorflow.train.RMSPropOptimizer", "tensorflow.shape", "tensorflow.python.ops.array_ops.zeros_like", "tensorflow.placeholder", "tensorflow.identity", "tensorflow.global_variables_initializer", "tensorflow.train.GradientDescentOptimizer", "tensorflow.nn.embedding_lookup", "tensorflow.clip_by_value", "tensorflow.reduce_max", "tensorflow.multiply", "tensorflow.nn.softmax", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.assign", "tensorflow.expand_dims", "tensorflow.constant_initializer", "tensorflow.clip_by_global_norm", "tensorflow.variable_scope" ]
2019/MRC/models/QANet_dureader/model.py
[(21, 'logging.getLogger', 'logging.getLogger', (['"""QANet"""'], {}), False, 'import logging\n'), (46, 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), True, 'import tensorflow as tf\n'), (48, 'tensorflow.Session', 'tf.Session', ([], {'config': 'sess_config'}), True, 'import tensorflow as tf\n'), (53, 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), True, 'import tensorflow as tf\n'), (62, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (99, 'layers.position_embedding', 'position_embedding', (['self.c', '(2 * self.config.hidden_size)'], {}), False, 'from layers import regularizer, residual_block, highway, conv, mask_logits, trilinear, total_params, position_embedding\n'), (100, 'tensorflow.cast', 'tf.cast', (['self.c', 'tf.bool'], {}), True, 'import tensorflow as tf\n'), (101, 'tensorflow.cast', 'tf.cast', (['self.q', 'tf.bool'], {}), True, 'import tensorflow as tf\n'), (104, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""dropout"""'}), True, 'import tensorflow as tf\n'), (106, 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'name': '"""global_step"""', 'trainable': '(False)'}), True, 'import tensorflow as tf\n'), (299, 'tensorflow.matrix_band_part', 'tf.matrix_band_part', (['outer', '(0)', 'self.max_a_len'], {}), True, 'import tensorflow as tf\n'), (328, 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), True, 'import tensorflow as tf\n'), (372, 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), True, 'import tensorflow as tf\n'), (56, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (71, 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), True, 'import tensorflow as tf\n'), (81, 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, self.config.max_p_len]', '"""context"""'], {}), True, 'import tensorflow as tf\n'), (82, 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, self.config.max_q_len]', '"""question"""'], {}), True, 'import tensorflow as tf\n'), (83, 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, self.config.max_p_len, self.config.max_ch_len]', '"""context_char"""'], {}), True, 'import tensorflow as tf\n'), (84, 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, self.config.max_q_len, self.config.max_ch_len]', '"""question_char"""'], {}), True, 'import tensorflow as tf\n'), (85, 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]', '"""answer_label1"""'], {}), True, 'import tensorflow as tf\n'), (86, 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]', '"""answer_label2"""'], {}), True, 'import tensorflow as tf\n'), (88, 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[self.config.batch_size * self.max_p_num, self.config.max_p_len]', '"""context"""'], {}), True, 'import tensorflow as tf\n'), (90, 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[self.config.batch_size * self.max_p_num, self.config.max_q_len]', '"""question"""'], {}), True, 'import tensorflow as tf\n'), (92, 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[self.config.batch_size * self.max_p_num, self.config.max_p_len, self.\n config.max_ch_len]', '"""context_char"""'], {}), True, 'import tensorflow as tf\n'), (94, 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[self.config.batch_size * self.max_p_num, self.config.max_q_len, self.\n config.max_ch_len]', '"""question_char"""'], {}), True, 'import 
tensorflow as tf\n'), (96, 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[self.config.batch_size]', '"""answer_label1"""'], {}), True, 'import tensorflow as tf\n'), (97, 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[self.config.batch_size]', '"""answer_label2"""'], {}), True, 'import tensorflow as tf\n'), (102, 'tensorflow.cast', 'tf.cast', (['self.c_mask', 'tf.int32'], {}), True, 'import tensorflow as tf\n'), (103, 'tensorflow.cast', 'tf.cast', (['self.q_mask', 'tf.int32'], {}), True, 'import tensorflow as tf\n'), (113, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""word_char_embedding"""'], {}), True, 'import tensorflow as tf\n'), (174, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Input_Embedding_Layer"""'], {}), True, 'import tensorflow as tf\n'), (179, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['ch_emb', '(1.0 - 0.5 * self.dropout)'], {}), True, 'import tensorflow as tf\n'), (180, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['qh_emb', '(1.0 - 0.5 * self.dropout)'], {}), True, 'import tensorflow as tf\n'), (182, 'layers.conv', 'conv', (['ch_emb', 'd'], {'bias': '(True)', 'activation': 'tf.nn.relu', 'kernel_size': '(5)', 'name': '"""char_conv"""', 'reuse': 'None'}), False, 'from layers import regularizer, residual_block, highway, conv, mask_logits, trilinear, total_params, position_embedding\n'), (184, 'layers.conv', 'conv', (['qh_emb', 'd'], {'bias': '(True)', 'activation': 'tf.nn.relu', 'kernel_size': '(5)', 'name': '"""char_conv"""', 'reuse': '(True)'}), False, 'from layers import regularizer, residual_block, highway, conv, mask_logits, trilinear, total_params, position_embedding\n'), (187, 'tensorflow.reduce_max', 'tf.reduce_max', (['ch_emb'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (188, 'tensorflow.reduce_max', 'tf.reduce_max', (['qh_emb'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (190, 'tensorflow.reshape', 'tf.reshape', (['ch_emb', '[N * self.max_p_num, PL, -1]'], {}), True, 'import tensorflow as tf\n'), (191, 'tensorflow.reshape', 'tf.reshape', (['qh_emb', '[N * self.max_p_num, QL, -1]'], {}), True, 'import tensorflow as tf\n'), (196, 'tensorflow.concat', 'tf.concat', (['[c_emb, ch_emb]'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (197, 'tensorflow.concat', 'tf.concat', (['[q_emb, qh_emb]'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (199, 'layers.highway', 'highway', (['c_emb'], {'size': 'd', 'scope': '"""highway"""', 'dropout': 'self.dropout', 'reuse': 'None'}), False, 'from layers import regularizer, residual_block, highway, conv, mask_logits, trilinear, total_params, position_embedding\n'), (200, 'layers.highway', 'highway', (['q_emb'], {'size': 'd', 'scope': '"""highway"""', 'dropout': 'self.dropout', 'reuse': '(True)'}), False, 'from layers import regularizer, residual_block, highway, conv, mask_logits, trilinear, total_params, position_embedding\n'), (206, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Embedding_Encoder_Layer"""'], {}), True, 'import tensorflow as tf\n'), (207, 'layers.residual_block', 'residual_block', (['self.c_emb'], {'num_blocks': '(1)', 'num_conv_layers': '(2)', 'kernel_size': '(7)', 'mask': 'self.c_mask', 'num_filters': 'd', 'num_heads': 'nh', 'seq_len': 'self.c_len', 'scope': '"""Encoder_Residual_Block"""', 'bias': '(False)', 'dropout': 'self.dropout'}), False, 'from layers import regularizer, residual_block, highway, conv, mask_logits, trilinear, total_params, position_embedding\n'), (218, 'layers.residual_block', 'residual_block', 
(['self.q_emb'], {'num_blocks': '(1)', 'num_conv_layers': '(2)', 'kernel_size': '(7)', 'mask': 'self.q_mask', 'num_filters': 'd', 'num_heads': 'nh', 'seq_len': 'self.q_len', 'scope': '"""Encoder_Residual_Block"""', 'reuse': '(True)', 'bias': '(False)', 'dropout': 'self.dropout'}), False, 'from layers import regularizer, residual_block, highway, conv, mask_logits, trilinear, total_params, position_embedding\n'), (233, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Context_to_Query_Attention_Layer"""'], {}), True, 'import tensorflow as tf\n'), (236, 'layers.trilinear', 'trilinear', (['[C, Q, C * Q]'], {'input_keep_prob': '(1.0 - self.dropout)'}), False, 'from layers import regularizer, residual_block, highway, conv, mask_logits, trilinear, total_params, position_embedding\n'), (237, 'tensorflow.expand_dims', 'tf.expand_dims', (['self.q_mask', '(1)'], {}), True, 'import tensorflow as tf\n'), (239, 'tensorflow.expand_dims', 'tf.expand_dims', (['self.c_mask', '(2)'], {}), True, 'import tensorflow as tf\n'), (241, 'tensorflow.matmul', 'tf.matmul', (['S_', 'self.q_embed_encoding'], {}), True, 'import tensorflow as tf\n'), (249, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Model_Encoder_Layer"""'], {}), True, 'import tensorflow as tf\n'), (250, 'tensorflow.concat', 'tf.concat', (['self.attention_outputs'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (300, 'tensorflow.reduce_max', 'tf.reduce_max', (['outer'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (301, 'tensorflow.reduce_max', 'tf.reduce_max', (['outer'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (305, 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['logits'], {}), True, 'import tensorflow as tf\n'), (306, 'tensorflow.python.ops.array_ops.zeros_like', 'array_ops.zeros_like', (['logits'], {'dtype': 'logits.dtype'}), False, 'from tensorflow.python.ops import array_ops\n'), (307, 'tensorflow.python.ops.array_ops.where', 'array_ops.where', (['(labels > zeros)', '(labels - logits)', 'zeros'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (308, 'tensorflow.python.ops.array_ops.where', 'array_ops.where', (['(labels > zeros)', 'zeros', 'logits'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (311, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['cross_ent', '(1)'], {}), True, 'import tensorflow as tf\n'), (317, 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'self.logits1', 'labels': 'start_label'}), True, 'import tensorflow as tf\n'), (319, 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'self.logits2', 'labels': 'end_label'}), True, 'import tensorflow as tf\n'), (321, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(start_loss + end_loss)'], {}), True, 'import tensorflow as tf\n'), (325, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(start_loss + end_loss)'], {}), True, 'import tensorflow as tf\n'), (332, 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.REGULARIZATION_LOSSES'], {}), True, 'import tensorflow as tf\n'), (333, 'tensorflow.contrib.layers.apply_regularization', 'tf.contrib.layers.apply_regularization', (['regularizer', 'variables'], {}), True, 'import tensorflow as tf\n'), (337, 'tensorflow.train.ExponentialMovingAverage', 'tf.train.ExponentialMovingAverage', (['self.config.decay'], {}), True, 'import tensorflow as tf\n'), (358, 'tensorflow.train.AdagradOptimizer', 'tf.train.AdagradOptimizer', (['self.lr'], {}), True, 
'import tensorflow as tf\n'), (375, 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), True, 'import tensorflow as tf\n'), (376, 'tensorflow.gradients', 'tf.gradients', (['self.loss', 'tvars'], {}), True, 'import tensorflow as tf\n'), (377, 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['grads'], {'clip_norm': 'self.config.max_norm_grad'}), True, 'import tensorflow as tf\n'), (384, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {'reuse': 'reuse'}), True, 'import tensorflow as tf\n'), (403, 'tensorflow.reshape', 'tf.reshape', (['self.position_emb', '[-1, 2 * self.config.hidden_size]'], {}), True, 'import tensorflow as tf\n'), (404, 'tensorflow.shape', 'tf.shape', (['output'], {}), True, 'import tensorflow as tf\n'), (405, 'tensorflow.reshape', 'tf.reshape', (['output', '[-1, 2 * self.config.hidden_size]'], {}), True, 'import tensorflow as tf\n'), (413, 'tensorflow.reshape', 'tf.reshape', (['output', '[-1, shape[1], 2 * self.config.hidden_size]'], {}), True, 'import tensorflow as tf\n'), (414, 'tensorflow.multiply', 'tf.multiply', (['alpha', 'output'], {}), True, 'import tensorflow as tf\n'), (415, 'tensorflow.concat', 'tf.concat', (['[output, C]'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (527, 'os.path.join', 'os.path.join', (['result_dir', "(result_prefix + '.json')"], {}), False, 'import os\n'), (544, 'utils.dureader_eval.compute_bleu_rouge', 'compute_bleu_rouge', (['pred_dict', 'ref_dict'], {}), False, 'from utils.dureader_eval import compute_bleu_rouge\n'), (599, 'os.path.join', 'os.path.join', (['model_dir', 'model_prefix'], {}), False, 'import os\n'), (606, 'os.path.join', 'os.path.join', (['model_dir', 'model_prefix'], {}), False, 'import os\n'), (131, 'tensorflow.concat', 'tf.concat', (['[self.word_pad_unk_mat, self.pretrained_word_mat]'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (148, 'tensorflow.concat', 'tf.concat', (['[self.char_pad_unk_mat, self.pretrained_char_mat]'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (175, 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['self.char_mat', 'self.ch'], {}), True, 'import tensorflow as tf\n'), (177, 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['self.char_mat', 'self.qh'], {}), True, 'import tensorflow as tf\n'), (193, 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['self.word_mat', 'self.c'], {}), True, 'import tensorflow as tf\n'), (194, 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['self.word_mat', 'self.q'], {}), True, 'import tensorflow as tf\n'), (234, 'tensorflow.expand_dims', 'tf.expand_dims', (['self.c_embed_encoding', '(2)'], {}), True, 'import tensorflow as tf\n'), (235, 'tensorflow.expand_dims', 'tf.expand_dims', (['self.q_embed_encoding', '(1)'], {}), True, 'import tensorflow as tf\n'), (238, 'layers.mask_logits', 'mask_logits', (['S'], {'mask': 'mask_q'}), False, 'from layers import regularizer, residual_block, highway, conv, mask_logits, trilinear, total_params, position_embedding\n'), (242, 'tensorflow.matmul', 'tf.matmul', (['S_', 'S_T'], {}), True, 'import tensorflow as tf\n'), (251, 'layers.conv', 'conv', (['inputs', 'd'], {'name': '"""input_projection"""'}), False, 'from layers import regularizer, residual_block, highway, conv, mask_logits, trilinear, total_params, position_embedding\n'), (296, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['self.logits1'], {}), True, 'import tensorflow as tf\n'), (297, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['self.logits2'], {}), True, 
'import tensorflow as tf\n'), (313, 'tensorflow.shape', 'tf.shape', (['self.logits1'], {}), True, 'import tensorflow as tf\n'), (314, 'tensorflow.shape', 'tf.shape', (['self.logits2'], {}), True, 'import tensorflow as tf\n'), (323, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['self.logits1', '(-1)'], {}), True, 'import tensorflow as tf\n'), (324, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['self.logits2', '(-1)'], {}), True, 'import tensorflow as tf\n'), (338, 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), True, 'import tensorflow as tf\n'), (339, 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[ema_op]'], {}), True, 'import tensorflow as tf\n'), (340, 'tensorflow.identity', 'tf.identity', (['self.loss'], {}), True, 'import tensorflow as tf\n'), (344, 'tensorflow.global_variables', 'tf.global_variables', ([], {}), True, 'import tensorflow as tf\n'), (360, 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.lr'}), True, 'import tensorflow as tf\n'), (70, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (240, 'layers.mask_logits', 'mask_logits', (['S'], {'mask': 'mask_c'}), False, 'from layers import regularizer, residual_block, highway, conv, mask_logits, trilinear, total_params, position_embedding\n'), (254, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['self.enc[i]', '(1.0 - self.dropout)'], {}), True, 'import tensorflow as tf\n'), (256, 'layers.residual_block', 'residual_block', (['self.enc[i]'], {'num_blocks': '(1)', 'num_conv_layers': '(2)', 'kernel_size': '(5)', 'mask': 'self.c_mask', 'num_filters': 'd', 'num_heads': 'nh', 'seq_len': 'self.c_len', 'scope': '"""Model_Encoder"""', 'bias': '(False)', 'reuse': '(True if i > 0 else None)', 'dropout': 'self.dropout'}), False, 'from layers import regularizer, residual_block, highway, conv, mask_logits, trilinear, total_params, position_embedding\n'), (287, 'tensorflow.concat', 'tf.concat', (['[self.enc[1], self.enc[2]]'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (289, 'tensorflow.concat', 'tf.concat', (['[self.enc[1], self.enc[3]]'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (291, 'tensorflow.reshape', 'tf.reshape', (['self.c_mask', '[N, -1]'], {}), True, 'import tensorflow as tf\n'), (292, 'tensorflow.reshape', 'tf.reshape', (['self.c_mask', '[N, -1]'], {}), True, 'import tensorflow as tf\n'), (362, 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (['self.lr'], {}), True, 'import tensorflow as tf\n'), (387, 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), True, 'import tensorflow as tf\n'), (392, 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), True, 'import tensorflow as tf\n'), (398, 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), True, 'import tensorflow as tf\n'), (409, 'tensorflow.matmul', 'tf.matmul', (['self.position_emb', 'W'], {}), True, 'import tensorflow as tf\n'), (410, 'tensorflow.matmul', 'tf.matmul', (['output', 'U'], {}), True, 'import tensorflow as tf\n'), (412, 'tensorflow.matmul', 'tf.matmul', (['atten_hidden', 'V'], {}), True, 'import tensorflow as tf\n'), (542, 'utils.dureader_eval.normalize', 'normalize', (["pred['answers']"], {}), False, 'from utils.dureader_eval import normalize\n'), (543, 'utils.dureader_eval.normalize', 'normalize', (["ref['answers']"], {}), False, 'from utils.dureader_eval import normalize\n'), (119, 
'tensorflow.constant_initializer', 'tf.constant_initializer', (['self.vocab.word_embeddings[2:]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (126, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['self.vocab.word_embeddings[:2]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (136, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['self.vocab.char_embeddings[2:]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (143, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['self.vocab.char_embeddings[:2]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (154, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['self.vocab.word_embeddings'], {}), True, 'import tensorflow as tf\n'), (161, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['self.vocab.char_embeddings'], {}), True, 'import tensorflow as tf\n'), (166, 'tensorflow.cast', 'tf.cast', (['self.ch', 'tf.bool'], {}), True, 'import tensorflow as tf\n'), (168, 'tensorflow.cast', 'tf.cast', (['self.qh', 'tf.bool'], {}), True, 'import tensorflow as tf\n'), (280, 'tensorflow.concat', 'tf.concat', (['[self.enc[1], self.enc[2]]'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (283, 'tensorflow.concat', 'tf.concat', (['[self.enc[1], self.enc[3]]'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (309, 'tensorflow.clip_by_value', 'tf.clip_by_value', (['logits', '(1e-08)', '(1.0)'], {}), True, 'import tensorflow as tf\n'), (310, 'tensorflow.clip_by_value', 'tf.clip_by_value', (['(1.0 - logits)', '(1e-08)', '(1.0)'], {}), True, 'import tensorflow as tf\n'), (351, 'tensorflow.assign', 'tf.assign', (['g', 'v'], {}), True, 'import tensorflow as tf\n'), (364, 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['self.lr'], {}), True, 'import tensorflow as tf\n'), (366, 'optimizer.AdamWOptimizer', 'AdamWOptimizer', (['self.config.weight_decay'], {'learning_rate': 'self.lr'}), False, 'from optimizer import AdamWOptimizer\n'), (530, 'json.dumps', 'json.dumps', (['pred_answer'], {'ensure_ascii': '(False)'}), False, 'import json\n')]
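The decay branch in the model code above keeps an ExponentialMovingAverage shadow copy of every trainable variable and builds assign_vars ops to swap the averaged weights into the live variables before evaluation. A minimal TF 1.x sketch of that pattern in isolation; the decay value and the single variable are illustrative:

# TF 1.x sketch (assumed decay of 0.99) of the EMA pattern used above:
# update a shadow average after each step, then swap it in for evaluation.
import tensorflow as tf

w = tf.Variable(0.0, name='w')
step = tf.assign_add(w, 1.0)                  # stand-in for an optimizer update
ema = tf.train.ExponentialMovingAverage(0.99)
with tf.control_dependencies([step]):
    train_op = ema.apply([w])                 # shadow update runs after the step
restore_shadow = tf.assign(w, ema.average(w))  # shadow -> live, like assign_vars

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(5):
        sess.run(train_op)
    print(sess.run(w))        # 5.0: the raw weight
    sess.run(restore_shadow)
    print(sess.run(w))        # smoothed weight, pulled toward the initial value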
zsweet/R-Net
b1b35ff4799e46263923f25e373444e9867a6cf4
import tensorflow as tf INF = 1e30 class cudnn_gru: def __init__(self, num_layers, num_units, batch_size, input_size, keep_prob=1.0, is_train=None, scope=None): self.num_layers = num_layers self.grus = [] self.inits = [] self.dropout_mask = [] for layer in range(num_layers): input_size_ = input_size if layer == 0 else 2 * num_units gru_fw = tf.contrib.cudnn_rnn.CudnnGRU(1, num_units) gru_bw = tf.contrib.cudnn_rnn.CudnnGRU(1, num_units) init_fw = tf.tile(tf.Variable( tf.zeros([1, 1, num_units])), [1, batch_size, 1]) init_bw = tf.tile(tf.Variable( tf.zeros([1, 1, num_units])), [1, batch_size, 1]) mask_fw = dropout(tf.ones([1, batch_size, input_size_], dtype=tf.float32), keep_prob=keep_prob, is_train=is_train, mode=None) mask_bw = dropout(tf.ones([1, batch_size, input_size_], dtype=tf.float32), keep_prob=keep_prob, is_train=is_train, mode=None) self.grus.append((gru_fw, gru_bw, )) self.inits.append((init_fw, init_bw, )) self.dropout_mask.append((mask_fw, mask_bw, )) def __call__(self, inputs, seq_len, keep_prob=1.0, is_train=None, concat_layers=True): outputs = [tf.transpose(inputs, [1, 0, 2])] for layer in range(self.num_layers): gru_fw, gru_bw = self.grus[layer] init_fw, init_bw = self.inits[layer] mask_fw, mask_bw = self.dropout_mask[layer] with tf.variable_scope("fw_{}".format(layer)): out_fw, _ = gru_fw( outputs[-1] * mask_fw, initial_state=(init_fw, )) with tf.variable_scope("bw_{}".format(layer)): inputs_bw = tf.reverse_sequence( outputs[-1] * mask_bw, seq_lengths=seq_len, seq_dim=0, batch_dim=1) out_bw, _ = gru_bw(inputs_bw, initial_state=(init_bw, )) out_bw = tf.reverse_sequence( out_bw, seq_lengths=seq_len, seq_dim=0, batch_dim=1) outputs.append(tf.concat([out_fw, out_bw], axis=2)) if concat_layers: res = tf.concat(outputs[1:], axis=2) else: res = outputs[-1] res = tf.transpose(res, [1, 0, 2]) return res class native_gru: def __init__(self, num_layers, num_units, batch_size, input_size, keep_prob=1.0, is_train=None, scope="native_gru"): self.num_layers = num_layers self.grus = [] self.inits = [] self.dropout_mask = [] self.scope = scope for layer in range(num_layers): input_size_ = input_size if layer == 0 else 2 * num_units gru_fw = tf.contrib.rnn.GRUCell(num_units) gru_bw = tf.contrib.rnn.GRUCell(num_units) init_fw = tf.tile(tf.Variable( tf.zeros([1, num_units])), [batch_size, 1]) init_bw = tf.tile(tf.Variable( tf.zeros([1, num_units])), [batch_size, 1]) mask_fw = dropout(tf.ones([batch_size, 1, input_size_], dtype=tf.float32), keep_prob=keep_prob, is_train=is_train, mode=None) mask_bw = dropout(tf.ones([batch_size, 1, input_size_], dtype=tf.float32), keep_prob=keep_prob, is_train=is_train, mode=None) self.grus.append((gru_fw, gru_bw, )) self.inits.append((init_fw, init_bw, )) self.dropout_mask.append((mask_fw, mask_bw, )) def __call__(self, inputs, seq_len, keep_prob=1.0, is_train=None, concat_layers=True): outputs = [inputs] with tf.variable_scope(self.scope): for layer in range(self.num_layers): gru_fw, gru_bw = self.grus[layer] init_fw, init_bw = self.inits[layer] mask_fw, mask_bw = self.dropout_mask[layer] with tf.variable_scope("fw_{}".format(layer)): out_fw, _ = tf.nn.dynamic_rnn( gru_fw, outputs[-1] * mask_fw, seq_len, initial_state=init_fw, dtype=tf.float32) with tf.variable_scope("bw_{}".format(layer)): inputs_bw = tf.reverse_sequence( outputs[-1] * mask_bw, seq_lengths=seq_len, seq_dim=1, batch_dim=0) out_bw, _ = tf.nn.dynamic_rnn( gru_bw, inputs_bw, seq_len, initial_state=init_bw, dtype=tf.float32) out_bw = tf.reverse_sequence( out_bw, seq_lengths=seq_len, seq_dim=1, 
batch_dim=0) outputs.append(tf.concat([out_fw, out_bw], axis=2)) if concat_layers: res = tf.concat(outputs[1:], axis=2) else: res = outputs[-1] return res class ptr_net: def __init__(self, batch, hidden, keep_prob=1.0, is_train=None, scope="ptr_net"): self.gru = tf.contrib.rnn.GRUCell(hidden) self.batch = batch self.scope = scope self.keep_prob = keep_prob self.is_train = is_train self.dropout_mask = dropout(tf.ones( [batch, hidden], dtype=tf.float32), keep_prob=keep_prob, is_train=is_train) def __call__(self, init, match, d, mask): with tf.variable_scope(self.scope): d_match = dropout(match, keep_prob=self.keep_prob, is_train=self.is_train) inp, logits1 = pointer(d_match, init * self.dropout_mask, d, mask) d_inp = dropout(inp, keep_prob=self.keep_prob, is_train=self.is_train) _, state = self.gru(d_inp, init) tf.get_variable_scope().reuse_variables() _, logits2 = pointer(d_match, state * self.dropout_mask, d, mask) return logits1, logits2 def dropout(args, keep_prob, is_train, mode="recurrent"): if keep_prob < 1.0: noise_shape = None scale = 1.0 shape = tf.shape(args) if mode == "embedding": noise_shape = [shape[0], 1] scale = keep_prob if mode == "recurrent" and len(args.get_shape().as_list()) == 3: noise_shape = [shape[0], 1, shape[-1]] args = tf.cond(is_train, lambda: tf.nn.dropout( args, keep_prob, noise_shape=noise_shape) * scale, lambda: args) return args def softmax_mask(val, mask): return -INF * (1 - tf.cast(mask, tf.float32)) + val def pointer(inputs, state, hidden, mask, scope="pointer"): with tf.variable_scope(scope): u = tf.concat([tf.tile(tf.expand_dims(state, axis=1), [1, tf.shape(inputs)[1], 1]), inputs], axis=2) #[N,PL,2d] s0 = tf.nn.tanh(dense(u, hidden, use_bias=False, scope="s0")) s = dense(s0, 1, use_bias=False, scope="s") s1 = softmax_mask(tf.squeeze(s, [2]), mask)#[N,PL] a = tf.expand_dims(tf.nn.softmax(s1), axis=2)#[N,PL,1] res = tf.reduce_sum(a * inputs, axis=1) return res, s1 # attention_sum probability def summ(memory, hidden, mask, keep_prob=1.0, is_train=None, scope="summ"): with tf.variable_scope(scope): d_memory = dropout(memory, keep_prob=keep_prob, is_train=is_train) s0 = tf.nn.tanh(dense(d_memory, hidden, scope="s0")) s = dense(s0, 1, use_bias=False, scope="s") s1 = softmax_mask(tf.squeeze(s, [2]), mask) a = tf.expand_dims(tf.nn.softmax(s1), axis=2) res = tf.reduce_sum(a * memory, axis=1) return res def dot_attention(inputs, memory, mask, hidden, keep_prob=1.0, is_train=None, scope="dot_attention"): with tf.variable_scope(scope): d_inputs = dropout(inputs, keep_prob=keep_prob, is_train=is_train) d_memory = dropout(memory, keep_prob=keep_prob, is_train=is_train) JX = tf.shape(inputs)[1] with tf.variable_scope("attention"): inputs_ = tf.nn.relu( dense(d_inputs, hidden, use_bias=False, scope="inputs")) memory_ = tf.nn.relu( dense(d_memory, hidden, use_bias=False, scope="memory")) outputs = tf.matmul(inputs_, tf.transpose( memory_, [0, 2, 1])) / (hidden ** 0.5) mask = tf.tile(tf.expand_dims(mask, axis=1), [1, JX, 1]) logits = tf.nn.softmax(softmax_mask(outputs, mask)) outputs = tf.matmul(logits, memory) res = tf.concat([inputs, outputs], axis=2) with tf.variable_scope("gate"): dim = res.get_shape().as_list()[-1] d_res = dropout(res, keep_prob=keep_prob, is_train=is_train) gate = tf.nn.sigmoid(dense(d_res, dim, use_bias=False)) return res * gate def dense(inputs, hidden, use_bias=True, scope="dense"): with tf.variable_scope(scope): shape = tf.shape(inputs) dim = inputs.get_shape().as_list()[-1] out_shape = [shape[idx] for idx in range( 
len(inputs.get_shape().as_list()) - 1)] + [hidden] flat_inputs = tf.reshape(inputs, [-1, dim]) W = tf.get_variable("W", [dim, hidden]) res = tf.matmul(flat_inputs, W) if use_bias: b = tf.get_variable( "b", [hidden], initializer=tf.constant_initializer(0.)) res = tf.nn.bias_add(res, b) res = tf.reshape(res, out_shape) return res
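softmax_mask above adds -INF to padded positions so they receive effectively zero probability after the softmax; pointer and dot_attention both rely on it to attend only over real tokens. A small NumPy sketch of the same masking idea, with toy values rather than the R-Net weights:

# NumPy sketch of the masked softmax behind softmax_mask/pointer above:
# padded positions (mask == 0) are pushed to -INF before normalizing.
import numpy as np

INF = 1e30

def softmax_mask(val, mask):
    return -INF * (1 - mask.astype(np.float32)) + val

def softmax(x):
    e = np.exp(x - x.max())
    return e / e.sum()

scores = np.array([2.0, 1.0, 0.5, 0.3])   # attention logits, toy values
mask = np.array([1, 1, 0, 0])             # last two positions are padding
probs = softmax(softmax_mask(scores, mask))
print(probs)  # ~[0.73, 0.27, 0.0, 0.0]: padding gets no attention weight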
[ "tensorflow.get_variable", "tensorflow.nn.dynamic_rnn", "tensorflow.concat", "tensorflow.contrib.rnn.GRUCell", "tensorflow.zeros", "tensorflow.reduce_sum", "tensorflow.cast", "tensorflow.squeeze", "tensorflow.nn.dropout", "tensorflow.matmul", "tensorflow.shape", "tensorflow.contrib.cudnn_rnn.CudnnGRU", "tensorflow.reverse_sequence", "tensorflow.nn.bias_add", "tensorflow.nn.softmax", "tensorflow.transpose", "tensorflow.reshape", "tensorflow.ones", "tensorflow.expand_dims", "tensorflow.constant_initializer", "tensorflow.variable_scope", "tensorflow.get_variable_scope" ]
func.py
[(49, 'tensorflow.transpose', 'tf.transpose', (['res', '[1, 0, 2]'], {}), True, 'import tensorflow as tf\n'), (104, 'tensorflow.contrib.rnn.GRUCell', 'tf.contrib.rnn.GRUCell', (['hidden'], {}), True, 'import tensorflow as tf\n'), (129, 'tensorflow.shape', 'tf.shape', (['args'], {}), True, 'import tensorflow as tf\n'), (145, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (151, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(a * inputs)'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (156, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (162, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(a * memory)'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (167, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (193, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (194, 'tensorflow.shape', 'tf.shape', (['inputs'], {}), True, 'import tensorflow as tf\n'), (198, 'tensorflow.reshape', 'tf.reshape', (['inputs', '[-1, dim]'], {}), True, 'import tensorflow as tf\n'), (199, 'tensorflow.get_variable', 'tf.get_variable', (['"""W"""', '[dim, hidden]'], {}), True, 'import tensorflow as tf\n'), (200, 'tensorflow.matmul', 'tf.matmul', (['flat_inputs', 'W'], {}), True, 'import tensorflow as tf\n'), (205, 'tensorflow.reshape', 'tf.reshape', (['res', 'out_shape'], {}), True, 'import tensorflow as tf\n'), (15, 'tensorflow.contrib.cudnn_rnn.CudnnGRU', 'tf.contrib.cudnn_rnn.CudnnGRU', (['(1)', 'num_units'], {}), True, 'import tensorflow as tf\n'), (16, 'tensorflow.contrib.cudnn_rnn.CudnnGRU', 'tf.contrib.cudnn_rnn.CudnnGRU', (['(1)', 'num_units'], {}), True, 'import tensorflow as tf\n'), (30, 'tensorflow.transpose', 'tf.transpose', (['inputs', '[1, 0, 2]'], {}), True, 'import tensorflow as tf\n'), (46, 'tensorflow.concat', 'tf.concat', (['outputs[1:]'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (63, 'tensorflow.contrib.rnn.GRUCell', 'tf.contrib.rnn.GRUCell', (['num_units'], {}), True, 'import tensorflow as tf\n'), (64, 'tensorflow.contrib.rnn.GRUCell', 'tf.contrib.rnn.GRUCell', (['num_units'], {}), True, 'import tensorflow as tf\n'), (79, 'tensorflow.variable_scope', 'tf.variable_scope', (['self.scope'], {}), True, 'import tensorflow as tf\n'), (96, 'tensorflow.concat', 'tf.concat', (['outputs[1:]'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (109, 'tensorflow.ones', 'tf.ones', (['[batch, hidden]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (113, 'tensorflow.variable_scope', 'tf.variable_scope', (['self.scope'], {}), True, 'import tensorflow as tf\n'), (149, 'tensorflow.squeeze', 'tf.squeeze', (['s', '[2]'], {}), True, 'import tensorflow as tf\n'), (150, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['s1'], {}), True, 'import tensorflow as tf\n'), (160, 'tensorflow.squeeze', 'tf.squeeze', (['s', '[2]'], {}), True, 'import tensorflow as tf\n'), (161, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['s1'], {}), True, 'import tensorflow as tf\n'), (171, 'tensorflow.shape', 'tf.shape', (['inputs'], {}), True, 'import tensorflow as tf\n'), (173, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""attention"""'], {}), True, 'import tensorflow as tf\n'), (182, 'tensorflow.matmul', 'tf.matmul', (['logits', 'memory'], {}), True, 'import tensorflow as tf\n'), (183, 'tensorflow.concat', 'tf.concat', (['[inputs, outputs]'], {'axis': '(2)'}), True, 'import 
tensorflow as tf\n'), (185, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""gate"""'], {}), True, 'import tensorflow as tf\n'), (204, 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['res', 'b'], {}), True, 'import tensorflow as tf\n'), (21, 'tensorflow.ones', 'tf.ones', (['[1, batch_size, input_size_]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (23, 'tensorflow.ones', 'tf.ones', (['[1, batch_size, input_size_]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (39, 'tensorflow.reverse_sequence', 'tf.reverse_sequence', (['(outputs[-1] * mask_bw)'], {'seq_lengths': 'seq_len', 'seq_dim': '(0)', 'batch_dim': '(1)'}), True, 'import tensorflow as tf\n'), (42, 'tensorflow.reverse_sequence', 'tf.reverse_sequence', (['out_bw'], {'seq_lengths': 'seq_len', 'seq_dim': '(0)', 'batch_dim': '(1)'}), True, 'import tensorflow as tf\n'), (44, 'tensorflow.concat', 'tf.concat', (['[out_fw, out_bw]'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (69, 'tensorflow.ones', 'tf.ones', (['[batch_size, 1, input_size_]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (71, 'tensorflow.ones', 'tf.ones', (['[batch_size, 1, input_size_]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (141, 'tensorflow.cast', 'tf.cast', (['mask', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (180, 'tensorflow.expand_dims', 'tf.expand_dims', (['mask'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (18, 'tensorflow.zeros', 'tf.zeros', (['[1, 1, num_units]'], {}), True, 'import tensorflow as tf\n'), (20, 'tensorflow.zeros', 'tf.zeros', (['[1, 1, num_units]'], {}), True, 'import tensorflow as tf\n'), (66, 'tensorflow.zeros', 'tf.zeros', (['[1, num_units]'], {}), True, 'import tensorflow as tf\n'), (68, 'tensorflow.zeros', 'tf.zeros', (['[1, num_units]'], {}), True, 'import tensorflow as tf\n'), (85, 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['gru_fw', '(outputs[-1] * mask_fw)', 'seq_len'], {'initial_state': 'init_fw', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (88, 'tensorflow.reverse_sequence', 'tf.reverse_sequence', (['(outputs[-1] * mask_bw)'], {'seq_lengths': 'seq_len', 'seq_dim': '(1)', 'batch_dim': '(0)'}), True, 'import tensorflow as tf\n'), (90, 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['gru_bw', 'inputs_bw', 'seq_len'], {'initial_state': 'init_bw', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (92, 'tensorflow.reverse_sequence', 'tf.reverse_sequence', (['out_bw'], {'seq_lengths': 'seq_len', 'seq_dim': '(1)', 'batch_dim': '(0)'}), True, 'import tensorflow as tf\n'), (94, 'tensorflow.concat', 'tf.concat', (['[out_fw, out_bw]'], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (120, 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), True, 'import tensorflow as tf\n'), (135, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['args', 'keep_prob'], {'noise_shape': 'noise_shape'}), True, 'import tensorflow as tf\n'), (146, 'tensorflow.expand_dims', 'tf.expand_dims', (['state'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (178, 'tensorflow.transpose', 'tf.transpose', (['memory_', '[0, 2, 1]'], {}), True, 'import tensorflow as tf\n'), (203, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (146, 'tensorflow.shape', 'tf.shape', (['inputs'], {}), True, 'import tensorflow as tf\n')]
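The dropout helper in func.py samples its noise with shape [batch, 1, dim] in "recurrent" mode, so one mask is shared across all time steps of a sequence (variational dropout); that is why cudnn_gru and native_gru precompute mask_fw/mask_bw once per layer. A NumPy sketch of the shared-mask idea, with toy shapes and an assumed keep_prob:

# NumPy sketch of recurrent (variational) dropout as used above: one mask
# per sequence, broadcast over all time steps, scaled by 1/keep_prob.
import numpy as np

rng = np.random.default_rng(0)
batch, time, dim, keep_prob = 2, 4, 3, 0.5

x = np.ones((batch, time, dim))
# noise_shape = [batch, 1, dim]: the time axis shares a single mask.
mask = (rng.random((batch, 1, dim)) < keep_prob) / keep_prob
y = x * mask
print(y[0])  # every time step of a sequence is dropped identically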
indigo-dc/retinopathy_test
5e87be2a67bbbc0b82f6ca258324e80068ef9407
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains definitions for Residual Networks. Residual networks ('v1' ResNets) were originally proposed in: [1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun Deep Residual Learning for Image Recognition. arXiv:1512.03385 The full preactivation 'v2' ResNet variant was introduced by: [2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun Identity Mappings in Deep Residual Networks. arXiv: 1603.05027 The key difference of the full preactivation 'v2' variant compared to the 'v1' variant in [1] is the use of batch normalization before every weight layer rather than after. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf _BATCH_NORM_DECAY = 0.997 _BATCH_NORM_EPSILON = 1e-5 DEFAULT_VERSION = 2 DEFAULT_DTYPE = tf.float32 CASTABLE_TYPES = (tf.float16,) ALLOWED_TYPES = (DEFAULT_DTYPE,) + CASTABLE_TYPES ################################################################################ # Convenience functions for building the ResNet model. ################################################################################ def batch_norm(inputs, training, data_format): """Performs a batch normalization using a standard set of parameters.""" # We set fused=True for a significant performance boost. See # https://www.tensorflow.org/performance/performance_guide#common_fused_ops return tf.layers.batch_normalization( inputs=inputs, axis=1 if data_format == 'channels_first' else 3, momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True, scale=True, training=training, fused=True) def fixed_padding(inputs, kernel_size, data_format): """Pads the input along the spatial dimensions independently of input size. Args: inputs: A tensor of size [batch, channels, height_in, width_in] or [batch, height_in, width_in, channels] depending on data_format. kernel_size: The kernel to be used in the conv2d or max_pool2d operation. Should be a positive integer. data_format: The input format ('channels_last' or 'channels_first'). Returns: A tensor with the same format as the input with the data either intact (if kernel_size == 1) or padded (if kernel_size > 1). """ pad_total = kernel_size - 1 pad_beg = pad_total // 2 pad_end = pad_total - pad_beg if data_format == 'channels_first': padded_inputs = tf.pad(inputs, [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]]) else: padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]]) return padded_inputs def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format): """Strided 2-D convolution with explicit padding.""" # The padding is consistent and is based only on `kernel_size`, not on the # dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone). 
if strides > 1: inputs = fixed_padding(inputs, kernel_size, data_format) return tf.layers.conv2d( inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides, padding=('SAME' if strides == 1 else 'VALID'), use_bias=False, kernel_initializer=tf.variance_scaling_initializer(), data_format=data_format) ################################################################################ # ResNet block definitions. ################################################################################ def _building_block_v1(inputs, filters, training, projection_shortcut, strides, data_format): """A single block for ResNet v1, without a bottleneck. Convolution then batch normalization then ReLU as described by: Deep Residual Learning for Image Recognition https://arxiv.org/pdf/1512.03385.pdf by Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, Dec 2015. Args: inputs: A tensor of size [batch, channels, height_in, width_in] or [batch, height_in, width_in, channels] depending on data_format. filters: The number of filters for the convolutions. training: A Boolean for whether the model is in training or inference mode. Needed for batch normalization. projection_shortcut: The function to use for projection shortcuts (typically a 1x1 convolution when downsampling the input). strides: The block's stride. If greater than 1, this block will ultimately downsample the input. data_format: The input format ('channels_last' or 'channels_first'). Returns: The output tensor of the block; shape should match inputs. """ shortcut = inputs if projection_shortcut is not None: shortcut = projection_shortcut(inputs) shortcut = batch_norm(inputs=shortcut, training=training, data_format=data_format) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=3, strides=strides, data_format=data_format) inputs = batch_norm(inputs, training, data_format) inputs = tf.nn.relu(inputs) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=3, strides=1, data_format=data_format) inputs = batch_norm(inputs, training, data_format) inputs += shortcut inputs = tf.nn.relu(inputs) return inputs def _building_block_v2(inputs, filters, training, projection_shortcut, strides, data_format): """A single block for ResNet v2, without a bottleneck. Batch normalization then ReLu then convolution as described by: Identity Mappings in Deep Residual Networks https://arxiv.org/pdf/1603.05027.pdf by Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, Jul 2016. Args: inputs: A tensor of size [batch, channels, height_in, width_in] or [batch, height_in, width_in, channels] depending on data_format. filters: The number of filters for the convolutions. training: A Boolean for whether the model is in training or inference mode. Needed for batch normalization. projection_shortcut: The function to use for projection shortcuts (typically a 1x1 convolution when downsampling the input). strides: The block's stride. If greater than 1, this block will ultimately downsample the input. data_format: The input format ('channels_last' or 'channels_first'). Returns: The output tensor of the block; shape should match inputs. """ shortcut = inputs inputs = batch_norm(inputs, training, data_format) inputs = tf.nn.relu(inputs) # The projection shortcut should come after the first batch norm and ReLU # since it performs a 1x1 convolution. 
if projection_shortcut is not None:
    shortcut = projection_shortcut(inputs)

  inputs = conv2d_fixed_padding(
      inputs=inputs, filters=filters, kernel_size=3, strides=strides,
      data_format=data_format)

  inputs = batch_norm(inputs, training, data_format)
  inputs = tf.nn.relu(inputs)
  inputs = conv2d_fixed_padding(
      inputs=inputs, filters=filters, kernel_size=3, strides=1,
      data_format=data_format)

  return inputs + shortcut


def _bottleneck_block_v1(inputs, filters, training, projection_shortcut,
                         strides, data_format):
  """A single block for ResNet v1, with a bottleneck.

  Similar to _building_block_v1(), except using the "bottleneck" blocks
  described in:
    Convolution then batch normalization then ReLU as described by:
      Deep Residual Learning for Image Recognition
      https://arxiv.org/pdf/1512.03385.pdf
      by Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, Dec 2015.

  Args:
    inputs: A tensor of size [batch, channels, height_in, width_in] or
      [batch, height_in, width_in, channels] depending on data_format.
    filters: The number of filters for the convolutions.
    training: A Boolean for whether the model is in training or inference
      mode. Needed for batch normalization.
    projection_shortcut: The function to use for projection shortcuts
      (typically a 1x1 convolution when downsampling the input).
    strides: The block's stride. If greater than 1, this block will ultimately
      downsample the input.
    data_format: The input format ('channels_last' or 'channels_first').

  Returns:
    The output tensor of the block; shape should match inputs.
  """
  shortcut = inputs

  if projection_shortcut is not None:
    shortcut = projection_shortcut(inputs)
    shortcut = batch_norm(inputs=shortcut, training=training,
                          data_format=data_format)

  inputs = conv2d_fixed_padding(
      inputs=inputs, filters=filters, kernel_size=1, strides=1,
      data_format=data_format)
  inputs = batch_norm(inputs, training, data_format)
  inputs = tf.nn.relu(inputs)

  inputs = conv2d_fixed_padding(
      inputs=inputs, filters=filters, kernel_size=3, strides=strides,
      data_format=data_format)
  inputs = batch_norm(inputs, training, data_format)
  inputs = tf.nn.relu(inputs)

  inputs = conv2d_fixed_padding(
      inputs=inputs, filters=4 * filters, kernel_size=1, strides=1,
      data_format=data_format)
  inputs = batch_norm(inputs, training, data_format)
  inputs += shortcut
  inputs = tf.nn.relu(inputs)

  return inputs


def _bottleneck_block_v2(inputs, filters, training, projection_shortcut,
                         strides, data_format):
  """A single block for ResNet v2, with a bottleneck.

  Similar to _building_block_v2(), except using the "bottleneck" blocks
  described in:
    Convolution then batch normalization then ReLU as described by:
      Deep Residual Learning for Image Recognition
      https://arxiv.org/pdf/1512.03385.pdf
      by Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, Dec 2015.

  Adapted to the ordering conventions of:
    Batch normalization then ReLU then convolution as described by:
      Identity Mappings in Deep Residual Networks
      https://arxiv.org/pdf/1603.05027.pdf
      by Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, Jul 2016.

  Args:
    inputs: A tensor of size [batch, channels, height_in, width_in] or
      [batch, height_in, width_in, channels] depending on data_format.
    filters: The number of filters for the convolutions.
    training: A Boolean for whether the model is in training or inference
      mode. Needed for batch normalization.
    projection_shortcut: The function to use for projection shortcuts
      (typically a 1x1 convolution when downsampling the input).
    strides: The block's stride. If greater than 1, this block will ultimately
      downsample the input.
data_format: The input format ('channels_last' or 'channels_first'). Returns: The output tensor of the block; shape should match inputs. """ shortcut = inputs inputs = batch_norm(inputs, training, data_format) inputs = tf.nn.relu(inputs) # The projection shortcut should come after the first batch norm and ReLU # since it performs a 1x1 convolution. if projection_shortcut is not None: shortcut = projection_shortcut(inputs) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=1, strides=1, data_format=data_format) inputs = batch_norm(inputs, training, data_format) inputs = tf.nn.relu(inputs) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=3, strides=strides, data_format=data_format) inputs = batch_norm(inputs, training, data_format) inputs = tf.nn.relu(inputs) inputs = conv2d_fixed_padding( inputs=inputs, filters=4 * filters, kernel_size=1, strides=1, data_format=data_format) return inputs + shortcut def block_layer(inputs, filters, bottleneck, block_fn, blocks, strides, training, name, data_format): """Creates one layer of blocks for the ResNet model. Args: inputs: A tensor of size [batch, channels, height_in, width_in] or [batch, height_in, width_in, channels] depending on data_format. filters: The number of filters for the first convolution of the layer. bottleneck: Is the block created a bottleneck block. block_fn: The block to use within the model, either `building_block` or `bottleneck_block`. blocks: The number of blocks contained in the layer. strides: The stride to use for the first convolution of the layer. If greater than 1, this layer will ultimately downsample the input. training: Either True or False, whether we are currently training the model. Needed for batch norm. name: A string name for the tensor output of the block layer. data_format: The input format ('channels_last' or 'channels_first'). Returns: The output tensor of the block layer. """ # Bottleneck blocks end with 4x the number of filters as they start with filters_out = filters * 4 if bottleneck else filters def projection_shortcut(inputs): return conv2d_fixed_padding( inputs=inputs, filters=filters_out, kernel_size=1, strides=strides, data_format=data_format) # Only the first block per block_layer uses projection_shortcut and strides inputs = block_fn(inputs, filters, training, projection_shortcut, strides, data_format) for _ in range(1, blocks): inputs = block_fn(inputs, filters, training, None, 1, data_format) return tf.identity(inputs, name) class Model(object): """Base class for building the Resnet Model.""" def __init__(self, resnet_size, bottleneck, num_classes, num_filters, kernel_size, conv_stride, first_pool_size, first_pool_stride, block_sizes, block_strides, final_size, resnet_version=DEFAULT_VERSION, data_format=None, dtype=DEFAULT_DTYPE): """Creates a model for classifying an image. Args: resnet_size: A single integer for the size of the ResNet model. bottleneck: Use regular blocks or bottleneck blocks. num_classes: The number of classes used as labels. num_filters: The number of filters to use for the first block layer of the model. This number is then doubled for each subsequent block layer. kernel_size: The kernel size to use for convolution. conv_stride: stride size for the initial convolutional layer first_pool_size: Pool size to be used for the first pooling layer. If none, the first pooling layer is skipped. first_pool_stride: stride size for the first pooling layer. Not used if first_pool_size is None. 
block_sizes: A list containing n values, where n is the number of sets of
        block layers desired. Each value should be the number of blocks in the
        i-th set.
      block_strides: List of integers representing the desired stride size for
        each of the sets of block layers. Should be same length as block_sizes.
      final_size: The expected size of the model after the second pooling.
      resnet_version: Integer representing which version of the ResNet network
        to use. See README for details. Valid values: [1, 2]
      data_format: Input format ('channels_last', 'channels_first', or None).
        If set to None, the format is dependent on whether a GPU is available.
      dtype: The TensorFlow dtype to use for calculations. If not specified
        tf.float32 is used.

    Raises:
      ValueError: if invalid version is selected.
    """
    self.resnet_size = resnet_size

    if not data_format:
      data_format = (
          'channels_first' if tf.test.is_built_with_cuda() else 'channels_last')

    # ki: force channels_last so the model also runs on CPU-only machines,
    # overriding the GPU-based default chosen above.
    data_format = "channels_last"
    self.resnet_version = resnet_version
    if resnet_version not in (1, 2):
      raise ValueError(
          'Resnet version should be 1 or 2. See README for citations.')

    self.bottleneck = bottleneck
    if bottleneck:
      if resnet_version == 1:
        self.block_fn = _bottleneck_block_v1
      else:
        self.block_fn = _bottleneck_block_v2
    else:
      if resnet_version == 1:
        self.block_fn = _building_block_v1
      else:
        self.block_fn = _building_block_v2

    if dtype not in ALLOWED_TYPES:
      raise ValueError('dtype must be one of: {}'.format(ALLOWED_TYPES))

    self.data_format = data_format
    self.num_classes = num_classes
    self.num_filters = num_filters
    self.kernel_size = kernel_size
    self.conv_stride = conv_stride
    self.first_pool_size = first_pool_size
    self.first_pool_stride = first_pool_stride
    self.block_sizes = block_sizes
    self.block_strides = block_strides
    self.final_size = final_size
    self.dtype = dtype
    self.pre_activation = resnet_version == 2
    # self.filter_list = [256, 512, 1024, 2048]
    # self.filter_list = [64, 128, 256, 512]

  def _custom_dtype_getter(self, getter, name, shape=None, dtype=DEFAULT_DTYPE,
                           *args, **kwargs):
    """Creates variables in fp32, then casts to fp16 if necessary.

    This function is a custom getter. A custom getter is a function with the
    same signature as tf.get_variable, except it has an additional getter
    parameter. Custom getters can be passed as the `custom_getter` parameter of
    tf.variable_scope. Then, tf.get_variable will call the custom getter,
    instead of directly getting a variable itself. This can be used to change
    the types of variables that are retrieved with tf.get_variable.
    The `getter` parameter is the underlying variable getter, that would have
    been called if no custom getter was used. Custom getters typically get a
    variable with `getter`, then modify it in some way.

    This custom getter will create an fp32 variable. If a low precision
    (e.g. float16) variable was requested it will then cast the variable to the
    requested dtype. The reason we do not directly create variables in low
    precision dtypes is that applying small gradients to such variables may
    cause the variable not to change.

    Args:
      getter: The underlying variable getter, that has the same signature as
        tf.get_variable and returns a variable.
      name: The name of the variable to get.
      shape: The shape of the variable to get.
      dtype: The dtype of the variable to get. Note that if this is a low
        precision dtype, the variable will be created as a tf.float32 variable,
        then cast to the appropriate dtype.
      *args: Additional arguments to pass unmodified to getter.
**kwargs: Additional keyword arguments to pass unmodified to getter. Returns: A variable which is cast to fp16 if necessary. """ if dtype in CASTABLE_TYPES: var = getter(name, shape, tf.float32, *args, **kwargs) return tf.cast(var, dtype=dtype, name=name + '_cast') else: return getter(name, shape, dtype, *args, **kwargs) def _model_variable_scope(self): """Returns a variable scope that the model should be created under. If self.dtype is a castable type, model variable will be created in fp32 then cast to self.dtype before being used. Returns: A variable scope for the model. """ return tf.variable_scope('resnet_model', custom_getter=self._custom_dtype_getter) def __call__(self, inputs, training): """Add operations to classify a batch of input images. Args: inputs: A Tensor representing a batch of input images. training: A boolean. Set to True to add operations required only when training the classifier. Returns: A logits Tensor with shape [<batch_size>, self.num_classes]. """ with self._model_variable_scope(): if self.data_format == 'channels_first': # Convert the inputs from channels_last (NHWC) to channels_first (NCHW). # This provides a large performance boost on GPU. See # https://www.tensorflow.org/performance/performance_guide#data_formats inputs = tf.transpose(inputs, [0, 3, 1, 2]) inputs = conv2d_fixed_padding( inputs=inputs, filters=self.num_filters, kernel_size=self.kernel_size, strides=self.conv_stride, data_format=self.data_format) inputs = tf.identity(inputs, 'initial_conv') # We do not include batch normalization or activation functions in V2 # for the initial conv1 because the first ResNet unit will perform these # for both the shortcut and non-shortcut paths as part of the first # block's projection. Cf. Appendix of [2]. if self.resnet_version == 1: inputs = batch_norm(inputs, training, self.data_format) inputs = tf.nn.relu(inputs) if self.first_pool_size: inputs = tf.layers.max_pooling2d( inputs=inputs, pool_size=self.first_pool_size, strides=self.first_pool_stride, padding='SAME', data_format=self.data_format) inputs = tf.identity(inputs, 'initial_max_pool') for i, num_blocks in enumerate(self.block_sizes): num_filters = self.num_filters * (2**i) inputs = block_layer( inputs=inputs, filters=num_filters, bottleneck=self.bottleneck, block_fn=self.block_fn, blocks=num_blocks, strides=self.block_strides[i], training=training, name='block_layer{}'.format(i + 1), data_format=self.data_format) # Only apply the BN and ReLU for model that does pre_activation in each # building/bottleneck block, eg resnet V2. if self.pre_activation: inputs = batch_norm(inputs, training, self.data_format) inputs = tf.nn.relu(inputs) # The current top layer has shape # `batch_size x pool_size x pool_size x final_size`. # ResNet does an Average Pooling layer over pool_size, # but that is the same as doing a reduce_mean. We do a reduce_mean # here because it performs better than AveragePooling2D. axes = [2, 3] if self.data_format == 'channels_first' else [1, 2] inputs = tf.reduce_mean(inputs, axes, keepdims=True) inputs = tf.identity(inputs, 'final_reduce_mean') inputs = tf.reshape(inputs, [-1, self.final_size]) inputs = tf.layers.dense(inputs=inputs, units=self.num_classes) inputs = tf.identity(inputs, 'final_dense') return inputs
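# --- Editor's note (not part of the original file): the comment in
# `Model.__call__` above claims that `tf.reduce_mean` over the spatial axes
# gives the same result as a full average-pooling layer. A minimal check of
# that claim, assuming TF 2.x eager execution; the shapes below are made up.
import numpy as np
import tensorflow as tf

feats = tf.constant(np.random.rand(2, 7, 7, 512), tf.float32)  # NHWC features

# What the model does for channels_last: mean over height and width.
via_reduce_mean = tf.reduce_mean(feats, axis=[1, 2])

# The same computation via an average pool whose window covers the whole map.
via_avg_pool = tf.squeeze(
    tf.nn.avg_pool2d(feats, ksize=7, strides=7, padding='VALID'), axis=[1, 2])

np.testing.assert_allclose(via_reduce_mean.numpy(), via_avg_pool.numpy(),
                           rtol=1e-5)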
[ "tensorflow.nn.relu", "tensorflow.layers.batch_normalization", "tensorflow.transpose", "tensorflow.reduce_mean", "tensorflow.test.is_built_with_cuda", "tensorflow.cast", "tensorflow.identity", "tensorflow.reshape", "tensorflow.layers.dense", "tensorflow.layers.max_pooling2d", "tensorflow.variance_scaling_initializer", "tensorflow.pad", "tensorflow.variable_scope" ]
retinopathy_test/models/resnet_model.py
[(51, 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', ([], {'inputs': 'inputs', 'axis': "(1 if data_format == 'channels_first' else 3)", 'momentum': '_BATCH_NORM_DECAY', 'epsilon': '_BATCH_NORM_EPSILON', 'center': '(True)', 'scale': '(True)', 'training': 'training', 'fused': '(True)'}), True, 'import tensorflow as tf\n'), (136, 'tensorflow.nn.relu', 'tf.nn.relu', (['inputs'], {}), True, 'import tensorflow as tf\n'), (143, 'tensorflow.nn.relu', 'tf.nn.relu', (['inputs'], {}), True, 'import tensorflow as tf\n'), (174, 'tensorflow.nn.relu', 'tf.nn.relu', (['inputs'], {}), True, 'import tensorflow as tf\n'), (186, 'tensorflow.nn.relu', 'tf.nn.relu', (['inputs'], {}), True, 'import tensorflow as tf\n'), (231, 'tensorflow.nn.relu', 'tf.nn.relu', (['inputs'], {}), True, 'import tensorflow as tf\n'), (237, 'tensorflow.nn.relu', 'tf.nn.relu', (['inputs'], {}), True, 'import tensorflow as tf\n'), (244, 'tensorflow.nn.relu', 'tf.nn.relu', (['inputs'], {}), True, 'import tensorflow as tf\n'), (283, 'tensorflow.nn.relu', 'tf.nn.relu', (['inputs'], {}), True, 'import tensorflow as tf\n'), (295, 'tensorflow.nn.relu', 'tf.nn.relu', (['inputs'], {}), True, 'import tensorflow as tf\n'), (301, 'tensorflow.nn.relu', 'tf.nn.relu', (['inputs'], {}), True, 'import tensorflow as tf\n'), (347, 'tensorflow.identity', 'tf.identity', (['inputs', 'name'], {}), True, 'import tensorflow as tf\n'), (76, 'tensorflow.pad', 'tf.pad', (['inputs', '[[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]]'], {}), True, 'import tensorflow as tf\n'), (79, 'tensorflow.pad', 'tf.pad', (['inputs', '[[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]]'], {}), True, 'import tensorflow as tf\n'), (484, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""resnet_model"""'], {'custom_getter': 'self._custom_dtype_getter'}), True, 'import tensorflow as tf\n'), (94, 'tensorflow.variance_scaling_initializer', 'tf.variance_scaling_initializer', ([], {}), True, 'import tensorflow as tf\n'), (470, 'tensorflow.cast', 'tf.cast', (['var'], {'dtype': 'dtype', 'name': "(name + '_cast')"}), True, 'import tensorflow as tf\n'), (509, 'tensorflow.identity', 'tf.identity', (['inputs', '"""initial_conv"""'], {}), True, 'import tensorflow as tf\n'), (546, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['inputs', 'axes'], {'keepdims': '(True)'}), True, 'import tensorflow as tf\n'), (547, 'tensorflow.identity', 'tf.identity', (['inputs', '"""final_reduce_mean"""'], {}), True, 'import tensorflow as tf\n'), (550, 'tensorflow.reshape', 'tf.reshape', (['inputs', '[-1, self.final_size]'], {}), True, 'import tensorflow as tf\n'), (552, 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'inputs', 'units': 'self.num_classes'}), True, 'import tensorflow as tf\n'), (553, 'tensorflow.identity', 'tf.identity', (['inputs', '"""final_dense"""'], {}), True, 'import tensorflow as tf\n'), (394, 'tensorflow.test.is_built_with_cuda', 'tf.test.is_built_with_cuda', ([], {}), True, 'import tensorflow as tf\n'), (504, 'tensorflow.transpose', 'tf.transpose', (['inputs', '[0, 3, 1, 2]'], {}), True, 'import tensorflow as tf\n'), (517, 'tensorflow.nn.relu', 'tf.nn.relu', (['inputs'], {}), True, 'import tensorflow as tf\n'), (520, 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', ([], {'inputs': 'inputs', 'pool_size': 'self.first_pool_size', 'strides': 'self.first_pool_stride', 'padding': '"""SAME"""', 'data_format': 'self.data_format'}), True, 'import tensorflow as tf\n'), (524, 'tensorflow.identity', 'tf.identity', (['inputs', 
'"""initial_max_pool"""'], {}), True, 'import tensorflow as tf\n'), (538, 'tensorflow.nn.relu', 'tf.nn.relu', (['inputs'], {}), True, 'import tensorflow as tf\n')]
NTU-SER/speech_utils
84b1ec7da5bf435c09401fc33f6b81346b80a5fe
import sys
import argparse
import pickle

from tensorflow.compat.v1 import ConfigProto, InteractiveSession
import tensorflow as tf

from speech_utils.ACRNN.tf.model_utils import train

config = ConfigProto(log_device_placement=True)
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)


def main(args):
    # Verify arguments.
    if args.save_path is None and args.perform_test:
        raise ValueError("Cannot test when `save_path` is set to `None`.")
    # Load data.
    with open(args.data_path, "rb") as fin:
        data = pickle.load(fin)
    # Swap the validation set with the test set if requested.
    if args.swap:
        train_data = data[0:2]
        test_data = data[2:6]
        val_data = data[6:10]
        data = (*train_data, *val_data, *test_data)
    # Train.
    train(data, args.num_steps, args.batch_size, args.lr,
          validate_every=args.validate_every, random_seed=args.seed,
          num_classes=args.num_classes, grad_clip=args.grad_clip,
          dropout_keep_prob=1 - args.dropout, save_path=args.save_path,
          use_CBL=args.use_cbl, beta=args.beta,
          perform_test=args.perform_test)


def parse_arguments(argv):
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="Train a 3DCRNN model in an iterative manner with "
                    "Tensorflow.")
    parser.add_argument(
        'data_path', type=str,
        help='Path to the features extracted from `extract_mel.py`.')
    parser.add_argument(
        'num_steps', type=int,
        help='Number of global steps.')
    parser.add_argument(
        '--batch_size', type=int, default=60,
        help='Mini batch size.')
    parser.add_argument(
        '--num_classes', type=int, default=4,
        help='Number of classes.')
    parser.add_argument(
        '--lr', type=float, default=1e-5,
        help='Learning rate.')
    parser.add_argument(
        '--dropout', type=float, default=0.0,
        help='Probability of a connection being set to 0 '
             '(i.e., disconnected).')
    parser.add_argument(
        '--use_cbl', action="store_true",
        help='Whether to use Class Balanced Loss.')
    parser.add_argument(
        '--beta', type=float, default=0.9999,
        help='Hyperparameter for Class Balanced Loss. Used when '
             '`use_cbl==True`.')
    parser.add_argument(
        '--grad_clip', action='store_true',
        help='Whether to clip gradients of Adam optimizer.')
    parser.add_argument(
        '--save_path', type=str, default=None,
        help='Path to save the best models with `.ckpt` as extension (e.g., '
             '`save_path=./model.ckpt`, then the model at global step 500 '
             'will be saved as `./model.ckpt-500.data-00000-of-00001`, '
             '`./model.ckpt-500.index` and `./model.ckpt-500.meta`).')
    parser.add_argument(
        '--swap', action='store_true',
        help='By default, the female recordings of a chosen session are set '
             'to validation data, and the male recordings of that session '
             'are set to test data. Set this to true to swap the validation '
             'set with the test set.')
    parser.add_argument(
        '--perform_test', action='store_true',
        help='Whether to test on test data at the end of the training '
             'process.')
    parser.add_argument(
        '--validate_every', type=int, default=10,
        help='Number of batches between each test.')
    parser.add_argument(
        '--seed', type=int, default=None,
        help='Random seed for reproducibility.')
    return parser.parse_args(argv)


if __name__ == '__main__':
    main(parse_arguments(sys.argv[1:]))
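# --- Editor's note: `--use_cbl`/`--beta` select a Class Balanced Loss inside
# `speech_utils`, whose implementation is not shown here. For context, a
# sketch of the "effective number of samples" weighting (Cui et al., 2019)
# that the `beta` hyperparameter conventionally refers to; the repository's
# actual implementation may differ.
import numpy as np


def class_balanced_weights(samples_per_class, beta=0.9999):
    """Per-class weights (1 - beta) / (1 - beta**n_c), normalized so that
    they sum to the number of classes."""
    effective_num = 1.0 - np.power(beta, samples_per_class)
    weights = (1.0 - beta) / effective_num
    return weights / weights.sum() * len(samples_per_class)


# Rare classes receive proportionally larger weights:
print(class_balanced_weights(np.array([5000, 500, 50, 5])))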
[ "tensorflow.compat.v1.ConfigProto", "tensorflow.Session" ]
scripts/ACRNN/train_tf.py
[(10, 'tensorflow.compat.v1.ConfigProto', 'ConfigProto', ([], {'log_device_placement': '(True)'}), False, 'from tensorflow.compat.v1 import ConfigProto, InteractiveSession\n'), (29, 'speech_utils.ACRNN.tf.model_utils.train', 'train', (['data', 'args.num_steps', 'args.batch_size', 'args.lr'], {'validate_every': 'args.validate_every', 'random_seed': 'args.seed', 'num_classes': 'args.num_classes', 'grad_clip': 'args.grad_clip', 'dropout_keep_prob': '(1 - args.dropout)', 'save_path': 'args.save_path', 'use_CBL': 'args.use_cbl', 'beta': 'args.beta', 'perform_test': 'args.perform_test'}), False, 'from speech_utils.ACRNN.tf.model_utils import train\n'), (37, 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter', 'description': '"""Train a 3DCRNN model in an iterative manner with Tensorflow."""'}), False, 'import argparse\n'), (12, 'tensorflow.compat.v1.InteractiveSession', 'InteractiveSession', ([], {'config': 'config'}), False, 'from tensorflow.compat.v1 import ConfigProto, InteractiveSession\n'), (21, 'pickle.load', 'pickle.load', (['fin'], {}), False, 'import pickle\n')]
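# --- Editor's note: the ConfigProto/allow_growth preamble in the script above
# is the TF1-style way of keeping TensorFlow from reserving all GPU memory at
# startup. For reference, the TF 2.x equivalent (a side note, not code from
# this repository):
import tensorflow as tf

for gpu in tf.config.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(gpu, True)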
zacqoo/tpu
764256b26f28eeff9ac4da04cfef1b8b8d5ef0dd
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model definition for the Mask-RCNN Model.

Defines model_fn of Mask-RCNN for TF Estimator. The model_fn includes Mask-RCNN
model architecture, loss function, learning rate schedule, and evaluation
procedure.

"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import re
import six
import tensorflow as tf

import anchors
import learning_rates
import losses
import mask_rcnn_architecture

_WEIGHT_DECAY = 1e-4


def create_optimizer(learning_rate, params):
  """Creates an optimizer based on the specified flags."""
  if params['optimizer'] == 'momentum':
    optimizer = tf.train.MomentumOptimizer(
        learning_rate, momentum=params['momentum'])
  elif params['optimizer'] == 'adam':
    optimizer = tf.train.AdamOptimizer(learning_rate)
  elif params['optimizer'] == 'adadelta':
    optimizer = tf.train.AdadeltaOptimizer(learning_rate)
  elif params['optimizer'] == 'adagrad':
    optimizer = tf.train.AdagradOptimizer(learning_rate)
  elif params['optimizer'] == 'rmsprop':
    optimizer = tf.train.RMSPropOptimizer(
        learning_rate, momentum=params['momentum'])
  elif params['optimizer'] == 'lars':
    optimizer = tf.contrib.opt.LARSOptimizer(
        learning_rate,
        momentum=params['momentum'],
        weight_decay=params['lars_weight_decay'],
        skip_list=['batch_normalization', 'bias'])
  else:
    raise ValueError('Unsupported optimizer type %s.' % params['optimizer'])
  return optimizer


def remove_variables(variables, resnet_depth=50):
  """Removes low-level variables from the input.

  Removing low-level parameters (e.g., initial convolution layer) from training
  usually leads to higher training speed and slightly better testing accuracy.
  The intuition is that the low-level architecture (e.g., ResNet-50) is able to
  capture low-level features such as edges; therefore, it does not need to be
  fine-tuned for the detection task.

  Args:
    variables: all the variables in training
    resnet_depth: the depth of ResNet model

  Returns:
    var_list: a list containing variables for training
  """
  # Freeze at conv2 based on reference model.
  # Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/core/config.py#L194  # pylint: disable=line-too-long
  remove_list = []
  prefix = 'resnet{}/'.format(resnet_depth)
  remove_list.append(prefix + 'conv2d/')
  remove_list.append(prefix + 'batch_normalization/')
  for i in range(1, 11):
    remove_list.append(prefix + 'conv2d_{}/'.format(i))
    remove_list.append(prefix + 'batch_normalization_{}/'.format(i))

  def _is_kept(variable):
    for rm_str in remove_list:
      if rm_str in variable.name:
        return False
    return True

  var_list = [v for v in variables if _is_kept(v)]
  return var_list


def _model_fn(features, labels, mode, params, variable_filter_fn=None):
  """Model definition for the Mask-RCNN model based on ResNet.

  Args:
    features: the input image tensor and auxiliary information, such as
      `image_info` and `source_ids`.
The image tensor has a shape of [batch_size, height, width, 3]. The height and width are fixed and equal. labels: the input labels in a dictionary. The labels include score targets and box targets which are dense label maps. The labels are generated from get_input_fn function in data/dataloader.py mode: the mode of TPUEstimator including TRAIN, EVAL, and PREDICT. params: the dictionary defines hyperparameters of model. The default settings are in default_hparams function in this file. variable_filter_fn: the filter function that takes trainable_variables and returns the variable list after applying the filter rule. Returns: tpu_spec: the TPUEstimatorSpec to run training, evaluation, or prediction. """ if params['transpose_input'] and mode == tf.estimator.ModeKeys.TRAIN: features['images'] = tf.transpose(features['images'], [3, 0, 1, 2]) image_size = (params['image_size'], params['image_size']) all_anchors = anchors.Anchors(params['min_level'], params['max_level'], params['num_scales'], params['aspect_ratios'], params['anchor_scale'], image_size) def _model_outputs(): """Generates outputs from the model.""" fpn_feats = mask_rcnn_architecture.resnet_fpn( features['images'], params['min_level'], params['max_level'], params['resnet_depth'], params['is_training_bn']) rpn_score_outputs, rpn_box_outputs = mask_rcnn_architecture.rpn_net( fpn_feats, params['min_level'], params['max_level'], len(params['aspect_ratios'] * params['num_scales'])) if mode != tf.estimator.ModeKeys.TRAIN: # The mask branch takes inputs from different places in training vs in # eval/predict. In training, the mask branch uses proposals combined with # labels to produce both mask outputs and targets. At test time, it uses # the post-processed predictions to generate masks. # Generate detections one image at a time. 
class_outputs, box_outputs, box_rois = ( mask_rcnn_architecture.faster_rcnn_fn( fpn_feats, rpn_score_outputs, rpn_box_outputs, all_anchors, features['image_info'], params, is_training=False)) batch_size, _, _ = class_outputs.get_shape().as_list() detections = [] softmax_class_outputs = tf.nn.softmax(class_outputs) for i in range(batch_size): detections.append( anchors.generate_detections_per_image_op( softmax_class_outputs[i], box_outputs[i], box_rois[i], features['source_ids'][i], features['image_info'][i], params['test_detections_per_image'], params['test_rpn_post_nms_topn'], params['test_nms'], params['bbox_reg_weights']) ) detections = tf.stack(detections, axis=0) if params['include_mask']: mask_outputs = mask_rcnn_architecture.mask_rcnn_fn( fpn_feats, params, is_training=False, detections=detections) else: (class_outputs, box_outputs, box_rois, class_targets, box_targets, proposal_to_label_map) = mask_rcnn_architecture.faster_rcnn_fn( fpn_feats, rpn_score_outputs, rpn_box_outputs, all_anchors, features['image_info'], params, is_training=True, labels=labels) encoded_box_targets = mask_rcnn_architecture.encode_box_targets( box_rois, box_targets, class_targets, params['bbox_reg_weights']) if params['include_mask']: mask_outputs, select_class_targets, mask_targets = ( mask_rcnn_architecture.mask_rcnn_fn( fpn_feats, params, is_training=True, detections=None, labels=labels, class_targets=class_targets, box_targets=box_targets, box_rois=box_rois, proposal_to_label_map=proposal_to_label_map)) if mode == tf.estimator.ModeKeys.TRAIN: model_outputs = { 'rpn_score_outputs': rpn_score_outputs, 'rpn_box_outputs': rpn_box_outputs, 'class_outputs': class_outputs, 'box_outputs': box_outputs, 'class_targets': class_targets, 'box_targets': encoded_box_targets, 'box_rois': box_rois, } if params['include_mask']: model_outputs.update({ 'mask_outputs': mask_outputs, 'mask_targets': mask_targets, 'select_class_targets': select_class_targets, }) else: model_outputs = { 'detections': detections, } if params['include_mask']: model_outputs.update({ 'mask_outputs': mask_outputs, }) return model_outputs if params['use_bfloat16']: with tf.contrib.tpu.bfloat16_scope(): model_outputs = _model_outputs() def cast_outputs_to_float(d): for k, v in sorted(six.iteritems(d)): if isinstance(v, dict): cast_outputs_to_float(v) else: d[k] = tf.cast(v, tf.float32) cast_outputs_to_float(model_outputs) else: model_outputs = _model_outputs() # First check if it is in PREDICT mode. if mode == tf.estimator.ModeKeys.PREDICT: predictions = {} predictions['detections'] = model_outputs['detections'] predictions['image_info'] = features['image_info'] if params['include_mask']: predictions['mask_outputs'] = tf.nn.sigmoid(model_outputs['mask_outputs']) if params['use_tpu']: return tf.contrib.tpu.TPUEstimatorSpec(mode=mode, predictions=predictions) return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions) # Set up training loss and learning rate. global_step = tf.train.get_or_create_global_step() learning_rate = learning_rates.step_learning_rate_with_linear_warmup( global_step, params['init_learning_rate'], params['warmup_learning_rate'], params['warmup_steps'], params['learning_rate_levels'], params['learning_rate_steps']) # score_loss and box_loss are for logging. only total_loss is optimized. 
total_rpn_loss, rpn_score_loss, rpn_box_loss = losses.rpn_loss( model_outputs['rpn_score_outputs'], model_outputs['rpn_box_outputs'], labels, params) (total_fast_rcnn_loss, fast_rcnn_class_loss, fast_rcnn_box_loss) = losses.fast_rcnn_loss( model_outputs['class_outputs'], model_outputs['box_outputs'], model_outputs['class_targets'], model_outputs['box_targets'], params) # Only training has the mask loss. Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/modeling/model_builder.py # pylint: disable=line-too-long if mode == tf.estimator.ModeKeys.TRAIN and params['include_mask']: mask_loss = losses.mask_rcnn_loss( model_outputs['mask_outputs'], model_outputs['mask_targets'], model_outputs['select_class_targets'], params) else: mask_loss = 0. if variable_filter_fn: var_list = variable_filter_fn(tf.trainable_variables(), params['resnet_depth']) else: var_list = None l2_regularization_loss = _WEIGHT_DECAY * tf.add_n( [tf.nn.l2_loss(v) for v in var_list if 'batch_normalization' not in v.name and 'bias' not in v.name]) total_loss = (total_rpn_loss + total_fast_rcnn_loss + mask_loss + l2_regularization_loss) host_call = None if mode == tf.estimator.ModeKeys.TRAIN: optimizer = create_optimizer(learning_rate, params) optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer) if not params['resnet_checkpoint']: scaffold_fn = None else: def scaffold_fn(): """Loads pretrained model through scaffold function.""" # Exclude all variable of optimizer. optimizer_vars = set([var.name for var in optimizer.variables()]) prefix = 'resnet%s/' % params['resnet_depth'] resnet_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, prefix) vars_to_load = {} for var in resnet_vars: if var.name not in optimizer_vars: var_name = var.name # Trim the index of the variable. if ':' in var_name: var_name = var_name[:var_name.rindex(':')] if params['skip_checkpoint_variables'] and re.match( params['skip_checkpoint_variables'], var_name[len(prefix):]): continue vars_to_load[var_name[len(prefix):]] = var_name tf.logging.info( 'Optimizer vars: %s.' % ', '.join(var for var in optimizer_vars)) tf.logging.info('Will train: %s.' % vars_to_load) tf.train.init_from_checkpoint(params['resnet_checkpoint'], vars_to_load) if not vars_to_load: raise ValueError('Variables to load is empty.') return tf.train.Scaffold() # Batch norm requires update_ops to be added as a train_op dependency. update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) grads_and_vars = optimizer.compute_gradients(total_loss, var_list) gradients, variables = zip(*grads_and_vars) grads_and_vars = [] # Special treatment for biases (beta is named as bias in reference model) # Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/modeling/optimizer.py#L113 # pylint: disable=line-too-long for grad, var in zip(gradients, variables): if 'beta' in var.name or 'bias' in var.name: grad = 2.0 * grad grads_and_vars.append((grad, var)) minimize_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step) with tf.control_dependencies(update_ops): train_op = minimize_op if params['use_host_call']: def host_call_fn(global_step, total_loss, total_rpn_loss, rpn_score_loss, rpn_box_loss, total_fast_rcnn_loss, fast_rcnn_class_loss, fast_rcnn_box_loss, mask_loss, learning_rate): """Training host call. Creates scalar summaries for training metrics. This function is executed on the CPU and should not directly reference any Tensors in the rest of the `model_fn`. 
To pass Tensors from the model to the `metric_fn`, provide as part of the `host_call`. See https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec for more information. Arguments should match the list of `Tensor` objects passed as the second element in the tuple passed to `host_call`. Args: global_step: `Tensor with shape `[batch, ]` for the global_step. total_loss: `Tensor` with shape `[batch, ]` for the training loss. total_rpn_loss: `Tensor` with shape `[batch, ]` for the training RPN loss. rpn_score_loss: `Tensor` with shape `[batch, ]` for the training RPN score loss. rpn_box_loss: `Tensor` with shape `[batch, ]` for the training RPN box loss. total_fast_rcnn_loss: `Tensor` with shape `[batch, ]` for the training Mask-RCNN loss. fast_rcnn_class_loss: `Tensor` with shape `[batch, ]` for the training Mask-RCNN class loss. fast_rcnn_box_loss: `Tensor` with shape `[batch, ]` for the training Mask-RCNN box loss. mask_loss: `Tensor` with shape `[batch, ]` for the training Mask-RCNN mask loss. learning_rate: `Tensor` with shape `[batch, ]` for the learning_rate. Returns: List of summary ops to run on the CPU host. """ # Outfeed supports int32 but global_step is expected to be int64. global_step = tf.reduce_mean(global_step) # Host call fns are executed FLAGS.iterations_per_loop times after one # TPU loop is finished, setting max_queue value to the same as number of # iterations will make the summary writer only flush the data to storage # once per loop. with (tf.contrib.summary.create_file_writer( params['model_dir'], max_queue=params['iterations_per_loop']).as_default()): with tf.contrib.summary.always_record_summaries(): tf.contrib.summary.scalar( 'total_loss', tf.reduce_mean(total_loss), step=global_step) tf.contrib.summary.scalar( 'total_rpn_loss', tf.reduce_mean(total_rpn_loss), step=global_step) tf.contrib.summary.scalar( 'rpn_score_loss', tf.reduce_mean(rpn_score_loss), step=global_step) tf.contrib.summary.scalar( 'rpn_box_loss', tf.reduce_mean(rpn_box_loss), step=global_step) tf.contrib.summary.scalar( 'total_fast_rcnn_loss', tf.reduce_mean(total_fast_rcnn_loss), step=global_step) tf.contrib.summary.scalar( 'fast_rcnn_class_loss', tf.reduce_mean(fast_rcnn_class_loss), step=global_step) tf.contrib.summary.scalar( 'fast_rcnn_box_loss', tf.reduce_mean(fast_rcnn_box_loss), step=global_step) if params['include_mask']: tf.contrib.summary.scalar( 'mask_loss', tf.reduce_mean(mask_loss), step=global_step) tf.contrib.summary.scalar( 'learning_rate', tf.reduce_mean(learning_rate), step=global_step) return tf.contrib.summary.all_summary_ops() # To log the loss, current learning rate, and epoch for Tensorboard, the # summary op needs to be run on the host CPU via host_call. host_call # expects [batch_size, ...] Tensors, thus reshape to introduce a batch # dimension. These Tensors are implicitly concatenated to # [params['batch_size']]. 
global_step_t = tf.reshape(global_step, [1]) total_loss_t = tf.reshape(total_loss, [1]) total_rpn_loss_t = tf.reshape(total_rpn_loss, [1]) rpn_score_loss_t = tf.reshape(rpn_score_loss, [1]) rpn_box_loss_t = tf.reshape(rpn_box_loss, [1]) total_fast_rcnn_loss_t = tf.reshape(total_fast_rcnn_loss, [1]) fast_rcnn_class_loss_t = tf.reshape(fast_rcnn_class_loss, [1]) fast_rcnn_box_loss_t = tf.reshape(fast_rcnn_box_loss, [1]) mask_loss_t = tf.reshape(mask_loss, [1]) learning_rate_t = tf.reshape(learning_rate, [1]) host_call = (host_call_fn, [global_step_t, total_loss_t, total_rpn_loss_t, rpn_score_loss_t, rpn_box_loss_t, total_fast_rcnn_loss_t, fast_rcnn_class_loss_t, fast_rcnn_box_loss_t, mask_loss_t, learning_rate_t]) else: train_op = None scaffold_fn = None return tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, host_call=host_call, scaffold_fn=scaffold_fn) def mask_rcnn_model_fn(features, labels, mode, params): """Mask-RCNN model.""" with tf.variable_scope('', reuse=tf.AUTO_REUSE): return _model_fn( features, labels, mode, params, variable_filter_fn=remove_variables)
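# --- Editor's note: `learning_rates.step_learning_rate_with_linear_warmup` is
# called in `_model_fn` above, but its module is not part of this excerpt.
# Judging from the arguments it receives, it plausibly implements the schedule
# sketched below (linear warmup followed by step decays); treat this as an
# assumption about its behavior, not the repository's verbatim code.
def step_lr_with_linear_warmup(step, init_lr, warmup_lr, warmup_steps,
                               lr_levels, lr_steps):
  """Linearly ramps from warmup_lr to init_lr, then applies step decays."""
  if step < warmup_steps:
    return warmup_lr + (init_lr - warmup_lr) * step / float(warmup_steps)
  lr = init_lr
  for level, boundary in zip(lr_levels, lr_steps):
    if step >= boundary:
      lr = level
  return lr


# Warmup for 500 steps, then drop the rate at steps 15000 and 20000.
print([step_lr_with_linear_warmup(s, 0.08, 0.0067, 500,
                                  [0.008, 0.0008], [15000, 20000])
       for s in (0, 250, 1000, 16000, 21000)])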
[ "tensorflow.contrib.tpu.bfloat16_scope", "tensorflow.control_dependencies", "tensorflow.stack", "tensorflow.cast", "tensorflow.train.init_from_checkpoint", "tensorflow.contrib.tpu.CrossShardOptimizer", "tensorflow.nn.l2_loss", "tensorflow.contrib.summary.always_record_summaries", "tensorflow.train.AdamOptimizer", "tensorflow.contrib.tpu.TPUEstimatorSpec", "tensorflow.get_collection", "tensorflow.contrib.opt.LARSOptimizer", "tensorflow.train.get_or_create_global_step", "tensorflow.train.MomentumOptimizer", "tensorflow.trainable_variables", "tensorflow.train.AdagradOptimizer", "tensorflow.nn.sigmoid", "tensorflow.train.RMSPropOptimizer", "tensorflow.contrib.summary.all_summary_ops", "tensorflow.train.AdadeltaOptimizer", "tensorflow.logging.info", "tensorflow.nn.softmax", "tensorflow.transpose", "tensorflow.train.Scaffold", "tensorflow.contrib.summary.create_file_writer", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.estimator.EstimatorSpec", "tensorflow.variable_scope" ]
models/experimental/mask_rcnn/mask_rcnn_model.py
[(125, 'anchors.Anchors', 'anchors.Anchors', (["params['min_level']", "params['max_level']", "params['num_scales']", "params['aspect_ratios']", "params['anchor_scale']", 'image_size'], {}), False, 'import anchors\n'), (241, 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), True, 'import tensorflow as tf\n'), (242, 'learning_rates.step_learning_rate_with_linear_warmup', 'learning_rates.step_learning_rate_with_linear_warmup', (['global_step', "params['init_learning_rate']", "params['warmup_learning_rate']", "params['warmup_steps']", "params['learning_rate_levels']", "params['learning_rate_steps']"], {}), False, 'import learning_rates\n'), (250, 'losses.rpn_loss', 'losses.rpn_loss', (["model_outputs['rpn_score_outputs']", "model_outputs['rpn_box_outputs']", 'labels', 'params'], {}), False, 'import losses\n'), (255, 'losses.fast_rcnn_loss', 'losses.fast_rcnn_loss', (["model_outputs['class_outputs']", "model_outputs['box_outputs']", "model_outputs['class_targets']", "model_outputs['box_targets']", 'params'], {}), False, 'import losses\n'), (426, 'tensorflow.contrib.tpu.TPUEstimatorSpec', 'tf.contrib.tpu.TPUEstimatorSpec', ([], {'mode': 'mode', 'loss': 'total_loss', 'train_op': 'train_op', 'host_call': 'host_call', 'scaffold_fn': 'scaffold_fn'}), True, 'import tensorflow as tf\n'), (43, 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', (['learning_rate'], {'momentum': "params['momentum']"}), True, 'import tensorflow as tf\n'), (122, 'tensorflow.transpose', 'tf.transpose', (["features['images']", '[3, 0, 1, 2]'], {}), True, 'import tensorflow as tf\n'), (132, 'mask_rcnn_architecture.resnet_fpn', 'mask_rcnn_architecture.resnet_fpn', (["features['images']", "params['min_level']", "params['max_level']", "params['resnet_depth']", "params['is_training_bn']"], {}), False, 'import mask_rcnn_architecture\n'), (238, 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'predictions': 'predictions'}), True, 'import tensorflow as tf\n'), (260, 'losses.mask_rcnn_loss', 'losses.mask_rcnn_loss', (["model_outputs['mask_outputs']", "model_outputs['mask_targets']", "model_outputs['select_class_targets']", 'params'], {}), False, 'import losses\n'), (279, 'tensorflow.contrib.tpu.CrossShardOptimizer', 'tf.contrib.tpu.CrossShardOptimizer', (['optimizer'], {}), True, 'import tensorflow as tf\n'), (311, 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS'], {}), True, 'import tensorflow as tf\n'), (436, 'tensorflow.variable_scope', 'tf.variable_scope', (['""""""'], {'reuse': 'tf.AUTO_REUSE'}), True, 'import tensorflow as tf\n'), (46, 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {}), True, 'import tensorflow as tf\n'), (149, 'mask_rcnn_architecture.faster_rcnn_fn', 'mask_rcnn_architecture.faster_rcnn_fn', (['fpn_feats', 'rpn_score_outputs', 'rpn_box_outputs', 'all_anchors', "features['image_info']", 'params'], {'is_training': '(False)'}), False, 'import mask_rcnn_architecture\n'), (156, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['class_outputs'], {}), True, 'import tensorflow as tf\n'), (166, 'tensorflow.stack', 'tf.stack', (['detections'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (172, 'mask_rcnn_architecture.faster_rcnn_fn', 'mask_rcnn_architecture.faster_rcnn_fn', (['fpn_feats', 'rpn_score_outputs', 'rpn_box_outputs', 'all_anchors', "features['image_info']", 'params'], {'is_training': '(True)', 'labels': 'labels'}), False, 'import 
mask_rcnn_architecture\n'), (176, 'mask_rcnn_architecture.encode_box_targets', 'mask_rcnn_architecture.encode_box_targets', (['box_rois', 'box_targets', 'class_targets', "params['bbox_reg_weights']"], {}), False, 'import mask_rcnn_architecture\n'), (216, 'tensorflow.contrib.tpu.bfloat16_scope', 'tf.contrib.tpu.bfloat16_scope', ([], {}), True, 'import tensorflow as tf\n'), (234, 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (["model_outputs['mask_outputs']"], {}), True, 'import tensorflow as tf\n'), (237, 'tensorflow.contrib.tpu.TPUEstimatorSpec', 'tf.contrib.tpu.TPUEstimatorSpec', ([], {'mode': 'mode', 'predictions': 'predictions'}), True, 'import tensorflow as tf\n'), (266, 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), True, 'import tensorflow as tf\n'), (324, 'tensorflow.control_dependencies', 'tf.control_dependencies', (['update_ops'], {}), True, 'import tensorflow as tf\n'), (407, 'tensorflow.reshape', 'tf.reshape', (['global_step', '[1]'], {}), True, 'import tensorflow as tf\n'), (408, 'tensorflow.reshape', 'tf.reshape', (['total_loss', '[1]'], {}), True, 'import tensorflow as tf\n'), (409, 'tensorflow.reshape', 'tf.reshape', (['total_rpn_loss', '[1]'], {}), True, 'import tensorflow as tf\n'), (410, 'tensorflow.reshape', 'tf.reshape', (['rpn_score_loss', '[1]'], {}), True, 'import tensorflow as tf\n'), (411, 'tensorflow.reshape', 'tf.reshape', (['rpn_box_loss', '[1]'], {}), True, 'import tensorflow as tf\n'), (412, 'tensorflow.reshape', 'tf.reshape', (['total_fast_rcnn_loss', '[1]'], {}), True, 'import tensorflow as tf\n'), (413, 'tensorflow.reshape', 'tf.reshape', (['fast_rcnn_class_loss', '[1]'], {}), True, 'import tensorflow as tf\n'), (414, 'tensorflow.reshape', 'tf.reshape', (['fast_rcnn_box_loss', '[1]'], {}), True, 'import tensorflow as tf\n'), (415, 'tensorflow.reshape', 'tf.reshape', (['mask_loss', '[1]'], {}), True, 'import tensorflow as tf\n'), (416, 'tensorflow.reshape', 'tf.reshape', (['learning_rate', '[1]'], {}), True, 'import tensorflow as tf\n'), (48, 'tensorflow.train.AdadeltaOptimizer', 'tf.train.AdadeltaOptimizer', (['learning_rate'], {}), True, 'import tensorflow as tf\n'), (168, 'mask_rcnn_architecture.mask_rcnn_fn', 'mask_rcnn_architecture.mask_rcnn_fn', (['fpn_feats', 'params'], {'is_training': '(False)', 'detections': 'detections'}), False, 'import mask_rcnn_architecture\n'), (181, 'mask_rcnn_architecture.mask_rcnn_fn', 'mask_rcnn_architecture.mask_rcnn_fn', (['fpn_feats', 'params'], {'is_training': '(True)', 'detections': 'None', 'labels': 'labels', 'class_targets': 'class_targets', 'box_targets': 'box_targets', 'box_rois': 'box_rois', 'proposal_to_label_map': 'proposal_to_label_map'}), False, 'import mask_rcnn_architecture\n'), (271, 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['v'], {}), True, 'import tensorflow as tf\n'), (290, 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES', 'prefix'], {}), True, 'import tensorflow as tf\n'), (304, 'tensorflow.logging.info', 'tf.logging.info', (["('Will train: %s.' 
% vars_to_load)"], {}), True, 'import tensorflow as tf\n'), (305, 'tensorflow.train.init_from_checkpoint', 'tf.train.init_from_checkpoint', (["params['resnet_checkpoint']", 'vars_to_load'], {}), True, 'import tensorflow as tf\n'), (308, 'tensorflow.train.Scaffold', 'tf.train.Scaffold', ([], {}), True, 'import tensorflow as tf\n'), (365, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['global_step'], {}), True, 'import tensorflow as tf\n'), (50, 'tensorflow.train.AdagradOptimizer', 'tf.train.AdagradOptimizer', (['learning_rate'], {}), True, 'import tensorflow as tf\n'), (159, 'anchors.generate_detections_per_image_op', 'anchors.generate_detections_per_image_op', (['softmax_class_outputs[i]', 'box_outputs[i]', 'box_rois[i]', "features['source_ids'][i]", "features['image_info'][i]", "params['test_detections_per_image']", "params['test_rpn_post_nms_topn']", "params['test_nms']", "params['bbox_reg_weights']"], {}), False, 'import anchors\n'), (219, 'six.iteritems', 'six.iteritems', (['d'], {}), False, 'import six\n'), (52, 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (['learning_rate'], {'momentum': "params['momentum']"}), True, 'import tensorflow as tf\n'), (223, 'tensorflow.cast', 'tf.cast', (['v', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (373, 'tensorflow.contrib.summary.always_record_summaries', 'tf.contrib.summary.always_record_summaries', ([], {}), True, 'import tensorflow as tf\n'), (400, 'tensorflow.contrib.summary.all_summary_ops', 'tf.contrib.summary.all_summary_ops', ([], {}), True, 'import tensorflow as tf\n'), (55, 'tensorflow.contrib.opt.LARSOptimizer', 'tf.contrib.opt.LARSOptimizer', (['learning_rate'], {'momentum': "params['momentum']", 'weight_decay': "params['lars_weight_decay']", 'skip_list': "['batch_normalization', 'bias']"}), True, 'import tensorflow as tf\n'), (370, 'tensorflow.contrib.summary.create_file_writer', 'tf.contrib.summary.create_file_writer', (["params['model_dir']"], {'max_queue': "params['iterations_per_loop']"}), True, 'import tensorflow as tf\n'), (375, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['total_loss'], {}), True, 'import tensorflow as tf\n'), (377, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['total_rpn_loss'], {}), True, 'import tensorflow as tf\n'), (380, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['rpn_score_loss'], {}), True, 'import tensorflow as tf\n'), (383, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['rpn_box_loss'], {}), True, 'import tensorflow as tf\n'), (385, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['total_fast_rcnn_loss'], {}), True, 'import tensorflow as tf\n'), (388, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['fast_rcnn_class_loss'], {}), True, 'import tensorflow as tf\n'), (391, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['fast_rcnn_box_loss'], {}), True, 'import tensorflow as tf\n'), (397, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['learning_rate'], {}), True, 'import tensorflow as tf\n'), (395, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['mask_loss'], {}), True, 'import tensorflow as tf\n')]
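# --- Editor's note: the train-op loop in `_model_fn` doubles gradients for
# bias/beta variables before `apply_gradients`, following the Detectron
# reference linked in the code. That rule is isolated below for clarity; with
# plain SGD this is equivalent to giving biases twice the base learning rate.
def double_bias_gradients(grads_and_vars):
  """Returns grads_and_vars with bias/beta gradients scaled by 2.0."""
  out = []
  for grad, var in grads_and_vars:
    if 'beta' in var.name or 'bias' in var.name:
      grad = 2.0 * grad
    out.append((grad, var))
  return out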
zxhuang97/planet
c5fe704d744fc434e0a163973fd8259314fadea3
# Copyright 2019 The PlaNet Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import functools import logging import os import numpy as np import ruamel.yaml as yaml import tensorflow as tf from planet import control from planet import tools from planet.training import trainer as trainer_ from planet.tools import filter_variables_lib Objective = collections.namedtuple( 'Objective', 'name, value, goal, include, exclude') def set_up_logging(): """Configure the TensorFlow logger.""" tf.logging.set_verbosity(tf.logging.INFO) logging.getLogger('tensorflow').propagate = False logging.getLogger('tensorflow').format = '%(message)s' logging.basicConfig(level=logging.INFO, format='%(message)s') def save_config(config, logdir=None): """Save a new configuration by name. If a logging directory is specified, is will be created and the configuration will be stored there. Otherwise, a log message will be printed. Args: config: Configuration object. logdir: Location for writing summaries and checkpoints if specified. Returns: Configuration object. """ if logdir: with config.unlocked: config.logdir = logdir message = 'Start a new run and write summaries and checkpoints to {}.' tf.logging.info(message.format(config.logdir)) tf.gfile.MakeDirs(config.logdir) config_path = os.path.join(config.logdir, 'config.yaml') with tf.gfile.GFile(config_path, 'w') as file_: yaml.dump( config, file_, yaml.Dumper, allow_unicode=True, default_flow_style=False) else: message = ( 'Start a new run without storing summaries and checkpoints since no ' 'logging directory was specified.') tf.logging.info(message) return config def load_config(logdir): """Load a configuration from the log directory. Args: logdir: The logging directory containing the configuration file. Raises: IOError: The logging directory does not contain a configuration file. Returns: Configuration object. """ print(logdir) config_path = logdir and os.path.join(logdir, 'config.yaml') if not config_path or not tf.gfile.Exists(config_path): message = ( 'Cannot resume an existing run since the logging directory does not ' 'contain a configuration file.') raise IOError(message) with tf.gfile.GFile(config_path, 'r') as file_: print('try to load') config = yaml.load(file_, yaml.Loader) message = 'Resume run and write summaries and checkpoints to {}.' tf.logging.info(message.format(config.logdir)) return config def get_batch(datasets, phase, reset): """Read batches from multiple datasets based on the training phase. The test dataset is reset at the beginning of every test phase. The training dataset is repeated infinitely and doesn't need a reset. Args: datasets: Dictionary of datasets with training phases as keys. phase: Tensor of the training phase name. reset: Whether to reset the datasets. Returns: data: a batch of data from either the train or test set. 
""" with datasets.unlocked: datasets.train = datasets.train.make_one_shot_iterator() datasets.test = datasets.test.make_one_shot_iterator() data = tf.cond( tf.equal(phase, 'train'), datasets.train.get_next, datasets.test.get_next) if not isinstance(data, dict): data = {'data': data} if 'length' not in data: example = data[list(data.keys())[0]] data['length'] = ( tf.zeros((tf.shape(example)[0],), tf.int32) + tf.shape(example)[1]) return data def train(model_fn, datasets, logdir, config): """Train a model on a datasets. The model function receives the following arguments: data batch, trainer phase, whether it should log, and the config. The configuration object should contain the attributes `batch_shape`, `train_steps`, `test_steps`, `max_steps`, in addition to the attributes expected by the model function. Args: model_fn: Function greating the model graph. datasets: Dictionary with keys `train` and `test` and datasets as values. logdir: Optional logging directory for summaries and checkpoints. config: Configuration object. Yields: Test score of every epoch. Raises: KeyError: if config is falsey. """ if not config: raise KeyError('You must specify a configuration.') logdir = logdir and os.path.expanduser(logdir) # print('tttttttttt') # print(config) try: config = load_config(logdir) except RuntimeError: print('Failed to load existing config.') except IOError: config = save_config(config, logdir) trainer = trainer_.Trainer(logdir, config=config) cleanups = [] try: with tf.variable_scope('graph', use_resource=True): data = get_batch(datasets, trainer.phase, trainer.reset) score, summary, cleanups = model_fn(data, trainer, config) message = 'Graph contains {} trainable variables.' tf.logging.info(message.format(tools.count_weights())) if config.train_steps: trainer.add_phase( 'train', config.train_steps, score, summary, batch_size=config.batch_shape[0], report_every=None, log_every=config.train_log_every, checkpoint_every=config.train_checkpoint_every) if config.test_steps: trainer.add_phase( 'test', config.test_steps, score, summary, batch_size=config.batch_shape[0], report_every=config.test_steps, log_every=config.test_steps, checkpoint_every=config.test_checkpoint_every) for saver in config.savers: trainer.add_saver(**saver) for score in trainer.iterate(config.max_steps): yield score finally: for cleanup in cleanups: cleanup() def test(model_fn, datasets, logdir, config): """Train a model on a datasets. The model function receives the following arguments: data batch, trainer phase, whether it should log, and the config. The configuration object should contain the attributes `batch_shape`, `train_steps`, `test_steps`, `max_steps`, in addition to the attributes expected by the model function. Args: model_fn: Function greating the model graph. datasets: Dictionary with keys `train` and `test` and datasets as values. logdir: Optional logging directory for summaries and checkpoints. config: Configuration object. Yields: Test score of every epoch. Raises: KeyError: if config is falsey. 
""" if not config: raise KeyError('You must specify a configuration.') logdir = logdir and os.path.expanduser(logdir) try: config = load_config(logdir) except RuntimeError: print('Failed to load existing config.') except IOError: config = save_config(config, logdir) trainer = trainer_.Trainer(logdir, config=config) cleanups = [] try: with tf.variable_scope('graph', use_resource=True): data = get_batch(datasets, trainer.phase, trainer.reset) score, summary, cleanups = model_fn(data, trainer, config, logdir) message = 'Graph contains {} trainable variables.' tf.logging.info(message.format(tools.count_weights())) if config.test_steps: trainer.add_phase( 'test', config.test_steps, score, summary, batch_size=config.batch_shape[0], report_every=config.test_steps, log_every=config.test_steps, checkpoint_every=config.test_checkpoint_every) for saver in config.savers: trainer.add_saver(**saver) for i, score in enumerate(trainer.iterate(config.max_steps)): yield score if i == 19: break finally: for cleanup in cleanups: cleanup() def compute_objectives(posterior, prior, target, graph, config, trainer): raw_features = graph.cell.features_from_state(posterior) heads = graph.heads objectives = [] summaries = [] cstr_pct = 0.0 for name, scale in config.loss_scales.items(): if config.loss_scales[name] == 0.0: continue if name in config.heads and name not in config.gradient_heads: features = tf.stop_gradient(raw_features) include = r'.*/head_{}/.*'.format(name) exclude = None else: features = raw_features include = r'.*' exclude = None if name == 'divergence': loss = graph.cell.divergence_from_states(posterior, prior) if config.free_nats is not None: loss = tf.maximum(0.0, loss - float(config.free_nats)) objectives.append(Objective('divergence', loss, min, include, exclude)) elif name == 'overshooting': shape = tools.shape(graph.data['action']) length = tf.tile(tf.constant(shape[1])[None], [shape[0]]) _, priors, posteriors, mask = tools.overshooting( graph.cell, {}, graph.embedded, graph.data['action'], length, config.overshooting_distance, posterior) posteriors, priors, mask = tools.nested.map( lambda x: x[:, :, 1:-1], (posteriors, priors, mask)) if config.os_stop_posterior_grad: posteriors = tools.nested.map(tf.stop_gradient, posteriors) loss = graph.cell.divergence_from_states(posteriors, priors) if config.free_nats is not None: loss = tf.maximum(0.0, loss - float(config.free_nats)) objectives.append(Objective('overshooting', loss, min, include, exclude)) elif name == 'reward' and config.r_loss == 'contra': pred = heads[name](features) if config.contra_unit == 'traj': print('Using traj loss') contra_loss, cstr_pct = contra_traj_lossV6(pred, target[name], horizon=config.contra_horizon) elif config.contra_unit == 'weighted': print('Using weighted trajectory loss ', config.contra_horizon) contra_loss, cstr_pct = contra_traj_lossV7( pred, target[name], horizon=config.contra_horizon, temp=config.temp) elif config.contra_unit == 'simclr': print('Using simclr trajectory loss ', config.contra_horizon) contra_loss, cstr_pct = contra_traj_lossV8(pred, target[name], horizon=config.contra_horizon) elif config.contra_unit == 'rank': print('Using ranking trajectory loss ', config.contra_horizon) contra_loss, cstr_pct = contra_traj_lossV9( pred, target[name], horizon=config.contra_horizon, margin=config.margin) objectives.append((Objective(name, contra_loss, min, include, exclude))) elif name == 'reward' and config.r_loss == 'l2': pred = heads[name](features) l2_loss = 
tf.compat.v1.losses.mean_squared_error(target[name], pred) # l2_loss = tf.nn.l2_loss(pred - target[name]) objectives.append((Objective(name, l2_loss, min, include, exclude))) else: if not config.aug_same and config.aug: recon_feat = tf.concat([features, target['aug']], -1) print('Use recon feature ', name, recon_feat) logprob = heads[name](recon_feat).log_prob(target[name]) # logprob = heads[name](features).log_prob(target['ori_img']) else: logprob = heads[name](features).log_prob(target[name]) objectives.append(Objective(name, logprob, max, include, exclude)) objectives = [o._replace(value=tf.reduce_mean(o.value)) for o in objectives] return objectives, cstr_pct def contra_step_lossV1(pred, tgt, temp=10.0): # Step-wise contrastive loss pred1, pred2 = tf.split(pred, 2, axis=0) tgt1, tgt2 = tf.split(tgt, 2, axis=0) soft_sign = tf.tanh((tgt1 - tgt2) * temp) loss = tf.maximum(0.0, soft_sign * ((tgt1 - tgt2) - (pred1 - pred2))) loss = tf.reduce_mean(loss) return loss def contra_step_lossV2(pred, tgt): # Step-wise contrastive loss pred1, pred2 = tf.split(pred, 2, axis=0) tgt1, tgt2 = tf.split(tgt, 2, axis=0) geq = tf.cast((tgt1 - tgt2) > 0, tf.bool) tgt_larg = tf.where(geq, tgt1, tgt2) tgt_small = tf.where(geq, tgt2, tgt1) pred_larg = tf.where(geq, pred1, pred2) pred_small = tf.where(geq, pred2, pred1) loss = tf.maximum(0.0, (tgt_larg - tgt_small) - (pred_larg - pred_small)) loss = tf.reduce_mean(loss) return loss def contra_step_lossV3(pred, tgt, margin=1.0): # Step-wise contrastive loss pred1, pred2 = tf.split(pred, 2, axis=0) tgt1, tgt2 = tf.split(tgt, 2, axis=0) geq = tf.cast((tgt1 - tgt2) > 0, tf.bool) tgt_larg = tf.where(geq, tgt1, tgt2) tgt_small = tf.where(geq, tgt2, tgt1) pred_larg = tf.where(geq, pred1, pred2) pred_small = tf.where(geq, pred2, pred1) loss = tf.maximum(0.0, (tgt_larg - tgt_small) - (pred_larg - pred_small) + margin) loss = tf.reduce_mean(loss) return loss def contra_step_lossV4(pred, tgt): # 50*50 # Step-wise contrastive loss even = [2 * i for i in range(25)] odd = [2 * i + 1 for i in range(25)] pred1 = tf.gather(pred, even) pred2 = tf.gather(pred, odd) tgt1 = tf.gather(tgt, even) tgt2 = tf.gather(tgt, odd) geq = tf.cast((tgt1 - tgt2) > 0, tf.bool) tgt_larg = tf.where(geq, tgt1, tgt2) tgt_small = tf.where(geq, tgt2, tgt1) pred_larg = tf.where(geq, pred1, pred2) pred_small = tf.where(geq, pred2, pred1) loss = tf.maximum(0.0, (tgt_larg - tgt_small) - (pred_larg - pred_small)) # loss = tf.maximum(0.0, tf.math.abs(tgt_larg - pred_larg) - tf.math.abs(tgt_small - pred_small)) loss = tf.reduce_mean(loss) return loss def contra_step_lossV5(pred, tgt, resample=1): # p = tf.print('begin loss v5', [resample, pred.shape,tgt.shape]) # with tf.control_dependencies([p]): pred_flat = tf.reshape(pred, [-1]) tgt_flat = tf.reshape(tgt, [-1]) batch = tf.stack([pred_flat, tgt_flat], 1) num_sam = tools.shape(batch)[0] index = tf.range(num_sam) divider = tf.constant(resample, dtype=tf.float32) def sample_compute(cur_loss, i): batch1 = tf.gather(batch, tf.random.shuffle(index)) batch2 = tf.gather(batch, tf.random.shuffle(index)) pred1 = tf.slice(batch1, [0, 0], [num_sam, 1]) pred2 = tf.slice(batch2, [0, 0], [num_sam, 1]) tgt1 = tf.slice(batch1, [0, 1], [num_sam, 1]) tgt2 = tf.slice(batch2, [0, 1], [num_sam, 1]) loss = cur_loss + compute_contra_loss(pred1, pred2, tgt1, tgt2) print(loss) return (loss, i + 1) # def sample_compute(i): # batch1 = tf.gather(batch, tf.random.shuffle(index)) # batch2 = tf.gather(batch, tf.random.shuffle(index)) # pred1 = tf.slice(batch1, [0, 0], [num_sam, 1]) # pred2 
= tf.slice(batch2, [0, 0], [num_sam, 1]) # tgt1 = tf.slice(batch1, [0, 1], [num_sam, 1]) # tgt2 = tf.slice(batch2, [0, 1], [num_sam, 1]) # loss = compute_contra_loss(pred1, pred2, tgt1, tgt2) # print(loss) # return loss i = tf.constant(0) loss = tf.constant(0.) final_loss = tf.while_loop(lambda l, i: i < resample, sample_compute, [loss, i])[0] # final_loss = tf.scan(sample_compute, tf.range(resample), loss)[-1] # final_loss = tf.map_fn(fn=lambda inp: sample_compute(inp), elems= tf.range(resample), dtype=tf.float32, parallel_iterations=1) # print('final', final_loss) # final_loss = loss avg_loss = tf.reduce_mean(final_loss) / divider # p = tf.print('cur_loss', [final_loss, avg_loss]) # with tf.control_dependencies([p]): # avg_loss = tf.identity(avg_loss) # print(final_loss, avg_loss) # p = tf.print('debug loss ', [final_loss, avg_loss]) # with tf.control_dependencies([p]): # avg_loss = 1. * avg_loss # print(avg_loss) # exit() return avg_loss def compute_contra_loss(pred1, pred2, tgt1, tgt2, hard_ratio=1.0): geq = tf.cast((tgt1 - tgt2) > 0, tf.bool) tgt_larg = tf.where(geq, tgt1, tgt2) tgt_small = tf.where(geq, tgt2, tgt1) pred_larg = tf.where(geq, pred1, pred2) pred_small = tf.where(geq, pred2, pred1) loss = tf.maximum(0., (tgt_larg - tgt_small) - (pred_larg - pred_small)) if hard_ratio < 1.0: hard_num = tf.cast(tools.shape(pred1)[0] * hard_ratio, tf.int32) loss = tf.reshape(loss, [-1]) hard_loss, _ = tf.math.top_k(loss, k=hard_num) return hard_loss return loss def compute_error_loss(pred1, pred2, tgt1, tgt2, hard_ratio=1.0): geq = tf.cast((tgt1 - tgt2) > 0, tf.bool) tgt_larg = tf.where(geq, tgt1, tgt2) tgt_small = tf.where(geq, tgt2, tgt1) pred_larg = tf.where(geq, pred1, pred2) pred_small = tf.where(geq, pred2, pred1) loss = tf.maximum(0., (tgt_larg - tgt_small) - (pred_larg - pred_small)) if hard_ratio < 1.0: hard_num = tf.cast(tools.shape(pred1)[0] * hard_ratio, tf.int32) loss = tf.reshape(loss, [-1]) hard_loss, _ = tf.math.top_k(loss, k=hard_num) return hard_loss return loss def sample_pair(batch): num_sam = tools.shape(batch)[0] index = tf.range(num_sam) tgt1 = tf.slice(batch, [0, 1], [num_sam, 1]) pred1 = tf.slice(batch, [0, 0], [num_sam, 1]) def uniform(): batch2 = tf.gather(batch, tf.random.shuffle(index)) pred2 = tf.slice(batch2, [0, 0], [num_sam, 1]) tgt2 = tf.slice(batch2, [0, 1], [num_sam, 1]) return pred1, pred2, tgt1, tgt2 return uniform def contra_traj_lossV5(pred, tgt, horizon=12, resample=1, hard_ratio=1.0): horizon_pred = horizon_sumV1(pred, horizon) horizon_tgt = horizon_sumV1(tgt, horizon) pred_flat = tf.reshape(horizon_pred, [-1]) tgt_flat = tf.reshape(horizon_tgt, [-1]) batch = tf.stack([pred_flat, tgt_flat], 1) sample_func = sample_pair(batch) def sample_compute(_): pairs = sample_func() loss = compute_contra_loss(*pairs, hard_ratio=hard_ratio) pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.size(loss, out_type=tf.float32) p = tf.cond(tf.random_uniform((), dtype=tf.float32) < 1e-4, lambda: tf.print('csrt acc ', [pct]), lambda: tf.no_op()) with tf.control_dependencies([p]): return tf.reduce_mean(loss) loss = tf.map_fn(fn=lambda inp: sample_compute(inp), elems=tf.range(resample), dtype=tf.float32, parallel_iterations=32) final_loss = tf.reduce_mean(loss) return final_loss def contra_traj_lossV6(pred, tgt, horizon=12): horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon) # horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon) pred_flat1, pred_flat2 = tf.reshape(horizon_pred, [-1, 1]), tf.reshape(horizon_pred, [1, -1]) 
  tgt_flat1, tgt_flat2 = tf.reshape(horizon_tgt, [-1, 1]), tf.reshape(horizon_tgt, [1, -1])
  tgt_dif = tgt_flat1 - tgt_flat2
  pred_dif = pred_flat1 - pred_flat2
  geq = tf.cast(tgt_dif > 0, tf.bool)
  tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif)
  pred_posi_dif = tf.where(geq, pred_dif, -pred_dif)
  loss = tf.maximum(0., tgt_posi_dif - pred_posi_dif)
  cstr_pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.cast(tf.reduce_prod(tf.shape(loss)), tf.float32)
  final_loss = tf.reduce_mean(loss)
  return final_loss, cstr_pct


def contra_traj_lossV7(pred, tgt, horizon=12, temp=100):
  # Same pairwise hinge as the variant above, but each pair is reweighted by
  # exp((sum of the two target returns) / temp), so pairs of high-return
  # segments dominate the normalized loss.
  horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon)
  # horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon)
  pred_flat1, pred_flat2 = tf.reshape(horizon_pred, [-1, 1]), tf.reshape(horizon_pred, [1, -1])
  tgt_flat1, tgt_flat2 = tf.reshape(horizon_tgt, [-1, 1]), tf.reshape(horizon_tgt, [1, -1])
  tgt_dif = tgt_flat1 - tgt_flat2
  pred_dif = pred_flat1 - pred_flat2
  geq = tf.cast(tgt_dif > 0, tf.bool)
  tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif)
  pred_posi_dif = tf.where(geq, pred_dif, -pred_dif)
  loss = tf.maximum(0., tgt_posi_dif - pred_posi_dif)
  cstr_pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.cast(tf.reduce_prod(tf.shape(loss)), tf.float32)
  unorm_w = tf.exp((tgt_flat1 + tgt_flat2) / temp)
  loss = unorm_w * loss / (tf.reduce_sum(unorm_w))
  a = tf.print(tf.reduce_sum(unorm_w))
  with tf.control_dependencies([a]):
    final_loss = tf.reduce_sum(loss)
  return final_loss, cstr_pct


def contra_traj_lossV8(pred, tgt, horizon=12):
  # Same hinge, but pairs are formed across the two halves of the batch
  # instead of across all flattened entries.
  horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon)
  # horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon)
  horizon_pred1, horizon_pred2 = tf.split(horizon_pred, 2, axis=0)
  horizon_tgt1, horizon_tgt2 = tf.split(horizon_tgt, 2, axis=0)
  pred_flat1, pred_flat2 = tf.reshape(horizon_pred1, [-1, 1]), tf.reshape(horizon_pred2, [1, -1])
  tgt_flat1, tgt_flat2 = tf.reshape(horizon_tgt1, [-1, 1]), tf.reshape(horizon_tgt2, [1, -1])
  tgt_dif = tgt_flat1 - tgt_flat2
  pred_dif = pred_flat1 - pred_flat2
  geq = tf.cast(tgt_dif > 0, tf.bool)
  tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif)
  pred_posi_dif = tf.where(geq, pred_dif, -pred_dif)
  loss = tf.maximum(0., tgt_posi_dif - pred_posi_dif)
  cstr_pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.cast(tf.reduce_prod(tf.shape(loss)), tf.float32)
  final_loss = tf.reduce_mean(loss)
  return final_loss, cstr_pct


def contra_traj_lossV9(pred, tgt, horizon=12, margin=1):
  # Margin variant: the oriented predicted difference only has to exceed a
  # fixed margin; the magnitude of the target difference is ignored.
  horizon_pred, horizon_tgt = horizon_sumV1(pred, horizon), horizon_sumV1(tgt, horizon)
  # horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon)
  pred_flat1, pred_flat2 = tf.reshape(horizon_pred, [-1, 1]), tf.reshape(horizon_pred, [1, -1])
  tgt_flat1, tgt_flat2 = tf.reshape(horizon_tgt, [-1, 1]), tf.reshape(horizon_tgt, [1, -1])
  tgt_dif = tgt_flat1 - tgt_flat2
  pred_dif = pred_flat1 - pred_flat2
  geq = tf.cast(tgt_dif > 0, tf.bool)
  # tgt_posi_dif = tf.where(geq, tgt_dif, -tgt_dif)
  pred_posi_dif = tf.where(geq, pred_dif, -pred_dif)
  loss = tf.maximum(0., margin - pred_posi_dif)
  cstr_pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.cast(tf.reduce_prod(tf.shape(loss)), tf.float32)
  final_loss = tf.reduce_mean(loss)
  return final_loss, cstr_pct


def contra_traj_lossV4(pred, tgt, horizon=12, resample=1, hard_ratio=1.0):
  # Monte-Carlo variant: repeatedly samples pairs from the flattened
  # (prediction, target) batch and averages the resulting hinge losses.
  horizon_pred = horizon_sumV1(pred, horizon)
  horizon_tgt = horizon_sumV1(tgt, horizon)
  pred_flat = tf.reshape(horizon_pred, [-1])
  tgt_flat = tf.reshape(horizon_tgt, [-1])
  batch = tf.stack([pred_flat, tgt_flat], 1)
  sample_func = sample_pair(batch)

  def sample_compute(_):
    pairs = sample_func()
    loss = compute_contra_loss(*pairs, hard_ratio=hard_ratio)
    pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.size(loss, out_type=tf.float32)
    p = tf.cond(tf.random_uniform((), dtype=tf.float32) < 1e-4, lambda: tf.print('csrt acc ', [pct]), lambda: tf.no_op())
    with tf.control_dependencies([p]):
      return tf.reduce_mean(loss)

  # The map_fn element is ignored; map_fn only repeats the sampled-pair loss
  # resample times.
  loss = tf.map_fn(fn=lambda inp: sample_compute(inp), elems=tf.range(resample), dtype=tf.float32, parallel_iterations=32)
  final_loss = tf.reduce_mean(loss)
  return final_loss


def contra_traj_lossV1(pred, tgt, temp=10.0):
  # Trajectory-wise contrastive loss
  traj_pred = tf.reduce_mean(pred, axis=1)
  traj_tgt = tf.reduce_mean(tgt, axis=1)
  p1, p2 = tf.split(traj_pred, 2, axis=0)
  t1, t2 = tf.split(traj_tgt, 2, axis=0)
  soft_sign = tf.tanh((t1 - t2) * temp)
  loss = tf.maximum(0.0, soft_sign * ((t1 - t2) - (p1 - p2)))
  loss = tf.reduce_mean(loss)
  return loss


def horizon_sumV1(input, horizon=12):
  # Sliding-window sum over the time axis, expressed as a matmul with a
  # banded 0/1 weight matrix.
  bs, epi_len = input.shape[:2]
  new_w = epi_len - horizon + 1
  weights = np.zeros([epi_len, new_w])
  for i in range(new_w):
    weights[i:i + horizon, i] = 1.0
  weights = tf.convert_to_tensor(weights, dtype=tf.float32)
  horizon_sum = tf.matmul(input, weights)
  return horizon_sum


def horizon_sumV2(pred, tgt, horizon=12):
  bs, epi_len = 50, 50  # NOTE: batch size and episode length are hardcoded.
  weights_list = []
  for h in range(1, horizon + 1):
    new_w = epi_len - h + 1
    weights = np.zeros([epi_len, epi_len])
    for i in range(new_w):
      weights[i:i + h, i] = 1.0
    weights_list += [weights]
  weights_tensors = tf.stack([tf.convert_to_tensor(weights, dtype=tf.float32) for weights in weights_list])
  # rand_horizon indexes the precomputed masks: index k selects window length
  # k + 1, for which epi_len - k columns are valid.
  rand_horizon = tf.random_uniform((), 0, horizon, dtype=tf.int32)
  new_w = epi_len - rand_horizon
  cur_weights = tf.slice(weights_tensors[tf.cast(rand_horizon, tf.int32)], [0, 0], [epi_len, new_w])
  # cur_weights = tf.slice(weights_tensors, [tf.cast(rand_horizon, tf.int32), 0, 0], [1, epi_len, new_w])
  horizon_pred = tf.matmul(pred, cur_weights)
  horizon_tgt = tf.matmul(tgt, cur_weights)
  return horizon_pred, horizon_tgt


def contra_traj_lossV2(pred, tgt, horizon=9):
  # Step-wise contrastive loss
  horizon_pred = horizon_sumV1(pred, horizon)
  horizon_tgt = horizon_sumV1(tgt, horizon)
  pred1, pred2 = tf.split(horizon_pred, 2, axis=0)
  tgt1, tgt2 = tf.split(horizon_tgt, 2, axis=0)
  geq = tf.cast((tgt1 - tgt2) > 0, tf.bool)
  tgt_larg = tf.where(geq, tgt1, tgt2)
  tgt_small = tf.where(geq, tgt2, tgt1)
  pred_larg = tf.where(geq, pred1, pred2)
  pred_small = tf.where(geq, pred2, pred1)
  loss = tf.maximum(0.0, ((tgt_larg - tgt_small) - (pred_larg - pred_small)))
  loss = tf.reduce_mean(loss)
  return loss


# random horizon
def contra_traj_lossV3(pred, tgt, horizon=12):
  # Step-wise contrastive loss
  horizon_pred, horizon_tgt = horizon_sumV2(pred, tgt, horizon)
  # pred1, pred2 = tf.split(horizon_pred, 2, axis=0)
  # tgt1, tgt2 = tf.split(horizon_tgt, 2, axis=0)
  even = [2 * i for i in range(25)]
  odd = [2 * i + 1 for i in range(25)]
  pred1 = tf.gather(horizon_pred, even)
  pred2 = tf.gather(horizon_pred, odd)
  tgt1 = tf.gather(horizon_tgt, even)
  tgt2 = tf.gather(horizon_tgt, odd)
  geq = tf.cast((tgt1 - tgt2) > 0, tf.bool)
  tgt_larg = tf.where(geq, tgt1, tgt2)
  tgt_small = tf.where(geq, tgt2, tgt1)
  pred_larg = tf.where(geq, pred1, pred2)
  pred_small = tf.where(geq, pred2, pred1)
  loss = tf.maximum(0.0, ((tgt_larg - tgt_small) - (pred_larg - pred_small)))
  loss = tf.reduce_mean(loss)
  return loss


def apply_optimizers(objectives, trainer, config):
  # Make sure all losses are computed and apply loss scales.
  processed = []
  values = [ob.value for ob in objectives]
  for ob in objectives:
    # ob.goal is the builtin min or max function; negate maximization
    # objectives so every objective becomes a loss to minimize.
    loss = {min: ob.value, max: -ob.value}[ob.goal]
    loss *= config.loss_scales[ob.name]
    with tf.control_dependencies(values):
      loss = tf.identity(loss)
    processed.append(ob._replace(value=loss, goal=min))
  # Merge objectives that operate on the whole model to compute only one
  # backward pass and to share optimizer statistics.
  objectives = []
  losses = []
  for ob in processed:
    if ob.include == r'.*' and ob.exclude is None:
      assert ob.goal == min
      losses.append(ob.value)
    else:
      objectives.append(ob)
  objectives.append(Objective('main', tf.reduce_sum(losses), min, r'.*', None))
  # Apply optimizers and collect loss summaries.
  summaries = []
  grad_norms = {}
  # for ob in processed:
  #   variables = filter_variables_lib.filter_variables(ob.include, ob.exclude)
  #   gradient = tf.gradients(ob.value, variables)
  #   grad_norm = tf.global_norm(gradient)
  #   with tf.name_scope('loss_{}'.format(ob.name)):
  #     summaries.append(tf.summary.scalar('grad_norm', grad_norm))
  for ob in objectives:
    assert ob.name in list(config.loss_scales.keys()) + ['main'], ob
    assert ob.goal == min, ob
    assert ob.name in config.optimizers, ob
    optimizer = config.optimizers[ob.name](
        include=ob.include,
        exclude=ob.exclude,
        step=trainer.step,
        log=trainer.log,
        debug=config.debug,
        name=ob.name)
    condition = tf.equal(trainer.phase, 'train')
    summary, grad_norm = optimizer.maybe_minimize(condition, ob.value)
    summaries.append(summary)
    grad_norms[ob.name] = grad_norm
  return summaries, grad_norms


def simulate_episodes(
    config, params, graph, cleanups, expensive_summaries, gif_summary, name):
  def env_ctor():
    env = params.task.env_ctor()
    if params.save_episode_dir:
      env = control.wrappers.CollectGymDataset(env, params.save_episode_dir)
    env = control.wrappers.ConcatObservation(env, ['image'])
    return env
  bind_or_none = lambda x, **kw: x and functools.partial(x, **kw)
  cell = graph.cell
  agent_config = tools.AttrDict(
      cell=cell,
      encoder=graph.encoder,
      planner=functools.partial(params.planner, graph=graph),
      objective=bind_or_none(params.objective, graph=graph),
      exploration=params.exploration,
      preprocess_fn=config.preprocess_fn,
      postprocess_fn=config.postprocess_fn,
      aug_fn=config.aug_fn,
      logdir=config.logdir,
      agent=config.planner,
      rival=config.rival
  )
  params = params.copy()
  with params.unlocked:
    params.update(agent_config)
  with agent_config.unlocked:
    agent_config.update(params)
  summary, return_, cleanup = control.simulate(
      graph.step, env_ctor, params.task.max_length,
      params.num_agents, agent_config, config.isolate_envs,
      expensive_summaries, gif_summary, name=name)
  cleanups.append(cleanup)  # Work around tf.cond() tensor return type.
  return summary, return_


def print_metrics(metrics, step, every, name='metrics'):
  means, updates = [], []
  for key, value in metrics.items():
    key = 'metrics_{}_{}'.format(name, key)
    mean = tools.StreamingMean((), tf.float32, key)
    means.append(mean)
    updates.append(mean.submit(value))
  with tf.control_dependencies(updates):
    # message = 'step/' + '/'.join(metrics.keys()) + ' = '
    message = '{}: step/{} ='.format(name, '/'.join(metrics.keys()))
    gs = tf.train.get_or_create_global_step()
    # Only prints every `every` steps; clearing each mean resets the
    # running averages between prints.
    print_op = tf.cond(
        tf.equal(step % every, 0),
        lambda: tf.print(message, [gs] + [mean.clear() for mean in means]),
        tf.no_op)
  return print_op


def collect_initial_episodes(config):
  items = config.random_collects.items()
  items = sorted(items, key=lambda x: x[0])
  # Reuse episodes already on disk and only collect the shortfall.
  existing = {}
  for name, params in items:
    outdir = params.save_episode_dir
    tf.gfile.MakeDirs(outdir)
    if outdir not in existing:
      existing[outdir] = len(tf.gfile.Glob(os.path.join(outdir, '*.npz')))
    if params.num_episodes <= existing[outdir]:
      existing[outdir] -= params.num_episodes
    else:
      remaining = params.num_episodes - existing[outdir]
      existing[outdir] = 0
      env_ctor = params.task.env_ctor
      print('Collecting {} initial episodes ({}).'.format(remaining, name))
      control.random_episodes(env_ctor, remaining, outdir)
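The horizon-sum and pairwise-hinge tricks above are easy to sanity-check outside a TensorFlow graph. The sketch below is not part of the repository: it is a minimal NumPy re-implementation (the names np_horizon_sum and np_pairwise_hinge are made up here) that mirrors horizon_sumV1's banded-matrix windowed sum and the broadcasted hinge shared by the contra_traj_loss* variants.

# Minimal NumPy sketch (assumed re-implementation, not from the repo).
import numpy as np


def np_horizon_sum(x, horizon):
  # x: [batch, episode_len] -> [batch, episode_len - horizon + 1].
  epi_len = x.shape[1]
  new_w = epi_len - horizon + 1
  weights = np.zeros([epi_len, new_w])
  for i in range(new_w):
    weights[i:i + horizon, i] = 1.0  # column i sums steps i .. i + horizon - 1
  return x @ weights


def np_pairwise_hinge(pred, tgt):
  # All-pairs hinge from broadcasted differences, as in the losses above.
  p, t = pred.reshape(-1), tgt.reshape(-1)
  tgt_dif = t[:, None] - t[None, :]
  pred_dif = p[:, None] - p[None, :]
  sign = np.where(tgt_dif > 0, 1.0, -1.0)  # orient so the target diff >= 0
  loss = np.maximum(0.0, sign * tgt_dif - sign * pred_dif)
  return loss.mean(), (loss > 0).mean()  # (final_loss, cstr_pct)


x = np.arange(10, dtype=np.float64).reshape(1, 10)
assert np.isclose(np_horizon_sum(x, 3)[0, 0], 0 + 1 + 2)  # first length-3 window
print(np_pairwise_hinge(np.random.rand(1, 8), np.random.rand(1, 8)))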
[ "tensorflow.convert_to_tensor", "tensorflow.concat", "tensorflow.control_dependencies", "tensorflow.gfile.Exists", "tensorflow.stack", "tensorflow.gfile.GFile", "tensorflow.cast", "tensorflow.equal", "tensorflow.reduce_sum", "tensorflow.tanh", "tensorflow.gfile.MakeDirs", "tensorflow.where", "tensorflow.while_loop", "tensorflow.train.get_or_create_global_step", "tensorflow.stop_gradient", "tensorflow.gather", "tensorflow.logging.set_verbosity", "numpy.zeros", "tensorflow.matmul", "tensorflow.shape", "tensorflow.identity", "tensorflow.exp", "tensorflow.random.shuffle", "tensorflow.logging.info", "tensorflow.no_op", "tensorflow.split", "tensorflow.print", "tensorflow.size", "tensorflow.compat.v1.losses.mean_squared_error", "tensorflow.constant", "tensorflow.range", "tensorflow.reduce_mean", "tensorflow.slice", "tensorflow.maximum", "tensorflow.math.count_nonzero", "tensorflow.reshape", "tensorflow.variable_scope", "tensorflow.math.top_k", "tensorflow.random_uniform" ]
planet/training/utility.py
[(32, 'collections.namedtuple', 'collections.namedtuple', (['"""Objective"""', '"""name, value, goal, include, exclude"""'], {}), False, 'import collections\n'), (38, 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), True, 'import tensorflow as tf\n'), (41, 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(message)s"""'}), False, 'import logging\n'), (165, 'planet.training.trainer.Trainer', 'trainer_.Trainer', (['logdir'], {'config': 'config'}), True, 'from planet.training import trainer as trainer_\n'), (226, 'planet.training.trainer.Trainer', 'trainer_.Trainer', (['logdir'], {'config': 'config'}), True, 'from planet.training import trainer as trainer_\n'), (332, 'tensorflow.split', 'tf.split', (['pred', '(2)'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (333, 'tensorflow.split', 'tf.split', (['tgt', '(2)'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (334, 'tensorflow.tanh', 'tf.tanh', (['((tgt1 - tgt2) * temp)'], {}), True, 'import tensorflow as tf\n'), (335, 'tensorflow.maximum', 'tf.maximum', (['(0.0)', '(soft_sign * (tgt1 - tgt2 - (pred1 - pred2)))'], {}), True, 'import tensorflow as tf\n'), (336, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), True, 'import tensorflow as tf\n'), (343, 'tensorflow.split', 'tf.split', (['pred', '(2)'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (344, 'tensorflow.split', 'tf.split', (['tgt', '(2)'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (345, 'tensorflow.cast', 'tf.cast', (['(tgt1 - tgt2 > 0)', 'tf.bool'], {}), True, 'import tensorflow as tf\n'), (346, 'tensorflow.where', 'tf.where', (['geq', 'tgt1', 'tgt2'], {}), True, 'import tensorflow as tf\n'), (347, 'tensorflow.where', 'tf.where', (['geq', 'tgt2', 'tgt1'], {}), True, 'import tensorflow as tf\n'), (348, 'tensorflow.where', 'tf.where', (['geq', 'pred1', 'pred2'], {}), True, 'import tensorflow as tf\n'), (349, 'tensorflow.where', 'tf.where', (['geq', 'pred2', 'pred1'], {}), True, 'import tensorflow as tf\n'), (351, 'tensorflow.maximum', 'tf.maximum', (['(0.0)', '(tgt_larg - tgt_small - (pred_larg - pred_small))'], {}), True, 'import tensorflow as tf\n'), (352, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), True, 'import tensorflow as tf\n'), (359, 'tensorflow.split', 'tf.split', (['pred', '(2)'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (360, 'tensorflow.split', 'tf.split', (['tgt', '(2)'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (361, 'tensorflow.cast', 'tf.cast', (['(tgt1 - tgt2 > 0)', 'tf.bool'], {}), True, 'import tensorflow as tf\n'), (362, 'tensorflow.where', 'tf.where', (['geq', 'tgt1', 'tgt2'], {}), True, 'import tensorflow as tf\n'), (363, 'tensorflow.where', 'tf.where', (['geq', 'tgt2', 'tgt1'], {}), True, 'import tensorflow as tf\n'), (364, 'tensorflow.where', 'tf.where', (['geq', 'pred1', 'pred2'], {}), True, 'import tensorflow as tf\n'), (365, 'tensorflow.where', 'tf.where', (['geq', 'pred2', 'pred1'], {}), True, 'import tensorflow as tf\n'), (367, 'tensorflow.maximum', 'tf.maximum', (['(0.0)', '(tgt_larg - tgt_small - (pred_larg - pred_small) + margin)'], {}), True, 'import tensorflow as tf\n'), (368, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), True, 'import tensorflow as tf\n'), (377, 'tensorflow.gather', 'tf.gather', (['pred', 'even'], {}), True, 'import tensorflow as tf\n'), (378, 'tensorflow.gather', 'tf.gather', (['pred', 'odd'], {}), True, 'import tensorflow as tf\n'), (379, 
'tensorflow.gather', 'tf.gather', (['tgt', 'even'], {}), True, 'import tensorflow as tf\n'), (380, 'tensorflow.gather', 'tf.gather', (['tgt', 'odd'], {}), True, 'import tensorflow as tf\n'), (382, 'tensorflow.cast', 'tf.cast', (['(tgt1 - tgt2 > 0)', 'tf.bool'], {}), True, 'import tensorflow as tf\n'), (383, 'tensorflow.where', 'tf.where', (['geq', 'tgt1', 'tgt2'], {}), True, 'import tensorflow as tf\n'), (384, 'tensorflow.where', 'tf.where', (['geq', 'tgt2', 'tgt1'], {}), True, 'import tensorflow as tf\n'), (385, 'tensorflow.where', 'tf.where', (['geq', 'pred1', 'pred2'], {}), True, 'import tensorflow as tf\n'), (386, 'tensorflow.where', 'tf.where', (['geq', 'pred2', 'pred1'], {}), True, 'import tensorflow as tf\n'), (388, 'tensorflow.maximum', 'tf.maximum', (['(0.0)', '(tgt_larg - tgt_small - (pred_larg - pred_small))'], {}), True, 'import tensorflow as tf\n'), (390, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), True, 'import tensorflow as tf\n'), (397, 'tensorflow.reshape', 'tf.reshape', (['pred', '[-1]'], {}), True, 'import tensorflow as tf\n'), (398, 'tensorflow.reshape', 'tf.reshape', (['tgt', '[-1]'], {}), True, 'import tensorflow as tf\n'), (399, 'tensorflow.stack', 'tf.stack', (['[pred_flat, tgt_flat]', '(1)'], {}), True, 'import tensorflow as tf\n'), (401, 'tensorflow.range', 'tf.range', (['num_sam'], {}), True, 'import tensorflow as tf\n'), (402, 'tensorflow.constant', 'tf.constant', (['resample'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (426, 'tensorflow.constant', 'tf.constant', (['(0)'], {}), True, 'import tensorflow as tf\n'), (427, 'tensorflow.constant', 'tf.constant', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (447, 'tensorflow.cast', 'tf.cast', (['(tgt1 - tgt2 > 0)', 'tf.bool'], {}), True, 'import tensorflow as tf\n'), (448, 'tensorflow.where', 'tf.where', (['geq', 'tgt1', 'tgt2'], {}), True, 'import tensorflow as tf\n'), (449, 'tensorflow.where', 'tf.where', (['geq', 'tgt2', 'tgt1'], {}), True, 'import tensorflow as tf\n'), (450, 'tensorflow.where', 'tf.where', (['geq', 'pred1', 'pred2'], {}), True, 'import tensorflow as tf\n'), (451, 'tensorflow.where', 'tf.where', (['geq', 'pred2', 'pred1'], {}), True, 'import tensorflow as tf\n'), (452, 'tensorflow.maximum', 'tf.maximum', (['(0.0)', '(tgt_larg - tgt_small - (pred_larg - pred_small))'], {}), True, 'import tensorflow as tf\n'), (462, 'tensorflow.cast', 'tf.cast', (['(tgt1 - tgt2 > 0)', 'tf.bool'], {}), True, 'import tensorflow as tf\n'), (463, 'tensorflow.where', 'tf.where', (['geq', 'tgt1', 'tgt2'], {}), True, 'import tensorflow as tf\n'), (464, 'tensorflow.where', 'tf.where', (['geq', 'tgt2', 'tgt1'], {}), True, 'import tensorflow as tf\n'), (465, 'tensorflow.where', 'tf.where', (['geq', 'pred1', 'pred2'], {}), True, 'import tensorflow as tf\n'), (466, 'tensorflow.where', 'tf.where', (['geq', 'pred2', 'pred1'], {}), True, 'import tensorflow as tf\n'), (467, 'tensorflow.maximum', 'tf.maximum', (['(0.0)', '(tgt_larg - tgt_small - (pred_larg - pred_small))'], {}), True, 'import tensorflow as tf\n'), (478, 'tensorflow.range', 'tf.range', (['num_sam'], {}), True, 'import tensorflow as tf\n'), (479, 'tensorflow.slice', 'tf.slice', (['batch', '[0, 1]', '[num_sam, 1]'], {}), True, 'import tensorflow as tf\n'), (480, 'tensorflow.slice', 'tf.slice', (['batch', '[0, 0]', '[num_sam, 1]'], {}), True, 'import tensorflow as tf\n'), (495, 'tensorflow.reshape', 'tf.reshape', (['horizon_pred', '[-1]'], {}), True, 'import tensorflow as tf\n'), (496, 'tensorflow.reshape', 'tf.reshape', 
(['horizon_tgt', '[-1]'], {}), True, 'import tensorflow as tf\n'), (497, 'tensorflow.stack', 'tf.stack', (['[pred_flat, tgt_flat]', '(1)'], {}), True, 'import tensorflow as tf\n'), (513, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), True, 'import tensorflow as tf\n'), (525, 'tensorflow.cast', 'tf.cast', (['(tgt_dif > 0)', 'tf.bool'], {}), True, 'import tensorflow as tf\n'), (526, 'tensorflow.where', 'tf.where', (['geq', 'tgt_dif', '(-tgt_dif)'], {}), True, 'import tensorflow as tf\n'), (527, 'tensorflow.where', 'tf.where', (['geq', 'pred_dif', '(-pred_dif)'], {}), True, 'import tensorflow as tf\n'), (528, 'tensorflow.maximum', 'tf.maximum', (['(0.0)', '(tgt_posi_dif - pred_posi_dif)'], {}), True, 'import tensorflow as tf\n'), (531, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), True, 'import tensorflow as tf\n'), (542, 'tensorflow.cast', 'tf.cast', (['(tgt_dif > 0)', 'tf.bool'], {}), True, 'import tensorflow as tf\n'), (543, 'tensorflow.where', 'tf.where', (['geq', 'tgt_dif', '(-tgt_dif)'], {}), True, 'import tensorflow as tf\n'), (544, 'tensorflow.where', 'tf.where', (['geq', 'pred_dif', '(-pred_dif)'], {}), True, 'import tensorflow as tf\n'), (545, 'tensorflow.maximum', 'tf.maximum', (['(0.0)', '(tgt_posi_dif - pred_posi_dif)'], {}), True, 'import tensorflow as tf\n'), (548, 'tensorflow.exp', 'tf.exp', (['((tgt_flat1 + tgt_flat2) / temp)'], {}), True, 'import tensorflow as tf\n'), (559, 'tensorflow.split', 'tf.split', (['horizon_pred', '(2)'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (560, 'tensorflow.split', 'tf.split', (['horizon_tgt', '(2)'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (565, 'tensorflow.cast', 'tf.cast', (['(tgt_dif > 0)', 'tf.bool'], {}), True, 'import tensorflow as tf\n'), (566, 'tensorflow.where', 'tf.where', (['geq', 'tgt_dif', '(-tgt_dif)'], {}), True, 'import tensorflow as tf\n'), (567, 'tensorflow.where', 'tf.where', (['geq', 'pred_dif', '(-pred_dif)'], {}), True, 'import tensorflow as tf\n'), (568, 'tensorflow.maximum', 'tf.maximum', (['(0.0)', '(tgt_posi_dif - pred_posi_dif)'], {}), True, 'import tensorflow as tf\n'), (571, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), True, 'import tensorflow as tf\n'), (582, 'tensorflow.cast', 'tf.cast', (['(tgt_dif > 0)', 'tf.bool'], {}), True, 'import tensorflow as tf\n'), (584, 'tensorflow.where', 'tf.where', (['geq', 'pred_dif', '(-pred_dif)'], {}), True, 'import tensorflow as tf\n'), (585, 'tensorflow.maximum', 'tf.maximum', (['(0.0)', '(margin - pred_posi_dif)'], {}), True, 'import tensorflow as tf\n'), (588, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), True, 'import tensorflow as tf\n'), (595, 'tensorflow.reshape', 'tf.reshape', (['horizon_pred', '[-1]'], {}), True, 'import tensorflow as tf\n'), (596, 'tensorflow.reshape', 'tf.reshape', (['horizon_tgt', '[-1]'], {}), True, 'import tensorflow as tf\n'), (597, 'tensorflow.stack', 'tf.stack', (['[pred_flat, tgt_flat]', '(1)'], {}), True, 'import tensorflow as tf\n'), (613, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), True, 'import tensorflow as tf\n'), (620, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['pred'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (621, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['tgt'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (622, 'tensorflow.split', 'tf.split', (['traj_pred', '(2)'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (623, 'tensorflow.split', 'tf.split', (['traj_tgt', '(2)'], {'axis': '(0)'}), True, 
'import tensorflow as tf\n'), (624, 'tensorflow.tanh', 'tf.tanh', (['((t1 - t2) * temp)'], {}), True, 'import tensorflow as tf\n'), (625, 'tensorflow.maximum', 'tf.maximum', (['(0.0)', '(soft_sign * (t1 - t2 - (p1 - p2)))'], {}), True, 'import tensorflow as tf\n'), (626, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), True, 'import tensorflow as tf\n'), (633, 'numpy.zeros', 'np.zeros', (['[epi_len, new_w]'], {}), True, 'import numpy as np\n'), (636, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['weights'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (637, 'tensorflow.matmul', 'tf.matmul', (['input', 'weights'], {}), True, 'import tensorflow as tf\n'), (652, 'tensorflow.random_uniform', 'tf.random_uniform', (['()', '(0)', 'horizon'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (656, 'tensorflow.matmul', 'tf.matmul', (['pred', 'cur_weights'], {}), True, 'import tensorflow as tf\n'), (657, 'tensorflow.matmul', 'tf.matmul', (['tgt', 'cur_weights'], {}), True, 'import tensorflow as tf\n'), (666, 'tensorflow.split', 'tf.split', (['horizon_pred', '(2)'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (667, 'tensorflow.split', 'tf.split', (['horizon_tgt', '(2)'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (669, 'tensorflow.cast', 'tf.cast', (['(tgt1 - tgt2 > 0)', 'tf.bool'], {}), True, 'import tensorflow as tf\n'), (670, 'tensorflow.where', 'tf.where', (['geq', 'tgt1', 'tgt2'], {}), True, 'import tensorflow as tf\n'), (671, 'tensorflow.where', 'tf.where', (['geq', 'tgt2', 'tgt1'], {}), True, 'import tensorflow as tf\n'), (672, 'tensorflow.where', 'tf.where', (['geq', 'pred1', 'pred2'], {}), True, 'import tensorflow as tf\n'), (673, 'tensorflow.where', 'tf.where', (['geq', 'pred2', 'pred1'], {}), True, 'import tensorflow as tf\n'), (675, 'tensorflow.maximum', 'tf.maximum', (['(0.0)', '(tgt_larg - tgt_small - (pred_larg - pred_small))'], {}), True, 'import tensorflow as tf\n'), (676, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), True, 'import tensorflow as tf\n'), (690, 'tensorflow.gather', 'tf.gather', (['horizon_pred', 'even'], {}), True, 'import tensorflow as tf\n'), (691, 'tensorflow.gather', 'tf.gather', (['horizon_pred', 'odd'], {}), True, 'import tensorflow as tf\n'), (692, 'tensorflow.gather', 'tf.gather', (['horizon_tgt', 'even'], {}), True, 'import tensorflow as tf\n'), (693, 'tensorflow.gather', 'tf.gather', (['horizon_tgt', 'odd'], {}), True, 'import tensorflow as tf\n'), (695, 'tensorflow.cast', 'tf.cast', (['(tgt1 - tgt2 > 0)', 'tf.bool'], {}), True, 'import tensorflow as tf\n'), (696, 'tensorflow.where', 'tf.where', (['geq', 'tgt1', 'tgt2'], {}), True, 'import tensorflow as tf\n'), (697, 'tensorflow.where', 'tf.where', (['geq', 'tgt2', 'tgt1'], {}), True, 'import tensorflow as tf\n'), (698, 'tensorflow.where', 'tf.where', (['geq', 'pred1', 'pred2'], {}), True, 'import tensorflow as tf\n'), (699, 'tensorflow.where', 'tf.where', (['geq', 'pred2', 'pred1'], {}), True, 'import tensorflow as tf\n'), (701, 'tensorflow.maximum', 'tf.maximum', (['(0.0)', '(tgt_larg - tgt_small - (pred_larg - pred_small))'], {}), True, 'import tensorflow as tf\n'), (702, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), True, 'import tensorflow as tf\n'), (785, 'planet.control.simulate', 'control.simulate', (['graph.step', 'env_ctor', 'params.task.max_length', 'params.num_agents', 'agent_config', 'config.isolate_envs', 'expensive_summaries', 'gif_summary'], {'name': 'name'}), False, 'from planet import 
control\n'), (39, 'logging.getLogger', 'logging.getLogger', (['"""tensorflow"""'], {}), False, 'import logging\n'), (40, 'logging.getLogger', 'logging.getLogger', (['"""tensorflow"""'], {}), False, 'import logging\n'), (62, 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['config.logdir'], {}), True, 'import tensorflow as tf\n'), (63, 'os.path.join', 'os.path.join', (['config.logdir', '"""config.yaml"""'], {}), False, 'import os\n'), (73, 'tensorflow.logging.info', 'tf.logging.info', (['message'], {}), True, 'import tensorflow as tf\n'), (90, 'os.path.join', 'os.path.join', (['logdir', '"""config.yaml"""'], {}), False, 'import os\n'), (96, 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['config_path', '"""r"""'], {}), True, 'import tensorflow as tf\n'), (98, 'ruamel.yaml.load', 'yaml.load', (['file_', 'yaml.Loader'], {}), True, 'import ruamel.yaml as yaml\n'), (122, 'tensorflow.equal', 'tf.equal', (['phase', '"""train"""'], {}), True, 'import tensorflow as tf\n'), (156, 'os.path.expanduser', 'os.path.expanduser', (['logdir'], {}), False, 'import os\n'), (218, 'os.path.expanduser', 'os.path.expanduser', (['logdir'], {}), False, 'import os\n'), (400, 'planet.tools.shape', 'tools.shape', (['batch'], {}), False, 'from planet import tools\n'), (407, 'tensorflow.slice', 'tf.slice', (['batch1', '[0, 0]', '[num_sam, 1]'], {}), True, 'import tensorflow as tf\n'), (408, 'tensorflow.slice', 'tf.slice', (['batch2', '[0, 0]', '[num_sam, 1]'], {}), True, 'import tensorflow as tf\n'), (409, 'tensorflow.slice', 'tf.slice', (['batch1', '[0, 1]', '[num_sam, 1]'], {}), True, 'import tensorflow as tf\n'), (410, 'tensorflow.slice', 'tf.slice', (['batch2', '[0, 1]', '[num_sam, 1]'], {}), True, 'import tensorflow as tf\n'), (428, 'tensorflow.while_loop', 'tf.while_loop', (['(lambda l, i: i < resample)', 'sample_compute', '[loss, i]'], {}), True, 'import tensorflow as tf\n'), (433, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['final_loss'], {}), True, 'import tensorflow as tf\n'), (455, 'tensorflow.reshape', 'tf.reshape', (['loss', '[-1]'], {}), True, 'import tensorflow as tf\n'), (456, 'tensorflow.math.top_k', 'tf.math.top_k', (['loss'], {'k': 'hard_num'}), True, 'import tensorflow as tf\n'), (470, 'tensorflow.reshape', 'tf.reshape', (['loss', '[-1]'], {}), True, 'import tensorflow as tf\n'), (471, 'tensorflow.math.top_k', 'tf.math.top_k', (['loss'], {'k': 'hard_num'}), True, 'import tensorflow as tf\n'), (477, 'planet.tools.shape', 'tools.shape', (['batch'], {}), False, 'from planet import tools\n'), (484, 'tensorflow.slice', 'tf.slice', (['batch2', '[0, 0]', '[num_sam, 1]'], {}), True, 'import tensorflow as tf\n'), (485, 'tensorflow.slice', 'tf.slice', (['batch2', '[0, 1]', '[num_sam, 1]'], {}), True, 'import tensorflow as tf\n'), (521, 'tensorflow.reshape', 'tf.reshape', (['horizon_pred', '[-1, 1]'], {}), True, 'import tensorflow as tf\n'), (521, 'tensorflow.reshape', 'tf.reshape', (['horizon_pred', '[1, -1]'], {}), True, 'import tensorflow as tf\n'), (522, 'tensorflow.reshape', 'tf.reshape', (['horizon_tgt', '[-1, 1]'], {}), True, 'import tensorflow as tf\n'), (522, 'tensorflow.reshape', 'tf.reshape', (['horizon_tgt', '[1, -1]'], {}), True, 'import tensorflow as tf\n'), (530, 'tensorflow.math.count_nonzero', 'tf.math.count_nonzero', (['loss'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (538, 'tensorflow.reshape', 'tf.reshape', (['horizon_pred', '[-1, 1]'], {}), True, 'import tensorflow as tf\n'), (538, 'tensorflow.reshape', 'tf.reshape', (['horizon_pred', '[1, -1]'], {}), True, 'import 
tensorflow as tf\n'), (539, 'tensorflow.reshape', 'tf.reshape', (['horizon_tgt', '[-1, 1]'], {}), True, 'import tensorflow as tf\n'), (539, 'tensorflow.reshape', 'tf.reshape', (['horizon_tgt', '[1, -1]'], {}), True, 'import tensorflow as tf\n'), (546, 'tensorflow.math.count_nonzero', 'tf.math.count_nonzero', (['loss'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (549, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['unorm_w'], {}), True, 'import tensorflow as tf\n'), (551, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['unorm_w'], {}), True, 'import tensorflow as tf\n'), (552, 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[a]'], {}), True, 'import tensorflow as tf\n'), (553, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['loss'], {}), True, 'import tensorflow as tf\n'), (561, 'tensorflow.reshape', 'tf.reshape', (['horizon_pred1', '[-1, 1]'], {}), True, 'import tensorflow as tf\n'), (561, 'tensorflow.reshape', 'tf.reshape', (['horizon_pred2', '[1, -1]'], {}), True, 'import tensorflow as tf\n'), (562, 'tensorflow.reshape', 'tf.reshape', (['horizon_tgt1', '[-1, 1]'], {}), True, 'import tensorflow as tf\n'), (562, 'tensorflow.reshape', 'tf.reshape', (['horizon_tgt2', '[1, -1]'], {}), True, 'import tensorflow as tf\n'), (570, 'tensorflow.math.count_nonzero', 'tf.math.count_nonzero', (['loss'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (578, 'tensorflow.reshape', 'tf.reshape', (['horizon_pred', '[-1, 1]'], {}), True, 'import tensorflow as tf\n'), (578, 'tensorflow.reshape', 'tf.reshape', (['horizon_pred', '[1, -1]'], {}), True, 'import tensorflow as tf\n'), (579, 'tensorflow.reshape', 'tf.reshape', (['horizon_tgt', '[-1, 1]'], {}), True, 'import tensorflow as tf\n'), (579, 'tensorflow.reshape', 'tf.reshape', (['horizon_tgt', '[1, -1]'], {}), True, 'import tensorflow as tf\n'), (587, 'tensorflow.math.count_nonzero', 'tf.math.count_nonzero', (['loss'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (646, 'numpy.zeros', 'np.zeros', (['[epi_len, epi_len]'], {}), True, 'import numpy as np\n'), (749, 'tensorflow.equal', 'tf.equal', (['trainer.phase', '"""train"""'], {}), True, 'import tensorflow as tf\n'), (762, 'planet.control.wrappers.ConcatObservation', 'control.wrappers.ConcatObservation', (['env', "['image']"], {}), False, 'from planet import control\n'), (797, 'planet.tools.StreamingMean', 'tools.StreamingMean', (['()', 'tf.float32', 'key'], {}), False, 'from planet import tools\n'), (800, 'tensorflow.control_dependencies', 'tf.control_dependencies', (['updates'], {}), True, 'import tensorflow as tf\n'), (803, 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), True, 'import tensorflow as tf\n'), (817, 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['outdir'], {}), True, 'import tensorflow as tf\n'), (64, 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['config_path', '"""w"""'], {}), True, 'import tensorflow as tf\n'), (65, 'ruamel.yaml.dump', 'yaml.dump', (['config', 'file_', 'yaml.Dumper'], {'allow_unicode': '(True)', 'default_flow_style': '(False)'}), True, 'import ruamel.yaml as yaml\n'), (91, 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['config_path'], {}), True, 'import tensorflow as tf\n'), (168, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""graph"""'], {'use_resource': '(True)'}), True, 'import tensorflow as tf\n'), (229, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""graph"""'], {'use_resource': '(True)'}), True, 'import tensorflow as tf\n'), (262, 
'tensorflow.stop_gradient', 'tf.stop_gradient', (['raw_features'], {}), True, 'import tensorflow as tf\n'), (405, 'tensorflow.random.shuffle', 'tf.random.shuffle', (['index'], {}), True, 'import tensorflow as tf\n'), (406, 'tensorflow.random.shuffle', 'tf.random.shuffle', (['index'], {}), True, 'import tensorflow as tf\n'), (483, 'tensorflow.random.shuffle', 'tf.random.shuffle', (['index'], {}), True, 'import tensorflow as tf\n'), (503, 'tensorflow.math.count_nonzero', 'tf.math.count_nonzero', (['loss'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (503, 'tensorflow.size', 'tf.size', (['loss'], {'out_type': 'tf.float32'}), True, 'import tensorflow as tf\n'), (508, 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[p]'], {}), True, 'import tensorflow as tf\n'), (509, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), True, 'import tensorflow as tf\n'), (511, 'tensorflow.range', 'tf.range', (['resample'], {}), True, 'import tensorflow as tf\n'), (604, 'tensorflow.math.count_nonzero', 'tf.math.count_nonzero', (['loss'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (604, 'tensorflow.size', 'tf.size', (['loss'], {'out_type': 'tf.float32'}), True, 'import tensorflow as tf\n'), (608, 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[p]'], {}), True, 'import tensorflow as tf\n'), (609, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), True, 'import tensorflow as tf\n'), (611, 'tensorflow.range', 'tf.range', (['resample'], {}), True, 'import tensorflow as tf\n'), (650, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['weights'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (713, 'tensorflow.control_dependencies', 'tf.control_dependencies', (['values'], {}), True, 'import tensorflow as tf\n'), (714, 'tensorflow.identity', 'tf.identity', (['loss'], {}), True, 'import tensorflow as tf\n'), (726, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['losses'], {}), True, 'import tensorflow as tf\n'), (761, 'planet.control.wrappers.CollectGymDataset', 'control.wrappers.CollectGymDataset', (['env', 'params.save_episode_dir'], {}), False, 'from planet import control\n'), (765, 'functools.partial', 'functools.partial', (['x'], {}), False, 'import functools\n'), (770, 'functools.partial', 'functools.partial', (['params.planner'], {'graph': 'graph'}), False, 'import functools\n'), (805, 'tensorflow.equal', 'tf.equal', (['(step % every)', '(0)'], {}), True, 'import tensorflow as tf\n'), (827, 'planet.control.random_episodes', 'control.random_episodes', (['env_ctor', 'remaining', 'outdir'], {}), False, 'from planet import control\n'), (130, 'tensorflow.shape', 'tf.shape', (['example'], {}), True, 'import tensorflow as tf\n'), (277, 'planet.tools.shape', 'tools.shape', (["graph.data['action']"], {}), False, 'from planet import tools\n'), (279, 'planet.tools.overshooting', 'tools.overshooting', (['graph.cell', '{}', 'graph.embedded', "graph.data['action']", 'length', 'config.overshooting_distance', 'posterior'], {}), False, 'from planet import tools\n'), (282, 'planet.tools.nested.map', 'tools.nested.map', (['(lambda x: x[:, :, 1:-1])', '(posteriors, priors, mask)'], {}), False, 'from planet import tools\n'), (324, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['o.value'], {}), True, 'import tensorflow as tf\n'), (505, 'tensorflow.random_uniform', 'tf.random_uniform', (['()'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (506, 'tensorflow.print', 'tf.print', (['"""csrt acc """', '[pct]'], {}), 
True, 'import tensorflow as tf\n'), (507, 'tensorflow.no_op', 'tf.no_op', ([], {}), True, 'import tensorflow as tf\n'), (530, 'tensorflow.shape', 'tf.shape', (['loss'], {}), True, 'import tensorflow as tf\n'), (546, 'tensorflow.shape', 'tf.shape', (['loss'], {}), True, 'import tensorflow as tf\n'), (570, 'tensorflow.shape', 'tf.shape', (['loss'], {}), True, 'import tensorflow as tf\n'), (587, 'tensorflow.shape', 'tf.shape', (['loss'], {}), True, 'import tensorflow as tf\n'), (605, 'tensorflow.random_uniform', 'tf.random_uniform', (['()'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (606, 'tensorflow.print', 'tf.print', (['"""csrt acc """', '[pct]'], {}), True, 'import tensorflow as tf\n'), (607, 'tensorflow.no_op', 'tf.no_op', ([], {}), True, 'import tensorflow as tf\n'), (654, 'tensorflow.cast', 'tf.cast', (['rand_horizon', 'tf.int32'], {}), True, 'import tensorflow as tf\n'), (172, 'planet.tools.count_weights', 'tools.count_weights', ([], {}), False, 'from planet import tools\n'), (233, 'planet.tools.count_weights', 'tools.count_weights', ([], {}), False, 'from planet import tools\n'), (285, 'planet.tools.nested.map', 'tools.nested.map', (['tf.stop_gradient', 'posteriors'], {}), False, 'from planet import tools\n'), (454, 'planet.tools.shape', 'tools.shape', (['pred1'], {}), False, 'from planet import tools\n'), (469, 'planet.tools.shape', 'tools.shape', (['pred1'], {}), False, 'from planet import tools\n'), (819, 'os.path.join', 'os.path.join', (['outdir', '"""*.npz"""'], {}), False, 'import os\n'), (130, 'tensorflow.shape', 'tf.shape', (['example'], {}), True, 'import tensorflow as tf\n'), (278, 'tensorflow.constant', 'tf.constant', (['shape[1]'], {}), True, 'import tensorflow as tf\n'), (311, 'tensorflow.compat.v1.losses.mean_squared_error', 'tf.compat.v1.losses.mean_squared_error', (['target[name]', 'pred'], {}), True, 'import tensorflow as tf\n'), (316, 'tensorflow.concat', 'tf.concat', (["[features, target['aug']]", '(-1)'], {}), True, 'import tensorflow as tf\n')]
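For readers consuming these records: judging from the rows above, each api_extract tuple appears to hold (source line, fully qualified API name, call expression as written, an (args, kwargs) pair rendered as strings, whether the import was aliased, and the import statement). That layout is inferred from the visible data, not a documented schema. A minimal sketch that tallies API usage for one record, using tuples copied from above:

# Sketch (schema inferred from the rows above): count fully qualified APIs.
from collections import Counter

record = [
    (637, 'tensorflow.matmul', 'tf.matmul', (['input', 'weights'], {}),
     True, 'import tensorflow as tf\n'),
    (636, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor',
     (['weights'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'),
    (633, 'numpy.zeros', 'np.zeros', (['[epi_len, new_w]'], {}),
     True, 'import numpy as np\n'),
]

counts = Counter(api for _line, api, _call, _argspec, _aliased, _imp in record)
for api, n in counts.most_common():
  print(api, n)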
scrawfor1/TensorFlow
7e3b8b23835ab0ac55d390aed2349af6e05dbe3b
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow.ops.io_ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import os.path import time import contextlib import shutil import tempfile import tensorflow as tf import numpy as np import six from google.protobuf.any_pb2 import Any from tensorflow.core.protobuf import meta_graph_pb2 from tensorflow.core.protobuf import queue_runner_pb2 from tensorflow.python.framework import function from tensorflow.python.platform import gfile class SaverTest(tf.test.TestCase): def testBasics(self): save_path = os.path.join(self.get_temp_dir(), "basics") with self.test_session() as sess: # Build a graph with 2 parameter nodes, and Save and # Restore nodes for them. v0 = tf.Variable(10.0, name="v0") v1 = tf.Variable(20.0, name="v1") save = tf.train.Saver({"v0": v0, "v1": v1}, restore_sequentially=True) tf.initialize_all_variables().run() # Check that the parameter nodes have been initialized. self.assertEqual(10.0, v0.eval()) self.assertEqual(20.0, v1.eval()) # Save the initialized values in the file at "save_path" val = save.save(sess, save_path) self.assertTrue(isinstance(val, six.string_types)) self.assertEqual(save_path, val) # Start a second session. In that session the parameter nodes # have not been initialized either. with self.test_session() as sess: v0 = tf.Variable(-1.0, name="v0") v1 = tf.Variable(-1.0, name="v1") save = tf.train.Saver({"v0": v0, "v1": v1}) with self.assertRaisesWithPredicateMatch( tf.OpError, lambda e: "uninitialized value v0" in e.message): sess.run(v0) with self.assertRaisesWithPredicateMatch( tf.OpError, lambda e: "uninitialized value v1" in e.message): sess.run(v1) # Restore the saved values in the parameter nodes. save.restore(sess, save_path) # Check that the parameter nodes have been restored. self.assertEqual(10.0, v0.eval()) self.assertEqual(20.0, v1.eval()) # Build another graph with 2 nodes, initialized # differently, and a Restore node for them. with self.test_session() as sess: v0_2 = tf.Variable(1000.0, name="v0") v1_2 = tf.Variable(2000.0, name="v1") save2 = tf.train.Saver({"v0": v0_2, "v1": v1_2}) tf.initialize_all_variables().run() # Check that the parameter nodes have been initialized. self.assertEqual(1000.0, v0_2.eval()) self.assertEqual(2000.0, v1_2.eval()) # Restore the values saved earlier in the parameter nodes. save2.restore(sess, save_path) # Check that the parameter nodes have been restored. self.assertEqual(10.0, v0_2.eval()) self.assertEqual(20.0, v1_2.eval()) def testInt64(self): save_path = os.path.join(self.get_temp_dir(), "int64") with self.test_session() as sess: # Build a graph with 1 node, and save and restore for them. 
v = tf.Variable(np.int64(15), name="v") save = tf.train.Saver({"v": v}, restore_sequentially=True) tf.initialize_all_variables().run() # Save the initialized values in the file at "save_path" val = save.save(sess, save_path) self.assertTrue(isinstance(val, six.string_types)) self.assertEqual(save_path, val) with self.test_session() as sess: v = tf.Variable(np.int64(-1), name="v") save = tf.train.Saver({"v": v}) with self.assertRaisesWithPredicateMatch( tf.OpError, lambda e: "uninitialized value v" in e.message): sess.run(v) # Restore the saved values in the parameter nodes. save.restore(sess, save_path) # Check that the parameter nodes have been restored. self.assertEqual(np.int64(15), v.eval()) def testSomeErrors(self): with tf.Graph().as_default(): v0 = tf.Variable([10.0], name="v0") v1 = tf.Variable([20.0], name="v1") v2 = tf.Variable([20.0], name="v2") v2._set_save_slice_info(tf.Variable.SaveSliceInfo("v1", [1], [0], [1])) # By default the name used for "v2" will be "v1" and raise an error. with self.assertRaisesRegexp(ValueError, "same name: v1"): tf.train.Saver([v0, v1, v2]) # The names are different and will work. tf.train.Saver({"vee1": v1, "other": [v2]}) def testBasicsWithListOfVariables(self): save_path = os.path.join(self.get_temp_dir(), "basics_with_list") with self.test_session(graph=tf.Graph()) as sess: # Build a graph with 2 parameter nodes, and Save and # Restore nodes for them. v0 = tf.Variable(10.0, name="v0") v1 = tf.Variable(20.0, name="v1") save = tf.train.Saver([v0, v1]) tf.initialize_all_variables().run() # Check that the parameter nodes have been initialized. self.assertEqual(10.0, v0.eval()) self.assertEqual(20.0, v1.eval()) # Save the initialized values in the file at "save_path" val = save.save(sess, save_path) self.assertTrue(isinstance(val, six.string_types)) self.assertEqual(save_path, val) # Start a second session. In that session the variables # have not been initialized either. with self.test_session(graph=tf.Graph()) as sess: v0 = tf.Variable(-1.0, name="v0") v1 = tf.Variable(-1.0, name="v1") save = tf.train.Saver([v0, v1]) with self.assertRaisesWithPredicateMatch( tf.OpError, lambda e: "uninitialized value v0" in e.message): sess.run(v0) with self.assertRaisesWithPredicateMatch( tf.OpError, lambda e: "uninitialized value v1" in e.message): sess.run(v1) # Restore the saved values in the parameter nodes. save.restore(sess, save_path) # Check that the parameter nodes have been restored. self.assertEqual(10.0, v0.eval()) self.assertEqual(20.0, v1.eval()) # Build another graph with 2 nodes, initialized # differently, and a Restore node for them. with self.test_session(graph=tf.Graph()) as sess: v0_2 = tf.Variable(1000.0, name="v0") v1_2 = tf.Variable(2000.0, name="v1") save2 = tf.train.Saver([v0_2, v1_2]) tf.initialize_all_variables().run() # Check that the parameter nodes have been initialized. self.assertEqual(1000.0, v0_2.eval()) self.assertEqual(2000.0, v1_2.eval()) # Restore the values saved earlier in the parameter nodes. save2.restore(sess, save_path) # Check that the parameter nodes have been restored. 
self.assertEqual(10.0, v0_2.eval()) self.assertEqual(20.0, v1_2.eval()) def _SaveAndLoad(self, var_name, var_value, other_value, save_path): with self.test_session() as sess: var = tf.Variable(var_value, name=var_name) save = tf.train.Saver({var_name: var}) var.initializer.run() val = save.save(sess, save_path) self.assertEqual(save_path, val) with self.test_session() as sess: var = tf.Variable(other_value, name=var_name) save = tf.train.Saver({var_name: var}) save.restore(sess, save_path) self.assertAllClose(var_value, var.eval()) def testCacheRereadsFile(self): save_path = os.path.join(self.get_temp_dir(), "cache_rereads") # Save and reload one Variable named "var0". self._SaveAndLoad("var0", 0.0, 1.0, save_path) # Save and reload one Variable named "var1" in the same file. # The cached readers should know to re-read the file. self._SaveAndLoad("var1", 1.1, 2.2, save_path) def testGPU(self): if not tf.test.is_built_with_cuda(): return save_path = os.path.join(self.get_temp_dir(), "gpu") with tf.Session("", graph=tf.Graph()) as sess: with sess.graph.device("/gpu:0"): v0_1 = tf.Variable(123.45) save = tf.train.Saver({"v0": v0_1}) tf.initialize_all_variables().run() save.save(sess, save_path) with tf.Session("", graph=tf.Graph()) as sess: with sess.graph.device("/gpu:0"): v0_2 = tf.Variable(543.21) save = tf.train.Saver({"v0": v0_2}) tf.initialize_all_variables().run() self.assertAllClose(543.21, v0_2.eval()) save.restore(sess, save_path) self.assertAllClose(123.45, v0_2.eval()) def testVariables(self): save_path = os.path.join(self.get_temp_dir(), "variables") with tf.Session("", graph=tf.Graph()) as sess: one = tf.Variable(1.0) twos = tf.Variable([2.0, 2.0, 2.0]) init = tf.initialize_all_variables() save = tf.train.Saver(tf.all_variables()) init.run() save.save(sess, save_path) with tf.Session("", graph=tf.Graph()) as sess: one = tf.Variable(0.0) twos = tf.Variable([0.0, 0.0, 0.0]) # Saver with no arg, defaults to 'all variables'. save = tf.train.Saver() save.restore(sess, save_path) self.assertAllClose(1.0, one.eval()) self.assertAllClose([2.0, 2.0, 2.0], twos.eval()) def testSaveWithGlobalStep(self): save_path = os.path.join(self.get_temp_dir(), "ckpt_with_global_step") global_step_int = 5 # Save and reload one Variable named "var0". self._SaveAndLoad("var0", 0.0, 1.0, save_path) for use_tensor in [True, False]: with self.test_session() as sess: var = tf.Variable(1.0, name="var0") save = tf.train.Saver({var.op.name: var}) var.initializer.run() if use_tensor: global_step = tf.constant(global_step_int) val = save.save(sess, save_path, global_step=global_step) else: val = save.save(sess, save_path, global_step=global_step_int) expected_save_path = "%s-%d" % (save_path, global_step_int) self.assertEqual(expected_save_path, val) class SaveRestoreShardedTest(tf.test.TestCase): def testBasics(self): save_path = os.path.join(self.get_temp_dir(), "sharded") # Build a graph with 2 parameter nodes on different devices. with tf.Session( target="", config=tf.ConfigProto(device_count={"CPU": 2})) as sess: with sess.graph.device("/cpu:0"): v0 = tf.Variable(10, name="v0") with sess.graph.device("/cpu:1"): v1 = tf.Variable(20, name="v1") save = tf.train.Saver({"v0": v0, "v1": v1}, sharded=True) tf.initialize_all_variables().run() val = save.save(sess, save_path) self.assertEqual(save_path + "-?????-of-00002", val) meta_graph_filename = save._MetaGraphFilename(val) self.assertEqual(save_path + ".meta", meta_graph_filename) # Restore a different "v0" from shard 0 of the saved files. 
with tf.Session( target="", config=tf.ConfigProto(device_count={"CPU": 2})) as sess: with sess.graph.device("/cpu:0"): v0 = tf.Variable(111, name="v0") save = tf.train.Saver({"v0": v0}, sharded=True) tf.initialize_all_variables().run() self.assertEqual(111, v0.eval()) save.restore(sess, save_path + "-00000-of-00002") self.assertEqual(10, v0.eval()) # Restore a different "v1" from shard 1 of the saved files. with tf.Session( target="", config=tf.ConfigProto(device_count={"CPU": 2})) as sess: with sess.graph.device("/cpu:0"): v1 = tf.Variable(222) save = tf.train.Saver({"v1": v1}, sharded=True) tf.initialize_all_variables().run() self.assertEqual(222, v1.eval()) save.restore(sess, save_path + "-00001-of-00002") self.assertEqual(20, v1.eval()) # Now try a restore with the sharded filename. with tf.Session( target="", config=tf.ConfigProto(device_count={"CPU": 2})) as sess: with sess.graph.device("/cpu:0"): v0 = tf.Variable(111, name="v0") with sess.graph.device("/cpu:1"): v1 = tf.Variable(222, name="v1") save = tf.train.Saver({"v0": v0, "v1": v1}, sharded=True) tf.initialize_all_variables().run() self.assertEqual(111, v0.eval()) self.assertEqual(222, v1.eval()) save_path = os.path.join(self.get_temp_dir(), "sharded") save.restore(sess, save_path + "-?????-of-?????") self.assertEqual(10, v0.eval()) self.assertEqual(20, v1.eval()) self.assertEqual( tf.train.latest_checkpoint(self.get_temp_dir()), os.path.join(self.get_temp_dir(), "sharded-?????-of-00002")) def testSaverDef(self): with self.test_session(): v0 = tf.Variable(123, name="v0") save = tf.train.Saver({"v0": v0}, sharded=True) sd = save.as_saver_def() self.assertTrue(sd.sharded) class MaxToKeepTest(tf.test.TestCase): def testNonSharded(self): save_dir = os.path.join(self.get_temp_dir(), "max_to_keep_non_sharded") try: gfile.DeleteRecursively(save_dir) except OSError: pass # Ignore gfile.MakeDirs(save_dir) with self.test_session() as sess: v = tf.Variable(10.0, name="v") save = tf.train.Saver({"v": v}, max_to_keep=2) tf.initialize_all_variables().run() self.assertEqual([], save.last_checkpoints) s1 = save.save(sess, os.path.join(save_dir, "s1")) self.assertEqual([s1], save.last_checkpoints) self.assertTrue(gfile.Exists(s1)) s2 = save.save(sess, os.path.join(save_dir, "s2")) self.assertEqual([s1, s2], save.last_checkpoints) self.assertTrue(gfile.Exists(s1)) self.assertTrue(gfile.Exists(s2)) s3 = save.save(sess, os.path.join(save_dir, "s3")) self.assertEqual([s2, s3], save.last_checkpoints) self.assertFalse(gfile.Exists(s1)) self.assertTrue(gfile.Exists(s2)) self.assertTrue(gfile.Exists(s3)) # Create a second helper, identical to the first. save2 = tf.train.Saver(saver_def=save.as_saver_def()) save2.set_last_checkpoints(save.last_checkpoints) # Create a third helper, with the same configuration but no knowledge of # previous checkpoints. save3 = tf.train.Saver(saver_def=save.as_saver_def()) # Exercise the first helper. 
# Adding s2 again (old s2 is removed first, then new s2 appended) s2 = save.save(sess, os.path.join(save_dir, "s2")) self.assertEqual([s3, s2], save.last_checkpoints) self.assertFalse(gfile.Exists(s1)) self.assertFalse(gfile.Exists(save._MetaGraphFilename(s1))) self.assertTrue(gfile.Exists(s3)) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s3))) self.assertTrue(gfile.Exists(s2)) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2))) # Adding s1 (s3 should now be deleted as oldest in list) s1 = save.save(sess, os.path.join(save_dir, "s1")) self.assertEqual([s2, s1], save.last_checkpoints) self.assertFalse(gfile.Exists(s3)) self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3))) self.assertTrue(gfile.Exists(s2)) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2))) self.assertTrue(gfile.Exists(s1)) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1))) # Exercise the second helper. # Adding s2 again (old s2 is removed first, then new s2 appended) s2 = save2.save(sess, os.path.join(save_dir, "s2")) self.assertEqual([s3, s2], save2.last_checkpoints) # Created by the first helper. self.assertTrue(gfile.Exists(s1)) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1))) # Deleted by the first helper. self.assertFalse(gfile.Exists(s3)) self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3))) self.assertTrue(gfile.Exists(s2)) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2))) # Adding s1 (s3 should now be deleted as oldest in list) s1 = save2.save(sess, os.path.join(save_dir, "s1")) self.assertEqual([s2, s1], save2.last_checkpoints) self.assertFalse(gfile.Exists(s3)) self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3))) self.assertTrue(gfile.Exists(s2)) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2))) self.assertTrue(gfile.Exists(s1)) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1))) # Exercise the third helper. # Adding s2 again (but helper is unaware of previous s2) s2 = save3.save(sess, os.path.join(save_dir, "s2")) self.assertEqual([s2], save3.last_checkpoints) # Created by the first helper. self.assertTrue(gfile.Exists(s1)) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1))) # Deleted by the first helper. 
self.assertFalse(gfile.Exists(s3)) self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3))) self.assertTrue(gfile.Exists(s2)) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2))) # Adding s1 (s3 should not be deleted because helper is unaware of it) s1 = save3.save(sess, os.path.join(save_dir, "s1")) self.assertEqual([s2, s1], save3.last_checkpoints) self.assertFalse(gfile.Exists(s3)) self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3))) self.assertTrue(gfile.Exists(s2)) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2))) self.assertTrue(gfile.Exists(s1)) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1))) def testSharded(self): save_dir = os.path.join(self.get_temp_dir(), "max_to_keep_sharded") try: gfile.DeleteRecursively(save_dir) except OSError: pass # Ignore gfile.MakeDirs(save_dir) with tf.Session( target="", config=tf.ConfigProto(device_count={"CPU": 2})) as sess: with sess.graph.device("/cpu:0"): v0 = tf.Variable(111, name="v0") with sess.graph.device("/cpu:1"): v1 = tf.Variable(222, name="v1") save = tf.train.Saver({"v0": v0, "v1": v1}, sharded=True, max_to_keep=2) tf.initialize_all_variables().run() self.assertEqual([], save.last_checkpoints) s1 = save.save(sess, os.path.join(save_dir, "s1")) self.assertEqual([s1], save.last_checkpoints) self.assertEqual(2, len(gfile.Glob(s1))) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1))) s2 = save.save(sess, os.path.join(save_dir, "s2")) self.assertEqual([s1, s2], save.last_checkpoints) self.assertEqual(2, len(gfile.Glob(s1))) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1))) self.assertEqual(2, len(gfile.Glob(s2))) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2))) s3 = save.save(sess, os.path.join(save_dir, "s3")) self.assertEqual([s2, s3], save.last_checkpoints) self.assertEqual(0, len(gfile.Glob(s1))) self.assertFalse(gfile.Exists(save._MetaGraphFilename(s1))) self.assertEqual(2, len(gfile.Glob(s2))) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2))) self.assertEqual(2, len(gfile.Glob(s3))) self.assertTrue(gfile.Exists(save._MetaGraphFilename(s3))) class KeepCheckpointEveryNHoursTest(tf.test.TestCase): def testNonSharded(self): save_dir = os.path.join(self.get_temp_dir(), "keep_checkpoint_every_n_hours") try: gfile.DeleteRecursively(save_dir) except OSError: pass # Ignore gfile.MakeDirs(save_dir) with self.test_session() as sess: v = tf.Variable([10.0], name="v") # Run the initializer NOW to avoid the 0.5s overhead of the first Run() # call, which throws the test timing off in fastbuild mode. tf.initialize_all_variables().run() # Create a saver that will keep the last 2 checkpoints plus one every 0.7 # seconds. start_time = time.time() save = tf.train.Saver({"v": v}, max_to_keep=2, keep_checkpoint_every_n_hours=0.7 / 3600) self.assertEqual([], save.last_checkpoints) # Wait till 0.7 second have elapsed so s1 will be old enough to keep. time.sleep((time.time() + 0.7) - start_time) s1 = save.save(sess, os.path.join(save_dir, "s1")) self.assertEqual([s1], save.last_checkpoints) s2 = save.save(sess, os.path.join(save_dir, "s2")) self.assertEqual([s1, s2], save.last_checkpoints) # We now have 2 'last_checkpoints': [s1, s2]. The next call to Save(), # would normally delete s1, because max_to_keep is 2. However, s1 is # older than 0.7s so we must keep it. s3 = save.save(sess, os.path.join(save_dir, "s3")) self.assertEqual([s2, s3], save.last_checkpoints) # s1 should still be here, we are Not checking now to reduce time # variance in the test. 
# We now have 2 'last_checkpoints': [s2, s3], and s1 on disk. The next # call to Save(), will delete s2, because max_to_keep is 2, and because # we already kept the old s1. s2 is very close in time to s1 so it gets # deleted. s4 = save.save(sess, os.path.join(save_dir, "s4")) self.assertEqual([s3, s4], save.last_checkpoints) # Check that s1 is still here, but s2 is gone. self.assertTrue(gfile.Exists(s1)) self.assertFalse(gfile.Exists(s2)) self.assertTrue(gfile.Exists(s3)) self.assertTrue(gfile.Exists(s4)) class SaveRestoreWithVariableNameMap(tf.test.TestCase): def testNonReshape(self): save_path = os.path.join(self.get_temp_dir(), "basics") with self.test_session() as sess: # Build a graph with 2 parameter nodes, and Save and # Restore nodes for them. v0 = tf.Variable(10.0, name="v0") v1 = tf.Variable(20.0, name="v1") save = tf.train.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1}) tf.initialize_all_variables().run() # Check that the parameter nodes have been initialized. self.assertEqual(10.0, v0.eval()) self.assertEqual(20.0, v1.eval()) # Save the initialized values in the file at "save_path" # Use a variable name map to set the saved tensor names val = save.save(sess, save_path) self.assertTrue(isinstance(val, six.string_types)) self.assertEqual(save_path, val) # Verify that the original names are not in the Saved file save = tf.train.Saver({"v0": v0, "v1": v1}) with self.assertRaisesOpError("not found in checkpoint"): save.restore(sess, save_path) # Verify that the mapped names are present in the Saved file and can be # Restored using remapped names. with self.test_session() as sess: v0 = tf.Variable(-1.0, name="v0") v1 = tf.Variable(-1.0, name="v1") with self.assertRaisesOpError("uninitialized value v0"): sess.run(v0) with self.assertRaisesOpError("uninitialized value v1"): sess.run(v1) save = tf.train.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1}) save.restore(sess, save_path) # Check that the parameter nodes have been restored. self.assertEqual(10.0, v0.eval()) self.assertEqual(20.0, v1.eval()) # Add a prefix to the node names in the current graph and Restore using # remapped names. with self.test_session() as sess: v0 = tf.Variable(-1.0, name="restore_prefix/v0") v1 = tf.Variable(-1.0, name="restore_prefix/v1") with self.assertRaisesOpError("uninitialized value restore_prefix/v0"): sess.run(v0) with self.assertRaisesOpError("uninitialized value restore_prefix/v1"): sess.run(v1) # Restore the saved values in the parameter nodes. save = tf.train.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1}) save.restore(sess, save_path) # Check that the parameter nodes have been restored. self.assertEqual(10.0, v0.eval()) self.assertEqual(20.0, v1.eval()) class LatestCheckpointWithRelativePaths(tf.test.TestCase): @staticmethod @contextlib.contextmanager def tempWorkingDir(temppath): cwd = os.getcwd() os.chdir(temppath) try: yield finally: os.chdir(cwd) @staticmethod @contextlib.contextmanager def tempDir(): tempdir = tempfile.mkdtemp() try: yield tempdir finally: shutil.rmtree(tempdir) def testRelativePath(self): # Make sure we have a clean directory to work in. with self.tempDir() as tempdir: # Jump to that directory until this test is done. with self.tempWorkingDir(tempdir): # Save training snapshots to a relative path. traindir = "train/" os.mkdir(traindir) filename = "snapshot" filepath = os.path.join(traindir, filename) with self.test_session() as sess: # Build a simple graph. 
v0 = tf.Variable(0.0) inc = v0.assign_add(1.0) save = tf.train.Saver({"v0": v0}) # Record a short training history. tf.initialize_all_variables().run() save.save(sess, filepath, global_step=0) inc.eval() save.save(sess, filepath, global_step=1) inc.eval() save.save(sess, filepath, global_step=2) with self.test_session() as sess: # Build a new graph with different initialization. v0 = tf.Variable(-1.0) # Create a new saver. save = tf.train.Saver({"v0": v0}) tf.initialize_all_variables().run() # Get the most recent checkpoint name from the training history file. name = tf.train.latest_checkpoint(traindir) self.assertIsNotNone(name) # Restore "v0" from that checkpoint. save.restore(sess, name) self.assertEqual(v0.eval(), 2.0) class CheckpointStateTest(tf.test.TestCase): def _TestDir(self, test_name): test_dir = os.path.join(self.get_temp_dir(), test_name) if os.path.exists(test_dir): shutil.rmtree(test_dir) gfile.MakeDirs(test_dir) return test_dir def testAbsPath(self): save_dir = self._TestDir("abs_paths") abs_path = os.path.join(save_dir, "model-0") ckpt = tf.train.generate_checkpoint_state_proto(save_dir, abs_path) self.assertEqual(ckpt.model_checkpoint_path, abs_path) self.assertTrue(os.path.isabs(ckpt.model_checkpoint_path)) self.assertEqual(len(ckpt.all_model_checkpoint_paths), 1) self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path) def testRelPath(self): train_dir = "train" model = os.path.join(train_dir, "model-0") # model_checkpoint_path should have no "train" directory part. new_rel_path = "model-0" ckpt = tf.train.generate_checkpoint_state_proto(train_dir, model) self.assertEqual(ckpt.model_checkpoint_path, new_rel_path) self.assertEqual(len(ckpt.all_model_checkpoint_paths), 1) self.assertEqual(ckpt.all_model_checkpoint_paths[-1], new_rel_path) def testAllModelCheckpointPaths(self): save_dir = self._TestDir("all_models_test") abs_path = os.path.join(save_dir, "model-0") for paths in [None, [], ["model-2"]]: ckpt = tf.train.generate_checkpoint_state_proto( save_dir, abs_path, all_model_checkpoint_paths=paths) self.assertEqual(ckpt.model_checkpoint_path, abs_path) self.assertTrue(os.path.isabs(ckpt.model_checkpoint_path)) self.assertEqual( len(ckpt.all_model_checkpoint_paths), len(paths) if paths else 1) self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path) def testUpdateCheckpointState(self): save_dir = self._TestDir("update_checkpoint_state") os.chdir(save_dir) # Make a temporary train directory. train_dir = "train" os.mkdir(train_dir) abs_path = os.path.join(save_dir, "model-0") rel_path = "train/model-2" tf.train.update_checkpoint_state( train_dir, rel_path, all_model_checkpoint_paths=[abs_path, rel_path]) ckpt = tf.train.get_checkpoint_state(train_dir) self.assertEqual(ckpt.model_checkpoint_path, rel_path) self.assertEqual(len(ckpt.all_model_checkpoint_paths), 2) self.assertEqual(ckpt.all_model_checkpoint_paths[-1], rel_path) self.assertEqual(ckpt.all_model_checkpoint_paths[0], abs_path) class MetaGraphTest(tf.test.TestCase): def _TestDir(self, test_name): test_dir = os.path.join(self.get_temp_dir(), test_name) if os.path.exists(test_dir): shutil.rmtree(test_dir) gfile.MakeDirs(test_dir) return test_dir def testAddCollectionDef(self): test_dir = self._TestDir("good_collection") filename = os.path.join(test_dir, "metafile") with self.test_session(): # Creates a graph. 
v0 = tf.Variable(10.0, name="v0") var = tf.Variable(tf.constant(0, dtype=tf.int64)) count_up_to = var.count_up_to(3) input_queue = tf.FIFOQueue(30, tf.float32, shared_name="collection_queue") qr = tf.train.QueueRunner(input_queue, [count_up_to]) tf.initialize_all_variables() # Creates a saver. save = tf.train.Saver({"v0": v0}) # Adds a set of collections. tf.add_to_collection("int_collection", 3) tf.add_to_collection("float_collection", 3.5) tf.add_to_collection("string_collection", "hello") tf.add_to_collection("variable_collection", v0) # Add QueueRunners. tf.train.add_queue_runner(qr) # Adds user_defined proto in three formats: string, bytes and Any. queue_runner = queue_runner_pb2.QueueRunnerDef(queue_name="test_queue") tf.add_to_collection("user_defined_string_collection", str(queue_runner)) tf.add_to_collection("user_defined_bytes_collection", queue_runner.SerializeToString()) any_buf = Any() any_buf.Pack(queue_runner) tf.add_to_collection("user_defined_any_collection", any_buf) # Generates MetaGraphDef. meta_graph_def = save.export_meta_graph(filename) self.assertTrue(meta_graph_def.HasField("saver_def")) self.assertTrue(meta_graph_def.HasField("graph_def")) collection_def = meta_graph_def.collection_def self.assertEqual(len(collection_def), 10) with tf.Graph().as_default(): # Restores from MetaGraphDef. new_saver = tf.train.import_meta_graph(filename) # Generates a new MetaGraphDef. new_meta_graph_def = new_saver.export_meta_graph() # It should be the same as the original. self.assertProtoEquals(meta_graph_def, new_meta_graph_def) def testAddCollectionDefFails(self): with self.test_session(): # Creates a graph. v0 = tf.Variable(10.0, name="v0") # Creates a saver. save = tf.train.Saver({"v0": v0}) # Generates MetaGraphDef. meta_graph_def = meta_graph_pb2.MetaGraphDef() # Verifies that collection with unsupported key will not be added. tf.add_to_collection(save, 3) save._add_collection_def(meta_graph_def, save) self.assertEqual(len(meta_graph_def.collection_def), 0) # Verifies that collection where item type does not match expected # type will not be added. tf.add_to_collection("int_collection", 3) tf.add_to_collection("int_collection", 3.5) save._add_collection_def(meta_graph_def, "int_collection") self.assertEqual(len(meta_graph_def.collection_def), 0) def _testMultiSaverCollectionSave(self): test_dir = self._TestDir("saver_collection") filename = os.path.join(test_dir, "metafile") saver0_ckpt = os.path.join(test_dir, "saver0.ckpt") saver1_ckpt = os.path.join(test_dir, "saver1.ckpt") with self.test_session(graph=tf.Graph()) as sess: # Creates a graph. v0 = tf.Variable(10.0, name="v0") v1 = tf.Variable(11.0, name="v1") # Creates 2 savers. saver0 = tf.train.Saver({"v0": v0}, name="saver0") saver1 = tf.train.Saver({"v1": v1}, name="saver1") tf.add_to_collection("savers", saver0) tf.add_to_collection("savers", saver1) tf.initialize_all_variables().run() # Saves to different checkpoints. saver0.save(sess, saver0_ckpt) saver1.save(sess, saver1_ckpt) # Generates MetaGraphDef. meta_graph_def = tf.train.export_meta_graph(filename) meta_graph_def0 = saver0.export_meta_graph() meta_graph_def1 = saver1.export_meta_graph() # Verifies that there is no saver_def in meta_graph_def. self.assertFalse(meta_graph_def.HasField("saver_def")) # Verifies that there is saver_def in meta_graph_def0 and 1. self.assertTrue(meta_graph_def0.HasField("saver_def")) self.assertTrue(meta_graph_def1.HasField("saver_def")) # Verifies SAVERS is saved as bytes_list for meta_graph_def. 
collection_def = meta_graph_def.collection_def["savers"] kind = collection_def.WhichOneof("kind") self.assertEqual(kind, "bytes_list") # Verifies that there are 2 entries in SAVERS collection. savers = getattr(collection_def, kind) self.assertEqual(2, len(savers.value)) # Verifies SAVERS collection is saved as bytes_list for meta_graph_def0. collection_def = meta_graph_def0.collection_def["savers"] kind = collection_def.WhichOneof("kind") self.assertEqual(kind, "bytes_list") # Verifies that there are 2 entries in SAVERS collection. savers = getattr(collection_def, kind) self.assertEqual(2, len(savers.value)) def _testMultiSaverCollectionRestore(self): test_dir = os.path.join(self.get_temp_dir(), "saver_collection") filename = os.path.join(test_dir, "metafile") saver0_ckpt = os.path.join(test_dir, "saver0.ckpt") saver1_ckpt = os.path.join(test_dir, "saver1.ckpt") with self.test_session(graph=tf.Graph()) as sess: # Imports from meta_graph. tf.train.import_meta_graph(filename) # Retrieves SAVERS collection. Verifies there are 2 entries. savers = tf.get_collection("savers") self.assertEqual(2, len(savers)) # Retrieves saver0. Verifies that new_saver0 can restore v0, but not v1. new_saver0 = savers[0] new_saver0.restore(sess, saver0_ckpt) v0 = sess.graph.get_tensor_by_name("v0:0") v1 = sess.graph.get_tensor_by_name("v1:0") self.assertEqual(10.0, v0.eval()) with self.assertRaisesWithPredicateMatch( tf.OpError, lambda e: "uninitialized value v1" in e.message): sess.run(v1) # Retrieves saver1. Verifies that new_saver1 can restore v1. new_saver1 = savers[1] new_saver1.restore(sess, saver1_ckpt) v1 = sess.graph.get_tensor_by_name("v1:0") self.assertEqual(11.0, v1.eval()) def testMultiSaverCollection(self): self._testMultiSaverCollectionSave() self._testMultiSaverCollectionRestore() def testBinaryAndTextFormat(self): test_dir = self._TestDir("binary_and_text") filename = os.path.join(test_dir, "metafile") with self.test_session(graph=tf.Graph()): # Creates a graph. tf.Variable(10.0, name="v0") # Exports the graph as binary format. tf.train.export_meta_graph(filename, as_text=False) with self.test_session(graph=tf.Graph()): # Imports the binary format graph. saver = tf.train.import_meta_graph(filename) # Exports the graph as text format. saver.export_meta_graph(filename, as_text=True) with self.test_session(graph=tf.Graph()): # Imports the text format graph. tf.train.import_meta_graph(filename) # Writes wrong contents to the file. tf.train.write_graph(saver.as_saver_def(), os.path.dirname(filename), os.path.basename(filename)) with self.test_session(graph=tf.Graph()): # Import should fail. with self.assertRaisesWithPredicateMatch( IOError, lambda e: "Cannot parse file" in str(e)): tf.train.import_meta_graph(filename) # Deletes the file gfile.Remove(filename) with self.assertRaisesWithPredicateMatch( IOError, lambda e: "does not exist" in str(e)): tf.train.import_meta_graph(filename) def testSliceVariable(self): test_dir = self._TestDir("slice_saver") filename = os.path.join(test_dir, "metafile") with self.test_session(): v1 = tf.Variable([20.0], name="v1") v2 = tf.Variable([20.0], name="v2") v2._set_save_slice_info(tf.Variable.SaveSliceInfo("v1", [1], [0], [1])) # The names are different and will work. slice_saver = tf.train.Saver({"first": v1, "second": v2}) tf.initialize_all_variables().run() # Exports to meta_graph meta_graph_def = slice_saver.export_meta_graph(filename) with tf.Graph().as_default(): # Restores from MetaGraphDef. new_saver = tf.train.import_meta_graph(filename) # Generates a new MetaGraphDef.
new_meta_graph_def = new_saver.export_meta_graph() # It should be the same as the original. self.assertProtoEquals(meta_graph_def, new_meta_graph_def) def _testGraphExtensionSave(self): test_dir = self._TestDir("graph_extension") filename = os.path.join(test_dir, "metafile") saver0_ckpt = os.path.join(test_dir, "saver0.ckpt") with self.test_session(graph=tf.Graph()) as sess: # Creates an inference graph. # Hidden 1 images = tf.constant(1.2, tf.float32, shape=[100, 28]) with tf.name_scope("hidden1"): weights = tf.Variable( tf.truncated_normal([28, 128], stddev=1.0 / math.sqrt(float(28))), name="weights") biases = tf.Variable(tf.zeros([128]), name="biases") hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases) # Hidden 2 with tf.name_scope("hidden2"): weights = tf.Variable( tf.truncated_normal([128, 32], stddev=1.0 / math.sqrt(float(128))), name="weights") biases = tf.Variable(tf.zeros([32]), name="biases") hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases) # Linear with tf.name_scope("softmax_linear"): weights = tf.Variable( tf.truncated_normal([32, 10], stddev=1.0 / math.sqrt(float(32))), name="weights") biases = tf.Variable(tf.zeros([10]), name="biases") logits = tf.matmul(hidden2, weights) + biases tf.add_to_collection("logits", logits) # Runs the graph to compute logits. tf.initialize_all_variables().run() sess.run(logits) # Creates a saver. saver0 = tf.train.Saver() saver0.save(sess, saver0_ckpt) # Generates MetaGraphDef. saver0.export_meta_graph(filename) def _testGraphExtensionRestore(self): test_dir = os.path.join(self.get_temp_dir(), "graph_extension") filename = os.path.join(test_dir, "metafile") saver0_ckpt = os.path.join(test_dir, "saver0.ckpt") with self.test_session(graph=tf.Graph()) as sess: # Restores from MetaGraphDef. new_saver = tf.train.import_meta_graph(filename) # Generates a new MetaGraphDef. new_saver.export_meta_graph() # Restores from checkpoint. new_saver.restore(sess, saver0_ckpt) # Adds loss and train ops. labels = tf.constant(0, tf.int32, shape=[100], name="labels") batch_size = tf.size(labels) labels = tf.expand_dims(labels, 1) indices = tf.expand_dims(tf.range(0, batch_size), 1) concated = tf.concat(1, [indices, labels]) onehot_labels = tf.sparse_to_dense( concated, tf.pack([batch_size, 10]), 1.0, 0.0) logits = tf.get_collection("logits")[0] cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, onehot_labels, name="xentropy") loss = tf.reduce_mean(cross_entropy, name="xentropy_mean") tf.scalar_summary(loss.op.name, loss) # Creates the gradient descent optimizer with the given learning rate. optimizer = tf.train.GradientDescentOptimizer(0.01) # Runs train_op. train_op = optimizer.minimize(loss) sess.run(train_op) def testGraphExtension(self): self._testGraphExtensionSave() self._testGraphExtensionRestore() def testStrippedOpListDef(self): with self.test_session(): # Creates a graph. v0 = tf.Variable(0.0) var = tf.Variable(10.0) tf.add(v0, var) @function.Defun(x=tf.float32) def minus_one(x): return x - 1 minus_one(tf.identity(v0)) save = tf.train.Saver({"v0": v0}) tf.initialize_all_variables() # Generates MetaGraphDef. meta_graph_def = save.export_meta_graph() ops = [o.name for o in meta_graph_def.meta_info_def.stripped_op_list.op] self.assertEqual(ops, ["Add", "Assign", "Const", "Identity", "NoOp", "RestoreSlice", "SaveSlices", "Sub", "Variable"]) if __name__ == "__main__": tf.test.main()
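The max_to_keep and KeepCheckpointEveryNHoursTest cases above pin down tf.train.Saver's checkpoint-retention behavior. As a minimal standalone sketch of the same behavior, written against the TF 1.x-era API used in this record (not part of the original file):

import os
import tempfile

import tensorflow as tf

ckpt_prefix = os.path.join(tempfile.mkdtemp(), "ckpt")
v = tf.Variable(0.0, name="v")
inc = v.assign_add(1.0)
# max_to_keep=2 keeps only the two newest checkpoints; an older one
# survives only if keep_checkpoint_every_n_hours marks it for retention.
saver = tf.train.Saver({"v": v}, max_to_keep=2)
with tf.Session() as sess:
  sess.run(tf.initialize_all_variables())
  for step in range(5):
    sess.run(inc)
    saver.save(sess, ckpt_prefix, global_step=step)
  print(saver.last_checkpoints)  # only the two most recent paths remain

As the tests verify, last_checkpoints only tracks checkpoints written by that Saver instance; files written by a different Saver are never garbage-collected by it.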
[ "tensorflow.core.protobuf.meta_graph_pb2.MetaGraphDef", "tensorflow.train.update_checkpoint_state", "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.concat", "tensorflow.zeros", "tensorflow.test.is_built_with_cuda", "tensorflow.train.QueueRunner", "tensorflow.python.platform.gfile.Exists", "tensorflow.python.platform.gfile.Remove", "tensorflow.pack", "tensorflow.all_variables", "tensorflow.Graph", "tensorflow.Variable", "tensorflow.train.export_meta_graph", "tensorflow.get_collection", "tensorflow.python.framework.function.Defun", "tensorflow.test.main", "tensorflow.python.platform.gfile.DeleteRecursively", "tensorflow.python.platform.gfile.MakeDirs", "tensorflow.train.add_queue_runner", "tensorflow.train.import_meta_graph", "tensorflow.initialize_all_variables", "tensorflow.ConfigProto", "tensorflow.add", "tensorflow.name_scope", "tensorflow.train.Saver", "tensorflow.matmul", "tensorflow.FIFOQueue", "tensorflow.core.protobuf.queue_runner_pb2.QueueRunnerDef", "tensorflow.Variable.SaveSliceInfo", "tensorflow.identity", "numpy.int64", "tensorflow.train.GradientDescentOptimizer", "tensorflow.add_to_collection", "tensorflow.size", "tensorflow.train.get_checkpoint_state", "tensorflow.train.generate_checkpoint_state_proto", "tensorflow.constant", "tensorflow.train.latest_checkpoint", "tensorflow.range", "tensorflow.reduce_mean", "tensorflow.scalar_summary", "tensorflow.python.platform.gfile.Glob", "tensorflow.expand_dims" ]
tensorflow/python/training/saver_test.py
[(1051, 'tensorflow.test.main', 'tf.test.main', ([], {}), True, 'import tensorflow as tf\n'), (357, 'tensorflow.python.platform.gfile.MakeDirs', 'gfile.MakeDirs', (['save_dir'], {}), False, 'from tensorflow.python.platform import gfile\n'), (464, 'tensorflow.python.platform.gfile.MakeDirs', 'gfile.MakeDirs', (['save_dir'], {}), False, 'from tensorflow.python.platform import gfile\n'), (508, 'tensorflow.python.platform.gfile.MakeDirs', 'gfile.MakeDirs', (['save_dir'], {}), False, 'from tensorflow.python.platform import gfile\n'), (634, 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), False, 'import tempfile\n'), (692, 'tensorflow.python.platform.gfile.MakeDirs', 'gfile.MakeDirs', (['test_dir'], {}), False, 'from tensorflow.python.platform import gfile\n'), (698, 'tensorflow.train.generate_checkpoint_state_proto', 'tf.train.generate_checkpoint_state_proto', (['save_dir', 'abs_path'], {}), True, 'import tensorflow as tf\n'), (709, 'tensorflow.train.generate_checkpoint_state_proto', 'tf.train.generate_checkpoint_state_proto', (['train_dir', 'model'], {}), True, 'import tensorflow as tf\n'), (736, 'tensorflow.train.update_checkpoint_state', 'tf.train.update_checkpoint_state', (['train_dir', 'rel_path'], {'all_model_checkpoint_paths': '[abs_path, rel_path]'}), True, 'import tensorflow as tf\n'), (740, 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['train_dir'], {}), True, 'import tensorflow as tf\n'), (753, 'tensorflow.python.platform.gfile.MakeDirs', 'gfile.MakeDirs', (['test_dir'], {}), False, 'from tensorflow.python.platform import gfile\n'), (48, 'tensorflow.Variable', 'tf.Variable', (['(10.0)'], {'name': '"""v0"""'}), True, 'import tensorflow as tf\n'), (49, 'tensorflow.Variable', 'tf.Variable', (['(20.0)'], {'name': '"""v1"""'}), True, 'import tensorflow as tf\n'), (50, 'tensorflow.train.Saver', 'tf.train.Saver', (["{'v0': v0, 'v1': v1}"], {'restore_sequentially': '(True)'}), True, 'import tensorflow as tf\n'), (65, 'tensorflow.Variable', 'tf.Variable', (['(-1.0)'], {'name': '"""v0"""'}), True, 'import tensorflow as tf\n'), (66, 'tensorflow.Variable', 'tf.Variable', (['(-1.0)'], {'name': '"""v1"""'}), True, 'import tensorflow as tf\n'), (67, 'tensorflow.train.Saver', 'tf.train.Saver', (["{'v0': v0, 'v1': v1}"], {}), True, 'import tensorflow as tf\n'), (85, 'tensorflow.Variable', 'tf.Variable', (['(1000.0)'], {'name': '"""v0"""'}), True, 'import tensorflow as tf\n'), (86, 'tensorflow.Variable', 'tf.Variable', (['(2000.0)'], {'name': '"""v1"""'}), True, 'import tensorflow as tf\n'), (87, 'tensorflow.train.Saver', 'tf.train.Saver', (["{'v0': v0_2, 'v1': v1_2}"], {}), True, 'import tensorflow as tf\n'), (105, 'tensorflow.train.Saver', 'tf.train.Saver', (["{'v': v}"], {'restore_sequentially': '(True)'}), True, 'import tensorflow as tf\n'), (128, 'tensorflow.Variable', 'tf.Variable', (['[10.0]'], {'name': '"""v0"""'}), True, 'import tensorflow as tf\n'), (129, 'tensorflow.Variable', 'tf.Variable', (['[20.0]'], {'name': '"""v1"""'}), True, 'import tensorflow as tf\n'), (130, 'tensorflow.Variable', 'tf.Variable', (['[20.0]'], {'name': '"""v2"""'}), True, 'import tensorflow as tf\n'), (138, 'tensorflow.train.Saver', 'tf.train.Saver', (["{'vee1': v1, 'other': [v2]}"], {}), True, 'import tensorflow as tf\n'), (146, 'tensorflow.Variable', 'tf.Variable', (['(10.0)'], {'name': '"""v0"""'}), True, 'import tensorflow as tf\n'), (147, 'tensorflow.Variable', 'tf.Variable', (['(20.0)'], {'name': '"""v1"""'}), True, 'import tensorflow as tf\n'), (148, 'tensorflow.train.Saver', 
'tf.train.Saver', (['[v0, v1]'], {}), True, 'import tensorflow as tf\n'), (163, 'tensorflow.Variable', 'tf.Variable', (['(-1.0)'], {'name': '"""v0"""'}), True, 'import tensorflow as tf\n'), (164, 'tensorflow.Variable', 'tf.Variable', (['(-1.0)'], {'name': '"""v1"""'}), True, 'import tensorflow as tf\n'), (165, 'tensorflow.train.Saver', 'tf.train.Saver', (['[v0, v1]'], {}), True, 'import tensorflow as tf\n'), (183, 'tensorflow.Variable', 'tf.Variable', (['(1000.0)'], {'name': '"""v0"""'}), True, 'import tensorflow as tf\n'), (184, 'tensorflow.Variable', 'tf.Variable', (['(2000.0)'], {'name': '"""v1"""'}), True, 'import tensorflow as tf\n'), (185, 'tensorflow.train.Saver', 'tf.train.Saver', (['[v0_2, v1_2]'], {}), True, 'import tensorflow as tf\n'), (199, 'tensorflow.Variable', 'tf.Variable', (['var_value'], {'name': 'var_name'}), True, 'import tensorflow as tf\n'), (200, 'tensorflow.train.Saver', 'tf.train.Saver', (['{var_name: var}'], {}), True, 'import tensorflow as tf\n'), (205, 'tensorflow.Variable', 'tf.Variable', (['other_value'], {'name': 'var_name'}), True, 'import tensorflow as tf\n'), (206, 'tensorflow.train.Saver', 'tf.train.Saver', (['{var_name: var}'], {}), True, 'import tensorflow as tf\n'), (219, 'tensorflow.test.is_built_with_cuda', 'tf.test.is_built_with_cuda', ([], {}), True, 'import tensorflow as tf\n'), (225, 'tensorflow.train.Saver', 'tf.train.Saver', (["{'v0': v0_1}"], {}), True, 'import tensorflow as tf\n'), (232, 'tensorflow.train.Saver', 'tf.train.Saver', (["{'v0': v0_2}"], {}), True, 'import tensorflow as tf\n'), (241, 'tensorflow.Variable', 'tf.Variable', (['(1.0)'], {}), True, 'import tensorflow as tf\n'), (242, 'tensorflow.Variable', 'tf.Variable', (['[2.0, 2.0, 2.0]'], {}), True, 'import tensorflow as tf\n'), (243, 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), True, 'import tensorflow as tf\n'), (249, 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (250, 'tensorflow.Variable', 'tf.Variable', (['[0.0, 0.0, 0.0]'], {}), True, 'import tensorflow as tf\n'), (252, 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), True, 'import tensorflow as tf\n'), (289, 'tensorflow.train.Saver', 'tf.train.Saver', (["{'v0': v0, 'v1': v1}"], {'sharded': '(True)'}), True, 'import tensorflow as tf\n'), (302, 'tensorflow.train.Saver', 'tf.train.Saver', (["{'v0': v0}"], {'sharded': '(True)'}), True, 'import tensorflow as tf\n'), (314, 'tensorflow.train.Saver', 'tf.train.Saver', (["{'v1': v1}"], {'sharded': '(True)'}), True, 'import tensorflow as tf\n'), (328, 'tensorflow.train.Saver', 'tf.train.Saver', (["{'v0': v0, 'v1': v1}"], {'sharded': '(True)'}), True, 'import tensorflow as tf\n'), (343, 'tensorflow.Variable', 'tf.Variable', (['(123)'], {'name': '"""v0"""'}), True, 'import tensorflow as tf\n'), (344, 'tensorflow.train.Saver', 'tf.train.Saver', (["{'v0': v0}"], {'sharded': '(True)'}), True, 'import tensorflow as tf\n'), (354, 'tensorflow.python.platform.gfile.DeleteRecursively', 'gfile.DeleteRecursively', (['save_dir'], {}), False, 'from tensorflow.python.platform import gfile\n'), (360, 'tensorflow.Variable', 'tf.Variable', (['(10.0)'], {'name': '"""v"""'}), True, 'import tensorflow as tf\n'), (361, 'tensorflow.train.Saver', 'tf.train.Saver', (["{'v': v}"], {'max_to_keep': '(2)'}), True, 'import tensorflow as tf\n'), (461, 'tensorflow.python.platform.gfile.DeleteRecursively', 'gfile.DeleteRecursively', (['save_dir'], {}), False, 'from tensorflow.python.platform import gfile\n'), (473, 
'tensorflow.train.Saver', 'tf.train.Saver', (["{'v0': v0, 'v1': v1}"], {'sharded': '(True)', 'max_to_keep': '(2)'}), True, 'import tensorflow as tf\n'), (505, 'tensorflow.python.platform.gfile.DeleteRecursively', 'gfile.DeleteRecursively', (['save_dir'], {}), False, 'from tensorflow.python.platform import gfile\n'), (511, 'tensorflow.Variable', 'tf.Variable', (['[10.0]'], {'name': '"""v"""'}), True, 'import tensorflow as tf\n'), (517, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (518, 'tensorflow.train.Saver', 'tf.train.Saver', (["{'v': v}"], {'max_to_keep': '(2)', 'keep_checkpoint_every_n_hours': '(0.7 / 3600)'}), True, 'import tensorflow as tf\n'), (561, 'tensorflow.Variable', 'tf.Variable', (['(10.0)'], {'name': '"""v0"""'}), True, 'import tensorflow as tf\n'), (562, 'tensorflow.Variable', 'tf.Variable', (['(20.0)'], {'name': '"""v1"""'}), True, 'import tensorflow as tf\n'), (563, 'tensorflow.train.Saver', 'tf.train.Saver', (["{'save_prefix/v0': v0, 'save_prefix/v1': v1}"], {}), True, 'import tensorflow as tf\n'), (577, 'tensorflow.train.Saver', 'tf.train.Saver', (["{'v0': v0, 'v1': v1}"], {}), True, 'import tensorflow as tf\n'), (584, 'tensorflow.Variable', 'tf.Variable', (['(-1.0)'], {'name': '"""v0"""'}), True, 'import tensorflow as tf\n'), (585, 'tensorflow.Variable', 'tf.Variable', (['(-1.0)'], {'name': '"""v1"""'}), True, 'import tensorflow as tf\n'), (592, 'tensorflow.train.Saver', 'tf.train.Saver', (["{'save_prefix/v0': v0, 'save_prefix/v1': v1}"], {}), True, 'import tensorflow as tf\n'), (602, 'tensorflow.Variable', 'tf.Variable', (['(-1.0)'], {'name': '"""restore_prefix/v0"""'}), True, 'import tensorflow as tf\n'), (603, 'tensorflow.Variable', 'tf.Variable', (['(-1.0)'], {'name': '"""restore_prefix/v1"""'}), True, 'import tensorflow as tf\n'), (611, 'tensorflow.train.Saver', 'tf.train.Saver', (["{'save_prefix/v0': v0, 'save_prefix/v1': v1}"], {}), True, 'import tensorflow as tf\n'), (638, 'shutil.rmtree', 'shutil.rmtree', (['tempdir'], {}), False, 'import shutil\n'), (691, 'shutil.rmtree', 'shutil.rmtree', (['test_dir'], {}), False, 'import shutil\n'), (718, 'tensorflow.train.generate_checkpoint_state_proto', 'tf.train.generate_checkpoint_state_proto', (['save_dir', 'abs_path'], {'all_model_checkpoint_paths': 'paths'}), True, 'import tensorflow as tf\n'), (752, 'shutil.rmtree', 'shutil.rmtree', (['test_dir'], {}), False, 'import shutil\n'), (761, 'tensorflow.Variable', 'tf.Variable', (['(10.0)'], {'name': '"""v0"""'}), True, 'import tensorflow as tf\n'), (764, 'tensorflow.FIFOQueue', 'tf.FIFOQueue', (['(30)', 'tf.float32'], {'shared_name': '"""collection_queue"""'}), True, 'import tensorflow as tf\n'), (765, 'tensorflow.train.QueueRunner', 'tf.train.QueueRunner', (['input_queue', '[count_up_to]'], {}), True, 'import tensorflow as tf\n'), (766, 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), True, 'import tensorflow as tf\n'), (768, 'tensorflow.train.Saver', 'tf.train.Saver', (["{'v0': v0}"], {}), True, 'import tensorflow as tf\n'), (770, 'tensorflow.add_to_collection', 'tf.add_to_collection', (['"""int_collection"""', '(3)'], {}), True, 'import tensorflow as tf\n'), (771, 'tensorflow.add_to_collection', 'tf.add_to_collection', (['"""float_collection"""', '(3.5)'], {}), True, 'import tensorflow as tf\n'), (772, 'tensorflow.add_to_collection', 'tf.add_to_collection', (['"""string_collection"""', '"""hello"""'], {}), True, 'import tensorflow as tf\n'), (773, 'tensorflow.add_to_collection', 'tf.add_to_collection', 
(['"""variable_collection"""', 'v0'], {}), True, 'import tensorflow as tf\n'), (775, 'tensorflow.train.add_queue_runner', 'tf.train.add_queue_runner', (['qr'], {}), True, 'import tensorflow as tf\n'), (777, 'tensorflow.core.protobuf.queue_runner_pb2.QueueRunnerDef', 'queue_runner_pb2.QueueRunnerDef', ([], {'queue_name': '"""test_queue"""'}), False, 'from tensorflow.core.protobuf import queue_runner_pb2\n'), (781, 'google.protobuf.any_pb2.Any', 'Any', ([], {}), False, 'from google.protobuf.any_pb2 import Any\n'), (783, 'tensorflow.add_to_collection', 'tf.add_to_collection', (['"""user_defined_any_collection"""', 'any_buf'], {}), True, 'import tensorflow as tf\n'), (794, 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (['filename'], {}), True, 'import tensorflow as tf\n'), (803, 'tensorflow.Variable', 'tf.Variable', (['(10.0)'], {'name': '"""v0"""'}), True, 'import tensorflow as tf\n'), (805, 'tensorflow.train.Saver', 'tf.train.Saver', (["{'v0': v0}"], {}), True, 'import tensorflow as tf\n'), (807, 'tensorflow.core.protobuf.meta_graph_pb2.MetaGraphDef', 'meta_graph_pb2.MetaGraphDef', ([], {}), False, 'from tensorflow.core.protobuf import meta_graph_pb2\n'), (810, 'tensorflow.add_to_collection', 'tf.add_to_collection', (['save', '(3)'], {}), True, 'import tensorflow as tf\n'), (816, 'tensorflow.add_to_collection', 'tf.add_to_collection', (['"""int_collection"""', '(3)'], {}), True, 'import tensorflow as tf\n'), (817, 'tensorflow.add_to_collection', 'tf.add_to_collection', (['"""int_collection"""', '(3.5)'], {}), True, 'import tensorflow as tf\n'), (828, 'tensorflow.Variable', 'tf.Variable', (['(10.0)'], {'name': '"""v0"""'}), True, 'import tensorflow as tf\n'), (829, 'tensorflow.Variable', 'tf.Variable', (['(11.0)'], {'name': '"""v1"""'}), True, 'import tensorflow as tf\n'), (831, 'tensorflow.train.Saver', 'tf.train.Saver', (["{'v0': v0}"], {'name': '"""saver0"""'}), True, 'import tensorflow as tf\n'), (832, 'tensorflow.train.Saver', 'tf.train.Saver', (["{'v1': v1}"], {'name': '"""saver1"""'}), True, 'import tensorflow as tf\n'), (833, 'tensorflow.add_to_collection', 'tf.add_to_collection', (['"""savers"""', 'saver0'], {}), True, 'import tensorflow as tf\n'), (834, 'tensorflow.add_to_collection', 'tf.add_to_collection', (['"""savers"""', 'saver1'], {}), True, 'import tensorflow as tf\n'), (840, 'tensorflow.train.export_meta_graph', 'tf.train.export_meta_graph', (['filename'], {}), True, 'import tensorflow as tf\n'), (873, 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (['filename'], {}), True, 'import tensorflow as tf\n'), (875, 'tensorflow.get_collection', 'tf.get_collection', (['"""savers"""'], {}), True, 'import tensorflow as tf\n'), (901, 'tensorflow.Variable', 'tf.Variable', (['(10.0)'], {'name': '"""v0"""'}), True, 'import tensorflow as tf\n'), (903, 'tensorflow.train.export_meta_graph', 'tf.train.export_meta_graph', (['filename'], {'as_text': '(False)'}), True, 'import tensorflow as tf\n'), (906, 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (['filename'], {}), True, 'import tensorflow as tf\n'), (911, 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (['filename'], {}), True, 'import tensorflow as tf\n'), (921, 'tensorflow.python.platform.gfile.Remove', 'gfile.Remove', (['filename'], {}), False, 'from tensorflow.python.platform import gfile\n'), (930, 'tensorflow.Variable', 'tf.Variable', (['[20.0]'], {'name': '"""v1"""'}), True, 'import tensorflow as tf\n'), (931, 'tensorflow.Variable', 'tf.Variable', 
(['[20.0]'], {'name': '"""v2"""'}), True, 'import tensorflow as tf\n'), (935, 'tensorflow.train.Saver', 'tf.train.Saver', (["{'first': v1, 'second': v2}"], {}), True, 'import tensorflow as tf\n'), (942, 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (['filename'], {}), True, 'import tensorflow as tf\n'), (955, 'tensorflow.constant', 'tf.constant', (['(1.2)', 'tf.float32'], {'shape': '[100, 28]'}), True, 'import tensorflow as tf\n'), (988, 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), True, 'import tensorflow as tf\n'), (999, 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (['filename'], {}), True, 'import tensorflow as tf\n'), (1005, 'tensorflow.constant', 'tf.constant', (['(0)', 'tf.int32'], {'shape': '[100]', 'name': '"""labels"""'}), True, 'import tensorflow as tf\n'), (1006, 'tensorflow.size', 'tf.size', (['labels'], {}), True, 'import tensorflow as tf\n'), (1007, 'tensorflow.expand_dims', 'tf.expand_dims', (['labels', '(1)'], {}), True, 'import tensorflow as tf\n'), (1009, 'tensorflow.concat', 'tf.concat', (['(1)', '[indices, labels]'], {}), True, 'import tensorflow as tf\n'), (1013, 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', (['logits', 'onehot_labels'], {'name': '"""xentropy"""'}), True, 'import tensorflow as tf\n'), (1016, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cross_entropy'], {'name': '"""xentropy_mean"""'}), True, 'import tensorflow as tf\n'), (1018, 'tensorflow.scalar_summary', 'tf.scalar_summary', (['loss.op.name', 'loss'], {}), True, 'import tensorflow as tf\n'), (1020, 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['(0.01)'], {}), True, 'import tensorflow as tf\n'), (1033, 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (1034, 'tensorflow.Variable', 'tf.Variable', (['(10.0)'], {}), True, 'import tensorflow as tf\n'), (1035, 'tensorflow.add', 'tf.add', (['v0', 'var'], {}), True, 'import tensorflow as tf\n'), (1036, 'tensorflow.python.framework.function.Defun', 'function.Defun', ([], {'x': 'tf.float32'}), False, 'from tensorflow.python.framework import function\n'), (1040, 'tensorflow.train.Saver', 'tf.train.Saver', (["{'v0': v0}"], {}), True, 'import tensorflow as tf\n'), (1041, 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), True, 'import tensorflow as tf\n'), (104, 'numpy.int64', 'np.int64', (['(15)'], {}), True, 'import numpy as np\n'), (115, 'tensorflow.train.Saver', 'tf.train.Saver', (["{'v': v}"], {}), True, 'import tensorflow as tf\n'), (124, 'numpy.int64', 'np.int64', (['(15)'], {}), True, 'import numpy as np\n'), (131, 'tensorflow.Variable.SaveSliceInfo', 'tf.Variable.SaveSliceInfo', (['"""v1"""', '[1]', '[0]', '[1]'], {}), True, 'import tensorflow as tf\n'), (135, 'tensorflow.train.Saver', 'tf.train.Saver', (['[v0, v1, v2]'], {}), True, 'import tensorflow as tf\n'), (224, 'tensorflow.Variable', 'tf.Variable', (['(123.45)'], {}), True, 'import tensorflow as tf\n'), (231, 'tensorflow.Variable', 'tf.Variable', (['(543.21)'], {}), True, 'import tensorflow as tf\n'), (244, 'tensorflow.all_variables', 'tf.all_variables', ([], {}), True, 'import tensorflow as tf\n'), (264, 'tensorflow.Variable', 'tf.Variable', (['(1.0)'], {'name': '"""var0"""'}), True, 'import tensorflow as tf\n'), (265, 'tensorflow.train.Saver', 'tf.train.Saver', (['{var.op.name: var}'], {}), True, 'import tensorflow as tf\n'), (286, 'tensorflow.Variable', 'tf.Variable', (['(10)'], 
{'name': '"""v0"""'}), True, 'import tensorflow as tf\n'), (288, 'tensorflow.Variable', 'tf.Variable', (['(20)'], {'name': '"""v1"""'}), True, 'import tensorflow as tf\n'), (301, 'tensorflow.Variable', 'tf.Variable', (['(111)'], {'name': '"""v0"""'}), True, 'import tensorflow as tf\n'), (313, 'tensorflow.Variable', 'tf.Variable', (['(222)'], {}), True, 'import tensorflow as tf\n'), (325, 'tensorflow.Variable', 'tf.Variable', (['(111)'], {'name': '"""v0"""'}), True, 'import tensorflow as tf\n'), (327, 'tensorflow.Variable', 'tf.Variable', (['(222)'], {'name': '"""v1"""'}), True, 'import tensorflow as tf\n'), (367, 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['s1'], {}), False, 'from tensorflow.python.platform import gfile\n'), (371, 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['s1'], {}), False, 'from tensorflow.python.platform import gfile\n'), (372, 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['s2'], {}), False, 'from tensorflow.python.platform import gfile\n'), (376, 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['s1'], {}), False, 'from tensorflow.python.platform import gfile\n'), (377, 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['s2'], {}), False, 'from tensorflow.python.platform import gfile\n'), (378, 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['s3'], {}), False, 'from tensorflow.python.platform import gfile\n'), (393, 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['s1'], {}), False, 'from tensorflow.python.platform import gfile\n'), (395, 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['s3'], {}), False, 'from tensorflow.python.platform import gfile\n'), (397, 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['s2'], {}), False, 'from tensorflow.python.platform import gfile\n'), (403, 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['s3'], {}), False, 'from tensorflow.python.platform import gfile\n'), (405, 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['s2'], {}), False, 'from tensorflow.python.platform import gfile\n'), (407, 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['s1'], {}), False, 'from tensorflow.python.platform import gfile\n'), (416, 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['s1'], {}), False, 'from tensorflow.python.platform import gfile\n'), (419, 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['s3'], {}), False, 'from tensorflow.python.platform import gfile\n'), (421, 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['s2'], {}), False, 'from tensorflow.python.platform import gfile\n'), (427, 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['s3'], {}), False, 'from tensorflow.python.platform import gfile\n'), (429, 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['s2'], {}), False, 'from tensorflow.python.platform import gfile\n'), (431, 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['s1'], {}), False, 'from tensorflow.python.platform import gfile\n'), (440, 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['s1'], {}), False, 'from tensorflow.python.platform import gfile\n'), (443, 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['s3'], {}), False, 'from tensorflow.python.platform import gfile\n'), (445, 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['s2'], {}), False, 'from tensorflow.python.platform import gfile\n'), (451, 'tensorflow.python.platform.gfile.Exists', 
'gfile.Exists', (['s3'], {}), False, 'from tensorflow.python.platform import gfile\n'), (453, 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['s2'], {}), False, 'from tensorflow.python.platform import gfile\n'), (455, 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['s1'], {}), False, 'from tensorflow.python.platform import gfile\n'), (470, 'tensorflow.Variable', 'tf.Variable', (['(111)'], {'name': '"""v0"""'}), True, 'import tensorflow as tf\n'), (472, 'tensorflow.Variable', 'tf.Variable', (['(222)'], {'name': '"""v1"""'}), True, 'import tensorflow as tf\n'), (547, 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['s1'], {}), False, 'from tensorflow.python.platform import gfile\n'), (548, 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['s2'], {}), False, 'from tensorflow.python.platform import gfile\n'), (549, 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['s3'], {}), False, 'from tensorflow.python.platform import gfile\n'), (550, 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['s4'], {}), False, 'from tensorflow.python.platform import gfile\n'), (762, 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'tf.int64'}), True, 'import tensorflow as tf\n'), (919, 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (['filename'], {}), True, 'import tensorflow as tf\n'), (924, 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (['filename'], {}), True, 'import tensorflow as tf\n'), (932, 'tensorflow.Variable.SaveSliceInfo', 'tf.Variable.SaveSliceInfo', (['"""v1"""', '[1]', '[0]', '[1]'], {}), True, 'import tensorflow as tf\n'), (956, 'tensorflow.name_scope', 'tf.name_scope', (['"""hidden1"""'], {}), True, 'import tensorflow as tf\n'), (965, 'tensorflow.name_scope', 'tf.name_scope', (['"""hidden2"""'], {}), True, 'import tensorflow as tf\n'), (974, 'tensorflow.name_scope', 'tf.name_scope', (['"""softmax_linear"""'], {}), True, 'import tensorflow as tf\n'), (982, 'tensorflow.add_to_collection', 'tf.add_to_collection', (['"""logits"""', 'logits'], {}), True, 'import tensorflow as tf\n'), (1008, 'tensorflow.range', 'tf.range', (['(0)', 'batch_size'], {}), True, 'import tensorflow as tf\n'), (1011, 'tensorflow.pack', 'tf.pack', (['[batch_size, 10]'], {}), True, 'import tensorflow as tf\n'), (1012, 'tensorflow.get_collection', 'tf.get_collection', (['"""logits"""'], {}), True, 'import tensorflow as tf\n'), (1039, 'tensorflow.identity', 'tf.identity', (['v0'], {}), True, 'import tensorflow as tf\n'), (51, 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), True, 'import tensorflow as tf\n'), (88, 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), True, 'import tensorflow as tf\n'), (106, 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), True, 'import tensorflow as tf\n'), (114, 'numpy.int64', 'np.int64', (['(-1)'], {}), True, 'import numpy as np\n'), (127, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (143, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (149, 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), True, 'import tensorflow as tf\n'), (162, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (182, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (186, 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), True, 'import 
tensorflow as tf\n'), (222, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (226, 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), True, 'import tensorflow as tf\n'), (229, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (233, 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), True, 'import tensorflow as tf\n'), (240, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (248, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (268, 'tensorflow.constant', 'tf.constant', (['global_step_int'], {}), True, 'import tensorflow as tf\n'), (284, 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'device_count': "{'CPU': 2}"}), True, 'import tensorflow as tf\n'), (290, 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), True, 'import tensorflow as tf\n'), (299, 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'device_count': "{'CPU': 2}"}), True, 'import tensorflow as tf\n'), (303, 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), True, 'import tensorflow as tf\n'), (311, 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'device_count': "{'CPU': 2}"}), True, 'import tensorflow as tf\n'), (315, 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), True, 'import tensorflow as tf\n'), (323, 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'device_count': "{'CPU': 2}"}), True, 'import tensorflow as tf\n'), (329, 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), True, 'import tensorflow as tf\n'), (362, 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), True, 'import tensorflow as tf\n'), (468, 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'device_count': "{'CPU': 2}"}), True, 'import tensorflow as tf\n'), (474, 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), True, 'import tensorflow as tf\n'), (479, 'tensorflow.python.platform.gfile.Glob', 'gfile.Glob', (['s1'], {}), False, 'from tensorflow.python.platform import gfile\n'), (484, 'tensorflow.python.platform.gfile.Glob', 'gfile.Glob', (['s1'], {}), False, 'from tensorflow.python.platform import gfile\n'), (486, 'tensorflow.python.platform.gfile.Glob', 'gfile.Glob', (['s2'], {}), False, 'from tensorflow.python.platform import gfile\n'), (491, 'tensorflow.python.platform.gfile.Glob', 'gfile.Glob', (['s1'], {}), False, 'from tensorflow.python.platform import gfile\n'), (493, 'tensorflow.python.platform.gfile.Glob', 'gfile.Glob', (['s2'], {}), False, 'from tensorflow.python.platform import gfile\n'), (495, 'tensorflow.python.platform.gfile.Glob', 'gfile.Glob', (['s3'], {}), False, 'from tensorflow.python.platform import gfile\n'), (514, 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), True, 'import tensorflow as tf\n'), (564, 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), True, 'import tensorflow as tf\n'), (656, 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (659, 'tensorflow.train.Saver', 'tf.train.Saver', (["{'v0': v0}"], {}), True, 'import tensorflow as tf\n'), (671, 'tensorflow.Variable', 'tf.Variable', (['(-1.0)'], {}), True, 'import tensorflow as tf\n'), (674, 'tensorflow.train.Saver', 'tf.train.Saver', (["{'v0': v0}"], {}), True, 'import tensorflow as tf\n'), (678, 
'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['traindir'], {}), True, 'import tensorflow as tf\n'), (792, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (826, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (835, 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), True, 'import tensorflow as tf\n'), (871, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (899, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (904, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (909, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (915, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (936, 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), True, 'import tensorflow as tf\n'), (940, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (952, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (961, 'tensorflow.zeros', 'tf.zeros', (['[128]'], {}), True, 'import tensorflow as tf\n'), (970, 'tensorflow.zeros', 'tf.zeros', (['[32]'], {}), True, 'import tensorflow as tf\n'), (979, 'tensorflow.zeros', 'tf.zeros', (['[10]'], {}), True, 'import tensorflow as tf\n'), (981, 'tensorflow.matmul', 'tf.matmul', (['hidden2', 'weights'], {}), True, 'import tensorflow as tf\n'), (985, 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), True, 'import tensorflow as tf\n'), (997, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (523, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (963, 'tensorflow.matmul', 'tf.matmul', (['images', 'weights'], {}), True, 'import tensorflow as tf\n'), (972, 'tensorflow.matmul', 'tf.matmul', (['hidden1', 'weights'], {}), True, 'import tensorflow as tf\n'), (662, 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), True, 'import tensorflow as tf\n'), (675, 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), True, 'import tensorflow as tf\n')]
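Each record in this dump pairs the code field with an api_extract list like the one above. From the examples, each tuple reads as (line_no, canonical_api, call_expr, (args, kwargs), alias_flag, import_stmt); that schema is inferred here rather than documented, so the reader below is a hypothetical sketch:

from collections import Counter

def count_apis(api_extract):
  # Tally call sites by fully qualified API name (second tuple field).
  counts = Counter()
  for entry in api_extract:
    counts[entry[1]] += 1
  return counts

# count_apis(record["api_extract"]).most_common(5) on the saver_test.py
# record would surface its dominant calls, e.g. gfile.Exists, tf.Variable,
# tf.train.Saver and tf.initialize_all_variables.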
TimoHackel/ILA-SCNN
99ff4b3f68877d660dc56e086b6a12d6846b379a
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import math from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.ops import gradient_checker from tensorflow.python.ops import nn_ops from tensorflow.python.client import timeline import tensorflow.python.ops.nn_grad # pylint: disable=unused-import from tensorflow.python.platform import test import tensorflow as tf import random import numpy as np import time import sparse_tools as sp from direct_sparse_module import sparse_nn_ops as sc_module import os import sys def verifyValues( tensor_in_sizes, filter_in_sizes, stride, rho_data = 0.1, rho_filter = 1, padding = 'SAME', dim = 5, max_density = 0.1, num_trials = 3, filter_type = 'K-RELU', test_type = '', dense=True ): if isinstance(stride, collections.Iterable): strides = [1] + list(stride) + [1] else: strides = [1, stride, stride, stride, 1] out_sizes = np.copy(tensor_in_sizes) out_sizes[-1] = filter_in_sizes[-1] out_entry_count = np.prod(out_sizes) * max_density bias = np.zeros([filter_in_sizes[-1]], dtype=np.float32) no_strides = [1, 1, 1, 1, 1] [t1ind, t1val, t1sh] = sp.createRandomSparseTensor(rho_data, tensor_in_sizes, -3, 3) s1 = tf.SparseTensor(indices=t1ind, values=t1val, dense_shape=t1sh) d1 = sp.sparse_to_dense(t1ind, t1val, t1sh) [t2ind, t2val, t2sh] = sp.createRandomSparseTensor(rho_filter, filter_in_sizes) s2 = tf.SparseTensor(indices=t2ind, values=t2val, dense_shape=t2sh) d2 = sp.sparse_to_dense(t2ind, t2val, t2sh) print("strides: \n", strides) print("input shape", tensor_in_sizes) print("filter shape", filter_in_sizes) config = tf.ConfigProto() config.gpu_options.per_process_gpu_memory_fraction = 0.7 with tf.device("/gpu:0"): convd = sc_module.direct_sparse_data_conversion(t1ind, t1val, t1sh) convf = sc_module.direct_sparse_filter_conversion(t2ind, t2val, t2sh, t1sh) with tf.Session(config=config) as sess: pd = sess.run(convd) pf = sess.run(convf) tf.reset_default_graph() ts = 0 with tf.device("/gpu:0"): approx_scskconv = sc_module.direct_sparse_conv_kd(pd.out_indices, pd.out_values, pd.out_shape, pd.out_block_channel_mapping, pf.out_indices, pf.out_values, pf.out_shape, pf.out_channel_mapping, bias, strides, padding, out_entry_count, dim, max_density, filter_type) with tf.Session(config=config) as sess: t6 = time.time() sv3 = sess.run(approx_scskconv) t5 = time.time() for i in range(0, num_trials): sess.run(approx_scskconv) t6 = time.time() ts = abs(t6 - t5) / max(num_trials, 1) print("time approx sparse: ", ts) tf.reset_default_graph() time.sleep(1) if dense: td = 0 with tf.device("/gpu:0"): conv = nn_ops.conv3d(d1, d2, strides, padding) with tf.Session(config=config) as sess: t22 = time.time() expected = sess.run(conv) t11 = time.time() for i in range(0, num_trials): sess.run(conv) t22 = time.time() td = abs(t22 - t11) / max(num_trials, 1) print("time dense gpu: ", td) tf.reset_default_graph() print("time ratio: ", ts / td) return [expected, sv3, ts, td] def do_test(res, f_density, batch_size): pid = os.getpid() print(pid) num_trials = 5 channel_count = 1 channel_count_out = 8 filter_res = 3 max_density = 1/res in_density = 1/res filter_type = 'K-RELU' test_type = '' ret_value = verifyValues( tensor_in_sizes=[batch_size, res, res, res, channel_count], #[batch, depth, height, width, in_channels] filter_in_sizes=[filter_res, filter_res, filter_res, channel_count, 
channel_count_out], #[depth, height, width, in_channels, out_channels] stride=1, rho_data=1 * in_density, rho_filter=1 * f_density, padding='SAME', max_density=max_density, num_trials=num_trials, filter_type=filter_type, test_type=test_type) for res in [2**i for i in range(4, 9)]: for f_density in [0.1, 0.3, 0.5, 1]: for batch in [8]: print('========================================================================') print('========================================================================') print('res = {} f_density = {} batch = {}'.format(res, f_density, batch)) do_test(res, f_density, batch)
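verifyValues above times the sparse and the dense convolution the same way: one untimed warm-up run to absorb graph construction and allocator effects, then the mean wall-clock time over num_trials further runs. The same harness, factored out as a sketch (assuming, as here, an op with no variables to initialize):

import time

import tensorflow as tf

def time_op(op, num_trials=5, config=None):
  # One warm-up run excluded from the measurement, then the average
  # session run time over num_trials repetitions.
  with tf.Session(config=config) as sess:
    sess.run(op)
    start = time.time()
    for _ in range(num_trials):
      sess.run(op)
    return (time.time() - start) / max(num_trials, 1)

Note that verifyValues also calls tf.reset_default_graph() and sleeps between the two measurements, so the sparse and dense graphs do not share GPU memory state.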
[ "tensorflow.python.ops.nn_ops.conv3d", "tensorflow.device", "tensorflow.SparseTensor", "tensorflow.ConfigProto", "numpy.copy", "tensorflow.reset_default_graph", "numpy.prod", "tensorflow.Session", "numpy.zeros" ]
tensorflow/core/user_ops/gpu_tests/RuntimeMemorySparseDense.py
[(43, 'numpy.copy', 'np.copy', (['tensor_in_sizes'], {}), True, 'import numpy as np\n'), (47, 'numpy.zeros', 'np.zeros', (['[filter_in_sizes[-1]]'], {'dtype': 'np.float32'}), True, 'import numpy as np\n'), (49, 'sparse_tools.createRandomSparseTensor', 'sp.createRandomSparseTensor', (['rho_data', 'tensor_in_sizes', '(-3)', '(3)'], {}), True, 'import sparse_tools as sp\n'), (50, 'tensorflow.SparseTensor', 'tf.SparseTensor', ([], {'indices': 't1ind', 'values': 't1val', 'dense_shape': 't1sh'}), True, 'import tensorflow as tf\n'), (51, 'sparse_tools.sparse_to_dense', 'sp.sparse_to_dense', (['t1ind', 't1val', 't1sh'], {}), True, 'import sparse_tools as sp\n'), (53, 'sparse_tools.createRandomSparseTensor', 'sp.createRandomSparseTensor', (['rho_filter', 'filter_in_sizes'], {}), True, 'import sparse_tools as sp\n'), (54, 'tensorflow.SparseTensor', 'tf.SparseTensor', ([], {'indices': 't2ind', 'values': 't2val', 'dense_shape': 't2sh'}), True, 'import tensorflow as tf\n'), (55, 'sparse_tools.sparse_to_dense', 'sp.sparse_to_dense', (['t2ind', 't2val', 't2sh'], {}), True, 'import sparse_tools as sp\n'), (61, 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), True, 'import tensorflow as tf\n'), (71, 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), True, 'import tensorflow as tf\n'), (85, 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), True, 'import tensorflow as tf\n'), (87, 'time.sleep', 'time.sleep', (['(1)'], {}), False, 'import time\n'), (109, 'os.getpid', 'os.getpid', ([], {}), False, 'import os\n'), (45, 'numpy.prod', 'np.prod', (['out_sizes'], {}), True, 'import numpy as np\n'), (64, 'tensorflow.device', 'tf.device', (['"""/gpu:0"""'], {}), True, 'import tensorflow as tf\n'), (65, 'direct_sparse_module.sparse_nn_ops.direct_sparse_data_conversion', 'sc_module.direct_sparse_data_conversion', (['t1ind', 't1val', 't1sh'], {}), True, 'from direct_sparse_module import sparse_nn_ops as sc_module\n'), (66, 'direct_sparse_module.sparse_nn_ops.direct_sparse_filter_conversion', 'sc_module.direct_sparse_filter_conversion', (['t2ind', 't2val', 't2sh', 't1sh'], {}), True, 'from direct_sparse_module import sparse_nn_ops as sc_module\n'), (67, 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), True, 'import tensorflow as tf\n'), (74, 'tensorflow.device', 'tf.device', (['"""/gpu:0"""'], {}), True, 'import tensorflow as tf\n'), (75, 'direct_sparse_module.sparse_nn_ops.direct_sparse_conv_kd', 'sc_module.direct_sparse_conv_kd', (['pd.out_indices', 'pd.out_values', 'pd.out_shape', 'pd.out_block_channel_mapping', 'pf.out_indices', 'pf.out_values', 'pf.out_shape', 'pf.out_channel_mapping', 'bias', 'strides', 'padding', 'out_entry_count', 'dim', 'max_density', 'filter_type'], {}), True, 'from direct_sparse_module import sparse_nn_ops as sc_module\n'), (76, 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), True, 'import tensorflow as tf\n'), (77, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (79, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (82, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (102, 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), True, 'import tensorflow as tf\n'), (91, 'tensorflow.device', 'tf.device', (['"""/gpu:0"""'], {}), True, 'import tensorflow as tf\n'), (92, 'tensorflow.python.ops.nn_ops.conv3d', 'nn_ops.conv3d', (['d1', 'd2', 'strides', 'padding'], {}), False, 'from tensorflow.python.ops import nn_ops\n'), (93, 'tensorflow.Session', 'tf.Session', ([], 
{'config': 'config'}), True, 'import tensorflow as tf\n'), (94, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (96, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (99, 'time.time', 'time.time', ([], {}), False, 'import time\n')]
Drunkar/tensor2tensor
8d3d175d649680c8e5b98a1b1c1c5e782ff492ac
# coding=utf-8 # Copyright 2018 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mesh-Tensorflow Model in tensor2tensor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import mesh_tensorflow as mtf import six from tensor2tensor.utils import learning_rate from tensor2tensor.utils import metrics from tensor2tensor.utils import t2t_model import tensorflow as tf from tensorflow.contrib.tpu.python.tpu import tpu_estimator class MtfModel(t2t_model.T2TModel): """Toy model to test mesh_tensorflow.""" @classmethod def estimator_model_fn(cls, hparams, features, labels, mode, config=None, params=None, decode_hparams=None): hparams = copy.deepcopy(hparams) use_tpu = params and params.get("use_tpu", False) hparams.use_tpu = use_tpu # merge decode_hparams into hparams if present if mode == tf.estimator.ModeKeys.PREDICT and decode_hparams is not None: for k, v in six.iteritems(decode_hparams.values()): if hasattr(hparams, k) and getattr(hparams, k) != v: tf.logging.warning("Overriding hparams.%s with %s from decode_hparams" % (k, v)) setattr(hparams, k, v) # Instantiate model data_parallelism = None if not use_tpu and config: data_parallelism = config.data_parallelism model = cls( hparams, mode, data_parallelism=data_parallelism, decode_hparams=decode_hparams) global_step = tf.train.get_global_step() mesh_shape = mtf.convert_to_shape(hparams.mesh_shape) layout_rules = mtf.convert_to_layout_rules(hparams.layout) if use_tpu: ctx = params["context"] num_hosts = ctx.num_hosts host_placement_fn = ctx.tpu_host_placement_function device_list = [host_placement_fn(host_id=t) for t in range(num_hosts)] # TODO(ylc): Better estimation of replica cache size? replica_cache_size = 300 * 1000000 # 300M per replica # Worker 0 caches all the TPU binaries. 
worker0_mem = replica_cache_size * ctx.num_replicas devices_memeory_usage = [worker0_mem] + [0] * (num_hosts - 1) var_placer = mtf.utils.BalancedVariablePlacer(device_list, devices_memeory_usage) mesh_devices = [""] * mesh_shape.size mesh_impl = mtf.simd_mesh_impl.SimdMeshImpl( mesh_shape, layout_rules, mesh_devices, ctx.device_assignment) else: var_placer = None if data_parallelism is None or len(data_parallelism.ps_devices) == 1: mesh_devices = [""] * mesh_shape.size else: assert len(data_parallelism.ps_devices) == mesh_shape.size mesh_devices = data_parallelism.ps_devices mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl( mesh_shape, layout_rules, mesh_devices) graph = mtf.Graph() mesh = mtf.Mesh(graph, "my_mesh", var_placer) # PREDICT mode if mode == tf.estimator.ModeKeys.PREDICT: return model.estimator_spec_predict(features, mesh, mesh_impl, use_tpu) logits, loss = model.mtf_model_fn(features, mesh) if use_tpu and logits is not None: logits = mtf.anonymize(logits) # TRAIN mode if mode == tf.estimator.ModeKeys.TRAIN: var_grads = mtf.gradients( [loss], [v.outputs[0] for v in graph.trainable_variables]) lr = learning_rate.learning_rate_schedule(hparams) tf.summary.scalar("learning_rate", lr) mtf_lr = mtf.import_tf_tensor( mesh, tf.convert_to_tensor(lr, dtype=tf.float32), mtf.Shape([])) optimizer = mtf.optimize.make_optimizer(hparams, mtf_lr) update_ops = [] for grad, var in zip(var_grads, graph.trainable_variables): update_ops.extend(optimizer.apply_grad(grad, var)) lowering = mtf.Lowering(graph, {mesh: mesh_impl}) tf_loss = lowering.export_to_tf_tensor(loss) tf_loss = tf.to_float(tf_loss) if logits and mode != tf.estimator.ModeKeys.TRAIN: tf_logits = lowering.export_to_tf_tensor(logits) if mode == tf.estimator.ModeKeys.TRAIN: tf_update_ops = [lowering.lowered_operation(op) for op in update_ops] tf_update_ops.append(tf.assign_add(global_step, 1)) # tf.logging.info("tf_update_ops: {}".format(tf_update_ops)) train_op = tf.group(tf_update_ops) with mtf.utils.outside_all_rewrites(): # Copy master variables to slices. Must be called first. restore_hook = mtf.MtfRestoreHook(lowering) saver = tf.train.Saver( tf.global_variables(), sharded=True, max_to_keep=10, keep_checkpoint_every_n_hours=2, defer_build=False, save_relative_paths=True) tf.add_to_collection(tf.GraphKeys.SAVERS, saver) saver_listener = mtf.MtfCheckpointSaverListener(lowering) saver_hook = tf.train.CheckpointSaverHook( hparams.model_dir, save_steps=1000, saver=saver, listeners=[saver_listener]) # EVAL mode if mode == tf.estimator.ModeKeys.EVAL: tf_logits = lowering.export_to_tf_tensor(logits) return model.estimator_spec_eval(features, tf_logits, labels, tf_loss, restore_hook, use_tpu) if use_tpu: # TPU host call. 
# Important: need to be called before remove_summaries()
      if hparams.tpu_enable_host_call:
        host_call = t2t_model.create_host_call(hparams.model_dir)
      else:
        host_call = None
      t2t_model.remove_summaries()
      return tpu_estimator.TPUEstimatorSpec(
          mode=tf.estimator.ModeKeys.TRAIN,
          loss=tf_loss,
          train_op=train_op,
          host_call=host_call,
          training_hooks=[restore_hook, saver_hook])
    else:
      return tf.estimator.EstimatorSpec(
          tf.estimator.ModeKeys.TRAIN, loss=tf_loss, train_op=train_op,
          training_chief_hooks=[restore_hook, saver_hook])

  def estimator_spec_eval(
      self, features, logits, labels, loss, restore_hook, use_tpu):
    """Construct EstimatorSpec for EVAL mode."""
    hparams = self.hparams
    problem = hparams.problem
    if logits.get_shape().ndims == 3:
      logits = tf.expand_dims(tf.expand_dims(logits, 2), 3)

    eval_metrics_fns = metrics.create_evaluation_metrics([problem], hparams)

    if use_tpu:
      def metric_fn(tf_logits, labels):
        with tf.device("cpu:0"), mtf.utils.outside_all_rewrites():
          eval_metrics = {}
          for metric_name, metric_fn in six.iteritems(eval_metrics_fns):
            if metric_name.split("/")[-1] not in t2t_model.TPU_METRIC_BLACKLIST:
              eval_metrics[metric_name] = metric_fn(
                  tf_logits, None, tf.identity(labels))
          return eval_metrics
      return tpu_estimator.TPUEstimatorSpec(
          tf.estimator.ModeKeys.EVAL,
          evaluation_hooks=[restore_hook],
          loss=loss,
          eval_metrics=(metric_fn, [logits, labels]))
    else:
      eval_metrics = {}
      predictions = {"predictions": logits}
      for metric_name, metric_fn in six.iteritems(eval_metrics_fns):
        eval_metrics[metric_name] = metric_fn(logits, features,
                                              features["targets"])
      return tf.estimator.EstimatorSpec(
          tf.estimator.ModeKeys.EVAL,
          predictions=predictions,
          eval_metric_ops=eval_metrics,
          evaluation_hooks=[restore_hook],
          loss=loss)

  def estimator_spec_predict(self, features, mesh, mesh_impl, use_tpu):
    mtf_samples = self.sample(features, mesh)
    lowering = mtf.Lowering(mesh.graph, {mesh: mesh_impl})
    outputs = lowering.export_to_tf_tensor(mtf_samples)
    if self.has_input:
      ndims = len(outputs.shape.as_list())
      actual_batch_size = tf.shape(features["inputs"])[0]
      outputs = tf.slice(
          outputs, [0] * ndims, [actual_batch_size] + [-1] * (ndims - 1))
    predictions = {
        "outputs": outputs
    }
    if features.get("infer_targets") is not None:
      predictions["infer_targets"] = features["infer_targets"]
    if features.get("inputs") is not None:
      predictions["inputs"] = features["inputs"]
    if use_tpu:
      t2t_model.remove_summaries()
      return tpu_estimator.TPUEstimatorSpec(
          mode=tf.estimator.ModeKeys.PREDICT,
          predictions=predictions,
          prediction_hooks=[mtf.MtfRestoreHook(lowering)])
    else:
      return tf.estimator.EstimatorSpec(
          tf.estimator.ModeKeys.PREDICT,
          predictions=predictions,
          prediction_hooks=[mtf.MtfRestoreHook(lowering)])

  def sample(self, features, mesh):
    """Sample from the model."""
    raise NotImplementedError("TODO(noam): write generic slow mtf sample.")

  def mtf_model_fn(self, features, mesh):
    raise NotImplementedError("Not implemented")
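A hypothetical usage sketch, appended by the editor and not part of the original file: MtfModel is meant to be subclassed, with the subclass supplying mtf_model_fn() to return (logits, loss) as Mesh TensorFlow tensors. The class name, dimension sizes, and toy loss below are all assumptions for illustration; it reuses the mtf/tf imports at the top of this file.

class ToyMtfModel(MtfModel):  # hypothetical subclass, for illustration only

  def mtf_model_fn(self, features, mesh):
    # Import the TF feature tensor into the Mesh TensorFlow graph.
    batch_dim = mtf.Dimension("batch", 8)   # assumed batch size
    io_dim = mtf.Dimension("io", 32)        # assumed feature width
    x = mtf.import_tf_tensor(
        mesh, tf.cast(features["inputs"], tf.float32),
        mtf.Shape([batch_dim, io_dim]))
    # Two mesh-parallel dense layers; the layout rules in hparams decide
    # how the matmuls are split across the mesh.
    hidden_dim = mtf.Dimension("hidden", 64)
    h = mtf.layers.dense(x, hidden_dim, activation=mtf.relu, name="hidden")
    logits = mtf.layers.dense(h, io_dim, name="logits")
    # Toy reconstruction loss, just so TRAIN mode has something to minimize.
    loss = mtf.reduce_mean(mtf.square(logits - x))
    return logits, loss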
[ "tensorflow.convert_to_tensor", "tensorflow.logging.warning", "tensorflow.device", "tensorflow.summary.scalar", "tensorflow.assign_add", "tensorflow.shape", "tensorflow.slice", "tensorflow.train.CheckpointSaverHook", "tensorflow.global_variables", "tensorflow.expand_dims", "tensorflow.train.get_global_step", "tensorflow.identity", "tensorflow.contrib.tpu.python.tpu.tpu_estimator.TPUEstimatorSpec", "tensorflow.to_float", "tensorflow.estimator.EstimatorSpec", "tensorflow.group", "tensorflow.add_to_collection" ]
tensor2tensor/utils/mtf_model.py
[(48, 'copy.deepcopy', 'copy.deepcopy', (['hparams'], {}), False, 'import copy\n'), (69, 'tensorflow.train.get_global_step', 'tf.train.get_global_step', ([], {}), True, 'import tensorflow as tf\n'), (71, 'mesh_tensorflow.convert_to_shape', 'mtf.convert_to_shape', (['hparams.mesh_shape'], {}), True, 'import mesh_tensorflow as mtf\n'), (72, 'mesh_tensorflow.convert_to_layout_rules', 'mtf.convert_to_layout_rules', (['hparams.layout'], {}), True, 'import mesh_tensorflow as mtf\n'), (98, 'mesh_tensorflow.Graph', 'mtf.Graph', ([], {}), True, 'import mesh_tensorflow as mtf\n'), (99, 'mesh_tensorflow.Mesh', 'mtf.Mesh', (['graph', '"""my_mesh"""', 'var_placer'], {}), True, 'import mesh_tensorflow as mtf\n'), (121, 'mesh_tensorflow.Lowering', 'mtf.Lowering', (['graph', '{mesh: mesh_impl}'], {}), True, 'import mesh_tensorflow as mtf\n'), (124, 'tensorflow.to_float', 'tf.to_float', (['tf_loss'], {}), True, 'import tensorflow as tf\n'), (184, 'tensor2tensor.utils.metrics.create_evaluation_metrics', 'metrics.create_evaluation_metrics', (['[problem]', 'hparams'], {}), False, 'from tensor2tensor.utils import metrics\n'), (216, 'mesh_tensorflow.Lowering', 'mtf.Lowering', (['mesh.graph', '{mesh: mesh_impl}'], {}), True, 'import mesh_tensorflow as mtf\n'), (83, 'mesh_tensorflow.utils.BalancedVariablePlacer', 'mtf.utils.BalancedVariablePlacer', (['device_list', 'devices_memeory_usage'], {}), True, 'import mesh_tensorflow as mtf\n'), (86, 'mesh_tensorflow.simd_mesh_impl.SimdMeshImpl', 'mtf.simd_mesh_impl.SimdMeshImpl', (['mesh_shape', 'layout_rules', 'mesh_devices', 'ctx.device_assignment'], {}), True, 'import mesh_tensorflow as mtf\n'), (95, 'mesh_tensorflow.placement_mesh_impl.PlacementMeshImpl', 'mtf.placement_mesh_impl.PlacementMeshImpl', (['mesh_shape', 'layout_rules', 'mesh_devices'], {}), True, 'import mesh_tensorflow as mtf\n'), (106, 'mesh_tensorflow.anonymize', 'mtf.anonymize', (['logits'], {}), True, 'import mesh_tensorflow as mtf\n'), (110, 'mesh_tensorflow.gradients', 'mtf.gradients', (['[loss]', '[v.outputs[0] for v in graph.trainable_variables]'], {}), True, 'import mesh_tensorflow as mtf\n'), (112, 'tensor2tensor.utils.learning_rate.learning_rate_schedule', 'learning_rate.learning_rate_schedule', (['hparams'], {}), False, 'from tensor2tensor.utils import learning_rate\n'), (113, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""learning_rate"""', 'lr'], {}), True, 'import tensorflow as tf\n'), (116, 'mesh_tensorflow.optimize.make_optimizer', 'mtf.optimize.make_optimizer', (['hparams', 'mtf_lr'], {}), True, 'import mesh_tensorflow as mtf\n'), (132, 'tensorflow.group', 'tf.group', (['tf_update_ops'], {}), True, 'import tensorflow as tf\n'), (134, 'mesh_tensorflow.utils.outside_all_rewrites', 'mtf.utils.outside_all_rewrites', ([], {}), True, 'import mesh_tensorflow as mtf\n'), (136, 'mesh_tensorflow.MtfRestoreHook', 'mtf.MtfRestoreHook', (['lowering'], {}), True, 'import mesh_tensorflow as mtf\n'), (144, 'tensorflow.add_to_collection', 'tf.add_to_collection', (['tf.GraphKeys.SAVERS', 'saver'], {}), True, 'import tensorflow as tf\n'), (145, 'mesh_tensorflow.MtfCheckpointSaverListener', 'mtf.MtfCheckpointSaverListener', (['lowering'], {}), True, 'import mesh_tensorflow as mtf\n'), (146, 'tensorflow.train.CheckpointSaverHook', 'tf.train.CheckpointSaverHook', (['hparams.model_dir'], {'save_steps': '(1000)', 'saver': 'saver', 'listeners': '[saver_listener]'}), True, 'import tensorflow as tf\n'), (165, 'tensor2tensor.utils.t2t_model.remove_summaries', 't2t_model.remove_summaries', ([], {}), False, 
'from tensor2tensor.utils import t2t_model\n'), (166, 'tensorflow.contrib.tpu.python.tpu.tpu_estimator.TPUEstimatorSpec', 'tpu_estimator.TPUEstimatorSpec', ([], {'mode': 'tf.estimator.ModeKeys.TRAIN', 'loss': 'tf_loss', 'train_op': 'train_op', 'host_call': 'host_call', 'training_hooks': '[restore_hook, saver_hook]'}), False, 'from tensorflow.contrib.tpu.python.tpu import tpu_estimator\n'), (173, 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', (['tf.estimator.ModeKeys.TRAIN'], {'loss': 'tf_loss', 'train_op': 'train_op', 'training_chief_hooks': '[restore_hook, saver_hook]'}), True, 'import tensorflow as tf\n'), (195, 'tensorflow.contrib.tpu.python.tpu.tpu_estimator.TPUEstimatorSpec', 'tpu_estimator.TPUEstimatorSpec', (['tf.estimator.ModeKeys.EVAL'], {'evaluation_hooks': '[restore_hook]', 'loss': 'loss', 'eval_metrics': '(metric_fn, [logits, labels])'}), False, 'from tensorflow.contrib.tpu.python.tpu import tpu_estimator\n'), (203, 'six.iteritems', 'six.iteritems', (['eval_metrics_fns'], {}), False, 'import six\n'), (207, 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', (['tf.estimator.ModeKeys.EVAL'], {'predictions': 'predictions', 'eval_metric_ops': 'eval_metrics', 'evaluation_hooks': '[restore_hook]', 'loss': 'loss'}), True, 'import tensorflow as tf\n'), (221, 'tensorflow.slice', 'tf.slice', (['outputs', '([0] * ndims)', '([actual_batch_size] + [-1] * (ndims - 1))'], {}), True, 'import tensorflow as tf\n'), (233, 'tensor2tensor.utils.t2t_model.remove_summaries', 't2t_model.remove_summaries', ([], {}), False, 'from tensor2tensor.utils import t2t_model\n'), (115, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['lr'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (115, 'mesh_tensorflow.Shape', 'mtf.Shape', (['[]'], {}), True, 'import mesh_tensorflow as mtf\n'), (130, 'tensorflow.assign_add', 'tf.assign_add', (['global_step', '(1)'], {}), True, 'import tensorflow as tf\n'), (138, 'tensorflow.global_variables', 'tf.global_variables', ([], {}), True, 'import tensorflow as tf\n'), (161, 'tensor2tensor.utils.t2t_model.create_host_call', 't2t_model.create_host_call', (['hparams.model_dir'], {}), False, 'from tensor2tensor.utils import t2t_model\n'), (183, 'tensorflow.expand_dims', 'tf.expand_dims', (['logits', '(2)'], {}), True, 'import tensorflow as tf\n'), (220, 'tensorflow.shape', 'tf.shape', (["features['inputs']"], {}), True, 'import tensorflow as tf\n'), (55, 'tensorflow.logging.warning', 'tf.logging.warning', (["('Overriding hparams.%s with %s from decode_hparams' % (k, v))"], {}), True, 'import tensorflow as tf\n'), (188, 'tensorflow.device', 'tf.device', (['"""cpu:0"""'], {}), True, 'import tensorflow as tf\n'), (188, 'mesh_tensorflow.utils.outside_all_rewrites', 'mtf.utils.outside_all_rewrites', ([], {}), True, 'import mesh_tensorflow as mtf\n'), (190, 'six.iteritems', 'six.iteritems', (['eval_metrics_fns'], {}), False, 'import six\n'), (237, 'mesh_tensorflow.MtfRestoreHook', 'mtf.MtfRestoreHook', (['lowering'], {}), True, 'import mesh_tensorflow as mtf\n'), (242, 'mesh_tensorflow.MtfRestoreHook', 'mtf.MtfRestoreHook', (['lowering'], {}), True, 'import mesh_tensorflow as mtf\n'), (193, 'tensorflow.identity', 'tf.identity', (['labels'], {}), True, 'import tensorflow as tf\n')]
vsilyaev/tensorflow
f41959ccb2d9d4c722fe8fc3351401d53bcf4900
"""## Variables @@Variable ## Variable helper functions TensorFlow provides a set of functions to help manage the set of variables collected in the graph. @@all_variables @@trainable_variables @@initialize_all_variables @@initialize_variables @@assert_variables_initialized ## Saving and Restoring Variables. @@Saver @@latest_checkpoint @@get_checkpoint_state @@update_checkpoint_state ## Sharing Variables TensorFlow provides several classes and operations that you can use to create variables contingent on certain conditions. @@get_variable @@get_variable_scope @@variable_scope @@constant_initializer @@random_normal_initializer @@truncated_normal_initializer @@random_uniform_initializer @@uniform_unit_scaling_initializer @@zeros_initializer ## Sparse Variable Updates The sparse update ops modify a subset of the entries in a dense `Variable`, either overwriting the entries or adding / subtracting a delta. These are useful for training embedding models and similar lookup-based networks, since only a small subset of embedding vectors change in any given step. Since a sparse update of a large tensor may be generated automatically during gradient computation (as in the gradient of [`tf.gather`](array_ops.md#gather)), an [`IndexedSlices`](#IndexedSlices) class is provided that encapsulates a set of sparse indices and values. `IndexedSlices` objects are detected and handled automatically by the optimizers in most cases. @@scatter_update @@scatter_add @@scatter_sub @@sparse_mask @@IndexedSlices """ from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.ops import common_shapes from tensorflow.python.ops import gen_state_ops # pylint: disable=wildcard-import,undefined-variable from tensorflow.python.ops.gen_state_ops import * # pylint: disable=protected-access def variable_op(shape, dtype, name="Variable", set_shape=True, container="", shared_name=""): """Create a variable Operation. See also variables.Variable. Args: shape: The shape of the tensor managed by this variable dtype: The underlying type of the tensor values. name: optional name to use for the variable op. set_shape: If True, set the shape property of the returned Tensor to the shape argument. container: An optional string. Defaults to "". If non-empty, this variable is placed in the given container. Otherwise, a default container is used. shared_name: An optional string. Defaults to "". If non-empty, this variable is named in the given bucket with this shared_name. Otherwise, the node name is used instead. Returns: A variable tensor. """ ret = gen_state_ops._variable(shape=shape, dtype=dtype, name=name, container=container, shared_name=shared_name) # TODO(mrry): Move this to where it is used, so we can get rid of this op # wrapper? if set_shape: ret.set_shape(shape) return ret # NOTE(mrry): Shapes are conditionally set in the Python wrapper. ops.RegisterShape("Variable")(common_shapes.unknown_shape) @ops.RegisterShape("TemporaryVariable") def _TemporaryVariableShape(op): """Shape function for the TemporaryVariable op.""" shape = tensor_util.TensorShapeProtoToList(op.get_attr("shape")) return [tensor_shape.TensorShape(shape)] @ops.RegisterShape("DestroyTemporaryVariable") def _DestroyTemporaryVariableShape(op): """Shape function for the DestroyTemporaryVariable op.""" return [op.inputs[0].get_shape()] def init_variable(v, init, name="init"): """Initializes variable with "init". 
This op does the following: if init is a Tensor, v = init if callable(init): v = init(VariableShape(v), v.dtype) Args: v: Variable to initialize init: Tensor to assign to v, Or an object convertible to Tensor e.g. nparray, Or an Initializer that generates a tensor given the shape and type of v. An "Initializer" is a callable that returns a tensor that "v" should be set to. It will be called as init(shape, dtype). name: Optional name for the op. Returns: The operation that initializes v. """ with ops.op_scope([v, init], None, v.op.name + "/"): with ops.name_scope(name) as scope: with ops.device(v.device or ops.get_default_graph().get_default_device()): if callable(init): assert v.get_shape().is_fully_defined(), "Variable shape unknown." # TODO(mrry): Convert to v.shape when the property and # accessor are reconciled (and all initializers support # tf.TensorShape objects). value = init(v.get_shape().as_list(), v.dtype.base_dtype) value = ops.convert_to_tensor(value, name="value") return assign(v, value, name=scope) else: init = ops.convert_to_tensor(init, name="init") return assign(v, init, name=scope) @ops.RegisterShape("Assign") def _AssignShape(op): """Shape function for the Assign op.""" if op.get_attr("validate_shape"): # NOTE(mrry): Return a known shape here. This makes it awkward to # chain a validated-shape assignment and a reshaping assignment, # but that is a sufficiently niche case that supporting it does # not seem worthwhile. return [op.inputs[0].get_shape().merge_with(op.inputs[1].get_shape())] return [op.inputs[1].get_shape()] @ops.RegisterShape("AssignAdd") @ops.RegisterShape("AssignSub") def _AssignUpdateShape(op): """Shape function for the AssignAdd and AssignSub dense update ops.""" return [op.inputs[0].get_shape().merge_with(op.inputs[1].get_shape())] @ops.RegisterShape("CountUpTo") def _CountUpToShape(op): """Shape function for the CountUpTo op.""" return [op.inputs[0].get_shape().merge_with(tensor_shape.scalar())] @ops.RegisterShape("ScatterAdd") @ops.RegisterShape("ScatterSub") @ops.RegisterShape("ScatterUpdate") def _ScatterUpdateShape(op): """Shape function for the sparse update ops.""" var_shape = op.inputs[0].get_shape() indices_shape = op.inputs[1].get_shape() unused_updates_shape = op.inputs[2].get_shape().merge_with( indices_shape.concatenate(var_shape[1:])) return [var_shape]
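A hypothetical usage sketch, added by the editor and not part of this module, showing the two `init` forms that init_variable() accepts. It assumes `import tensorflow as tf` and `import numpy as np`, neither of which this file imports itself.

v = variable_op([2, 3], tf.float32, name="my_var")  # raw Variable op, shape set
# Tensor-convertible init: passed through ops.convert_to_tensor and assigned.
init_a = init_variable(v, np.ones([2, 3], np.float32))
# Callable init: invoked as init(shape, dtype) to produce the initial value.
init_b = init_variable(v, lambda shape, dtype: tf.zeros(shape, dtype))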
[ "tensorflow.python.framework.tensor_shape.scalar", "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.ops.gen_state_ops._variable", "tensorflow.python.framework.ops.op_scope", "tensorflow.python.framework.ops.RegisterShape", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.framework.ops.convert_to_tensor" ]
tensorflow/python/ops/state_ops.py
[(107, 'tensorflow.python.framework.ops.RegisterShape', 'ops.RegisterShape', (['"""TemporaryVariable"""'], {}), False, 'from tensorflow.python.framework import ops\n'), (114, 'tensorflow.python.framework.ops.RegisterShape', 'ops.RegisterShape', (['"""DestroyTemporaryVariable"""'], {}), False, 'from tensorflow.python.framework import ops\n'), (155, 'tensorflow.python.framework.ops.RegisterShape', 'ops.RegisterShape', (['"""Assign"""'], {}), False, 'from tensorflow.python.framework import ops\n'), (167, 'tensorflow.python.framework.ops.RegisterShape', 'ops.RegisterShape', (['"""AssignAdd"""'], {}), False, 'from tensorflow.python.framework import ops\n'), (168, 'tensorflow.python.framework.ops.RegisterShape', 'ops.RegisterShape', (['"""AssignSub"""'], {}), False, 'from tensorflow.python.framework import ops\n'), (174, 'tensorflow.python.framework.ops.RegisterShape', 'ops.RegisterShape', (['"""CountUpTo"""'], {}), False, 'from tensorflow.python.framework import ops\n'), (180, 'tensorflow.python.framework.ops.RegisterShape', 'ops.RegisterShape', (['"""ScatterAdd"""'], {}), False, 'from tensorflow.python.framework import ops\n'), (181, 'tensorflow.python.framework.ops.RegisterShape', 'ops.RegisterShape', (['"""ScatterSub"""'], {}), False, 'from tensorflow.python.framework import ops\n'), (182, 'tensorflow.python.framework.ops.RegisterShape', 'ops.RegisterShape', (['"""ScatterUpdate"""'], {}), False, 'from tensorflow.python.framework import ops\n'), (94, 'tensorflow.python.ops.gen_state_ops._variable', 'gen_state_ops._variable', ([], {'shape': 'shape', 'dtype': 'dtype', 'name': 'name', 'container': 'container', 'shared_name': 'shared_name'}), False, 'from tensorflow.python.ops import gen_state_ops\n'), (104, 'tensorflow.python.framework.ops.RegisterShape', 'ops.RegisterShape', (['"""Variable"""'], {}), False, 'from tensorflow.python.framework import ops\n'), (111, 'tensorflow.python.framework.tensor_shape.TensorShape', 'tensor_shape.TensorShape', (['shape'], {}), False, 'from tensorflow.python.framework import tensor_shape\n'), (139, 'tensorflow.python.framework.ops.op_scope', 'ops.op_scope', (['[v, init]', 'None', "(v.op.name + '/')"], {}), False, 'from tensorflow.python.framework import ops\n'), (140, 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['name'], {}), False, 'from tensorflow.python.framework import ops\n'), (177, 'tensorflow.python.framework.tensor_shape.scalar', 'tensor_shape.scalar', ([], {}), False, 'from tensorflow.python.framework import tensor_shape\n'), (148, 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['value'], {'name': '"""value"""'}), False, 'from tensorflow.python.framework import ops\n'), (151, 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['init'], {'name': '"""init"""'}), False, 'from tensorflow.python.framework import ops\n'), (141, 'tensorflow.python.framework.ops.get_default_graph', 'ops.get_default_graph', ([], {}), False, 'from tensorflow.python.framework import ops\n')]
prouast/ctc-intake-detection
6dbfb9bbb0bb09980e4530b31742cb0d5357bf08
"""Using CTC for detection of events.""" import tensorflow as tf from tensorflow_ctc_ext_beam_search_decoder import ctc_ext_beam_search_decoder @tf.function def greedy_decode(inputs, seq_length, blank_index, def_val, shift): """Naive inference by retrieving most likely output at each time-step. Args: inputs: The prediction in form of logits. [batch_size, time_steps, num_classes] seq_length: The length of the sequences blank_index: The index of blank which will be set to def_val (or None) def_val: The value associated with the default event shift: Necessary shift to convert to representation Returns: decoded: The decoded sequence [seq_length] """ # Infer predictions using argmax decoded = tf.cast(tf.argmax(inputs, axis=-1), tf.int32) # Adjust event vals according to representation decoded = tf.where(tf.not_equal(decoded, 0), decoded+shift, decoded) # Set default vals decoded = tf.where(tf.equal(decoded, 0), def_val, decoded) return decoded, None @tf.function def ctc_decode(inputs, batch_size, seq_length, blank_index, def_val, shift, beam_width=10): """Perform ctc decoding""" # Decode uses time major inputs = tf.transpose(a=inputs, perm=[1, 0, 2]) seq_lengths = tf.fill([batch_size], seq_length) # Perform beam search indices, values, shape, indices_u, values_u, shape_u, log_probs = ctc_ext_beam_search_decoder( inputs=inputs, sequence_length=seq_lengths, beam_width=beam_width, blank_index=blank_index, top_paths=1, blank_label=0) decoded = tf.sparse.SparseTensor(indices[0], values[0], shape[0]) decoded = tf.cast(tf.sparse.to_dense(decoded), tf.int32) decoded_u = tf.sparse.SparseTensor(indices_u[0], values_u[0], shape_u[0]) decoded_u = tf.cast(tf.sparse.to_dense(decoded_u), tf.int32) # Adjust event vals according to representation decoded = tf.where(tf.not_equal(decoded, 0), decoded+shift, decoded) decoded_u = tf.where(tf.not_equal(decoded_u, 0), decoded_u+shift, decoded_u) # Set default vals decoded = tf.where(tf.equal(decoded, 0), def_val, decoded) decoded_u = tf.where(tf.equal(decoded_u, 0), def_val, decoded_u) # We know the shape pf decoded_u, and first dim for decoded decoded_u.set_shape([batch_size, seq_length]) decoded = tf.reshape(decoded, [batch_size, -1]) return decoded_u, decoded
[ "tensorflow.not_equal", "tensorflow.fill", "tensorflow.transpose", "tensorflow.sparse.to_dense", "tensorflow.reshape", "tensorflow.equal", "tensorflow.sparse.SparseTensor", "tensorflow.argmax" ]
ctc.py
[(31, 'tensorflow.transpose', 'tf.transpose', ([], {'a': 'inputs', 'perm': '[1, 0, 2]'}), True, 'import tensorflow as tf\n'), (32, 'tensorflow.fill', 'tf.fill', (['[batch_size]', 'seq_length'], {}), True, 'import tensorflow as tf\n'), (34, 'tensorflow_ctc_ext_beam_search_decoder.ctc_ext_beam_search_decoder', 'ctc_ext_beam_search_decoder', ([], {'inputs': 'inputs', 'sequence_length': 'seq_lengths', 'beam_width': 'beam_width', 'blank_index': 'blank_index', 'top_paths': '(1)', 'blank_label': '(0)'}), False, 'from tensorflow_ctc_ext_beam_search_decoder import ctc_ext_beam_search_decoder\n'), (38, 'tensorflow.sparse.SparseTensor', 'tf.sparse.SparseTensor', (['indices[0]', 'values[0]', 'shape[0]'], {}), True, 'import tensorflow as tf\n'), (40, 'tensorflow.sparse.SparseTensor', 'tf.sparse.SparseTensor', (['indices_u[0]', 'values_u[0]', 'shape_u[0]'], {}), True, 'import tensorflow as tf\n'), (50, 'tensorflow.reshape', 'tf.reshape', (['decoded', '[batch_size, -1]'], {}), True, 'import tensorflow as tf\n'), (20, 'tensorflow.argmax', 'tf.argmax', (['inputs'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (22, 'tensorflow.not_equal', 'tf.not_equal', (['decoded', '(0)'], {}), True, 'import tensorflow as tf\n'), (24, 'tensorflow.equal', 'tf.equal', (['decoded', '(0)'], {}), True, 'import tensorflow as tf\n'), (39, 'tensorflow.sparse.to_dense', 'tf.sparse.to_dense', (['decoded'], {}), True, 'import tensorflow as tf\n'), (41, 'tensorflow.sparse.to_dense', 'tf.sparse.to_dense', (['decoded_u'], {}), True, 'import tensorflow as tf\n'), (43, 'tensorflow.not_equal', 'tf.not_equal', (['decoded', '(0)'], {}), True, 'import tensorflow as tf\n'), (44, 'tensorflow.not_equal', 'tf.not_equal', (['decoded_u', '(0)'], {}), True, 'import tensorflow as tf\n'), (46, 'tensorflow.equal', 'tf.equal', (['decoded', '(0)'], {}), True, 'import tensorflow as tf\n'), (47, 'tensorflow.equal', 'tf.equal', (['decoded_u', '(0)'], {}), True, 'import tensorflow as tf\n')]
ChangeTheWorld20191008/PaddleOCR
9b2ee55c4411b5692fae7322b074ce074e597c3b
import os import sys import time __dir__ = os.path.dirname(os.path.abspath(__file__)) sys.path.append(__dir__) sys.path.append(os.path.abspath(os.path.join(__dir__, '../..'))) import cv2 import numpy as np import tensorflow as tf from tensorflow import ConfigProto from tensorflow.python.saved_model import tag_constants import tools.infer.utility as utility from ppocr.utils.logging import get_logger from ppocr.postprocess import build_post_process from ppocr.data import create_operators from ppocr.data import transform logger = get_logger() class ObjectDetector: def __init__(self, model_path='./model', label_file='./model/label.names', num_classes=2, score_threshold=0.5, image_sz=(416, 416, 3)): self._model_path = model_path self._label_file = label_file self._num_classes = num_classes self._score_threshold = score_threshold self._image_sz = image_sz[0:2] self._config = ConfigProto() self._config.gpu_options.allow_growth = True self._graph = tf.Graph() with self._graph.as_default(): self._sess = tf.Session(config=self._config) tf.saved_model.load( self._sess, [tag_constants.SERVING], self._model_path) self._image_tensor = self._sess.graph.get_tensor_by_name( 'serving_default_input_1:0') self._output_tensor = self._sess.graph.get_tensor_by_name( 'StatefulPartitionedCall:0') self._boxes = tf.placeholder( tf.float32, shape=(None, None, None, 4)) self._scores = tf.placeholder( tf.float32, shape=(None, None, self._num_classes)) self._boxes_predi, self._scores_predi, self._classes_predi,\ self._valid_detections_predi = \ tf.image.combined_non_max_suppression( boxes=self._boxes, scores=self._scores, max_output_size_per_class=50, max_total_size=50, iou_threshold=0.45, score_threshold=self._score_threshold) self._label_map = self._load_labelmap(self._label_file) def _load_labelmap(self, label_file): category_index = {} index = 1 for line in open(label_file): category_index[index] = line.rstrip("\n") index += 1 return category_index def detect(self, image, object_name): image_h, image_w, _ = image.shape ori_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) image_data = cv2.resize(ori_image, self._image_sz) det_image = image_data / 255. 
image_np_expanded = np.expand_dims(det_image, axis=0)
        image_np_expanded = np.asarray(image_np_expanded).astype(np.float32)

        pred_bbox = self._sess.run(
            self._output_tensor,
            feed_dict={self._image_tensor: image_np_expanded})
        boxes_pred, scores_pred, classes_pred, valid_detections_pred = \
            self._sess.run(
                [self._boxes_predi, self._scores_predi,
                 self._classes_predi, self._valid_detections_predi],
                feed_dict={
                    self._boxes: np.reshape(
                        pred_bbox[:, :, 0:4],
                        (pred_bbox[:, :, 0:4].shape[0], -1, 1, 4)),
                    self._scores: pred_bbox[:, :, 4:]})

        boxes = boxes_pred[0][:valid_detections_pred[0]]
        scores = scores_pred[0][:valid_detections_pred[0]]
        classes = classes_pred[0][:valid_detections_pred[0]] + 1
        labels = [self._label_map[classes_id] for classes_id in classes]

        car_boxes = []
        car_scores = []
        for box, score, label in zip(boxes, scores, labels):
            if label == object_name:
                car_boxes.append(
                    [int(box[1] * image_w), int(box[0] * image_h),
                     int(box[3] * image_w), int(box[2] * image_h)])
                car_scores.append(score)

        return car_boxes, car_scores

    def close(self):
        self._sess.close()
        self._sess = None


class TextDetector(object):
    def __init__(self, args):
        self.args = args
        self.use_onnx = args.use_onnx
        pre_process_list = [{
            'DetResizeForTest': {
                'limit_side_len': args.det_limit_side_len,
                'limit_type': args.det_limit_type,
            }
        }, {
            'NormalizeImage': {
                'std': [0.229, 0.224, 0.225],
                'mean': [0.485, 0.456, 0.406],
                'scale': '1./255.',
                'order': 'hwc'
            }
        }, {
            'ToCHWImage': None
        }, {
            'KeepKeys': {
                'keep_keys': ['image', 'shape']
            }
        }]
        postprocess_params = {}
        postprocess_params['name'] = 'DBPostProcess'
        postprocess_params["thresh"] = args.det_db_thresh
        postprocess_params["box_thresh"] = args.det_db_box_thresh
        postprocess_params["max_candidates"] = 1000
        postprocess_params["unclip_ratio"] = args.det_db_unclip_ratio
        postprocess_params["use_dilation"] = args.use_dilation
        postprocess_params["score_mode"] = args.det_db_score_mode

        self.postprocess_op = build_post_process(postprocess_params)
        self.predictor, self.input_tensor, self.output_tensors, self.config = \
            utility.create_predictor(args, 'det', logger)
        self.preprocess_op = create_operators(pre_process_list)

    def order_points_clockwise(self, pts):
        """
        reference from:
        https://github.com/jrosebr1/imutils/blob/master/imutils/perspective.py
        # sort the points based on their x-coordinates
        """
        xSorted = pts[np.argsort(pts[:, 0]), :]

        # grab the left-most and right-most points from the sorted
        # x-coordinate points
        leftMost = xSorted[:2, :]
        rightMost = xSorted[2:, :]

        # now, sort the left-most coordinates according to their
        # y-coordinates so we can grab the top-left and bottom-left
        # points, respectively
        leftMost = leftMost[np.argsort(leftMost[:, 1]), :]
        (tl, bl) = leftMost

        rightMost = rightMost[np.argsort(rightMost[:, 1]), :]
        (tr, br) = rightMost

        rect = np.array([tl, tr, br, bl], dtype="float32")
        return rect

    def clip_det_res(self, points, img_height, img_width):
        for pno in range(points.shape[0]):
            points[pno, 0] = int(min(max(points[pno, 0], 0), img_width - 1))
            points[pno, 1] = int(min(max(points[pno, 1], 0), img_height - 1))
        return points

    def filter_tag_det_res(self, dt_boxes, image_shape):
        img_height, img_width = image_shape[0:2]
        dt_boxes_new = []
        for box in dt_boxes:
            box = self.order_points_clockwise(box)
            box = self.clip_det_res(box, img_height, img_width)
            rect_width = int(np.linalg.norm(box[0] - box[1]))
            rect_height = int(np.linalg.norm(box[0] - box[3]))
            if rect_width <= 3 or rect_height <= 3:
                continue
            dt_boxes_new.append(box)
        dt_boxes = np.array(dt_boxes_new)
        return dt_boxes

    def filter_tag_det_res_only_clip(self, dt_boxes, image_shape):
        img_height, img_width = image_shape[0:2]
        dt_boxes_new = []
        for box in dt_boxes:
            box = self.clip_det_res(box, img_height, img_width)
            dt_boxes_new.append(box)
        dt_boxes = np.array(dt_boxes_new)
        return dt_boxes

    def __call__(self, img):
        ori_im = img.copy()
        data = {'image': img}

        st = time.time()
        data = transform(data, self.preprocess_op)
        img, shape_list = data

        img = np.expand_dims(img, axis=0)
        shape_list = np.expand_dims(shape_list, axis=0)
        img = img.copy()

        self.input_tensor.copy_from_cpu(img)
        self.predictor.run()
        outputs = []
        for output_tensor in self.output_tensors:
            output = output_tensor.copy_to_cpu()
            outputs.append(output)

        preds = {}
        preds['maps'] = outputs[0]
        # self.predictor.try_shrink_memory()
        post_result, score_result = self.postprocess_op(preds, shape_list)
        dt_boxes = post_result[0]['points']
        scores = score_result[0]['score']
        dt_boxes = self.filter_tag_det_res(dt_boxes, ori_im.shape)

        et = time.time()
        return dt_boxes, et - st, scores


def draw_text_det_res(img, dt_boxes, scores):
    for box, score in zip(dt_boxes, scores):
        box = np.array(box).astype(np.int32).reshape(-1, 2)
        cv2.polylines(img, [box], True, color=(255, 255, 0), thickness=2)
        left = box[0][0]
        top = box[0][1]
        cv2.putText(
            img, f"{score:.2f}", (left, top - 5),
            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0), 2)
    return img


def draw_bounding_box(img, boxes, scores):
    image_h, image_w, _ = img.shape
    for box, score in zip(boxes, scores):
        bbox_thick = int(0.6 * (image_h + image_w) / 600)
        c1, c2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
        cv2.rectangle(img, c1, c2, (255, 255, 0), bbox_thick)
        cv2.putText(
            img, f"{score:.2f}", (c1[0], c1[1] - 2),
            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), bbox_thick // 2,
            lineType=cv2.LINE_AA)
    return img


def main_and_inter_iou(main_box, inter_box):
    # determine the (x, y)-coordinates of the intersection rectangle
    xA = max(main_box[0], inter_box[0])
    yA = max(main_box[1], inter_box[1])
    xB = min(main_box[2], inter_box[2])
    yB = min(main_box[3], inter_box[3])

    # compute the area of the intersection rectangle
    inter_area = abs(max(xB - xA, 0) * max(yB - yA, 0))
    if inter_area == 0:
        return 0, 0

    # compute the area of both the prediction and ground-truth rectangles
    main_box_area = abs(
        (main_box[2] - main_box[0]) * (main_box[3] - main_box[1]))
    inter_box_area = abs(
        (inter_box[2] - inter_box[0]) * (inter_box[3] - inter_box[1]))

    # compute the intersection over union by taking the intersection area
    # and dividing it by the sum of prediction + ground-truth areas minus
    # the intersection area
    main_iou = inter_area / float(main_box_area)
    all_iou = inter_area / float(inter_box_area + main_box_area - inter_area)

    # return the intersection over union values
    return main_iou, all_iou


def aspect_ratio_filter(box, aspect_ratio_list):
    p_one = box[0]
    p_two = box[1]
    p_three = box[3]
    euc_dis_x = ((p_one[0]-p_two[0])**2+(p_one[1]-p_two[1])**2)**0.5
    euc_dis_y = ((p_one[0]-p_three[0])**2+(p_one[1]-p_three[1])**2)**0.5
    logger.info(f"[TMP]: aspect ratio is {euc_dis_x/euc_dis_y}")
    if aspect_ratio_list[0] <= euc_dis_x/euc_dis_y <= aspect_ratio_list[1]:
        return True
    return False
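A worked example of the IoU helpers, added by the editor as a hypothetical usage note (the boxes are made up):

if __name__ == "__main__":
    # Two 100x100 boxes in (x1, y1, x2, y2) form, overlapping by 50x50 pixels.
    main_iou, all_iou = main_and_inter_iou([0, 0, 100, 100], [50, 50, 150, 150])
    print(main_iou)  # 2500 / 10000 = 0.25, overlap as a fraction of main_box
    print(all_iou)   # 2500 / 17500 ~= 0.143, the usual intersection-over-union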
[ "tensorflow.Graph", "numpy.expand_dims", "tensorflow.image.combined_non_max_suppression", "tensorflow.saved_model.load", "numpy.asarray", "numpy.reshape", "tensorflow.placeholder", "numpy.linalg.norm", "tensorflow.ConfigProto", "tensorflow.Session", "numpy.argsort", "numpy.array" ]
tools/infer/my_utils.py
[(6, 'sys.path.append', 'sys.path.append', (['__dir__'], {}), False, 'import sys\n'), (21, 'ppocr.utils.logging.get_logger', 'get_logger', ([], {}), False, 'from ppocr.utils.logging import get_logger\n'), (5, 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), False, 'import os\n'), (7, 'os.path.join', 'os.path.join', (['__dir__', '"""../.."""'], {}), False, 'import os\n'), (33, 'tensorflow.ConfigProto', 'ConfigProto', ([], {}), False, 'from tensorflow import ConfigProto\n'), (36, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (75, 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), False, 'import cv2\n'), (77, 'cv2.resize', 'cv2.resize', (['ori_image', 'self._image_sz'], {}), False, 'import cv2\n'), (79, 'numpy.expand_dims', 'np.expand_dims', (['det_image'], {'axis': '(0)'}), True, 'import numpy as np\n'), (149, 'ppocr.postprocess.build_post_process', 'build_post_process', (['postprocess_params'], {}), False, 'from ppocr.postprocess import build_post_process\n'), (151, 'tools.infer.utility.create_predictor', 'utility.create_predictor', (['args', '"""det"""', 'logger'], {}), True, 'import tools.infer.utility as utility\n'), (153, 'ppocr.data.create_operators', 'create_operators', (['pre_process_list'], {}), False, 'from ppocr.data import create_operators\n'), (177, 'numpy.array', 'np.array', (['[tl, tr, br, bl]'], {'dtype': '"""float32"""'}), True, 'import numpy as np\n'), (197, 'numpy.array', 'np.array', (['dt_boxes_new'], {}), True, 'import numpy as np\n'), (206, 'numpy.array', 'np.array', (['dt_boxes_new'], {}), True, 'import numpy as np\n'), (213, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (215, 'ppocr.data.transform', 'transform', (['data', 'self.preprocess_op'], {}), False, 'from ppocr.data import transform\n'), (217, 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), True, 'import numpy as np\n'), (218, 'numpy.expand_dims', 'np.expand_dims', (['shape_list'], {'axis': '(0)'}), True, 'import numpy as np\n'), (237, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (245, 'cv2.polylines', 'cv2.polylines', (['img', '[box]', '(True)'], {'color': '(255, 255, 0)', 'thickness': '(2)'}), False, 'import cv2\n'), (248, 'cv2.putText', 'cv2.putText', (['img', 'f"""{score:.2f}"""', '(left, top - 5)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(255, 255, 0)', '(2)'], {}), False, 'import cv2\n'), (259, 'cv2.rectangle', 'cv2.rectangle', (['img', 'c1', 'c2', '(255, 255, 0)', 'bbox_thick'], {}), False, 'import cv2\n'), (260, 'cv2.putText', 'cv2.putText', (['img', 'f"""{score:.2f}"""', '(c1[0], c1[1] - 2)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(0, 0, 0)', '(bbox_thick // 2)'], {'lineType': 'cv2.LINE_AA'}), False, 'import cv2\n'), (39, 'tensorflow.Session', 'tf.Session', ([], {'config': 'self._config'}), True, 'import tensorflow as tf\n'), (41, 'tensorflow.saved_model.load', 'tf.saved_model.load', (['self._sess', '[tag_constants.SERVING]', 'self._model_path'], {}), True, 'import tensorflow as tf\n'), (49, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, None, None, 4)'}), True, 'import tensorflow as tf\n'), (51, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, None, self._num_classes)'}), True, 'import tensorflow as tf\n'), (56, 'tensorflow.image.combined_non_max_suppression', 'tf.image.combined_non_max_suppression', ([], {'boxes': 'self._boxes', 'scores': 'self._scores', 'max_output_size_per_class': '(50)', 'max_total_size': '(50)', 'iou_threshold': 
'(0.45)', 'score_threshold': 'self._score_threshold'}), True, 'import tensorflow as tf\n'), (80, 'numpy.asarray', 'np.asarray', (['image_np_expanded'], {}), True, 'import numpy as np\n'), (192, 'numpy.linalg.norm', 'np.linalg.norm', (['(box[0] - box[1])'], {}), True, 'import numpy as np\n'), (193, 'numpy.linalg.norm', 'np.linalg.norm', (['(box[0] - box[3])'], {}), True, 'import numpy as np\n'), (91, 'numpy.reshape', 'np.reshape', (['pred_bbox[:, :, 0:4]', '(pred_bbox[:, :, 0:4].shape[0], -1, 1, 4)'], {}), True, 'import numpy as np\n'), (161, 'numpy.argsort', 'np.argsort', (['pts[:, (0)]'], {}), True, 'import numpy as np\n'), (171, 'numpy.argsort', 'np.argsort', (['leftMost[:, (1)]'], {}), True, 'import numpy as np\n'), (174, 'numpy.argsort', 'np.argsort', (['rightMost[:, (1)]'], {}), True, 'import numpy as np\n'), (243, 'numpy.array', 'np.array', (['box'], {}), True, 'import numpy as np\n')]
Str0ngerCheng/Blog-Back-Up
37b89c37a2b3d652e5eb7c3ab8c8cd31e8badde7
# coding: utf-8
import numpy as np
import tensorflow as tf
from tensorflow.python import debug as tf_debug
import matplotlib.pyplot as plt
'''Hyperparameters'''
num_steps = 5
batch_size = 200
num_classes = 2
state_size = 4
learning_rate = 0.1
'''Generate data
following the rule described in the article; 1,000,000 samples are generated here
'''
def gen_data(size=1000000):
    X = np.array(np.random.choice(2, size=(size,)))
    Y = []
    '''Generate Y according to the rule'''
    for i in range(size):
        threshold = 0.5
        if X[i-3] == 1:
            threshold += 0.5
        if X[i-8] == 1:
            threshold -= 0.25
        if np.random.rand() > threshold:
            Y.append(0)
        else:
            Y.append(1)
    return X, np.array(Y)
'''Generate batched data'''
def gen_batch(raw_data, batch_size, num_step):
    raw_x, raw_y = raw_data
    data_length = len(raw_x)
    batch_patition_length = data_length // batch_size  # ->5000
    data_x = np.zeros([batch_size, batch_patition_length], dtype=np.int32) # ->(200, 5000)
    data_y = np.zeros([batch_size, batch_patition_length], dtype=np.int32) # ->(200, 5000)
    '''Fill the values into the corresponding positions of the matrices'''
    for i in range(batch_size):
        data_x[i] = raw_x[batch_patition_length*i:batch_patition_length*(i+1)]  # each row takes batch_patition_length values, i.e. 5000
        data_y[i] = raw_y[batch_patition_length*i:batch_patition_length*(i+1)]
    epoch_size = batch_patition_length // num_step  # ->5000/5=1000, the number of windows per epoch
    for i in range(epoch_size):  # extract epoch_size windows of data
        x = data_x[:, i * num_step:(i + 1) * num_step]  # ->(200, 5)
        y = data_y[:, i * num_step:(i + 1) * num_step]
        yield (x, y)  # yield makes this a generator: it suspends after producing each value (after the for loop finishes there are 1000 (x, y) pairs in total)
def gen_epochs(n, num_steps):
    for i in range(n):
        yield gen_batch(gen_data(), batch_size, num_steps)
'''Define the placeholders'''
x = tf.placeholder(tf.int32, [batch_size, num_steps], name="x")
y = tf.placeholder(tf.int32, [batch_size, num_steps], name='y')
init_state = tf.zeros([batch_size, state_size])
'''RNN input'''
rnn_inputs = tf.one_hot(x, num_classes)
#rnn_inputs = tf.unstack(x_one_hot, axis=1)
'''No longer needed; use the cell that tensorflow already defines'''
#'''Define the RNN cell'''
#with tf.variable_scope('rnn_cell'):
    #W = tf.get_variable('W', [num_classes + state_size, state_size])
    #b = tf.get_variable('b', [state_size], initializer=tf.constant_initializer(0.0))
#def rnn_cell(rnn_input, state):
    #with tf.variable_scope('rnn_cell', reuse=True):
        #W = tf.get_variable('W', [num_classes+state_size, state_size])
        #b = tf.get_variable('b', [state_size], initializer=tf.constant_initializer(0.0))
    #return tf.tanh(tf.matmul(tf.concat([rnn_input, state],1),W) + b)
#'''Add the rnn cell to the computation graph'''
#state = init_state
#rnn_outputs = []
#for rnn_input in rnn_inputs:
    #state = rnn_cell(rnn_input, state)  # state is reused across the loop
    #rnn_outputs.append(state)
#final_state = rnn_outputs[-1]  # take the final state
cell = tf.contrib.rnn.BasicRNNCell(num_units=state_size)
rnn_outputs, final_state = tf.nn.dynamic_rnn(cell, rnn_inputs, initial_state=init_state)
'''Prediction, loss, optimization'''
with tf.variable_scope('softmax'):
    W = tf.get_variable('W', [state_size, num_classes])
    b = tf.get_variable('b', [num_classes], initializer=tf.constant_initializer(0.0))
'''rnn_outputs is 3-D, so reshape it to 2-D for the matrix multiplication,
then reshape the result back to [batch_size, num_steps, num_classes]'''
logits = tf.reshape(tf.matmul(tf.reshape(rnn_outputs, [-1, state_size]), W) + b, \
            shape=[batch_size, num_steps, num_classes])
predictions = tf.nn.softmax(logits)

y_as_list = tf.unstack(y, num=num_steps, axis=1)
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
total_loss = tf.reduce_mean(losses)
train_step = tf.train.AdagradOptimizer(learning_rate).minimize(total_loss)
'''Train the network'''
def train_rnn(num_epochs, num_steps, state_size=4, verbose=True):
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        #sess = tf_debug.LocalCLIDebugWrapperSession(sess)
        training_losses = []
        for idx, epoch in enumerate(gen_epochs(num_epochs, num_steps)):
            training_loss = 0
            training_state = np.zeros((batch_size, state_size))  # ->(200, 4)
            if verbose:
                print('\nepoch', idx)
            for step, (X, Y) in enumerate(epoch):
                tr_losses, training_loss_, training_state, _ = \
                    sess.run([losses, total_loss, final_state, train_step],
                             feed_dict={x: X, y: Y, init_state: training_state})
                training_loss += training_loss_
                if step % 100 == 0 and step > 0:
                    if verbose:
                        print('Average loss at step {0}: {1}'.format(step, training_loss/100))
                    training_losses.append(training_loss/100)
                    training_loss = 0
    return training_losses
training_losses = train_rnn(num_epochs=2, num_steps=num_steps, state_size=state_size)
print(training_losses[0])
plt.plot(training_losses)
plt.show()
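A small sanity check of the batching logic, added by the editor as a hypothetical example (the sizes are made up):

# With 1000 samples and batch_size=10, each row holds 100 values, and the
# generator yields 100 // num_steps = 20 windows per epoch.
sample_x, sample_y = next(gen_batch(gen_data(1000), 10, num_steps))
print(sample_x.shape, sample_y.shape)  # -> (10, 5) (10, 5)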
[ "tensorflow.nn.dynamic_rnn", "tensorflow.get_variable", "tensorflow.zeros", "matplotlib.pyplot.plot", "tensorflow.contrib.rnn.BasicRNNCell", "tensorflow.Session", "numpy.zeros", "tensorflow.train.AdagradOptimizer", "tensorflow.unstack", "numpy.random.choice", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.one_hot", "numpy.random.rand", "numpy.array", "matplotlib.pyplot.show", "tensorflow.nn.softmax", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.constant_initializer", "tensorflow.variable_scope" ]
code/rnn/rnn_tensorflow_dynamic_rnn.py
[(56, 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[batch_size, num_steps]'], {'name': '"""x"""'}), True, 'import tensorflow as tf\n'), (57, 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[batch_size, num_steps]'], {'name': '"""y"""'}), True, 'import tensorflow as tf\n'), (58, 'tensorflow.zeros', 'tf.zeros', (['[batch_size, state_size]'], {}), True, 'import tensorflow as tf\n'), (60, 'tensorflow.one_hot', 'tf.one_hot', (['x', 'num_classes'], {}), True, 'import tensorflow as tf\n'), (82, 'tensorflow.contrib.rnn.BasicRNNCell', 'tf.contrib.rnn.BasicRNNCell', ([], {'num_units': 'state_size'}), True, 'import tensorflow as tf\n'), (83, 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['cell', 'rnn_inputs'], {'initial_state': 'init_state'}), True, 'import tensorflow as tf\n'), (93, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), True, 'import tensorflow as tf\n'), (95, 'tensorflow.unstack', 'tf.unstack', (['y'], {'num': 'num_steps', 'axis': '(1)'}), True, 'import tensorflow as tf\n'), (96, 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'y', 'logits': 'logits'}), True, 'import tensorflow as tf\n'), (97, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['losses'], {}), True, 'import tensorflow as tf\n'), (125, 'matplotlib.pyplot.plot', 'plt.plot', (['training_losses'], {}), True, 'import matplotlib.pyplot as plt\n'), (126, 'matplotlib.pyplot.show', 'plt.show', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (40, 'numpy.zeros', 'np.zeros', (['[batch_size, batch_patition_length]'], {'dtype': 'np.int32'}), True, 'import numpy as np\n'), (41, 'numpy.zeros', 'np.zeros', (['[batch_size, batch_patition_length]'], {'dtype': 'np.int32'}), True, 'import numpy as np\n'), (86, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""softmax"""'], {}), True, 'import tensorflow as tf\n'), (87, 'tensorflow.get_variable', 'tf.get_variable', (['"""W"""', '[state_size, num_classes]'], {}), True, 'import tensorflow as tf\n'), (19, 'numpy.random.choice', 'np.random.choice', (['(2)'], {'size': '(size,)'}), True, 'import numpy as np\n'), (32, 'numpy.array', 'np.array', (['Y'], {}), True, 'import numpy as np\n'), (98, 'tensorflow.train.AdagradOptimizer', 'tf.train.AdagradOptimizer', (['learning_rate'], {}), True, 'import tensorflow as tf\n'), (103, 'tensorflow.Session', 'tf.Session', ([], {}), True, 'import tensorflow as tf\n'), (28, 'numpy.random.rand', 'np.random.rand', ([], {}), True, 'import numpy as np\n'), (88, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (91, 'tensorflow.reshape', 'tf.reshape', (['rnn_outputs', '[-1, state_size]'], {}), True, 'import tensorflow as tf\n'), (104, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (109, 'numpy.zeros', 'np.zeros', (['(batch_size, state_size)'], {}), True, 'import numpy as np\n')]
siwendy/finetune-transformer-lm
a15ba3384090faa656fd591e5b6e3328e25a4fc7
import os import time import math import json import joblib import random import argparse import numpy as np import tensorflow as tf from tqdm import tqdm from functools import partial from sklearn.utils import shuffle from sklearn.metrics import accuracy_score from opt import adam, warmup_cosine, warmup_linear, warmup_constant from datasets import rocstories from analysis import rocstories as rocstories_analysis from text_utils import TextEncoder from utils import encode_dataset, flatten, iter_data, find_trainable_variables, convert_gradient_to_tensor, shape_list, ResultLogger, assign_to_gpu, average_grads, make_path def gelu(x): return 0.5*x*(1+tf.tanh(math.sqrt(2/math.pi)*(x+0.044715*tf.pow(x, 3)))) def swish(x): return x*tf.nn.sigmoid(x) opt_fns = { 'adam':adam, } act_fns = { 'relu':tf.nn.relu, 'swish':swish, 'gelu':gelu } lr_schedules = { 'warmup_cosine':warmup_cosine, 'warmup_linear':warmup_linear, 'warmup_constant':warmup_constant, } def _norm(x, g=None, b=None, e=1e-5, axis=[1]): u = tf.reduce_mean(x, axis=axis, keep_dims=True) s = tf.reduce_mean(tf.square(x-u), axis=axis, keep_dims=True) x = (x - u) * tf.rsqrt(s + e) if g is not None and b is not None: x = x*g + b return x def norm(x, scope, axis=[-1]): with tf.variable_scope(scope): n_state = shape_list(x)[-1] g = tf.get_variable("g", [n_state], initializer=tf.constant_initializer(1)) b = tf.get_variable("b", [n_state], initializer=tf.constant_initializer(0)) return _norm(x, g, b, axis=axis) def dropout(x, pdrop, train): if train and pdrop > 0: x = tf.nn.dropout(x, 1-pdrop) return x def mask_attn_weights(w): n = shape_list(w)[-1] b = tf.matrix_band_part(tf.ones([n, n]), -1, 0) b = tf.reshape(b, [1, 1, n, n]) w = w*b + -1e9*(1-b) return w def _attn(q, k, v, train=False, scale=False): #w=[-1,head,n_ctx,n_ctx] w = tf.matmul(q, k) if scale: n_state = shape_list(v)[-1] w = w*tf.rsqrt(tf.cast(n_state, tf.float32)) w = mask_attn_weights(w) w = tf.nn.softmax(w) w = dropout(w, attn_pdrop, train) #w=[-1,head,n_ctx,n_ctx],v=[-1,head,n_ctx,emb] a = tf.matmul(w, v) return a def split_states(x, n): x_shape = shape_list(x) m = x_shape[-1] new_x_shape = x_shape[:-1]+[n, m//n] return tf.reshape(x, new_x_shape) def merge_states(x): x_shape = shape_list(x) new_x_shape = x_shape[:-2]+[np.prod(x_shape[-2:])] return tf.reshape(x, new_x_shape) def split_heads(x, n, k=False): #[-1,n_ctx,head,head_emb] if k: return tf.transpose(split_states(x, n), [0, 2, 3, 1]) else: return tf.transpose(split_states(x, n), [0, 2, 1, 3]) def merge_heads(x): #[-1,head,n_ctx,emb] return merge_states(tf.transpose(x, [0, 2, 1, 3])) def conv1d(x, scope, nf, rf, w_init=tf.random_normal_initializer(stddev=0.02), b_init=tf.constant_initializer(0), pad='VALID', train=False): with tf.variable_scope(scope): #x = [-1,n_ctx,512] nx = shape_list(x)[-1] #rf = 1,nx=emb,nf=3*emb w = tf.get_variable("w", [rf, nx, nf], initializer=w_init) b = tf.get_variable("b", [nf], initializer=b_init) if rf == 1: #faster 1x1 conv c = tf.reshape(tf.matmul(tf.reshape(x, [-1, nx]), tf.reshape(w, [-1, nf]))+b, shape_list(x)[:-1]+[nf]) else: #was used to train LM c = tf.nn.conv1d(x, w, stride=1, padding=pad)+b return c def attn(x, scope, n_state, n_head, train=False, scale=False): assert n_state%n_head==0 with tf.variable_scope(scope): #c [-1,n_ctx,3*emb] c = conv1d(x, 'c_attn', n_state*3, 1, train=train) #q,k,v [-1,n_ctx,emb] q, k, v = tf.split(c, 3, 2) #q [-1,head,n_ctx,emb] v [-1,head,emb,n_ctx] v [-1,head,n_ctx,emb] q = split_heads(q, n_head) k = split_heads(k, n_head, k=True) v = split_heads(v, n_head) 
#a [-1,head,n_ctx,emb] a = _attn(q, k, v, train=train, scale=scale) #a [-1,n_ctx,head,emb] a = merge_heads(a) #a [-1,n_ctx,emb] a = conv1d(a, 'c_proj', n_state, 1, train=train) a = dropout(a, resid_pdrop, train) return a def mlp(x, scope, n_state, train=False): with tf.variable_scope(scope): nx = shape_list(x)[-1] act = act_fns[afn] h = act(conv1d(x, 'c_fc', n_state, 1, train=train)) h2 = conv1d(h, 'c_proj', nx, 1, train=train) h2 = dropout(h2, resid_pdrop, train) return h2 def block(x, scope, train=False, scale=False): with tf.variable_scope(scope): #nx = emb_size nx = shape_list(x)[-1] #a [-1,n_ctx,emb] a = attn(x, 'attn', nx, n_head, train=train, scale=scale) n = norm(x+a, 'ln_1') m = mlp(n, 'mlp', nx*4, train=train) h = norm(n+m, 'ln_2') return h def embed(X, we): #X [-1,,2] we = convert_gradient_to_tensor(we) e = tf.gather(we, X) h = tf.reduce_sum(e, 2) return h def clf(x, ny, w_init=tf.random_normal_initializer(stddev=0.02), b_init=tf.constant_initializer(0), train=False): with tf.variable_scope('clf'): nx = shape_list(x)[-1] w = tf.get_variable("w", [nx, ny], initializer=w_init) b = tf.get_variable("b", [ny], initializer=b_init) return tf.matmul(x, w)+b def model(X, M, Y, train=False, reuse=False): with tf.variable_scope('model', reuse=reuse): we = tf.get_variable("we", [n_vocab+n_special+n_ctx, n_embd], initializer=tf.random_normal_initializer(stddev=0.02)) we = dropout(we, embd_pdrop, train) #X:[n_batch_train, 2, n_ctx, 2] -> [n_batch_train*2,n_ctx,2] X = tf.reshape(X, [-1, n_ctx, 2]) M = tf.reshape(M, [-1, n_ctx]) h = embed(X, we) #h=[-1,n_ctx,emb] for layer in range(n_layer): h = block(h, 'h%d'%layer, train=train, scale=True) #h=[-1,n_ctx,emb] lm_h [-1,emb] lm_h = tf.reshape(h[:, :-1], [-1, n_embd]) lm_logits = tf.matmul(lm_h, we, transpose_b=True) lm_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=lm_logits, labels=tf.reshape(X[:, 1:, 0], [-1])) lm_losses = tf.reshape(lm_losses, [shape_list(X)[0], shape_list(X)[1]-1]) lm_losses = tf.reduce_sum(lm_losses*M[:, 1:], 1)/tf.reduce_sum(M[:, 1:], 1) clf_h = tf.reshape(h, [-1, n_embd]) pool_idx = tf.cast(tf.argmax(tf.cast(tf.equal(X[:, :, 0], clf_token), tf.float32), 1), tf.int32) clf_h = tf.gather(clf_h, tf.range(shape_list(X)[0], dtype=tf.int32)*n_ctx+pool_idx) clf_h = tf.reshape(clf_h, [-1, 2, n_embd]) if train and clf_pdrop > 0: shape = shape_list(clf_h) shape[1] = 1 clf_h = tf.nn.dropout(clf_h, 1-clf_pdrop, shape) clf_h = tf.reshape(clf_h, [-1, n_embd]) clf_logits = clf(clf_h, 1, train=train) clf_logits = tf.reshape(clf_logits, [-1, 2]) clf_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=clf_logits, labels=Y) return clf_logits, clf_losses, lm_losses def mgpu_train(*xs): gpu_ops = [] gpu_grads = [] xs = (tf.split(x, n_gpu, 0) for x in xs) for i, xs in enumerate(zip(*xs)): do_reuse = True if i > 0 else None with tf.device(assign_to_gpu(i, "/gpu:0")), tf.variable_scope(tf.get_variable_scope(), reuse=do_reuse): clf_logits, clf_losses, lm_losses = model(*xs, train=True, reuse=do_reuse) if lm_coef > 0: train_loss = tf.reduce_mean(clf_losses) + lm_coef*tf.reduce_mean(lm_losses) else: train_loss = tf.reduce_mean(clf_losses) params = find_trainable_variables("model") grads = tf.gradients(train_loss, params) grads = list(zip(grads, params)) gpu_grads.append(grads) gpu_ops.append([clf_logits, clf_losses, lm_losses]) ops = [tf.concat(op, 0) for op in zip(*gpu_ops)] grads = average_grads(gpu_grads) grads = [g for g, p in grads] train = opt_fns[opt](params, grads, lr, partial(lr_schedules[lr_schedule], 
warmup=lr_warmup),
                         n_updates_total, l2=l2, max_grad_norm=max_grad_norm,
                         vector_l2=vector_l2, b1=b1, b2=b2, e=e)
    return [train]+ops

def mgpu_predict(*xs):
    # Shard the inputs across GPUs and reuse the training graph for inference.
    gpu_ops = []
    xs = (tf.split(x, n_gpu, 0) for x in xs)
    for i, xs in enumerate(zip(*xs)):
        with tf.device(assign_to_gpu(i, "/gpu:0")), tf.variable_scope(tf.get_variable_scope(), reuse=True):
            clf_logits, clf_losses, lm_losses = model(*xs, train=False, reuse=True)
            gpu_ops.append([clf_logits, clf_losses, lm_losses])
    ops = [tf.concat(op, 0) for op in zip(*gpu_ops)]
    return ops

def transform_roc(X1, X2, X3):
    n_batch = len(X1)
    xmb = np.zeros((n_batch, 2, n_ctx, 2), dtype=np.int32)
    mmb = np.zeros((n_batch, 2, n_ctx), dtype=np.float32)
    start = encoder['_start_']
    delimiter = encoder['_delimiter_']
    for i, (x1, x2, x3) in enumerate(zip(X1, X2, X3)):
        x12 = [start]+x1[:max_len]+[delimiter]+x2[:max_len]+[clf_token]
        x13 = [start]+x1[:max_len]+[delimiter]+x3[:max_len]+[clf_token]
        l12 = len(x12)
        l13 = len(x13)
        xmb[i, 0, :l12, 0] = x12
        xmb[i, 1, :l13, 0] = x13
        mmb[i, 0, :l12] = 1
        mmb[i, 1, :l13] = 1
    # Channel 1 holds position ids, offset past the vocabulary and the special
    # tokens, so the model can look up its learned position embeddings.
    xmb[:, :, :, 1] = np.arange(n_vocab+n_special, n_vocab+n_special+n_ctx)
    return xmb, mmb

def iter_apply(Xs, Ms, Ys):
    # fns[0] concatenates per-batch logits; fns[1] sums per-batch costs.
    fns = [lambda x: np.concatenate(x, 0), lambda x: float(np.sum(x))]
    results = []
    for xmb, mmb, ymb in iter_data(Xs, Ms, Ys, n_batch=n_batch_train, truncate=False, verbose=True):
        n = len(xmb)
        if n == n_batch_train:
            res = sess.run([eval_mgpu_logits, eval_mgpu_clf_loss], {X_train: xmb, M_train: mmb, Y_train: ymb})
        else:
            res = sess.run([eval_logits, eval_clf_loss], {X: xmb, M: mmb, Y: ymb})
        # Weight by batch size so the summed cost can be averaged per example;
        # argmax over logits is unaffected by the positive scaling.
        res = [r*n for r in res]
        results.append(res)
    results = zip(*results)
    return [fn(res) for res, fn in zip(results, fns)]

def iter_predict(Xs, Ms):
    logits = []
    for xmb, mmb in iter_data(Xs, Ms, n_batch=n_batch_train, truncate=False, verbose=True):
        n = len(xmb)
        if n == n_batch_train:
            logits.append(sess.run(eval_mgpu_logits, {X_train: xmb, M_train: mmb}))
        else:
            logits.append(sess.run(eval_logits, {X: xmb, M: mmb}))
    logits = np.concatenate(logits, 0)
    return logits

def save(path):
    ps = sess.run(params)
    joblib.dump(ps, make_path(path))

def log():
    global best_score
    tr_logits, tr_cost = iter_apply(trX[:n_valid], trM[:n_valid], trY[:n_valid])
    va_logits, va_cost = iter_apply(vaX, vaM, vaY)
    tr_cost = tr_cost/len(trY[:n_valid])
    va_cost = va_cost/n_valid
    tr_acc = accuracy_score(trY[:n_valid], np.argmax(tr_logits, 1))*100.
    va_acc = accuracy_score(vaY, np.argmax(va_logits, 1))*100.
    logger.log(n_epochs=n_epochs, n_updates=n_updates, tr_cost=tr_cost, va_cost=va_cost, tr_acc=tr_acc, va_acc=va_acc)
    print('%d %d %.3f %.3f %.2f %.2f' % (n_epochs, n_updates, tr_cost, va_cost, tr_acc, va_acc))
    if submit:
        score = va_acc
        if score > best_score:
            best_score = score
            save(os.path.join(save_dir, desc, 'best_params.jl'))

argmax = lambda x: np.argmax(x, 1)

pred_fns = {
    'rocstories': argmax,
}

filenames = {
    'rocstories': 'ROCStories.tsv',
}

label_decoders = {
    'rocstories': None,
}

def predict():
    filename = filenames[dataset]
    pred_fn = pred_fns[dataset]
    label_decoder = label_decoders[dataset]
    predictions = pred_fn(iter_predict(teX, teM))
    if label_decoder is not None:
        predictions = [label_decoder[prediction] for prediction in predictions]
    path = os.path.join(submission_dir, filename)
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, 'w') as f:
        f.write('{}\t{}\n'.format('index', 'prediction'))
        for i, prediction in enumerate(predictions):
            f.write('{}\t{}\n'.format(i, prediction))

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--desc', type=str)
    parser.add_argument('--dataset', type=str)
    parser.add_argument('--log_dir', type=str, default='log/')
    parser.add_argument('--save_dir', type=str, default='save/')
    parser.add_argument('--data_dir', type=str, default='data/')
    parser.add_argument('--submission_dir', type=str, default='submission/')
    parser.add_argument('--submit', action='store_true')
    parser.add_argument('--analysis', action='store_true')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--n_iter', type=int, default=3)
    parser.add_argument('--n_batch', type=int, default=8)
    parser.add_argument('--max_grad_norm', type=int, default=1)
    parser.add_argument('--lr', type=float, default=6.25e-5)
    parser.add_argument('--lr_warmup', type=float, default=0.002)
    parser.add_argument('--n_ctx', type=int, default=512)
    parser.add_argument('--n_embd', type=int, default=768)
    parser.add_argument('--n_head', type=int, default=12)
    parser.add_argument('--n_layer', type=int, default=12)
    parser.add_argument('--embd_pdrop', type=float, default=0.1)
    parser.add_argument('--attn_pdrop', type=float, default=0.1)
    parser.add_argument('--resid_pdrop', type=float, default=0.1)
    parser.add_argument('--clf_pdrop', type=float, default=0.1)
    parser.add_argument('--l2', type=float, default=0.01)
    parser.add_argument('--vector_l2', action='store_true')
    parser.add_argument('--n_gpu', type=int, default=4)
    parser.add_argument('--opt', type=str, default='adam')
    parser.add_argument('--afn', type=str, default='gelu')
    parser.add_argument('--lr_schedule', type=str, default='warmup_linear')
    parser.add_argument('--encoder_path', type=str, default='model/encoder_bpe_40000.json')
    parser.add_argument('--bpe_path', type=str, default='model/vocab_40000.bpe')
    parser.add_argument('--n_transfer', type=int, default=12)
    parser.add_argument('--lm_coef', type=float, default=0.5)
    parser.add_argument('--b1', type=float, default=0.9)
    parser.add_argument('--b2', type=float, default=0.999)
    parser.add_argument('--e', type=float, default=1e-8)

    args = parser.parse_args()
    print(args)
    globals().update(args.__dict__)
    random.seed(seed)
    np.random.seed(seed)
    tf.set_random_seed(seed)

    logger = ResultLogger(path=os.path.join(log_dir, '{}.jsonl'.format(desc)), **args.__dict__)
    text_encoder = TextEncoder(encoder_path, bpe_path)
    encoder = text_encoder.encoder
    n_vocab = len(text_encoder.encoder)
    (trX1, trX2, trX3, trY), (vaX1, vaX2, vaX3, vaY), (teX1, teX2, teX3) = encode_dataset(rocstories(data_dir),
                                                                                          encoder=text_encoder)
    n_y = 2
    encoder['_start_'] = len(encoder)
    encoder['_delimiter_'] = len(encoder)
    encoder['_classify_'] = len(encoder)
    clf_token = encoder['_classify_']
    n_special = 3
    max_len = n_ctx//2-2
    # Shrink n_ctx to the longest packed example (+3 for the special tokens).
    n_ctx = min(max(
        [len(x1[:max_len])+max(len(x2[:max_len]), len(x3[:max_len])) for x1, x2, x3 in zip(trX1, trX2, trX3)]
        + [len(x1[:max_len])+max(len(x2[:max_len]), len(x3[:max_len])) for x1, x2, x3 in zip(vaX1, vaX2, vaX3)]
        + [len(x1[:max_len])+max(len(x2[:max_len]), len(x3[:max_len])) for x1, x2, x3 in zip(teX1, teX2, teX3)]
    )+3, n_ctx)
    trX, trM = transform_roc(trX1, trX2, trX3)
    vaX, vaM = transform_roc(vaX1, vaX2, vaX3)
    if submit:
        teX, teM = transform_roc(teX1, teX2, teX3)

    n_train = len(trY)
    n_valid = len(vaY)
    n_batch_train = n_batch*n_gpu
    n_updates_total = (n_train//n_batch_train)*n_iter

    X_train = tf.placeholder(tf.int32, [n_batch_train, 2, n_ctx, 2])
    M_train = tf.placeholder(tf.float32, [n_batch_train, 2, n_ctx])
    X = tf.placeholder(tf.int32, [None, 2, n_ctx, 2])
    M = tf.placeholder(tf.float32, [None, 2, n_ctx])

    Y_train = tf.placeholder(tf.int32, [n_batch_train])
    Y = tf.placeholder(tf.int32, [None])

    train, logits, clf_losses, lm_losses = mgpu_train(X_train, M_train, Y_train)
    clf_loss = tf.reduce_mean(clf_losses)

    params = find_trainable_variables('model')
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    sess.run(tf.global_variables_initializer())

    # Load the pretrained transformer weights, building the embedding matrix as
    # [token embeddings; freshly initialized special-token embeddings; position
    # embeddings truncated to n_ctx].
    shapes = json.load(open('model/params_shapes.json'))
    offsets = np.cumsum([np.prod(shape) for shape in shapes])
    init_params = [np.load('model/params_{}.npy'.format(n)) for n in range(10)]
    init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
    init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]
    init_params[0] = init_params[0][:n_ctx]
    init_params[0] = np.concatenate(
        [init_params[1], (np.random.randn(n_special, n_embd)*0.02).astype(np.float32), init_params[0]], 0)
    del init_params[1]
    if n_transfer == -1:
        n_transfer = 0
    else:
        # 1 embedding matrix plus 12 variables per transformer block.
        n_transfer = 1+n_transfer*12
    sess.run([p.assign(ip) for p, ip in zip(params[:n_transfer], init_params[:n_transfer])])

    eval_mgpu_logits, eval_mgpu_clf_losses, eval_mgpu_lm_losses = mgpu_predict(X_train, M_train, Y_train)
    eval_logits, eval_clf_losses, eval_lm_losses = model(X, M, Y, train=False, reuse=True)
    eval_clf_loss = tf.reduce_mean(eval_clf_losses)
    eval_mgpu_clf_loss = tf.reduce_mean(eval_mgpu_clf_losses)

    n_updates = 0
    n_epochs = 0
    if dataset != 'stsb':
        trYt = trY
    if submit:
        save(os.path.join(save_dir, desc, 'best_params.jl'))
    best_score = 0
    for i in range(n_iter):
        for xmb, mmb, ymb in iter_data(*shuffle(trX, trM, trYt, random_state=np.random),
                                       n_batch=n_batch_train, truncate=True, verbose=True):
            cost, _ = sess.run([clf_loss, train], {X_train: xmb, M_train: mmb, Y_train: ymb})
            n_updates += 1
            # Extra validation checkpoints during the first epoch only.
            if n_updates in [1000, 2000, 4000, 8000, 16000, 32000] and n_epochs == 0:
                log()
        n_epochs += 1
        log()
    if submit:
        sess.run([p.assign(ip) for p, ip in zip(params, joblib.load(os.path.join(save_dir, desc, 'best_params.jl')))])
        predict()
        if analysis:
            rocstories_analysis(data_dir, os.path.join(submission_dir, 'ROCStories.tsv'),
                                os.path.join(log_dir, 'rocstories.jsonl'))
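To make the `transform_roc` packing above concrete: each story gets two rows, one per candidate ending, serialized as [start] + context + [delimiter] + ending + [classify] in channel 0, while channel 1 carries position ids offset past the vocabulary and special tokens. A minimal self-contained sketch with toy sizes and hypothetical token ids (the real script derives these from the 40000-token BPE encoder):

import numpy as np

n_vocab, n_special, n_ctx, max_len = 10, 3, 12, 4  # toy sizes, not the real setup
start, delimiter, clf_token = 10, 11, 12           # hypothetical special-token ids

x1, x2 = [1, 2, 3], [4, 5]                         # context tokens and one candidate ending
x12 = [start] + x1[:max_len] + [delimiter] + x2[:max_len] + [clf_token]

xmb = np.zeros((1, 2, n_ctx, 2), dtype=np.int32)
xmb[0, 0, :len(x12), 0] = x12                      # channel 0: token ids
xmb[:, :, :, 1] = np.arange(n_vocab + n_special, n_vocab + n_special + n_ctx)  # channel 1: position ids
print(xmb[0, 0])                                   # each row pairs (token id, position id)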
[ "tensorflow.get_variable", "tensorflow.concat", "tensorflow.reduce_sum", "tensorflow.cast", "tensorflow.equal", "numpy.concatenate", "numpy.random.randn", "tensorflow.nn.conv1d", "numpy.arange", "tensorflow.gradients", "tensorflow.ConfigProto", "tensorflow.gather", "numpy.argmax", "tensorflow.square", "tensorflow.random_normal_initializer", "numpy.zeros", "tensorflow.nn.dropout", "tensorflow.matmul", "tensorflow.nn.sigmoid", "tensorflow.pow", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.set_random_seed", "tensorflow.split", "numpy.sum", "tensorflow.nn.softmax", "tensorflow.transpose", "numpy.random.seed", "tensorflow.reduce_mean", "sklearn.utils.shuffle", "tensorflow.reshape", "tensorflow.ones", "tensorflow.constant_initializer", "numpy.prod", "tensorflow.variable_scope", "tensorflow.rsqrt", "tensorflow.get_variable_scope" ]
train.py
[(45, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['x'], {'axis': 'axis', 'keep_dims': '(True)'}), True, 'import tensorflow as tf\n'), (67, 'tensorflow.reshape', 'tf.reshape', (['b', '[1, 1, n, n]'], {}), True, 'import tensorflow as tf\n'), (73, 'tensorflow.matmul', 'tf.matmul', (['q', 'k'], {}), True, 'import tensorflow as tf\n'), (80, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['w'], {}), True, 'import tensorflow as tf\n'), (85, 'tensorflow.matmul', 'tf.matmul', (['w', 'v'], {}), True, 'import tensorflow as tf\n'), (89, 'utils.shape_list', 'shape_list', (['x'], {}), False, 'from utils import encode_dataset, flatten, iter_data, find_trainable_variables, convert_gradient_to_tensor, shape_list, ResultLogger, assign_to_gpu, average_grads, make_path\n'), (92, 'tensorflow.reshape', 'tf.reshape', (['x', 'new_x_shape'], {}), True, 'import tensorflow as tf\n'), (95, 'utils.shape_list', 'shape_list', (['x'], {}), False, 'from utils import encode_dataset, flatten, iter_data, find_trainable_variables, convert_gradient_to_tensor, shape_list, ResultLogger, assign_to_gpu, average_grads, make_path\n'), (97, 'tensorflow.reshape', 'tf.reshape', (['x', 'new_x_shape'], {}), True, 'import tensorflow as tf\n'), (110, 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.02)'}), True, 'import tensorflow as tf\n'), (110, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0)'], {}), True, 'import tensorflow as tf\n'), (165, 'utils.convert_gradient_to_tensor', 'convert_gradient_to_tensor', (['we'], {}), False, 'from utils import encode_dataset, flatten, iter_data, find_trainable_variables, convert_gradient_to_tensor, shape_list, ResultLogger, assign_to_gpu, average_grads, make_path\n'), (166, 'tensorflow.gather', 'tf.gather', (['we', 'X'], {}), True, 'import tensorflow as tf\n'), (167, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['e', '(2)'], {}), True, 'import tensorflow as tf\n'), (170, 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.02)'}), True, 'import tensorflow as tf\n'), (170, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0)'], {}), True, 'import tensorflow as tf\n'), (232, 'utils.average_grads', 'average_grads', (['gpu_grads'], {}), False, 'from utils import encode_dataset, flatten, iter_data, find_trainable_variables, convert_gradient_to_tensor, shape_list, ResultLogger, assign_to_gpu, average_grads, make_path\n'), (249, 'numpy.zeros', 'np.zeros', (['(n_batch, 2, n_ctx, 2)'], {'dtype': 'np.int32'}), True, 'import numpy as np\n'), (250, 'numpy.zeros', 'np.zeros', (['(n_batch, 2, n_ctx)'], {'dtype': 'np.float32'}), True, 'import numpy as np\n'), (262, 'numpy.arange', 'np.arange', (['(n_vocab + n_special)', '(n_vocab + n_special + n_ctx)'], {}), True, 'import numpy as np\n'), (268, 'utils.iter_data', 'iter_data', (['Xs', 'Ms', 'Ys'], {'n_batch': 'n_batch_train', 'truncate': '(False)', 'verbose': '(True)'}), False, 'from utils import encode_dataset, flatten, iter_data, find_trainable_variables, convert_gradient_to_tensor, shape_list, ResultLogger, assign_to_gpu, average_grads, make_path\n'), (281, 'utils.iter_data', 'iter_data', (['Xs', 'Ms'], {'n_batch': 'n_batch_train', 'truncate': '(False)', 'verbose': '(True)'}), False, 'from utils import encode_dataset, flatten, iter_data, find_trainable_variables, convert_gradient_to_tensor, shape_list, ResultLogger, assign_to_gpu, average_grads, make_path\n'), (287, 'numpy.concatenate', 'np.concatenate', (['logits', '(0)'], {}), True, 'import numpy as 
np\n'), (310, 'numpy.argmax', 'np.argmax', (['x', '(1)'], {}), True, 'import numpy as np\n'), (331, 'os.path.join', 'os.path.join', (['submission_dir', 'filename'], {}), False, 'import os\n'), (339, 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), False, 'import argparse\n'), (379, 'random.seed', 'random.seed', (['seed'], {}), False, 'import random\n'), (380, 'numpy.random.seed', 'np.random.seed', (['seed'], {}), True, 'import numpy as np\n'), (381, 'tensorflow.set_random_seed', 'tf.set_random_seed', (['seed'], {}), True, 'import tensorflow as tf\n'), (384, 'text_utils.TextEncoder', 'TextEncoder', (['encoder_path', 'bpe_path'], {}), False, 'from text_utils import TextEncoder\n'), (407, 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[n_batch_train, 2, n_ctx, 2]'], {}), True, 'import tensorflow as tf\n'), (408, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[n_batch_train, 2, n_ctx]'], {}), True, 'import tensorflow as tf\n'), (409, 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, 2, n_ctx, 2]'], {}), True, 'import tensorflow as tf\n'), (410, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 2, n_ctx]'], {}), True, 'import tensorflow as tf\n'), (412, 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[n_batch_train]'], {}), True, 'import tensorflow as tf\n'), (413, 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {}), True, 'import tensorflow as tf\n'), (416, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['clf_losses'], {}), True, 'import tensorflow as tf\n'), (418, 'utils.find_trainable_variables', 'find_trainable_variables', (['"""model"""'], {}), False, 'from utils import encode_dataset, flatten, iter_data, find_trainable_variables, convert_gradient_to_tensor, shape_list, ResultLogger, assign_to_gpu, average_grads, make_path\n'), (439, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['eval_clf_losses'], {}), True, 'import tensorflow as tf\n'), (440, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['eval_mgpu_clf_losses'], {}), True, 'import tensorflow as tf\n'), (26, 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['x'], {}), True, 'import tensorflow as tf\n'), (46, 'tensorflow.square', 'tf.square', (['(x - u)'], {}), True, 'import tensorflow as tf\n'), (47, 'tensorflow.rsqrt', 'tf.rsqrt', (['(s + e)'], {}), True, 'import tensorflow as tf\n'), (53, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (61, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['x', '(1 - pdrop)'], {}), True, 'import tensorflow as tf\n'), (65, 'utils.shape_list', 'shape_list', (['w'], {}), False, 'from utils import encode_dataset, flatten, iter_data, find_trainable_variables, convert_gradient_to_tensor, shape_list, ResultLogger, assign_to_gpu, average_grads, make_path\n'), (66, 'tensorflow.ones', 'tf.ones', (['[n, n]'], {}), True, 'import tensorflow as tf\n'), (108, 'tensorflow.transpose', 'tf.transpose', (['x', '[0, 2, 1, 3]'], {}), True, 'import tensorflow as tf\n'), (111, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (115, 'tensorflow.get_variable', 'tf.get_variable', (['"""w"""', '[rf, nx, nf]'], {'initializer': 'w_init'}), True, 'import tensorflow as tf\n'), (116, 'tensorflow.get_variable', 'tf.get_variable', (['"""b"""', '[nf]'], {'initializer': 'b_init'}), True, 'import tensorflow as tf\n'), (125, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (129, 
'tensorflow.split', 'tf.split', (['c', '(3)', '(2)'], {}), True, 'import tensorflow as tf\n'), (144, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (153, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (171, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""clf"""'], {}), True, 'import tensorflow as tf\n'), (173, 'tensorflow.get_variable', 'tf.get_variable', (['"""w"""', '[nx, ny]'], {'initializer': 'w_init'}), True, 'import tensorflow as tf\n'), (174, 'tensorflow.get_variable', 'tf.get_variable', (['"""b"""', '[ny]'], {'initializer': 'b_init'}), True, 'import tensorflow as tf\n'), (178, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""model"""'], {'reuse': 'reuse'}), True, 'import tensorflow as tf\n'), (183, 'tensorflow.reshape', 'tf.reshape', (['X', '[-1, n_ctx, 2]'], {}), True, 'import tensorflow as tf\n'), (184, 'tensorflow.reshape', 'tf.reshape', (['M', '[-1, n_ctx]'], {}), True, 'import tensorflow as tf\n'), (192, 'tensorflow.reshape', 'tf.reshape', (['h[:, :-1]', '[-1, n_embd]'], {}), True, 'import tensorflow as tf\n'), (193, 'tensorflow.matmul', 'tf.matmul', (['lm_h', 'we'], {'transpose_b': '(True)'}), True, 'import tensorflow as tf\n'), (198, 'tensorflow.reshape', 'tf.reshape', (['h', '[-1, n_embd]'], {}), True, 'import tensorflow as tf\n'), (202, 'tensorflow.reshape', 'tf.reshape', (['clf_h', '[-1, 2, n_embd]'], {}), True, 'import tensorflow as tf\n'), (207, 'tensorflow.reshape', 'tf.reshape', (['clf_h', '[-1, n_embd]'], {}), True, 'import tensorflow as tf\n'), (209, 'tensorflow.reshape', 'tf.reshape', (['clf_logits', '[-1, 2]'], {}), True, 'import tensorflow as tf\n'), (211, 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'logits': 'clf_logits', 'labels': 'Y'}), True, 'import tensorflow as tf\n'), (217, 'tensorflow.split', 'tf.split', (['x', 'n_gpu', '(0)'], {}), True, 'import tensorflow as tf\n'), (231, 'tensorflow.concat', 'tf.concat', (['op', '(0)'], {}), True, 'import tensorflow as tf\n'), (234, 'functools.partial', 'partial', (['lr_schedules[lr_schedule]'], {'warmup': 'lr_warmup'}), False, 'from functools import partial\n'), (239, 'tensorflow.split', 'tf.split', (['x', 'n_gpu', '(0)'], {}), True, 'import tensorflow as tf\n'), (244, 'tensorflow.concat', 'tf.concat', (['op', '(0)'], {}), True, 'import tensorflow as tf\n'), (292, 'utils.make_path', 'make_path', (['path'], {}), False, 'from utils import encode_dataset, flatten, iter_data, find_trainable_variables, convert_gradient_to_tensor, shape_list, ResultLogger, assign_to_gpu, average_grads, make_path\n'), (332, 'os.path.dirname', 'os.path.dirname', (['path'], {}), False, 'import os\n'), (388, 'datasets.rocstories', 'rocstories', (['data_dir'], {}), False, 'from datasets import rocstories\n'), (420, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (54, 'utils.shape_list', 'shape_list', (['x'], {}), False, 'from utils import encode_dataset, flatten, iter_data, find_trainable_variables, convert_gradient_to_tensor, shape_list, ResultLogger, assign_to_gpu, average_grads, make_path\n'), (76, 'utils.shape_list', 'shape_list', (['v'], {}), False, 'from utils import encode_dataset, flatten, iter_data, find_trainable_variables, convert_gradient_to_tensor, shape_list, ResultLogger, assign_to_gpu, average_grads, make_path\n'), (96, 'numpy.prod', 'np.prod', 
(['x_shape[-2:]'], {}), True, 'import numpy as np\n'), (113, 'utils.shape_list', 'shape_list', (['x'], {}), False, 'from utils import encode_dataset, flatten, iter_data, find_trainable_variables, convert_gradient_to_tensor, shape_list, ResultLogger, assign_to_gpu, average_grads, make_path\n'), (145, 'utils.shape_list', 'shape_list', (['x'], {}), False, 'from utils import encode_dataset, flatten, iter_data, find_trainable_variables, convert_gradient_to_tensor, shape_list, ResultLogger, assign_to_gpu, average_grads, make_path\n'), (155, 'utils.shape_list', 'shape_list', (['x'], {}), False, 'from utils import encode_dataset, flatten, iter_data, find_trainable_variables, convert_gradient_to_tensor, shape_list, ResultLogger, assign_to_gpu, average_grads, make_path\n'), (172, 'utils.shape_list', 'shape_list', (['x'], {}), False, 'from utils import encode_dataset, flatten, iter_data, find_trainable_variables, convert_gradient_to_tensor, shape_list, ResultLogger, assign_to_gpu, average_grads, make_path\n'), (175, 'tensorflow.matmul', 'tf.matmul', (['x', 'w'], {}), True, 'import tensorflow as tf\n'), (196, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(lm_losses * M[:, 1:])', '(1)'], {}), True, 'import tensorflow as tf\n'), (196, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['M[:, 1:]', '(1)'], {}), True, 'import tensorflow as tf\n'), (204, 'utils.shape_list', 'shape_list', (['clf_h'], {}), False, 'from utils import encode_dataset, flatten, iter_data, find_trainable_variables, convert_gradient_to_tensor, shape_list, ResultLogger, assign_to_gpu, average_grads, make_path\n'), (206, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['clf_h', '(1 - clf_pdrop)', 'shape'], {}), True, 'import tensorflow as tf\n'), (226, 'utils.find_trainable_variables', 'find_trainable_variables', (['"""model"""'], {}), False, 'from utils import encode_dataset, flatten, iter_data, find_trainable_variables, convert_gradient_to_tensor, shape_list, ResultLogger, assign_to_gpu, average_grads, make_path\n'), (227, 'tensorflow.gradients', 'tf.gradients', (['train_loss', 'params'], {}), True, 'import tensorflow as tf\n'), (266, 'numpy.concatenate', 'np.concatenate', (['x', '(0)'], {}), True, 'import numpy as np\n'), (300, 'numpy.argmax', 'np.argmax', (['tr_logits', '(1)'], {}), True, 'import numpy as np\n'), (301, 'numpy.argmax', 'np.argmax', (['va_logits', '(1)'], {}), True, 'import numpy as np\n'), (419, 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), True, 'import tensorflow as tf\n'), (423, 'numpy.prod', 'np.prod', (['shape'], {}), True, 'import numpy as np\n'), (425, 'numpy.concatenate', 'np.concatenate', (['init_params', '(0)'], {}), True, 'import numpy as np\n'), (447, 'os.path.join', 'os.path.join', (['save_dir', 'desc', '"""best_params.jl"""'], {}), False, 'import os\n'), (55, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(1)'], {}), True, 'import tensorflow as tf\n'), (56, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0)'], {}), True, 'import tensorflow as tf\n'), (77, 'tensorflow.cast', 'tf.cast', (['n_state', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (120, 'tensorflow.nn.conv1d', 'tf.nn.conv1d', (['x', 'w'], {'stride': '(1)', 'padding': 'pad'}), True, 'import tensorflow as tf\n'), (179, 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.02)'}), True, 'import tensorflow as tf\n'), (194, 'tensorflow.reshape', 'tf.reshape', (['X[:, 1:, (0)]', '[-1]'], {}), True, 'import tensorflow as tf\n'), (220, 
'utils.assign_to_gpu', 'assign_to_gpu', (['i', '"""/gpu:0"""'], {}), False, 'from utils import encode_dataset, flatten, iter_data, find_trainable_variables, convert_gradient_to_tensor, shape_list, ResultLogger, assign_to_gpu, average_grads, make_path\n'), (220, 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), True, 'import tensorflow as tf\n'), (225, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['clf_losses'], {}), True, 'import tensorflow as tf\n'), (241, 'utils.assign_to_gpu', 'assign_to_gpu', (['i', '"""/gpu:0"""'], {}), False, 'from utils import encode_dataset, flatten, iter_data, find_trainable_variables, convert_gradient_to_tensor, shape_list, ResultLogger, assign_to_gpu, average_grads, make_path\n'), (241, 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), True, 'import tensorflow as tf\n'), (266, 'numpy.sum', 'np.sum', (['x'], {}), True, 'import numpy as np\n'), (308, 'os.path.join', 'os.path.join', (['save_dir', 'desc', '"""best_params.jl"""'], {}), False, 'import os\n'), (450, 'sklearn.utils.shuffle', 'shuffle', (['trX', 'trM', 'trYt'], {'random_state': 'np.random'}), False, 'from sklearn.utils import shuffle\n'), (461, 'os.path.join', 'os.path.join', (['submission_dir', '"""ROCStories.tsv"""'], {}), False, 'import os\n'), (461, 'os.path.join', 'os.path.join', (['log_dir', '"""rocstories.jsonl"""'], {}), False, 'import os\n'), (23, 'math.sqrt', 'math.sqrt', (['(2 / math.pi)'], {}), False, 'import math\n'), (195, 'utils.shape_list', 'shape_list', (['X'], {}), False, 'from utils import encode_dataset, flatten, iter_data, find_trainable_variables, convert_gradient_to_tensor, shape_list, ResultLogger, assign_to_gpu, average_grads, make_path\n'), (199, 'tensorflow.equal', 'tf.equal', (['X[:, :, (0)]', 'clf_token'], {}), True, 'import tensorflow as tf\n'), (223, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['clf_losses'], {}), True, 'import tensorflow as tf\n'), (118, 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, nx]'], {}), True, 'import tensorflow as tf\n'), (118, 'tensorflow.reshape', 'tf.reshape', (['w', '[-1, nf]'], {}), True, 'import tensorflow as tf\n'), (118, 'utils.shape_list', 'shape_list', (['x'], {}), False, 'from utils import encode_dataset, flatten, iter_data, find_trainable_variables, convert_gradient_to_tensor, shape_list, ResultLogger, assign_to_gpu, average_grads, make_path\n'), (195, 'utils.shape_list', 'shape_list', (['X'], {}), False, 'from utils import encode_dataset, flatten, iter_data, find_trainable_variables, convert_gradient_to_tensor, shape_list, ResultLogger, assign_to_gpu, average_grads, make_path\n'), (223, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['lm_losses'], {}), True, 'import tensorflow as tf\n'), (428, 'numpy.random.randn', 'np.random.randn', (['n_special', 'n_embd'], {}), True, 'import numpy as np\n'), (23, 'tensorflow.pow', 'tf.pow', (['x', '(3)'], {}), True, 'import tensorflow as tf\n'), (200, 'utils.shape_list', 'shape_list', (['X'], {}), False, 'from utils import encode_dataset, flatten, iter_data, find_trainable_variables, convert_gradient_to_tensor, shape_list, ResultLogger, assign_to_gpu, average_grads, make_path\n'), (458, 'os.path.join', 'os.path.join', (['save_dir', 'desc', '"""best_params.jl"""'], {}), False, 'import os\n')]
trix-co/trix-backend
8857691965688b07c6e3db89d9345c0a156b9260
# from __future__ import absolute_import
# from __future__ import division
# from __future__ import print_function

import argparse
import glob
import logging
import os
import sys
import time

import tensorflow as tf

logging.getLogger('tensorflow').disabled = True
import numpy as np

from fawkes.differentiator import FawkesMaskGeneration
from utils import load_extractor, init_gpu, select_target_label, dump_image, reverse_process_cloaked, \
    Faces, filter_image_paths
from fawkes.align_face import aligner
from fawkes.utils import get_file

import datetime


def generate_cloak_images(protector, image_X, target_emb=None):
    cloaked_image_X = protector.attack(image_X, target_emb)
    return cloaked_image_X


def check_imgs(imgs):
    if np.max(imgs) <= 1 and np.min(imgs) >= 0:
        imgs = imgs * 255.0
    elif np.max(imgs) <= 255 and np.min(imgs) >= 0:
        pass
    else:
        raise Exception("Image values must lie in [0, 1] or [0, 255]")
    return imgs


class Fawkes(object):
    def __init__(self, feature_extractor, gpu, batch_size):
        self.feature_extractor = feature_extractor
        self.gpu = gpu
        self.batch_size = batch_size
        global sess
        sess = init_gpu(gpu)
        global graph
        graph = tf.get_default_graph()

        model_dir = os.path.join(os.path.expanduser('~'), '.fawkes')
        if not os.path.exists(os.path.join(model_dir, "mtcnn.p.gz")):
            os.makedirs(model_dir, exist_ok=True)
            get_file("mtcnn.p.gz", "http://sandlab.cs.uchicago.edu/fawkes/files/mtcnn.p.gz",
                     cache_dir=model_dir, cache_subdir='')

        self.fs_names = [feature_extractor]
        if isinstance(feature_extractor, list):
            self.fs_names = feature_extractor

        self.aligner = aligner(sess)
        self.feature_extractors_ls = [load_extractor(name) for name in self.fs_names]

        global protector
        global protector_param
        # The optimizer is built once here ("frontloaded"); these hard-coded
        # values override whatever the constructor was given.
        mode = 'low'
        th = 0.04
        sd = 1e9
        lr = 10
        max_step = 500
        batch_size = 1
        format = 'png'
        separate_target = True
        debug = False
        th, max_step, lr = self.mode2param(mode)
        protector_param = "-".join([str(x) for x in [mode, th, sd, lr, max_step, batch_size, format,
                                                     separate_target, debug]])
        # The single-letter prints throughout this file are timing checkpoints.
        print('h', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
        print('i', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
        protector = FawkesMaskGeneration(sess, self.feature_extractors_ls,
                                         batch_size=batch_size,
                                         mimic_img=True,
                                         intensity_range='imagenet',
                                         initial_const=sd,
                                         learning_rate=lr,
                                         max_iterations=max_step,
                                         l_threshold=th,
                                         verbose=1 if debug else 0,
                                         maximize=False,
                                         keep_final=False,
                                         image_shape=(224, 224, 3))
        print('j', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
        with sess.as_default():
            print("hello")

    def mode2param(self, mode):
        if mode == 'low':
            th = 0.003
            max_step = 45
            lr = 20
        elif mode == 'mid':
            th = 0.005
            max_step = 120
            lr = 15
        elif mode == 'high':
            th = 0.008
            max_step = 600
            lr = 10
        elif mode == 'ultra':
            if not tf.test.is_gpu_available():
                print("Please enable GPU for ultra setting...")
                sys.exit(1)
            th = 0.01
            max_step = 1000
            lr = 10
        else:
            raise Exception("mode must be one of 'low', 'mid', 'high', 'ultra', 'custom'")
        return th, max_step, lr

    def run_protection(self, image_paths, mode='low', th=0.04, sd=1e9, lr=10, max_step=500, batch_size=1,
                       format='png', separate_target=True, debug=False):
        # NOTE: the cloak optimizer was frontloaded in __init__, so the th/sd/lr/
        # max_step/batch_size/debug arguments here are accepted for API
        # compatibility but do not affect the attack itself.
        image_paths, loaded_images = filter_image_paths(image_paths)
        if not image_paths:
            raise Exception("No images in the directory")
        faces = Faces(image_paths, loaded_images, self.aligner, verbose=1)
        print('d', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
        original_images = faces.cropped_faces
        original_images = np.array(original_images)
        print('e', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))

        if separate_target:
            target_embedding = []
            for org_img in original_images:
                org_img = org_img.reshape([1] + list(org_img.shape))
                tar_emb = select_target_label(org_img, self.feature_extractors_ls, self.fs_names)
                target_embedding.append(tar_emb)
            target_embedding = np.concatenate(target_embedding)
            print('f', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
        else:
            target_embedding = select_target_label(original_images, self.feature_extractors_ls, self.fs_names)

        print('g', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
        protected_images = generate_cloak_images(protector, original_images, target_emb=target_embedding)
        print('k', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
        faces.cloaked_cropped_faces = protected_images
        print('l', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))

        cloak_perturbation = reverse_process_cloaked(protected_images) - reverse_process_cloaked(
            original_images)
        print('m', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
        final_images = faces.merge_faces(cloak_perturbation)
        print('n', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))

        for p_img, path in zip(final_images, image_paths):
            file_name = "{}_{}_cloaked.{}".format(".".join(path.split(".")[:-1]), mode, format)
            dump_image(p_img, file_name, format=format)
        print('o', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))

        print("Done!")
        return None


def main(*argv):
    print('a', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
    if not argv:
        argv = list(sys.argv)

    try:
        import signal
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
    except Exception:
        pass

    parser = argparse.ArgumentParser()
    parser.add_argument('--directory', '-d', type=str,
                        help='directory that contain images for cloaking', default='imgs/')
    parser.add_argument('--gpu', '-g', type=str,
                        help='GPU id', default='0')
    parser.add_argument('--mode', '-m', type=str,
                        help='cloak generation mode', default='low')
    parser.add_argument('--feature-extractor', type=str,
                        help="name of the feature extractor used for optimization",
                        default="high_extract")
    parser.add_argument('--th', type=float, default=0.01)
    parser.add_argument('--max-step', type=int, default=1000)
    parser.add_argument('--sd', type=int, default=1e9)
    parser.add_argument('--lr', type=float, default=2)
    parser.add_argument('--batch-size', type=int, default=1)
    parser.add_argument('--separate_target', action='store_true')
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--format', type=str,
                        help="final image format",
                        default="png")
    args = parser.parse_args(argv[1:])

    assert args.format in ['png', 'jpg', 'jpeg']
    if args.format == 'jpg':
        args.format = 'jpeg'

    image_paths = glob.glob(os.path.join(args.directory, "*"))
    image_paths = [path for path in image_paths if "_cloaked" not in path.split("/")[-1]]
    print('b', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
    protector = Fawkes(args.feature_extractor, args.gpu, args.batch_size)
    print('c', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
    protector.run_protection(image_paths, mode=args.mode, th=args.th, sd=args.sd, lr=args.lr,
                             max_step=args.max_step, batch_size=args.batch_size, format=args.format,
                             separate_target=args.separate_target, debug=args.debug)
    print('z', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))


if __name__ == '__main__':
    main(*sys.argv)
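The single-letter timestamped prints that thread through this script are ad-hoc timing checkpoints. A minimal sketch of how the repeated expression could be factored into one helper (the `stamp` name is ours, not part of the original script):

import datetime
import time

def stamp(tag):
    # One labeled wall-clock checkpoint, same format as the prints above.
    print(tag, datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))

stamp('d')  # e.g. right after face alignment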
[ "numpy.min", "tensorflow.test.is_gpu_available", "numpy.concatenate", "numpy.max", "tensorflow.get_default_graph", "numpy.array" ]
fawkes/protection_compute_frontloaded.py
[(14, 'logging.getLogger', 'logging.getLogger', (['"""tensorflow"""'], {}), False, 'import logging\n'), (189, 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), False, 'import argparse\n'), (48, 'utils.init_gpu', 'init_gpu', (['gpu'], {}), False, 'from utils import load_extractor, init_gpu, select_target_label, dump_image, reverse_process_cloaked, Faces, filter_image_paths\n'), (50, 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), True, 'import tensorflow as tf\n'), (62, 'fawkes.align_face.aligner', 'aligner', (['sess'], {}), False, 'from fawkes.align_face import aligner\n'), (84, 'fawkes.differentiator.FawkesMaskGeneration', 'FawkesMaskGeneration', (['sess', 'self.feature_extractors_ls'], {'batch_size': 'batch_size', 'mimic_img': '(True)', 'intensity_range': '"""imagenet"""', 'initial_const': 'sd', 'learning_rate': 'lr', 'max_iterations': 'max_step', 'l_threshold': 'th', 'verbose': '(1 if debug else 0)', 'maximize': '(False)', 'keep_final': '(False)', 'image_shape': '(224, 224, 3)'}), False, 'from fawkes.differentiator import FawkesMaskGeneration\n'), (134, 'utils.filter_image_paths', 'filter_image_paths', (['image_paths'], {}), False, 'from utils import load_extractor, init_gpu, select_target_label, dump_image, reverse_process_cloaked, Faces, filter_image_paths\n'), (139, 'utils.Faces', 'Faces', (['image_paths', 'loaded_images', 'self.aligner'], {'verbose': '(1)'}), False, 'from utils import load_extractor, init_gpu, select_target_label, dump_image, reverse_process_cloaked, Faces, filter_image_paths\n'), (143, 'numpy.array', 'np.array', (['original_images'], {}), True, 'import numpy as np\n'), (185, 'signal.signal', 'signal.signal', (['signal.SIGPIPE', 'signal.SIG_DFL'], {}), False, 'import signal\n'), (221, 'os.path.join', 'os.path.join', (['args.directory', '"""*"""'], {}), False, 'import os\n'), (32, 'numpy.max', 'np.max', (['imgs'], {}), True, 'import numpy as np\n'), (32, 'numpy.min', 'np.min', (['imgs'], {}), True, 'import numpy as np\n'), (52, 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), False, 'import os\n'), (54, 'os.makedirs', 'os.makedirs', (['model_dir'], {'exist_ok': '(True)'}), False, 'import os\n'), (55, 'fawkes.utils.get_file', 'get_file', (['"""mtcnn.p.gz"""', '"""http://sandlab.cs.uchicago.edu/fawkes/files/mtcnn.p.gz"""'], {'cache_dir': 'model_dir', 'cache_subdir': '""""""'}), False, 'from fawkes.utils import get_file\n'), (63, 'utils.load_extractor', 'load_extractor', (['name'], {}), False, 'from utils import load_extractor, init_gpu, select_target_label, dump_image, reverse_process_cloaked, Faces, filter_image_paths\n'), (152, 'numpy.concatenate', 'np.concatenate', (['target_embedding'], {}), True, 'import numpy as np\n'), (155, 'utils.select_target_label', 'select_target_label', (['original_images', 'self.feature_extractors_ls', 'self.fs_names'], {}), False, 'from utils import load_extractor, init_gpu, select_target_label, dump_image, reverse_process_cloaked, Faces, filter_image_paths\n'), (164, 'utils.reverse_process_cloaked', 'reverse_process_cloaked', (['protected_images'], {}), False, 'from utils import load_extractor, init_gpu, select_target_label, dump_image, reverse_process_cloaked, Faces, filter_image_paths\n'), (164, 'utils.reverse_process_cloaked', 'reverse_process_cloaked', (['original_images'], {}), False, 'from utils import load_extractor, init_gpu, select_target_label, dump_image, reverse_process_cloaked, Faces, filter_image_paths\n'), (172, 'utils.dump_image', 'dump_image', (['p_img', 'file_name'], 
{'format': 'format'}), False, 'from utils import load_extractor, init_gpu, select_target_label, dump_image, reverse_process_cloaked, Faces, filter_image_paths\n'), (34, 'numpy.max', 'np.max', (['imgs'], {}), True, 'import numpy as np\n'), (34, 'numpy.min', 'np.min', (['imgs'], {}), True, 'import numpy as np\n'), (53, 'os.path.join', 'os.path.join', (['model_dir', '"""mtcnn.p.gz"""'], {}), False, 'import os\n'), (150, 'utils.select_target_label', 'select_target_label', (['org_img', 'self.feature_extractors_ls', 'self.fs_names'], {}), False, 'from utils import load_extractor, init_gpu, select_target_label, dump_image, reverse_process_cloaked, Faces, filter_image_paths\n'), (179, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (223, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (225, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (229, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (81, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (83, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (98, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (140, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (145, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (161, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (163, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (166, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (168, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (173, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (118, 'tensorflow.test.is_gpu_available', 'tf.test.is_gpu_available', ([], {}), True, 'import tensorflow as tf\n'), (120, 'sys.exit', 'sys.exit', (['(1)'], {}), False, 'import sys\n'), (153, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (156, 'time.time', 'time.time', ([], {}), False, 'import time\n')]
mlcommons/mobile_open
d0c62d5d633cbc6b62aa39fe33a901cc6d555b1a
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Provides DeepLab model definition and helper functions.

DeepLab is a deep learning system for semantic image segmentation with
the following features:

(1) Atrous convolution to explicitly control the resolution at which
feature responses are computed within Deep Convolutional Neural Networks.

(2) Atrous spatial pyramid pooling (ASPP) to robustly segment objects at
multiple scales with filters at multiple sampling rates and effective
fields-of-views.

(3) ASPP module augmented with image-level feature and batch normalization.

(4) A simple yet effective decoder module to recover the object boundaries.

See the following papers for more details:

"Encoder-Decoder with Atrous Separable Convolution for Semantic Image
Segmentation"
Liang-Chieh Chen, Yukun Zhu, George Papandreou, Florian Schroff, Hartwig Adam.
(https://arxiv.org/abs/1802.02611)

"Rethinking Atrous Convolution for Semantic Image Segmentation,"
Liang-Chieh Chen, George Papandreou, Florian Schroff, Hartwig Adam
(https://arxiv.org/abs/1706.05587)

"DeepLab: Semantic Image Segmentation with Deep Convolutional Nets,
Atrous Convolution, and Fully Connected CRFs",
Liang-Chieh Chen*, George Papandreou*, Iasonas Kokkinos, Kevin Murphy,
Alan L Yuille (* equal contribution)
(https://arxiv.org/abs/1606.00915)

"Semantic Image Segmentation with Deep Convolutional Nets and Fully Connected
CRFs"
Liang-Chieh Chen*, George Papandreou*, Iasonas Kokkinos, Kevin Murphy,
Alan L. Yuille (* equal contribution)
(https://arxiv.org/abs/1412.7062)
"""
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim

from core import dense_prediction_cell
from core import feature_extractor
from core import utils

slim = contrib_slim

LOGITS_SCOPE_NAME = 'logits'
MERGED_LOGITS_SCOPE = 'merged_logits'
IMAGE_POOLING_SCOPE = 'image_pooling'
ASPP_SCOPE = 'aspp'
CONCAT_PROJECTION_SCOPE = 'concat_projection'
DECODER_SCOPE = 'decoder'
META_ARCHITECTURE_SCOPE = 'meta_architecture'
PROB_SUFFIX = '_prob'

_resize_bilinear = utils.resize_bilinear
scale_dimension = utils.scale_dimension
split_separable_conv2d = utils.split_separable_conv2d


def get_extra_layer_scopes(last_layers_contain_logits_only=False):
  """Gets the scopes for extra layers.

  Args:
    last_layers_contain_logits_only: Boolean, True if only consider logits as
      the last layer (i.e., exclude ASPP module, decoder module and so on)

  Returns:
    A list of scopes for extra layers.
  """
  if last_layers_contain_logits_only:
    return [LOGITS_SCOPE_NAME]
  else:
    return [
        LOGITS_SCOPE_NAME,
        IMAGE_POOLING_SCOPE,
        ASPP_SCOPE,
        CONCAT_PROJECTION_SCOPE,
        DECODER_SCOPE,
        META_ARCHITECTURE_SCOPE,
    ]


def predict_labels_multi_scale(images,
                               model_options,
                               add_flipped_images=False):
  """Predicts segmentation labels.

  Args:
    images: A tensor of size [batch, height, width, channels].
    model_options: A ModelOptions instance to configure models.
    add_flipped_images: Add flipped images for evaluation or not.

  Returns:
    A dictionary with keys specifying the output_type (e.g., semantic
      prediction) and values storing Tensors representing predictions (argmax
      over channels). Each prediction has size [batch, height, width].
  """
  outputs_to_predictions = {
      output: []
      for output in model_options.outputs_to_num_classes
  }

  with tf.variable_scope(tf.get_variable_scope(), reuse=None):
    outputs_to_scales_to_logits = multi_scale_logits(
        images,
        model_options=model_options,
        is_training=False,
        fine_tune_batch_norm=False)

  if add_flipped_images:
    with tf.variable_scope(tf.get_variable_scope(), reuse=True):
      outputs_to_scales_to_logits_reversed = multi_scale_logits(
          tf.reverse_v2(images, [2]),
          model_options=model_options,
          is_training=False,
          fine_tune_batch_norm=False)

  for output in sorted(outputs_to_scales_to_logits):
    scales_to_logits = outputs_to_scales_to_logits[output]
    logits = _resize_bilinear(
        scales_to_logits[MERGED_LOGITS_SCOPE],
        tf.shape(images)[1:3],
        scales_to_logits[MERGED_LOGITS_SCOPE].dtype)
    outputs_to_predictions[output].append(
        tf.expand_dims(tf.nn.softmax(logits), 4))

    if add_flipped_images:
      scales_to_logits_reversed = (
          outputs_to_scales_to_logits_reversed[output])
      logits_reversed = _resize_bilinear(
          tf.reverse_v2(scales_to_logits_reversed[MERGED_LOGITS_SCOPE], [2]),
          tf.shape(images)[1:3],
          scales_to_logits_reversed[MERGED_LOGITS_SCOPE].dtype)
      outputs_to_predictions[output].append(
          tf.expand_dims(tf.nn.softmax(logits_reversed), 4))

  for output in sorted(outputs_to_predictions):
    predictions = outputs_to_predictions[output]
    # Compute average prediction across different scales and flipped images.
    predictions = tf.reduce_mean(tf.concat(predictions, 4), axis=4)
    outputs_to_predictions[output] = tf.argmax(predictions, 3,
                                               output_type=tf.dtypes.int32)
    outputs_to_predictions[output + PROB_SUFFIX] = tf.nn.softmax(predictions)

  return outputs_to_predictions


def predict_labels(images, model_options):
  """Predicts segmentation labels.

  Args:
    images: A tensor of size [batch, height, width, channels].
    model_options: A ModelOptions instance to configure models.

  Returns:
    A dictionary with keys specifying the output_type (e.g., semantic
      prediction) and values storing Tensors representing predictions (argmax
      over channels). Each prediction has size [batch, height, width].
  """
  outputs_to_scales_to_logits = multi_scale_logits(
      images,
      model_options=model_options,
      is_training=False,
      fine_tune_batch_norm=False)

  predictions = {}
  for output in sorted(outputs_to_scales_to_logits):
    scales_to_logits = outputs_to_scales_to_logits[output]
    logits = scales_to_logits[MERGED_LOGITS_SCOPE]
    # There are two ways to obtain the final prediction results: (1) bilinear
    # upsampling the logits followed by argmax, or (2) argmax followed by
    # nearest neighbor upsampling. The second option may introduce the
    # "blocking effect" but is computationally efficient.
    if model_options.prediction_with_upsampled_logits:
      logits = _resize_bilinear(logits,
                                # tf.shape(images)[1:3],
                                tf.TensorShape([512, 512]),
                                scales_to_logits[MERGED_LOGITS_SCOPE].dtype)
      predictions[output] = tf.argmax(logits, 3, output_type=tf.dtypes.int32)
      # predictions[output + PROB_SUFFIX] = tf.nn.softmax(logits)
    else:
      argmax_results = tf.argmax(logits, 3, output_type=tf.dtypes.int32)
      argmax_results = tf.image.resize_nearest_neighbor(
          tf.expand_dims(argmax_results, 3),
          tf.shape(images)[1:3],
          align_corners=True,
          name='resize_prediction')
      predictions[output] = tf.squeeze(argmax_results, 3)
      # predictions[output + PROB_SUFFIX] = tf.image.resize_bilinear(
      #     tf.nn.softmax(logits),
      #     tf.shape(images)[1:3],
      #     align_corners=True,
      #     name='resize_prob')
  return predictions


def multi_scale_logits(images,
                       model_options,
                       weight_decay=0.0001,
                       is_training=False,
                       fine_tune_batch_norm=False,
                       nas_training_hyper_parameters=None):
  """Gets the logits for multi-scale inputs.

  The returned logits are all downsampled (due to max-pooling layers)
  for both training and evaluation.

  Args:
    images: A tensor of size [batch, height, width, channels].
    model_options: A ModelOptions instance to configure models.
    weight_decay: The weight decay for model variables.
    is_training: Is training or not.
    fine_tune_batch_norm: Fine-tune the batch norm parameters or not.
    nas_training_hyper_parameters: A dictionary storing hyper-parameters for
      training nas models. Its keys are:
      - `drop_path_keep_prob`: Probability to keep each path in the cell when
        training.
      - `total_training_steps`: Total training steps to help drop path
        probability calculation.

  Returns:
    outputs_to_scales_to_logits: A map of maps from output_type (e.g.,
      semantic prediction) to a dictionary of multi-scale logits names to
      logits. For each output_type, the dictionary has keys which
      correspond to the scales and values which correspond to the logits.
      For example, if `scales` equals [1.0, 1.5], then the keys would
      include 'merged_logits', 'logits_1.00' and 'logits_1.50'.
  """
  # Setup default values.
  crop_height = (
      model_options.crop_size[0]
      if model_options.crop_size else tf.shape(images)[1])
  crop_width = (
      model_options.crop_size[1]
      if model_options.crop_size else tf.shape(images)[2])
  if model_options.image_pooling_crop_size:
    image_pooling_crop_height = model_options.image_pooling_crop_size[0]
    image_pooling_crop_width = model_options.image_pooling_crop_size[1]

  # Compute the height, width for the output logits.
  if model_options.decoder_output_stride:
    logits_output_stride = min(model_options.decoder_output_stride)
  else:
    logits_output_stride = model_options.output_stride

  logits_height = scale_dimension(
      crop_height,
      1.0 / logits_output_stride)
  logits_width = scale_dimension(
      crop_width,
      1.0 / logits_output_stride)

  # Compute the logits for each scale in the image pyramid.
  outputs_to_scales_to_logits = {
      k: {}
      for k in model_options.outputs_to_num_classes
  }

  num_channels = images.get_shape().as_list()[-1]

  scaled_crop_size = model_options.crop_size
  scaled_images = images
  scaled_image_pooling_crop_size = model_options.image_pooling_crop_size

  updated_options = model_options._replace(
      crop_size=scaled_crop_size,
      image_pooling_crop_size=scaled_image_pooling_crop_size)
  outputs_to_logits = _get_logits(
      scaled_images,
      updated_options,
      weight_decay=weight_decay,
      reuse=tf.AUTO_REUSE,
      is_training=is_training,
      fine_tune_batch_norm=fine_tune_batch_norm,
      nas_training_hyper_parameters=nas_training_hyper_parameters)

  # Return when only one input scale.
  for output in sorted(model_options.outputs_to_num_classes):
    outputs_to_scales_to_logits[output][
        MERGED_LOGITS_SCOPE] = outputs_to_logits[output]

  return outputs_to_scales_to_logits


def extract_features(images,
                     model_options,
                     weight_decay=0.0001,
                     reuse=None,
                     is_training=False,
                     fine_tune_batch_norm=False,
                     nas_training_hyper_parameters=None):
  """Extracts features by the particular feature extractor network.

  Args:
    images: A tensor of size [batch, height, width, channels].
    model_options: A ModelOptions instance to configure models.
    weight_decay: The weight decay for model variables.
    reuse: Reuse the model variables or not.
    is_training: Is training or not.
    fine_tune_batch_norm: Fine-tune the batch norm parameters or not.
    nas_training_hyper_parameters: A dictionary storing hyper-parameters for
      training nas models. Its keys are:
      - `drop_path_keep_prob`: Probability to keep each path in the cell when
        training.
      - `total_training_steps`: Total training steps to help drop path
        probability calculation.

  Returns:
    concat_logits: A tensor of size [batch, feature_height, feature_width,
      feature_channels], where feature_height/feature_width are determined
      by the images height/width and output_stride.
    end_points: A dictionary from components of the network to the
      corresponding activation.
  """
  features, end_points = feature_extractor.extract_features(
      images,
      output_stride=model_options.output_stride,
      depth_multiplier=model_options.depth_multiplier,
      divisible_by=model_options.divisible_by,
      weight_decay=weight_decay,
      reuse=reuse,
      is_training=is_training,
      preprocess_images=model_options.preprocess_images,
      preprocessed_images_dtype=model_options.preprocessed_images_dtype,
      fine_tune_batch_norm=fine_tune_batch_norm,
      nas_architecture_options=model_options.nas_architecture_options,
      nas_training_hyper_parameters=nas_training_hyper_parameters,
      use_bounded_activation=model_options.use_bounded_activation)

  if model_options.dense_prediction_cell_config is not None:
    tf.logging.info('Using dense prediction cell config.')
    dense_prediction_layer = dense_prediction_cell.DensePredictionCell(
        config=model_options.dense_prediction_cell_config,
        hparams={
            'conv_rate_multiplier': 16 // model_options.output_stride,
        })
    concat_logits = dense_prediction_layer.build_cell(
        features,
        output_stride=model_options.output_stride,
        crop_size=model_options.crop_size,
        image_pooling_crop_size=model_options.image_pooling_crop_size,
        weight_decay=weight_decay,
        reuse=reuse,
        is_training=is_training,
        fine_tune_batch_norm=fine_tune_batch_norm)
    return concat_logits, end_points
  else:
    # The following codes employ the DeepLabv3 ASPP module. Note that we
    # could express the ASPP module as one particular dense prediction
    # cell architecture. We do not do so but leave the following codes
    # for backward compatibility.
    batch_norm_params = utils.get_batch_norm_params(
        decay=0.9997,
        epsilon=1e-5,
        scale=True,
        is_training=(is_training and fine_tune_batch_norm),
        sync_batch_norm_method=model_options.sync_batch_norm_method)
    batch_norm = utils.get_batch_norm_fn(
        model_options.sync_batch_norm_method)
    activation_fn = (
        tf.nn.relu6 if model_options.use_bounded_activation else tf.nn.relu)
    with slim.arg_scope(
        [slim.conv2d, slim.separable_conv2d],
        weights_regularizer=slim.l2_regularizer(weight_decay),
        activation_fn=activation_fn,
        normalizer_fn=batch_norm,
        padding='SAME',
        stride=1,
        reuse=reuse):
      with slim.arg_scope([batch_norm], **batch_norm_params):
        depth = model_options.aspp_convs_filters
        branch_logits = []

        if model_options.crop_size is not None:
          image_pooling_crop_size = model_options.image_pooling_crop_size
          # If image_pooling_crop_size is not specified, use crop_size.
          if image_pooling_crop_size is None:
            image_pooling_crop_size = model_options.crop_size
          pool_height = scale_dimension(
              image_pooling_crop_size[0],
              1. / model_options.output_stride)
          pool_width = scale_dimension(
              image_pooling_crop_size[1],
              1. / model_options.output_stride)
          image_feature = slim.avg_pool2d(
              features, [pool_height, pool_width],
              model_options.image_pooling_stride, padding='VALID')
          resize_height = scale_dimension(
              model_options.crop_size[0],
              1. / model_options.output_stride)
          resize_width = scale_dimension(
              model_options.crop_size[1],
              1. / model_options.output_stride)
        else:
          # If crop_size is None, we simply do global pooling.
          pool_height = tf.shape(features)[1]
          pool_width = tf.shape(features)[2]
          image_feature = tf.reduce_mean(
              features, axis=[1, 2], keepdims=True)
          resize_height = pool_height
          resize_width = pool_width
        image_feature_activation_fn = tf.nn.relu
        image_feature_normalizer_fn = batch_norm
        if model_options.aspp_with_squeeze_and_excitation:
          image_feature_activation_fn = tf.nn.sigmoid
          if model_options.image_se_uses_qsigmoid:
            image_feature_activation_fn = utils.q_sigmoid
          image_feature_normalizer_fn = None
        image_feature = slim.conv2d(
            image_feature, depth, 1,
            activation_fn=image_feature_activation_fn,
            normalizer_fn=image_feature_normalizer_fn,
            scope=IMAGE_POOLING_SCOPE)
        image_feature = _resize_bilinear(
            image_feature,
            [resize_height, resize_width],
            image_feature.dtype)
        # Set shape for resize_height/resize_width if they are not Tensor.
        if isinstance(resize_height, tf.Tensor):
          resize_height = None
        if isinstance(resize_width, tf.Tensor):
          resize_width = None
        image_feature.set_shape([None, resize_height, resize_width, depth])
        if not model_options.aspp_with_squeeze_and_excitation:
          branch_logits.append(image_feature)

        # Employ a 1x1 convolution.
        branch_logits.append(slim.conv2d(features, depth, 1,
                                         scope=ASPP_SCOPE + str(0)))

        if model_options.atrous_rates:
          # Employ 3x3 convolutions with different atrous rates.
          for i, rate in enumerate(model_options.atrous_rates, 1):
            scope = ASPP_SCOPE + str(i)
            if model_options.aspp_with_separable_conv:
              aspp_features = split_separable_conv2d(
                  features,
                  filters=depth,
                  rate=rate,
                  weight_decay=weight_decay,
                  scope=scope)
            else:
              aspp_features = slim.conv2d(
                  features, depth, 3, rate=rate, scope=scope)
            branch_logits.append(aspp_features)

        # Merge branch logits.
        concat_logits = tf.concat(branch_logits, 3)
        if model_options.aspp_with_concat_projection:
          concat_logits = slim.conv2d(
              concat_logits, depth, 1, scope=CONCAT_PROJECTION_SCOPE)
          concat_logits = slim.dropout(
              concat_logits,
              keep_prob=0.9,
              is_training=is_training,
              scope=CONCAT_PROJECTION_SCOPE + '_dropout')
        if model_options.aspp_with_squeeze_and_excitation:
          concat_logits *= image_feature

        return concat_logits, end_points


def _get_logits(images,
                model_options,
                weight_decay=0.0001,
                reuse=None,
                is_training=False,
                fine_tune_batch_norm=False,
                nas_training_hyper_parameters=None):
  """Gets the logits by atrous/image spatial pyramid pooling.

  Args:
    images: A tensor of size [batch, height, width, channels].
    model_options: A ModelOptions instance to configure models.
    weight_decay: The weight decay for model variables.
    reuse: Reuse the model variables or not.
    is_training: Is training or not.
    fine_tune_batch_norm: Fine-tune the batch norm parameters or not.
    nas_training_hyper_parameters: A dictionary storing hyper-parameters for
      training nas models. Its keys are:
      - `drop_path_keep_prob`: Probability to keep each path in the cell when
        training.
      - `total_training_steps`: Total training steps to help drop path
        probability calculation.

  Returns:
    outputs_to_logits: A map from output_type to logits.
  """
  features, end_points = extract_features(
      images,
      model_options,
      weight_decay=weight_decay,
      reuse=reuse,
      is_training=is_training,
      fine_tune_batch_norm=fine_tune_batch_norm,
      nas_training_hyper_parameters=nas_training_hyper_parameters)

  if model_options.decoder_output_stride:
    crop_size = model_options.crop_size
    if crop_size is None:
      crop_size = [tf.shape(images)[1], tf.shape(images)[2]]
    features = refine_by_decoder(
        features,
        end_points,
        crop_size=crop_size,
        decoder_output_stride=model_options.decoder_output_stride,
        decoder_use_separable_conv=model_options.decoder_use_separable_conv,
        decoder_use_sum_merge=model_options.decoder_use_sum_merge,
        decoder_filters=model_options.decoder_filters,
        decoder_output_is_logits=model_options.decoder_output_is_logits,
        weight_decay=weight_decay,
        reuse=reuse,
        is_training=is_training,
        fine_tune_batch_norm=fine_tune_batch_norm,
        use_bounded_activation=model_options.use_bounded_activation)

  outputs_to_logits = {}
  for output in sorted(model_options.outputs_to_num_classes):
    if model_options.decoder_output_is_logits:
      outputs_to_logits[output] = tf.identity(features, name=output)
    else:
      outputs_to_logits[output] = get_branch_logits(
          features,
          model_options.outputs_to_num_classes[output],
          model_options.atrous_rates,
          weight_decay=weight_decay,
          reuse=reuse,
          scope_suffix=output)

  return outputs_to_logits


def refine_by_decoder(features,
                      end_points,
                      crop_size=None,
                      decoder_output_stride=None,
                      decoder_use_separable_conv=False,
                      decoder_use_sum_merge=False,
                      decoder_filters=256,
                      decoder_output_is_logits=False,
                      weight_decay=0.0001,
                      reuse=None,
                      is_training=False,
                      fine_tune_batch_norm=False,
                      use_bounded_activation=False,
                      sync_batch_norm_method='None'):
  """Adds the decoder to obtain sharper segmentation results.

  Args:
    features: A tensor of size [batch, features_height, features_width,
      features_channels].
    end_points: A dictionary from components of the network to the
      corresponding activation.
    crop_size: A tuple [crop_height, crop_width] specifying whole patch crop
      size.
    decoder_output_stride: A list of integers specifying the output stride of
      low-level features used in the decoder module.
    decoder_use_separable_conv: Employ separable convolution for decoder or
      not.
    decoder_use_sum_merge: Boolean, decoder uses simple sum merge or not.
    decoder_filters: Integer, decoder filter size.
    decoder_output_is_logits: Boolean, using decoder output as logits or not.
    weight_decay: The weight decay for model variables.
    reuse: Reuse the model variables or not.
    is_training: Is training or not.
    fine_tune_batch_norm: Fine-tune the batch norm parameters or not.
    use_bounded_activation: Whether or not to use bounded activations. Bounded
      activations better lend themselves to quantized inference.
    sync_batch_norm_method: String, method used to sync batch norm. Currently
      only support `None` (no sync batch norm) and `tpu` (use tpu code to
      sync batch norm).

  Returns:
    Decoder output with size [batch, decoder_height, decoder_width,
      decoder_channels].

  Raises:
    ValueError: If crop_size is None.
  """
  if crop_size is None:
    raise ValueError('crop_size must be provided when using decoder.')
  batch_norm_params = utils.get_batch_norm_params(
      decay=0.9997,
      epsilon=1e-5,
      scale=True,
      is_training=(is_training and fine_tune_batch_norm),
      sync_batch_norm_method=sync_batch_norm_method)
  batch_norm = utils.get_batch_norm_fn(sync_batch_norm_method)
  decoder_depth = decoder_filters
  projected_filters = 48
  if decoder_use_sum_merge:
    # When using sum merge, the projected filters must be equal to decoder
    # filters.
    projected_filters = decoder_filters
  if decoder_output_is_logits:
    # Overwrite the setting when decoder output is logits.
    activation_fn = None
    normalizer_fn = None
    conv2d_kernel = 1
    # Use original conv instead of separable conv.
    decoder_use_separable_conv = False
  else:
    # Default setting when decoder output is not logits.
    activation_fn = tf.nn.relu6 if use_bounded_activation else tf.nn.relu
    normalizer_fn = batch_norm
    conv2d_kernel = 3
  with slim.arg_scope(
      [slim.conv2d, slim.separable_conv2d],
      weights_regularizer=slim.l2_regularizer(weight_decay),
      activation_fn=activation_fn,
      normalizer_fn=normalizer_fn,
      padding='SAME',
      stride=1,
      reuse=reuse):
    with slim.arg_scope([batch_norm], **batch_norm_params):
      with tf.variable_scope(DECODER_SCOPE, DECODER_SCOPE, [features]):
        decoder_features = features
        decoder_stage = 0
        scope_suffix = ''
        for output_stride in decoder_output_stride:
          feature_list = feature_extractor.networks_to_feature_maps[
              feature_extractor.DECODER_END_POINTS][output_stride]
          # If only one decoder stage, we do not change the scope name in
          # order for backward compatibility.
          if decoder_stage:
            scope_suffix = '_{}'.format(decoder_stage)
          for i, name in enumerate(feature_list):
            decoder_features_list = [decoder_features]
            # MobileNet and NAS variants use different naming convention.
            feature_name = name
            decoder_features_list.append(
                slim.conv2d(
                    end_points[feature_name],
                    projected_filters,
                    1,
                    scope='feature_projection' + str(i) + scope_suffix))
            # Determine the output size.
            decoder_height = scale_dimension(crop_size[0],
                                             1.0 / output_stride)
            decoder_width = scale_dimension(crop_size[1],
                                            1.0 / output_stride)
            # Resize to decoder_height/decoder_width.
            decoder_features_list[0] = _resize_bilinear(
                decoder_features_list[0],
                [decoder_height, decoder_width],
                decoder_features_list[0].dtype)
            for j, feature in enumerate(decoder_features_list):
              # decoder_features_list[j] = _resize_bilinear(
              #     feature, [decoder_height, decoder_width], feature.dtype)
              h = (None if isinstance(decoder_height, tf.Tensor)
                   else decoder_height)
              w = (None if isinstance(decoder_width, tf.Tensor)
                   else decoder_width)
              decoder_features_list[j].set_shape([None, h, w, None])
            if decoder_use_sum_merge:
              decoder_features = _decoder_with_sum_merge(
                  decoder_features_list,
                  decoder_depth,
                  conv2d_kernel=conv2d_kernel,
                  decoder_use_separable_conv=decoder_use_separable_conv,
                  weight_decay=weight_decay,
                  scope_suffix=scope_suffix)
            else:
              if not decoder_use_separable_conv:
                scope_suffix = str(i) + scope_suffix
              decoder_features = _decoder_with_concat_merge(
                  decoder_features_list,
                  decoder_depth,
                  decoder_use_separable_conv=decoder_use_separable_conv,
                  weight_decay=weight_decay,
                  scope_suffix=scope_suffix)
          decoder_stage += 1
        return decoder_features


def _decoder_with_sum_merge(decoder_features_list,
                            decoder_depth,
                            conv2d_kernel=3,
                            decoder_use_separable_conv=True,
                            weight_decay=0.0001,
                            scope_suffix=''):
  """Decoder with sum to merge features.

  Args:
    decoder_features_list: A list of decoder features.
    decoder_depth: Integer, the filters used in the convolution.
    conv2d_kernel: Integer, the convolution kernel size.
    decoder_use_separable_conv: Boolean, use separable conv or not.
    weight_decay: Weight decay for the model variables.
    scope_suffix: String, used in the scope suffix.

  Returns:
    decoder features merged with sum.

  Raises:
    RuntimeError: If decoder_features_list have length not equal to 2.
  """
  if len(decoder_features_list) != 2:
    raise RuntimeError('Expect decoder_features has length 2.')
  # Only apply one convolution when decoder use sum merge.
  if decoder_use_separable_conv:
    decoder_features = split_separable_conv2d(
        decoder_features_list[0],
        filters=decoder_depth,
        rate=1,
        weight_decay=weight_decay,
        scope='decoder_split_sep_conv0'+scope_suffix) + decoder_features_list[1]
  else:
    decoder_features = slim.conv2d(
        decoder_features_list[0],
        decoder_depth,
        conv2d_kernel,
        scope='decoder_conv0'+scope_suffix) + decoder_features_list[1]
  return decoder_features


def _decoder_with_concat_merge(decoder_features_list,
                               decoder_depth,
                               decoder_use_separable_conv=True,
                               weight_decay=0.0001,
                               scope_suffix=''):
  """Decoder with concatenation to merge features.

  This decoder method applies two convolutions to smooth the features obtained
  by concatenating the input decoder_features_list.

  This decoder module is proposed in the DeepLabv3+ paper.

  Args:
    decoder_features_list: A list of decoder features.
    decoder_depth: Integer, the filters used in the convolution.
    decoder_use_separable_conv: Boolean, use separable conv or not.
    weight_decay: Weight decay for the model variables.
    scope_suffix: String, used in the scope suffix.

  Returns:
    decoder features merged with concatenation.
  """
  if decoder_use_separable_conv:
    decoder_features = split_separable_conv2d(
        tf.concat(decoder_features_list, 3),
        filters=decoder_depth,
        rate=1,
        weight_decay=weight_decay,
        scope='decoder_conv0'+scope_suffix)
    decoder_features = split_separable_conv2d(
        decoder_features,
        filters=decoder_depth,
        rate=1,
        weight_decay=weight_decay,
        scope='decoder_conv1'+scope_suffix)
  else:
    num_convs = 2
    decoder_features = slim.repeat(
        tf.concat(decoder_features_list, 3),
        num_convs,
        slim.conv2d,
        decoder_depth,
        3,
        scope='decoder_conv'+scope_suffix)
  return decoder_features


def get_branch_logits(features,
                      num_classes,
                      atrous_rates=None,
                      kernel_size=1,
                      weight_decay=0.0001,
                      reuse=None,
                      scope_suffix=''):
  """Gets the logits from each model's branch.

  The underlying model is branched out in the last layer when atrous
  spatial pyramid pooling is employed, and all branches are sum-merged
  to form the final logits.

  Args:
    features: A float tensor of shape [batch, height, width, channels].
    num_classes: Number of classes to predict.
    atrous_rates: A list of atrous convolution rates for last layer.
    kernel_size: Kernel size for convolution.
    weight_decay: Weight decay for the model variables.
    reuse: Reuse model variables or not.
    scope_suffix: Scope suffix for the model variables.

  Returns:
    Merged logits with shape [batch, height, width, num_classes].

  Raises:
    ValueError: Upon invalid input kernel_size value.
  """
  # When using batch normalization with ASPP, ASPP has been applied before
  # in extract_features, and thus we simply apply 1x1 convolution here.
  if atrous_rates is None:
    if kernel_size != 1:
      raise ValueError('Kernel size must be 1 when atrous_rates is None. '
                       'Gets %d.' % kernel_size)
    atrous_rates = [1]

  with slim.arg_scope(
      [slim.conv2d],
      weights_regularizer=slim.l2_regularizer(weight_decay),
      weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
      reuse=reuse):
    with tf.variable_scope(LOGITS_SCOPE_NAME, LOGITS_SCOPE_NAME, [features]):
      branch_logits = []
      for i, rate in enumerate(atrous_rates):
        scope = scope_suffix
        if i:
          scope += '_%d' % i

        branch_logits.append(
            slim.conv2d(
                features,
                num_classes,
                kernel_size=kernel_size,
                rate=rate,
                activation_fn=None,
                normalizer_fn=None,
                scope=scope))

      return tf.add_n(branch_logits)
[ "tensorflow.TensorShape", "tensorflow.nn.softmax", "tensorflow.concat", "tensorflow.shape", "tensorflow.reduce_mean", "tensorflow.identity", "tensorflow.squeeze", "tensorflow.expand_dims", "tensorflow.truncated_normal_initializer", "tensorflow.logging.info", "tensorflow.reverse_v2", "tensorflow.variable_scope", "tensorflow.argmax", "tensorflow.get_variable_scope", "tensorflow.add_n" ]
vision/deeplab/models_and_code/model.py
[(330, 'core.feature_extractor.extract_features', 'feature_extractor.extract_features', (['images'], {'output_stride': 'model_options.output_stride', 'depth_multiplier': 'model_options.depth_multiplier', 'divisible_by': 'model_options.divisible_by', 'weight_decay': 'weight_decay', 'reuse': 'reuse', 'is_training': 'is_training', 'preprocess_images': 'model_options.preprocess_images', 'preprocessed_images_dtype': 'model_options.preprocessed_images_dtype', 'fine_tune_batch_norm': 'fine_tune_batch_norm', 'nas_architecture_options': 'model_options.nas_architecture_options', 'nas_training_hyper_parameters': 'nas_training_hyper_parameters', 'use_bounded_activation': 'model_options.use_bounded_activation'}), False, 'from core import feature_extractor\n'), (599, 'core.utils.get_batch_norm_params', 'utils.get_batch_norm_params', ([], {'decay': '(0.9997)', 'epsilon': '(1e-05)', 'scale': '(True)', 'is_training': '(is_training and fine_tune_batch_norm)', 'sync_batch_norm_method': 'sync_batch_norm_method'}), False, 'from core import utils\n'), (605, 'core.utils.get_batch_norm_fn', 'utils.get_batch_norm_fn', (['sync_batch_norm_method'], {}), False, 'from core import utils\n'), (159, 'tensorflow.argmax', 'tf.argmax', (['predictions', '(3)'], {'output_type': 'tf.dtypes.int32'}), True, 'import tensorflow as tf\n'), (160, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['predictions'], {}), True, 'import tensorflow as tf\n'), (346, 'tensorflow.logging.info', 'tf.logging.info', (['"""Using dense prediction cell config."""'], {}), True, 'import tensorflow as tf\n'), (347, 'core.dense_prediction_cell.DensePredictionCell', 'dense_prediction_cell.DensePredictionCell', ([], {'config': 'model_options.dense_prediction_cell_config', 'hparams': "{'conv_rate_multiplier': 16 // model_options.output_stride}"}), False, 'from core import dense_prediction_cell\n'), (367, 'core.utils.get_batch_norm_params', 'utils.get_batch_norm_params', ([], {'decay': '(0.9997)', 'epsilon': '(1e-05)', 'scale': '(True)', 'is_training': '(is_training and fine_tune_batch_norm)', 'sync_batch_norm_method': 'model_options.sync_batch_norm_method'}), False, 'from core import utils\n'), (373, 'core.utils.get_batch_norm_fn', 'utils.get_batch_norm_fn', (['model_options.sync_batch_norm_method'], {}), False, 'from core import utils\n'), (121, 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), True, 'import tensorflow as tf\n'), (158, 'tensorflow.concat', 'tf.concat', (['predictions', '(4)'], {}), True, 'import tensorflow as tf\n'), (196, 'tensorflow.argmax', 'tf.argmax', (['logits', '(3)'], {'output_type': 'tf.dtypes.int32'}), True, 'import tensorflow as tf\n'), (199, 'tensorflow.argmax', 'tf.argmax', (['logits', '(3)'], {'output_type': 'tf.dtypes.int32'}), True, 'import tensorflow as tf\n'), (205, 'tensorflow.squeeze', 'tf.squeeze', (['argmax_results', '(3)'], {}), True, 'import tensorflow as tf\n'), (249, 'tensorflow.shape', 'tf.shape', (['images'], {}), True, 'import tensorflow as tf\n'), (252, 'tensorflow.shape', 'tf.shape', (['images'], {}), True, 'import tensorflow as tf\n'), (537, 'tensorflow.identity', 'tf.identity', (['features'], {'name': 'output'}), True, 'import tensorflow as tf\n'), (755, 'tensorflow.concat', 'tf.concat', (['decoder_features_list', '(3)'], {}), True, 'import tensorflow as tf\n'), (769, 'tensorflow.concat', 'tf.concat', (['decoder_features_list', '(3)'], {}), True, 'import tensorflow as tf\n'), (819, 'tensorflow.variable_scope', 'tf.variable_scope', (['LOGITS_SCOPE_NAME', 'LOGITS_SCOPE_NAME', '[features]'], {}), 
True, 'import tensorflow as tf\n'), (836, 'tensorflow.add_n', 'tf.add_n', (['branch_logits'], {}), True, 'import tensorflow as tf\n'), (129, 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), True, 'import tensorflow as tf\n'), (131, 'tensorflow.reverse_v2', 'tf.reverse_v2', (['images', '[2]'], {}), True, 'import tensorflow as tf\n'), (140, 'tensorflow.shape', 'tf.shape', (['images'], {}), True, 'import tensorflow as tf\n'), (143, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), True, 'import tensorflow as tf\n'), (149, 'tensorflow.reverse_v2', 'tf.reverse_v2', (['scales_to_logits_reversed[MERGED_LOGITS_SCOPE]', '[2]'], {}), True, 'import tensorflow as tf\n'), (194, 'tensorflow.TensorShape', 'tf.TensorShape', (['[512, 512]'], {}), True, 'import tensorflow as tf\n'), (201, 'tensorflow.expand_dims', 'tf.expand_dims', (['argmax_results', '(3)'], {}), True, 'import tensorflow as tf\n'), (465, 'tensorflow.concat', 'tf.concat', (['branch_logits', '(3)'], {}), True, 'import tensorflow as tf\n'), (633, 'tensorflow.variable_scope', 'tf.variable_scope', (['DECODER_SCOPE', 'DECODER_SCOPE', '[features]'], {}), True, 'import tensorflow as tf\n'), (817, 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.01)'}), True, 'import tensorflow as tf\n'), (150, 'tensorflow.shape', 'tf.shape', (['images'], {}), True, 'import tensorflow as tf\n'), (153, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits_reversed'], {}), True, 'import tensorflow as tf\n'), (202, 'tensorflow.shape', 'tf.shape', (['images'], {}), True, 'import tensorflow as tf\n'), (415, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['features'], {'axis': '[1, 2]', 'keepdims': '(True)'}), True, 'import tensorflow as tf\n'), (518, 'tensorflow.shape', 'tf.shape', (['images'], {}), True, 'import tensorflow as tf\n'), (518, 'tensorflow.shape', 'tf.shape', (['images'], {}), True, 'import tensorflow as tf\n'), (413, 'tensorflow.shape', 'tf.shape', (['features'], {}), True, 'import tensorflow as tf\n'), (414, 'tensorflow.shape', 'tf.shape', (['features'], {}), True, 'import tensorflow as tf\n')]
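Each tuple in the api_extract field above appears to follow the layout (line_number, canonical_api_name, call_expression, (args, kwargs), uses_module_alias, import_statement), where the boolean seems to distinguish calls reached through a module alias (`import tensorflow as tf`) from plain `from ... import` bindings. A small hypothetical sketch of reading one record, using an entry copied from the list above:

record = (836, 'tensorflow.add_n', 'tf.add_n', (['branch_logits'], {}),
          True, 'import tensorflow as tf\n')
line_no, api_name, call_expr, (args, kwargs), module_alias, import_stmt = record
print(line_no, api_name, args, import_stmt.strip())
# 836 tensorflow.add_n ['branch_logits'] import tensorflow as tf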
creotiv/hdrnet
e5c00f11b8ee9afe8444014ce682e6c997df7003
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python interface to custom TensorFlow operations for HDRnet."""

import os
import tensorflow as tf
from tensorflow.python.framework import ops

__all__ = ['bilateral_slice', 'bilateral_slice_apply']


path = os.path.dirname(os.path.abspath(__file__))
path = tf.resource_loader.get_path_to_datafile(
    os.path.join(path, 'lib', 'hdrnet_ops.so'))
_hdrnet = tf.load_op_library(path)

# -- Register operations ------------------------------------------------------
bilateral_slice = _hdrnet.bilateral_slice
bilateral_slice_apply = _hdrnet.bilateral_slice_apply

# ----------- Register gradients ----------------------------------------------
@ops.RegisterGradient('BilateralSlice')
def _bilateral_slice_grad(op, grad):
  grid_tensor = op.inputs[0]
  guide_tensor = op.inputs[1]
  return _hdrnet.bilateral_slice_grad(grid_tensor, guide_tensor, grad)


@ops.RegisterGradient('BilateralSliceApply')
def _bilateral_slice_apply_grad(op, grad):
  grid_tensor = op.inputs[0]
  guide_tensor = op.inputs[1]
  input_tensor = op.inputs[2]
  has_offset = op.get_attr('has_offset')
  return _hdrnet.bilateral_slice_apply_grad(
      grid_tensor, guide_tensor, input_tensor, grad, has_offset=has_offset)


# ----------- Register Shape inference ----------------------------------------
@ops.RegisterShape('BilateralSlice')
def _bilateral_slice_shape(op):
  input_tensor = op.inputs[0]
  guide_tensor = op.inputs[1]
  return [guide_tensor.get_shape().concatenate(input_tensor.get_shape()[-1])]


@ops.RegisterShape('BilateralSliceApply')
def _bilateral_slice_apply_shape(op):
  grid_tensor = op.inputs[0]
  guide_tensor = op.inputs[1]
  input_tensor = op.inputs[2]
  has_offset = op.get_attr('has_offset')
  chan_in = input_tensor.get_shape()[-1]
  chan_grid = grid_tensor.get_shape()[-1]
  if has_offset:
    chan_out = chan_grid // (chan_in + 1)
  else:
    chan_out = chan_grid // chan_in
  return [guide_tensor.get_shape().concatenate(chan_out)]
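The BilateralSliceApply shape function above derives the output channel count purely from channel arithmetic: with has_offset=True the grid packs chan_in + 1 coefficients per output channel (a multiplier per input channel plus an offset, i.e. one affine row), otherwise chan_in. A small self-contained check of that arithmetic with hypothetical channel counts:

def apply_output_channels(chan_grid, chan_in, has_offset):
  # Mirrors the chan_out computation in the shape function above.
  if has_offset:
    return chan_grid // (chan_in + 1)
  return chan_grid // chan_in

# A 3-channel input with a 12-channel grid yields a 3-channel output when the
# grid carries offsets (3x4 affine rows), and 4 channels when it does not.
assert apply_output_channels(chan_grid=12, chan_in=3, has_offset=True) == 3
assert apply_output_channels(chan_grid=12, chan_in=3, has_offset=False) == 4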
[ "tensorflow.python.framework.ops.RegisterShape", "tensorflow.load_op_library", "tensorflow.python.framework.ops.RegisterGradient" ]
hdrnet/hdrnet_ops.py
[(27, 'tensorflow.load_op_library', 'tf.load_op_library', (['path'], {}), True, 'import tensorflow as tf\n'), (34, 'tensorflow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', (['"""BilateralSlice"""'], {}), False, 'from tensorflow.python.framework import ops\n'), (41, 'tensorflow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', (['"""BilateralSliceApply"""'], {}), False, 'from tensorflow.python.framework import ops\n'), (52, 'tensorflow.python.framework.ops.RegisterShape', 'ops.RegisterShape', (['"""BilateralSlice"""'], {}), False, 'from tensorflow.python.framework import ops\n'), (59, 'tensorflow.python.framework.ops.RegisterShape', 'ops.RegisterShape', (['"""BilateralSliceApply"""'], {}), False, 'from tensorflow.python.framework import ops\n'), (23, 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), False, 'import os\n'), (25, 'os.path.join', 'os.path.join', (['path', '"""lib"""', '"""hdrnet_ops.so"""'], {}), False, 'import os\n')]
csong27/embedding-tests
07248c8038ce4cf229320cf5672ea323afeed477
# coding=utf-8 # Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """The main ALBERT model and related functions. For a description of the algorithm, see https://arxiv.org/abs/1909.11942. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import json import math import re import numpy as np import six from six.moves import range import tensorflow as tf from tensorflow.contrib import layers as contrib_layers class AlbertConfig(object): """Configuration for `AlbertModel`. The default settings match the configuration of model `albert_xxlarge`. """ def __init__(self, vocab_size, embedding_size=128, hidden_size=4096, num_hidden_layers=12, num_hidden_groups=1, num_attention_heads=64, intermediate_size=16384, inner_group_num=1, down_scale_factor=1, hidden_act="gelu", hidden_dropout_prob=0, attention_probs_dropout_prob=0, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02): """Constructs AlbertConfig. Args: vocab_size: Vocabulary size of `inputs_ids` in `AlbertModel`. embedding_size: size of voc embeddings. hidden_size: Size of the encoder layers and the pooler layer. num_hidden_layers: Number of hidden layers in the Transformer encoder. num_hidden_groups: Number of group for the hidden layers, parameters in the same group are shared. num_attention_heads: Number of attention heads for each attention layer in the Transformer encoder. intermediate_size: The size of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. inner_group_num: int, number of inner repetition of attention and ffn. down_scale_factor: float, the scale to apply hidden_act: The non-linear activation function (function or string) in the encoder and pooler. hidden_dropout_prob: The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob: The dropout ratio for the attention probabilities. max_position_embeddings: The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size: The vocabulary size of the `token_type_ids` passed into `AlbertModel`. initializer_range: The stdev of the truncated_normal_initializer for initializing all weight matrices. 
""" self.vocab_size = vocab_size self.embedding_size = embedding_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_hidden_groups = num_hidden_groups self.num_attention_heads = num_attention_heads self.inner_group_num = inner_group_num self.down_scale_factor = down_scale_factor self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range @classmethod def from_dict(cls, json_object): """Constructs a `AlbertConfig` from a Python dictionary of parameters.""" config = AlbertConfig(vocab_size=None) for (key, value) in six.iteritems(json_object): config.__dict__[key] = value return config @classmethod def from_json_file(cls, json_file): """Constructs a `AlbertConfig` from a json file of parameters.""" with tf.gfile.GFile(json_file, "r") as reader: text = reader.read() return cls.from_dict(json.loads(text)) def to_dict(self): """Serializes this instance to a Python dictionary.""" output = copy.deepcopy(self.__dict__) return output def to_json_string(self): """Serializes this instance to a JSON string.""" return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" class AlbertModel(object): """BERT model ("Bidirectional Encoder Representations from Transformers"). Example usage: ```python # Already been converted from strings into ids input_ids = tf.constant([[31, 51, 99], [15, 5, 0]]) input_mask = tf.constant([[1, 1, 1], [1, 1, 0]]) token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]]) config = modeling.AlbertConfig(vocab_size=32000, hidden_size=512, num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024) model = modeling.AlbertModel(config=config, is_training=True, input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids) label_embeddings = tf.get_variable(...) pooled_output = model.get_pooled_output() logits = tf.matmul(pooled_output, label_embeddings) ... ``` """ def __init__(self, config, is_training, input_ids, input_mask=None, token_type_ids=None, use_one_hot_embeddings=False, scope=None): """Constructor for AlbertModel. Args: config: `AlbertConfig` instance. is_training: bool. true for training model, false for eval model. Controls whether dropout will be applied. input_ids: int32 Tensor of shape [batch_size, seq_length]. input_mask: (optional) int32 Tensor of shape [batch_size, seq_length]. token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length]. use_one_hot_embeddings: (optional) bool. Whether to use one-hot word embeddings or tf.embedding_lookup() for the word embeddings. scope: (optional) variable scope. Defaults to "bert". Raises: ValueError: The config is invalid or one of the input tensor shapes is invalid. """ config = copy.deepcopy(config) if not is_training: config.hidden_dropout_prob = 0.0 config.attention_probs_dropout_prob = 0.0 input_shape = get_shape_list(input_ids, expected_rank=2) batch_size = input_shape[0] seq_length = input_shape[1] if input_mask is None: input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32) if token_type_ids is None: token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32) with tf.variable_scope(scope, default_name="bert"): with tf.variable_scope("embeddings"): # Perform embedding lookup on the word ids. 
(self.word_embedding_output, self.output_embedding_table) = embedding_lookup( input_ids=input_ids, vocab_size=config.vocab_size, embedding_size=config.embedding_size, initializer_range=config.initializer_range, word_embedding_name="word_embeddings", use_one_hot_embeddings=use_one_hot_embeddings) # Add positional embeddings and token type embeddings, then layer # normalize and perform dropout. self.embedding_output = embedding_postprocessor( input_tensor=self.word_embedding_output, use_token_type=True, token_type_ids=token_type_ids, token_type_vocab_size=config.type_vocab_size, token_type_embedding_name="token_type_embeddings", use_position_embeddings=True, position_embedding_name="position_embeddings", initializer_range=config.initializer_range, max_position_embeddings=config.max_position_embeddings, dropout_prob=config.hidden_dropout_prob) with tf.variable_scope("encoder"): # Run the stacked transformer. # `sequence_output` shape = [batch_size, seq_length, hidden_size]. self.all_encoder_layers = transformer_model( input_tensor=self.embedding_output, attention_mask=input_mask, hidden_size=config.hidden_size, num_hidden_layers=config.num_hidden_layers, num_hidden_groups=config.num_hidden_groups, num_attention_heads=config.num_attention_heads, intermediate_size=config.intermediate_size, inner_group_num=config.inner_group_num, intermediate_act_fn=get_activation(config.hidden_act), hidden_dropout_prob=config.hidden_dropout_prob, attention_probs_dropout_prob=config.attention_probs_dropout_prob, initializer_range=config.initializer_range, do_return_all_layers=True) self.sequence_output = self.all_encoder_layers[-1] # The "pooler" converts the encoded sequence tensor of shape # [batch_size, seq_length, hidden_size] to a tensor of shape # [batch_size, hidden_size]. This is necessary for segment-level # (or segment-pair-level) classification tasks where we need a fixed # dimensional representation of the segment. with tf.variable_scope("pooler"): # We "pool" the model by simply taking the hidden state corresponding # to the first token. We assume that this has been pre-trained first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1) self.pooled_output = tf.layers.dense( first_token_tensor, config.hidden_size, activation=tf.tanh, kernel_initializer=create_initializer(config.initializer_range)) def get_pooled_output(self): return self.pooled_output def get_sequence_output(self): """Gets final hidden layer of encoder. Returns: float Tensor of shape [batch_size, seq_length, hidden_size] corresponding to the final hidden of the transformer encoder. """ return self.sequence_output def get_all_encoder_layers(self): return self.all_encoder_layers def get_word_embedding_output(self): """Get output of the word(piece) embedding lookup. This is BEFORE positional embeddings and token type embeddings have been added. Returns: float Tensor of shape [batch_size, seq_length, hidden_size] corresponding to the output of the word(piece) embedding layer. """ return self.word_embedding_output def get_embedding_output(self): """Gets output of the embedding lookup (i.e., input to the transformer). Returns: float Tensor of shape [batch_size, seq_length, hidden_size] corresponding to the output of the embedding layer, after summing the word embeddings with the positional embeddings and the token type embeddings, then performing layer normalization. This is the input to the transformer. 
""" return self.embedding_output def get_embedding_table(self): return self.output_embedding_table def gelu(x): """Gaussian Error Linear Unit. This is a smoother version of the RELU. Original paper: https://arxiv.org/abs/1606.08415 Args: x: float Tensor to perform activation. Returns: `x` with the GELU activation applied. """ cdf = 0.5 * (1.0 + tf.tanh( (np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))))) return x * cdf def get_activation(activation_string): """Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`. Args: activation_string: String name of the activation function. Returns: A Python function corresponding to the activation function. If `activation_string` is None, empty, or "linear", this will return None. If `activation_string` is not a string, it will return `activation_string`. Raises: ValueError: The `activation_string` does not correspond to a known activation. """ # We assume that anything that"s not a string is already an activation # function, so we just return it. if not isinstance(activation_string, six.string_types): return activation_string if not activation_string: return None act = activation_string.lower() if act == "linear": return None elif act == "relu": return tf.nn.relu elif act == "gelu": return gelu elif act == "tanh": return tf.tanh else: raise ValueError("Unsupported activation: %s" % act) def get_assignment_map_from_checkpoint(tvars, init_checkpoint, num_of_group=0): """Compute the union of the current variables and checkpoint variables.""" assignment_map = {} initialized_variable_names = {} name_to_variable = collections.OrderedDict() for var in tvars: name = var.name m = re.match("^(.*):\\d+$", name) if m is not None: name = m.group(1) name_to_variable[name] = var init_vars = tf.train.list_variables(init_checkpoint) init_vars_name = [name for (name, _) in init_vars] if num_of_group > 0: assignment_map = [] for gid in range(num_of_group): assignment_map.append(collections.OrderedDict()) else: assignment_map = collections.OrderedDict() for name in name_to_variable: if name in init_vars_name: tvar_name = name elif (re.sub(r"/group_\d+/", "/group_0/", six.ensure_str(name)) in init_vars_name and num_of_group > 1): tvar_name = re.sub(r"/group_\d+/", "/group_0/", six.ensure_str(name)) elif (re.sub(r"/ffn_\d+/", "/ffn_1/", six.ensure_str(name)) in init_vars_name and num_of_group > 1): tvar_name = re.sub(r"/ffn_\d+/", "/ffn_1/", six.ensure_str(name)) elif (re.sub(r"/attention_\d+/", "/attention_1/", six.ensure_str(name)) in init_vars_name and num_of_group > 1): tvar_name = re.sub(r"/attention_\d+/", "/attention_1/", six.ensure_str(name)) else: tf.logging.warn("name %s does not get matched", name) continue # tf.logging.info("name %s match to %s", name, tvar_name) if num_of_group > 0: group_matched = False for gid in range(1, num_of_group): if (("/group_" + str(gid) + "/" in name) or ("/ffn_" + str(gid) + "/" in name) or ("/attention_" + str(gid) + "/" in name)): group_matched = True tf.logging.info("%s belongs to %dth", name, gid) assignment_map[gid][tvar_name] = name if not group_matched: assignment_map[0][tvar_name] = name else: assignment_map[tvar_name] = name initialized_variable_names[name] = 1 initialized_variable_names[six.ensure_str(name) + ":0"] = 1 return (assignment_map, initialized_variable_names) def dropout(input_tensor, dropout_prob): """Perform dropout. Args: input_tensor: float Tensor. dropout_prob: Python float. The probability of dropping out a value (NOT of *keeping* a dimension as in `tf.nn.dropout`). 
Returns: A version of `input_tensor` with dropout applied. """ if dropout_prob is None or dropout_prob == 0.0: return input_tensor output = tf.nn.dropout(input_tensor, rate=dropout_prob) return output def layer_norm(input_tensor, name=None): """Run layer normalization on the last dimension of the tensor.""" return contrib_layers.layer_norm( inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name) def layer_norm_and_dropout(input_tensor, dropout_prob, name=None): """Runs layer normalization followed by dropout.""" output_tensor = layer_norm(input_tensor, name) output_tensor = dropout(output_tensor, dropout_prob) return output_tensor def create_initializer(initializer_range=0.02): """Creates a `truncated_normal_initializer` with the given range.""" return tf.truncated_normal_initializer(stddev=initializer_range) def get_timing_signal_1d_given_position(channels, position, min_timescale=1.0, max_timescale=1.0e4): """Get sinusoids of diff frequencies, with timing position given. Adapted from add_timing_signal_1d_given_position in //third_party/py/tensor2tensor/layers/common_attention.py Args: channels: scalar, size of timing embeddings to create. The number of different timescales is equal to channels / 2. position: a Tensor with shape [batch, seq_len] min_timescale: a float max_timescale: a float Returns: a Tensor of timing signals [batch, seq_len, channels] """ num_timescales = channels // 2 log_timescale_increment = ( math.log(float(max_timescale) / float(min_timescale)) / (tf.to_float(num_timescales) - 1)) inv_timescales = min_timescale * tf.exp( tf.to_float(tf.range(num_timescales)) * -log_timescale_increment) scaled_time = ( tf.expand_dims(tf.to_float(position), 2) * tf.expand_dims( tf.expand_dims(inv_timescales, 0), 0)) signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=2) signal = tf.pad(signal, [[0, 0], [0, 0], [0, tf.mod(channels, 2)]]) return signal def embedding_lookup(input_ids, vocab_size, embedding_size=128, initializer_range=0.02, word_embedding_name="word_embeddings", use_one_hot_embeddings=False): """Looks up words embeddings for id tensor. Args: input_ids: int32 Tensor of shape [batch_size, seq_length] containing word ids. vocab_size: int. Size of the embedding vocabulary. embedding_size: int. Width of the word embeddings. initializer_range: float. Embedding initialization range. word_embedding_name: string. Name of the embedding table. use_one_hot_embeddings: bool. If True, use one-hot method for word embeddings. If False, use `tf.nn.embedding_lookup()`. Returns: float Tensor of shape [batch_size, seq_length, embedding_size]. """ # This function assumes that the input is of shape [batch_size, seq_length, # num_inputs]. # # If the input is a 2D tensor of shape [batch_size, seq_length], we # reshape to [batch_size, seq_length, 1]. 
if input_ids.shape.ndims == 2: input_ids = tf.expand_dims(input_ids, axis=[-1]) embedding_table = tf.get_variable( name=word_embedding_name, shape=[vocab_size, embedding_size], initializer=create_initializer(initializer_range)) if use_one_hot_embeddings: flat_input_ids = tf.reshape(input_ids, [-1]) one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size) output = tf.matmul(one_hot_input_ids, embedding_table) else: output = tf.nn.embedding_lookup(embedding_table, input_ids) input_shape = get_shape_list(input_ids) output = tf.reshape(output, input_shape[0:-1] + [input_shape[-1] * embedding_size]) return (output, embedding_table) def embedding_postprocessor(input_tensor, use_token_type=False, token_type_ids=None, token_type_vocab_size=16, token_type_embedding_name="token_type_embeddings", use_position_embeddings=True, position_embedding_name="position_embeddings", initializer_range=0.02, max_position_embeddings=512, dropout_prob=0.1): """Performs various post-processing on a word embedding tensor. Args: input_tensor: float Tensor of shape [batch_size, seq_length, embedding_size]. use_token_type: bool. Whether to add embeddings for `token_type_ids`. token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length]. Must be specified if `use_token_type` is True. token_type_vocab_size: int. The vocabulary size of `token_type_ids`. token_type_embedding_name: string. The name of the embedding table variable for token type ids. use_position_embeddings: bool. Whether to add position embeddings for the position of each token in the sequence. position_embedding_name: string. The name of the embedding table variable for positional embeddings. initializer_range: float. Range of the weight initialization. max_position_embeddings: int. Maximum sequence length that might ever be used with this model. This can be longer than the sequence length of input_tensor, but cannot be shorter. dropout_prob: float. Dropout probability applied to the final output tensor. Returns: float tensor with same shape as `input_tensor`. Raises: ValueError: One of the tensor shapes or input values is invalid. """ input_shape = get_shape_list(input_tensor, expected_rank=3) batch_size = input_shape[0] seq_length = input_shape[1] width = input_shape[2] output = input_tensor if use_token_type: if token_type_ids is None: raise ValueError("`token_type_ids` must be specified if" "`use_token_type` is True.") token_type_table = tf.get_variable( name=token_type_embedding_name, shape=[token_type_vocab_size, width], initializer=create_initializer(initializer_range)) # This vocab will be small so we always do one-hot here, since it is always # faster for a small vocabulary. flat_token_type_ids = tf.reshape(token_type_ids, [-1]) one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size) token_type_embeddings = tf.matmul(one_hot_ids, token_type_table) token_type_embeddings = tf.reshape(token_type_embeddings, [batch_size, seq_length, width]) output += token_type_embeddings if use_position_embeddings: assert_op = tf.assert_less_equal(seq_length, max_position_embeddings) with tf.control_dependencies([assert_op]): full_position_embeddings = tf.get_variable( name=position_embedding_name, shape=[max_position_embeddings, width], initializer=create_initializer(initializer_range)) # Since the position embedding table is a learned variable, we create it # using a (long) sequence length `max_position_embeddings`. 
The actual
      # sequence length might be shorter than this, for faster training of
      # tasks that do not have long sequences.
      #
      # So `full_position_embeddings` is effectively an embedding table
      # for position [0, 1, 2, ..., max_position_embeddings-1], and the current
      # sequence has positions [0, 1, 2, ... seq_length-1], so we can just
      # perform a slice.
      position_embeddings = tf.slice(full_position_embeddings, [0, 0],
                                     [seq_length, -1])
      num_dims = len(output.shape.as_list())

      # Only the last two dimensions are relevant (`seq_length` and `width`),
      # so we broadcast among the first dimensions, which is typically just
      # the batch size.
      position_broadcast_shape = []
      for _ in range(num_dims - 2):
        position_broadcast_shape.append(1)
      position_broadcast_shape.extend([seq_length, width])
      position_embeddings = tf.reshape(position_embeddings,
                                       position_broadcast_shape)
      output += position_embeddings

  output = layer_norm_and_dropout(output, dropout_prob)
  return output


def dense_layer_3d(input_tensor,
                   num_attention_heads,
                   head_size,
                   initializer,
                   activation,
                   name=None):
  """A dense layer with 3D kernel.

  Args:
    input_tensor: float Tensor of shape [batch, seq_length, hidden_size].
    num_attention_heads: Number of attention heads.
    head_size: The size per attention head.
    initializer: Kernel initializer.
    activation: Activation function.
    name: The name scope of this layer.

  Returns:
    float logits Tensor.
  """
  input_shape = get_shape_list(input_tensor)
  hidden_size = input_shape[2]
  with tf.variable_scope(name):
    w = tf.get_variable(
        name="kernel",
        shape=[hidden_size, num_attention_heads * head_size],
        initializer=initializer)
    w = tf.reshape(w, [hidden_size, num_attention_heads, head_size])
    b = tf.get_variable(
        name="bias",
        shape=[num_attention_heads * head_size],
        initializer=tf.zeros_initializer)
    b = tf.reshape(b, [num_attention_heads, head_size])
    ret = tf.einsum("BFH,HND->BFND", input_tensor, w)
    ret += b
    if activation is not None:
      return activation(ret)
    else:
      return ret


def dense_layer_3d_proj(input_tensor,
                        hidden_size,
                        head_size,
                        initializer,
                        activation,
                        name=None):
  """A dense layer with 3D kernel for projection.

  Args:
    input_tensor: float Tensor of shape [batch, from_seq_length,
      num_attention_heads, size_per_head].
    hidden_size: The size of the output (projection) dimension.
    head_size: The size of each attention head.
    initializer: Kernel initializer.
    activation: Activation function.
    name: The name scope of this layer.

  Returns:
    float logits Tensor.
  """
  input_shape = get_shape_list(input_tensor)
  num_attention_heads = input_shape[2]
  with tf.variable_scope(name):
    w = tf.get_variable(
        name="kernel",
        shape=[num_attention_heads * head_size, hidden_size],
        initializer=initializer)
    w = tf.reshape(w, [num_attention_heads, head_size, hidden_size])
    b = tf.get_variable(
        name="bias", shape=[hidden_size], initializer=tf.zeros_initializer)
    ret = tf.einsum("BFND,NDH->BFH", input_tensor, w)
    ret += b
    if activation is not None:
      return activation(ret)
    else:
      return ret


def dense_layer_2d(input_tensor,
                   output_size,
                   initializer,
                   activation,
                   num_attention_heads=1,
                   name=None):
  """A dense layer with 2D kernel.

  Args:
    input_tensor: Float tensor with rank 3.
    output_size: The size of output dimension.
    initializer: Kernel initializer.
    activation: Activation function.
    num_attention_heads: number of attention head in attention layer.
    name: The name scope of this layer.

  Returns:
    float logits Tensor.
""" del num_attention_heads # unused input_shape = get_shape_list(input_tensor) hidden_size = input_shape[2] with tf.variable_scope(name): w = tf.get_variable( name="kernel", shape=[hidden_size, output_size], initializer=initializer) b = tf.get_variable( name="bias", shape=[output_size], initializer=tf.zeros_initializer) ret = tf.einsum("BFH,HO->BFO", input_tensor, w) ret += b if activation is not None: return activation(ret) else: return ret def dot_product_attention(q, k, v, bias, dropout_rate=0.0): """Dot-product attention. Args: q: Tensor with shape [..., length_q, depth_k]. k: Tensor with shape [..., length_kv, depth_k]. Leading dimensions must match with q. v: Tensor with shape [..., length_kv, depth_v] Leading dimensions must match with q. bias: bias Tensor (see attention_bias()) dropout_rate: a float. Returns: Tensor with shape [..., length_q, depth_v]. """ logits = tf.matmul(q, k, transpose_b=True) # [..., length_q, length_kv] logits = tf.multiply(logits, 1.0 / math.sqrt(float(get_shape_list(q)[-1]))) if bias is not None: # `attention_mask` = [B, T] from_shape = get_shape_list(q) if len(from_shape) == 4: broadcast_ones = tf.ones([from_shape[0], 1, from_shape[2], 1], tf.float32) elif len(from_shape) == 5: # from_shape = [B, N, Block_num, block_size, depth]# broadcast_ones = tf.ones([from_shape[0], 1, from_shape[2], from_shape[3], 1], tf.float32) bias = tf.matmul(broadcast_ones, tf.cast(bias, tf.float32), transpose_b=True) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. adder = (1.0 - bias) * -10000.0 # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. logits += adder else: adder = 0.0 attention_probs = tf.nn.softmax(logits, name="attention_probs") attention_probs = dropout(attention_probs, dropout_rate) return tf.matmul(attention_probs, v) def attention_layer(from_tensor, to_tensor, attention_mask=None, num_attention_heads=1, query_act=None, key_act=None, value_act=None, attention_probs_dropout_prob=0.0, initializer_range=0.02, batch_size=None, from_seq_length=None, to_seq_length=None): """Performs multi-headed attention from `from_tensor` to `to_tensor`. Args: from_tensor: float Tensor of shape [batch_size, from_seq_length, from_width]. to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width]. attention_mask: (optional) int32 Tensor of shape [batch_size, from_seq_length, to_seq_length]. The values should be 1 or 0. The attention scores will effectively be set to -infinity for any positions in the mask that are 0, and will be unchanged for positions that are 1. num_attention_heads: int. Number of attention heads. query_act: (optional) Activation function for the query transform. key_act: (optional) Activation function for the key transform. value_act: (optional) Activation function for the value transform. attention_probs_dropout_prob: (optional) float. Dropout probability of the attention probabilities. initializer_range: float. Range of the weight initializer. batch_size: (Optional) int. If the input is 2D, this might be the batch size of the 3D version of the `from_tensor` and `to_tensor`. from_seq_length: (Optional) If the input is 2D, this might be the seq length of the 3D version of the `from_tensor`. to_seq_length: (Optional) If the input is 2D, this might be the seq length of the 3D version of the `to_tensor`. 
Returns: float Tensor of shape [batch_size, from_seq_length, num_attention_heads, size_per_head]. Raises: ValueError: Any of the arguments or tensor shapes are invalid. """ from_shape = get_shape_list(from_tensor, expected_rank=[2, 3]) to_shape = get_shape_list(to_tensor, expected_rank=[2, 3]) size_per_head = int(from_shape[2]/num_attention_heads) if len(from_shape) != len(to_shape): raise ValueError( "The rank of `from_tensor` must match the rank of `to_tensor`.") if len(from_shape) == 3: batch_size = from_shape[0] from_seq_length = from_shape[1] to_seq_length = to_shape[1] elif len(from_shape) == 2: if (batch_size is None or from_seq_length is None or to_seq_length is None): raise ValueError( "When passing in rank 2 tensors to attention_layer, the values " "for `batch_size`, `from_seq_length`, and `to_seq_length` " "must all be specified.") # Scalar dimensions referenced here: # B = batch size (number of sequences) # F = `from_tensor` sequence length # T = `to_tensor` sequence length # N = `num_attention_heads` # H = `size_per_head` # `query_layer` = [B, F, N, H] q = dense_layer_3d(from_tensor, num_attention_heads, size_per_head, create_initializer(initializer_range), query_act, "query") # `key_layer` = [B, T, N, H] k = dense_layer_3d(to_tensor, num_attention_heads, size_per_head, create_initializer(initializer_range), key_act, "key") # `value_layer` = [B, T, N, H] v = dense_layer_3d(to_tensor, num_attention_heads, size_per_head, create_initializer(initializer_range), value_act, "value") q = tf.transpose(q, [0, 2, 1, 3]) k = tf.transpose(k, [0, 2, 1, 3]) v = tf.transpose(v, [0, 2, 1, 3]) if attention_mask is not None: attention_mask = tf.reshape( attention_mask, [batch_size, 1, to_seq_length, 1]) # 'new_embeddings = [B, N, F, H]' new_embeddings = dot_product_attention(q, k, v, attention_mask, attention_probs_dropout_prob) return tf.transpose(new_embeddings, [0, 2, 1, 3]) def attention_ffn_block(layer_input, hidden_size=768, attention_mask=None, num_attention_heads=1, attention_head_size=64, attention_probs_dropout_prob=0.0, intermediate_size=3072, intermediate_act_fn=None, initializer_range=0.02, hidden_dropout_prob=0.0): """A network with attention-ffn as sub-block. Args: layer_input: float Tensor of shape [batch_size, from_seq_length, from_width]. hidden_size: (optional) int, size of hidden layer. attention_mask: (optional) int32 Tensor of shape [batch_size, from_seq_length, to_seq_length]. The values should be 1 or 0. The attention scores will effectively be set to -infinity for any positions in the mask that are 0, and will be unchanged for positions that are 1. num_attention_heads: int. Number of attention heads. attention_head_size: int. Size of attention head. attention_probs_dropout_prob: float. dropout probability for attention_layer intermediate_size: int. Size of intermediate hidden layer. intermediate_act_fn: (optional) Activation function for the intermediate layer. initializer_range: float. Range of the weight initializer. hidden_dropout_prob: (optional) float. Dropout probability of the hidden layer. Returns: layer output """ with tf.variable_scope("attention_1"): with tf.variable_scope("self"): attention_output = attention_layer( from_tensor=layer_input, to_tensor=layer_input, attention_mask=attention_mask, num_attention_heads=num_attention_heads, attention_probs_dropout_prob=attention_probs_dropout_prob, initializer_range=initializer_range) # Run a linear projection of `hidden_size` then add a residual # with `layer_input`. 
with tf.variable_scope("output"): attention_output = dense_layer_3d_proj( attention_output, hidden_size, attention_head_size, create_initializer(initializer_range), None, name="dense") attention_output = dropout(attention_output, hidden_dropout_prob) attention_output = layer_norm(attention_output + layer_input) with tf.variable_scope("ffn_1"): with tf.variable_scope("intermediate"): intermediate_output = dense_layer_2d( attention_output, intermediate_size, create_initializer(initializer_range), intermediate_act_fn, num_attention_heads=num_attention_heads, name="dense") with tf.variable_scope("output"): ffn_output = dense_layer_2d( intermediate_output, hidden_size, create_initializer(initializer_range), None, num_attention_heads=num_attention_heads, name="dense") ffn_output = dropout(ffn_output, hidden_dropout_prob) ffn_output = layer_norm(ffn_output + attention_output) return ffn_output def transformer_model(input_tensor, attention_mask=None, hidden_size=768, num_hidden_layers=12, num_hidden_groups=12, num_attention_heads=12, intermediate_size=3072, inner_group_num=1, intermediate_act_fn="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, do_return_all_layers=False): """Multi-headed, multi-layer Transformer from "Attention is All You Need". This is almost an exact implementation of the original Transformer encoder. See the original paper: https://arxiv.org/abs/1706.03762 Also see: https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py Args: input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size]. attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length, seq_length], with 1 for positions that can be attended to and 0 in positions that should not be. hidden_size: int. Hidden size of the Transformer. num_hidden_layers: int. Number of layers (blocks) in the Transformer. num_hidden_groups: int. Number of group for the hidden layers, parameters in the same group are shared. num_attention_heads: int. Number of attention heads in the Transformer. intermediate_size: int. The size of the "intermediate" (a.k.a., feed forward) layer. inner_group_num: int, number of inner repetition of attention and ffn. intermediate_act_fn: function. The non-linear activation function to apply to the output of the intermediate/feed-forward layer. hidden_dropout_prob: float. Dropout probability for the hidden layers. attention_probs_dropout_prob: float. Dropout probability of the attention probabilities. initializer_range: float. Range of the initializer (stddev of truncated normal). do_return_all_layers: Whether to also return all layers or just the final layer. Returns: float Tensor of shape [batch_size, seq_length, hidden_size], the final hidden layer of the Transformer. Raises: ValueError: A Tensor shape or parameter is invalid. 
""" if hidden_size % num_attention_heads != 0: raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (hidden_size, num_attention_heads)) attention_head_size = hidden_size // num_attention_heads input_shape = get_shape_list(input_tensor, expected_rank=3) input_width = input_shape[2] all_layer_outputs = [] if input_width != hidden_size: prev_output = dense_layer_2d( input_tensor, hidden_size, create_initializer(initializer_range), None, name="embedding_hidden_mapping_in") else: prev_output = input_tensor with tf.variable_scope("transformer", reuse=tf.AUTO_REUSE): for layer_idx in range(num_hidden_layers): group_idx = int(layer_idx / num_hidden_layers * num_hidden_groups) with tf.variable_scope("group_%d" % group_idx): with tf.name_scope("layer_%d" % layer_idx): layer_output = prev_output for inner_group_idx in range(inner_group_num): with tf.variable_scope("inner_group_%d" % inner_group_idx): layer_output = attention_ffn_block( layer_output, hidden_size, attention_mask, num_attention_heads, attention_head_size, attention_probs_dropout_prob, intermediate_size, intermediate_act_fn, initializer_range, hidden_dropout_prob) prev_output = layer_output all_layer_outputs.append(layer_output) if do_return_all_layers: return all_layer_outputs else: return all_layer_outputs[-1] def get_shape_list(tensor, expected_rank=None, name=None): """Returns a list of the shape of tensor, preferring static dimensions. Args: tensor: A tf.Tensor object to find the shape of. expected_rank: (optional) int. The expected rank of `tensor`. If this is specified and the `tensor` has a different rank, and exception will be thrown. name: Optional name of the tensor for the error message. Returns: A list of dimensions of the shape of tensor. All static dimensions will be returned as python integers, and dynamic dimensions will be returned as tf.Tensor scalars. """ if name is None: name = tensor.name if expected_rank is not None: assert_rank(tensor, expected_rank, name) shape = tensor.shape.as_list() non_static_indexes = [] for (index, dim) in enumerate(shape): if dim is None: non_static_indexes.append(index) if not non_static_indexes: return shape dyn_shape = tf.shape(tensor) for index in non_static_indexes: shape[index] = dyn_shape[index] return shape def reshape_to_matrix(input_tensor): """Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).""" ndims = input_tensor.shape.ndims if ndims < 2: raise ValueError("Input tensor must have at least rank 2. Shape = %s" % (input_tensor.shape)) if ndims == 2: return input_tensor width = input_tensor.shape[-1] output_tensor = tf.reshape(input_tensor, [-1, width]) return output_tensor def reshape_from_matrix(output_tensor, orig_shape_list): """Reshapes a rank 2 tensor back to its original rank >= 2 tensor.""" if len(orig_shape_list) == 2: return output_tensor output_shape = get_shape_list(output_tensor) orig_dims = orig_shape_list[0:-1] width = output_shape[-1] return tf.reshape(output_tensor, orig_dims + [width]) def assert_rank(tensor, expected_rank, name=None): """Raises an exception if the tensor rank is not of the expected rank. Args: tensor: A tf.Tensor to check the rank of. expected_rank: Python integer or list of integers, expected rank. name: Optional name of the tensor for the error message. Raises: ValueError: If the expected shape doesn't match the actual shape. 
""" if name is None: name = tensor.name expected_rank_dict = {} if isinstance(expected_rank, six.integer_types): expected_rank_dict[expected_rank] = True else: for x in expected_rank: expected_rank_dict[x] = True actual_rank = tensor.shape.ndims if actual_rank not in expected_rank_dict: scope_name = tf.get_variable_scope().name raise ValueError( "For the tensor `%s` in scope `%s`, the actual rank " "`%d` (shape = %s) is not equal to the expected rank `%s`" % (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
[ "tensorflow.get_variable", "numpy.sqrt", "tensorflow.control_dependencies", "tensorflow.zeros", "tensorflow.gfile.GFile", "tensorflow.cast", "tensorflow.assert_less_equal", "tensorflow.truncated_normal_initializer", "tensorflow.squeeze", "tensorflow.logging.warn", "tensorflow.to_float", "tensorflow.name_scope", "tensorflow.train.list_variables", "tensorflow.nn.dropout", "tensorflow.matmul", "tensorflow.shape", "tensorflow.pow", "tensorflow.logging.info", "tensorflow.one_hot", "tensorflow.nn.embedding_lookup", "tensorflow.nn.softmax", "tensorflow.transpose", "tensorflow.sin", "tensorflow.cos", "tensorflow.range", "tensorflow.slice", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.ones", "tensorflow.einsum", "tensorflow.mod", "tensorflow.contrib.layers.layer_norm", "tensorflow.variable_scope", "tensorflow.get_variable_scope" ]
models/albert/modeling.py
[(354, 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), False, 'import collections\n'), (361, 'tensorflow.train.list_variables', 'tf.train.list_variables', (['init_checkpoint'], {}), True, 'import tensorflow as tf\n'), (422, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['input_tensor'], {'rate': 'dropout_prob'}), True, 'import tensorflow as tf\n'), (428, 'tensorflow.contrib.layers.layer_norm', 'contrib_layers.layer_norm', ([], {'inputs': 'input_tensor', 'begin_norm_axis': '(-1)', 'begin_params_axis': '(-1)', 'scope': 'name'}), True, 'from tensorflow.contrib import layers as contrib_layers\n'), (441, 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': 'initializer_range'}), True, 'import tensorflow as tf\n'), (520, 'tensorflow.reshape', 'tf.reshape', (['output', '(input_shape[0:-1] + [input_shape[-1] * embedding_size])'], {}), True, 'import tensorflow as tf\n'), (754, 'tensorflow.matmul', 'tf.matmul', (['q', 'k'], {'transpose_b': '(True)'}), True, 'import tensorflow as tf\n'), (780, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {'name': '"""attention_probs"""'}), True, 'import tensorflow as tf\n'), (782, 'tensorflow.matmul', 'tf.matmul', (['attention_probs', 'v'], {}), True, 'import tensorflow as tf\n'), (864, 'tensorflow.transpose', 'tf.transpose', (['q', '[0, 2, 1, 3]'], {}), True, 'import tensorflow as tf\n'), (865, 'tensorflow.transpose', 'tf.transpose', (['k', '[0, 2, 1, 3]'], {}), True, 'import tensorflow as tf\n'), (866, 'tensorflow.transpose', 'tf.transpose', (['v', '[0, 2, 1, 3]'], {}), True, 'import tensorflow as tf\n'), (874, 'tensorflow.transpose', 'tf.transpose', (['new_embeddings', '[0, 2, 1, 3]'], {}), True, 'import tensorflow as tf\n'), (1076, 'tensorflow.shape', 'tf.shape', (['tensor'], {}), True, 'import tensorflow as tf\n'), (1092, 'tensorflow.reshape', 'tf.reshape', (['input_tensor', '[-1, width]'], {}), True, 'import tensorflow as tf\n'), (1106, 'tensorflow.reshape', 'tf.reshape', (['output_tensor', '(orig_dims + [width])'], {}), True, 'import tensorflow as tf\n'), (108, 'six.iteritems', 'six.iteritems', (['json_object'], {}), False, 'import six\n'), (121, 'copy.deepcopy', 'copy.deepcopy', (['self.__dict__'], {}), False, 'import copy\n'), (178, 'copy.deepcopy', 'copy.deepcopy', (['config'], {}), False, 'import copy\n'), (357, 're.match', 're.match', (['"""^(.*):\\\\d+$"""', 'name'], {}), False, 'import re\n'), (366, 'six.moves.range', 'range', (['num_of_group'], {}), False, 'from six.moves import range\n'), (369, 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), False, 'import collections\n'), (504, 'tensorflow.expand_dims', 'tf.expand_dims', (['input_ids'], {'axis': '[-1]'}), True, 'import tensorflow as tf\n'), (512, 'tensorflow.reshape', 'tf.reshape', (['input_ids', '[-1]'], {}), True, 'import tensorflow as tf\n'), (513, 'tensorflow.one_hot', 'tf.one_hot', (['flat_input_ids'], {'depth': 'vocab_size'}), True, 'import tensorflow as tf\n'), (514, 'tensorflow.matmul', 'tf.matmul', (['one_hot_input_ids', 'embedding_table'], {}), True, 'import tensorflow as tf\n'), (516, 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embedding_table', 'input_ids'], {}), True, 'import tensorflow as tf\n'), (579, 'tensorflow.reshape', 'tf.reshape', (['token_type_ids', '[-1]'], {}), True, 'import tensorflow as tf\n'), (580, 'tensorflow.one_hot', 'tf.one_hot', (['flat_token_type_ids'], {'depth': 'token_type_vocab_size'}), True, 'import tensorflow as tf\n'), (581, 'tensorflow.matmul', 'tf.matmul', 
(['one_hot_ids', 'token_type_table'], {}), True, 'import tensorflow as tf\n'), (582, 'tensorflow.reshape', 'tf.reshape', (['token_type_embeddings', '[batch_size, seq_length, width]'], {}), True, 'import tensorflow as tf\n'), (587, 'tensorflow.assert_less_equal', 'tf.assert_less_equal', (['seq_length', 'max_position_embeddings'], {}), True, 'import tensorflow as tf\n'), (644, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (645, 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""kernel"""', 'shape': '[hidden_size, num_attention_heads * head_size]', 'initializer': 'initializer'}), True, 'import tensorflow as tf\n'), (649, 'tensorflow.reshape', 'tf.reshape', (['w', '[hidden_size, num_attention_heads, head_size]'], {}), True, 'import tensorflow as tf\n'), (650, 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""bias"""', 'shape': '[num_attention_heads * head_size]', 'initializer': 'tf.zeros_initializer'}), True, 'import tensorflow as tf\n'), (654, 'tensorflow.reshape', 'tf.reshape', (['b', '[num_attention_heads, head_size]'], {}), True, 'import tensorflow as tf\n'), (655, 'tensorflow.einsum', 'tf.einsum', (['"""BFH,HND->BFND"""', 'input_tensor', 'w'], {}), True, 'import tensorflow as tf\n'), (686, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (687, 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""kernel"""', 'shape': '[num_attention_heads * head_size, hidden_size]', 'initializer': 'initializer'}), True, 'import tensorflow as tf\n'), (691, 'tensorflow.reshape', 'tf.reshape', (['w', '[num_attention_heads, head_size, hidden_size]'], {}), True, 'import tensorflow as tf\n'), (692, 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""bias"""', 'shape': '[hidden_size]', 'initializer': 'tf.zeros_initializer'}), True, 'import tensorflow as tf\n'), (694, 'tensorflow.einsum', 'tf.einsum', (['"""BFND,NDH->BFH"""', 'input_tensor', 'w'], {}), True, 'import tensorflow as tf\n'), (724, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (725, 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""kernel"""', 'shape': '[hidden_size, output_size]', 'initializer': 'initializer'}), True, 'import tensorflow as tf\n'), (729, 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""bias"""', 'shape': '[output_size]', 'initializer': 'tf.zeros_initializer'}), True, 'import tensorflow as tf\n'), (731, 'tensorflow.einsum', 'tf.einsum', (['"""BFH,HO->BFO"""', 'input_tensor', 'w'], {}), True, 'import tensorflow as tf\n'), (868, 'tensorflow.reshape', 'tf.reshape', (['attention_mask', '[batch_size, 1, to_seq_length, 1]'], {}), True, 'import tensorflow as tf\n'), (911, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""attention_1"""'], {}), True, 'import tensorflow as tf\n'), (933, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""ffn_1"""'], {}), True, 'import tensorflow as tf\n'), (1024, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""transformer"""'], {'reuse': 'tf.AUTO_REUSE'}), True, 'import tensorflow as tf\n'), (1025, 'six.moves.range', 'range', (['num_hidden_layers'], {}), False, 'from six.moves import range\n'), (115, 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['json_file', '"""r"""'], {}), True, 'import tensorflow as tf\n'), (117, 'json.loads', 'json.loads', (['text'], {}), False, 'import json\n'), (188, 'tensorflow.ones', 'tf.ones', ([], {'shape': '[batch_size, 
seq_length]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (191, 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[batch_size, seq_length]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (193, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {'default_name': '"""bert"""'}), True, 'import tensorflow as tf\n'), (391, 'six.moves.range', 'range', (['(1)', 'num_of_group'], {}), False, 'from six.moves import range\n'), (466, 'tensorflow.to_float', 'tf.to_float', (['num_timescales'], {}), True, 'import tensorflow as tf\n'), (470, 'tensorflow.to_float', 'tf.to_float', (['position'], {}), True, 'import tensorflow as tf\n'), (471, 'tensorflow.expand_dims', 'tf.expand_dims', (['inv_timescales', '(0)'], {}), True, 'import tensorflow as tf\n'), (472, 'tensorflow.sin', 'tf.sin', (['scaled_time'], {}), True, 'import tensorflow as tf\n'), (472, 'tensorflow.cos', 'tf.cos', (['scaled_time'], {}), True, 'import tensorflow as tf\n'), (588, 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[assert_op]'], {}), True, 'import tensorflow as tf\n'), (602, 'tensorflow.slice', 'tf.slice', (['full_position_embeddings', '[0, 0]', '[seq_length, -1]'], {}), True, 'import tensorflow as tf\n'), (610, 'six.moves.range', 'range', (['(num_dims - 2)'], {}), False, 'from six.moves import range\n'), (613, 'tensorflow.reshape', 'tf.reshape', (['position_embeddings', 'position_broadcast_shape'], {}), True, 'import tensorflow as tf\n'), (760, 'tensorflow.ones', 'tf.ones', (['[from_shape[0], 1, from_shape[2], 1]', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (767, 'tensorflow.cast', 'tf.cast', (['bias', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (912, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""self"""'], {}), True, 'import tensorflow as tf\n'), (923, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""output"""'], {}), True, 'import tensorflow as tf\n'), (934, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""intermediate"""'], {}), True, 'import tensorflow as tf\n'), (1132, 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), True, 'import tensorflow as tf\n'), (194, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""embeddings"""'], {}), True, 'import tensorflow as tf\n'), (219, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""encoder"""'], {}), True, 'import tensorflow as tf\n'), (244, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""pooler"""'], {}), True, 'import tensorflow as tf\n'), (247, 'tensorflow.squeeze', 'tf.squeeze', (['self.sequence_output[:, 0:1, :]'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (367, 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), False, 'import collections\n'), (473, 'tensorflow.mod', 'tf.mod', (['channels', '(2)'], {}), True, 'import tensorflow as tf\n'), (763, 'tensorflow.ones', 'tf.ones', (['[from_shape[0], 1, from_shape[2], from_shape[3], 1]', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (942, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""output"""'], {}), True, 'import tensorflow as tf\n'), (1027, 'tensorflow.variable_scope', 'tf.variable_scope', (["('group_%d' % group_idx)"], {}), True, 'import tensorflow as tf\n'), (308, 'numpy.sqrt', 'np.sqrt', (['(2 / np.pi)'], {}), True, 'import numpy as np\n'), (377, 'six.ensure_str', 'six.ensure_str', (['name'], {}), False, 'import six\n'), (396, 'tensorflow.logging.info', 'tf.logging.info', (['"""%s belongs to %dth"""', 'name', 'gid'], {}), True, 'import tensorflow as 
tf\n'), (403, 'six.ensure_str', 'six.ensure_str', (['name'], {}), False, 'import six\n'), (468, 'tensorflow.range', 'tf.range', (['num_timescales'], {}), True, 'import tensorflow as tf\n'), (1028, 'tensorflow.name_scope', 'tf.name_scope', (["('layer_%d' % layer_idx)"], {}), True, 'import tensorflow as tf\n'), (1030, 'six.moves.range', 'range', (['inner_group_num'], {}), False, 'from six.moves import range\n'), (375, 'six.ensure_str', 'six.ensure_str', (['name'], {}), False, 'import six\n'), (380, 'six.ensure_str', 'six.ensure_str', (['name'], {}), False, 'import six\n'), (386, 'tensorflow.logging.warn', 'tf.logging.warn', (['"""name %s does not get matched"""', 'name'], {}), True, 'import tensorflow as tf\n'), (308, 'tensorflow.pow', 'tf.pow', (['x', '(3)'], {}), True, 'import tensorflow as tf\n'), (378, 'six.ensure_str', 'six.ensure_str', (['name'], {}), False, 'import six\n'), (384, 'six.ensure_str', 'six.ensure_str', (['name'], {}), False, 'import six\n'), (1031, 'tensorflow.variable_scope', 'tf.variable_scope', (["('inner_group_%d' % inner_group_idx)"], {}), True, 'import tensorflow as tf\n'), (381, 'six.ensure_str', 'six.ensure_str', (['name'], {}), False, 'import six\n')]
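Each tuple in the api_extract lists above appears to follow a fixed record shape: (source line number, fully qualified API name, name used at the call site, (positional args, keyword args), a flag that seems to distinguish plain aliased imports from `from ... import` forms, and the originating import statement). As a rough, hypothetical sketch of how such records could be derived with Python's ast module — this is not the pipeline that produced this dataset, and the helper name extract_calls is made up for illustration:

import ast

SOURCE = "import tensorflow as tf\nx = tf.reshape(y, [2, 3])\n"

def extract_calls(source):
    """Yield (lineno, dotted_api_name) for alias-qualified calls like tf.reshape."""
    tree = ast.parse(source)
    # Map import aliases back to full module names, e.g. 'tf' -> 'tensorflow'.
    aliases = {}
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for alias in node.names:
                aliases[alias.asname or alias.name] = alias.name
    for node in ast.walk(tree):
        if isinstance(node, ast.Call) and isinstance(node.func, ast.Attribute):
            value = node.func.value
            if isinstance(value, ast.Name) and value.id in aliases:
                yield node.lineno, aliases[value.id] + '.' + node.func.attr

print(list(extract_calls(SOURCE)))  # [(2, 'tensorflow.reshape')]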
spyysalo/crf-test
7f5364e7ec7ae55a67b4721293c91d1f9a8cc28a
#!/usr/bin/env python from __future__ import print_function import numpy as np import collections from keras import backend as K def ones(shape, dtype=K.floatx()): """Return all-ones tensor of given shape and type.""" # As of Keras version 1.1.0, Keras ones() requires integer values # in shape (e.g. calling np.ones() with the Theano backend) and # thus can't be called with tensor values. This version avoids the # issue by using the backend ones() instead. if K.backend() == 'theano': from theano import tensor as T return T.ones(shape, dtype) else: assert K.backend() == 'tensorflow' import tensorflow as tf return tf.ones(shape, dtype) def zeros(shape, dtype=K.floatx()): """Return all-zeros tensor of given shape and type.""" # As of Keras version 1.1.0, Keras zeros() requires integer values # in shape (e.g. calling np.zeros() with the Theano backend) and # thus can't be called with tensor values. This version avoids the # issue by using the backend zeros() instead. if K.backend() == 'theano': from theano import tensor as T return T.zeros(shape, dtype) else: assert K.backend() == 'tensorflow' import tensorflow as tf return tf.zeros(shape, dtype) def values(value, shape, dtype=K.floatx()): """Return tensor of given shape and type filled with given value.""" return value * ones(shape, dtype) # or zeros() + ? def meshgrid(i, j, indexing='ij'): """Return matrices broadcasting indices on a 2d grid. This is a partial backend-independent version of TensorFlow meshgrid() (https://www.tensorflow.org/api_docs/python/array_ops.html#meshgrid) with matrix indexing. """ if K.ndim(i) != 1 or K.ndim(j) != 1: raise ValueError('need ndim() == 1') if K.backend() == 'tensorflow': import tensorflow as tf I, J = tf.meshgrid(i, j, indexing=indexing) else: assert K.backend() == 'theano' from theano import tensor as T I = T.repeat(i, K.shape(j)[0]) J = T.tile(j, K.shape(i)[0]) shape = (K.shape(i)[0], K.shape(j)[0]) return K.reshape(I, shape), K.reshape(J, shape) def one_hot(a, size=None, dtype=np.int32): """Return one-hot representation of given tensor or numpy array.""" # http://stackoverflow.com/a/37323404 if isinstance(a, np.ndarray): if size is None: size = a.max() + 1 return np.eye(size, dtype=dtype)[a] else: if size is None: raise NotImplementedError() return K.eye(size, dtype)[a] def unique(iterable): """Return unique values from iterable.""" seen = set() return [i for i in iterable if not (i in seen or seen.add(i))] def arange(start, stop=None, dtype=None): """Keras backend-independent range for tensor values.""" if stop is None: start, stop = 0, start if K.backend() == 'theano': from theano import tensor as T range_ = T.arange(start, stop) else: assert K.backend() == 'tensorflow' import tensorflow as tf range_ = tf.range(start, stop) if dtype is not None: range_ = K.cast(range_, dtype=dtype) return range_ def ndim(a): """Return the number of dimensions in a tensor or numpy array.""" if isinstance(a, np.ndarray): return a.ndim else: return K.ndim(a) def zeros_like(a): """Return array of zeros with shape of given tensor or numpy array.""" if isinstance(a, np.ndarray): return np.zeros_like(a) else: return K.zeros_like(a) def check_ndim(a, d): """Check that number of dimensions in a is d, raise ValueError otherwise.""" if ndim(a) != d: raise ValueError('expected {}d value, got {}d'.format(d, ndim(a))) def normalize_and_check_ndim(values, d): """Convert Python Sequences to numpy array and check that the number of dimensions in each given value matches d. 
""" def normalize(a): if isinstance(a, collections.Sequence): return np.asarray(a) else: return a values = [normalize(v) for v in values] for v in values: check_ndim(v, d) return values def outer_product(a, b, batch=False): """Outer product of two vectors. If batch is True, return batchwise outer product. """ if batch: return batch_outer_product(a, b) a, b = normalize_and_check_ndim([a, b], 1) # The outer product is equivalent to matrix multiplication a * b # where the vector a is interpreted as a column matrix and the # vector b as a row matrix. The following reshaping and # multiplication accomplishes this. return a[:, np.newaxis] * b[np.newaxis, :] def batch_outer_product(a, b): """Batchwise outer product of pairs of vectors. Expects two 2d tensors of shapes (b, m) and (b, n) and returns a 3d tensor of shape (b, m, n) where each of the (m, n) submatrices is the outer product of corresponding vectors. """ a, b = normalize_and_check_ndim([a, b], 2) # This is a batchwise version of the matrix multiplication approach # used for outer_product(), see explanation there. return a[:, :, np.newaxis] * b[:, np.newaxis, :] def outer_sum(a, b, batch=False): """\"Outer sum" of two vectors. If batch is True, return batchwise outer sum. """ if batch: return batch_outer_sum(a, b) # TODO: naming. Surely this has to be called something sensible? a, b = normalize_and_check_ndim([a, b], 1) # Due to broadcasting, this sum works analogously to matrix # multiplication. See also comments in outer_product(). return a[:, np.newaxis] + b[np.newaxis, :] def batch_outer_sum(a, b): """Batchwise "outer sum" of pairs of vectors. Expects two 2d tensors of shapes (b, m) and (b, n) and returns a 3d tensor of shape (b, m, n) where each of the (m, n) submatrices is the "outer sum" of corresponding vectors. """ a, b = normalize_and_check_ndim([a, b], 2) # Due to broadcasting, this sum works analogously to batch matrix # multiplication. See also comments in batch_outer_product(). return a[:, :, np.newaxis] + b[:, np.newaxis, :] def logsumexp(x, axis=None): """Return the log of the sum of exponentials of elements of x. Preserves numerical precision around the maximum value by initially subtracting and finally adding back in the max. See e.g. https://en.wikipedia.org/wiki/LogSumExp , http://math.stackexchange.com/a/648606 . """ xmax = K.max(x, axis=axis, keepdims=True) xmax_ = K.max(x, axis=axis) return xmax_ + K.log(K.sum(K.exp(x - xmax), axis=axis)) def multi_index(t, indices): """Return t[indices] where indices is a sequence. This Implements a subset of "fancy indexing" operations such as indexing with a tuple (e.g. t[idx1, idx2]) in a way that is transparent to the choice of Keras backend. This is needed because still as of version 0.11, TensorFlow doesn't fully support Numpy/Theano-like advanced indexing (see https://github.com/tensorflow/tensorflow/issues/206, https://github.com/tensorflow/tensorflow/issues/418, https://github.com/tensorflow/tensorflow/issues/4638). """ if K._BACKEND == 'theano': return t[tuple(indices)] #from operator import getitem # Use native Theano indexing. #return getitem(t, tuple(indices)) # Equivalent to t[indices]. else: return _tf_multi_index(t, indices) def _tf_multi_index(t, indices): """Partial TensorFlow implementation of Theano t[indices].""" # Note: this is far from a full implementation of Theano fancy # indexing, use with care. 
assert K._BACKEND == 'tensorflow' from collections import Sequence import tensorflow as tf if not isinstance(indices, Sequence): raise ValueError(indices) if len(indices) == 1: return tf.gather(t, indices[0]) # gather() suffices for 1d if K.ndim(t) == len(indices): # Index n-dimensional tensor with n indices: pack the indices # from e.g. [[i_0, i_1, ...] [j_0, j_1, ...]] to [[i_0, j_0], # [i_1, j_1], ...] and use gather_nd() # (https://www.tensorflow.org/api_docs/python/array_ops.html#gather_nd) # TODO: check that all i in indices have ndim n-1 # TODO: support broadcasting for numpy arrays with np.broadcast_to() #indices = tf.pack(list(indices), axis=len(indices)-1) indices = tf.pack(list(indices), axis=-1) # indices = tf.Print(indices, [indices], 'indices', summarize=100) return tf.gather_nd(t, indices) else: raise NotImplementedError('index {} with {}'.format(t, indices)) def _test(): # Self-tests. TODO: rewrite using proper testing framework. u = [1, 2, 4] v = [1, 10, 100] assert np.array_equal(outer_product(u, v), np.outer(u, v)) # Keras tests of outer_product and outer_sum u = K.placeholder(ndim=1) v = K.placeholder(ndim=1) p = outer_product(u, v) s = outer_sum(u, v) fp = K.function([u, v], [p]) fs = K.function([u, v], [s]) x = [1, 2, 4] y = [1, 10, 100] r = fp([x,y])[0] print('outer product: {} x {} = {}'.format(x, y, r)) r = fs([x,y])[0] print('outer sum: {} (+) {} = {}'.format(x, y, r)) # Keras test of batch_outer_product bu = K.placeholder(ndim=2) bv = K.placeholder(ndim=2) bp = batch_outer_product(bu, bv) bs = batch_outer_sum(bu, bv) bpf = K.function([bu, bv], [bp]) bsf = K.function([bu, bv], [bs]) bx = [[1, 2, 4], [2, 4, 8]] by = [[1, 10, 100], [1, 10, 100]] br = bpf([bx, by])[0] print('batch outer product: {} x {} = {}'.format(bx, by, br)) br = bsf([bx, by])[0] print('batch outer sum: {} (+) {} = {}'.format(bx, by, br)) # TODO: test multi_index() if __name__ == '__main__': _test()
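A quick numpy-only check (not part of the repository above; added to illustrate the max-shift trick that logsumexp() relies on): exponentiating large inputs directly overflows to inf, while subtracting the maximum first keeps every exponent representable.

import numpy as np

x = np.array([1000.0, 999.0, 998.0])
# Naive evaluation overflows: np.exp(1000.) is inf (numpy emits a warning).
naive = np.log(np.sum(np.exp(x)))
# Shifted evaluation stays finite: 1000 + log(1 + e**-1 + e**-2).
xmax = x.max()
stable = xmax + np.log(np.sum(np.exp(x - xmax)))
print(naive, stable)  # inf, roughly 1000.40760596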
[ "tensorflow.gather_nd", "tensorflow.range", "tensorflow.zeros", "numpy.asarray", "numpy.eye", "tensorflow.ones", "tensorflow.gather", "numpy.zeros_like", "tensorflow.meshgrid", "numpy.outer" ]
utils.py
[(11, 'keras.backend.floatx', 'K.floatx', ([], {}), True, 'from keras import backend as K\n'), (26, 'keras.backend.floatx', 'K.floatx', ([], {}), True, 'from keras import backend as K\n'), (41, 'keras.backend.floatx', 'K.floatx', ([], {}), True, 'from keras import backend as K\n'), (203, 'keras.backend.max', 'K.max', (['x'], {'axis': 'axis', 'keepdims': '(True)'}), True, 'from keras import backend as K\n'), (204, 'keras.backend.max', 'K.max', (['x'], {'axis': 'axis'}), True, 'from keras import backend as K\n'), (264, 'keras.backend.placeholder', 'K.placeholder', ([], {'ndim': '(1)'}), True, 'from keras import backend as K\n'), (265, 'keras.backend.placeholder', 'K.placeholder', ([], {'ndim': '(1)'}), True, 'from keras import backend as K\n'), (268, 'keras.backend.function', 'K.function', (['[u, v]', '[p]'], {}), True, 'from keras import backend as K\n'), (269, 'keras.backend.function', 'K.function', (['[u, v]', '[s]'], {}), True, 'from keras import backend as K\n'), (278, 'keras.backend.placeholder', 'K.placeholder', ([], {'ndim': '(2)'}), True, 'from keras import backend as K\n'), (279, 'keras.backend.placeholder', 'K.placeholder', ([], {'ndim': '(2)'}), True, 'from keras import backend as K\n'), (282, 'keras.backend.function', 'K.function', (['[bu, bv]', '[bp]'], {}), True, 'from keras import backend as K\n'), (283, 'keras.backend.function', 'K.function', (['[bu, bv]', '[bs]'], {}), True, 'from keras import backend as K\n'), (17, 'keras.backend.backend', 'K.backend', ([], {}), True, 'from keras import backend as K\n'), (19, 'theano.tensor.ones', 'T.ones', (['shape', 'dtype'], {}), True, 'from theano import tensor as T\n'), (23, 'tensorflow.ones', 'tf.ones', (['shape', 'dtype'], {}), True, 'import tensorflow as tf\n'), (32, 'keras.backend.backend', 'K.backend', ([], {}), True, 'from keras import backend as K\n'), (34, 'theano.tensor.zeros', 'T.zeros', (['shape', 'dtype'], {}), True, 'from theano import tensor as T\n'), (38, 'tensorflow.zeros', 'tf.zeros', (['shape', 'dtype'], {}), True, 'import tensorflow as tf\n'), (55, 'keras.backend.backend', 'K.backend', ([], {}), True, 'from keras import backend as K\n'), (57, 'tensorflow.meshgrid', 'tf.meshgrid', (['i', 'j'], {'indexing': 'indexing'}), True, 'import tensorflow as tf\n'), (64, 'keras.backend.reshape', 'K.reshape', (['I', 'shape'], {}), True, 'from keras import backend as K\n'), (64, 'keras.backend.reshape', 'K.reshape', (['J', 'shape'], {}), True, 'from keras import backend as K\n'), (90, 'keras.backend.backend', 'K.backend', ([], {}), True, 'from keras import backend as K\n'), (92, 'theano.tensor.arange', 'T.arange', (['start', 'stop'], {}), True, 'from theano import tensor as T\n'), (96, 'tensorflow.range', 'tf.range', (['start', 'stop'], {}), True, 'import tensorflow as tf\n'), (98, 'keras.backend.cast', 'K.cast', (['range_'], {'dtype': 'dtype'}), True, 'from keras import backend as K\n'), (107, 'keras.backend.ndim', 'K.ndim', (['a'], {}), True, 'from keras import backend as K\n'), (113, 'numpy.zeros_like', 'np.zeros_like', (['a'], {}), True, 'import numpy as np\n'), (115, 'keras.backend.zeros_like', 'K.zeros_like', (['a'], {}), True, 'from keras import backend as K\n'), (241, 'tensorflow.gather', 'tf.gather', (['t', 'indices[0]'], {}), True, 'import tensorflow as tf\n'), (242, 'keras.backend.ndim', 'K.ndim', (['t'], {}), True, 'from keras import backend as K\n'), (252, 'tensorflow.gather_nd', 'tf.gather_nd', (['t', 'indices'], {}), True, 'import tensorflow as tf\n'), (261, 'numpy.outer', 'np.outer', (['u', 'v'], {}), True, 
'import numpy as np\n'), (21, 'keras.backend.backend', 'K.backend', ([], {}), True, 'from keras import backend as K\n'), (36, 'keras.backend.backend', 'K.backend', ([], {}), True, 'from keras import backend as K\n'), (53, 'keras.backend.ndim', 'K.ndim', (['i'], {}), True, 'from keras import backend as K\n'), (53, 'keras.backend.ndim', 'K.ndim', (['j'], {}), True, 'from keras import backend as K\n'), (59, 'keras.backend.backend', 'K.backend', ([], {}), True, 'from keras import backend as K\n'), (63, 'keras.backend.shape', 'K.shape', (['i'], {}), True, 'from keras import backend as K\n'), (63, 'keras.backend.shape', 'K.shape', (['j'], {}), True, 'from keras import backend as K\n'), (73, 'numpy.eye', 'np.eye', (['size'], {'dtype': 'dtype'}), True, 'import numpy as np\n'), (77, 'keras.backend.eye', 'K.eye', (['size', 'dtype'], {}), True, 'from keras import backend as K\n'), (94, 'keras.backend.backend', 'K.backend', ([], {}), True, 'from keras import backend as K\n'), (130, 'numpy.asarray', 'np.asarray', (['a'], {}), True, 'import numpy as np\n'), (61, 'keras.backend.shape', 'K.shape', (['j'], {}), True, 'from keras import backend as K\n'), (62, 'keras.backend.shape', 'K.shape', (['i'], {}), True, 'from keras import backend as K\n'), (205, 'keras.backend.exp', 'K.exp', (['(x - xmax)'], {}), True, 'from keras import backend as K\n')]
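A side note on _tf_multi_index() in the file above: the tuple-indexing-to-gather_nd translation it performs is easier to see in modern TensorFlow, where tf.pack has since been renamed tf.stack. A small TF 2.x sketch of the same idea (assumed equivalent, not code from the repository):

import tensorflow as tf

t = tf.reshape(tf.range(12), (3, 4))     # [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]
rows = tf.constant([0, 2])
cols = tf.constant([1, 3])
# Pack the per-axis index vectors into (row, col) pairs; gather_nd then
# behaves like Theano/numpy t[rows, cols].
pairs = tf.stack([rows, cols], axis=-1)  # [[0, 1], [2, 3]]
print(tf.gather_nd(t, pairs).numpy())    # [ 1 11]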
coffeeshaychildren/master-computing-upload
e9352d0d52f40ef022c74ae01ca9e03395bdf860
# Copyright 2019, The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing utilities for the `tensor_encoding` package. This file contains: * Base test class for testing implementations of the `EncodingStageInterface`. * Example implementations of the `EncodingStageInterface`. These example implementations are used to test the base test class, and the `Encoder` class. * Other utilities useful for testing. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import collections from absl.testing import parameterized import numpy as np import six from six.moves import range import tensorflow as tf from tensorflow_model_optimization.python.core.internal.tensor_encoding.core import encoding_stage from tensorflow_model_optimization.python.core.internal.tensor_encoding.utils import py_utils DEFAULT_RTOL = 1e-05 DEFAULT_ATOL = 1e-05 # Named tuple containing the values summarizing the results for a single # evaluation of an EncodingStageInterface or an AdaptiveEncodingStageInterface. TestData = collections.namedtuple( 'TestData', [ 'x', # The input provided to encoding. 'encoded_x', # A dictionary of values representing the encoded input x. 'decoded_x', # Decoded value. Has the same shape as x. # The fields below are only relevant for AdaptiveEncodingStageInterface, # and will not be populated while testing an EncodingStageInterface. 'initial_state', # Initial state used for encoding. 'state_update_tensors', # State update tensors created by encoding. 'updated_state', # Updated state after encoding. ]) # Set the default values to be None, to enable use of TestData while testing # EncodingStageInterface, without needing to be aware of the other fields. TestData.__new__.__defaults__ = (None,) * len(TestData._fields) # This metaclass enables adding the abc.ABCMeta metaclass to a class inheriting from # parameterized.TestCase. class ParameterizedABCMeta(abc.ABCMeta, parameterized.TestGeneratorMetaclass): pass @six.add_metaclass(ParameterizedABCMeta) class BaseEncodingStageTest(tf.test.TestCase, parameterized.TestCase): """Abstract base class for testing encoding stage implementations. Tests for each implementation of `EncodingStageInterface` and `AdaptiveEncodingStageInterface` should implement this class, and add additional tests specific to the behavior of the tested implementation. This class contains basic tests, which every implementation of `EncodingStageInterface` is expected to pass, and it contains a set of utilities for testing. In particular, the `test_one_to_many_encode_decode` and `test_many_to_one_encode_decode` methods ensure the implementation does not assume something that is not possible in scenarios where the class is meant to be used. """ # ----------------- # Abstract methods # ----------------- @abc.abstractproperty def is_lossless(self): """Returns True if the encoding stage is lossless.
That is, if the `EncodingStageInterface` returned by `default_encoding_stage` is such that encoding and decoding amount to an identity. This property is used to determine whether to perform additional checks in the test methods. """ @abc.abstractmethod def default_encoding_stage(self): """Provides a default constructor for an encoding stage. This is used for tests in the base class, which every implementation of `EncodingStageInterface` is expected to pass. Returns: An instance of a concrete `EncodingStageInterface` to be tested. """ @abc.abstractmethod def default_input(self): """Provides a default input for testing the encoding. This is used for tests in the base class, which every implementation of EncodingStageInterface is expected to pass. The `shape` of the returned `Tensor` must be statically known. Returns: A `Tensor` object to be used as default testing input for encoding. """ @abc.abstractmethod def common_asserts_for_test_data(self, data): """A collection of assertions for the results of encoding and decoding. This method takes a `TestData` object and evaluates any user provided expectations on the values. This method is used in multiple test methods and should not use TensorFlow in any way, only perform the assertions. Args: data: A `TestData` tuple containing numpy values with results to be evaluated. """ # ------------- # Test methods # ------------- def test_default_encoding_stage(self): """Tests the correctness of `default_encoding_stage`.""" stage = self.default_encoding_stage() self.assertIsInstance(stage, (encoding_stage.EncodingStageInterface, encoding_stage.AdaptiveEncodingStageInterface)) # Calling the method again should create a new instance. new_stage = self.default_encoding_stage() self.assertIsNot(stage, new_stage) def test_encoding_stage_constructor_does_not_modify_graph(self): """Tests that the constructor of the encoding stage does not modify the graph.""" graph_def = tf.get_default_graph().as_graph_def() self.default_encoding_stage() new_graph_def = tf.get_default_graph().as_graph_def() tf.test.assert_equal_graph_def(graph_def, new_graph_def) def test_encoding_stage_name(self): """Tests that the `name` property returns a string.""" stage = self.default_encoding_stage() self.assertIsInstance(stage.name, str) def test_default_input_is_tensor_with_fully_defined_shape(self): """Tests that `default_input` returns a `Tensor` of fully defined shape.""" x = self.default_input() self.assertIsInstance(x, tf.Tensor) self.assertTrue(x.shape.is_fully_defined()) def test_basic_encode_decode(self): """Tests the core functionality. This test method uses the default encoding stage and default input, executes encoding and decoding in the context of the same graph, and finally performs custom asserts on the resulting data. """ # Get Tensors representing the encoded and decoded values and perform # generic type assertions.
x = self.default_input() stage = self.default_encoding_stage() if is_adaptive_stage(stage): state = stage.initial_state() encode_params, decode_params = stage.get_params(state) encoded_x, decoded_x, state_update_tensors = self.encode_decode_x( stage, x, encode_params, decode_params) updated_state = stage.update_state(state, state_update_tensors) test_data = TestData(x, encoded_x, decoded_x, state, state_update_tensors, updated_state) else: encode_params, decode_params = stage.get_params() encoded_x, decoded_x = self.encode_decode_x(stage, x, encode_params, decode_params) test_data = TestData(x, encoded_x, decoded_x) self.generic_asserts(test_data, stage) # Evaluate the Tensors and get numpy values. test_data = self.evaluate_test_data(test_data) if self.is_lossless: self.assertAllClose( test_data.x, test_data.decoded_x, rtol=DEFAULT_RTOL, atol=DEFAULT_ATOL) self.common_asserts_for_test_data(test_data) def test_one_to_many_encode_decode(self): """Tests the core functionality in the 'one-to-many' case. This method tests that the implementation can be used in a setting where the encoding happens in one location, decoding happens in another location, and communication between these happens outside of TensorFlow. In particular, this ensures that the implementation does not create something incompatible with the use case, such as creating a TensorFlow state during encoding, and accessing it during decoding. """ # This just delegates to a utility, which can be used if the same needs to # be tested with an input Tensor of specific properties, such as statically # unknown shape, potentially with additional assertions. test_data = self.run_one_to_many_encode_decode( self.default_encoding_stage(), self.default_input) self.common_asserts_for_test_data(test_data) def test_many_to_one_encode_decode(self): """Tests the core functionality in the 'many-to-one' case. This method tests that the implementation can be used in a setting where the parameters are created in one location, communicated to a number of other locations, where different inputs are encoded, and decoding happens in the original location. The communication between these happens outside of TensorFlow. In particular, this ensures that the implementation does not create something incompatible with the use case, such as creating a TensorFlow state during encoding, and accessing it during decoding. """ stage = self.default_encoding_stage() input_values = self.evaluate([self.default_input() for _ in range(3)]) server_test_data, decode_params = self.run_many_to_one_encode_decode( stage, input_values) if self.is_lossless: self.assertAllClose( np.sum([d.x for d in server_test_data], axis=0), np.sum([d.decoded_x for d in server_test_data], axis=0), rtol=DEFAULT_RTOL, atol=DEFAULT_ATOL) if stage.commutes_with_sum: self.assert_commutes_with_sum(server_test_data, stage, decode_params, input_values[0].shape) self.asserts_for_test_many_to_one_encode_decode(server_test_data) # ------------------ # Testing utilities # ------------------ def encode_decode_x(self, stage, x, encode_params, decode_params): """Given params, encodes and decodes input `Tensor`. Args: stage: An `EncodingStageInterface` or an `AdaptiveEncodingStageInterface` to be used for encoding and decoding. x: A `Tensor` to be encoded and decoded.
encode_params: Parameters to be provided to `stage.encode` decode_params: Parameters to be provided to `stage.decode` Returns: A tuple (encoded_x, decoded_x) if `stage` is an `EncodingStageInterface`, or a tuple (encoded_x, decoded_x, state_update_tensors) if `stage` is an `AdaptiveEncodingStageInterface`, where these are: encoded_x: A dictionary of `Tensor` objects representing the encoded input `x`. decoded_x: A single `Tensor`, representing decoded `encoded_x`. state_update_tensors: A dictionary of `Tensor` objects representing the information necessary for updating the state. """ if is_adaptive_stage(stage): encoded_x, state_update_tensors = stage.encode(x, encode_params) else: encoded_x = stage.encode(x, encode_params) shape = None if stage.decode_needs_input_shape: shape = py_utils.static_or_dynamic_shape(x) decoded_x = stage.decode(encoded_x, decode_params, shape=shape) if is_adaptive_stage(stage): return encoded_x, decoded_x, state_update_tensors else: return encoded_x, decoded_x def run_one_to_many_encode_decode(self, stage, input_fn, state=None): """Runs encoding and decoding in the one-to-many setting. This method creates the input `Tensor` in the context of one graph, creates and evaluates the encoded structure, along with `decode_params`. These are used as Python constants in another graph to create and evaluate decoding. The need for `input_fn`, as opposed to a simple numpy constant, is because some stages need to work with `Tensor` objects that do not have statically known shape. Such `Tensor` needs to be created in the context of the graph in which it is to be evaluated, that is, inside of this method. Args: stage: An `EncodingStageInterface` or an `AdaptiveEncodingStageInterface` to be used for encoding. input_fn: A callable object without arguments that creates and returns a `Tensor` or numpy value to be used for encoding. state: A dictionary representing the state. Can be set only if `stage` is an `AdaptiveEncodingStageInterface`. Returns: A `TestData` tuple containing numpy values representing the results. """ def _adaptive_one_to_many_encode_decode(state): """Implementation of the method for `AdaptiveEncodingStageInterface`.""" server_graph = tf.Graph() with server_graph.as_default(): x = input_fn() shape = py_utils.static_or_dynamic_shape(x) if state is None: state = stage.initial_state() encode_params, decode_params = stage.get_params(state) encoded_x, state_update_tensors = stage.encode(x, encode_params) updated_state = stage.update_state(state, state_update_tensors) # Get all values out of TensorFlow as Python constants. This is a trivial # example of communication happening outside of TensorFlow. 
with self.session(graph=server_graph): (x, decode_params, encoded_x, state, state_update_tensors, updated_state, shape) = self.evaluate_tf_py_list([ x, decode_params, encoded_x, state, state_update_tensors, updated_state, shape ]) client_graph = tf.Graph() with client_graph.as_default(): decoded_x = stage.decode(encoded_x, decode_params, shape=shape) with self.session(graph=client_graph): decoded_x = self.evaluate(decoded_x) return TestData(x, encoded_x, decoded_x, state, state_update_tensors, updated_state) def _non_adaptive_one_to_many_encode_decode(): """Implementation of the method for `EncodingStageInterface`.""" server_graph = tf.Graph() with server_graph.as_default(): x = input_fn() shape = py_utils.static_or_dynamic_shape(x) encode_params, decode_params = stage.get_params() encoded_x = stage.encode(x, encode_params) # Get all values out of TensorFlow as Python constants. This is a trivial # example of communication happening outside of TensorFlow. with self.session(graph=server_graph): x, decode_params, encoded_x, shape = self.evaluate_tf_py_list( [x, decode_params, encoded_x, shape]) client_graph = tf.Graph() with client_graph.as_default(): decoded_x = stage.decode(encoded_x, decode_params, shape=shape) with self.session(graph=client_graph): decoded_x = self.evaluate(decoded_x) return TestData(x, encoded_x, decoded_x) if is_adaptive_stage(stage): return _adaptive_one_to_many_encode_decode(state) else: assert state is None return _non_adaptive_one_to_many_encode_decode() def run_many_to_one_encode_decode(self, stage, input_values, state=None): """Runs encoding and decoding in the many-to-one setting. This method creates and evaluates the parameters in the context of one graph, which are used to create and evaluate encoding in a new graph for every input value provided. These values are then decoded in the context of the first graph. If the provided `stage` commutes with sum, this is in addition verified. Args: stage: An `EncodingStageInterface` or an `AdaptiveEncodingStageInterface` to be used for encoding. input_values: A list of numpy values to be used for encoding. All must have the same shape. state: A dictionary representing the state. Can be set only if `stage` is an `AdaptiveEncodingStageInterface`. Returns: A tuple `(server_test_data, decode_params)` where these are: server_test_data: A `list` of `TestData` tuples containing numpy values representing the results of encoding for each element of `input_values`. decode_params: Numpy values of the decode parameters used. These are values that should be used if additional decoding is to be done, such as for `assert_commutes_with_sum`. 
""" def _adaptive_many_to_one_encode_decode(state): """Implementation of the method for `AdaptiveEncodingStageInterface`.""" server_graph = tf.Graph() with server_graph.as_default(): shape = input_values[0].shape if state is None: state = stage.initial_state() encode_params, decode_params = stage.get_params(state) with self.session(server_graph) as sess: encode_params, decode_params, state = self.evaluate_tf_py_list( [encode_params, decode_params, state], sess) client_test_data = [] for x in input_values: client_graph = tf.Graph() with client_graph.as_default(): encoded_x, state_update_tensors = stage.encode(x, encode_params) with self.session(client_graph): encoded_x, state_update_tensors = self.evaluate( [encoded_x, state_update_tensors]) client_test_data.append( TestData(x, encoded_x, state_update_tensors=state_update_tensors)) server_test_data = [] with server_graph.as_default(): with self.session(server_graph) as sess: for test_data in client_test_data: decoded_x = stage.decode( test_data.encoded_x, decode_params, shape=shape) server_test_data.append( test_data._replace( decoded_x=sess.run(decoded_x), initial_state=state)) # Compute and append the updated state to all TestData objects. all_state_update_tensors = [ d.state_update_tensors for d in server_test_data ] aggregated_state_update_tensors = aggregate_state_update_tensors( stage, all_state_update_tensors) updated_state = sess.run( stage.update_state(state, aggregated_state_update_tensors)) server_test_data = [ d._replace(updated_state=updated_state) for d in server_test_data ] return server_test_data, decode_params def _non_adaptive_many_to_one_encode_decode(): """Implementation of the method for `EncodingStageInterface`.""" server_graph = tf.Graph() with server_graph.as_default(): shape = input_values[0].shape encode_params, decode_params = stage.get_params() with self.session(server_graph) as sess: encode_params, decode_params = self.evaluate_tf_py_list( [encode_params, decode_params], sess) client_test_data = [] for x in input_values: client_graph = tf.Graph() with client_graph.as_default(): encoded_x = stage.encode(x, encode_params) with self.session(client_graph): encoded_x = self.evaluate(encoded_x) client_test_data.append(TestData(x, encoded_x)) server_test_data = [] with server_graph.as_default(): with self.session(server_graph) as sess: for test_data in client_test_data: decoded_x = stage.decode( test_data.encoded_x, decode_params, shape=shape) server_test_data.append( test_data._replace(decoded_x=sess.run(decoded_x))) return server_test_data, decode_params if is_adaptive_stage(stage): return _adaptive_many_to_one_encode_decode(state) else: assert state is None return _non_adaptive_many_to_one_encode_decode() def evaluate_tf_py_list(self, fetches, session=None): """Evaluates only provided `Tensor` objects and returns numpy values. Different from `self.evaluate` or `session.run`, which only takes TensorFlow objects to be evaluated, this method can take a combination of Python and TensorFlow objects, separates them, evaluates only the TensorFlow objects, and merges the resulting numpy values back with the original python values. Args: fetches: A `list` of fetches to be evalutated. session: An optional `tf.Session` object to be used for evaluation, if necessary to explicitly specify. If `None`, the default session will be used. Returns: A list of the same structure as `fetches`, with TensorFlow objects replaced by the result of single call to `self.evaluate` (or `session.run`) with these TensorFlow objects as the input. 
""" # Split the fetches to two structures. py_fetches, tf_fetches = [], [] placeholder_empty_tuple = () assert isinstance(fetches, list), 'fetches should be a list.' for fetch in fetches: if isinstance(fetch, dict): d_py, d_tf = py_utils.split_dict_py_tf(fetch) py_fetches.append(d_py) tf_fetches.append(d_tf) elif tf.is_tensor(fetch): py_fetches.append(None) tf_fetches.append(fetch) else: py_fetches.append(fetch) # This empty tuple is here as a marker to retain the value from # py_fetches, while keeping the list length same for simplicity of # reconstruction. This is effectively None, but self.evaluate does not # accept None as an input argument. tf_fetches.append(placeholder_empty_tuple) eval_fetches = self.maybe_evaluate(tf_fetches, session) # Merge back the two structures, not containing Tensors. for i, value in enumerate(eval_fetches): if isinstance(value, dict): eval_fetches[i] = py_utils.merge_dicts(value, py_fetches[i]) elif value == placeholder_empty_tuple: eval_fetches[i] = py_fetches[i] return eval_fetches def evaluate_test_data(self, test_data, session=None): """Evaluates a `TestData` object. Args: test_data: A `TestData` namedtuple. session: Optional. A `tf.Session` object in the context of which the evaluation is to happen. Returns: A new `TestData` object with `Tensor` objects in `test_data` replaced by numpy values. Raises: TypeError: If `test_data` is not a `TestData` namedtuple. """ if not isinstance(test_data, TestData): raise TypeError('A TestData object must be provided.') _, data_tf = py_utils.split_dict_py_tf(test_data._asdict()) return test_data._replace(**self.maybe_evaluate(data_tf, session)) def maybe_evaluate(self, fetches, session=None): """Evaluates `fetches`, if containing any `Tensor` objects. Args: fetches: Any nested structure compatible with `tf.nest`. session: Optional. A `tf.Session` object in the context of which the evaluation is to happen. Returns: `fetches` with any `Tensor` objects replaced by numpy values. """ if any((tf.is_tensor(t) for t in tf.nest.flatten(fetches))): if session: fetches = session.run(fetches) else: fetches = self.evaluate(fetches) return fetches def generic_asserts(self, test_data, stage): """Collection of static checks every implementation is expected to satisfy. Args: test_data: A `TestData` tuple. All values should contain `Tensor` objects. stage: An `EncodingStageInterface` that generated the `test_data`. """ # Every key in compressible_tensors_keys should be in encoded_x. for key in stage.compressible_tensors_keys: self.assertIn(key, test_data.encoded_x) # The return structure of encode should only contain Tensor objects, and no # Python constants. for tensor in six.itervalues(test_data.encoded_x): self.assertIsInstance(tensor, tf.Tensor) # With a statically known input shape, the shape of decoded_x should be # statically known. If not statically known, both should be unknown. self.assertEqual(test_data.x.shape, test_data.decoded_x.shape) # The encoding should always return the same dtype as the original dtype. self.assertEqual(test_data.x.dtype, test_data.decoded_x.dtype) # The encoded and decoded Tensors should have appropriate substrings in # their names, as long as the encode or decode methods are not identities. # If they are identities, encoded_x must be a dictionaty with a single key, # mapping to the same Tensor as x or decoded_x, respectively. 
if (len(test_data.encoded_x) > 1 or test_data.x is not list(test_data.encoded_x.values())[0]): for t in six.itervalues(test_data.encoded_x): self.assertIn(encoding_stage.ENCODE_SCOPE_SUFFIX, t.name) if (len(test_data.encoded_x) > 1 or test_data.decoded_x is not list(test_data.encoded_x.values())[0]): self.assertIn(encoding_stage.DECODE_SCOPE_SUFFIX, test_data.decoded_x.name) if is_adaptive_stage(stage): # The property should have keys matching those of state_update_tensors. self.assertSameElements(stage.state_update_aggregation_modes.keys(), test_data.state_update_tensors.keys()) for mode in six.itervalues(stage.state_update_aggregation_modes): self.assertIn(mode, encoding_stage.StateAggregationMode) for tensor in six.itervalues(test_data.initial_state): self.assertTrue(tf.is_tensor(tensor)) for tensor in six.itervalues(test_data.state_update_tensors): self.assertTrue(tf.is_tensor(tensor)) for tensor in six.itervalues(test_data.updated_state): self.assertTrue(tf.is_tensor(tensor)) # The state related Tensors should have appropriate substrings in their # names. for tensor in six.itervalues(test_data.initial_state): self.assertIn(encoding_stage.INITIAL_STATE_SCOPE_SUFFIX, tensor.name) for tensor in six.itervalues(test_data.updated_state): self.assertIn(encoding_stage.UPDATE_STATE_SCOPE_SUFFIX, tensor.name) for tensor in six.itervalues(test_data.state_update_tensors): self.assertIn(encoding_stage.ENCODE_SCOPE_SUFFIX, tensor.name) def asserts_for_test_many_to_one_encode_decode(self, data): """Additional asserts for `test_many_to_one_encode_decode` method. By default, this method simply calls `common_asserts_for_test_data` on every element of `data`, but can be overridden by an implementation to provide custom or additional checks. Args: data: A `list` of `TestData` tuples containing numpy values to be used for the assertions. """ for d in data: self.common_asserts_for_test_data(d) def assert_commutes_with_sum(self, server_test_data, stage, decode_params, shape=None): """Asserts that the provided `EncodingStageInterface` commutes with sum. Given a list of `TestData` namedtuples containing numpy values of input and corresponding encoded and decoded values, makes sure that the sum of the decoded values is the same as first summing encoded values, and then decoding. Args: server_test_data: A `list` of `TestData` namedtuples. stage: An `EncodingStageInterface` object that was used to generate `server_test_data` and is to be used in the assert. decode_params: Parameters to be used for decoding by `stage`. Must be the same values as used for generating `server_test_data`. shape: An optional shape for the `decode` method of `stage`. """ # This assert should only be used with an instance that commutes with sum. assert stage.commutes_with_sum num_summands = len(server_test_data) expected_sum = np.sum([d.decoded_x for d in server_test_data], axis=0) sum_encoded_x = {} for k in server_test_data[0].encoded_x: sum_encoded_x[k] = np.sum([d.encoded_x[k] for d in server_test_data], axis=0) with tf.Graph().as_default(): with self.session() as sess: decode_sum_encoded_x = sess.run( stage.decode(sum_encoded_x, decode_params, num_summands, shape)) self.assertAllClose( expected_sum, decode_sum_encoded_x, rtol=DEFAULT_RTOL, atol=DEFAULT_ATOL) @encoding_stage.tf_style_encoding_stage class PlusOneEncodingStage(encoding_stage.EncodingStageInterface): """[Example] encoding stage, adding 1.
This is the simplest example implementation of an `EncodingStageInterface` - no state, no constructor arguments, no shape information needed for decoding, no commutativity with sum. """ ENCODED_VALUES_KEY = 'p1_values' ADD_PARAM_KEY = 'p1_add' @property def name(self): """See base class.""" return 'plus_one' @property def compressible_tensors_keys(self): """See base class.""" return [self.ENCODED_VALUES_KEY] @property def commutes_with_sum(self): """See base class.""" return False @property def decode_needs_input_shape(self): """See base class.""" return False def get_params(self): """See base class.""" params = {self.ADD_PARAM_KEY: tf.constant(1.0)} return params, params def encode(self, x, encode_params): """See base class.""" return {self.ENCODED_VALUES_KEY: x + encode_params[self.ADD_PARAM_KEY]} def decode(self, encoded_tensors, decode_params, num_summands=None, shape=None): """See base class.""" del num_summands # Unused. del shape # Unused. decoded_x = ( encoded_tensors[self.ENCODED_VALUES_KEY] - decode_params[self.ADD_PARAM_KEY]) return decoded_x @encoding_stage.tf_style_encoding_stage class TimesTwoEncodingStage(encoding_stage.EncodingStageInterface): """[Example] encoding stage, multiplying by 2. This is an example implementation of an `EncodingStageInterface` that commutes with sum. """ ENCODED_VALUES_KEY = 't2_values' FACTOR_PARAM_KEY = 't2_factor' @property def name(self): """See base class.""" return 'times_two' @property def compressible_tensors_keys(self): """See base class.""" return [self.ENCODED_VALUES_KEY] @property def commutes_with_sum(self): """See base class.""" return True @property def decode_needs_input_shape(self): """See base class.""" return False def get_params(self): """See base class.""" params = {self.FACTOR_PARAM_KEY: tf.constant(2.0)} return params, params def encode(self, x, encode_params): """See base class.""" return {self.ENCODED_VALUES_KEY: x * encode_params[self.FACTOR_PARAM_KEY]} def decode(self, encoded_tensors, decode_params, num_summands=None, shape=None): """See base class.""" del num_summands # Unused. del shape # Unused. decoded_x = ( encoded_tensors[self.ENCODED_VALUES_KEY] / decode_params[self.FACTOR_PARAM_KEY]) return decoded_x @encoding_stage.tf_style_encoding_stage class SimpleLinearEncodingStage(encoding_stage.EncodingStageInterface): """[Example] encoding stage, computing a simple linear transformation. This is an example implementation of an `EncodingStageInterface` that can take constructor arguments, which can be both python constants, or `tf.Variable` objects, and subsequently expose those via `encode_params` / `decode_params`. In addition, this is an example when commutativity with sum requires the `num_summands` argument. 
""" ENCODED_VALUES_KEY = 'sl_values' A_PARAM_KEY = 'sl_a_param' B_PARAM_KEY = 'sl_b_param' def __init__(self, a, b): self._a = a self._b = b @property def name(self): """See base class.""" return 'simple_linear' @property def compressible_tensors_keys(self): """See base class.""" return [self.ENCODED_VALUES_KEY] @property def commutes_with_sum(self): """See base class.""" return True @property def decode_needs_input_shape(self): """See base class.""" return False def get_params(self): """See base class.""" params = {self.A_PARAM_KEY: self._a, self.B_PARAM_KEY: self._b} return params, params def encode(self, x, encode_params): """See base class.""" a, b = encode_params[self.A_PARAM_KEY], encode_params[self.B_PARAM_KEY] return {self.ENCODED_VALUES_KEY: a * x + b} def decode(self, encoded_tensors, decode_params, num_summands=None, shape=None): """See base class.""" del shape # Unused. a, b = decode_params[self.A_PARAM_KEY], decode_params[self.B_PARAM_KEY] if num_summands is not None: shift = b * tf.cast(num_summands, b.dtype) else: shift = b return (encoded_tensors[self.ENCODED_VALUES_KEY] - shift) / a @encoding_stage.tf_style_encoding_stage class ReduceMeanEncodingStage(encoding_stage.EncodingStageInterface): """[Example] encoding stage, computing a mean and remembering original shape. This is an example implementation of an `EncodingStageInterface` that requires the original shape information for decoding. Note that the encoding does not store the shape in the return structure of the `encode` method. Instead, the shape information will be handled separately by the higher level `Encoder`. """ ENCODED_VALUES_KEY = 'rm_values' @property def name(self): """See base class.""" return 'reduce_mean' @property def compressible_tensors_keys(self): """See base class.""" return [self.ENCODED_VALUES_KEY] @property def commutes_with_sum(self): """See base class.""" return True @property def decode_needs_input_shape(self): """See base class.""" return True def get_params(self): """See base class.""" return {}, {} def encode(self, x, encode_params): """See base class.""" del encode_params # Unused. return {self.ENCODED_VALUES_KEY: tf.reduce_mean(x, keepdims=True)} def decode(self, encoded_tensors, decode_params, num_summands=None, shape=None): """See base class.""" del num_summands # Unused. del decode_params # Unused. return tf.tile(encoded_tensors[self.ENCODED_VALUES_KEY], shape) @encoding_stage.tf_style_encoding_stage class RandomAddSubtractOneEncodingStage(encoding_stage.EncodingStageInterface): """[Example] encoding stage, randomly adding or subtracting 1. This is an example implementation of an `EncodingStageInterface` that is not lossless, but unbiased on expectation. This is a propery of a variety implementations of the interface, and this class serves as an example of how the unbiasedness can be tested. """ ENCODED_VALUES_KEY = 'ras_values' @property def name(self): """See base class.""" return 'random_add_subtract' @property def compressible_tensors_keys(self): """See base class.""" return [self.ENCODED_VALUES_KEY] @property def commutes_with_sum(self): """See base class.""" return True @property def decode_needs_input_shape(self): """See base class.""" return False def get_params(self): """See base class.""" return {}, {} def encode(self, x, encode_params): """See base class.""" del encode_params # Unused. 
return {self.ENCODED_VALUES_KEY: x + tf.sign(tf.random.normal(tf.shape(x)))} def decode(self, encoded_tensors, decode_params, num_summands=None, shape=None): """See base class.""" del decode_params # Unused. del num_summands # Unused. del shape # Unused. return encoded_tensors[self.ENCODED_VALUES_KEY] @encoding_stage.tf_style_encoding_stage class SignIntFloatEncodingStage(encoding_stage.EncodingStageInterface): """[Example] encoding stage, encoding input into multiple outputs. This is an example implementation of an `EncodingStageInterface` that is lossless and splits the input into three components - the integer part, the fractional part and the signs. """ ENCODED_SIGNS_KEY = 'sif_signs' ENCODED_INTS_KEY = 'sif_ints' ENCODED_FLOATS_KEY = 'sif_floats' @property def name(self): """See base class.""" return 'sign_int_float' @property def compressible_tensors_keys(self): """See base class.""" return [ self.ENCODED_SIGNS_KEY, self.ENCODED_INTS_KEY, self.ENCODED_FLOATS_KEY ] @property def commutes_with_sum(self): """See base class.""" return False @property def decode_needs_input_shape(self): """See base class.""" return False def get_params(self): """See base class.""" return {}, {} def encode(self, x, encode_params): """See base class.""" del encode_params # Unused. signs = tf.sign(x) abs_vals = tf.abs(x) ints = tf.floor(abs_vals) floats = abs_vals - ints return { self.ENCODED_SIGNS_KEY: signs, self.ENCODED_INTS_KEY: ints, self.ENCODED_FLOATS_KEY: floats } def decode(self, encoded_tensors, decode_params, num_summands=None, shape=None): """See base class.""" del decode_params # Unused. del num_summands # Unused. del shape # Unused. signs = encoded_tensors[self.ENCODED_SIGNS_KEY] ints = encoded_tensors[self.ENCODED_INTS_KEY] floats = encoded_tensors[self.ENCODED_FLOATS_KEY] return signs * (ints + floats) def dummy_rng_source(seed, num_elements): """Dummy TensorFlow random number generator. We need a custom random source, which is always deterministic given a random seed. That is not currently available in TensorFlow. This simple function serves an illustrative purpose. It is *not* a useful random number generator, and should only be used in tests. Args: seed: A random seed. num_elements: Number of random values to generate. Returns: A `Tensor` of shape `(num_elements)` containing pseudorandom values. """ def next_num(num): # This creates a cycle of length 136. return tf.mod((num * 13), 137) num = tf.reshape(tf.mod(seed, 136) + 1, (1,)) result = num for _ in range(num_elements - 1): num = next_num(num) result = tf.concat([result, num], 0) return tf.to_float(result) @encoding_stage.tf_style_encoding_stage class PlusRandomNumEncodingStage(encoding_stage.EncodingStageInterface): """[Example] encoding stage, adding random values given a random seed. This is an example implementation of an `EncodingStageInterface` that depends on a shared random seed. The seed `Tensor` should be created in the `get_params` method, and the same values should eventually be passed to both `encode` and `decode` methods, making sure a randomized transform is invertible.
""" ENCODED_VALUES_KEY = 'prn_values' SEED_PARAM_KEY = 'prn_seed' @property def name(self): """See base class.""" return 'plus_random_num' @property def compressible_tensors_keys(self): """See base class.""" return [self.ENCODED_VALUES_KEY] @property def commutes_with_sum(self): """See base class.""" return False @property def decode_needs_input_shape(self): """See base class.""" return False def get_params(self): """See base class.""" params = { self.SEED_PARAM_KEY: tf.random.uniform((), maxval=tf.int32.max, dtype=tf.int32) } return params, params def encode(self, x, encode_params): """See base class.""" addend = dummy_rng_source(encode_params[self.SEED_PARAM_KEY], x.shape.num_elements()) addend = tf.reshape(addend, x.shape) return {self.ENCODED_VALUES_KEY: x + addend} def decode(self, encoded_tensors, decode_params, num_summands=None, shape=None): """See base class.""" del num_summands # Unused. del shape # Unused. x = encoded_tensors[self.ENCODED_VALUES_KEY] addend = dummy_rng_source(decode_params[self.SEED_PARAM_KEY], x.shape.num_elements()) addend = tf.reshape(addend, x.shape) return x - addend @encoding_stage.tf_style_adaptive_encoding_stage class PlusOneOverNEncodingStage(encoding_stage.AdaptiveEncodingStageInterface): """[Example] adaptive encoding stage, adding 1/N in N-th iteration. This is an example implementation of an `AdaptiveEncodingStageInterface` that modifies state, which controls the creation of params. This is also a simple example of how an `EncodingStageInterface` can be wrapped as an `AdaptiveEncodingStageInterface`, without modifying the wrapped encode and decode methods. """ ENCODED_VALUES_KEY = PlusOneEncodingStage.ENCODED_VALUES_KEY ADD_PARAM_KEY = PlusOneEncodingStage.ADD_PARAM_KEY ITERATION_STATE_KEY = 'pn_iteration' def __init__(self): self._stage = PlusOneEncodingStage() @property def name(self): """See base class.""" return 'plus_one_over_n' @property def compressible_tensors_keys(self): """See base class.""" return [self.ENCODED_VALUES_KEY] @property def commutes_with_sum(self): """See base class.""" return False @property def decode_needs_input_shape(self): """See base class.""" return False @property def state_update_aggregation_modes(self): """See base class.""" return {} def initial_state(self): """See base class.""" return {self.ITERATION_STATE_KEY: tf.constant(1, dtype=tf.int32)} def update_state(self, state, state_update_tensors): """See base class.""" del state_update_tensors # Unused. return { self.ITERATION_STATE_KEY: state[self.ITERATION_STATE_KEY] + tf.constant(1, dtype=tf.int32) } def get_params(self, state): """See base class.""" params = { self.ADD_PARAM_KEY: 1 / tf.to_float(state[self.ITERATION_STATE_KEY]) } return params, params def encode(self, x, encode_params): """See base class.""" return self._stage.encode(x, encode_params), {} def decode(self, encoded_tensors, decode_params, num_summands=None, shape=None): """See base class.""" return self._stage.decode(encoded_tensors, decode_params, num_summands, shape) @encoding_stage.tf_style_adaptive_encoding_stage class AdaptiveNormalizeEncodingStage( encoding_stage.AdaptiveEncodingStageInterface): """[Example] encoding stage, adaptively normalizing data. This is an example implementation of an `AdaptiveEncodingStageInterface` that updates the state based on information stored in `state_update_tensors`. This implementation wraps `TimesTwoEncodingStage`, and adaptively changes the parameters that control the `encode` and `decode` methods. 
It assumes that over iterations, the input values to be encoded come from a certain static distribution, and tries to find a good factor to normalize the input to be of unit norm. """ ENCODED_VALUES_KEY = TimesTwoEncodingStage.ENCODED_VALUES_KEY FACTOR_PARAM_KEY = TimesTwoEncodingStage.FACTOR_PARAM_KEY FACTOR_STATE_KEY = 'an_factor' NORM_STATE_UPDATE_KEY = 'an_norm' def __init__(self): self._stage = TimesTwoEncodingStage() @property def name(self): """See base class.""" return 'adaptive_normalize' @property def compressible_tensors_keys(self): """See base class.""" return [self.ENCODED_VALUES_KEY] @property def commutes_with_sum(self): """See base class.""" return True @property def decode_needs_input_shape(self): """See base class.""" return False @property def state_update_aggregation_modes(self): """See base class.""" return { self.NORM_STATE_UPDATE_KEY: encoding_stage.StateAggregationMode.STACK } def initial_state(self): """See base class.""" return {self.FACTOR_STATE_KEY: tf.constant(1.0)} # pylint: disable=g-doc-args,g-doc-return-or-yield def update_state(self, state, state_update_tensors): """Updates the state (see base class). This method illustrates how the implementation can handle state update based on a single encoding, or based on multiple encodings collectively. As specified by `self.state_update_aggregation_modes`, the `NORM_STATE_UPDATE_KEY` values from `state_update_tensors` are to be stacked. That means that the corresponding input to this method should be a `Tensor` with each element corresponding to a single output of an encoding. So this can be a single element, in the one-to-many setting, or multiple elements, in the many-to-one setting. The `update_state` method can thus compute an arbitrary function of the relevant values. In this case, it maintains a rolling average of previous states, where the weight to be used depends on the number of updates received. Note that the specific implementation is not necessarily useful or efficient; it rather serves as an illustration of what can be done. """ num_updates = state_update_tensors[ self.NORM_STATE_UPDATE_KEY].shape.num_elements() norm_mean = tf.reduce_mean(state_update_tensors[self.NORM_STATE_UPDATE_KEY]) weight = 0.9**num_updates # Use a stronger weight for more updates. new_factor = ( weight * state[self.FACTOR_STATE_KEY] + (1 - weight) / norm_mean) return {self.FACTOR_STATE_KEY: new_factor} def get_params(self, state): """See base class.""" params = {self.FACTOR_PARAM_KEY: state[self.FACTOR_STATE_KEY]} return params, params def encode(self, x, encode_params): """See base class.""" return (self._stage.encode(x, encode_params), { self.NORM_STATE_UPDATE_KEY: tf.norm(x) }) def decode(self, encoded_tensors, decode_params, num_summands=None, shape=None): """See base class.""" return self._stage.decode(encoded_tensors, decode_params, num_summands, shape) def get_tensor_with_random_shape(expected_num_elements=10, source_fn=tf.random.uniform): """Returns a 1-D `Tensor` with random shape. The `Tensor` is created by creating a `Tensor` with `2*expected_num_elements` elements and including each element in the returned `Tensor` with probability `0.5`. Thus, the returned `Tensor` has an unknown, non-deterministic shape. Args: expected_num_elements: The number of elements the returned `Tensor` should have in expectation. source_fn: A Python callable that generates values for the returned `Tensor`. Returns: A 1-D `Tensor` with random shape.
""" return tf.squeeze( tf.gather( source_fn([2 * expected_num_elements]), tf.where( tf.less(tf.random_uniform([2 * expected_num_elements]), 0.5))), 1) def is_adaptive_stage(stage): """Returns `True` if `stage` is an `AdaptiveEncodingStageInterface`.""" if isinstance(stage, encoding_stage.EncodingStageInterface): assert not isinstance(stage, encoding_stage.AdaptiveEncodingStageInterface) return False elif isinstance(stage, encoding_stage.AdaptiveEncodingStageInterface): return True else: raise TypeError( 'The provided `stage` must be either `EncodingStageInterface` or ' '`AdaptiveEncodingStageInterface`.') def aggregate_state_update_tensors(stage, state_update_tensors): """Aggregates a collection of values for state update. This method in an trivial example of implementation of the aggregation modes, when all the values are available as numpy values simultaneously. Args: stage: An `AdaptiveEncodingStageInterface` object. state_update_tensors: A `list` of `dict` objects, each of which corresponds to `state_update_tensors` generated by the `stage.encode` method. Each dictionary thus needs to have the same structure, corresponding to `stage.state_update_aggregation_modes`, and contain numpy values. Returns: A dictionary of aggregated values. Raises: TypeError: If `stage` is not an `AdaptiveEncodingStageInterface`. """ def _aggregate(values, aggregation_mode): """Aggregates values according to aggregation mode.""" if aggregation_mode == encoding_stage.StateAggregationMode.SUM: return np.sum(np.stack(values), axis=0) elif aggregation_mode == encoding_stage.StateAggregationMode.MAX: return np.amax(np.stack(values), axis=0) elif aggregation_mode == encoding_stage.StateAggregationMode.MIN: return np.amin(np.stack(values), axis=0) elif aggregation_mode == encoding_stage.StateAggregationMode.STACK: return np.stack(values) if not is_adaptive_stage(stage): raise TypeError( 'The provided `stage` must be an `AdaptiveEncodingStageInterface`.') aggregated_state_update_tensors = {} for key, mode in six.iteritems(stage.state_update_aggregation_modes): aggregated_state_update_tensors[key] = _aggregate( [t[key] for t in state_update_tensors], mode) return aggregated_state_update_tensors
[ "tensorflow.sign", "tensorflow.concat", "tensorflow.cast", "tensorflow.get_default_graph", "tensorflow.nest.flatten", "tensorflow.Graph", "tensorflow.floor", "numpy.stack", "tensorflow.to_float", "tensorflow.tile", "tensorflow.norm", "tensorflow.is_tensor", "tensorflow.shape", "tensorflow.random.uniform", "numpy.sum", "tensorflow.constant", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.mod", "tensorflow.test.assert_equal_graph_def", "tensorflow.random_uniform", "tensorflow.abs" ]
tensorflow_model_optimization/python/core/internal/tensor_encoding/testing/test_utils.py
[(44, 'collections.namedtuple', 'collections.namedtuple', (['"""TestData"""', "['x', 'encoded_x', 'decoded_x', 'initial_state', 'state_update_tensors',\n 'updated_state']"], {}), False, 'import collections\n'), (67, 'six.add_metaclass', 'six.add_metaclass', (['ParameterizedABCMeta'], {}), False, 'import six\n'), (1048, 'six.moves.range', 'range', (['(num_elements - 1)'], {}), False, 'from six.moves import range\n'), (1051, 'tensorflow.to_float', 'tf.to_float', (['result'], {}), True, 'import tensorflow as tf\n'), (1369, 'six.iteritems', 'six.iteritems', (['stage.state_update_aggregation_modes'], {}), False, 'import six\n'), (155, 'tensorflow.test.assert_equal_graph_def', 'tf.test.assert_equal_graph_def', (['graph_def', 'new_graph_def'], {}), True, 'import tensorflow as tf\n'), (578, 'six.itervalues', 'six.itervalues', (['test_data.encoded_x'], {}), False, 'import six\n'), (662, 'numpy.sum', 'np.sum', (['[d.decoded_x for d in server_test_data]'], {'axis': '(0)'}), True, 'import numpy as np\n'), (903, 'tensorflow.tile', 'tf.tile', (['encoded_tensors[self.ENCODED_VALUES_KEY]', 'shape'], {}), True, 'import tensorflow as tf\n'), (1001, 'tensorflow.sign', 'tf.sign', (['x'], {}), True, 'import tensorflow as tf\n'), (1002, 'tensorflow.abs', 'tf.abs', (['x'], {}), True, 'import tensorflow as tf\n'), (1003, 'tensorflow.floor', 'tf.floor', (['abs_vals'], {}), True, 'import tensorflow as tf\n'), (1044, 'tensorflow.mod', 'tf.mod', (['(num * 13)', '(137)'], {}), True, 'import tensorflow as tf\n'), (1050, 'tensorflow.concat', 'tf.concat', (['[result, num]', '(0)'], {}), True, 'import tensorflow as tf\n'), (1100, 'tensorflow.reshape', 'tf.reshape', (['addend', 'x.shape'], {}), True, 'import tensorflow as tf\n'), (1114, 'tensorflow.reshape', 'tf.reshape', (['addend', 'x.shape'], {}), True, 'import tensorflow as tf\n'), (1270, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['state_update_tensors[self.NORM_STATE_UPDATE_KEY]'], {}), True, 'import tensorflow as tf\n'), (281, 'tensorflow_model_optimization.python.core.internal.tensor_encoding.utils.py_utils.static_or_dynamic_shape', 'py_utils.static_or_dynamic_shape', (['x'], {}), False, 'from tensorflow_model_optimization.python.core.internal.tensor_encoding.utils import py_utils\n'), (315, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (334, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (345, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (358, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (400, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (446, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (594, 'six.itervalues', 'six.itervalues', (['test_data.encoded_x'], {}), False, 'import six\n'), (605, 'six.itervalues', 'six.itervalues', (['stage.state_update_aggregation_modes'], {}), False, 'import six\n'), (608, 'six.itervalues', 'six.itervalues', (['test_data.initial_state'], {}), False, 'import six\n'), (610, 'six.itervalues', 'six.itervalues', (['test_data.state_update_tensors'], {}), False, 'import six\n'), (612, 'six.itervalues', 'six.itervalues', (['test_data.updated_state'], {}), False, 'import six\n'), (617, 'six.itervalues', 'six.itervalues', (['test_data.initial_state'], {}), False, 'import six\n'), (619, 'six.itervalues', 'six.itervalues', (['test_data.updated_state'], {}), False, 'import six\n'), (621, 'six.itervalues', 'six.itervalues', 
(['test_data.state_update_tensors'], {}), False, 'import six\n'), (665, 'numpy.sum', 'np.sum', (['[d.encoded_x[k] for d in server_test_data]'], {'axis': '(0)'}), True, 'import numpy as np\n'), (712, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {}), True, 'import tensorflow as tf\n'), (766, 'tensorflow.constant', 'tf.constant', (['(2.0)'], {}), True, 'import tensorflow as tf\n'), (893, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['x'], {'keepdims': '(True)'}), True, 'import tensorflow as tf\n'), (1046, 'tensorflow.mod', 'tf.mod', (['seed', '(136)'], {}), True, 'import tensorflow as tf\n'), (1092, 'tensorflow.random.uniform', 'tf.random.uniform', (['()'], {'maxval': 'tf.int32.max', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (1163, 'tensorflow.constant', 'tf.constant', (['(1)'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (1246, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {}), True, 'import tensorflow as tf\n'), (152, 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), True, 'import tensorflow as tf\n'), (154, 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), True, 'import tensorflow as tf\n'), (242, 'numpy.sum', 'np.sum', (['[d.x for d in server_test_data]'], {'axis': '(0)'}), True, 'import numpy as np\n'), (243, 'numpy.sum', 'np.sum', (['[d.decoded_x for d in server_test_data]'], {'axis': '(0)'}), True, 'import numpy as np\n'), (318, 'tensorflow_model_optimization.python.core.internal.tensor_encoding.utils.py_utils.static_or_dynamic_shape', 'py_utils.static_or_dynamic_shape', (['x'], {}), False, 'from tensorflow_model_optimization.python.core.internal.tensor_encoding.utils import py_utils\n'), (348, 'tensorflow_model_optimization.python.core.internal.tensor_encoding.utils.py_utils.static_or_dynamic_shape', 'py_utils.static_or_dynamic_shape', (['x'], {}), False, 'from tensorflow_model_optimization.python.core.internal.tensor_encoding.utils import py_utils\n'), (412, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (456, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (504, 'tensorflow_model_optimization.python.core.internal.tensor_encoding.utils.py_utils.split_dict_py_tf', 'py_utils.split_dict_py_tf', (['fetch'], {}), False, 'from tensorflow_model_optimization.python.core.internal.tensor_encoding.utils import py_utils\n'), (507, 'tensorflow.is_tensor', 'tf.is_tensor', (['fetch'], {}), True, 'import tensorflow as tf\n'), (522, 'tensorflow_model_optimization.python.core.internal.tensor_encoding.utils.py_utils.merge_dicts', 'py_utils.merge_dicts', (['value', 'py_fetches[i]'], {}), False, 'from tensorflow_model_optimization.python.core.internal.tensor_encoding.utils import py_utils\n'), (558, 'tensorflow.is_tensor', 'tf.is_tensor', (['t'], {}), True, 'import tensorflow as tf\n'), (846, 'tensorflow.cast', 'tf.cast', (['num_summands', 'b.dtype'], {}), True, 'import tensorflow as tf\n'), (1170, 'tensorflow.constant', 'tf.constant', (['(1)'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (1176, 'tensorflow.to_float', 'tf.to_float', (['state[self.ITERATION_STATE_KEY]'], {}), True, 'import tensorflow as tf\n'), (1284, 'tensorflow.norm', 'tf.norm', (['x'], {}), True, 'import tensorflow as tf\n'), (1357, 'numpy.stack', 'np.stack', (['values'], {}), True, 'import numpy as np\n'), (236, 'six.moves.range', 'range', (['(3)'], {}), False, 'from six.moves import range\n'), (558, 'tensorflow.nest.flatten', 'tf.nest.flatten', (['fetches'], {}), True, 'import 
tensorflow as tf\n'), (609, 'tensorflow.is_tensor', 'tf.is_tensor', (['tensor'], {}), True, 'import tensorflow as tf\n'), (611, 'tensorflow.is_tensor', 'tf.is_tensor', (['tensor'], {}), True, 'import tensorflow as tf\n'), (613, 'tensorflow.is_tensor', 'tf.is_tensor', (['tensor'], {}), True, 'import tensorflow as tf\n'), (667, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (1318, 'tensorflow.random_uniform', 'tf.random_uniform', (['[2 * expected_num_elements]'], {}), True, 'import tensorflow as tf\n'), (1359, 'numpy.stack', 'np.stack', (['values'], {}), True, 'import numpy as np\n'), (945, 'tensorflow.shape', 'tf.shape', (['x'], {}), True, 'import tensorflow as tf\n'), (1361, 'numpy.stack', 'np.stack', (['values'], {}), True, 'import numpy as np\n'), (1363, 'numpy.stack', 'np.stack', (['values'], {}), True, 'import numpy as np\n')]
wolfiex/transform
1a51a522fa23bedc34859035671715cd6b497902
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for tensorflow_transform.internal.schema_inference.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os # GOOGLE-INITIALIZATION import tensorflow as tf from tensorflow_transform import mappers from tensorflow_transform import schema_inference from tensorflow_transform import test_case from tensorflow_transform.tf_metadata import schema_utils_legacy from tensorflow_transform.tf_metadata import schema_utils from google.protobuf import text_format import unittest from tensorflow_metadata.proto.v0 import schema_pb2 def _make_tensors_with_override(): x = tf.compat.v1.placeholder(tf.int64, (None,)) schema_inference.set_tensor_schema_override(x, tf.constant(5), tf.constant(6)) return {'x': x} class SchemaInferenceTest(test_case.TransformTestCase): # pylint: disable=g-long-lambda @test_case.named_parameters( dict( testcase_name='fixed_len_int', make_tensors_fn=lambda: {'x': tf.compat.v1.placeholder(tf.int64, (None,))}, feature_spec={'x': tf.io.FixedLenFeature([], tf.int64)}), dict( testcase_name='fixed_len_string', make_tensors_fn=lambda: {'x': tf.compat.v1.placeholder(tf.string, (None,))}, feature_spec={'x': tf.io.FixedLenFeature([], tf.string)}), dict( testcase_name='fixed_len_float', make_tensors_fn=lambda: {'x': tf.compat.v1.placeholder(tf.float32, (None,))}, feature_spec={'x': tf.io.FixedLenFeature([], tf.float32)}), dict( testcase_name='override', make_tensors_fn=_make_tensors_with_override, feature_spec={'x': tf.io.FixedLenFeature([], tf.int64)}, domains={'x': schema_pb2.IntDomain(is_categorical=True)}), dict( testcase_name='override_with_session', make_tensors_fn=_make_tensors_with_override, feature_spec={'x': tf.io.FixedLenFeature([], tf.int64)}, domains={ 'x': schema_pb2.IntDomain(min=5, max=6, is_categorical=True) }, create_session=True)) # pylint: enable=g-long-lambda def test_infer_feature_schema(self, make_tensors_fn, feature_spec, domains=None, create_session=False): with tf.compat.v1.Graph().as_default() as graph: tensors = make_tensors_fn() if create_session: with tf.compat.v1.Session(graph=graph) as session: schema = schema_inference.infer_feature_schema(tensors, graph, session) else: schema = schema_inference.infer_feature_schema(tensors, graph) expected_schema = schema_utils.schema_from_feature_spec( feature_spec, domains) self.assertEqual(schema, expected_schema) def test_infer_feature_schema_bad_rank(self): with tf.compat.v1.Graph().as_default() as graph: tensors = { 'a': tf.compat.v1.placeholder(tf.float32, ()), } with self.assertRaises(ValueError): schema_inference.infer_feature_schema(tensors, graph) def test_bucketization_annotation(self): # TODO(b/132098015): Schema annotations aren't yet supported in OSS builds. 
    # pylint: disable=g-import-not-at-top
    try:
      from tensorflow_transform import annotations_pb2
    except ImportError:
      return
    # pylint: enable=g-import-not-at-top
    with tf.compat.v1.Graph().as_default() as graph:
      inputs = {
          'foo': tf.convert_to_tensor([0, 1, 2, 3]),
          'bar': tf.convert_to_tensor([0, 2, 0, 2]),
      }
      boundaries_foo = tf.expand_dims(tf.convert_to_tensor([.5, 1.5]), axis=0)
      boundaries_bar = tf.expand_dims(tf.convert_to_tensor([.1, .2]), axis=0)
      outputs = {}
      # tft.apply_buckets will annotate the feature in the output schema to
      # indicate the bucket boundaries that were applied.
      outputs['Bucketized_foo'] = mappers.apply_buckets(inputs['foo'],
                                                        boundaries_foo)
      outputs['Bucketized_bar'] = mappers.apply_buckets(inputs['bar'],
                                                        boundaries_bar)
      # Create a session to actually evaluate the annotations and extract
      # the output schema with annotations applied.
      with tf.compat.v1.Session(graph=graph) as session:
        schema = schema_inference.infer_feature_schema(outputs, graph, session)

      self.assertLen(schema.feature, 2)
      for feature in schema.feature:
        self.assertLen(feature.annotation.extra_metadata, 1)
        for annotation in feature.annotation.extra_metadata:
          # Extract the annotated message and validate its contents
          message = annotations_pb2.BucketBoundaries()
          annotation.Unpack(message)
          if feature.name == 'Bucketized_foo':
            self.assertAllClose(list(message.boundaries), [.5, 1.5])
          elif feature.name == 'Bucketized_bar':
            self.assertAllClose(list(message.boundaries), [.1, .2])
          else:
            raise RuntimeError('Unexpected features in schema')

  def test_global_annotation(self):
    # TODO(b/132098015): Schema annotations aren't yet supported in OSS builds.
    # pylint: disable=g-import-not-at-top
    try:
      from tensorflow_transform import annotations_pb2
    except ImportError:
      return
    # pylint: enable=g-import-not-at-top
    with tf.compat.v1.Graph().as_default() as graph:
      outputs = {
          'foo': tf.convert_to_tensor([0, 1, 2, 3], dtype=tf.int64),
          'bar': tf.convert_to_tensor([0, 2, 0, 2], dtype=tf.int64),
      }
      # Annotate an arbitrary proto at the schema level (not sure what global
      # schema boundaries would mean, but hey I'm just a test).
boundaries = tf.constant([[1.0]]) message_type = annotations_pb2.BucketBoundaries.DESCRIPTOR.full_name sizes = tf.expand_dims([tf.size(boundaries)], axis=0) message_proto = tf.raw_ops.EncodeProto( sizes=sizes, values=[tf.cast(boundaries, tf.float32)], field_names=['boundaries'], message_type=message_type)[0] type_url = os.path.join('type.googleapis.com', message_type) schema_inference.annotate(type_url, message_proto) with tf.compat.v1.Session(graph=graph) as session: schema = schema_inference.infer_feature_schema(outputs, graph, session) self.assertLen(schema.annotation.extra_metadata, 1) for annotation in schema.annotation.extra_metadata: # Extract the annotated message and validate its contents message = annotations_pb2.BucketBoundaries() annotation.Unpack(message) self.assertAllClose(list(message.boundaries), [1]) def test_infer_feature_schema_with_ragged_tensor(self): with tf.compat.v1.Graph().as_default() as graph: outputs = { 'foo': tf.RaggedTensor.from_row_splits( values=tf.constant([3, 1, 4, 1, 5, 9, 2, 6], tf.int64), row_splits=[0, 4, 4, 7, 8, 8]), } with tf.compat.v1.Session(graph=graph) as session: schema = schema_inference.infer_feature_schema(outputs, graph, session) expected_schema_ascii = """feature { name: "foo" type: INT annotation { tag: "ragged_tensor" } } """ expected_schema = text_format.Parse(expected_schema_ascii, schema_pb2.Schema()) schema_utils_legacy.set_generate_legacy_feature_spec(expected_schema, False) self.assertProtoEquals(expected_schema, schema) with self.assertRaisesRegexp(ValueError, 'Feature "foo" had tag "ragged_tensor"'): schema_utils.schema_as_feature_spec(schema) if __name__ == '__main__': unittest.main()
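
# Illustrative sketch (not part of the original tests, and hedged as an
# assumption about the public API exercised above): the feature-spec round
# trip these tests rely on looks roughly like
#
#   spec = {'x': tf.io.FixedLenFeature([], tf.int64)}
#   schema = schema_utils.schema_from_feature_spec(spec)
#   assert schema_utils.schema_as_feature_spec(schema).feature_spec == spec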
[ "tensorflow.convert_to_tensor", "tensorflow.constant", "tensorflow.cast", "tensorflow.compat.v1.Session", "tensorflow.io.FixedLenFeature", "tensorflow.compat.v1.placeholder", "tensorflow.compat.v1.Graph", "tensorflow.size" ]
tensorflow_transform/schema_inference_test.py
[(37, 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.int64', '(None,)'], {}), True, 'import tensorflow as tf\n'), (205, 'unittest.main', 'unittest.main', ([], {}), False, 'import unittest\n'), (38, 'tensorflow.constant', 'tf.constant', (['(5)'], {}), True, 'import tensorflow as tf\n'), (38, 'tensorflow.constant', 'tf.constant', (['(6)'], {}), True, 'import tensorflow as tf\n'), (89, 'tensorflow_transform.tf_metadata.schema_utils.schema_from_feature_spec', 'schema_utils.schema_from_feature_spec', (['feature_spec', 'domains'], {}), False, 'from tensorflow_transform.tf_metadata import schema_utils\n'), (87, 'tensorflow_transform.schema_inference.infer_feature_schema', 'schema_inference.infer_feature_schema', (['tensors', 'graph'], {}), False, 'from tensorflow_transform import schema_inference\n'), (99, 'tensorflow_transform.schema_inference.infer_feature_schema', 'schema_inference.infer_feature_schema', (['tensors', 'graph'], {}), False, 'from tensorflow_transform import schema_inference\n'), (120, 'tensorflow_transform.mappers.apply_buckets', 'mappers.apply_buckets', (["inputs['foo']", 'boundaries_foo'], {}), False, 'from tensorflow_transform import mappers\n'), (122, 'tensorflow_transform.mappers.apply_buckets', 'mappers.apply_buckets', (["inputs['bar']", 'boundaries_bar'], {}), False, 'from tensorflow_transform import mappers\n'), (159, 'tensorflow.constant', 'tf.constant', (['[[1.0]]'], {}), True, 'import tensorflow as tf\n'), (165, 'os.path.join', 'os.path.join', (['"""type.googleapis.com"""', 'message_type'], {}), False, 'import os\n'), (166, 'tensorflow_transform.schema_inference.annotate', 'schema_inference.annotate', (['type_url', 'message_proto'], {}), False, 'from tensorflow_transform import schema_inference\n'), (84, 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'graph': 'graph'}), True, 'import tensorflow as tf\n'), (85, 'tensorflow_transform.schema_inference.infer_feature_schema', 'schema_inference.infer_feature_schema', (['tensors', 'graph', 'session'], {}), False, 'from tensorflow_transform import schema_inference\n'), (96, 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32', '()'], {}), True, 'import tensorflow as tf\n'), (111, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[0, 1, 2, 3]'], {}), True, 'import tensorflow as tf\n'), (112, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[0, 2, 0, 2]'], {}), True, 'import tensorflow as tf\n'), (114, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[0.5, 1.5]'], {}), True, 'import tensorflow as tf\n'), (115, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[0.1, 0.2]'], {}), True, 'import tensorflow as tf\n'), (126, 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'graph': 'graph'}), True, 'import tensorflow as tf\n'), (127, 'tensorflow_transform.schema_inference.infer_feature_schema', 'schema_inference.infer_feature_schema', (['outputs', 'graph', 'session'], {}), False, 'from tensorflow_transform import schema_inference\n'), (153, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[0, 1, 2, 3]'], {'dtype': 'tf.int64'}), True, 'import tensorflow as tf\n'), (154, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[0, 2, 0, 2]'], {'dtype': 'tf.int64'}), True, 'import tensorflow as tf\n'), (168, 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'graph': 'graph'}), True, 'import tensorflow as tf\n'), (169, 'tensorflow_transform.schema_inference.infer_feature_schema', 
'schema_inference.infer_feature_schema', (['outputs', 'graph', 'session'], {}), False, 'from tensorflow_transform import schema_inference\n'), (184, 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'graph': 'graph'}), True, 'import tensorflow as tf\n'), (185, 'tensorflow_transform.schema_inference.infer_feature_schema', 'schema_inference.infer_feature_schema', (['outputs', 'graph', 'session'], {}), False, 'from tensorflow_transform import schema_inference\n'), (196, 'tensorflow_transform.tf_metadata.schema_utils_legacy.set_generate_legacy_feature_spec', 'schema_utils_legacy.set_generate_legacy_feature_spec', (['expected_schema', '(False)'], {}), False, 'from tensorflow_transform.tf_metadata import schema_utils_legacy\n'), (80, 'tensorflow.compat.v1.Graph', 'tf.compat.v1.Graph', ([], {}), True, 'import tensorflow as tf\n'), (50, 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (55, 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), True, 'import tensorflow as tf\n'), (60, 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (64, 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (65, 'tensorflow_metadata.proto.v0.schema_pb2.IntDomain', 'schema_pb2.IntDomain', ([], {'is_categorical': '(True)'}), False, 'from tensorflow_metadata.proto.v0 import schema_pb2\n'), (69, 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (71, 'tensorflow_metadata.proto.v0.schema_pb2.IntDomain', 'schema_pb2.IntDomain', ([], {'min': '(5)', 'max': '(6)', 'is_categorical': '(True)'}), False, 'from tensorflow_metadata.proto.v0 import schema_pb2\n'), (94, 'tensorflow.compat.v1.Graph', 'tf.compat.v1.Graph', ([], {}), True, 'import tensorflow as tf\n'), (109, 'tensorflow.compat.v1.Graph', 'tf.compat.v1.Graph', ([], {}), True, 'import tensorflow as tf\n'), (151, 'tensorflow.compat.v1.Graph', 'tf.compat.v1.Graph', ([], {}), True, 'import tensorflow as tf\n'), (161, 'tensorflow.size', 'tf.size', (['boundaries'], {}), True, 'import tensorflow as tf\n'), (173, 'tensorflow_transform.annotations_pb2.BucketBoundaries', 'annotations_pb2.BucketBoundaries', ([], {}), False, 'from tensorflow_transform import annotations_pb2\n'), (178, 'tensorflow.compat.v1.Graph', 'tf.compat.v1.Graph', ([], {}), True, 'import tensorflow as tf\n'), (195, 'tensorflow_metadata.proto.v0.schema_pb2.Schema', 'schema_pb2.Schema', ([], {}), False, 'from tensorflow_metadata.proto.v0 import schema_pb2\n'), (201, 'tensorflow_transform.tf_metadata.schema_utils.schema_as_feature_spec', 'schema_utils.schema_as_feature_spec', (['schema'], {}), False, 'from tensorflow_transform.tf_metadata import schema_utils\n'), (49, 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.int64', '(None,)'], {}), True, 'import tensorflow as tf\n'), (54, 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.string', '(None,)'], {}), True, 'import tensorflow as tf\n'), (59, 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32', '(None,)'], {}), True, 'import tensorflow as tf\n'), (134, 'tensorflow_transform.annotations_pb2.BucketBoundaries', 'annotations_pb2.BucketBoundaries', ([], {}), False, 'from tensorflow_transform import annotations_pb2\n'), (181, 'tensorflow.constant', 'tf.constant', (['[3, 1, 4, 1, 5, 
9, 2, 6]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (163, 'tensorflow.cast', 'tf.cast', (['boundaries', 'tf.float32'], {}), True, 'import tensorflow as tf\n')]
ixlan/Deep-learning
246e5285b6fb6508814762fddfd00d54515ccf79
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import numpy as np

from support import new_conv_layer, new_fc_layer, flatten_layer


class Siamese(object):
    """
    This class implements a siamese convolutional neural network in
    TensorFlow. The term "siamese" refers to architectures which incorporate
    two branches of convolutional networks parametrized identically (i.e.
    weights are shared). In general, these graphs accept two input tensors
    and a label.
    """

    def inference(self, x, reuse=False):
        """
        Defines the model used for inference. Output of this model is fed to
        the objective (or loss) function defined for the task.

        Here we recommend using variable and name scopes in order to make
        your graph more intelligible for later references in TensorBoard and
        so on. You can define a name scope for the whole model or for each
        operator group (e.g. conv+pool+relu) individually to group them by
        name. Variable scopes are essential components in TensorFlow for
        parameter sharing. You can use the variable scope to
        activate/deactivate 'variable reuse'.

        Args:
           x: 4D float Tensor of size [batch_size, input_height, input_width,
              input_channels]
           reuse: Python bool to switch reusing on/off.

        Returns:
           l2_out: L2-normalized output tensor of shape [batch_size, 192]

        Hint: Parameter reuse indicates whether the inference graph should
        use parameter sharing or not. You can study how to implement
        parameter sharing in TensorFlow from the following sources:

        https://www.tensorflow.org/versions/r0.11/how_tos/variable_scope/index.html
        """
        with tf.variable_scope('Siamese', reuse=reuse):
            ########################
            # PUT YOUR CODE HERE  #
            ########################
            logits = self.__forward_pass(x, reuse)
            l2_out = tf.nn.l2_normalize(logits, dim=1)
            ########################

        return l2_out

    def __forward_pass(self, x, reuse):
        fc_size1 = 384
        fc_size2 = 192
        # convolutional layers
        with tf.variable_scope('conv1'):
            layer1, weights1 = new_conv_layer(x, name="conv1",
                                              num_input_channels=3,
                                              num_filters=64, filter_size=5,
                                              ac_fun=tf.nn.relu,
                                              pool_ksize=[1, 3, 3, 1])
        with tf.variable_scope('conv2'):
            layer2, weights2 = new_conv_layer(input=layer1, name="conv2",
                                              num_input_channels=64,
                                              num_filters=64, filter_size=5,
                                              ac_fun=tf.nn.relu,
                                              pool_ksize=[1, 3, 3, 1])
        with tf.name_scope('flatten'):
            layer3, num_features = flatten_layer(layer2)
        # fully connected layers
        with tf.variable_scope('fc1'):
            layer4, weights4 = new_fc_layer(input=layer3, name="fc1",
                                            num_inputs=num_features,
                                            num_outputs=fc_size1)
            # print(layer4)
        with tf.variable_scope('fc2'):
            logits, weights5 = new_fc_layer(input=layer4, name="fc2",
                                            num_inputs=fc_size1,
                                            num_outputs=fc_size2)
        # add histograms
        if not reuse:
            tf.histogram_summary(weights1.name, weights1)
            tf.histogram_summary(weights2.name, weights2)
        return logits

    def loss(self, channel_1, channel_2, label, margin):
        """
        Defines the contrastive loss. This loss ties the outputs of
        the branches to compute the following:

               L =  Y * d^2 + (1-Y) * max(margin - d^2, 0)

        where d is the L2 distance between the given input pair s.t.
        d = ||x_1 - x_2||_2 and Y is the label associated with the pair of
        input tensors. Y is 1 if the inputs belong to the same class in
        CIFAR10 and is 0 otherwise.

        For more information please see:
        http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf

        Args:
            channel_1: output of first channel (i.e. branch_1),
                       tensor of size [batch_size, 192]
            channel_2: output of second channel (i.e. branch_2),
                       tensor of size [batch_size, 192]
            label: Tensor of shape [batch_size]
            margin: Margin of the contrastive loss

        Returns:
            loss: scalar float Tensor
        """
        ########################
        # PUT YOUR CODE HERE  #
        ########################
        D = (tf.reduce_sum((channel_1 - channel_2)**2, reduction_indices=1))**0.5
        zeros = tf.fill(tf.shape(D), 0.0)
        # loss = 0.5*(label*(D**2.) + (1-label) * (tf.reduce_max([zeros, margin - D], reduction_indices=0))**2)
        loss = label*(D**2) + (1-label) * (tf.reduce_max([zeros, margin - D**2], 0))
        ########################
        # END OF YOUR CODE    #
        ########################
        return loss
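
# A quick numeric sanity check of the loss above (a sketch added for
# illustration, not part of the original assignment code). Writing
# d2 = ||x_1 - x_2||_2^2, the implemented per-example loss is
#
#   label == 1 (same class):       loss = d2
#   label == 0 (different class):  loss = max(margin - d2, 0)
#
# For example, with margin = 0.5 and d2 = 0.2, a positive pair contributes
# 0.2 and a negative pair contributes 0.3; once d2 >= margin, negative pairs
# contribute nothing, so dissimilar pairs are only pushed apart up to the
# margin.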
[ "tensorflow.nn.l2_normalize", "tensorflow.reduce_max", "tensorflow.shape", "tensorflow.reduce_sum", "tensorflow.name_scope", "tensorflow.histogram_summary", "tensorflow.variable_scope" ]
hw3/siamese.py
[(46, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Siamese"""'], {'reuse': 'reuse'}), True, 'import tensorflow as tf\n'), (52, 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['logits'], {'dim': '(1)'}), True, 'import tensorflow as tf\n'), (63, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conv1"""'], {}), True, 'import tensorflow as tf\n'), (64, 'support.new_conv_layer', 'new_conv_layer', (['x'], {'name': '"""conv1"""', 'num_input_channels': '(3)', 'num_filters': '(64)', 'filter_size': '(5)', 'ac_fun': 'tf.nn.relu', 'pool_ksize': '[1, 3, 3, 1]'}), False, 'from support import new_conv_layer, new_fc_layer, flatten_layer\n'), (66, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conv2"""'], {}), True, 'import tensorflow as tf\n'), (67, 'support.new_conv_layer', 'new_conv_layer', ([], {'input': 'layer1', 'name': '"""conv2"""', 'num_input_channels': '(64)', 'num_filters': '(64)', 'filter_size': '(5)', 'ac_fun': 'tf.nn.relu', 'pool_ksize': '[1, 3, 3, 1]'}), False, 'from support import new_conv_layer, new_fc_layer, flatten_layer\n'), (69, 'tensorflow.name_scope', 'tf.name_scope', (['"""flatten"""'], {}), True, 'import tensorflow as tf\n'), (70, 'support.flatten_layer', 'flatten_layer', (['layer2'], {}), False, 'from support import new_conv_layer, new_fc_layer, flatten_layer\n'), (72, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""fc1"""'], {}), True, 'import tensorflow as tf\n'), (73, 'support.new_fc_layer', 'new_fc_layer', ([], {'input': 'layer3', 'name': '"""fc1"""', 'num_inputs': 'num_features', 'num_outputs': 'fc_size1'}), False, 'from support import new_conv_layer, new_fc_layer, flatten_layer\n'), (75, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""fc2"""'], {}), True, 'import tensorflow as tf\n'), (76, 'support.new_fc_layer', 'new_fc_layer', ([], {'input': 'layer4', 'name': '"""fc2"""', 'num_inputs': 'fc_size1', 'num_outputs': 'fc_size2'}), False, 'from support import new_conv_layer, new_fc_layer, flatten_layer\n'), (80, 'tensorflow.histogram_summary', 'tf.histogram_summary', (['weights1.name', 'weights1'], {}), True, 'import tensorflow as tf\n'), (81, 'tensorflow.histogram_summary', 'tf.histogram_summary', (['weights2.name', 'weights2'], {}), True, 'import tensorflow as tf\n'), (116, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['((channel_1 - channel_2) ** 2)'], {'reduction_indices': '(1)'}), True, 'import tensorflow as tf\n'), (117, 'tensorflow.shape', 'tf.shape', (['D'], {}), True, 'import tensorflow as tf\n'), (119, 'tensorflow.reduce_max', 'tf.reduce_max', (['[zeros, margin - D ** 2]', '(0)'], {}), True, 'import tensorflow as tf\n')]
manueltonneau/bert
75d1246f497d1075ba0adefbc957cfd7d3dc6667
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BERT finetuning runner.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import csv import os import modeling import optimization import tokenization import tensorflow as tf flags = tf.flags FLAGS = flags.FLAGS ## Required parameters flags.DEFINE_string( "data_dir", None, "The input data dir. Should contain the .tsv files (or other data files) " "for the task.") flags.DEFINE_string( "bert_config_file", None, "The config json file corresponding to the pre-trained BERT model. " "This specifies the model architecture.") flags.DEFINE_string("task_name", None, "The name of the task to train.") flags.DEFINE_string("vocab_file", None, "The vocabulary file that the BERT model was trained on.") flags.DEFINE_string( "output_dir", None, "The output directory where the model checkpoints will be written.") ## Other parameters flags.DEFINE_string( "init_checkpoint", None, "Initial checkpoint (usually from a pre-trained BERT model).") flags.DEFINE_bool( "do_lower_case", True, "Whether to lower case the input text. Should be True for uncased " "models and False for cased models.") flags.DEFINE_integer( "max_seq_length", 128, "The maximum total input sequence length after WordPiece tokenization. " "Sequences longer than this will be truncated, and sequences shorter " "than this will be padded.") flags.DEFINE_bool("do_train", False, "Whether to run training.") flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.") flags.DEFINE_bool( "do_predict", False, "Whether to run the model in inference mode on the test set.") flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.") flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.") flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.") flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.") flags.DEFINE_float("num_train_epochs", 3.0, "Total number of training epochs to perform.") flags.DEFINE_float( "warmup_proportion", 0.1, "Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10% of training.") flags.DEFINE_integer("save_checkpoints_steps", 1000, "How often to save the model checkpoint.") flags.DEFINE_integer("iterations_per_loop", 1000, "How many steps to make in each estimator call.") flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") tf.flags.DEFINE_string( "tpu_name", None, "The Cloud TPU to use for training. This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " "url.") tf.flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") tf.flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. 
If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.") flags.DEFINE_integer( "num_tpu_cores", 8, "Only used if `use_tpu` is True. Total number of TPU cores to use.") class InputExample(object): """A single training/test example for simple sequence classification.""" def __init__(self, guid, text_a, text_b=None, label=None): """Constructs a InputExample. Args: guid: Unique id for the example. text_a: string. The untokenized text of the first sequence. For single sequence tasks, only this sequence must be specified. text_b: (Optional) string. The untokenized text of the second sequence. Only must be specified for sequence pair tasks. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. """ self.guid = guid self.text_a = text_a self.text_b = text_b self.label = label class PaddingInputExample(object): """Fake example so the num input examples is a multiple of the batch size. When running eval/predict on the TPU, we need to pad the number of examples to be a multiple of the batch size, because the TPU requires a fixed batch size. The alternative is to drop the last batch, which is bad because it means the entire output data won't be generated. We use this class instead of `None` because treating `None` as padding battches could cause silent errors. """ class InputFeatures(object): """A single set of features of data.""" def __init__(self, input_ids, input_mask, segment_ids, label_id, is_real_example=True): self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.label_id = label_id self.is_real_example = is_real_example class DataProcessor(object): """Base class for data converters for sequence classification data sets.""" def get_train_examples(self, data_dir): """Gets a collection of `InputExample`s for the train set.""" raise NotImplementedError() def get_dev_examples(self, data_dir): """Gets a collection of `InputExample`s for the dev set.""" raise NotImplementedError() def get_test_examples(self, data_dir): """Gets a collection of `InputExample`s for prediction.""" raise NotImplementedError() def get_labels(self): """Gets the list of labels for this data set.""" raise NotImplementedError() @classmethod def _read_tsv(cls, input_file, quotechar=None): """Reads a tab separated value file.""" with tf.gfile.Open(input_file, "r") as f: reader = csv.reader(f, delimiter="\t", quotechar=quotechar) lines = [] for line in reader: lines.append(line) return lines class XnliProcessor(DataProcessor): """Processor for the XNLI data set.""" def __init__(self): self.language = "zh" def get_train_examples(self, data_dir): """See base class.""" lines = self._read_tsv( os.path.join(data_dir, "multinli", "multinli.train.%s.tsv" % self.language)) examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "train-%d" % (i) text_a = tokenization.convert_to_unicode(line[0]) text_b = tokenization.convert_to_unicode(line[1]) label = tokenization.convert_to_unicode(line[2]) if label == tokenization.convert_to_unicode("contradictory"): label = tokenization.convert_to_unicode("contradiction") examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def get_dev_examples(self, data_dir): """See base class.""" lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv")) examples = [] for (i, line) in enumerate(lines): if i == 0: 
continue guid = "dev-%d" % (i) language = tokenization.convert_to_unicode(line[0]) if language != tokenization.convert_to_unicode(self.language): continue text_a = tokenization.convert_to_unicode(line[6]) text_b = tokenization.convert_to_unicode(line[7]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def get_labels(self): """See base class.""" return ["contradiction", "entailment", "neutral"] class MnliProcessor(DataProcessor): """Processor for the MultiNLI data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test") def get_labels(self): """See base class.""" return ["contradiction", "entailment", "neutral"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0])) text_a = tokenization.convert_to_unicode(line[8]) text_b = tokenization.convert_to_unicode(line[9]) if set_type == "test": label = "contradiction" else: label = tokenization.convert_to_unicode(line[-1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class MrpcProcessor(DataProcessor): """Processor for the MRPC data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, i) text_a = tokenization.convert_to_unicode(line[3]) text_b = tokenization.convert_to_unicode(line[4]) if set_type == "test": label = "0" else: label = tokenization.convert_to_unicode(line[0]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class ColaProcessor(DataProcessor): """Processor for the CoLA data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): 
# Only the test set has a header if set_type == "test" and i == 0: continue guid = "%s-%s" % (set_type, i) if set_type == "test": text_a = tokenization.convert_to_unicode(line[1]) label = "0" else: text_a = tokenization.convert_to_unicode(line[3]) label = tokenization.convert_to_unicode(line[1]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples def convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer): """Converts a single `InputExample` into a single `InputFeatures`.""" if isinstance(example, PaddingInputExample): return InputFeatures( input_ids=[0] * max_seq_length, input_mask=[0] * max_seq_length, segment_ids=[0] * max_seq_length, label_id=0, is_real_example=False) label_map = {} for (i, label) in enumerate(label_list): label_map[label] = i tokens_a = tokenizer.tokenize(example.text_a) tokens_b = None if example.text_b: tokens_b = tokenizer.tokenize(example.text_b) if tokens_b: # Modifies `tokens_a` and `tokens_b` in place so that the total # length is less than the specified length. # Account for [CLS], [SEP], [SEP] with "- 3" _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) else: # Account for [CLS] and [SEP] with "- 2" if len(tokens_a) > max_seq_length - 2: tokens_a = tokens_a[0:(max_seq_length - 2)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens = [] segment_ids = [] tokens.append("[CLS]") segment_ids.append(0) for token in tokens_a: tokens.append(token) segment_ids.append(0) tokens.append("[SEP]") segment_ids.append(0) if tokens_b: for token in tokens_b: tokens.append(token) segment_ids.append(1) tokens.append("[SEP]") segment_ids.append(1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. 
while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length label_id = label_map[example.label] if ex_index < 5: tf.logging.info("*** Example ***") tf.logging.info("guid: %s" % (example.guid)) tf.logging.info("tokens: %s" % " ".join( [tokenization.printable_text(x) for x in tokens])) tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) tf.logging.info("label: %s (id = %d)" % (example.label, label_id)) feature = InputFeatures( input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id, is_real_example=True) return feature def file_based_convert_examples_to_features( examples, label_list, max_seq_length, tokenizer, output_file): """Convert a set of `InputExample`s to a TFRecord file.""" writer = tf.python_io.TFRecordWriter(output_file) for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: tf.logging.info("Writing example %d of %d" % (ex_index, len(examples))) feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer) def create_int_feature(values): f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) return f features = collections.OrderedDict() features["input_ids"] = create_int_feature(feature.input_ids) features["input_mask"] = create_int_feature(feature.input_mask) features["segment_ids"] = create_int_feature(feature.segment_ids) features["label_ids"] = create_int_feature([feature.label_id]) features["is_real_example"] = create_int_feature( [int(feature.is_real_example)]) tf_example = tf.train.Example(features=tf.train.Features(feature=features)) writer.write(tf_example.SerializeToString()) writer.close() def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" name_to_features = { "input_ids": tf.FixedLenFeature([seq_length], tf.int64), "input_mask": tf.FixedLenFeature([seq_length], tf.int64), "segment_ids": tf.FixedLenFeature([seq_length], tf.int64), "label_ids": tf.FixedLenFeature([], tf.int64), "is_real_example": tf.FixedLenFeature([], tf.int64), } def _decode_record(record, name_to_features): """Decodes a record to a TensorFlow example.""" example = tf.parse_single_example(record, name_to_features) # tf.Example only supports tf.int64, but the TPU only supports tf.int32. # So cast all int64 to int32. for name in list(example.keys()): t = example[name] if t.dtype == tf.int64: t = tf.to_int32(t) example[name] = t return example def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. d = tf.data.TFRecordDataset(input_file) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.apply( tf.contrib.data.map_and_batch( lambda record: _decode_record(record, name_to_features), batch_size=batch_size, drop_remainder=drop_remainder)) return d return input_fn def _truncate_seq_pair(tokens_a, tokens_b, max_length): """Truncates a sequence pair in place to the maximum length.""" # This is a simple heuristic which will always truncate the longer sequence # one token at a time. 
This makes more sense than truncating an equal percent # of tokens from each, since if one sequence is very short then each token # that's truncated likely contains more information than a longer sequence. while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() def create_model(bert_config, is_training, input_ids, input_mask, segment_ids, labels, num_labels, use_one_hot_embeddings): """Creates a classification model.""" model = modeling.BertModel( config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings) # In the demo, we are doing a simple classification task on the entire # segment. # # If you want to use the token-level output, use model.get_sequence_output() # instead. output_layer = model.get_pooled_output() hidden_size = output_layer.shape[-1].value output_weights = tf.get_variable( "output_weights", [num_labels, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02)) output_bias = tf.get_variable( "output_bias", [num_labels], initializer=tf.zeros_initializer()) with tf.variable_scope("loss"): if is_training: # I.e., 0.1 dropout output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) logits = tf.matmul(output_layer, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) probabilities = tf.nn.softmax(logits, axis=-1) log_probs = tf.nn.log_softmax(logits, axis=-1) one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = tf.reduce_mean(per_example_loss) return (loss, per_example_loss, logits, probabilities) def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" tf.logging.info("*** Features ***") for name in sorted(features.keys()): tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] label_ids = features["label_ids"] is_real_example = None if "is_real_example" in features: is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32) else: is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32) is_training = (mode == tf.estimator.ModeKeys.TRAIN) (total_loss, per_example_loss, logits, probabilities) = create_model( bert_config, is_training, input_ids, input_mask, segment_ids, label_ids, num_labels, use_one_hot_embeddings) tvars = tf.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint, different_vocabulary=False) if use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, assignment_map) tf.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) output_spec = None if mode 
== tf.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn) elif mode == tf.estimator.ModeKeys.EVAL: def metric_fn(per_example_loss, label_ids, logits, is_real_example): predictions = tf.argmax(logits, axis=-1, output_type=tf.int32) accuracy = tf.metrics.accuracy( labels=label_ids, predictions=predictions, weights=is_real_example) loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example) return { "eval_accuracy": accuracy, "eval_loss": loss, } eval_metrics = (metric_fn, [per_example_loss, label_ids, logits, is_real_example]) output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn) else: output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, predictions={"probabilities": probabilities}, scaffold_fn=scaffold_fn) return output_spec return model_fn # This function is not used by this file but is still used by the Colab and # people who depend on it. def input_fn_builder(features, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" all_input_ids = [] all_input_mask = [] all_segment_ids = [] all_label_ids = [] for feature in features: all_input_ids.append(feature.input_ids) all_input_mask.append(feature.input_mask) all_segment_ids.append(feature.segment_ids) all_label_ids.append(feature.label_id) def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] num_examples = len(features) # This is for demo purposes and does NOT scale to large data sets. We do # not use Dataset.from_generator() because that uses tf.py_func which is # not TPU compatible. The right way to load data is with TFRecordReader. d = tf.data.Dataset.from_tensor_slices({ "input_ids": tf.constant( all_input_ids, shape=[num_examples, seq_length], dtype=tf.int32), "input_mask": tf.constant( all_input_mask, shape=[num_examples, seq_length], dtype=tf.int32), "segment_ids": tf.constant( all_segment_ids, shape=[num_examples, seq_length], dtype=tf.int32), "label_ids": tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32), }) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder) return d return input_fn # This function is not used by this file but is still used by the Colab and # people who depend on it. 
def convert_examples_to_features(examples, label_list, max_seq_length,
                                 tokenizer):
  """Convert a set of `InputExample`s to a list of `InputFeatures`."""

  features = []
  for (ex_index, example) in enumerate(examples):
    if ex_index % 10000 == 0:
      tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))

    feature = convert_single_example(ex_index, example, label_list,
                                     max_seq_length, tokenizer)

    features.append(feature)
  return features


def main(_):
  tf.logging.set_verbosity(tf.logging.INFO)

  processors = {
      "cola": ColaProcessor,
      "mnli": MnliProcessor,
      "mrpc": MrpcProcessor,
      "xnli": XnliProcessor,
  }

  tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
                                                FLAGS.init_checkpoint)

  if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
    raise ValueError(
        "At least one of `do_train`, `do_eval` or `do_predict' must be True.")

  bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)

  if FLAGS.max_seq_length > bert_config.max_position_embeddings:
    raise ValueError(
        "Cannot use sequence length %d because the BERT model "
        "was only trained up to sequence length %d" %
        (FLAGS.max_seq_length, bert_config.max_position_embeddings))

  tf.gfile.MakeDirs(FLAGS.output_dir)

  task_name = FLAGS.task_name.lower()

  if task_name not in processors:
    raise ValueError("Task not found: %s" % (task_name))

  processor = processors[task_name]()

  label_list = processor.get_labels()

  tokenizer = tokenization.FullTokenizer(
      vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)

  tpu_cluster_resolver = None
  if FLAGS.use_tpu and FLAGS.tpu_name:
    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)

  is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
  run_config = tf.contrib.tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      master=FLAGS.master,
      model_dir=FLAGS.output_dir,
      save_checkpoints_steps=FLAGS.save_checkpoints_steps,
      tpu_config=tf.contrib.tpu.TPUConfig(
          iterations_per_loop=FLAGS.iterations_per_loop,
          num_shards=FLAGS.num_tpu_cores,
          per_host_input_for_training=is_per_host))

  train_examples = None
  num_train_steps = None
  num_warmup_steps = None
  if FLAGS.do_train:
    train_examples = processor.get_train_examples(FLAGS.data_dir)
    num_train_steps = int(
        len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
    num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)

  model_fn = model_fn_builder(
      bert_config=bert_config,
      num_labels=len(label_list),
      init_checkpoint=FLAGS.init_checkpoint,
      learning_rate=FLAGS.learning_rate,
      num_train_steps=num_train_steps,
      num_warmup_steps=num_warmup_steps,
      use_tpu=FLAGS.use_tpu,
      use_one_hot_embeddings=FLAGS.use_tpu)

  # If TPU is not available, this will fall back to normal Estimator on CPU
  # or GPU.
  estimator = tf.contrib.tpu.TPUEstimator(
      use_tpu=FLAGS.use_tpu,
      model_fn=model_fn,
      config=run_config,
      train_batch_size=FLAGS.train_batch_size,
      eval_batch_size=FLAGS.eval_batch_size,
      predict_batch_size=FLAGS.predict_batch_size)

  if FLAGS.do_train:
    train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
    file_based_convert_examples_to_features(
        train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
    tf.logging.info("***** Running training *****")
    tf.logging.info("  Num examples = %d", len(train_examples))
    tf.logging.info("  Batch size = %d", FLAGS.train_batch_size)
    tf.logging.info("  Num steps = %d", num_train_steps)
    train_input_fn = file_based_input_fn_builder(
        input_file=train_file,
        seq_length=FLAGS.max_seq_length,
        is_training=True,
        drop_remainder=True)
    estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)

  if FLAGS.do_eval:
    eval_examples = processor.get_dev_examples(FLAGS.data_dir)
    num_actual_eval_examples = len(eval_examples)
    if FLAGS.use_tpu:
      # TPU requires a fixed batch size for all batches, therefore the number
      # of examples must be a multiple of the batch size, or else examples
      # will get dropped. So we pad with fake examples which are ignored
      # later on. These do NOT count towards the metric (all tf.metrics
      # support a per-instance weight, and these get a weight of 0.0).
      while len(eval_examples) % FLAGS.eval_batch_size != 0:
        eval_examples.append(PaddingInputExample())

    eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
    file_based_convert_examples_to_features(
        eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)

    tf.logging.info("***** Running evaluation *****")
    tf.logging.info("  Num examples = %d (%d actual, %d padding)",
                    len(eval_examples), num_actual_eval_examples,
                    len(eval_examples) - num_actual_eval_examples)
    tf.logging.info("  Batch size = %d", FLAGS.eval_batch_size)

    # This tells the estimator to run through the entire set.
    eval_steps = None
    # However, if running eval on the TPU, you will need to specify the
    # number of steps.
    if FLAGS.use_tpu:
      assert len(eval_examples) % FLAGS.eval_batch_size == 0
      eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)

    eval_drop_remainder = True if FLAGS.use_tpu else False
    eval_input_fn = file_based_input_fn_builder(
        input_file=eval_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=eval_drop_remainder)

    result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)

    output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
    with tf.gfile.GFile(output_eval_file, "w") as writer:
      tf.logging.info("***** Eval results *****")
      for key in sorted(result.keys()):
        tf.logging.info("  %s = %s", key, str(result[key]))
        writer.write("%s = %s\n" % (key, str(result[key])))

  if FLAGS.do_predict:
    predict_examples = processor.get_test_examples(FLAGS.data_dir)
    num_actual_predict_examples = len(predict_examples)
    if FLAGS.use_tpu:
      # TPU requires a fixed batch size for all batches, therefore the number
      # of examples must be a multiple of the batch size, or else examples
      # will get dropped. So we pad with fake examples which are ignored
      # later on.
      while len(predict_examples) % FLAGS.predict_batch_size != 0:
        predict_examples.append(PaddingInputExample())

    predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
    file_based_convert_examples_to_features(predict_examples, label_list,
                                            FLAGS.max_seq_length, tokenizer,
                                            predict_file)

    tf.logging.info("***** Running prediction*****")
    tf.logging.info("  Num examples = %d (%d actual, %d padding)",
                    len(predict_examples), num_actual_predict_examples,
                    len(predict_examples) - num_actual_predict_examples)
    tf.logging.info("  Batch size = %d", FLAGS.predict_batch_size)

    predict_drop_remainder = True if FLAGS.use_tpu else False
    predict_input_fn = file_based_input_fn_builder(
        input_file=predict_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=predict_drop_remainder)

    result = estimator.predict(input_fn=predict_input_fn)

    output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
    with tf.gfile.GFile(output_predict_file, "w") as writer:
      num_written_lines = 0
      tf.logging.info("***** Predict results *****")
      for (i, prediction) in enumerate(result):
        probabilities = prediction["probabilities"]
        if i >= num_actual_predict_examples:
          break
        output_line = "\t".join(
            str(class_probability)
            for class_probability in probabilities) + "\n"
        writer.write(output_line)
        num_written_lines += 1
    assert num_written_lines == num_actual_predict_examples


if __name__ == "__main__":
  flags.mark_flag_as_required("data_dir")
  flags.mark_flag_as_required("task_name")
  flags.mark_flag_as_required("vocab_file")
  flags.mark_flag_as_required("bert_config_file")
  flags.mark_flag_as_required("output_dir")
  tf.app.run()
[ "tensorflow.contrib.cluster_resolver.TPUClusterResolver", "tensorflow.metrics.accuracy", "tensorflow.FixedLenFeature", "tensorflow.nn.log_softmax", "tensorflow.reduce_sum", "tensorflow.gfile.GFile", "tensorflow.cast", "tensorflow.train.init_from_checkpoint", "tensorflow.gfile.MakeDirs", "tensorflow.to_int32", "tensorflow.contrib.tpu.TPUEstimatorSpec", "tensorflow.contrib.tpu.TPUEstimator", "tensorflow.data.TFRecordDataset", "tensorflow.truncated_normal_initializer", "tensorflow.python_io.TFRecordWriter", "tensorflow.logging.set_verbosity", "tensorflow.trainable_variables", "tensorflow.parse_single_example", "tensorflow.argmax", "tensorflow.app.run", "tensorflow.nn.dropout", "tensorflow.metrics.mean", "tensorflow.matmul", "tensorflow.gfile.Open", "tensorflow.shape", "tensorflow.zeros_initializer", "tensorflow.logging.info", "tensorflow.one_hot", "tensorflow.contrib.tpu.TPUConfig", "tensorflow.train.Features", "tensorflow.nn.bias_add", "tensorflow.nn.softmax", "tensorflow.constant", "tensorflow.train.Scaffold", "tensorflow.reduce_mean", "tensorflow.flags.DEFINE_string", "tensorflow.variable_scope" ]
run_classifier.py
[(102, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""tpu_name"""', 'None', '"""The Cloud TPU to use for training. This should be either the name used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url."""'], {}), True, 'import tensorflow as tf\n'),
 (108, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""tpu_zone"""', 'None', '"""[Optional] GCE zone where the Cloud TPU is located in. If not specified, we will attempt to automatically detect the GCE project from metadata."""'], {}), True, 'import tensorflow as tf\n'),
 (114, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""gcp_project"""', 'None', '"""[Optional] Project name for the Cloud TPU-enabled project. If not specified, we will attempt to automatically detect the GCE project from metadata."""'], {}), True, 'import tensorflow as tf\n'),
 (120, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""master"""', 'None', '"""[Optional] TensorFlow master URL."""'], {}), True, 'import tensorflow as tf\n'),
 (483, 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['output_file'], {}), True, 'import tensorflow as tf\n'),
 (577, 'modeling.BertModel', 'modeling.BertModel', ([], {'config': 'bert_config', 'is_training': 'is_training', 'input_ids': 'input_ids', 'input_mask': 'input_mask', 'token_type_ids': 'segment_ids', 'use_one_hot_embeddings': 'use_one_hot_embeddings'}), False, 'import modeling\n'),
 (784, 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), True, 'import tensorflow as tf\n'),
 (793, 'tokenization.validate_case_matches_checkpoint', 'tokenization.validate_case_matches_checkpoint', (['FLAGS.do_lower_case', 'FLAGS.init_checkpoint'], {}), False, 'import tokenization\n'),
 (800, 'modeling.BertConfig.from_json_file', 'modeling.BertConfig.from_json_file', (['FLAGS.bert_config_file'], {}), False, 'import modeling\n'),
 (808, 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['FLAGS.output_dir'], {}), True, 'import tensorflow as tf\n'),
 (819, 'tokenization.FullTokenizer', 'tokenization.FullTokenizer', ([], {'vocab_file': 'FLAGS.vocab_file', 'do_lower_case': 'FLAGS.do_lower_case'}), False, 'import tokenization\n'),
 (859, 'tensorflow.contrib.tpu.TPUEstimator', 'tf.contrib.tpu.TPUEstimator', ([], {'use_tpu': 'FLAGS.use_tpu', 'model_fn': 'model_fn', 'config': 'run_config', 'train_batch_size': 'FLAGS.train_batch_size', 'eval_batch_size': 'FLAGS.eval_batch_size', 'predict_batch_size': 'FLAGS.predict_batch_size'}), True, 'import tensorflow as tf\n'),
 (981, 'tensorflow.app.run', 'tf.app.run', ([], {}), True, 'import tensorflow as tf\n'),
 (461, 'tensorflow.logging.info', 'tf.logging.info', (['"""*** Example ***"""'], {}), True, 'import tensorflow as tf\n'),
 (462, 'tensorflow.logging.info', 'tf.logging.info', (["('guid: %s' % example.guid)"], {}), True, 'import tensorflow as tf\n'),
 (468, 'tensorflow.logging.info', 'tf.logging.info', (["('label: %s (id = %d)' % (example.label, label_id))"], {}), True, 'import tensorflow as tf\n'),
 (496, 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), False, 'import collections\n'),
 (514, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[seq_length]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'),
 (515, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[seq_length]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'),
 (516, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[seq_length]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'),
 (517, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'),
 (518, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'),
 (523, 'tensorflow.parse_single_example', 'tf.parse_single_example', (['record', 'name_to_features'], {}), True, 'import tensorflow as tf\n'),
 (541, 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['input_file'], {}), True, 'import tensorflow as tf\n'),
 (601, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""loss"""'], {}), True, 'import tensorflow as tf\n'),
 (606, 'tensorflow.matmul', 'tf.matmul', (['output_layer', 'output_weights'], {'transpose_b': '(True)'}), True, 'import tensorflow as tf\n'),
 (607, 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['logits', 'output_bias'], {}), True, 'import tensorflow as tf\n'),
 (608, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'),
 (609, 'tensorflow.nn.log_softmax', 'tf.nn.log_softmax', (['logits'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'),
 (611, 'tensorflow.one_hot', 'tf.one_hot', (['labels'], {'depth': 'num_labels', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'),
 (614, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['per_example_loss'], {}), True, 'import tensorflow as tf\n'),
 (627, 'tensorflow.logging.info', 'tf.logging.info', (['"""*** Features ***"""'], {}), True, 'import tensorflow as tf\n'),
 (647, 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), True, 'import tensorflow as tf\n'),
 (663, 'tensorflow.logging.info', 'tf.logging.info', (['"""**** Trainable Variables ****"""'], {}), True, 'import tensorflow as tf\n'),
 (824, 'tensorflow.contrib.cluster_resolver.TPUClusterResolver', 'tf.contrib.cluster_resolver.TPUClusterResolver', (['FLAGS.tpu_name'], {'zone': 'FLAGS.tpu_zone', 'project': 'FLAGS.gcp_project'}), True, 'import tensorflow as tf\n'),
 (868, 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""train.tf_record"""'], {}), False, 'import os\n'),
 (871, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Running training *****"""'], {}), True, 'import tensorflow as tf\n'),
 (873, 'tensorflow.logging.info', 'tf.logging.info', (['""" Batch size = %d"""', 'FLAGS.train_batch_size'], {}), True, 'import tensorflow as tf\n'),
 (874, 'tensorflow.logging.info', 'tf.logging.info', (['""" Num steps = %d"""', 'num_train_steps'], {}), True, 'import tensorflow as tf\n'),
 (894, 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""eval.tf_record"""'], {}), False, 'import os\n'),
 (898, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Running evaluation *****"""'], {}), True, 'import tensorflow as tf\n'),
 (902, 'tensorflow.logging.info', 'tf.logging.info', (['""" Batch size = %d"""', 'FLAGS.eval_batch_size'], {}), True, 'import tensorflow as tf\n'),
 (921, 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""eval_results.txt"""'], {}), False, 'import os\n'),
 (939, 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""predict.tf_record"""'], {}), False, 'import os\n'),
 (944, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Running prediction*****"""'], {}), True, 'import tensorflow as tf\n'),
 (948, 'tensorflow.logging.info', 'tf.logging.info', (['""" Batch size = %d"""', 'FLAGS.predict_batch_size'], {}), True, 'import tensorflow as tf\n'),
 (959, 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""test_results.tsv"""'], {}), False, 'import os\n'),
 (199, 'tensorflow.gfile.Open', 'tf.gfile.Open', (['input_file', '"""r"""'], {}), True, 'import tensorflow as tf\n'),
 (200, 'csv.reader', 'csv.reader', (['f'], {'delimiter': '"""\t"""', 'quotechar': 'quotechar'}), False, 'import csv\n'),
 (216, 'os.path.join', 'os.path.join', (['data_dir', '"""multinli"""', "('multinli.train.%s.tsv' % self.language)"], {}), False, 'import os\n'),
 (223, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[0]'], {}), False, 'import tokenization\n'),
 (224, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[1]'], {}), False, 'import tokenization\n'),
 (225, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[2]'], {}), False, 'import tokenization\n'),
 (234, 'os.path.join', 'os.path.join', (['data_dir', '"""xnli.dev.tsv"""'], {}), False, 'import os\n'),
 (240, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[0]'], {}), False, 'import tokenization\n'),
 (243, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[6]'], {}), False, 'import tokenization\n'),
 (244, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[7]'], {}), False, 'import tokenization\n'),
 (245, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[1]'], {}), False, 'import tokenization\n'),
 (285, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[8]'], {}), False, 'import tokenization\n'),
 (286, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[9]'], {}), False, 'import tokenization\n'),
 (325, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[3]'], {}), False, 'import tokenization\n'),
 (326, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[4]'], {}), False, 'import tokenization\n'),
 (596, 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.02)'}), True, 'import tensorflow as tf\n'),
 (599, 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), True, 'import tensorflow as tf\n'),
 (604, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['output_layer'], {'keep_prob': '(0.9)'}), True, 'import tensorflow as tf\n'),
 (613, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(one_hot_labels * log_probs)'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'),
 (629, 'tensorflow.logging.info', 'tf.logging.info', (["(' name = %s, shape = %s' % (name, features[name].shape))"], {}), True, 'import tensorflow as tf\n'),
 (637, 'tensorflow.cast', 'tf.cast', (["features['is_real_example']"], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'),
 (652, 'modeling.get_assignment_map_from_checkpoint', 'modeling.get_assignment_map_from_checkpoint', (['tvars', 'init_checkpoint'], {'different_vocabulary': '(False)'}), False, 'import modeling\n'),
 (668, 'tensorflow.logging.info', 'tf.logging.info', (['""" name = %s, shape = %s%s"""', 'var.name', 'var.shape', 'init_string'], {}), True, 'import tensorflow as tf\n'),
 (674, 'optimization.create_optimizer', 'optimization.create_optimizer', (['total_loss', 'learning_rate', 'num_train_steps', 'num_warmup_steps', 'use_tpu'], {}), False, 'import optimization\n'),
 (677, 'tensorflow.contrib.tpu.TPUEstimatorSpec', 'tf.contrib.tpu.TPUEstimatorSpec', ([], {'mode': 'mode', 'loss': 'total_loss', 'train_op': 'train_op', 'scaffold_fn': 'scaffold_fn'}), True, 'import tensorflow as tf\n'),
 (833, 'tensorflow.contrib.tpu.TPUConfig', 'tf.contrib.tpu.TPUConfig', ([], {'iterations_per_loop': 'FLAGS.iterations_per_loop', 'num_shards': 'FLAGS.num_tpu_cores', 'per_host_input_for_training': 'is_per_host'}), True, 'import tensorflow as tf\n'),
 (922, 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['output_eval_file', '"""w"""'], {}), True, 'import tensorflow as tf\n'),
 (923, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Eval results *****"""'], {}), True, 'import tensorflow as tf\n'),
 (960, 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['output_predict_file', '"""w"""'], {}), True, 'import tensorflow as tf\n'),
 (962, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Predict results *****"""'], {}), True, 'import tensorflow as tf\n'),
 (226, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['"""contradictory"""'], {}), False, 'import tokenization\n'),
 (227, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['"""contradiction"""'], {}), False, 'import tokenization\n'),
 (241, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['self.language'], {}), False, 'import tokenization\n'),
 (261, 'os.path.join', 'os.path.join', (['data_dir', '"""train.tsv"""'], {}), False, 'import os\n'),
 (266, 'os.path.join', 'os.path.join', (['data_dir', '"""dev_matched.tsv"""'], {}), False, 'import os\n'),
 (272, 'os.path.join', 'os.path.join', (['data_dir', '"""test_matched.tsv"""'], {}), False, 'import os\n'),
 (290, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[-1]'], {}), False, 'import tokenization\n'),
 (302, 'os.path.join', 'os.path.join', (['data_dir', '"""train.tsv"""'], {}), False, 'import os\n'),
 (307, 'os.path.join', 'os.path.join', (['data_dir', '"""dev.tsv"""'], {}), False, 'import os\n'),
 (312, 'os.path.join', 'os.path.join', (['data_dir', '"""test.tsv"""'], {}), False, 'import os\n'),
 (330, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[0]'], {}), False, 'import tokenization\n'),
 (342, 'os.path.join', 'os.path.join', (['data_dir', '"""train.tsv"""'], {}), False, 'import os\n'),
 (347, 'os.path.join', 'os.path.join', (['data_dir', '"""dev.tsv"""'], {}), False, 'import os\n'),
 (352, 'os.path.join', 'os.path.join', (['data_dir', '"""test.tsv"""'], {}), False, 'import os\n'),
 (367, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[1]'], {}), False, 'import tokenization\n'),
 (370, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[3]'], {}), False, 'import tokenization\n'),
 (371, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[1]'], {}), False, 'import tokenization\n'),
 (504, 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'features'}), True, 'import tensorflow as tf\n'),
 (530, 'tensorflow.to_int32', 'tf.to_int32', (['t'], {}), True, 'import tensorflow as tf\n'),
 (639, 'tensorflow.shape', 'tf.shape', (['label_ids'], {}), True, 'import tensorflow as tf\n'),
 (661, 'tensorflow.train.init_from_checkpoint', 'tf.train.init_from_checkpoint', (['init_checkpoint', 'assignment_map'], {}), True, 'import tensorflow as tf\n'),
 (696, 'tensorflow.contrib.tpu.TPUEstimatorSpec', 'tf.contrib.tpu.TPUEstimatorSpec', ([], {'mode': 'mode', 'loss': 'total_loss', 'eval_metrics': 'eval_metrics', 'scaffold_fn': 'scaffold_fn'}), True, 'import tensorflow as tf\n'),
 (702, 'tensorflow.contrib.tpu.TPUEstimatorSpec', 'tf.contrib.tpu.TPUEstimatorSpec', ([], {'mode': 'mode', 'predictions': "{'probabilities': probabilities}", 'scaffold_fn': 'scaffold_fn'}), True, 'import tensorflow as tf\n'),
 (738, 'tensorflow.constant', 'tf.constant', (['all_input_ids'], {'shape': '[num_examples, seq_length]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'),
 (742, 'tensorflow.constant', 'tf.constant', (['all_input_mask'], {'shape': '[num_examples, seq_length]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'),
 (747, 'tensorflow.constant', 'tf.constant', (['all_segment_ids'], {'shape': '[num_examples, seq_length]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'),
 (752, 'tensorflow.constant', 'tf.constant', (['all_label_ids'], {'shape': '[num_examples]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'),
 (284, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[0]'], {}), False, 'import tokenization\n'),
 (656, 'tensorflow.train.init_from_checkpoint', 'tf.train.init_from_checkpoint', (['init_checkpoint', 'assignment_map'], {}), True, 'import tensorflow as tf\n'),
 (657, 'tensorflow.train.Scaffold', 'tf.train.Scaffold', ([], {}), True, 'import tensorflow as tf\n'),
 (685, 'tensorflow.argmax', 'tf.argmax', (['logits'], {'axis': '(-1)', 'output_type': 'tf.int32'}), True, 'import tensorflow as tf\n'),
 (686, 'tensorflow.metrics.accuracy', 'tf.metrics.accuracy', ([], {'labels': 'label_ids', 'predictions': 'predictions', 'weights': 'is_real_example'}), True, 'import tensorflow as tf\n'),
 (688, 'tensorflow.metrics.mean', 'tf.metrics.mean', ([], {'values': 'per_example_loss', 'weights': 'is_real_example'}), True, 'import tensorflow as tf\n'),
 (464, 'tokenization.printable_text', 'tokenization.printable_text', (['x'], {}), False, 'import tokenization\n')]
JiangFeng07/NLPIK
bacd52e24690e8ba706895b54a076ee05d785d7b
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import csv
import os

from model.bert import modeling
from model.bert import optimization
from model.bert import tokenization
import tensorflow as tf

flags = tf.flags

FLAGS = flags.FLAGS

## Required parameters
flags.DEFINE_string(
    "data_dir", None,
    "The input data dir. Should contain the .tsv files (or other data files) "
    "for the task.")

flags.DEFINE_string(
    "bert_config_file", None,
    "The config json file corresponding to the pre-trained BERT model. "
    "This specifies the model architecture.")

flags.DEFINE_string("task_name", None, "The name of the task to train.")

flags.DEFINE_string("vocab_file", None,
                    "The vocabulary file that the BERT model was trained on.")

flags.DEFINE_string(
    "output_dir", None,
    "The output directory where the model checkpoints will be written.")

## Other parameters

flags.DEFINE_string(
    "init_checkpoint", None,
    "Initial checkpoint (usually from a pre-trained BERT model).")

flags.DEFINE_bool(
    "do_lower_case", True,
    "Whether to lower case the input text. Should be True for uncased "
    "models and False for cased models.")

flags.DEFINE_integer(
    "max_seq_length", 128,
    "The maximum total input sequence length after WordPiece tokenization. "
    "Sequences longer than this will be truncated, and sequences shorter "
    "than this will be padded.")

flags.DEFINE_bool("do_train", False, "Whether to run training.")

flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")

flags.DEFINE_bool(
    "do_predict", False,
    "Whether to run the model in inference mode on the test set.")

flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")

flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")

flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")

flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")

flags.DEFINE_float("num_train_epochs", 3.0,
                   "Total number of training epochs to perform.")

flags.DEFINE_float(
    "warmup_proportion", 0.1,
    "Proportion of training to perform linear learning rate warmup for. "
    "E.g., 0.1 = 10% of training.")

flags.DEFINE_integer("save_checkpoints_steps", 1000,
                     "How often to save the model checkpoint.")

flags.DEFINE_integer("iterations_per_loop", 1000,
                     "How many steps to make in each estimator call.")

flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")

tf.flags.DEFINE_string(
    "tpu_name", None,
    "The Cloud TPU to use for training. This should be either the name "
    "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
    "url.")

tf.flags.DEFINE_string(
    "tpu_zone", None,
    "[Optional] GCE zone where the Cloud TPU is located in. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")

tf.flags.DEFINE_string(
    "gcp_project", None,
    "[Optional] Project name for the Cloud TPU-enabled project. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")

tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")

flags.DEFINE_integer(
    "num_tpu_cores", 8,
    "Only used if `use_tpu` is True. Total number of TPU cores to use.")


class InputExample(object):
  """A single training/test example for simple sequence classification."""

  def __init__(self, guid, text_a, text_b=None, label=None):
    """Constructs a InputExample.

    Args:
      guid: Unique id for the example.
      text_a: string. The untokenized text of the first sequence. For single
        sequence tasks, only this sequence must be specified.
      text_b: (Optional) string. The untokenized text of the second sequence.
        Only must be specified for sequence pair tasks.
      label: (Optional) string. The label of the example. This should be
        specified for train and dev examples, but not for test examples.
    """
    self.guid = guid
    self.text_a = text_a
    self.text_b = text_b
    self.label = label


class PaddingInputExample(object):
  """Fake example so the num input examples is a multiple of the batch size.

  When running eval/predict on the TPU, we need to pad the number of examples
  to be a multiple of the batch size, because the TPU requires a fixed batch
  size. The alternative is to drop the last batch, which is bad because it means
  the entire output data won't be generated.

  We use this class instead of `None` because treating `None` as padding
  battches could cause silent errors.
  """


class InputFeatures(object):
  """A single set of features of data."""

  def __init__(self,
               input_ids,
               input_mask,
               segment_ids,
               label_id,
               is_real_example=True):
    self.input_ids = input_ids
    self.input_mask = input_mask
    self.segment_ids = segment_ids
    self.label_id = label_id
    self.is_real_example = is_real_example


class DataProcessor(object):
  """Base class for data converters for sequence classification data sets."""

  def get_train_examples(self, data_dir):
    """Gets a collection of `InputExample`s for the train set."""
    raise NotImplementedError()

  def get_dev_examples(self, data_dir):
    """Gets a collection of `InputExample`s for the dev set."""
    raise NotImplementedError()

  def get_test_examples(self, data_dir):
    """Gets a collection of `InputExample`s for prediction."""
    raise NotImplementedError()

  def get_labels(self):
    """Gets the list of labels for this data set."""
    raise NotImplementedError()

  @classmethod
  def _read_tsv(cls, input_file, quotechar=None):
    """Reads a tab separated value file."""
    with tf.gfile.Open(input_file, "r") as f:
      reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
      lines = []
      for line in reader:
        lines.append(line)
      return lines


class XnliProcessor(DataProcessor):
  """Processor for the XNLI data set."""

  def __init__(self):
    self.language = "zh"

  def get_train_examples(self, data_dir):
    """See base class."""
    lines = self._read_tsv(
        os.path.join(data_dir, "multinli",
                     "multinli.train.%s.tsv" % self.language))
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        continue
      guid = "train-%d" % (i)
      text_a = tokenization.convert_to_unicode(line[0])
      text_b = tokenization.convert_to_unicode(line[1])
      label = tokenization.convert_to_unicode(line[2])
      if label == tokenization.convert_to_unicode("contradictory"):
        label = tokenization.convert_to_unicode("contradiction")
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples

  def get_dev_examples(self, data_dir):
    """See base class."""
    lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        continue
      guid = "dev-%d" % (i)
      language = tokenization.convert_to_unicode(line[0])
      if language != tokenization.convert_to_unicode(self.language):
        continue
      text_a = tokenization.convert_to_unicode(line[6])
      text_b = tokenization.convert_to_unicode(line[7])
      label = tokenization.convert_to_unicode(line[1])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples

  def get_labels(self):
    """See base class."""
    return ["contradiction", "entailment", "neutral"]


class MnliProcessor(DataProcessor):
  """Processor for the MultiNLI data set (GLUE version)."""

  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
        "dev_matched")

  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test")

  def get_labels(self):
    """See base class."""
    return ["contradiction", "entailment", "neutral"]

  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        continue
      guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
      text_a = tokenization.convert_to_unicode(line[8])
      text_b = tokenization.convert_to_unicode(line[9])
      if set_type == "test":
        label = "contradiction"
      else:
        label = tokenization.convert_to_unicode(line[-1])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples


class MrpcProcessor(DataProcessor):
  """Processor for the MRPC data set (GLUE version)."""

  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")

  def get_labels(self):
    """See base class."""
    return ["0", "1"]

  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        continue
      guid = "%s-%s" % (set_type, i)
      text_a = tokenization.convert_to_unicode(line[3])
      text_b = tokenization.convert_to_unicode(line[4])
      if set_type == "test":
        label = "0"
      else:
        label = tokenization.convert_to_unicode(line[0])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples


class ColaProcessor(DataProcessor):
  """Processor for the CoLA data set (GLUE version)."""

  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")

  def get_labels(self):
    """See base class."""
    return ["0", "1"]

  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      # Only the test set has a header
      if set_type == "test" and i == 0:
        continue
      guid = "%s-%s" % (set_type, i)
      if set_type == "test":
        text_a = tokenization.convert_to_unicode(line[1])
        label = "0"
      else:
        text_a = tokenization.convert_to_unicode(line[3])
        label = tokenization.convert_to_unicode(line[1])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
    return examples


def convert_single_example(ex_index, example, label_list, max_seq_length,
                           tokenizer):
  """Converts a single `InputExample` into a single `InputFeatures`."""

  if isinstance(example, PaddingInputExample):
    return InputFeatures(
        input_ids=[0] * max_seq_length,
        input_mask=[0] * max_seq_length,
        segment_ids=[0] * max_seq_length,
        label_id=0,
        is_real_example=False)

  label_map = {}
  for (i, label) in enumerate(label_list):
    label_map[label] = i

  tokens_a = tokenizer.tokenize(example.text_a)
  tokens_b = None
  if example.text_b:
    tokens_b = tokenizer.tokenize(example.text_b)

  if tokens_b:
    # Modifies `tokens_a` and `tokens_b` in place so that the total
    # length is less than the specified length.
    # Account for [CLS], [SEP], [SEP] with "- 3"
    _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
  else:
    # Account for [CLS] and [SEP] with "- 2"
    if len(tokens_a) > max_seq_length - 2:
      tokens_a = tokens_a[0:(max_seq_length - 2)]

  # The convention in BERT is:
  # (a) For sequence pairs:
  #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
  #  type_ids: 0     0  0    0    0     0       0 0     1  1  1  1   1 1
  # (b) For single sequences:
  #  tokens:   [CLS] the dog is hairy . [SEP]
  #  type_ids: 0     0   0   0  0     0 0
  #
  # Where "type_ids" are used to indicate whether this is the first
  # sequence or the second sequence. The embedding vectors for `type=0` and
  # `type=1` were learned during pre-training and are added to the wordpiece
  # embedding vector (and position vector). This is not *strictly* necessary
  # since the [SEP] token unambiguously separates the sequences, but it makes
  # it easier for the model to learn the concept of sequences.
  #
  # For classification tasks, the first vector (corresponding to [CLS]) is
  # used as the "sentence vector". Note that this only makes sense because
  # the entire model is fine-tuned.
  tokens = []
  segment_ids = []
  tokens.append("[CLS]")
  segment_ids.append(0)
  for token in tokens_a:
    tokens.append(token)
    segment_ids.append(0)
  tokens.append("[SEP]")
  segment_ids.append(0)

  if tokens_b:
    for token in tokens_b:
      tokens.append(token)
      segment_ids.append(1)
    tokens.append("[SEP]")
    segment_ids.append(1)

  input_ids = tokenizer.convert_tokens_to_ids(tokens)

  # The mask has 1 for real tokens and 0 for padding tokens. Only real
  # tokens are attended to.
  input_mask = [1] * len(input_ids)

  # Zero-pad up to the sequence length.
  while len(input_ids) < max_seq_length:
    input_ids.append(0)
    input_mask.append(0)
    segment_ids.append(0)

  assert len(input_ids) == max_seq_length
  assert len(input_mask) == max_seq_length
  assert len(segment_ids) == max_seq_length

  label_id = label_map[example.label]
  if ex_index < 5:
    tf.logging.info("*** Example ***")
    tf.logging.info("guid: %s" % (example.guid))
    tf.logging.info("tokens: %s" % " ".join(
        [tokenization.printable_text(x) for x in tokens]))
    tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
    tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
    tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
    tf.logging.info("label: %s (id = %d)" % (example.label, label_id))

  feature = InputFeatures(
      input_ids=input_ids,
      input_mask=input_mask,
      segment_ids=segment_ids,
      label_id=label_id,
      is_real_example=True)
  return feature


def file_based_convert_examples_to_features(
    examples, label_list, max_seq_length, tokenizer, output_file):
  """Convert a set of `InputExample`s to a TFRecord file."""

  writer = tf.python_io.TFRecordWriter(output_file)

  for (ex_index, example) in enumerate(examples):
    if ex_index % 10000 == 0:
      tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))

    feature = convert_single_example(ex_index, example, label_list,
                                     max_seq_length, tokenizer)

    def create_int_feature(values):
      f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
      return f

    features = collections.OrderedDict()
    features["input_ids"] = create_int_feature(feature.input_ids)
    features["input_mask"] = create_int_feature(feature.input_mask)
    features["segment_ids"] = create_int_feature(feature.segment_ids)
    features["label_ids"] = create_int_feature([feature.label_id])
    features["is_real_example"] = create_int_feature(
        [int(feature.is_real_example)])

    tf_example = tf.train.Example(features=tf.train.Features(feature=features))
    writer.write(tf_example.SerializeToString())
  writer.close()


def file_based_input_fn_builder(input_file, seq_length, is_training,
                                drop_remainder):
  """Creates an `input_fn` closure to be passed to TPUEstimator."""

  name_to_features = {
      "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
      "input_mask": tf.FixedLenFeature([seq_length], tf.int64),
      "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
      "label_ids": tf.FixedLenFeature([], tf.int64),
      "is_real_example": tf.FixedLenFeature([], tf.int64),
  }

  def _decode_record(record, name_to_features):
    """Decodes a record to a TensorFlow example."""
    example = tf.parse_single_example(record, name_to_features)

    # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
    # So cast all int64 to int32.
    for name in list(example.keys()):
      t = example[name]
      if t.dtype == tf.int64:
        t = tf.to_int32(t)
      example[name] = t

    return example

  def input_fn(params):
    """The actual input function."""
    batch_size = params["batch_size"]

    # For training, we want a lot of parallel reading and shuffling.
    # For eval, we want no shuffling and parallel reading doesn't matter.
    d = tf.data.TFRecordDataset(input_file)
    if is_training:
      d = d.repeat()
      d = d.shuffle(buffer_size=100)

    d = d.apply(
        tf.contrib.data.map_and_batch(
            lambda record: _decode_record(record, name_to_features),
            batch_size=batch_size,
            drop_remainder=drop_remainder))

    return d

  return input_fn


def _truncate_seq_pair(tokens_a, tokens_b, max_length):
  """Truncates a sequence pair in place to the maximum length."""

  # This is a simple heuristic which will always truncate the longer sequence
  # one token at a time. This makes more sense than truncating an equal percent
  # of tokens from each, since if one sequence is very short then each token
  # that's truncated likely contains more information than a longer sequence.
  while True:
    total_length = len(tokens_a) + len(tokens_b)
    if total_length <= max_length:
      break
    if len(tokens_a) > len(tokens_b):
      tokens_a.pop()
    else:
      tokens_b.pop()


def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
                 labels, num_labels, use_one_hot_embeddings):
  """Creates a classification model."""
  model = modeling.BertModel(
      config=bert_config,
      is_training=is_training,
      input_ids=input_ids,
      input_mask=input_mask,
      token_type_ids=segment_ids,
      use_one_hot_embeddings=use_one_hot_embeddings)

  # In the demo, we are doing a simple classification task on the entire
  # segment.
  #
  # If you want to use the token-level output, use model.get_sequence_output()
  # instead.
  output_layer = model.get_pooled_output()

  hidden_size = output_layer.shape[-1].value

  output_weights = tf.get_variable(
      "output_weights", [num_labels, hidden_size],
      initializer=tf.truncated_normal_initializer(stddev=0.02))

  output_bias = tf.get_variable(
      "output_bias", [num_labels], initializer=tf.zeros_initializer())

  with tf.variable_scope("loss"):
    if is_training:
      # I.e., 0.1 dropout
      output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)

    logits = tf.matmul(output_layer, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    probabilities = tf.nn.softmax(logits, axis=-1)
    log_probs = tf.nn.log_softmax(logits, axis=-1)

    one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)

    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
    loss = tf.reduce_mean(per_example_loss)

    return (loss, per_example_loss, logits, probabilities)


def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, use_tpu,
                     use_one_hot_embeddings):
  """Returns `model_fn` closure for TPUEstimator."""

  def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    """The `model_fn` for TPUEstimator."""

    tf.logging.info("*** Features ***")
    for name in sorted(features.keys()):
      tf.logging.info("  name = %s, shape = %s" % (name, features[name].shape))

    input_ids = features["input_ids"]
    input_mask = features["input_mask"]
    segment_ids = features["segment_ids"]
    label_ids = features["label_ids"]
    is_real_example = None
    if "is_real_example" in features:
      is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
    else:
      is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)

    is_training = (mode == tf.estimator.ModeKeys.TRAIN)

    (total_loss, per_example_loss, logits, probabilities) = create_model(
        bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
        num_labels, use_one_hot_embeddings)

    tvars = tf.trainable_variables()
    initialized_variable_names = {}
    scaffold_fn = None
    if init_checkpoint:
      (assignment_map, initialized_variable_names
      ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
      if use_tpu:

        def tpu_scaffold():
          tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
          return tf.train.Scaffold()

        scaffold_fn = tpu_scaffold
      else:
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

    tf.logging.info("**** Trainable Variables ****")
    for var in tvars:
      init_string = ""
      if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
      tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                      init_string)

    output_spec = None
    if mode == tf.estimator.ModeKeys.TRAIN:

      train_op = optimization.create_optimizer(
          total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)

      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          train_op=train_op,
          scaffold_fn=scaffold_fn)
    elif mode == tf.estimator.ModeKeys.EVAL:

      def metric_fn(per_example_loss, label_ids, logits, is_real_example):
        predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
        accuracy = tf.metrics.accuracy(
            labels=label_ids, predictions=predictions, weights=is_real_example)
        loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
        return {
            "eval_accuracy": accuracy,
            "eval_loss": loss,
        }

      eval_metrics = (metric_fn,
                      [per_example_loss, label_ids, logits, is_real_example])
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          eval_metrics=eval_metrics,
          scaffold_fn=scaffold_fn)
    else:
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          predictions={"probabilities": probabilities},
          scaffold_fn=scaffold_fn)
    return output_spec

  return model_fn


# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, seq_length, is_training, drop_remainder):
  """Creates an `input_fn` closure to be passed to TPUEstimator."""

  all_input_ids = []
  all_input_mask = []
  all_segment_ids = []
  all_label_ids = []

  for feature in features:
    all_input_ids.append(feature.input_ids)
    all_input_mask.append(feature.input_mask)
    all_segment_ids.append(feature.segment_ids)
    all_label_ids.append(feature.label_id)

  def input_fn(params):
    """The actual input function."""
    batch_size = params["batch_size"]

    num_examples = len(features)

    # This is for demo purposes and does NOT scale to large data sets. We do
    # not use Dataset.from_generator() because that uses tf.py_func which is
    # not TPU compatible. The right way to load data is with TFRecordReader.
    d = tf.data.Dataset.from_tensor_slices({
        "input_ids":
            tf.constant(
                all_input_ids, shape=[num_examples, seq_length],
                dtype=tf.int32),
        "input_mask":
            tf.constant(
                all_input_mask,
                shape=[num_examples, seq_length],
                dtype=tf.int32),
        "segment_ids":
            tf.constant(
                all_segment_ids,
                shape=[num_examples, seq_length],
                dtype=tf.int32),
        "label_ids":
            tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
    })

    if is_training:
      d = d.repeat()
      d = d.shuffle(buffer_size=100)

    d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
    return d

  return input_fn


# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def convert_examples_to_features(examples, label_list, max_seq_length,
                                 tokenizer):
  """Convert a set of `InputExample`s to a list of `InputFeatures`."""

  features = []
  for (ex_index, example) in enumerate(examples):
    if ex_index % 10000 == 0:
      tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))

    feature = convert_single_example(ex_index, example, label_list,
                                     max_seq_length, tokenizer)

    features.append(feature)
  return features


def main(_):
  tf.logging.set_verbosity(tf.logging.INFO)

  processors = {
      "cola": ColaProcessor,
      "mnli": MnliProcessor,
      "mrpc": MrpcProcessor,
      "xnli": XnliProcessor,
  }

  tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
                                                FLAGS.init_checkpoint)

  if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
    raise ValueError(
        "At least one of `do_train`, `do_eval` or `do_predict' must be True.")

  bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)

  if FLAGS.max_seq_length > bert_config.max_position_embeddings:
    raise ValueError(
        "Cannot use sequence length %d because the BERT model "
        "was only trained up to sequence length %d" %
        (FLAGS.max_seq_length, bert_config.max_position_embeddings))

  tf.gfile.MakeDirs(FLAGS.output_dir)

  task_name = FLAGS.task_name.lower()

  if task_name not in processors:
    raise ValueError("Task not found: %s" % (task_name))

  processor = processors[task_name]()

  label_list = processor.get_labels()

  tokenizer = tokenization.FullTokenizer(
      vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)

  tpu_cluster_resolver = None
  if FLAGS.use_tpu and FLAGS.tpu_name:
    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)

  is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
  run_config = tf.contrib.tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      master=FLAGS.master,
      model_dir=FLAGS.output_dir,
      save_checkpoints_steps=FLAGS.save_checkpoints_steps,
      tpu_config=tf.contrib.tpu.TPUConfig(
          iterations_per_loop=FLAGS.iterations_per_loop,
          num_shards=FLAGS.num_tpu_cores,
          per_host_input_for_training=is_per_host))

  train_examples = None
  num_train_steps = None
  num_warmup_steps = None
  if FLAGS.do_train:
    train_examples = processor.get_train_examples(FLAGS.data_dir)
    num_train_steps = int(
        len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
    num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)

  model_fn = model_fn_builder(
      bert_config=bert_config,
      num_labels=len(label_list),
      init_checkpoint=FLAGS.init_checkpoint,
      learning_rate=FLAGS.learning_rate,
      num_train_steps=num_train_steps,
      num_warmup_steps=num_warmup_steps,
      use_tpu=FLAGS.use_tpu,
      use_one_hot_embeddings=FLAGS.use_tpu)

  # If TPU is not available, this will fall back to normal Estimator on CPU
  # or GPU.
  estimator = tf.contrib.tpu.TPUEstimator(
      use_tpu=FLAGS.use_tpu,
      model_fn=model_fn,
      config=run_config,
      train_batch_size=FLAGS.train_batch_size,
      eval_batch_size=FLAGS.eval_batch_size,
      predict_batch_size=FLAGS.predict_batch_size)

  if FLAGS.do_train:
    train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
    file_based_convert_examples_to_features(
        train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
    tf.logging.info("***** Running training *****")
    tf.logging.info("  Num examples = %d", len(train_examples))
    tf.logging.info("  Batch size = %d", FLAGS.train_batch_size)
    tf.logging.info("  Num steps = %d", num_train_steps)
    train_input_fn = file_based_input_fn_builder(
        input_file=train_file,
        seq_length=FLAGS.max_seq_length,
        is_training=True,
        drop_remainder=True)
    estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)

  if FLAGS.do_eval:
    eval_examples = processor.get_dev_examples(FLAGS.data_dir)
    num_actual_eval_examples = len(eval_examples)
    if FLAGS.use_tpu:
      # TPU requires a fixed batch size for all batches, therefore the number
      # of examples must be a multiple of the batch size, or else examples
      # will get dropped. So we pad with fake examples which are ignored
      # later on. These do NOT count towards the metric (all tf.metrics
      # support a per-instance weight, and these get a weight of 0.0).
      while len(eval_examples) % FLAGS.eval_batch_size != 0:
        eval_examples.append(PaddingInputExample())

    eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
    file_based_convert_examples_to_features(
        eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)

    tf.logging.info("***** Running evaluation *****")
    tf.logging.info("  Num examples = %d (%d actual, %d padding)",
                    len(eval_examples), num_actual_eval_examples,
                    len(eval_examples) - num_actual_eval_examples)
    tf.logging.info("  Batch size = %d", FLAGS.eval_batch_size)

    # This tells the estimator to run through the entire set.
    eval_steps = None
    # However, if running eval on the TPU, you will need to specify the
    # number of steps.
    if FLAGS.use_tpu:
      assert len(eval_examples) % FLAGS.eval_batch_size == 0
      eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)

    eval_drop_remainder = True if FLAGS.use_tpu else False
    eval_input_fn = file_based_input_fn_builder(
        input_file=eval_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=eval_drop_remainder)

    result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)

    output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
    with tf.gfile.GFile(output_eval_file, "w") as writer:
      tf.logging.info("***** Eval results *****")
      for key in sorted(result.keys()):
        tf.logging.info("  %s = %s", key, str(result[key]))
        writer.write("%s = %s\n" % (key, str(result[key])))

  if FLAGS.do_predict:
    predict_examples = processor.get_test_examples(FLAGS.data_dir)
    num_actual_predict_examples = len(predict_examples)
    if FLAGS.use_tpu:
      # TPU requires a fixed batch size for all batches, therefore the number
      # of examples must be a multiple of the batch size, or else examples
      # will get dropped. So we pad with fake examples which are ignored
      # later on.
      while len(predict_examples) % FLAGS.predict_batch_size != 0:
        predict_examples.append(PaddingInputExample())

    predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
    file_based_convert_examples_to_features(predict_examples, label_list,
                                            FLAGS.max_seq_length, tokenizer,
                                            predict_file)

    tf.logging.info("***** Running prediction*****")
    tf.logging.info("  Num examples = %d (%d actual, %d padding)",
                    len(predict_examples), num_actual_predict_examples,
                    len(predict_examples) - num_actual_predict_examples)
    tf.logging.info("  Batch size = %d", FLAGS.predict_batch_size)

    predict_drop_remainder = True if FLAGS.use_tpu else False
    predict_input_fn = file_based_input_fn_builder(
        input_file=predict_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=predict_drop_remainder)

    result = estimator.predict(input_fn=predict_input_fn)

    output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
    with tf.gfile.GFile(output_predict_file, "w") as writer:
      num_written_lines = 0
      tf.logging.info("***** Predict results *****")
      for (i, prediction) in enumerate(result):
        probabilities = prediction["probabilities"]
        if i >= num_actual_predict_examples:
          break
        output_line = "\t".join(
            str(class_probability)
            for class_probability in probabilities) + "\n"
        writer.write(output_line)
        num_written_lines += 1
    assert num_written_lines == num_actual_predict_examples


if __name__ == "__main__":
  flags.mark_flag_as_required("data_dir")
  flags.mark_flag_as_required("task_name")
  flags.mark_flag_as_required("vocab_file")
  flags.mark_flag_as_required("bert_config_file")
  flags.mark_flag_as_required("output_dir")
  tf.app.run()
[ "tensorflow.contrib.cluster_resolver.TPUClusterResolver", "tensorflow.metrics.accuracy", "tensorflow.FixedLenFeature", "tensorflow.nn.log_softmax", "tensorflow.reduce_sum", "tensorflow.gfile.GFile", "tensorflow.cast", "tensorflow.train.init_from_checkpoint", "tensorflow.gfile.MakeDirs", "tensorflow.to_int32", "tensorflow.contrib.tpu.TPUEstimatorSpec", "tensorflow.contrib.tpu.TPUEstimator", "tensorflow.data.TFRecordDataset", "tensorflow.truncated_normal_initializer", "tensorflow.python_io.TFRecordWriter", "tensorflow.logging.set_verbosity", "tensorflow.trainable_variables", "tensorflow.parse_single_example", "tensorflow.argmax", "tensorflow.app.run", "tensorflow.nn.dropout", "tensorflow.metrics.mean", "tensorflow.matmul", "tensorflow.gfile.Open", "tensorflow.shape", "tensorflow.zeros_initializer", "tensorflow.logging.info", "tensorflow.one_hot", "tensorflow.contrib.tpu.TPUConfig", "tensorflow.train.Features", "tensorflow.nn.bias_add", "tensorflow.nn.softmax", "tensorflow.constant", "tensorflow.train.Scaffold", "tensorflow.reduce_mean", "tensorflow.flags.DEFINE_string", "tensorflow.variable_scope" ]
model/bert/run_classifier.py
[(102, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""tpu_name"""', 'None', '"""The Cloud TPU to use for training. This should be either the name used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url."""'], {}), True, 'import tensorflow as tf\n'),
 (108, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""tpu_zone"""', 'None', '"""[Optional] GCE zone where the Cloud TPU is located in. If not specified, we will attempt to automatically detect the GCE project from metadata."""'], {}), True, 'import tensorflow as tf\n'),
 (114, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""gcp_project"""', 'None', '"""[Optional] Project name for the Cloud TPU-enabled project. If not specified, we will attempt to automatically detect the GCE project from metadata."""'], {}), True, 'import tensorflow as tf\n'),
 (120, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""master"""', 'None', '"""[Optional] TensorFlow master URL."""'], {}), True, 'import tensorflow as tf\n'),
 (483, 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['output_file'], {}), True, 'import tensorflow as tf\n'),
 (577, 'model.bert.modeling.BertModel', 'modeling.BertModel', ([], {'config': 'bert_config', 'is_training': 'is_training', 'input_ids': 'input_ids', 'input_mask': 'input_mask', 'token_type_ids': 'segment_ids', 'use_one_hot_embeddings': 'use_one_hot_embeddings'}), False, 'from model.bert import modeling\n'),
 (784, 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), True, 'import tensorflow as tf\n'),
 (793, 'model.bert.tokenization.validate_case_matches_checkpoint', 'tokenization.validate_case_matches_checkpoint', (['FLAGS.do_lower_case', 'FLAGS.init_checkpoint'], {}), False, 'from model.bert import tokenization\n'),
 (800, 'model.bert.modeling.BertConfig.from_json_file', 'modeling.BertConfig.from_json_file', (['FLAGS.bert_config_file'], {}), False, 'from model.bert import modeling\n'),
 (808, 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['FLAGS.output_dir'], {}), True, 'import tensorflow as tf\n'),
 (819, 'model.bert.tokenization.FullTokenizer', 'tokenization.FullTokenizer', ([], {'vocab_file': 'FLAGS.vocab_file', 'do_lower_case': 'FLAGS.do_lower_case'}), False, 'from model.bert import tokenization\n'),
 (859, 'tensorflow.contrib.tpu.TPUEstimator', 'tf.contrib.tpu.TPUEstimator', ([], {'use_tpu': 'FLAGS.use_tpu', 'model_fn': 'model_fn', 'config': 'run_config', 'train_batch_size': 'FLAGS.train_batch_size', 'eval_batch_size': 'FLAGS.eval_batch_size', 'predict_batch_size': 'FLAGS.predict_batch_size'}), True, 'import tensorflow as tf\n'),
 (981, 'tensorflow.app.run', 'tf.app.run', ([], {}), True, 'import tensorflow as tf\n'),
 (461, 'tensorflow.logging.info', 'tf.logging.info', (['"""*** Example ***"""'], {}), True, 'import tensorflow as tf\n'),
 (462, 'tensorflow.logging.info', 'tf.logging.info', (["('guid: %s' % example.guid)"], {}), True, 'import tensorflow as tf\n'),
 (468, 'tensorflow.logging.info', 'tf.logging.info', (["('label: %s (id = %d)' % (example.label, label_id))"], {}), True, 'import tensorflow as tf\n'),
 (496, 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), False, 'import collections\n'),
 (514, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[seq_length]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'),
 (515, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[seq_length]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'),
 (516, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[seq_length]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'),
 (517, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'),
 (518, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'),
 (523, 'tensorflow.parse_single_example', 'tf.parse_single_example', (['record', 'name_to_features'], {}), True, 'import tensorflow as tf\n'),
 (541, 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['input_file'], {}), True, 'import tensorflow as tf\n'),
 (601, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""loss"""'], {}), True, 'import tensorflow as tf\n'),
 (606, 'tensorflow.matmul', 'tf.matmul', (['output_layer', 'output_weights'], {'transpose_b': '(True)'}), True, 'import tensorflow as tf\n'),
 (607, 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['logits', 'output_bias'], {}), True, 'import tensorflow as tf\n'),
 (608, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'),
 (609, 'tensorflow.nn.log_softmax', 'tf.nn.log_softmax', (['logits'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'),
 (611, 'tensorflow.one_hot', 'tf.one_hot', (['labels'], {'depth': 'num_labels', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'),
 (614, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['per_example_loss'], {}), True, 'import tensorflow as tf\n'),
 (627, 'tensorflow.logging.info', 'tf.logging.info', (['"""*** Features ***"""'], {}), True, 'import tensorflow as tf\n'),
 (647, 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), True, 'import tensorflow as tf\n'),
 (663, 'tensorflow.logging.info', 'tf.logging.info', (['"""**** Trainable Variables ****"""'], {}), True, 'import tensorflow as tf\n'),
 (824, 'tensorflow.contrib.cluster_resolver.TPUClusterResolver', 'tf.contrib.cluster_resolver.TPUClusterResolver', (['FLAGS.tpu_name'], {'zone': 'FLAGS.tpu_zone', 'project': 'FLAGS.gcp_project'}), True, 'import tensorflow as tf\n'),
 (868, 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""train.tf_record"""'], {}), False, 'import os\n'),
 (871, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Running training *****"""'], {}), True, 'import tensorflow as tf\n'),
 (873, 'tensorflow.logging.info', 'tf.logging.info', (['""" Batch size = %d"""', 'FLAGS.train_batch_size'], {}), True, 'import tensorflow as tf\n'),
 (874, 'tensorflow.logging.info', 'tf.logging.info', (['""" Num steps = %d"""', 'num_train_steps'], {}), True, 'import tensorflow as tf\n'),
 (894, 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""eval.tf_record"""'], {}), False, 'import os\n'),
 (898, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Running evaluation *****"""'], {}), True, 'import tensorflow as tf\n'),
 (902, 'tensorflow.logging.info', 'tf.logging.info', (['""" Batch size = %d"""', 'FLAGS.eval_batch_size'], {}), True, 'import tensorflow as tf\n'),
 (921, 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""eval_results.txt"""'], {}), False, 'import os\n'),
 (939, 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""predict.tf_record"""'], {}), False, 'import os\n'),
 (944, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Running prediction*****"""'], {}), True, 'import tensorflow as tf\n'),
 (948, 'tensorflow.logging.info', 'tf.logging.info', (['""" Batch size = %d"""', 'FLAGS.predict_batch_size'], {}), True, 'import tensorflow as tf\n'),
 (959, 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""test_results.tsv"""'], {}), False, 'import os\n'),
 (199, 'tensorflow.gfile.Open', 'tf.gfile.Open', (['input_file', '"""r"""'], {}), True, 'import tensorflow as tf\n'),
 (200, 'csv.reader', 'csv.reader', (['f'], {'delimiter': '"""\t"""', 'quotechar': 'quotechar'}), False, 'import csv\n'),
 (216, 'os.path.join', 'os.path.join', (['data_dir', '"""multinli"""', "('multinli.train.%s.tsv' % self.language)"], {}), False, 'import os\n'),
 (223, 'model.bert.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[0]'], {}), False, 'from model.bert import tokenization\n'),
 (224, 'model.bert.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[1]'], {}), False, 'from model.bert import tokenization\n'),
 (225, 'model.bert.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[2]'], {}), False, 'from model.bert import tokenization\n'),
 (234, 'os.path.join', 'os.path.join', (['data_dir', '"""xnli.dev.tsv"""'], {}), False, 'import os\n'),
 (240, 'model.bert.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[0]'], {}), False, 'from model.bert import tokenization\n'),
 (243, 'model.bert.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[6]'], {}), False, 'from model.bert import tokenization\n'),
 (244, 'model.bert.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[7]'], {}), False, 'from model.bert import tokenization\n'),
 (245, 'model.bert.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[1]'], {}), False, 'from model.bert import tokenization\n'),
 (285, 'model.bert.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[8]'], {}), False, 'from model.bert import tokenization\n'),
 (286, 'model.bert.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[9]'], {}), False, 'from model.bert import tokenization\n'),
 (325, 'model.bert.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[3]'], {}), False, 'from model.bert import tokenization\n'),
 (326, 'model.bert.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[4]'], {}), False, 'from model.bert import tokenization\n'),
 (596, 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.02)'}), True, 'import tensorflow as tf\n'),
 (599, 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), True, 'import tensorflow as tf\n'),
 (604, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['output_layer'], {'keep_prob': '(0.9)'}), True, 'import tensorflow as tf\n'),
 (613, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(one_hot_labels * log_probs)'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'),
 (629, 'tensorflow.logging.info', 'tf.logging.info', (["(' name = %s, shape = %s' % (name, features[name].shape))"], {}), True, 'import tensorflow as tf\n'),
 (637, 'tensorflow.cast', 'tf.cast', (["features['is_real_example']"], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'),
 (652, 'model.bert.modeling.get_assignment_map_from_checkpoint', 'modeling.get_assignment_map_from_checkpoint', (['tvars', 'init_checkpoint'], {}), False, 'from model.bert import modeling\n'),
 (668, 'tensorflow.logging.info', 'tf.logging.info', (['""" name = %s, shape = %s%s"""', 'var.name', 'var.shape', 'init_string'], {}), True, 'import tensorflow as tf\n'),
 (674, 'model.bert.optimization.create_optimizer', 'optimization.create_optimizer',
(['total_loss', 'learning_rate', 'num_train_steps', 'num_warmup_steps', 'use_tpu'], {}), False, 'from model.bert import optimization\n'), (677, 'tensorflow.contrib.tpu.TPUEstimatorSpec', 'tf.contrib.tpu.TPUEstimatorSpec', ([], {'mode': 'mode', 'loss': 'total_loss', 'train_op': 'train_op', 'scaffold_fn': 'scaffold_fn'}), True, 'import tensorflow as tf\n'), (833, 'tensorflow.contrib.tpu.TPUConfig', 'tf.contrib.tpu.TPUConfig', ([], {'iterations_per_loop': 'FLAGS.iterations_per_loop', 'num_shards': 'FLAGS.num_tpu_cores', 'per_host_input_for_training': 'is_per_host'}), True, 'import tensorflow as tf\n'), (922, 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['output_eval_file', '"""w"""'], {}), True, 'import tensorflow as tf\n'), (923, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Eval results *****"""'], {}), True, 'import tensorflow as tf\n'), (960, 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['output_predict_file', '"""w"""'], {}), True, 'import tensorflow as tf\n'), (962, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Predict results *****"""'], {}), True, 'import tensorflow as tf\n'), (226, 'model.bert.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['"""contradictory"""'], {}), False, 'from model.bert import tokenization\n'), (227, 'model.bert.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['"""contradiction"""'], {}), False, 'from model.bert import tokenization\n'), (241, 'model.bert.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['self.language'], {}), False, 'from model.bert import tokenization\n'), (261, 'os.path.join', 'os.path.join', (['data_dir', '"""train.tsv"""'], {}), False, 'import os\n'), (266, 'os.path.join', 'os.path.join', (['data_dir', '"""dev_matched.tsv"""'], {}), False, 'import os\n'), (272, 'os.path.join', 'os.path.join', (['data_dir', '"""test_matched.tsv"""'], {}), False, 'import os\n'), (290, 'model.bert.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[-1]'], {}), False, 'from model.bert import tokenization\n'), (302, 'os.path.join', 'os.path.join', (['data_dir', '"""train.tsv"""'], {}), False, 'import os\n'), (307, 'os.path.join', 'os.path.join', (['data_dir', '"""dev.tsv"""'], {}), False, 'import os\n'), (312, 'os.path.join', 'os.path.join', (['data_dir', '"""test.tsv"""'], {}), False, 'import os\n'), (330, 'model.bert.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[0]'], {}), False, 'from model.bert import tokenization\n'), (342, 'os.path.join', 'os.path.join', (['data_dir', '"""train.tsv"""'], {}), False, 'import os\n'), (347, 'os.path.join', 'os.path.join', (['data_dir', '"""dev.tsv"""'], {}), False, 'import os\n'), (352, 'os.path.join', 'os.path.join', (['data_dir', '"""test.tsv"""'], {}), False, 'import os\n'), (367, 'model.bert.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[1]'], {}), False, 'from model.bert import tokenization\n'), (370, 'model.bert.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[3]'], {}), False, 'from model.bert import tokenization\n'), (371, 'model.bert.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[1]'], {}), False, 'from model.bert import tokenization\n'), (504, 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'features'}), True, 'import tensorflow as tf\n'), (530, 'tensorflow.to_int32', 'tf.to_int32', (['t'], {}), True, 'import tensorflow as tf\n'), (639, 'tensorflow.shape', 'tf.shape', 
(['label_ids'], {}), True, 'import tensorflow as tf\n'), (661, 'tensorflow.train.init_from_checkpoint', 'tf.train.init_from_checkpoint', (['init_checkpoint', 'assignment_map'], {}), True, 'import tensorflow as tf\n'), (696, 'tensorflow.contrib.tpu.TPUEstimatorSpec', 'tf.contrib.tpu.TPUEstimatorSpec', ([], {'mode': 'mode', 'loss': 'total_loss', 'eval_metrics': 'eval_metrics', 'scaffold_fn': 'scaffold_fn'}), True, 'import tensorflow as tf\n'), (702, 'tensorflow.contrib.tpu.TPUEstimatorSpec', 'tf.contrib.tpu.TPUEstimatorSpec', ([], {'mode': 'mode', 'predictions': "{'probabilities': probabilities}", 'scaffold_fn': 'scaffold_fn'}), True, 'import tensorflow as tf\n'), (738, 'tensorflow.constant', 'tf.constant', (['all_input_ids'], {'shape': '[num_examples, seq_length]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (742, 'tensorflow.constant', 'tf.constant', (['all_input_mask'], {'shape': '[num_examples, seq_length]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (747, 'tensorflow.constant', 'tf.constant', (['all_segment_ids'], {'shape': '[num_examples, seq_length]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (752, 'tensorflow.constant', 'tf.constant', (['all_label_ids'], {'shape': '[num_examples]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (284, 'model.bert.tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[0]'], {}), False, 'from model.bert import tokenization\n'), (656, 'tensorflow.train.init_from_checkpoint', 'tf.train.init_from_checkpoint', (['init_checkpoint', 'assignment_map'], {}), True, 'import tensorflow as tf\n'), (657, 'tensorflow.train.Scaffold', 'tf.train.Scaffold', ([], {}), True, 'import tensorflow as tf\n'), (685, 'tensorflow.argmax', 'tf.argmax', (['logits'], {'axis': '(-1)', 'output_type': 'tf.int32'}), True, 'import tensorflow as tf\n'), (686, 'tensorflow.metrics.accuracy', 'tf.metrics.accuracy', ([], {'labels': 'label_ids', 'predictions': 'predictions', 'weights': 'is_real_example'}), True, 'import tensorflow as tf\n'), (688, 'tensorflow.metrics.mean', 'tf.metrics.mean', ([], {'values': 'per_example_loss', 'weights': 'is_real_example'}), True, 'import tensorflow as tf\n'), (464, 'model.bert.tokenization.printable_text', 'tokenization.printable_text', (['x'], {}), False, 'from model.bert import tokenization\n')]
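The extracted calls above assemble a standard softmax classifier head: logits from a matmul plus bias_add, log-probabilities via log_softmax, and a one-hot cross-entropy reduced over labels then averaged over the batch. A minimal NumPy sketch of that loss computation follows; the shapes and values here are made up for illustration and are not the record's actual code.

import numpy as np

logits = np.array([[2.0, 0.5, -1.0]])  # [batch, num_labels], hypothetical values
labels = np.array([0])

# numerically stable log-softmax over the label axis
z = logits - logits.max(axis=-1, keepdims=True)
log_probs = z - np.log(np.exp(z).sum(axis=-1, keepdims=True))

# one-hot cross-entropy per example, then mean over the batch
one_hot_labels = np.eye(logits.shape[-1])[labels]
per_example_loss = -np.sum(one_hot_labels * log_probs, axis=-1)
loss = per_example_loss.mean()
print(loss)  # ~0.24 for these values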
NLeSC/parallel-roofit-scripts
70de07edfd8e400650af4cb34789dbb8b8fc9574
# -*- coding: utf-8 -*-
# @Author: patrick
# @Date:   2016-09-01 17:04:53
# @Last Modified by:   Patrick Bos
# @Last Modified time: 2016-10-26 14:48:09

# as per tensorflow styleguide
# https://www.tensorflow.org/versions/r0.11/how_tos/style_guide.html
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
from tensorflow.python.platform import tf_logging as logging
import numpy as np
import matplotlib.pyplot as plt
from timeit import default_timer as timer
import time
import os

tf.logging.set_verbosity(tf.logging.INFO)

project_dn = os.path.expanduser("~/projects/apcocsm/")
# project_dn = "/home/pbos/apcocsm/"

m0_num = 5.291
argpar_num = -20.0

constraint = {}
constraint['sigmean'] = (5.20, 5.30)
constraint['sigwidth'] = (0.001, 1.)
constraint['argpar'] = (-100., -1.)
constraint['nsig'] = (0., 10000)
constraint['nbkg'] = (0., 10000)
constraint['mes'] = (5.20, 5.30)

# keep a variable dictionary for easy key-based access compatible with constraints
vdict = {}

pi = tf.constant(np.pi, dtype=tf.float64, name="pi")
sqrt2pi = tf.constant(np.sqrt(2 * np.pi), dtype=tf.float64, name="sqrt2pi")
two = tf.constant(2, dtype=tf.float64, name="two")
one = tf.constant(1, dtype=tf.float64, name="one")
zero = tf.constant(0, dtype=tf.float64, name="zero")


def gradsafe_sqrt(x, clip_low=1e-18, name=None):
    with tf.name_scope(name, "gradsafe_sqrt"):
        return tf.sqrt(tf.clip_by_value(x, clip_low, x))


def argus_integral_phalf(m_low, m_high, m0, c):
    """
    Only valid for argus_pdf with p=0.5! Otherwise need to do numerical
    integral.
    """
    def F(m_bound, name=None):
        with tf.name_scope(name, "argus_integral_phalf_primitive"):
            a = tf.minimum(m_bound, m0)
            x = 1 - tf.pow(a / m0, 2)
            primitive = -0.5 * m0 * m0 * (tf.exp(c * x) * tf.sqrt(x) / c
                                          + 0.5 / tf.pow(-c, 1.5)
                                          * tf.sqrt(pi)
                                          * tf.erf(gradsafe_sqrt(-c * x)))
            # We have to safeguard the sqrt, because otherwise the analytic
            # derivative blows up for x = 0
            return primitive

    area = tf.sub(F(m_high, name="F2"), F(m_low, name="F1"),
                  name="argus_integral_phalf")
    return area


def argus_pdf_phalf_WN(m, m0, c, m_low, m_high):
    """
    WN: with normalization
    """
    norm = argus_integral_phalf(m_low, m_high, m0, c)
    return argus_pdf(m, m0, c) / norm


# // --- Observable ---
# RooRealVar mes("mes","m_{ES} (GeV)",5.20,5.30) ;

# // --- Build Gaussian signal PDF ---
# RooRealVar sigmean("sigmean","B^{#pm} mass",5.28,5.20,5.30) ;
# RooRealVar sigwidth("sigwidth","B^{#pm} width",0.0027,0.001,1.) ;

sigmean = tf.Variable(5.28, name="sigmean", dtype=tf.float64)
sigwidth = tf.Variable(0.0027, name="sigwidth", dtype=tf.float64)
vdict['sigmean'] = sigmean
vdict['sigwidth'] = sigwidth


# RooGaussian gauss("gauss","gaussian PDF",mes,sigmean,sigwidth) ;
def gaussian_pdf(x, mean, std):
    val = tf.div(tf.exp(-tf.pow((x - mean) / std, 2) / two),
                 (sqrt2pi * std), name="gaussian_pdf")
    return val


# // --- Build Argus background PDF ---
# RooRealVar argpar("argpar","argus shape parameter",-20.0,-100.,-1.) ;
# RooConstVar m0("m0", "resonant mass", 5.291);

argpar = tf.Variable(argpar_num, name="argpar", dtype=tf.float64)
m0 = tf.constant(m0_num, name="m0", dtype=tf.float64)
vdict['argpar'] = argpar


# RooArgusBG argus("argus","Argus PDF",mes,m0,argpar) ;
def argus_pdf(m, m0, c, p=0.5):
    t = m / m0
    u = 1 - t * t
    argus_t_ge_1 = m * tf.pow(u, p) * tf.exp(c * u)
    return tf.maximum(tf.zeros_like(m), argus_t_ge_1, name="argus_pdf")


# // --- Construct signal+background PDF ---
# RooRealVar nsig("nsig","#signal events",200,0.,10000) ;
# RooRealVar nbkg("nbkg","#background events",800,0.,10000) ;
nsig = tf.Variable(200, name="nsig", dtype=tf.float64)
nbkg = tf.Variable(800, name="nbkg", dtype=tf.float64)
vdict['nsig'] = nsig
vdict['nbkg'] = nbkg


# RooAddPdf sum("sum","g+a",RooArgList(gauss,argus),RooArgList(nsig,nbkg)) ;
# // --- Generate a toyMC sample from composite PDF ---
# RooDataSet *data = sum.generate(mes,2000) ;
def sum_pdf(mes, nsig, sigmean, sigwidth, nbkg, m0, argpar, mes_low, mes_high):
    add = tf.add(nsig * gaussian_pdf(mes, sigmean, sigwidth),
                 nbkg * argus_pdf_phalf_WN(mes, m0, argpar, mes_low, mes_high),
                 name="sum_pdf")
    return tf.div(add, nsig + nbkg, name="sum_pdf_normalized")


# generate the data in RooFit and import it
# run this in ROOT:
# data.write("roofit_demo_random_data_values.dat");
# to read it back in:
# RooDataSet *data;
# data->RooDataSet.read("roofit_demo_random_data_values.dat", RooArgList(mes))
data_raw = np.loadtxt(project_dn + "roofit_demo_random_data_values.dat",
                      dtype=np.float64)
data = tf.constant(data_raw, name='event_data', dtype=tf.float64)

# // --- Perform extended ML fit of composite PDF to toy data ---
# sum.fitTo(*data,"Extended") ;

# convert to tf constants, otherwise you'll get complaints about float32s...
constraint_tf = {}
for key in constraint.keys():
    low = constraint[key][0]
    high = constraint[key][1]
    constraint_tf[key] = (tf.constant(low, dtype=tf.float64),
                          tf.constant(high, dtype=tf.float64))

print("N.B.: using direct data entry")
likelihood = sum_pdf(data, nsig, sigmean, sigwidth, nbkg, m0, argpar,
                     constraint_tf['mes'][0], constraint_tf['mes'][1])
nll = tf.neg(tf.reduce_sum(tf.log(likelihood)), name="nll")

variables = tf.all_variables()

grads = tf.gradients(nll, variables)


# ### build constraint inequalities
inequalities = []
for key, (lower, upper) in constraint_tf.iteritems():
    if key != 'mes':
        inequalities.append(vdict[key] - lower)
        inequalities.append(upper - vdict[key])

# ### build bounds instead of inequalities (only for L-BFGS-B, TNC and SLSQP)
# N.B.: order important! Also supply variables to be sure the orders match.
bounds = []
for v in variables:
    key = v.name[:v.name.find(':')]
    lower, upper = constraint[key]
    bounds.append((lower, upper))

max_steps = 1000
status_every = 1

# Create an optimizer with the desired parameters.
opt = tf.contrib.opt.ScipyOptimizerInterface(nll,
                                             options={'maxiter': max_steps,
                                                      # 'disp': True,
                                                      # 'tol': 1e-20,
                                                      'maxls': 10,
                                                      },
                                             # inequalities=inequalities,
                                             # method='SLSQP'  # supports inequalities
                                             # method='BFGS',
                                             bounds=bounds,
                                             var_list=variables,  # supply with bounds to match order!
                                             tol=1e-14,
                                             )

tf.scalar_summary('nll', nll)

init_op = tf.initialize_all_variables()

# from http://stackoverflow.com/a/35907755/1199693
config = tf.ConfigProto(graph_options=tf.GraphOptions(
    # optimizer_options=tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L2)))  # L2 doesn't work (probably removed)
    optimizer_options=tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L1)))

# start session
with tf.Session(config=config) as sess:
    # Merge all the summaries and write them out to /tmp/mnist_logs (by default)
    summarize_merged = tf.merge_all_summaries()
    summary_writer = tf.train.SummaryWriter('./train/%i' % int(time.time()),
                                            sess.graph)
    # Run the init operation.
    sess.run(init_op)

    true_vars = {}
    for v in variables:
        key = v.name[:v.name.find(':')]
        true_vars[key] = v.eval()

    true_vars['m0'] = m0.eval()

    print("name\t" + "\t".join([v.name.ljust(10) for v in variables]) + "\t | <nll>\t\t | step")
    print("init\t" + "\t".join(["%6.4e" % v for v in sess.run(variables)]) + "\t | %f" % np.mean(sess.run(nll)))
    print("")

    step = 0

    nll_value_opt = sess.run(nll)

    def step_callback(var_values_opt):
        global step, sess, summary_writer, nll_value_opt
        summary = sess.run(summarize_merged)
        summary_writer.add_summary(summary, step)

        if step % status_every == 0:
            print("opt\t" + "\t".join(["%6.4e" % v for v in var_values_opt]) + "\t | %f\t | %i" % (np.mean(nll_value_opt), step))

        step += 1

    def loss_callback(nll_value_opt_step, g1, g2, g3, g4, g5, *other_vars):
        global nll_value_opt
        nll_value_opt = nll_value_opt_step
        print("loss_callback:")
        print("nll:", nll_value_opt)
        print("gradients:", g1, g2, g3, g4, g5)
        ov = "\t".join([str(v) for v in other_vars])
        if ov:
            print("variables:", ov)
        print("")

    """
    start = timer()
    opt.minimize(session=sess, step_callback=step_callback,
                 loss_callback=loss_callback, fetches=[nll] + grads + variables)
    # N.B.: callbacks not supported with SLSQP!
    end = timer()
    print("Loop took %f seconds" % (end - start))
    """

    N_loops = 100
    timings = []
    tf.logging.set_verbosity(tf.logging.ERROR)
    for i in range(N_loops):
        sess.run(init_op)
        start = timer()
        opt.minimize(session=sess)
        end = timer()
        timings.append(end - start)
    tf.logging.set_verbosity(tf.logging.INFO)

    print("Timing total: %f s, average: %f s, minimum: %f s" % (np.sum(timings), np.mean(timings), np.min(timings)))

    # logging.info("get fitted variables")
    fit_vars = {}
    for v in variables:
        key = v.name[:v.name.find(':')]
        fit_vars[key] = v.eval()

    fit_vars['m0'] = m0.eval()

    print("fit \t" + "\t".join(["%6.4e" % v for v in sess.run(variables)]) + "\t | %f" % np.mean(sess.run(nll)))

    root_fit_vals = {'argpar': -22.8765,
                     'nbkg': 816.137,
                     'nsig': 195.976,
                     'sigmean': 5.27987,
                     'sigwidth': 3.01048e-3,
                     'nll': -4976.4}

    print("=== WARNING: setting variables to ROOT fit values! ===")
    for v in variables:
        key = v.name[:v.name.find(':')]
        sess.run(v.assign(root_fit_vals[key]))

    nll_root_val = sess.run(nll)

    print("ROOT \t" + "\t".join(["%6.4e" % root_fit_vals[v.name[:v.name.find(':')]] for v in variables]) + "\t | %f (own calc: %f)" % (root_fit_vals['nll'], nll_root_val))

    # FCN=-4976.4 FROM MIGRAD   STATUS=CONVERGED   101 CALLS   102 TOTAL
    #             EDM=1.00861e-05   STRATEGY= 1   ERROR MATRIX ACCURATE
    # EXT PARAMETER                                STEP          FIRST
    # NO.  NAME      VALUE          ERROR          SIZE          DERIVATIVE
    #  1   argpar    -2.28765e+01   3.42616e+00    3.56317e-03   -1.23184e-02
    #  2   nbkg       8.16137e+02   9.44657e+02    1.04092e-03    7.76879e-02
    #  3   nsig       1.95976e+02   2.30582e+02    4.93414e-04   -1.64158e-01
    #  4   sigmean    5.27987e+00   2.15796e-04    2.61026e-04   -3.20933e-01
    #  5   sigwidth   3.01048e-03   1.99232e-04    1.93308e-04    5.48995e-01

    # // --- Plot toy data and composite PDF overlaid ---
    # RooPlot* mesframe = mes.frame() ;
    # data->plotOn(mesframe) ;
    # sum.plotOn(mesframe) ;
    # sum.plotOn(mesframe,Components(argus),LineStyle(kDashed)) ;
    # mesframe->Draw();

    # logging.info("create data histogram")
    counts, bins = np.histogram(data.eval(), bins=100)
    x_bins = (bins[:-1] + bins[1:]) / 2

    # logging.info("evaluate pdf values")
    y_fit = sum_pdf(x_bins, mes_low=constraint_tf['mes'][0],
                    mes_high=constraint_tf['mes'][1], **fit_vars).eval()
    argus_fit = argus_pdf_phalf_WN(x_bins, fit_vars['m0'], fit_vars['argpar'],
                                   m_low=constraint_tf['mes'][0],
                                   m_high=constraint_tf['mes'][1]).eval()
    y_true = sum_pdf(x_bins, mes_low=constraint_tf['mes'][0],
                     mes_high=constraint_tf['mes'][1], **true_vars).eval()

    # normalize fit values to data counts
    y_fit_norm = np.sum(counts) / np.sum(y_fit)
    y_fit = [y * y_fit_norm for y in y_fit]

    argus_fit_norm = fit_vars['nbkg'] / (fit_vars['nsig'] + fit_vars['nbkg'])
    argus_fit = [a * argus_fit_norm * y_fit_norm for a in argus_fit]

    y_true_norm = np.sum(counts) / np.sum(y_true)
    y_true = [y * y_true_norm for y in y_true]

# plot results
# plt.errorbar(x_bins, counts, yerr=np.sqrt(counts), fmt='.g', label="input data")
# plt.plot(x_bins, y_fit, '-b', label="fit sum_pdf")
# plt.plot(x_bins, argus_fit, '--b', label="fit argus_pdf")
# plt.plot(x_bins, y_true, ':k', label="true sum_pdf")
# plt.legend(loc='best')
# plt.show()
[ "numpy.sqrt", "tensorflow.minimum", "numpy.mean", "tensorflow.all_variables", "tensorflow.Variable", "tensorflow.merge_all_summaries", "tensorflow.OptimizerOptions", "tensorflow.gradients", "tensorflow.div", "tensorflow.initialize_all_variables", "tensorflow.logging.set_verbosity", "tensorflow.Session", "tensorflow.name_scope", "tensorflow.contrib.opt.ScipyOptimizerInterface", "tensorflow.pow", "numpy.min", "tensorflow.exp", "tensorflow.zeros_like", "numpy.sum", "tensorflow.clip_by_value", "tensorflow.constant", "tensorflow.scalar_summary", "tensorflow.log", "tensorflow.sqrt", "numpy.loadtxt" ]
tensorflow_testing/tensorflow_roofit_demo_3_scipy.py
[(21, 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), True, 'import tensorflow as tf\n'), (24, 'os.path.expanduser', 'os.path.expanduser', (['"""~/projects/apcocsm/"""'], {}), False, 'import os\n'), (43, 'tensorflow.constant', 'tf.constant', (['np.pi'], {'dtype': 'tf.float64', 'name': '"""pi"""'}), True, 'import tensorflow as tf\n'), (45, 'tensorflow.constant', 'tf.constant', (['(2)'], {'dtype': 'tf.float64', 'name': '"""two"""'}), True, 'import tensorflow as tf\n'), (46, 'tensorflow.constant', 'tf.constant', (['(1)'], {'dtype': 'tf.float64', 'name': '"""one"""'}), True, 'import tensorflow as tf\n'), (47, 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'tf.float64', 'name': '"""zero"""'}), True, 'import tensorflow as tf\n'), (88, 'tensorflow.Variable', 'tf.Variable', (['(5.28)'], {'name': '"""sigmean"""', 'dtype': 'tf.float64'}), True, 'import tensorflow as tf\n'), (89, 'tensorflow.Variable', 'tf.Variable', (['(0.0027)'], {'name': '"""sigwidth"""', 'dtype': 'tf.float64'}), True, 'import tensorflow as tf\n'), (106, 'tensorflow.Variable', 'tf.Variable', (['argpar_num'], {'name': '"""argpar"""', 'dtype': 'tf.float64'}), True, 'import tensorflow as tf\n'), (107, 'tensorflow.constant', 'tf.constant', (['m0_num'], {'name': '"""m0"""', 'dtype': 'tf.float64'}), True, 'import tensorflow as tf\n'), (124, 'tensorflow.Variable', 'tf.Variable', (['(200)'], {'name': '"""nsig"""', 'dtype': 'tf.float64'}), True, 'import tensorflow as tf\n'), (125, 'tensorflow.Variable', 'tf.Variable', (['(800)'], {'name': '"""nbkg"""', 'dtype': 'tf.float64'}), True, 'import tensorflow as tf\n'), (148, 'numpy.loadtxt', 'np.loadtxt', (["(project_dn + 'roofit_demo_random_data_values.dat')"], {'dtype': 'np.float64'}), True, 'import numpy as np\n'), (150, 'tensorflow.constant', 'tf.constant', (['data_raw'], {'name': '"""event_data"""', 'dtype': 'tf.float64'}), True, 'import tensorflow as tf\n'), (169, 'tensorflow.all_variables', 'tf.all_variables', ([], {}), True, 'import tensorflow as tf\n'), (171, 'tensorflow.gradients', 'tf.gradients', (['nll', 'variables'], {}), True, 'import tensorflow as tf\n'), (194, 'tensorflow.contrib.opt.ScipyOptimizerInterface', 'tf.contrib.opt.ScipyOptimizerInterface', (['nll'], {'options': "{'maxiter': max_steps, 'maxls': 10}", 'bounds': 'bounds', 'var_list': 'variables', 'tol': '(1e-14)'}), True, 'import tensorflow as tf\n'), (208, 'tensorflow.scalar_summary', 'tf.scalar_summary', (['"""nll"""', 'nll'], {}), True, 'import tensorflow as tf\n'), (210, 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), True, 'import tensorflow as tf\n'), (44, 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), True, 'import numpy as np\n'), (139, 'tensorflow.div', 'tf.div', (['add', '(nsig + nbkg)'], {'name': '"""sum_pdf_normalized"""'}), True, 'import tensorflow as tf\n'), (218, 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), True, 'import tensorflow as tf\n'), (220, 'tensorflow.merge_all_summaries', 'tf.merge_all_summaries', ([], {}), True, 'import tensorflow as tf\n'), (275, 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.ERROR'], {}), True, 'import tensorflow as tf\n'), (284, 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), True, 'import tensorflow as tf\n'), (51, 'tensorflow.name_scope', 'tf.name_scope', (['name', '"""gradsafe_sqrt"""'], {}), True, 'import tensorflow as tf\n'), (116, 'tensorflow.exp', 'tf.exp', (['(c * u)'], {}), True, 'import 
tensorflow as tf\n'), (117, 'tensorflow.zeros_like', 'tf.zeros_like', (['m'], {}), True, 'import tensorflow as tf\n'), (160, 'tensorflow.constant', 'tf.constant', (['low'], {'dtype': 'tf.float64'}), True, 'import tensorflow as tf\n'), (161, 'tensorflow.constant', 'tf.constant', (['high'], {'dtype': 'tf.float64'}), True, 'import tensorflow as tf\n'), (166, 'tensorflow.log', 'tf.log', (['likelihood'], {}), True, 'import tensorflow as tf\n'), (279, 'timeit.default_timer', 'timer', ([], {}), True, 'from timeit import default_timer as timer\n'), (281, 'timeit.default_timer', 'timer', ([], {}), True, 'from timeit import default_timer as timer\n'), (338, 'numpy.sum', 'np.sum', (['counts'], {}), True, 'import numpy as np\n'), (338, 'numpy.sum', 'np.sum', (['y_fit'], {}), True, 'import numpy as np\n'), (344, 'numpy.sum', 'np.sum', (['counts'], {}), True, 'import numpy as np\n'), (344, 'numpy.sum', 'np.sum', (['y_true'], {}), True, 'import numpy as np\n'), (52, 'tensorflow.clip_by_value', 'tf.clip_by_value', (['x', 'clip_low', 'x'], {}), True, 'import tensorflow as tf\n'), (61, 'tensorflow.name_scope', 'tf.name_scope', (['name', '"""argus_integral_phalf_primitive"""'], {}), True, 'import tensorflow as tf\n'), (62, 'tensorflow.minimum', 'tf.minimum', (['m_bound', 'm0'], {}), True, 'import tensorflow as tf\n'), (116, 'tensorflow.pow', 'tf.pow', (['u', 'p'], {}), True, 'import tensorflow as tf\n'), (63, 'tensorflow.pow', 'tf.pow', (['(a / m0)', '(2)'], {}), True, 'import tensorflow as tf\n'), (215, 'tensorflow.OptimizerOptions', 'tf.OptimizerOptions', ([], {'opt_level': 'tf.OptimizerOptions.L1'}), True, 'import tensorflow as tf\n'), (221, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (286, 'numpy.sum', 'np.sum', (['timings'], {}), True, 'import numpy as np\n'), (286, 'numpy.mean', 'np.mean', (['timings'], {}), True, 'import numpy as np\n'), (286, 'numpy.min', 'np.min', (['timings'], {}), True, 'import numpy as np\n'), (97, 'tensorflow.pow', 'tf.pow', (['((x - mean) / std)', '(2)'], {}), True, 'import tensorflow as tf\n'), (64, 'tensorflow.exp', 'tf.exp', (['(c * x)'], {}), True, 'import tensorflow as tf\n'), (64, 'tensorflow.sqrt', 'tf.sqrt', (['x'], {}), True, 'import tensorflow as tf\n'), (64, 'tensorflow.sqrt', 'tf.sqrt', (['pi'], {}), True, 'import tensorflow as tf\n'), (246, 'numpy.mean', 'np.mean', (['nll_value_opt'], {}), True, 'import numpy as np\n'), (64, 'tensorflow.pow', 'tf.pow', (['(-c)', '(1.5)'], {}), True, 'import tensorflow as tf\n')]
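The script above funnels a TensorFlow NLL graph into SciPy's bounded L-BFGS-B via tf.contrib.opt.ScipyOptimizerInterface. A minimal standalone sketch of the same bounded maximum-likelihood fit, expressed directly against scipy.optimize rather than through TensorFlow; the toy Gaussian sample stands in for the RooFit data file and is not the script's actual data.

import numpy as np
from scipy.optimize import minimize

rng = np.random.default_rng(0)
data = rng.normal(5.28, 0.0027, size=2000)  # hypothetical stand-in for the "mes" sample

def nll(params):
    # negative log-likelihood of a Gaussian with parameters (sigmean, sigwidth)
    mean, width = params
    return np.sum(0.5 * ((data - mean) / width) ** 2
                  + np.log(width * np.sqrt(2 * np.pi)))

result = minimize(nll, x0=[5.25, 0.01], method='L-BFGS-B',
                  bounds=[(5.20, 5.30), (0.001, 1.0)], tol=1e-14)
print(result.x)  # fitted (sigmean, sigwidth)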
chunnlp/text_gen
b4ee8fe1ef01c2fe9ad981365111bdfb636e70e7
import time

import numpy as np
import tensorflow as tf

import data_reader as reader

from tensorflow.python.client import device_lib

flags = tf.flags
logging = tf.logging

flags.DEFINE_string('model', 'medium', 'model config')
flags.DEFINE_string('data_path', 'data', 'path to data')
flags.DEFINE_string('save_path', 'model', 'path to save model')
flags.DEFINE_integer('num_gpus', 1, 'number of gpus')
flags.DEFINE_string('rnn_mode', None, 'rnn type')
flags.DEFINE_string('mode', 'train', 'train or test')
FLAGS = flags.FLAGS
BASIC = 'basic'
CUDNN = 'cudnn'
BLOCK = 'block'


class DataInput(object):
    def __init__(self, config, data, name=None):
        self.batch_size = batch_size = config.batch_size
        self.num_steps = num_steps = config.num_steps
        self.epoch_size = ((len(data) // batch_size) - 1) // num_steps
        self.input_data, self.targets = reader.ptb_producer(
            data, batch_size, num_steps, name=name)


class Model(object):
    def __init__(self, is_training, config, input_, graph):
        self._is_training = is_training
        self._input = input_
        self._rnn_params = None
        self._cell = None
        self.batch_size = input_.batch_size
        self.num_steps = input_.num_steps
        hidden_size = config.hidden_size
        vocab_size = config.vocab_size
        self.graph = graph

        with self.graph.as_default():
            with tf.device('/cpu:0'):
                embedding = tf.get_variable(
                    'embedding', [vocab_size, hidden_size], dtype=tf.float32)
                inputs = tf.nn.embedding_lookup(embedding, input_.input_data)

            if is_training and config.keep_prob < 1:
                inputs = tf.nn.dropout(inputs, config.keep_prob)

            output, state = self._build_rnn_graph(inputs, config, is_training)

            softmax_w = tf.get_variable(
                'softmax_w', [hidden_size, vocab_size], dtype=tf.float32)
            softmax_b = tf.get_variable('softmax_b', [vocab_size],
                                        dtype=tf.float32)
            logits = tf.nn.xw_plus_b(output, softmax_w, softmax_b)
            logits = tf.reshape(logits,
                                [self.batch_size, self.num_steps, vocab_size])

            loss = tf.contrib.seq2seq.sequence_loss(
                logits,
                input_.targets,
                tf.ones([self.batch_size, self.num_steps], dtype=tf.float32),
                average_across_timesteps=False,
                average_across_batch=True)

            self._cost = tf.reduce_sum(loss)
            self._final_state = state

            if not is_training:
                return

            self._lr = tf.Variable(0., trainable=False)
            tvars = tf.trainable_variables()
            grads, _ = tf.clip_by_global_norm(tf.gradients(self._cost, tvars),
                                              config.max_grad_norm)
            optimizer = tf.train.GradientDescentOptimizer(self._lr)
            self._train_op = optimizer.apply_gradients(
                zip(grads, tvars),
                global_step=tf.train.get_or_create_global_step())

            self._new_lr = tf.placeholder(
                tf.float32, shape=[], name='new_learning_rate')
            self._lr_update = tf.assign(self._lr, self._new_lr)
            self.saver = tf.train.Saver(tf.global_variables())

    def _get_lstm_cell(self, config, is_training):
        if config.rnn_mode == BASIC:
            return tf.contrib.rnn.BasicLSTMCell(
                config.hidden_size, forget_bias=0., state_is_tuple=True,
                reuse=not is_training)
        if config.rnn_mode == BLOCK:
            return tf.contrib.rnn.LSTMBlockCell(
                config.hidden_size, forget_bias=0.)
        raise ValueError('rnn_mode {} not supported'.format(config.rnn_mode))

    def _build_rnn_graph(self, inputs, config, is_training):
        def make_cell():
            cell = self._get_lstm_cell(config, is_training)
            if is_training and config.keep_prob < 1:
                cell = tf.contrib.rnn.DropoutWrapper(
                    cell, output_keep_prob=config.keep_prob)
            return cell

        cell = tf.contrib.rnn.MultiRNNCell(
            [make_cell() for _ in range(config.num_layers)],
            state_is_tuple=True)

        self._initial_state = cell.zero_state(config.batch_size, tf.float32)
        state = self._initial_state

        outputs = []
        with tf.variable_scope('RNN'):
            for time_step in range(self.num_steps):
                if time_step > 0:
                    tf.get_variable_scope().reuse_variables()
                (cell_output, state) = cell(inputs[:, time_step, :], state)
                outputs.append(cell_output)
        output = tf.reshape(tf.concat(outputs, 1), [-1, config.hidden_size])
        return output, state

    def assign_lr(self, session, lr_value):
        session.run(self._lr_update, feed_dict={self._new_lr: lr_value})

    def with_prefix(self, prefix, name):
        return '/'.join((prefix, name))

    def export_ops(self, name):
        self._name = name
        ops = {self.with_prefix(self._name, 'cost'): self._cost}
        if self._is_training:
            ops.update(lr=self._lr, new_lr=self._new_lr,
                       lr_update=self._lr_update)
            if self._rnn_params:
                ops.update(rnn_params=self._rnn_params)
        for name, op in ops.items():
            tf.add_to_collection(name, op)
        self._initial_state_name = self.with_prefix(self._name, 'initial')
        self._final_state_name = self.with_prefix(self._name, 'final')
        for state_tuple in self._initial_state:
            tf.add_to_collection(self._initial_state_name, state_tuple.c)
            tf.add_to_collection(self._initial_state_name, state_tuple.h)
        for state_tuple in self._final_state:
            tf.add_to_collection(self._final_state_name, state_tuple.c)
            tf.add_to_collection(self._final_state_name, state_tuple.h)

    def import_state_tuples(self, state_tuples, name, num_replicas):
        restored = []
        for i in range(len(state_tuples) * num_replicas):
            c = tf.get_collection_ref(name)[2 * i + 0]
            h = tf.get_collection_ref(name)[2 * i + 1]
            restored.append(tf.contrib.rnn.LSTMStateTuple(c, h))
        return tuple(restored)

    def import_ops(self):
        if self._is_training:
            self._train_op = tf.get_collection_ref('train_op')[0]
            self._lr = tf.get_collection_ref('lr')[0]
            self._new_lr = tf.get_collection_ref('new_lr')[0]
            self._lr_update = tf.get_collection_ref('lr_update')[0]
            rnn_params = tf.get_collection_ref('rnn_params')
            if self._cell and rnn_params:
                params_saveable = tf.contrib.cudnn_rnn.RNNParamsSaveable(
                    self._cell,
                    self._cell.params_to_canonical,
                    self._cell.canonical_to_params,
                    rnn_params,
                    base_variable_scope='Model/RNN')
                tf.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS,
                                     params_saveable)
        self._cost = tf.get_collection_ref(
            self.with_prefix(self._name, 'cost'))[0]
        num_replicas = FLAGS.num_gpus if self._name == 'Train' else 1
        self._initial_state = self.import_state_tuples(
            self._initial_state, self._initial_state_name, num_replicas)
        self._final_state = self.import_state_tuples(
            self._final_state, self._final_state_name, num_replicas)

    @property
    def input(self):
        return self._input

    @property
    def initial_state(self):
        return self._initial_state

    @property
    def cost(self):
        return self._cost

    @property
    def final_state(self):
        return self._final_state

    @property
    def lr(self):
        return self._lr

    @property
    def train_op(self):
        return self._train_op

    @property
    def initial_state_name(self):
        return self._initial_state_name

    @property
    def final_state_name(self):
        return self._final_state_name


class MediumConfig(object):
    init_scale = 0.05
    learning_rate = 1.
    max_grad_norm = 5
    num_layers = 2
    num_steps = 35
    hidden_size = 650
    max_epoch = 6
    max_max_epoch = 39
    keep_prob = 0.5
    lr_decay = 0.8
    batch_size = 20
    vocab_size = 10000
    rnn_mode = BLOCK


class LargeConfig(object):
    init_scale = 0.04
    learning_rate = 1.
    max_grad_norm = 10
    num_layers = 2
    num_steps = 35
    hidden_size = 1500
    max_epoch = 14
    max_max_epoch = 55
    keep_prob = 0.35
    lr_decay = 1 / 1.15
    batch_size = 20
    vocab_size = 10000
    rnn_mode = BLOCK


def run_epoch(session, model, eval_op=None, verbose=False):
    start_time = time.time()
    costs = 0.
    iters = 0
    state = session.run(model.initial_state)

    fetches = {
        'cost': model.cost,
        'final_state': model.final_state
    }
    if eval_op is not None:
        fetches['eval_op'] = eval_op

    for step in range(model.input.epoch_size):
        feed_dict = {}
        for i, (c, h) in enumerate(model.initial_state):
            feed_dict[c] = state[i].c
            feed_dict[h] = state[i].h

        vals = session.run(fetches, feed_dict)
        cost = vals['cost']
        state = vals['final_state']

        costs += cost
        iters += model.input.num_steps

        if verbose and step % (model.input.epoch_size // 10) == 10:
            print('{:.3f} perplexity: {:.3f} speed: {:.0f} wps'.format(
                step * 1. / model.input.epoch_size,
                np.exp(costs / iters),
                iters * model.input.batch_size * max(1, FLAGS.num_gpus) /
                (time.time() - start_time)))
    return np.exp(costs / iters)


def get_config():
    config = None
    if FLAGS.model == 'medium':
        config = MediumConfig()
    elif FLAGS.model == 'large':
        config = LargeConfig()
    else:
        raise ValueError('Invalid model: {}'.format(FLAGS.model))
    if FLAGS.rnn_mode:
        config.rnn_mode = FLAGS.rnn_mode
    if FLAGS.num_gpus != 1 or tf.__version__ < '1.3.0':
        config.rnn_mode = BASIC
    return config


def main(_):
    if not FLAGS.data_path:
        raise ValueError('data_path must be set')
    gpus = [
        x.name for x in device_lib.list_local_devices()
        if x.device_type == 'GPU'
    ]
    if FLAGS.num_gpus > len(gpus):
        raise ValueError('Invalid num_gpus')

    raw_data = reader.ptb_raw_data(FLAGS.data_path)
    train_data, valid_data, test_data, _ = raw_data

    config = get_config()
    eval_config = get_config()
    eval_config.batch_size = 1
    eval_config.num_steps = 1

    train_graph = tf.Graph()
    eval_graph = tf.Graph()
    infer_graph = tf.Graph()

    with train_graph.as_default():
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)
        with tf.name_scope('Train'):
            train_input = DataInput(config=config, data=train_data,
                                    name='TrainInput')
            with tf.variable_scope('Model', reuse=None,
                                   initializer=initializer):
                m = Model(is_training=True, config=config,
                          input_=train_input, graph=train_graph)
            tf.summary.scalar('Training Loss', m.cost)
            tf.summary.scalar('Learning rate', m.lr)

    latest_ckpt = tf.train.latest_checkpoint(FLAGS.save_path)

    with train_graph.as_default():
        sv = tf.train.Supervisor(logdir=FLAGS.save_path)
        config_proto = tf.ConfigProto(log_device_placement=False,
                                      allow_soft_placement=True)
        with sv.managed_session(config=config_proto) as train_sess:
        #with tf.Session(config=config_proto) as train_sess:
            train_sess.run(tf.global_variables_initializer())
            for i in range(config.max_max_epoch):
                lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.)
                m.assign_lr(train_sess, config.learning_rate * lr_decay)

                train_perplexity = run_epoch(train_sess, m,
                                             #eval_op=m.train_op,
                                             verbose=True)
                print('Epoch {} Train Perplexity: {:.3f}'.format(
                    i + 1, train_perplexity))
                if i % 5 == 0:
                    sv.saver.save(train_sess, FLAGS.save_path,
                                  global_step=sv.global_step)


if __name__ == '__main__':
    tf.app.run()
[ "tensorflow.get_variable", "tensorflow.device", "tensorflow.concat", "tensorflow.python.client.device_lib.list_local_devices", "tensorflow.reduce_sum", "tensorflow.global_variables", "tensorflow.contrib.rnn.LSTMBlockCell", "numpy.exp", "tensorflow.summary.scalar", "tensorflow.Graph", "tensorflow.Variable", "tensorflow.random_uniform_initializer", "tensorflow.gradients", "tensorflow.train.get_or_create_global_step", "tensorflow.ConfigProto", "tensorflow.name_scope", "tensorflow.trainable_variables", "tensorflow.app.run", "tensorflow.nn.dropout", "tensorflow.nn.xw_plus_b", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.train.GradientDescentOptimizer", "tensorflow.contrib.rnn.LSTMStateTuple", "tensorflow.add_to_collection", "tensorflow.nn.embedding_lookup", "tensorflow.train.latest_checkpoint", "tensorflow.get_collection_ref", "tensorflow.contrib.rnn.BasicLSTMCell", "tensorflow.contrib.rnn.DropoutWrapper", "tensorflow.reshape", "tensorflow.assign", "tensorflow.ones", "tensorflow.train.Supervisor", "tensorflow.contrib.cudnn_rnn.RNNParamsSaveable", "tensorflow.variable_scope", "tensorflow.get_variable_scope" ]
text_generator.py
[(258, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (289, 'numpy.exp', 'np.exp', (['(costs / iters)'], {}), True, 'import numpy as np\n'), (325, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (326, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (327, 'tensorflow.Graph', 'tf.Graph', ([], {}), True, 'import tensorflow as tf\n'), (339, 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['FLAGS.save_path'], {}), True, 'import tensorflow as tf\n'), (360, 'tensorflow.app.run', 'tf.app.run', ([], {}), True, 'import tensorflow as tf\n'), (329, 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', (['(-config.init_scale)', 'config.init_scale'], {}), True, 'import tensorflow as tf\n'), (342, 'tensorflow.train.Supervisor', 'tf.train.Supervisor', ([], {'logdir': 'FLAGS.save_path'}), True, 'import tensorflow as tf\n'), (343, 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'log_device_placement': '(False)', 'allow_soft_placement': '(True)'}), True, 'import tensorflow as tf\n'), (57, 'tensorflow.get_variable', 'tf.get_variable', (['"""softmax_w"""', '[hidden_size, vocab_size]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (59, 'tensorflow.get_variable', 'tf.get_variable', (['"""softmax_b"""', '[vocab_size]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (60, 'tensorflow.nn.xw_plus_b', 'tf.nn.xw_plus_b', (['output', 'softmax_w', 'softmax_b'], {}), True, 'import tensorflow as tf\n'), (61, 'tensorflow.reshape', 'tf.reshape', (['logits', '[self.batch_size, self.num_steps, vocab_size]'], {}), True, 'import tensorflow as tf\n'), (70, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['loss'], {}), True, 'import tensorflow as tf\n'), (76, 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'trainable': '(False)'}), True, 'import tensorflow as tf\n'), (77, 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), True, 'import tensorflow as tf\n'), (80, 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['self._lr'], {}), True, 'import tensorflow as tf\n'), (85, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[]', 'name': '"""new_learning_rate"""'}), True, 'import tensorflow as tf\n'), (87, 'tensorflow.assign', 'tf.assign', (['self._lr', 'self._new_lr'], {}), True, 'import tensorflow as tf\n'), (93, 'tensorflow.contrib.rnn.BasicLSTMCell', 'tf.contrib.rnn.BasicLSTMCell', (['config.hidden_size'], {'forget_bias': '(0.0)', 'state_is_tuple': '(True)', 'reuse': '(not is_training)'}), True, 'import tensorflow as tf\n'), (97, 'tensorflow.contrib.rnn.LSTMBlockCell', 'tf.contrib.rnn.LSTMBlockCell', (['config.hidden_size'], {'forget_bias': '(0.0)'}), True, 'import tensorflow as tf\n'), (117, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""RNN"""'], {}), True, 'import tensorflow as tf\n'), (122, 'tensorflow.concat', 'tf.concat', (['outputs', '(1)'], {}), True, 'import tensorflow as tf\n'), (142, 'tensorflow.add_to_collection', 'tf.add_to_collection', (['name', 'op'], {}), True, 'import tensorflow as tf\n'), (146, 'tensorflow.add_to_collection', 'tf.add_to_collection', (['self._initial_state_name', 'state_tuple.c'], {}), True, 'import tensorflow as tf\n'), (147, 'tensorflow.add_to_collection', 'tf.add_to_collection', (['self._initial_state_name', 'state_tuple.h'], {}), True, 'import tensorflow as tf\n'), (149, 'tensorflow.add_to_collection', 'tf.add_to_collection', (['self._final_state_name', 
'state_tuple.c'], {}), True, 'import tensorflow as tf\n'), (150, 'tensorflow.add_to_collection', 'tf.add_to_collection', (['self._final_state_name', 'state_tuple.h'], {}), True, 'import tensorflow as tf\n'), (168, 'tensorflow.get_collection_ref', 'tf.get_collection_ref', (['"""rnn_params"""'], {}), True, 'import tensorflow as tf\n'), (311, 'tensorflow.python.client.device_lib.list_local_devices', 'device_lib.list_local_devices', ([], {}), False, 'from tensorflow.python.client import device_lib\n'), (332, 'tensorflow.name_scope', 'tf.name_scope', (['"""Train"""'], {}), True, 'import tensorflow as tf\n'), (336, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Training Loss"""', 'm.cost'], {}), True, 'import tensorflow as tf\n'), (337, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Learning rate"""', 'm.lr'], {}), True, 'import tensorflow as tf\n'), (47, 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), True, 'import tensorflow as tf\n'), (48, 'tensorflow.get_variable', 'tf.get_variable', (['"""embedding"""', '[vocab_size, hidden_size]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (50, 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embedding', 'input_.input_data'], {}), True, 'import tensorflow as tf\n'), (53, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['inputs', 'config.keep_prob'], {}), True, 'import tensorflow as tf\n'), (66, 'tensorflow.ones', 'tf.ones', (['[self.batch_size, self.num_steps]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (78, 'tensorflow.gradients', 'tf.gradients', (['self._cost', 'tvars'], {}), True, 'import tensorflow as tf\n'), (89, 'tensorflow.global_variables', 'tf.global_variables', ([], {}), True, 'import tensorflow as tf\n'), (106, 'tensorflow.contrib.rnn.DropoutWrapper', 'tf.contrib.rnn.DropoutWrapper', (['cell'], {'output_keep_prob': 'config.keep_prob'}), True, 'import tensorflow as tf\n'), (156, 'tensorflow.get_collection_ref', 'tf.get_collection_ref', (['name'], {}), True, 'import tensorflow as tf\n'), (157, 'tensorflow.get_collection_ref', 'tf.get_collection_ref', (['name'], {}), True, 'import tensorflow as tf\n'), (158, 'tensorflow.contrib.rnn.LSTMStateTuple', 'tf.contrib.rnn.LSTMStateTuple', (['c', 'h'], {}), True, 'import tensorflow as tf\n'), (164, 'tensorflow.get_collection_ref', 'tf.get_collection_ref', (['"""train_op"""'], {}), True, 'import tensorflow as tf\n'), (165, 'tensorflow.get_collection_ref', 'tf.get_collection_ref', (['"""lr"""'], {}), True, 'import tensorflow as tf\n'), (166, 'tensorflow.get_collection_ref', 'tf.get_collection_ref', (['"""new_lr"""'], {}), True, 'import tensorflow as tf\n'), (167, 'tensorflow.get_collection_ref', 'tf.get_collection_ref', (['"""lr_update"""'], {}), True, 'import tensorflow as tf\n'), (170, 'tensorflow.contrib.cudnn_rnn.RNNParamsSaveable', 'tf.contrib.cudnn_rnn.RNNParamsSaveable', (['self._cell', 'self._cell.params_to_canonical', 'self._cell.canonical_to_params', 'rnn_params'], {'base_variable_scope': '"""Model/RNN"""'}), True, 'import tensorflow as tf\n'), (176, 'tensorflow.add_to_collection', 'tf.add_to_collection', (['tf.GraphKeys.SAVEABLE_OBJECTS', 'params_saveable'], {}), True, 'import tensorflow as tf\n'), (334, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Model"""'], {'reuse': 'None', 'initializer': 'initializer'}), True, 'import tensorflow as tf\n'), (347, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (83, 
'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), True, 'import tensorflow as tf\n'), (286, 'numpy.exp', 'np.exp', (['(costs / iters)'], {}), True, 'import numpy as np\n'), (119, 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), True, 'import tensorflow as tf\n'), (287, 'time.time', 'time.time', ([], {}), False, 'import time\n')]
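run_epoch above computes word-level perplexity by accumulating each batch's cost (the cross-entropy summed over num_steps after averaging over the batch) and dividing by the total number of timesteps seen. A small NumPy sketch of that bookkeeping, with made-up cost values standing in for session output:

import numpy as np

num_steps = 35                     # matches MediumConfig.num_steps
batch_costs = np.full(100, 161.0)  # hypothetical per-batch costs, each summed over num_steps
costs = batch_costs.sum()
iters = num_steps * len(batch_costs)
print(np.exp(costs / iters))       # word-level perplexity: exp(4.6) ~ 99.5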
caifederated/mlhead-release
703fe2294f210b7259cd1404632d7757766f5a7d
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.framework import ops
from tensorflow.python.training import optimizer
import tensorflow as tf


class PerGodGradientDescent(optimizer.Optimizer):
    """Implementation of Perturbed gold Gradient Descent,
    i.e., the FedDane optimizer"""

    def __init__(self, learning_rate=0.001, mu=0.01, use_locking=False,
                 name="PGD"):
        super(PerGodGradientDescent, self).__init__(use_locking, name)
        self._lr = learning_rate
        self._mu = mu

        # Tensor versions of the constructor arguments, created in _prepare().
        self._lr_t = None
        self._mu_t = None

    def _prepare(self):
        self._lr_t = ops.convert_to_tensor(self._lr, name="learning_rate")
        self._mu_t = ops.convert_to_tensor(self._mu, name="prox_mu")

    def _create_slots(self, var_list):
        # Create slots for the global solution.
        for v in var_list:
            self._zeros_slot(v, "vstar", self._name)
            self._zeros_slot(v, "gold", self._name)

    def _apply_dense(self, grad, var):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        mu_t = math_ops.cast(self._mu_t, var.dtype.base_dtype)
        vstar = self.get_slot(var, "vstar")
        gold = self.get_slot(var, "gold")

        # Update 'ref' by subtracting 'value'.
        var_update = state_ops.assign_sub(
            var, lr_t * (grad + gold + mu_t * (var - vstar)))
        # Create an op that groups multiple operations.
        # When this op finishes, all ops in input have finished.
        return control_flow_ops.group(*[var_update, ])

    def _apply_sparse_shared(self, grad, var, indices, scatter_add):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        mu_t = math_ops.cast(self._mu_t, var.dtype.base_dtype)
        vstar = self.get_slot(var, "vstar")
        gold = self.get_slot(var, "gold")  # gold is not sparse

        v_diff = state_ops.assign(vstar, mu_t * (var - vstar),
                                  use_locking=self._use_locking)

        with ops.control_dependencies([v_diff]):
            # run the v_diff operation before scatter_add
            scaled_grad = scatter_add(vstar, indices, grad)
        var_update = state_ops.assign_sub(var, lr_t * (scaled_grad + gold))

        return control_flow_ops.group(*[var_update, ])

    def _apply_sparse(self, grad, var):
        # sparse grad (only for the shakespeare model)
        return self._apply_sparse_shared(
            grad.values, var, grad.indices,
            lambda x, i, v: state_ops.scatter_add(x, i, v))

    def set_params(self, cog, avg_gradient, client):
        with client.model.graph.as_default():
            all_vars = tf.trainable_variables()
            for variable, value in zip(all_vars, cog):
                vstar = self.get_slot(variable, "vstar")
                vstar.load(value, client.model.sess)

        # get the old gradient
        _, gprev = client.get_grads()

        # Find g_t - F'(old)
        gdiff = [g1 - g2 for g1, g2 in zip(avg_gradient, gprev)]

        with client.model.graph.as_default():
            all_vars = tf.trainable_variables()
            for variable, grad in zip(all_vars, gdiff):
                gold = self.get_slot(variable, "gold")
                gold.load(grad, client.model.sess)
[ "tensorflow.python.ops.control_flow_ops.group", "tensorflow.python.ops.state_ops.assign_sub", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.state_ops.assign", "tensorflow.trainable_variables", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.ops.state_ops.scatter_add", "tensorflow.python.ops.math_ops.cast" ]
all_baselines/fed-dane/flearn/optimizer/pggd.py
[(21, 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['self._lr'], {'name': '"""learning_rate"""'}), False, 'from tensorflow.python.framework import ops\n'), (22, 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['self._mu'], {'name': '"""prox_mu"""'}), False, 'from tensorflow.python.framework import ops\n'), (31, 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['self._lr_t', 'var.dtype.base_dtype'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (32, 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['self._mu_t', 'var.dtype.base_dtype'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (37, 'tensorflow.python.ops.state_ops.assign_sub', 'state_ops.assign_sub', (['var', '(lr_t * (grad + gold + mu_t * (var - vstar)))'], {}), False, 'from tensorflow.python.ops import state_ops\n'), (40, 'tensorflow.python.ops.control_flow_ops.group', 'control_flow_ops.group', (['*[var_update]'], {}), False, 'from tensorflow.python.ops import control_flow_ops\n'), (44, 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['self._lr_t', 'var.dtype.base_dtype'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (45, 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['self._mu_t', 'var.dtype.base_dtype'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (49, 'tensorflow.python.ops.state_ops.assign', 'state_ops.assign', (['vstar', '(mu_t * (var - vstar))'], {'use_locking': 'self._use_locking'}), False, 'from tensorflow.python.ops import state_ops\n'), (53, 'tensorflow.python.ops.state_ops.assign_sub', 'state_ops.assign_sub', (['var', '(lr_t * (scaled_grad + gold))'], {}), False, 'from tensorflow.python.ops import state_ops\n'), (55, 'tensorflow.python.ops.control_flow_ops.group', 'control_flow_ops.group', (['*[var_update]'], {}), False, 'from tensorflow.python.ops import control_flow_ops\n'), (51, 'tensorflow.python.framework.ops.control_dependencies', 'ops.control_dependencies', (['[v_diff]'], {}), False, 'from tensorflow.python.framework import ops\n'), (63, 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), True, 'import tensorflow as tf\n'), (75, 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), True, 'import tensorflow as tf\n'), (59, 'tensorflow.python.ops.state_ops.scatter_add', 'state_ops.scatter_add', (['x', 'i', 'v'], {}), False, 'from tensorflow.python.ops import state_ops\n')]
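_apply_dense above implements, per variable, the update w <- w - lr * (grad + gold + mu * (w - vstar)), where vstar is the global solution snapshot and gold the gradient-correction term held in the optimizer slots. A one-variable NumPy sketch of the same rule, with made-up values for illustration:

import numpy as np

lr, mu = 0.001, 0.01
var = np.array([0.5, -0.2])     # current local weights
vstar = np.array([0.4, -0.1])   # global solution snapshot ("vstar" slot)
gold = np.array([0.05, 0.02])   # gradient correction term ("gold" slot)
grad = np.array([0.10, -0.30])  # local gradient

var = var - lr * (grad + gold + mu * (var - vstar))
print(var)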
ChenKuanSun/TheObstacleTowerChallenge
c2de16930dd88949c0bc6a460f378beae3a04204
# 這個文件直接執行是給GCP用的 from obstacle_tower_env import ObstacleTowerEnv import numpy as np import tensorflow as tf import threading import queue # 運行環境設定: # 設置幾個代理 N_WORKER = 6 # 代理人自己更新的步數 EP_LEN = 500 # 最大訓練回合數(每個代理人加起來的回合) EP_MAX = N_WORKER * 200 # 設定更新整個模型:每個代理走了N步就更新 UPDATE_STEP = 20 # 本身是循環更新步 MIN_BATCH_SIZE = N_WORKER * UPDATE_STEP * 3 # 設定輸入的維度 image_features, action_dim = 512, 1 # 限制控制,提高收斂程度 ACTION_BOUND = [6, 12] # 超參數 # Agent目標替換率 EPSILON = 0.4 # Reward discount factor GAMMA = 0.7 # Actor 學習率 # A_LR = 0.0001 A_LR = 0.001 # Critic 學習率 # C_LR = 0.0002 C_LR = 0.002 from obstacle_tower_env import ObstacleTowerEnv import numpy as np import tensorflow as tf import os import time import threading import queue # 運行環境設定: # 設置幾個代理 EP_LEN = 500 # 最大訓練回合數(每個代理人加起來的回合) EP_MAX = N_WORKER * 10 # 設定更新整個模型:每個代理走了N步就更新 UPDATE_STEP = 20 # 本身是循環更新步 MIN_BATCH_SIZE = N_WORKER * UPDATE_STEP * 3 # 設定輸入的維度 image_features, action_dim = 512, 1 # 限制控制,提高收斂程度 ACTION_BOUND = [6, 12] # 超參數 # Agent目標替換率 EPSILON = 0.3 # Reward discount factor GAMMA = 0.7 # Actor 學習率 # A_LR = 0.0001 A_LR = 0.001 # Critic 學習率 # C_LR = 0.0002 C_LR = 0.002 class MODEL(object): def __init__(self): self.sess = tf.Session() self.tfs = tf.placeholder(tf.float32, [None, 84, 84, 3], 'state') c0 = tf.cast(self.tfs, tf.float32) / 255. c1 = tf.nn.relu(self.conv(c0, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2))) c2 = tf.nn.relu( self.conv( c1, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2))) c3 = tf.nn.relu( self.conv( c2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2))) nh = np.prod([v.value for v in c3.get_shape()[1:]]) h3 = tf.reshape(c3, [-1, nh]) pre_s = tf.nn.relu(self.fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2))) # Critic # 定義變數 # self.tfs = tf.placeholder(tf.float32, [None, image_features], 'state') self.tfdc_r = tf.placeholder(tf.float32, [None, 1], 'discounted_r') # 建立網路層 l1 = tf.layers.dense( inputs=pre_s, units=100, # number of hidden units activation=tf.nn.relu, name='l1' ) self.v = tf.layers.dense( inputs=l1, units=1, # output units activation=None, name='V' ) # 計算損益 self.advantage = self.tfdc_r - self.v self.closs = tf.reduce_mean(tf.square(self.advantage)) self.ctrain_op = tf.train.AdamOptimizer(C_LR).minimize(self.closs) # Actor # 建立網路 action_op, action_op_params = self._build_anet( 'action_op', trainable=True) old_action_op, old_action_op_params = self._build_anet( 'old_action_op', trainable=False) # 定義輸出範例 self.sample_op = tf.squeeze( action_op.sample(1), axis=0) # operation of choosing action # 更新 self.update_old_action_op_op = [ olda.assign(a) for a, olda in zip( action_op_params, old_action_op_params)] # 定義輸入變數 self.tfa = tf.placeholder(tf.float32, [None, action_dim], 'action') self.tfadv = tf.placeholder(tf.float32, [None, 1], 'advantage') # 機率比較 ratio = action_op.prob(self.tfa) / \ (old_action_op.prob(self.tfa) + 1e-5) # 替代損失 surr = ratio * self.tfadv # 減少代理損失 self.aloss = -tf.reduce_mean(tf.minimum( surr, tf.clip_by_value(ratio, 1. - EPSILON, 1. 
+ EPSILON) * self.tfadv)) self.atrain_op = tf.train.AdamOptimizer(A_LR).minimize(self.aloss) # log self.train_writer = tf.summary.FileWriter("logs/", self.sess.graph) self.sess.run(tf.global_variables_initializer()) self.tableAction = self.createActionTable() def createActionTable(self): tableAction = [] for a in range(0, 3): for b in range(0, 3): for c in range(0, 2): tableAction.append([a, b, c, 0]) # print("Action option: ", tableAction[0:17]) return tableAction def update(self): global GLOBAL_UPDATE_COUNTER while not COORD.should_stop(): if GLOBAL_EP < EP_MAX: # 等待收集資料 UPDATE_EVENT.wait() # 用新的思考模式取代掉舊的模式 self.sess.run(self.update_old_action_op_op) # # 從各個平台內收集資料 s = QUEUE.get() a = QUEUE.get() r = QUEUE.get() # s, a, r = data[:, :image_features], data[:, # image_features: image_features + action_dim], data[:, -1:] adv = self.sess.run( self.advantage, { self.tfs: s, self.tfdc_r: r}) # 更新AC [self.sess.run(self.atrain_op, {self.tfs: s, self.tfa: a, self.tfadv: adv}) for _ in range(UPDATE_STEP)] [self.sess.run(self.ctrain_op, {self.tfs: s, self.tfdc_r: r}) for _ in range( UPDATE_STEP)] # 完成更新作業 UPDATE_EVENT.clear() # 重新計數 GLOBAL_UPDATE_COUNTER = 0 # 設成可以使用 ROLLING_EVENT.set() # from Open AI baseline # def cnn(self, s): # return tf.reshape(h, [-1]).eval() def conv(self, x, scope, *, nf, rf, stride, pad='VALID', init_scale=1.0, data_format='NHWC', one_dim_bias=False): channel_ax = 3 strides = [1, stride, stride, 1] bshape = [1, 1, 1, nf] bias_var_shape = [nf] if one_dim_bias else [1, nf, 1, 1] nin = x.get_shape()[channel_ax].value wshape = [rf, rf, nin, nf] with tf.variable_scope(scope): w = tf.get_variable( "w", wshape, initializer=self.ortho_init(init_scale)) b = tf.get_variable( "b", bias_var_shape, initializer=tf.constant_initializer(0.0)) if not one_dim_bias and data_format == 'NHWC': b = tf.reshape(b, bshape) return tf.nn.conv2d( x, w, strides=strides, padding=pad, data_format=data_format) + b def fc(self, x, scope, nh, *, init_scale=1.0, init_bias=0.0): with tf.variable_scope(scope): nin = x.get_shape()[1].value w = tf.get_variable( "w", [nin, nh], initializer=self.ortho_init(init_scale)) b = tf.get_variable( "b", [nh], initializer=tf.constant_initializer(init_bias)) return tf.matmul(x, w) + b def ortho_init(self, scale=1.0): def _ortho_init(shape, dtype, partition_info=None): # lasagne ortho init for tf shape = tuple(shape) if len(shape) == 2: flat_shape = shape elif len(shape) == 4: # assumes NHWC flat_shape = (np.prod(shape[:-1]), shape[-1]) else: raise NotImplementedError a = np.random.normal(0.0, 1.0, flat_shape) u, _, v = np.linalg.svd(a, full_matrices=False) q = u if u.shape == flat_shape else v # pick the one with the correct shape q = q.reshape(shape) return (scale * q[:shape[0], :shape[1]]).astype(np.float32) return _ortho_init #################################################### def _build_anet(self, name, trainable): # 定義Actor 新舊的網路模型 with tf.variable_scope(name): c0 = tf.cast(self.tfs, tf.float32) / 255. 
            c1 = tf.nn.relu(self.conv(c0, 'c1', nf=32, rf=8, stride=4,
                                      init_scale=np.sqrt(2)))
            c2 = tf.nn.relu(self.conv(c1, 'c2', nf=64, rf=4, stride=2,
                                      init_scale=np.sqrt(2)))
            c3 = tf.nn.relu(self.conv(c2, 'c3', nf=64, rf=3, stride=1,
                                      init_scale=np.sqrt(2)))
            nh = np.prod([v.value for v in c3.get_shape()[1:]])
            h3 = tf.reshape(c3, [-1, nh])
            pre_s = tf.nn.relu(self.fc(h3, 'fc1', nh=512,
                                       init_scale=np.sqrt(2)))
            l1 = tf.layers.dense(inputs=pre_s,
                                 units=200,  # number of hidden units
                                 activation=tf.nn.relu,
                                 name='l1',
                                 trainable=trainable
                                 )
            mu = 2 * tf.layers.dense(inputs=l1,
                                     units=action_dim,
                                     activation=tf.nn.tanh,
                                     name='mu',
                                     trainable=trainable
                                     )
            sigma = tf.layers.dense(inputs=l1,
                                    units=action_dim,  # output units
                                    activation=tf.nn.softplus,  # get action probabilities
                                    name='sigma',
                                    trainable=trainable
                                    )
            norm_dist = tf.distributions.Normal(loc=mu, scale=sigma)
        params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
        return norm_dist, params

    def choose_action(self, s):
        # Decide what to do next
        # s = s[np.newaxis, :]
        s = s.reshape(-1, 84, 84, 3)
        a = self.sess.run(self.sample_op, {self.tfs: s})[0]
        return np.clip(a, ACTION_BOUND[0], ACTION_BOUND[1])

    def get_v(self, s):
        if s.ndim < 4:
            s = s[np.newaxis, :]
        return self.sess.run(self.v, {self.tfs: s})[0, 0]

    def load(self):
        saver = tf.train.Saver()
        saver.restore(self.sess, './model_save/params')

    def save(self):
        saver = tf.train.Saver()
        saver.save(self.sess, './model_save/params', write_meta_graph=False)


class Worker(object):
    def __init__(
            self, envpath, wid, retro, realtime_mode,
            env_seed=0, env_floor=0):
        self.wid = wid
        self.env = ObstacleTowerEnv(environment_filename=envpath,
                                    worker_id=wid,
                                    retro=retro,
                                    realtime_mode=realtime_mode)
        self.kprun = GLOBAL_KPRUN
        self.tableAction = self.createActionTable()
        # Stage settings
        self.env_seed = env_seed
        self.env_floor = env_floor
        self.step = 0
        self.summary = tf.Summary(
            value=[
                tf.Summary.Value(
                    tag="Stage_reward " + str(self.wid),
                    simple_value=0)])
        self.kprun.train_writer.add_summary(self.summary, 0)

    def createActionTable(self):
        tableAction = []
        for a in range(0, 3):
            for b in range(0, 3):
                for c in range(0, 2):
                    tableAction.append([a, b, c, 0])
        # print("Action option: ", tableAction[0:17])
        return tableAction

    def reward_compute(
            self, done, reward_total, keys, previous_keys, reward,
            previous_reward, time_remaining, previous_time_remaining,
            previous_stage_time_remaining):
        # Reward shaping.
        # `reward` is the number of stages cleared, as reported by the environment.
        # `keys` is the number of keys collected.
        # `time_remaining` is the remaining episode time.
        # Clearing a stage gives the largest reward; a key is worth 5; a time
        # orb is only worth a small bonus for now, because the remaining time
        # is also settled at the end and would otherwise be rewarded twice.
        # On a stage clear, grant ten times the clear reward minus
        # (stage start time - time remaining) / 1000.
        # print("time_remaining ", time_remaining,
        #       " previous_time_remaining ", previous_time_remaining,
        #       " reward ", reward)
        # Passing through a green door that opens adds roughly 0.1.
        if (reward - previous_reward) > 0 and (reward - previous_reward) < 0.3:
            reward_total += 3
        elif (reward - previous_reward) > 0.9:
            # *** If the remaining time exceeds the stage time, this turns into
            #     a bonus and may strongly encourage the agent to eat time orbs.
            # *** An alternative is to add remaining_time / 1000 directly,
            #     which avoids the compounding effect.
            print("Pass ", reward, " Stage!")
            # reward_total += (reward - previous_reward) * 100 - \
            #     (previous_stage_time_remaining - time_remaining)
            reward_total += 200
            # Carry the time over to the next stage; store this stage's time
            # for the next stage-clear computation.
            previous_time_remaining = time_remaining
            previous_stage_time_remaining = time_remaining
            # Lesson 1 repeat
            if reward > 6.5:
                # self.total_step += 1
                # if self.total_step >= 5:
                #     done = True
                #     return reward_total, previous_stage_time_remaining, done
                self.env.seed(np.random.randint(5))
                # env.reset()
                done = True
                return reward_total, previous_stage_time_remaining, done
        # A stage clear can coincide with picking up an orb or a key, so the
        # bonuses are allowed to stack.
        if keys > previous_keys:  # the key count increased, i.e. a key was picked up
            # print("Get Key")
            reward_total += 5
        if previous_time_remaining < time_remaining and previous_time_remaining != 0:
            # print("Get time power up")
            reward_total += 2
        else:
            reward_total -= 0.1
        if done and previous_time_remaining > 100:
            print("Agent died")
            # The more time left at death, the larger the penalty
            # reward_total -= (10 + time_remaining / 100)
            reward_total -= 100
        return reward_total, previous_stage_time_remaining, done

    def work(self):
        global GLOBAL_EP, GLOBAL_RUNNING_R, GLOBAL_UPDATE_COUNTER
        # Stage settings
        self.env.seed(self.env_seed)
        self.env.floor(self.env_floor)
        # Loop until the target number of episodes is reached
        while not COORD.should_stop():
            # Count the steps
            self.step += 1
            # Reset the stage
            obs = self.env.reset()
            # Initialization
            done = False
            stage_reward = 0.0
            reward = 0
            keys = 0
            # Used to detect time pickups; there is no time_remaining before
            # the first step, so define it up front.
            time_remaining = 3000
            previous_stage_time_remaining = time_remaining
            # Preprocess the image
            # previous_preprocessed_observation_image = np.reshape(obs[0], [-1])
            previous_preprocessed_observation_image = obs[0]
            buffer_s, buffer_a, buffer_r = [], [], []
            # While the agent is alive
            while not done:
                # If the model is being updated, wait for it to finish
                if not ROLLING_EVENT.is_set():
                    # Wait for the update to complete
                    ROLLING_EVENT.wait()
                    # Clear the buffers and collect data with the new policy
                    buffer_s, buffer_a, buffer_r = [], [], []
                # Remember the previous state for the reward computation
                previous_keys = keys
                previous_reward = reward
                previous_time_remaining = time_remaining
                # Choose an action based on the previous state
                action = self.kprun.choose_action(
                    previous_preprocessed_observation_image)
                action = np.clip(np.random.normal(
                    action, 1.), *ACTION_BOUND)
                # Take the action; get the observation, stages cleared and agent info
                observation, reward, done, info = self.env.step(
                    np.array(self.tableAction[int(action)]))
                # Preprocess the data the model needs
                observation_image, keys, time_remaining = observation
                # preprocessed_observation_image = np.reshape(
                #     observation_image, [-1])
                preprocessed_observation_image = observation_image
                stage_reward, previous_stage_time_remaining, done = self.reward_compute(
                    done=done,
                    reward_total=stage_reward,
                    keys=keys,
                    previous_keys=previous_keys,
                    reward=reward,
                    previous_reward=previous_reward,
                    time_remaining=time_remaining,
                    previous_time_remaining=previous_time_remaining,
                    previous_stage_time_remaining=previous_stage_time_remaining)
                # Normalize the reward
                stage_reward = (stage_reward + 8) / 8
                # Store this transition in the buffers
                buffer_s.append(np.array([preprocessed_observation_image]))
                buffer_a.append(action)
                buffer_r.append(stage_reward)
                # Keep this image for the next step
                previous_preprocessed_observation_image = preprocessed_observation_image
                # Count towards the next update; once the threshold is hit,
                # the worker prepares its batch locally first.
                GLOBAL_UPDATE_COUNTER += 1
                if len(buffer_s) == EP_LEN - \
                        1 or GLOBAL_UPDATE_COUNTER >= MIN_BATCH_SIZE:
                    v_s_ = self.kprun.get_v(preprocessed_observation_image)
                    # Compute the discounted rewards
                    discounted_r = []
                    for r in buffer_r[::-1]:
                        v_s_ = r + GAMMA * v_s_
                        discounted_r.append(v_s_)
                    discounted_r.reverse()
                    # Arrange the dimensions
                    bs, ba, br = np.vstack(buffer_s), np.vstack(
                        buffer_a), np.array(discounted_r)[:, np.newaxis]
                    # Put the data into the shared memory
                    QUEUE.put(bs)
                    QUEUE.put(ba)
                    QUEUE.put(br)
                    # Clear the buffers
                    buffer_s, buffer_a, buffer_r = [], [], []
                    # Once the global step count reaches the minimum batch
                    # size, update the whole model
                    if GLOBAL_UPDATE_COUNTER >= MIN_BATCH_SIZE:
                        # Stop collecting data
                        ROLLING_EVENT.clear()
                        # Update PPO
                        UPDATE_EVENT.set()
                    # Stop training after the maximum number of episodes
                    if GLOBAL_EP >= EP_MAX:
                        COORD.request_stop()
                        break
            # Log the reward
            self.summary = tf.Summary(
                value=[
                    tf.Summary.Value(
                        tag="Stage_reward " + str(self.wid),
                        simple_value=stage_reward)])
            self.kprun.train_writer.add_summary(self.summary, self.step)
            GLOBAL_EP += 1
            print(
                '{0:.1f}%'.format(
                    GLOBAL_EP / EP_MAX * 100),
                '|W%i' % self.wid,
                '|Ep_r: %.2f' % stage_reward,
            )
        self.env.close()


if __name__ == '__main__':
    # Build the model
    GLOBAL_KPRUN = MODEL()
    # GLOBAL_KPRUN.load()
    # Set up the threading events
    UPDATE_EVENT, ROLLING_EVENT = threading.Event(), threading.Event()
    # No update yet
    UPDATE_EVENT.clear()
    # Start rolling out
    ROLLING_EVENT.set()
    # On Linux, use envpath='./ObstacleTower/obstacletower.x86_64' and,
    # optionally, a random env_seed such as np.random.randint(10).
    workers = [Worker(envpath='./ObstacleTower/obstacletower.exe',
                      wid=i,
                      retro=False,
                      realtime_mode=False,
                      env_seed=0,
                      env_floor=0) for i in range(N_WORKER)]
    # Observer
    # workers.append(Worker(envpath='./ObstacleTower/obstacletower.exe',
    #                       wid=N_WORKER + 1,
    #                       retro=False,
    #                       realtime_mode=True,
    #                       env_seed=0,
    #                       env_floor=0))
    GLOBAL_UPDATE_COUNTER, GLOBAL_EP = 0, 0
    GLOBAL_RUNNING_R = []
    COORD = tf.train.Coordinator()
    # Shared memory
    QUEUE = queue.Queue()
    threads = []
    for worker in workers:  # worker threads
        t = threading.Thread(target=worker.work, args=())
        t.start()  # training
        threads.append(t)
    # Thread that updates the model
    threads.append(threading.Thread(target=GLOBAL_KPRUN.update, ))
    threads[-1].start()
    COORD.join(threads)
    # Save the model
    GLOBAL_KPRUN.save()
    time.sleep(5)

    # Test run
    env = ObstacleTowerEnv('./ObstacleTower/obstacletower.exe',
                           worker_id=10,
                           retro=False,
                           realtime_mode=True)
    obs = env.reset()
    print("Running the test environment; press Q to quit")
    previous_preprocessed_observation_image = np.reshape(obs[0], [-1])
    while True:
        action = GLOBAL_KPRUN.choose_action(
            previous_preprocessed_observation_image)
        # Multithreading can leave the policy in a bad state, so guard
        # against NaN actions
        if np.isnan(action):
            action = np.random.randint(6, high=12)
        # Take the action; get the observation, stages cleared and agent info
        observation, reward, done, info = env.step(
            np.array(GLOBAL_KPRUN.tableAction[int(action)]))
        # Preprocess the data the model needs
        observation_image, keys, time_remaining = observation
        preprocessed_observation_image = np.reshape(
            observation_image, [-1])
        # NOTE: leftover from a cv2.waitKey() pattern; this check never fires.
        if 0xFF == ord('q'):
            break
        previous_preprocessed_observation_image = preprocessed_observation_image
    env.close()
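# ----------------------------------------------------------------------
# A minimal, self-contained sketch (plain NumPy, not part of the original
# script) of the PPO clipped-surrogate objective that `self.aloss` above
# implements. The names `ratio`, `adv` and `eps` are illustrative.
import numpy as np


def ppo_clip_loss(ratio, adv, eps=0.3):
    # ratio = pi_new(a|s) / pi_old(a|s); adv = advantage estimate.
    # PPO maximizes E[min(ratio * adv, clip(ratio, 1 - eps, 1 + eps) * adv)],
    # so the loss to minimize is the negative of that expectation.
    clipped = np.clip(ratio, 1.0 - eps, 1.0 + eps)
    return -np.mean(np.minimum(ratio * adv, clipped * adv))


# With a positive advantage, a ratio far above 1 + eps earns no extra credit:
print(ppo_clip_loss(np.array([0.5, 1.0, 2.0]), np.array([1.0, 1.0, 1.0])))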
[ "numpy.sqrt", "tensorflow.cast", "tensorflow.train.AdamOptimizer", "numpy.random.randint", "tensorflow.nn.conv2d", "numpy.linalg.svd", "numpy.clip", "numpy.reshape", "tensorflow.get_collection", "tensorflow.layers.dense", "tensorflow.Session", "tensorflow.square", "tensorflow.train.Saver", "tensorflow.matmul", "numpy.isnan", "tensorflow.train.Coordinator", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "numpy.array", "tensorflow.clip_by_value", "tensorflow.summary.FileWriter", "tensorflow.distributions.Normal", "tensorflow.reshape", "tensorflow.constant_initializer", "numpy.random.normal", "numpy.prod", "tensorflow.variable_scope", "numpy.vstack" ]
keepitpossible/backup/ck_cnnlstm_oppo.py
[(588, 'tensorflow.train.Coordinator', 'tf.train.Coordinator', ([], {}), True, 'import tensorflow as tf\n'), (590, 'queue.Queue', 'queue.Queue', ([], {}), False, 'import queue\n'), (602, 'time.sleep', 'time.sleep', (['(5)'], {}), False, 'import time\n'), (604, 'obstacle_tower_env.ObstacleTowerEnv', 'ObstacleTowerEnv', (['"""./ObstacleTower/obstacletower.exe"""'], {'worker_id': '(10)', 'retro': '(False)', 'realtime_mode': '(True)'}), False, 'from obstacle_tower_env import ObstacleTowerEnv\n'), (610, 'numpy.reshape', 'np.reshape', (['obs[0]', '[-1]'], {}), True, 'import numpy as np\n'), (649, 'tensorflow.train.Coordinator', 'tf.train.Coordinator', ([], {}), True, 'import tensorflow as tf\n'), (651, 'queue.Queue', 'queue.Queue', ([], {}), False, 'import queue\n'), (75, 'tensorflow.Session', 'tf.Session', ([], {}), True, 'import tensorflow as tf\n'), (76, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 84, 84, 3]', '"""state"""'], {}), True, 'import tensorflow as tf\n'), (101, 'tensorflow.reshape', 'tf.reshape', (['c3', '[-1, nh]'], {}), True, 'import tensorflow as tf\n'), (106, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 1]', '"""discounted_r"""'], {}), True, 'import tensorflow as tf\n'), (108, 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'pre_s', 'units': '(100)', 'activation': 'tf.nn.relu', 'name': '"""l1"""'}), True, 'import tensorflow as tf\n'), (114, 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'l1', 'units': '(1)', 'activation': 'None', 'name': '"""V"""'}), True, 'import tensorflow as tf\n'), (141, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, action_dim]', '"""action"""'], {}), True, 'import tensorflow as tf\n'), (142, 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 1]', '"""advantage"""'], {}), True, 'import tensorflow as tf\n'), (154, 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['"""logs/"""', 'self.sess.graph'], {}), True, 'import tensorflow as tf\n'), (311, 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': 'name'}), True, 'import tensorflow as tf\n'), (319, 'numpy.clip', 'np.clip', (['a', 'ACTION_BOUND[0]', 'ACTION_BOUND[1]'], {}), True, 'import numpy as np\n'), (327, 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), True, 'import tensorflow as tf\n'), (331, 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), True, 'import tensorflow as tf\n'), (345, 'obstacle_tower_env.ObstacleTowerEnv', 'ObstacleTowerEnv', ([], {'environment_filename': 'envpath', 'worker_id': 'wid', 'retro': 'retro', 'realtime_mode': 'realtime_mode'}), False, 'from obstacle_tower_env import ObstacleTowerEnv\n'), (565, 'threading.Event', 'threading.Event', ([], {}), False, 'import threading\n'), (565, 'threading.Event', 'threading.Event', ([], {}), False, 'import threading\n'), (593, 'threading.Thread', 'threading.Thread', ([], {'target': 'worker.work', 'args': '()'}), False, 'import threading\n'), (597, 'threading.Thread', 'threading.Thread', ([], {'target': 'GLOBAL_KPRUN.update'}), False, 'import threading\n'), (615, 'numpy.isnan', 'np.isnan', (['action'], {}), True, 'import numpy as np\n'), (622, 'numpy.reshape', 'np.reshape', (['observation_image', '[-1]'], {}), True, 'import numpy as np\n'), (634, 'threading.Event', 'threading.Event', ([], {}), False, 'import threading\n'), (634, 'threading.Event', 'threading.Event', ([], {}), False, 'import threading\n'), (654, 'threading.Thread', 'threading.Thread', ([], {'target': 
'worker.work', 'args': '()'}), False, 'import threading\n'), (658, 'threading.Thread', 'threading.Thread', ([], {'target': 'GLOBAL_KPRUN.update'}), False, 'import threading\n'), (77, 'tensorflow.cast', 'tf.cast', (['self.tfs', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (122, 'tensorflow.square', 'tf.square', (['self.advantage'], {}), True, 'import tensorflow as tf\n'), (155, 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), True, 'import tensorflow as tf\n'), (221, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (238, 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), True, 'import tensorflow as tf\n'), (256, 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(1.0)', 'flat_shape'], {}), True, 'import numpy as np\n'), (257, 'numpy.linalg.svd', 'np.linalg.svd', (['a'], {'full_matrices': '(False)'}), True, 'import numpy as np\n'), (266, 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), True, 'import tensorflow as tf\n'), (287, 'tensorflow.reshape', 'tf.reshape', (['c3', '[-1, nh]'], {}), True, 'import tensorflow as tf\n'), (292, 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'pre_s', 'units': '(200)', 'activation': 'tf.nn.relu', 'name': '"""l1"""', 'trainable': 'trainable'}), True, 'import tensorflow as tf\n'), (304, 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'l1', 'units': 'action_dim', 'activation': 'tf.nn.softplus', 'name': '"""sigma"""', 'trainable': 'trainable'}), True, 'import tensorflow as tf\n'), (310, 'tensorflow.distributions.Normal', 'tf.distributions.Normal', ([], {'loc': 'mu', 'scale': 'sigma'}), True, 'import tensorflow as tf\n'), (616, 'numpy.random.randint', 'np.random.randint', (['(6)'], {'high': '(12)'}), True, 'import numpy as np\n'), (123, 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['C_LR'], {}), True, 'import tensorflow as tf\n'), (152, 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['A_LR'], {}), True, 'import tensorflow as tf\n'), (229, 'tensorflow.reshape', 'tf.reshape', (['b', 'bshape'], {}), True, 'import tensorflow as tf\n'), (230, 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'w'], {'strides': 'strides', 'padding': 'pad', 'data_format': 'data_format'}), True, 'import tensorflow as tf\n'), (244, 'tensorflow.matmul', 'tf.matmul', (['x', 'w'], {}), True, 'import tensorflow as tf\n'), (267, 'tensorflow.cast', 'tf.cast', (['self.tfs', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (298, 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'l1', 'units': 'action_dim', 'activation': 'tf.nn.tanh', 'name': '"""mu"""', 'trainable': 'trainable'}), True, 'import tensorflow as tf\n'), (644, 'numpy.random.randint', 'np.random.randint', (['(10)'], {}), True, 'import numpy as np\n'), (83, 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), True, 'import numpy as np\n'), (91, 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), True, 'import numpy as np\n'), (99, 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), True, 'import numpy as np\n'), (102, 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), True, 'import numpy as np\n'), (227, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (243, 'tensorflow.constant_initializer', 'tf.constant_initializer', (['init_bias'], {}), True, 'import tensorflow as tf\n'), (475, 'numpy.random.normal', 'np.random.normal', (['action', '(1.0)'], {}), True, 'import numpy as np\n'), (502, 'numpy.array', 
'np.array', (['[preprocessed_observation_image]'], {}), True, 'import numpy as np\n'), (151, 'tensorflow.clip_by_value', 'tf.clip_by_value', (['ratio', '(1.0 - EPSILON)', '(1.0 + EPSILON)'], {}), True, 'import tensorflow as tf\n'), (253, 'numpy.prod', 'np.prod', (['shape[:-1]'], {}), True, 'import numpy as np\n'), (273, 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), True, 'import numpy as np\n'), (279, 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), True, 'import numpy as np\n'), (285, 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), True, 'import numpy as np\n'), (291, 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), True, 'import numpy as np\n'), (413, 'numpy.random.randint', 'np.random.randint', (['(5)'], {}), True, 'import numpy as np\n'), (522, 'numpy.vstack', 'np.vstack', (['buffer_s'], {}), True, 'import numpy as np\n'), (522, 'numpy.vstack', 'np.vstack', (['buffer_a'], {}), True, 'import numpy as np\n'), (522, 'numpy.array', 'np.array', (['discounted_r'], {}), True, 'import numpy as np\n')]
gyy8426/TF_concaption
7b3face47c96c885b2715605122328b7b6bef609
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""A decoder for tf.SequenceExample"""

import tensorflow as tf

from tensorflow.contrib.slim.python.slim.data import data_decoder


class TFSEquenceSplitTokensDecoder(data_decoder.DataDecoder):
  """A decoder for TensorFlow SequenceExamples.

  Decoding SequenceExample proto buffers comprises two stages: (1) example
  parsing, and (2) tensor manipulation.

  In the first stage, the tf.parse_single_sequence_example function is called
  with a list of FixedLenFeature and VarLenFeature instances. These instances
  tell TF how to parse the example. The output of this stage is a set of
  tensors. In this stage the decoder also optionally prepends and appends
  special tokens (e.g. "START" and "END") to the token sequence.

  In the second stage, the resulting tensors are manipulated to provide the
  requested 'item' tensors.

  To perform this decoding operation, an ExampleDecoder is given a list of
  ItemHandlers. Each ItemHandler indicates the set of features for stage 1
  and contains the instructions for post-processing its tensors for stage 2.
  """

  def __init__(self,
               context_keys_to_features,
               sequence_keys_to_features,
               items_to_handlers,
               delimiter=" ",
               tokens_feature_name="tokens",
               length_feature_name="length",
               prepend_token=None,
               append_token=None):
    """Constructs the decoder.

    Args:
      context_keys_to_features: a dictionary from TF-Example context keys to
        either tf.VarLenFeature or tf.FixedLenFeature instances. See
        tensorflow's parsing_ops.py.
      sequence_keys_to_features: a dictionary from TF-Example sequence keys
        to feature instances, parsed analogously.
      items_to_handlers: a dictionary from items (strings) to ItemHandler
        instances. Note that the ItemHandler's are provided the keys that
        they use to return the final item Tensors.
      delimiter: the delimiter on which the raw tokens feature is split.
      tokens_feature_name: name of the sequence feature holding the tokens.
      length_feature_name: name of the feature holding the sequence length.
      prepend_token: optional special token to prepend to the sequence.
      append_token: optional special token to append to the sequence.
    """
    self.delimiter = delimiter
    self.tokens_feature_name = tokens_feature_name
    self.length_feature_name = length_feature_name
    self.prepend_token = prepend_token
    self.append_token = append_token
    self._context_keys_to_features = context_keys_to_features
    self._sequence_keys_to_features = sequence_keys_to_features
    self._items_to_handlers = items_to_handlers

  def list_items(self):
    """See base class."""
    return list(self._items_to_handlers.keys())

  def decode(self, serialized_example, items=None):
    """Decodes the given serialized TF-example.

    Args:
      serialized_example: a serialized TF-example tensor.
      items: the list of items to decode. These must be a subset of the item
        keys in self._items_to_handlers. If `items` is left as None, then all
        of the items in self._items_to_handlers are decoded.

    Returns:
      the decoded items, a list of tensors.
""" context, sequence = tf.parse_single_sequence_example( serialized_example, self._context_keys_to_features, self._sequence_keys_to_features) tokens_raw = sequence[self.tokens_feature_name] tokens = tf.string_split(tokens_raw, delimiter=self.delimiter).values # Optionally prepend a special token if self.prepend_token is not None: tokens = tf.concat([[self.prepend_token], tokens], 0) # Optionally append a special token if self.append_token is not None: tokens = tf.concat([tokens, [self.append_token]], 0) sequence[self.tokens_feature_name] = tokens # Merge context and sequence features example = {} example.update(context) example.update(sequence) all_features = {} all_features.update(self._context_keys_to_features) all_features.update(self._sequence_keys_to_features) # Reshape non-sparse elements just once: for k, value in all_features.items(): if isinstance(value, tf.FixedLenFeature): example[k] = tf.reshape(example[k], value.shape) if not items: items = self._items_to_handlers.keys() outputs = [] for item in items: handler = self._items_to_handlers[item] keys_to_tensors = {key: example[key] for key in handler.keys} outputs.append(handler.tensors_to_item(keys_to_tensors)) return outputs
[ "tensorflow.string_split", "tensorflow.parse_single_sequence_example", "tensorflow.reshape", "tensorflow.concat" ]
seq2seq/data/sequence_split_tokens_decoder.py
[(75, 'tensorflow.parse_single_sequence_example', 'tf.parse_single_sequence_example', (['serialized_example', 'self._context_keys_to_features', 'self._sequence_keys_to_features'], {}), True, 'import tensorflow as tf\n'), (80, 'tensorflow.string_split', 'tf.string_split', (['tokens_raw'], {'delimiter': 'self.delimiter'}), True, 'import tensorflow as tf\n'), (84, 'tensorflow.concat', 'tf.concat', (['[[self.prepend_token], tokens]', '(0)'], {}), True, 'import tensorflow as tf\n'), (88, 'tensorflow.concat', 'tf.concat', (['[tokens, [self.append_token]]', '(0)'], {}), True, 'import tensorflow as tf\n'), (102, 'tensorflow.reshape', 'tf.reshape', (['example[k]', 'value.shape'], {}), True, 'import tensorflow as tf\n')]
simonchu47/TFSegmentation
52f268523daed3f650dc21538e97f159f10c019c
""" Trainer class to train Segmentation models """ from train.basic_train import BasicTrain from metrics.metrics import Metrics from utils.reporter import Reporter from utils.misc import timeit from utils.average_meter import FPSMeter from tqdm import tqdm import numpy as np import tensorflow as tf import matplotlib import time import h5py import pickle from utils.augmentation import flip_randomly_left_right_image_with_annotation, \ scale_randomly_image_with_annotation_with_fixed_size_output import scipy.misc as misc matplotlib.use('Agg') import matplotlib.pyplot as plt # import cv2 from utils.img_utils import decode_labels from utils.seg_dataloader import SegDataLoader from tensorflow.contrib.data import Iterator import os import pdb import torchfile from data.postprocess import postprocess class Train(BasicTrain): """ Trainer class """ def __init__(self, args, sess, train_model, test_model): """ Call the constructor of the base class init summaries init loading data :param args: :param sess: :param model: :return: """ super().__init__(args, sess, train_model, test_model) ################################################################################## # Init summaries # Summary variables self.scalar_summary_tags = ['mean_iou_on_val', 'train-loss-per-epoch', 'val-loss-per-epoch', 'train-acc-per-epoch', 'val-acc-per-epoch'] self.images_summary_tags = [ ('train_prediction_sample', [None, self.params.img_height, self.params.img_width * 2, 3]), ('val_prediction_sample', [None, self.params.img_height, self.params.img_width * 2, 3])] self.summary_tags = [] self.summary_placeholders = {} self.summary_ops = {} # init summaries and it's operators self.init_summaries() # Create summary writer self.summary_writer = tf.summary.FileWriter(self.args.summary_dir, self.sess.graph) ################################################################################## # Init load data and generator self.generator = None if self.args.data_mode == "experiment_tfdata": self.data_session = None self.train_next_batch, self.train_data_len = self.init_tfdata(self.args.batch_size, self.args.abs_data_dir, (self.args.img_height, self.args.img_width), mode='train') self.num_iterations_training_per_epoch = self.train_data_len // self.args.batch_size self.generator = self.train_tfdata_generator elif self.args.data_mode == "experiment_h5": self.train_data = None self.train_data_len = None self.val_data = None self.val_data_len = None self.num_iterations_training_per_epoch = None self.num_iterations_validation_per_epoch = None self.load_train_data_h5() self.generator = self.train_h5_generator elif self.args.data_mode == "experiment_v2": self.targets_resize = self.args.targets_resize self.train_data = None self.train_data_len = None self.val_data = None self.val_data_len = None self.num_iterations_training_per_epoch = None self.num_iterations_validation_per_epoch = None self.load_train_data(v2=True) self.generator = self.train_generator elif self.args.data_mode == "experiment": self.train_data = None self.train_data_len = None self.val_data = None self.val_data_len = None self.num_iterations_training_per_epoch = None self.num_iterations_validation_per_epoch = None self.load_train_data() self.generator = self.train_generator elif self.args.data_mode == "test_tfdata": self.test_data = None self.test_data_len = None self.num_iterations_testing_per_epoch = None self.load_val_data() self.generator = self.test_tfdata_generator elif self.args.data_mode == "test": self.test_data = None self.test_data_len = None 
            self.num_iterations_testing_per_epoch = None
            self.load_val_data()
            self.generator = self.test_generator
        elif self.args.data_mode == "test_eval":
            self.test_data = None
            self.test_data_len = None
            self.num_iterations_testing_per_epoch = None
            self.names_mapper = None
            self.load_test_data()
            self.generator = self.test_generator
        elif self.args.data_mode == "test_v2":
            self.targets_resize = self.args.targets_resize
            self.test_data = None
            self.test_data_len = None
            self.num_iterations_testing_per_epoch = None
            self.load_val_data(v2=True)
            self.generator = self.test_generator
        elif self.args.data_mode == "video":
            self.args.data_mode = "test"
            self.test_data = None
            self.test_data_len = None
            self.num_iterations_testing_per_epoch = None
            self.load_vid_data()
            self.generator = self.test_generator
        elif self.args.data_mode == "debug":
            print("Debugging photo loading..")
            # self.debug_x= misc.imread('/data/menna/cityscapes/leftImg8bit/val/lindau/lindau_000048_000019_leftImg8bit.png')
            # self.debug_y= misc.imread('/data/menna/cityscapes/gtFine/val/lindau/lindau_000048_000019_gtFine_labelIds.png')
            # self.debug_x= np.expand_dims(misc.imresize(self.debug_x, (512,1024)), axis=0)
            # self.debug_y= np.expand_dims(misc.imresize(self.debug_y, (512,1024)), axis=0)
            self.debug_x = np.load('data/debug/debug_x.npy')
            self.debug_y = np.load('data/debug/debug_y.npy')
            print("Debugging photo loaded")
        else:
            print("ERROR: please select a proper data_mode")
            exit(-1)
        ##################################################################################
        # Init metrics class
        self.metrics = Metrics(self.args.num_classes)
        # Init reporter class
        if self.args.mode in ('train', 'overfit'):
            self.reporter = Reporter(self.args.out_dir + 'report_train.json', self.args)
        elif self.args.mode == 'test':
            self.reporter = Reporter(self.args.out_dir + 'report_test.json', self.args)
        ##################################################################################

    def crop(self):
        sh = self.val_data['X'].shape
        temp_val_data = {'X': np.zeros((sh[0] * 2, sh[1], sh[2] // 2, sh[3]), self.val_data['X'].dtype),
                         'Y': np.zeros((sh[0] * 2, sh[1], sh[2] // 2), self.val_data['Y'].dtype)}
        for i in range(sh[0]):
            temp_val_data['X'][i * 2, :, :, :] = self.val_data['X'][i, :, :sh[2] // 2, :]
            temp_val_data['X'][i * 2 + 1, :, :, :] = self.val_data['X'][i, :, sh[2] // 2:, :]
            temp_val_data['Y'][i * 2, :, :] = self.val_data['Y'][i, :, :sh[2] // 2]
            temp_val_data['Y'][i * 2 + 1, :, :] = self.val_data['Y'][i, :, sh[2] // 2:]
        self.val_data = temp_val_data

    def init_tfdata(self, batch_size, main_dir, resize_shape, mode='train'):
        self.data_session = tf.Session()
        print("Creating the iterator for training data")
        with tf.device('/cpu:0'):
            segdl = SegDataLoader(main_dir, batch_size, (resize_shape[0], resize_shape[1]), resize_shape,
                                  # * 2), resize_shape,
                                  'data/cityscapes_tfdata/train.txt')
            iterator = Iterator.from_structure(segdl.data_tr.output_types, segdl.data_tr.output_shapes)
            next_batch = iterator.get_next()

            self.init_op = iterator.make_initializer(segdl.data_tr)
            self.data_session.run(self.init_op)

        print("Loading Validation data in memory for faster training..")
        self.val_data = {'X': np.load(self.args.data_dir + "X_val.npy"),
                         'Y': np.load(self.args.data_dir + "Y_val.npy")}

        # self.crop()
        # import cv2
        # cv2.imshow('crop1', self.val_data['X'][0,:,:,:])
        # cv2.imshow('crop2', self.val_data['X'][1,:,:,:])
        # cv2.imshow('seg1', self.val_data['Y'][0,:,:])
        # cv2.imshow('seg2', self.val_data['Y'][1,:,:])
        # cv2.waitKey()

        self.val_data_len = self.val_data['X'].shape[0] - self.val_data['X'].shape[0] %
self.args.batch_size # self.num_iterations_validation_per_epoch = ( # self.val_data_len + self.args.batch_size - 1) // self.args.batch_size self.num_iterations_validation_per_epoch = self.val_data_len // self.args.batch_size print("Val-shape-x -- " + str(self.val_data['X'].shape) + " " + str(self.val_data_len)) print("Val-shape-y -- " + str(self.val_data['Y'].shape)) print("Num of iterations on validation data in one epoch -- " + str(self.num_iterations_validation_per_epoch)) print("Validation data is loaded") return next_batch, segdl.data_len @timeit def load_overfit_data(self): print("Loading data..") self.train_data = {'X': np.load(self.args.data_dir + "X_train.npy"), 'Y': np.load(self.args.data_dir + "Y_train.npy")} self.train_data_len = self.train_data['X'].shape[0] - self.train_data['X'].shape[0] % self.args.batch_size self.num_iterations_training_per_epoch = ( self.train_data_len + self.args.batch_size - 1) // self.args.batch_size print("Train-shape-x -- " + str(self.train_data['X'].shape)) print("Train-shape-y -- " + str(self.train_data['Y'].shape)) print("Num of iterations in one epoch -- " + str(self.num_iterations_training_per_epoch)) print("Overfitting data is loaded") print("Loading Validation data..") self.val_data = self.train_data self.val_data_len = self.val_data['X'].shape[0] - self.val_data['X'].shape[0] % self.args.batch_size self.num_iterations_validation_per_epoch = ( self.val_data_len + self.args.batch_size - 1) // self.args.batch_size print("Val-shape-x -- " + str(self.val_data['X'].shape) + " " + str(self.val_data_len)) print("Val-shape-y -- " + str(self.val_data['Y'].shape)) print("Num of iterations on validation data in one epoch -- " + str(self.num_iterations_validation_per_epoch)) print("Validation data is loaded") def overfit_generator(self): start = 0 new_epoch_flag = True idx = None while True: # init index array if it is a new_epoch if new_epoch_flag: if self.args.shuffle: idx = np.random.choice(self.train_data_len, self.train_data_len, replace=False) else: idx = np.arange(self.train_data_len) new_epoch_flag = False # select the mini_batches mask = idx[start:start + self.args.batch_size] x_batch = self.train_data['X'][mask] y_batch = self.train_data['Y'][mask] start += self.args.batch_size if start >= self.train_data_len: start = 0 new_epoch_flag = True yield x_batch, y_batch def init_summaries(self): """ Create the summary part of the graph :return: """ with tf.variable_scope('train-summary-per-epoch'): for tag in self.scalar_summary_tags: self.summary_tags += tag self.summary_placeholders[tag] = tf.placeholder('float32', None, name=tag) self.summary_ops[tag] = tf.summary.scalar(tag, self.summary_placeholders[tag]) for tag, shape in self.images_summary_tags: self.summary_tags += tag self.summary_placeholders[tag] = tf.placeholder('float32', shape, name=tag) self.summary_ops[tag] = tf.summary.image(tag, self.summary_placeholders[tag], max_outputs=10) def add_summary(self, step, summaries_dict=None, summaries_merged=None): """ Add the summaries to tensorboard :param step: :param summaries_dict: :param summaries_merged: :return: """ if summaries_dict is not None: summary_list = self.sess.run([self.summary_ops[tag] for tag in summaries_dict.keys()], {self.summary_placeholders[tag]: value for tag, value in summaries_dict.items()}) for summary in summary_list: self.summary_writer.add_summary(summary, step) if summaries_merged is not None: self.summary_writer.add_summary(summaries_merged, step) @timeit def load_train_data(self, v2=False): print("Loading Training 
data..") self.train_data = {'X': np.load(self.args.data_dir + "X_train.npy"), 'Y': np.load(self.args.data_dir + "Y_train.npy")} self.train_data = self.resize(self.train_data) if v2: out_shape = (self.train_data['Y'].shape[1] // self.targets_resize, self.train_data['Y'].shape[2] // self.targets_resize) yy = np.zeros((self.train_data['Y'].shape[0], out_shape[0], out_shape[1]), dtype=self.train_data['Y'].dtype) for y in range(self.train_data['Y'].shape[0]): yy[y, ...] = misc.imresize(self.train_data['Y'][y, ...], out_shape, interp='nearest') self.train_data['Y'] = yy self.train_data_len = self.train_data['X'].shape[0] self.num_iterations_training_per_epoch = ( self.train_data_len + self.args.batch_size - 1) // self.args.batch_size print("Train-shape-x -- " + str(self.train_data['X'].shape) + " " + str(self.train_data_len)) print("Train-shape-y -- " + str(self.train_data['Y'].shape)) print("Num of iterations on training data in one epoch -- " + str(self.num_iterations_training_per_epoch)) print("Training data is loaded") print("Loading Validation data..") self.val_data = {'X': np.load(self.args.data_dir + "X_val.npy"), 'Y': np.load(self.args.data_dir + "Y_val.npy")} self.val_data['Y_large'] = self.val_data['Y'] if v2: out_shape = (self.val_data['Y'].shape[1] // self.targets_resize, self.val_data['Y'].shape[2] // self.targets_resize) yy = np.zeros((self.val_data['Y'].shape[0], out_shape[0], out_shape[1]), dtype=self.train_data['Y'].dtype) for y in range(self.val_data['Y'].shape[0]): yy[y, ...] = misc.imresize(self.val_data['Y'][y, ...], out_shape, interp='nearest') self.val_data['Y'] = yy self.val_data_len = self.val_data['X'].shape[0] - self.val_data['X'].shape[0] % self.args.batch_size self.num_iterations_validation_per_epoch = ( self.val_data_len + self.args.batch_size - 1) // self.args.batch_size print("Val-shape-x -- " + str(self.val_data['X'].shape) + " " + str(self.val_data_len)) print("Val-shape-y -- " + str(self.val_data['Y'].shape)) print("Num of iterations on validation data in one epoch -- " + str(self.num_iterations_validation_per_epoch)) print("Validation data is loaded") @timeit def load_train_data_h5(self): print("Loading Training data..") self.train_data = h5py.File(self.args.data_dir + self.args.h5_train_file, 'r') self.train_data_len = self.args.h5_train_len self.num_iterations_training_per_epoch = ( self.train_data_len + self.args.batch_size - 1) // self.args.batch_size print("Train-shape-x -- " + str(self.train_data['X'].shape) + " " + str(self.train_data_len)) print("Train-shape-y -- " + str(self.train_data['Y'].shape)) print("Num of iterations on training data in one epoch -- " + str(self.num_iterations_training_per_epoch)) print("Training data is loaded") print("Loading Validation data..") self.val_data = {'X': np.load(self.args.data_dir + "X_val.npy"), 'Y': np.load(self.args.data_dir + "Y_val.npy")} self.val_data_len = self.val_data['X'].shape[0] - self.val_data['X'].shape[0] % self.args.batch_size self.num_iterations_validation_per_epoch = ( self.val_data_len + self.args.batch_size - 1) // self.args.batch_size print("Val-shape-x -- " + str(self.val_data['X'].shape) + " " + str(self.val_data_len)) print("Val-shape-y -- " + str(self.val_data['Y'].shape)) print("Num of iterations on validation data in one epoch -- " + str(self.num_iterations_validation_per_epoch)) print("Validation data is loaded") @timeit def load_vid_data(self): print("Loading Video data..") self.test_data = {'X': np.load(self.args.data_dir + "X_vid.npy")} self.test_data['Y'] = 
np.zeros(self.test_data['X'].shape[:3]) self.test_data_len = self.test_data['X'].shape[0] print("Vid-shape-x -- " + str(self.test_data['X'].shape)) print("Vid-shape-y -- " + str(self.test_data['Y'].shape)) self.num_iterations_testing_per_epoch = (self.test_data_len + self.args.batch_size - 1) // self.args.batch_size print("Video data is loaded") @timeit def load_val_data(self, v2=False): print("Loading Validation data..") self.test_data = {'X': np.load(self.args.data_dir + "X_val.npy"), 'Y': np.load(self.args.data_dir + "Y_val.npy")} self.test_data = self.resize(self.test_data) self.test_data['Y_large'] = self.test_data['Y'] if v2: out_shape = (self.test_data['Y'].shape[1] // self.targets_resize, self.test_data['Y'].shape[2] // self.targets_resize) yy = np.zeros((self.test_data['Y'].shape[0], out_shape[0], out_shape[1]), dtype=self.test_data['Y'].dtype) for y in range(self.test_data['Y'].shape[0]): yy[y, ...] = misc.imresize(self.test_data['Y'][y, ...], out_shape, interp='nearest') self.test_data['Y'] = yy self.test_data_len = self.test_data['X'].shape[0] - self.test_data['X'].shape[0] % self.args.batch_size print("Validation-shape-x -- " + str(self.test_data['X'].shape)) print("Validation-shape-y -- " + str(self.test_data['Y'].shape)) self.num_iterations_testing_per_epoch = (self.test_data_len + self.args.batch_size - 1) // self.args.batch_size print("Validation data is loaded") @timeit def load_test_data(self): print("Loading Testing data..") self.test_data = {'X': np.load(self.args.data_dir + "X_test.npy")} self.names_mapper = {'X': np.load(self.args.data_dir + "xnames_test.npy"), 'Y': np.load(self.args.data_dir + "ynames_test.npy")} self.test_data_len = self.test_data['X'].shape[0] - self.test_data['X'].shape[0] % self.args.batch_size print("Test-shape-x -- " + str(self.test_data['X'].shape)) self.num_iterations_testing_per_epoch = (self.test_data_len + self.args.batch_size - 1) // self.args.batch_size print("Test data is loaded") def test_generator(self): start = 0 new_epoch_flag = True idx = None while True: # init index array if it is a new_epoch if new_epoch_flag: if self.args.shuffle: idx = np.random.choice(self.test_data_len, self.test_data_len, replace=False) else: idx = np.arange(self.test_data_len) new_epoch_flag = False # select the mini_batches mask = idx[start:start + self.args.batch_size] x_batch = self.test_data['X'][mask] y_batch = self.test_data['Y'][mask] # update start idx start += self.args.batch_size if start >= self.test_data_len: start = 0 new_epoch_flag = True yield x_batch, y_batch def train_generator(self): start = 0 idx = np.random.choice(self.train_data_len, self.num_iterations_training_per_epoch * self.args.batch_size, replace=True) while True: # select the mini_batches mask = idx[start:start + self.args.batch_size] x_batch = self.train_data['X'][mask] y_batch = self.train_data['Y'][mask] # update start idx start += self.args.batch_size yield x_batch, y_batch if start >= self.train_data_len: return def train_tfdata_generator(self): with tf.device('/cpu:0'): while True: x_batch, y_batch = self.data_session.run(self.train_next_batch) yield x_batch, y_batch[:, :, :, 0] def train_h5_generator(self): start = 0 idx = np.random.choice(self.train_data_len, self.train_data_len, replace=False) while True: # select the mini_batches mask = idx[start:start + self.args.batch_size] x_batch = self.train_data['X'][sorted(mask.tolist())] y_batch = self.train_data['Y'][sorted(mask.tolist())] # update start idx start += self.args.batch_size if start >= self.train_data_len: 
return yield x_batch, y_batch def resize(self, data): X = [] Y = [] for i in range(data['X'].shape[0]): X.append(misc.imresize(data['X'][i, ...], (self.args.img_height, self.args.img_width))) Y.append(misc.imresize(data['Y'][i, ...], (self.args.img_height, self.args.img_width), 'nearest')) data['X'] = np.asarray(X) data['Y'] = np.asarray(Y) return data def train(self): print("Training mode will begin NOW ..") # curr_lr= self.model.args.learning_rate for cur_epoch in range(self.model.global_epoch_tensor.eval(self.sess) + 1, self.args.num_epochs + 1, 1): # init tqdm and get the epoch value tt = tqdm(self.generator(), total=self.num_iterations_training_per_epoch, desc="epoch-" + str(cur_epoch) + "-") # init the current iterations cur_iteration = 0 # init acc and loss lists loss_list = [] acc_list = [] # loop by the number of iterations for x_batch, y_batch in tt: # get the cur_it for the summary cur_it = self.model.global_step_tensor.eval(self.sess) # Feed this variables to the network feed_dict = {self.model.x_pl: x_batch, self.model.y_pl: y_batch, self.model.is_training: True # self.model.curr_learning_rate:curr_lr } # Run the feed forward but the last iteration finalize what you want to do if cur_iteration < self.num_iterations_training_per_epoch - 1: # run the feed_forward _, loss, acc, summaries_merged = self.sess.run( [self.model.train_op, self.model.loss, self.model.accuracy, self.model.merged_summaries], feed_dict=feed_dict) # log loss and acc loss_list += [loss] acc_list += [acc] # summarize # self.add_summary(cur_it, summaries_merged=summaries_merged) else: # run the feed_forward if self.args.data_mode == 'experiment_v2': _, loss, acc, summaries_merged = self.sess.run( [self.model.train_op, self.model.loss, self.model.accuracy, self.model.merged_summaries], feed_dict=feed_dict) else: _, loss, acc, summaries_merged, segmented_imgs = self.sess.run( [self.model.train_op, self.model.loss, self.model.accuracy, self.model.merged_summaries, self.model.segmented_summary], feed_dict=feed_dict) # log loss and acc loss_list += [loss] acc_list += [acc] total_loss = np.mean(loss_list) total_acc = np.mean(acc_list) # summarize summaries_dict = dict() summaries_dict['train-loss-per-epoch'] = total_loss summaries_dict['train-acc-per-epoch'] = total_acc if self.args.data_mode != 'experiment_v2': summaries_dict['train_prediction_sample'] = segmented_imgs # self.add_summary(cur_it, summaries_dict=summaries_dict, summaries_merged=summaries_merged) # report self.reporter.report_experiment_statistics('train-acc', 'epoch-' + str(cur_epoch), str(total_acc)) self.reporter.report_experiment_statistics('train-loss', 'epoch-' + str(cur_epoch), str(total_loss)) self.reporter.finalize() # Update the Global step self.model.global_step_assign_op.eval(session=self.sess, feed_dict={self.model.global_step_input: cur_it + 1}) # Update the Cur Epoch tensor # it is the last thing because if it is interrupted it repeat this self.model.global_epoch_assign_op.eval(session=self.sess, feed_dict={self.model.global_epoch_input: cur_epoch + 1}) # print in console tt.close() print("epoch-" + str(cur_epoch) + "-" + "loss:" + str(total_loss) + "-" + " acc:" + str(total_acc)[ :6]) # Break the loop to finalize this epoch break # Update the Global step self.model.global_step_assign_op.eval(session=self.sess, feed_dict={self.model.global_step_input: cur_it + 1}) # update the cur_iteration cur_iteration += 1 # Save the current checkpoint if cur_epoch % self.args.save_every == 0: self.save_model() # Test the model on validation if 
cur_epoch % self.args.test_every == 0: self.test_per_epoch(step=self.model.global_step_tensor.eval(self.sess), epoch=self.model.global_epoch_tensor.eval(self.sess)) # if cur_epoch % self.args.learning_decay_every == 0: # curr_lr= curr_lr*self.args.learning_decay # print('Current learning rate is ', curr_lr) print("Training Finished") def test_per_epoch(self, step, epoch): print("Validation at step:" + str(step) + " at epoch:" + str(epoch) + " ..") # init tqdm and get the epoch value tt = tqdm(range(self.num_iterations_validation_per_epoch), total=self.num_iterations_validation_per_epoch, desc="Val-epoch-" + str(epoch) + "-") # init acc and loss lists loss_list = [] acc_list = [] inf_list = [] # idx of minibatch idx = 0 # reset metrics self.metrics.reset() # get the maximum iou to compare with and save the best model max_iou = self.model.best_iou_tensor.eval(self.sess) # loop by the number of iterations for cur_iteration in tt: # load minibatches x_batch = self.val_data['X'][idx:idx + self.args.batch_size] y_batch = self.val_data['Y'][idx:idx + self.args.batch_size] if self.args.data_mode == 'experiment_v2': y_batch_large = self.val_data['Y_large'][idx:idx + self.args.batch_size] # update idx of minibatch idx += self.args.batch_size # Feed this variables to the network feed_dict = {self.model.x_pl: x_batch, self.model.y_pl: y_batch, self.model.is_training: False } # Run the feed forward but the last iteration finalize what you want to do if cur_iteration < self.num_iterations_validation_per_epoch - 1: start = time.time() # run the feed_forward out_argmax, loss, acc, summaries_merged = self.sess.run( [self.model.out_argmax, self.model.loss, self.model.accuracy, self.model.merged_summaries], feed_dict=feed_dict) end = time.time() # log loss and acc loss_list += [loss] acc_list += [acc] inf_list += [end - start] if self.args.data_mode == 'experiment_v2': yy = np.zeros((out_argmax.shape[0], y_batch_large.shape[1], y_batch_large.shape[2]), dtype=np.uint32) out_argmax = np.asarray(out_argmax, dtype=np.uint8) for y in range(out_argmax.shape[0]): yy[y, ...] 
= misc.imresize(out_argmax[y, ...], y_batch_large.shape[1:], interp='nearest') y_batch = y_batch_large out_argmax = yy # log metrics self.metrics.update_metrics_batch(out_argmax, y_batch) else: start = time.time() # run the feed_forward if self.args.data_mode == 'experiment_v2': # Issues in concatenating gt and img with diff sizes now for segmented_imgs out_argmax, acc = self.sess.run( [self.test_model.out_argmax, self.test_model.accuracy], feed_dict=feed_dict) else: out_argmax, acc, segmented_imgs = self.sess.run( [self.test_model.out_argmax, self.test_model.accuracy, self.test_model.segmented_summary], feed_dict=feed_dict) end = time.time() # log loss and acc acc_list += [acc] inf_list += [end - start] # log metrics self.metrics.update_metrics_batch(out_argmax, y_batch) # mean over batches total_acc = np.mean(acc_list) mean_iou = self.metrics.compute_final_metrics(self.num_iterations_validation_per_epoch) mean_iou_arr = self.metrics.iou mean_inference = str(np.mean(inf_list)) + '-seconds' # summarize summaries_dict = dict() summaries_dict['val-acc-per-epoch'] = total_acc summaries_dict['mean_iou_on_val'] = mean_iou if self.args.data_mode != 'experiment_v2': # Issues in concatenating gt and img with diff sizes now for segmented_imgs summaries_dict['val_prediction_sample'] = segmented_imgs # self.add_summary(step, summaries_dict=summaries_dict, summaries_merged=summaries_merged) # report self.reporter.report_experiment_statistics('validation-acc', 'epoch-' + str(epoch), str(total_acc)) self.reporter.report_experiment_statistics('avg_inference_time_on_validation', 'epoch-' + str(epoch), str(mean_inference)) self.reporter.report_experiment_validation_iou('epoch-' + str(epoch), str(mean_iou), mean_iou_arr) self.reporter.finalize() # print in console tt.close() print("Val-epoch-" + str(epoch) + "-" + "acc:" + str(total_acc)[:6] + "-mean_iou:" + str(mean_iou)) print("Last_max_iou: " + str(max_iou)) if mean_iou > max_iou: print("This validation got a new best iou. 
so we will save this one") # save the best model self.save_best_model() # Set the new maximum self.model.best_iou_assign_op.eval(session=self.sess, feed_dict={self.model.best_iou_input: mean_iou}) else: print("hmm not the best validation epoch :/..") break # Break the loop to finalize this epoch def linknet_postprocess(self, gt): gt2 = gt - 1 gt2[gt == -1] = 19 return gt2 def test(self, pkl=False): print("Testing mode will begin NOW..") # load the best model checkpoint to test on it if not pkl: self.load_best_model() # init tqdm and get the epoch value tt = tqdm(range(self.test_data_len)) # naming = np.load(self.args.data_dir + 'names_train.npy') # init acc and loss lists acc_list = [] img_list = [] # idx of image idx = 0 # reset metrics self.metrics.reset() # loop by the number of iterations for cur_iteration in tt: # load mini_batches x_batch = self.test_data['X'][idx:idx + 1] y_batch = self.test_data['Y'][idx:idx + 1] if self.args.data_mode == 'test_v2': y_batch_large = self.test_data['Y_large'][idx:idx + 1] idx += 1 # Feed this variables to the network if self.args.random_cropping: feed_dict = {self.test_model.x_pl_before: x_batch, self.test_model.y_pl_before: y_batch, self.test_model.is_training: False, } else: feed_dict = {self.test_model.x_pl: x_batch, self.test_model.y_pl: y_batch, self.test_model.is_training: False } # run the feed_forward if self.args.data_mode == 'test_v2': out_argmax, acc = self.sess.run( [self.test_model.out_argmax, self.test_model.accuracy], feed_dict=feed_dict) else: out_argmax, acc, segmented_imgs = self.sess.run( [self.test_model.out_argmax, self.test_model.accuracy, # self.test_model.merged_summaries, self.test_model.segmented_summary], self.test_model.segmented_summary], feed_dict=feed_dict) if self.args.data_mode == 'test_v2': yy = np.zeros((out_argmax.shape[0], y_batch_large.shape[1], y_batch_large.shape[2]), dtype=np.uint32) out_argmax = np.asarray(out_argmax, dtype=np.uint8) for y in range(out_argmax.shape[0]): yy[y, ...] 
= misc.imresize(out_argmax[y, ...], y_batch_large.shape[1:], interp='nearest') y_batch = y_batch_large out_argmax = yy if pkl: out_argmax[0] = self.linknet_postprocess(out_argmax[0]) segmented_imgs = decode_labels(out_argmax, 20) # print('mean preds ', out_argmax.mean()) # np.save(self.args.out_dir + 'npy/' + str(cur_iteration) + '.npy', out_argmax[0]) if self.args.data_mode == 'test': plt.imsave(self.args.out_dir + 'imgs/' + 'test_' + str(cur_iteration) + '.png', segmented_imgs[0]) # log loss and acc acc_list += [acc] # log metrics if self.args.random_cropping: y1 = np.expand_dims(y_batch[0, :, :512], axis=0) y2 = np.expand_dims(y_batch[0, :, 512:], axis=0) y_batch = np.concatenate((y1, y2), axis=0) self.metrics.update_metrics(out_argmax, y_batch, 0, 0) else: self.metrics.update_metrics(out_argmax[0], y_batch[0], 0, 0) # mean over batches total_loss = 0 total_acc = np.mean(acc_list) mean_iou = self.metrics.compute_final_metrics(self.test_data_len) # print in console tt.close() print("Here the statistics") print("Total_loss: " + str(total_loss)) print("Total_acc: " + str(total_acc)[:6]) print("mean_iou: " + str(mean_iou)) print("Plotting imgs") for i in range(len(img_list)): plt.imsave(self.args.imgs_dir + 'test_' + str(i) + '.png', img_list[i]) def test_eval(self, pkl=False): print("Testing mode will begin NOW..") # load the best model checkpoint to test on it if not pkl: self.load_best_model() # init tqdm and get the epoch value tt = tqdm(range(self.test_data_len)) # idx of image idx = 0 # loop by the number of iterations for cur_iteration in tt: # load mini_batches x_batch = self.test_data['X'][idx:idx + 1] # Feed this variables to the network if self.args.random_cropping: feed_dict = {self.test_model.x_pl_before: x_batch, self.test_model.is_training: False, } else: feed_dict = {self.test_model.x_pl: x_batch, self.test_model.is_training: False } # run the feed_forward out_argmax, segmented_imgs = self.sess.run( [self.test_model.out_argmax, self.test_model.segmented_summary], feed_dict=feed_dict) if pkl: out_argmax[0] = self.linknet_postprocess(out_argmax[0]) segmented_imgs = decode_labels(out_argmax, 20) # Colored results for visualization colored_save_path = self.args.out_dir + 'imgs/' + str(self.names_mapper['Y'][idx]) if not os.path.exists(os.path.dirname(colored_save_path)): os.makedirs(os.path.dirname(colored_save_path)) plt.imsave(colored_save_path, segmented_imgs[0]) # Results for official evaluation save_path = self.args.out_dir + 'results/' + str(self.names_mapper['Y'][idx]) if not os.path.exists(os.path.dirname(save_path)): os.makedirs(os.path.dirname(save_path)) output = postprocess(out_argmax[0]) misc.imsave(save_path, misc.imresize(output, [1024, 2048], 'nearest')) idx += 1 # print in console tt.close() def test_inference(self): """ Like the testing function but this one is for calculate the inference time and measure the frame per second """ print("INFERENCE mode will begin NOW..") # load the best model checkpoint to test on it self.load_best_model() # output_node: network/output/Argmax # input_node: network/input/Placeholder # for n in tf.get_default_graph().as_graph_def().node: # if 'input' in n.name:#if 'Argmax' in n.name: # import pdb; pdb.set_trace() print("Saving graph...") tf.train.write_graph(self.sess.graph_def, ".", 'graph.pb') print("Graph saved successfully.\n\n") exit(1) # init tqdm and get the epoch value tt = tqdm(range(self.test_data_len)) # idx of image idx = 0 # create the FPS Meter fps_meter = FPSMeter() # loop by the number of iterations for 
cur_iteration in tt: # load mini_batches x_batch = self.test_data['X'][idx:idx + 1] y_batch = self.test_data['Y'][idx:idx + 1] # update idx of mini_batch idx += 1 # Feed this variables to the network if self.args.random_cropping: feed_dict = {self.test_model.x_pl_before: x_batch, self.test_model.y_pl_before: y_batch # self.test_model.is_training: False, } else: feed_dict = {self.test_model.x_pl: x_batch, self.test_model.y_pl: y_batch # self.test_model.is_training: False } # calculate the time of one inference start = time.time() # run the feed_forward _ = self.sess.run( [self.test_model.out_argmax], feed_dict=feed_dict) # update the FPS meter fps_meter.update(time.time() - start) fps_meter.print_statistics() def finalize(self): self.reporter.finalize() self.summary_writer.close() self.save_model() def debug_layers(self): """ This function will be responsible for output all outputs of all layers and dump them in a pickle :return: """ print("Debugging mode will begin NOW..") layers = tf.get_collection('debug_layers') print("ALL Layers in the collection that i wanna to run {} layer".format(len(layers))) for layer in layers: print(layer) # exit(0) # reset metrics self.metrics.reset() print('mean image ', self.debug_x.mean()) print('mean gt ', self.debug_y.mean()) self.debug_y = self.linknet_preprocess_gt(self.debug_y) feed_dict = {self.test_model.x_pl: self.debug_x, self.test_model.y_pl: self.debug_y, self.test_model.is_training: False } # var = [v for v in tf.all_variables() if v.op.name == "network/decoder_block_4/deconv/deconv/weights"] # conv_w= self.sess.run(var[0]) # var = [v for v in tf.all_variables() if v.op.name == "network/decoder_block_4/deconv/deconv/biases"] # bias= self.sess.run(var[0]) # run the feed_forward out_layers = self.sess.run(layers, feed_dict=feed_dict) for layer in out_layers: print(layer.shape) # dict_out= torchfile.load('out_networks_layers/dict_out.t7') ## init= tf.constant_initializer(conv_w) ## conv_w1 = tf.get_variable('my_weights', [3,3,128,128], tf.float32, initializer=init, trainable=True) # pp= tf.nn.relu(layers[39]) # out_relu= self.sess.run(pp, feed_dict={self.test_model.x_pl: self.debug_x, # self.test_model.y_pl: self.debug_y, # self.test_model.is_training: False # }) ## pp = tf.nn.conv2d_transpose(layers[39], conv_w, (1,32,64,128), strides=(1,2,2,1), padding="SAME") ## pp= tf.image.resize_images(layers[39], (32,64)) ## pp = tf.nn.conv2d(pp, conv_w, strides=(1,1,1,1), padding="SAME") ## bias1= tf.get_variable('my_bias', 128, tf.float32, tf.constant_initializer(bias)) # pp = tf.nn.bias_add(pp, bias) # #self.sess.run(conv_w1.initializer) # #self.sess.run(bias1.initializer) # out_deconv= self.sess.run(pp, feed_dict={self.test_model.x_pl: self.debug_x, # self.test_model.y_pl: self.debug_y, # self.test_model.is_training: False # }) # out_deconv_direct= self.sess.run(layers[40], feed_dict={self.test_model.x_pl: self.debug_x, # self.test_model.y_pl: self.debug_y, # self.test_model.is_training: False # }) # pdb.set_trace() # print(out_layers) # exit(0) # dump them in a pickle with open("out_networks_layers/out_linknet_layers.pkl", "wb") as f: pickle.dump(out_layers, f, protocol=2) # run the feed_forward again to see argmax and segmented out_argmax, segmented_imgs = self.sess.run( [self.test_model.out_argmax, self.test_model.segmented_summary], feed_dict=feed_dict) print('mean preds ', out_argmax[0].mean()) plt.imsave(self.args.out_dir + 'imgs/' + 'debug.png', segmented_imgs[0]) self.metrics.update_metrics(out_argmax[0], self.debug_y, 0, 0) mean_iou = 
        mean_iou = self.metrics.compute_final_metrics(1)
        print("mean_iou_of_debug: " + str(mean_iou))
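# For reference, a minimal sketch of inspecting the activations dumped by
# debug_layers() above offline (the path is the one hard-coded there):
#
#     import pickle
#     with open("out_networks_layers/out_linknet_layers.pkl", "rb") as f:
#         out_layers = pickle.load(f)
#     for i, layer in enumerate(out_layers):
#         print(i, layer.shape, layer.mean())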
[ "tensorflow.device", "numpy.expand_dims", "numpy.asarray", "numpy.concatenate", "numpy.mean", "tensorflow.summary.scalar", "tensorflow.get_collection", "tensorflow.summary.image", "numpy.arange", "tensorflow.Session", "numpy.load", "numpy.zeros", "tensorflow.train.write_graph", "matplotlib.pyplot.imsave", "numpy.random.choice", "tensorflow.placeholder", "scipy.misc.imresize", "tensorflow.summary.FileWriter", "tensorflow.contrib.data.Iterator.from_structure", "matplotlib.use", "tensorflow.variable_scope" ]
train/train.py
[(22, 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), False, 'import matplotlib\n'), (67, 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['self.args.summary_dir', 'self.sess.graph'], {}), True, 'import tensorflow as tf\n'), (153, 'metrics.metrics.Metrics', 'Metrics', (['self.args.num_classes'], {}), False, 'from metrics.metrics import Metrics\n'), (174, 'tensorflow.Session', 'tf.Session', ([], {}), True, 'import tensorflow as tf\n'), (336, 'h5py.File', 'h5py.File', (['(self.args.data_dir + self.args.h5_train_file)', '"""r"""'], {}), False, 'import h5py\n'), (360, 'numpy.zeros', 'np.zeros', (["self.test_data['X'].shape[:3]"], {}), True, 'import numpy as np\n'), (428, 'numpy.random.choice', 'np.random.choice', (['self.train_data_len', '(self.num_iterations_training_per_epoch * self.args.batch_size)'], {'replace': '(True)'}), True, 'import numpy as np\n'), (452, 'numpy.random.choice', 'np.random.choice', (['self.train_data_len', 'self.train_data_len'], {'replace': '(False)'}), True, 'import numpy as np\n'), (474, 'numpy.asarray', 'np.asarray', (['X'], {}), True, 'import numpy as np\n'), (475, 'numpy.asarray', 'np.asarray', (['Y'], {}), True, 'import numpy as np\n'), (802, 'numpy.mean', 'np.mean', (['acc_list'], {}), True, 'import numpy as np\n'), (888, 'tensorflow.train.write_graph', 'tf.train.write_graph', (['self.sess.graph_def', '"""."""', '"""graph.pb"""'], {}), True, 'import tensorflow as tf\n'), (899, 'utils.average_meter.FPSMeter', 'FPSMeter', ([], {}), False, 'from utils.average_meter import FPSMeter\n'), (948, 'tensorflow.get_collection', 'tf.get_collection', (['"""debug_layers"""'], {}), True, 'import tensorflow as tf\n'), (1018, 'matplotlib.pyplot.imsave', 'plt.imsave', (["(self.args.out_dir + 'imgs/' + 'debug.png')", 'segmented_imgs[0]'], {}), True, 'import matplotlib.pyplot as plt\n'), (156, 'utils.reporter.Reporter', 'Reporter', (["(self.args.out_dir + 'report_train.json')", 'self.args'], {}), False, 'from utils.reporter import Reporter\n'), (163, 'numpy.zeros', 'np.zeros', (['(sh[0] * 2, sh[1], sh[2] // 2, sh[3])', "self.val_data['X'].dtype"], {}), True, 'import numpy as np\n'), (164, 'numpy.zeros', 'np.zeros', (['(sh[0] * 2, sh[1], sh[2] // 2)', "self.val_data['Y'].dtype"], {}), True, 'import numpy as np\n'), (176, 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), True, 'import tensorflow as tf\n'), (177, 'utils.seg_dataloader.SegDataLoader', 'SegDataLoader', (['main_dir', 'batch_size', '(resize_shape[0], resize_shape[1])', 'resize_shape', '"""data/cityscapes_tfdata/train.txt"""'], {}), False, 'from utils.seg_dataloader import SegDataLoader\n'), (180, 'tensorflow.contrib.data.Iterator.from_structure', 'Iterator.from_structure', (['segdl.data_tr.output_types', 'segdl.data_tr.output_shapes'], {}), False, 'from tensorflow.contrib.data import Iterator\n'), (187, 'numpy.load', 'np.load', (["(self.args.data_dir + 'X_val.npy')"], {}), True, 'import numpy as np\n'), (188, 'numpy.load', 'np.load', (["(self.args.data_dir + 'Y_val.npy')"], {}), True, 'import numpy as np\n'), (212, 'numpy.load', 'np.load', (["(self.args.data_dir + 'X_train.npy')"], {}), True, 'import numpy as np\n'), (213, 'numpy.load', 'np.load', (["(self.args.data_dir + 'Y_train.npy')"], {}), True, 'import numpy as np\n'), (262, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""train-summary-per-epoch"""'], {}), True, 'import tensorflow as tf\n'), (292, 'numpy.load', 'np.load', (["(self.args.data_dir + 'X_train.npy')"], {}), True, 'import numpy as np\n'), (293, 'numpy.load', 'np.load', 
(["(self.args.data_dir + 'Y_train.npy')"], {}), True, 'import numpy as np\n'), (299, 'numpy.zeros', 'np.zeros', (["(self.train_data['Y'].shape[0], out_shape[0], out_shape[1])"], {'dtype': "self.train_data['Y'].dtype"}), True, 'import numpy as np\n'), (314, 'numpy.load', 'np.load', (["(self.args.data_dir + 'X_val.npy')"], {}), True, 'import numpy as np\n'), (315, 'numpy.load', 'np.load', (["(self.args.data_dir + 'Y_val.npy')"], {}), True, 'import numpy as np\n'), (320, 'numpy.zeros', 'np.zeros', (["(self.val_data['Y'].shape[0], out_shape[0], out_shape[1])"], {'dtype': "self.train_data['Y'].dtype"}), True, 'import numpy as np\n'), (346, 'numpy.load', 'np.load', (["(self.args.data_dir + 'X_val.npy')"], {}), True, 'import numpy as np\n'), (347, 'numpy.load', 'np.load', (["(self.args.data_dir + 'Y_val.npy')"], {}), True, 'import numpy as np\n'), (359, 'numpy.load', 'np.load', (["(self.args.data_dir + 'X_vid.npy')"], {}), True, 'import numpy as np\n'), (370, 'numpy.load', 'np.load', (["(self.args.data_dir + 'X_val.npy')"], {}), True, 'import numpy as np\n'), (371, 'numpy.load', 'np.load', (["(self.args.data_dir + 'Y_val.npy')"], {}), True, 'import numpy as np\n'), (377, 'numpy.zeros', 'np.zeros', (["(self.test_data['Y'].shape[0], out_shape[0], out_shape[1])"], {'dtype': "self.test_data['Y'].dtype"}), True, 'import numpy as np\n'), (391, 'numpy.load', 'np.load', (["(self.args.data_dir + 'X_test.npy')"], {}), True, 'import numpy as np\n'), (392, 'numpy.load', 'np.load', (["(self.args.data_dir + 'xnames_test.npy')"], {}), True, 'import numpy as np\n'), (393, 'numpy.load', 'np.load', (["(self.args.data_dir + 'ynames_test.npy')"], {}), True, 'import numpy as np\n'), (445, 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), True, 'import tensorflow as tf\n'), (858, 'matplotlib.pyplot.imsave', 'plt.imsave', (['colored_save_path', 'segmented_imgs[0]'], {}), True, 'import matplotlib.pyplot as plt\n'), (864, 'data.postprocess.postprocess', 'postprocess', (['out_argmax[0]'], {}), False, 'from data.postprocess import postprocess\n'), (923, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (1008, 'pickle.dump', 'pickle.dump', (['out_layers', 'f'], {'protocol': '(2)'}), False, 'import pickle\n'), (158, 'utils.reporter.Reporter', 'Reporter', (["(self.args.out_dir + 'report_test.json')", 'self.args'], {}), False, 'from utils.reporter import Reporter\n'), (265, 'tensorflow.placeholder', 'tf.placeholder', (['"""float32"""', 'None'], {'name': 'tag'}), True, 'import tensorflow as tf\n'), (266, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['tag', 'self.summary_placeholders[tag]'], {}), True, 'import tensorflow as tf\n'), (269, 'tensorflow.placeholder', 'tf.placeholder', (['"""float32"""', 'shape'], {'name': 'tag'}), True, 'import tensorflow as tf\n'), (270, 'tensorflow.summary.image', 'tf.summary.image', (['tag', 'self.summary_placeholders[tag]'], {'max_outputs': '(10)'}), True, 'import tensorflow as tf\n'), (301, 'scipy.misc.imresize', 'misc.imresize', (["self.train_data['Y'][y, ...]", 'out_shape'], {'interp': '"""nearest"""'}), True, 'import scipy.misc as misc\n'), (322, 'scipy.misc.imresize', 'misc.imresize', (["self.val_data['Y'][y, ...]", 'out_shape'], {'interp': '"""nearest"""'}), True, 'import scipy.misc as misc\n'), (379, 'scipy.misc.imresize', 'misc.imresize', (["self.test_data['Y'][y, ...]", 'out_shape'], {'interp': '"""nearest"""'}), True, 'import scipy.misc as misc\n'), (472, 'scipy.misc.imresize', 'misc.imresize', (["data['X'][i, ...]", '(self.args.img_height, 
self.args.img_width)'], {}), True, 'import scipy.misc as misc\n'), (473, 'scipy.misc.imresize', 'misc.imresize', (["data['Y'][i, ...]", '(self.args.img_height, self.args.img_width)', '"""nearest"""'], {}), True, 'import scipy.misc as misc\n'), (631, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (638, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (656, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (667, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (674, 'numpy.mean', 'np.mean', (['acc_list'], {}), True, 'import numpy as np\n'), (772, 'numpy.zeros', 'np.zeros', (['(out_argmax.shape[0], y_batch_large.shape[1], y_batch_large.shape[2])'], {'dtype': 'np.uint32'}), True, 'import numpy as np\n'), (773, 'numpy.asarray', 'np.asarray', (['out_argmax'], {'dtype': 'np.uint8'}), True, 'import numpy as np\n'), (781, 'utils.img_utils.decode_labels', 'decode_labels', (['out_argmax', '(20)'], {}), False, 'from utils.img_utils import decode_labels\n'), (793, 'numpy.expand_dims', 'np.expand_dims', (['y_batch[(0), :, :512]'], {'axis': '(0)'}), True, 'import numpy as np\n'), (794, 'numpy.expand_dims', 'np.expand_dims', (['y_batch[(0), :, 512:]'], {'axis': '(0)'}), True, 'import numpy as np\n'), (795, 'numpy.concatenate', 'np.concatenate', (['(y1, y2)'], {'axis': '(0)'}), True, 'import numpy as np\n'), (852, 'utils.img_utils.decode_labels', 'decode_labels', (['out_argmax', '(20)'], {}), False, 'from utils.img_utils import decode_labels\n'), (865, 'scipy.misc.imresize', 'misc.imresize', (['output', '[1024, 2048]', '"""nearest"""'], {}), True, 'import scipy.misc as misc\n'), (240, 'numpy.random.choice', 'np.random.choice', (['self.train_data_len', 'self.train_data_len'], {'replace': '(False)'}), True, 'import numpy as np\n'), (242, 'numpy.arange', 'np.arange', (['self.train_data_len'], {}), True, 'import numpy as np\n'), (407, 'numpy.random.choice', 'np.random.choice', (['self.test_data_len', 'self.test_data_len'], {'replace': '(False)'}), True, 'import numpy as np\n'), (409, 'numpy.arange', 'np.arange', (['self.test_data_len'], {}), True, 'import numpy as np\n'), (536, 'numpy.mean', 'np.mean', (['loss_list'], {}), True, 'import numpy as np\n'), (537, 'numpy.mean', 'np.mean', (['acc_list'], {}), True, 'import numpy as np\n'), (644, 'numpy.zeros', 'np.zeros', (['(out_argmax.shape[0], y_batch_large.shape[1], y_batch_large.shape[2])'], {'dtype': 'np.uint32'}), True, 'import numpy as np\n'), (646, 'numpy.asarray', 'np.asarray', (['out_argmax'], {'dtype': 'np.uint8'}), True, 'import numpy as np\n'), (775, 'scipy.misc.imresize', 'misc.imresize', (['out_argmax[y, ...]', 'y_batch_large.shape[1:]'], {'interp': '"""nearest"""'}), True, 'import scipy.misc as misc\n'), (856, 'os.path.dirname', 'os.path.dirname', (['colored_save_path'], {}), False, 'import os\n'), (857, 'os.path.dirname', 'os.path.dirname', (['colored_save_path'], {}), False, 'import os\n'), (862, 'os.path.dirname', 'os.path.dirname', (['save_path'], {}), False, 'import os\n'), (863, 'os.path.dirname', 'os.path.dirname', (['save_path'], {}), False, 'import os\n'), (931, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (648, 'scipy.misc.imresize', 'misc.imresize', (['out_argmax[y, ...]', 'y_batch_large.shape[1:]'], {'interp': '"""nearest"""'}), True, 'import scipy.misc as misc\n'), (677, 'numpy.mean', 'np.mean', (['inf_list'], {}), True, 'import numpy as np\n'), (145, 'numpy.load', 'np.load', (['"""data/debug/debug_x.npy"""'], {}), True, 'import numpy as np\n'), (146, 'numpy.load', 
'np.load', (['"""data/debug/debug_y.npy"""'], {}), True, 'import numpy as np\n')]
chrisseiler96/bert-client-server-tests
a5b8ead400e91a3b3dbb67295e17583d714869c4
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import csv
import os
import modeling
import optimization
import tokenization
import tensorflow as tf

flags = tf.flags

FLAGS = flags.FLAGS

## Required parameters
flags.DEFINE_string(
    "data_dir", None,
    "The input data dir. Should contain the .tsv files (or other data files) "
    "for the task.")

flags.DEFINE_string(
    "bert_config_file", None,
    "The config json file corresponding to the pre-trained BERT model. "
    "This specifies the model architecture.")

flags.DEFINE_string("task_name", None, "The name of the task to train.")

flags.DEFINE_string("vocab_file", None,
                    "The vocabulary file that the BERT model was trained on.")

flags.DEFINE_string(
    "output_dir", None,
    "The output directory where the model checkpoints will be written.")

## Other parameters

flags.DEFINE_string(
    "init_checkpoint", None,
    "Initial checkpoint (usually from a pre-trained BERT model).")

flags.DEFINE_bool(
    "do_lower_case", True,
    "Whether to lower case the input text. Should be True for uncased "
    "models and False for cased models.")

flags.DEFINE_integer(
    "max_seq_length", 128,
    "The maximum total input sequence length after WordPiece tokenization. "
    "Sequences longer than this will be truncated, and sequences shorter "
    "than this will be padded.")

flags.DEFINE_bool("do_train", False, "Whether to run training.")

flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")

flags.DEFINE_bool(
    "do_predict", False,
    "Whether to run the model in inference mode on the test set.")

flags.DEFINE_bool(
    "do_serve", False,
    "Whether to export the built model.")

flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")

flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")

flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")

flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")

flags.DEFINE_float("num_train_epochs", 3.0,
                   "Total number of training epochs to perform.")

flags.DEFINE_float(
    "warmup_proportion", 0.1,
    "Proportion of training to perform linear learning rate warmup for. "
    "E.g., 0.1 = 10% of training.")

flags.DEFINE_integer("save_checkpoints_steps", 1000,
                     "How often to save the model checkpoint.")

flags.DEFINE_integer("iterations_per_loop", 1000,
                     "How many steps to make in each estimator call.")

flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")

tf.flags.DEFINE_string(
    "tpu_name", None,
    "The Cloud TPU to use for training. This should be either the name "
    "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
    "url.")

tf.flags.DEFINE_string(
    "tpu_zone", None,
    "[Optional] GCE zone where the Cloud TPU is located. If not "
    "specified, we will attempt to automatically detect the GCE zone from "
    "metadata.")

tf.flags.DEFINE_string(
    "gcp_project", None,
    "[Optional] Project name for the Cloud TPU-enabled project. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")

tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")

flags.DEFINE_integer(
    "num_tpu_cores", 8,
    "Only used if `use_tpu` is True. Total number of TPU cores to use.")
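# Example invocation for a CoLA fine-tuning run (paths are hypothetical and
# shown for illustration only; all flags are defined above):
#
#   python run_classifier.py \
#     --task_name=cola \
#     --do_train=true \
#     --do_eval=true \
#     --data_dir=/tmp/glue/CoLA \
#     --vocab_file=/tmp/bert_base/vocab.txt \
#     --bert_config_file=/tmp/bert_base/bert_config.json \
#     --init_checkpoint=/tmp/bert_base/bert_model.ckpt \
#     --max_seq_length=128 \
#     --train_batch_size=32 \
#     --num_train_epochs=3.0 \
#     --output_dir=/tmp/cola_output/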
If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") tf.flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.") flags.DEFINE_integer( "num_tpu_cores", 8, "Only used if `use_tpu` is True. Total number of TPU cores to use.") class InputExample(object): """A single training/test example for simple sequence classification.""" def __init__(self, guid, text_a, text_b=None, label=None): """Constructs a InputExample. Args: guid: Unique id for the example. text_a: string. The untokenized text of the first sequence. For single sequence tasks, only this sequence must be specified. text_b: (Optional) string. The untokenized text of the second sequence. Only must be specified for sequence pair tasks. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. """ self.guid = guid self.text_a = text_a self.text_b = text_b self.label = label class PaddingInputExample(object): """Fake example so the num input examples is a multiple of the batch size. When running eval/predict on the TPU, we need to pad the number of examples to be a multiple of the batch size, because the TPU requires a fixed batch size. The alternative is to drop the last batch, which is bad because it means the entire output data won't be generated. We use this class instead of `None` because treating `None` as padding battches could cause silent errors. """ class InputFeatures(object): """A single set of features of data.""" def __init__(self, input_ids, input_mask, segment_ids, label_id, is_real_example=True): self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.label_id = label_id self.is_real_example = is_real_example class DataProcessor(object): """Base class for data converters for sequence classification data sets.""" def get_train_examples(self, data_dir): """Gets a collection of `InputExample`s for the train set.""" raise NotImplementedError() def get_dev_examples(self, data_dir): """Gets a collection of `InputExample`s for the dev set.""" raise NotImplementedError() def get_test_examples(self, data_dir): """Gets a collection of `InputExample`s for prediction.""" raise NotImplementedError() def get_labels(self): """Gets the list of labels for this data set.""" raise NotImplementedError() @classmethod def _read_tsv(cls, input_file, quotechar=None): """Reads a tab separated value file.""" with tf.gfile.Open(input_file, "r") as f: reader = csv.reader(f, delimiter="\t", quotechar=quotechar) lines = [] for line in reader: lines.append(line) return lines class XnliProcessor(DataProcessor): """Processor for the XNLI data set.""" def __init__(self): self.language = "zh" def get_train_examples(self, data_dir): """See base class.""" lines = self._read_tsv( os.path.join(data_dir, "multinli", "multinli.train.%s.tsv" % self.language)) examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "train-%d" % (i) text_a = tokenization.convert_to_unicode(line[0]) text_b = tokenization.convert_to_unicode(line[1]) label = tokenization.convert_to_unicode(line[2]) if label == tokenization.convert_to_unicode("contradictory"): label = tokenization.convert_to_unicode("contradiction") examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) 
    return examples

  def get_dev_examples(self, data_dir):
    """See base class."""
    lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        continue
      guid = "dev-%d" % (i)
      language = tokenization.convert_to_unicode(line[0])
      if language != tokenization.convert_to_unicode(self.language):
        continue
      text_a = tokenization.convert_to_unicode(line[6])
      text_b = tokenization.convert_to_unicode(line[7])
      label = tokenization.convert_to_unicode(line[1])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples

  def get_labels(self):
    """See base class."""
    return ["contradiction", "entailment", "neutral"]


class MnliProcessor(DataProcessor):
  """Processor for the MultiNLI data set (GLUE version)."""

  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
        "dev_matched")

  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test")

  def get_labels(self):
    """See base class."""
    return ["contradiction", "entailment", "neutral"]

  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        continue
      guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
      text_a = tokenization.convert_to_unicode(line[8])
      text_b = tokenization.convert_to_unicode(line[9])
      if set_type == "test":
        label = "contradiction"
      else:
        label = tokenization.convert_to_unicode(line[-1])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples


class AgnewsProcessor(DataProcessor):
  """Processor for the AG News data set."""

  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev_matched")

  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")

  def get_labels(self):
    """See base class."""
    return [
        "World",
        "Entertainment",
        "Sports",
        "Business",
    ]

  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:  # skip the header
        continue
      single_example = self._create_example(line, set_type)
      examples.append(single_example)
    return examples

  def _create_example(self, line, set_type):
    guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
    text_a = tokenization.convert_to_unicode(line[1])
    if set_type == "test":
      label = "World"
    else:
      label = tokenization.convert_to_unicode(line[-1])
    single_example = InputExample(guid=guid, text_a=text_a, label=label)
    return single_example


class MrpcProcessor(DataProcessor):
  """Processor for the MRPC data set (GLUE version)."""

  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")

  def get_labels(self):
    """See base class."""
    return ["0", "1"]

  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        continue
      guid = "%s-%s" % (set_type, i)
      text_a = tokenization.convert_to_unicode(line[3])
      text_b = tokenization.convert_to_unicode(line[4])
      if set_type == "test":
        label = "0"
      else:
        label = tokenization.convert_to_unicode(line[0])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples


class ColaProcessor(DataProcessor):
  """Processor for the CoLA data set (GLUE version)."""

  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")

  def get_labels(self):
    """See base class."""
    return ["0", "1"]

  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      # Only the test set has a header
      if set_type == "test" and i == 0:
        continue
      guid = "%s-%s" % (set_type, i)
      if set_type == "test":
        text_a = tokenization.convert_to_unicode(line[1])
        label = "0"
      else:
        text_a = tokenization.convert_to_unicode(line[3])
        label = tokenization.convert_to_unicode(line[1])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
    return examples


def convert_single_example(ex_index, example, label_list, max_seq_length,
                           tokenizer):
  """Converts a single `InputExample` into a single `InputFeatures`."""

  if isinstance(example, PaddingInputExample):
    return InputFeatures(
        input_ids=[0] * max_seq_length,
        input_mask=[0] * max_seq_length,
        segment_ids=[0] * max_seq_length,
        label_id=0,
        is_real_example=False)

  label_map = {}
  for (i, label) in enumerate(label_list):
    label_map[label] = i

  tokens_a = tokenizer.tokenize(example.text_a)
  tokens_b = None
  if example.text_b:
    tokens_b = tokenizer.tokenize(example.text_b)

  if tokens_b:
    # Modifies `tokens_a` and `tokens_b` in place so that the total
    # length is less than the specified length.
    # Account for [CLS], [SEP], [SEP] with "- 3"
    _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
  else:
    # Account for [CLS] and [SEP] with "- 2"
    if len(tokens_a) > max_seq_length - 2:
      tokens_a = tokens_a[0:(max_seq_length - 2)]

  # The convention in BERT is:
  # (a) For sequence pairs:
  #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
  #  type_ids: 0     0  0    0    0     0       0 0     1  1  1  1   1 1
  # (b) For single sequences:
  #  tokens:   [CLS] the dog is hairy . [SEP]
  #  type_ids: 0     0   0   0  0     0 0
  #
  # Where "type_ids" are used to indicate whether this is the first
  # sequence or the second sequence. The embedding vectors for `type=0` and
  # `type=1` were learned during pre-training and are added to the wordpiece
  # embedding vector (and position vector). This is not *strictly* necessary
  # since the [SEP] token unambiguously separates the sequences, but it makes
  # it easier for the model to learn the concept of sequences.
  #
  # For classification tasks, the first vector (corresponding to [CLS]) is
  # used as the "sentence vector". Note that this only makes sense because
  # the entire model is fine-tuned.
  tokens = []
  segment_ids = []
  tokens.append("[CLS]")
  segment_ids.append(0)
  for token in tokens_a:
    tokens.append(token)
    segment_ids.append(0)
  tokens.append("[SEP]")
  segment_ids.append(0)

  if tokens_b:
    for token in tokens_b:
      tokens.append(token)
      segment_ids.append(1)
    tokens.append("[SEP]")
    segment_ids.append(1)

  input_ids = tokenizer.convert_tokens_to_ids(tokens)

  # The mask has 1 for real tokens and 0 for padding tokens. Only real
  # tokens are attended to.
  input_mask = [1] * len(input_ids)

  # Zero-pad up to the sequence length.
  while len(input_ids) < max_seq_length:
    input_ids.append(0)
    input_mask.append(0)
    segment_ids.append(0)

  assert len(input_ids) == max_seq_length
  assert len(input_mask) == max_seq_length
  assert len(segment_ids) == max_seq_length

  label_id = label_map[example.label]
  if ex_index < 5:
    tf.logging.info("*** Example ***")
    tf.logging.info("guid: %s" % (example.guid))
    tf.logging.info("tokens: %s" % " ".join(
        [tokenization.printable_text(x) for x in tokens]))
    tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
    tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
    tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
    tf.logging.info("label: %s (id = %d)" % (example.label, label_id))

  feature = InputFeatures(
      input_ids=input_ids,
      input_mask=input_mask,
      segment_ids=segment_ids,
      label_id=label_id,
      is_real_example=True)
  return feature


def file_based_convert_examples_to_features(
    examples, label_list, max_seq_length, tokenizer, output_file):
  """Convert a set of `InputExample`s to a TFRecord file."""

  writer = tf.python_io.TFRecordWriter(output_file)

  for (ex_index, example) in enumerate(examples):
    if ex_index % 10000 == 0:
      tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))

    tf_example = from_record_to_tf_example(ex_index, example, label_list,
                                           max_seq_length, tokenizer)
    writer.write(tf_example.SerializeToString())
  writer.close()


def from_record_to_tf_example(ex_index, example, label_list, max_seq_length,
                              tokenizer):
  feature = convert_single_example(ex_index, example, label_list,
                                   max_seq_length, tokenizer)

  def create_int_feature(values):
    f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
    return f

  features = collections.OrderedDict()
  features["input_ids"] = create_int_feature(feature.input_ids)
  features["input_mask"] = create_int_feature(feature.input_mask)
  features["segment_ids"] = create_int_feature(feature.segment_ids)
  features["label_ids"] = create_int_feature([feature.label_id])
  features["is_real_example"] = create_int_feature(
      [int(feature.is_real_example)])

  tf_example = tf.train.Example(features=tf.train.Features(feature=features))
  return tf_example


def file_based_input_fn_builder(input_file, seq_length, is_training,
                                drop_remainder):
  """Creates an `input_fn` closure to be passed to TPUEstimator."""

  name_to_features = {
      "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
      "input_mask": tf.FixedLenFeature([seq_length], tf.int64),
      "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
      "label_ids": tf.FixedLenFeature([], tf.int64),
      "is_real_example": tf.FixedLenFeature([], tf.int64),
  }

  def _decode_record(record, name_to_features):
    """Decodes a record to a TensorFlow example."""
    example = tf.parse_single_example(record, name_to_features)

    # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
    # So cast all int64 to int32.
    for name in list(example.keys()):
      t = example[name]
      if t.dtype == tf.int64:
        t = tf.to_int32(t)
      example[name] = t

    return example

  def input_fn(params):
    """The actual input function."""
    batch_size = params["batch_size"]

    # For training, we want a lot of parallel reading and shuffling.
    # For eval, we want no shuffling and parallel reading doesn't matter.
    d = tf.data.TFRecordDataset(input_file)
    if is_training:
      d = d.repeat()
      d = d.shuffle(buffer_size=100)

    d = d.apply(
        tf.contrib.data.map_and_batch(
            lambda record: _decode_record(record, name_to_features),
            batch_size=batch_size,
            drop_remainder=drop_remainder))

    return d

  return input_fn


def _truncate_seq_pair(tokens_a, tokens_b, max_length):
  """Truncates a sequence pair in place to the maximum length."""

  # This is a simple heuristic which will always truncate the longer sequence
  # one token at a time. This makes more sense than truncating an equal percent
  # of tokens from each, since if one sequence is very short then each token
  # that's truncated likely contains more information than a longer sequence.
  while True:
    total_length = len(tokens_a) + len(tokens_b)
    if total_length <= max_length:
      break
    if len(tokens_a) > len(tokens_b):
      tokens_a.pop()
    else:
      tokens_b.pop()


def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
                 labels, num_labels, use_one_hot_embeddings):
  """Creates a classification model."""
  model = modeling.BertModel(
      config=bert_config,
      is_training=is_training,
      input_ids=input_ids,
      input_mask=input_mask,
      token_type_ids=segment_ids,
      use_one_hot_embeddings=use_one_hot_embeddings)

  # In the demo, we are doing a simple classification task on the entire
  # segment.
  #
  # If you want to use the token-level output, use model.get_sequence_output()
  # instead.
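  # Shape note: model.get_pooled_output() is [batch_size, hidden_size] (a
  # single vector per example, derived from the [CLS] token), while
  # model.get_sequence_output() is [batch_size, seq_length, hidden_size]
  # (one vector per wordpiece token).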
  output_layer = model.get_pooled_output()

  hidden_size = output_layer.shape[-1].value

  output_weights = tf.get_variable(
      "output_weights", [num_labels, hidden_size],
      initializer=tf.truncated_normal_initializer(stddev=0.02))

  output_bias = tf.get_variable(
      "output_bias", [num_labels], initializer=tf.zeros_initializer())

  with tf.variable_scope("loss"):
    if is_training:
      # I.e., 0.1 dropout
      output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)

    logits = tf.matmul(output_layer, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    probabilities = tf.nn.softmax(logits, axis=-1)
    log_probs = tf.nn.log_softmax(logits, axis=-1)

    one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)

    # Softmax cross-entropy, written out explicitly from the one-hot labels
    # and the log-probabilities.
    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
    loss = tf.reduce_mean(per_example_loss)

    return (loss, per_example_loss, logits, probabilities)


def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, use_tpu,
                     use_one_hot_embeddings, do_serve):
  """Returns `model_fn` closure for TPUEstimator."""

  def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    """The `model_fn` for TPUEstimator."""

    tf.logging.info("*** Features ***")
    for name in sorted(features.keys()):
      tf.logging.info("  name = %s, shape = %s" % (name, features[name].shape))

    input_ids = features["input_ids"]
    input_mask = features["input_mask"]
    segment_ids = features["segment_ids"]
    label_ids = features["label_ids"]
    is_real_example = None
    if "is_real_example" in features:
      is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
    else:
      is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)

    is_training = (mode == tf.estimator.ModeKeys.TRAIN)

    (total_loss, per_example_loss, logits, probabilities) = create_model(
        bert_config, is_training, input_ids, input_mask, segment_ids,
        label_ids, num_labels, use_one_hot_embeddings)

    tvars = tf.trainable_variables()
    initialized_variable_names = {}
    scaffold_fn = None
    if init_checkpoint:
      (assignment_map, initialized_variable_names
      ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
      if use_tpu:

        def tpu_scaffold():
          tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
          return tf.train.Scaffold()

        scaffold_fn = tpu_scaffold
      elif not do_serve:
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

    tf.logging.info("**** Trainable Variables ****")
    for var in tvars:
      init_string = ""
      if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
      tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                      init_string)

    output_spec = None
    if mode == tf.estimator.ModeKeys.TRAIN:

      train_op = optimization.create_optimizer(
          total_loss, learning_rate, num_train_steps, num_warmup_steps,
          use_tpu)

      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          train_op=train_op,
          scaffold_fn=scaffold_fn)
    elif mode == tf.estimator.ModeKeys.EVAL:

      def metric_fn(per_example_loss, label_ids, logits, is_real_example):
        predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
        accuracy = tf.metrics.accuracy(
            labels=label_ids, predictions=predictions,
            weights=is_real_example)
        loss = tf.metrics.mean(values=per_example_loss,
                               weights=is_real_example)
        return {
            "eval_accuracy": accuracy,
            "eval_loss": loss,
        }

      eval_metrics = (metric_fn,
                      [per_example_loss, label_ids, logits, is_real_example])
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          eval_metrics=eval_metrics,
          scaffold_fn=scaffold_fn)
    else:
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          predictions={"probabilities": probabilities},
          scaffold_fn=scaffold_fn)
    return output_spec

  return model_fn


# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, seq_length, is_training, drop_remainder):
  """Creates an `input_fn` closure to be passed to TPUEstimator."""

  all_input_ids = []
  all_input_mask = []
  all_segment_ids = []
  all_label_ids = []

  for feature in features:
    all_input_ids.append(feature.input_ids)
    all_input_mask.append(feature.input_mask)
    all_segment_ids.append(feature.segment_ids)
    all_label_ids.append(feature.label_id)

  def input_fn(params):
    """The actual input function."""
    batch_size = params["batch_size"]

    num_examples = len(features)

    # This is for demo purposes and does NOT scale to large data sets. We do
    # not use Dataset.from_generator() because that uses tf.py_func which is
    # not TPU compatible. The right way to load data is with TFRecordReader.
    d = tf.data.Dataset.from_tensor_slices({
        "input_ids":
            tf.constant(
                all_input_ids, shape=[num_examples, seq_length],
                dtype=tf.int32),
        "input_mask":
            tf.constant(
                all_input_mask,
                shape=[num_examples, seq_length],
                dtype=tf.int32),
        "segment_ids":
            tf.constant(
                all_segment_ids,
                shape=[num_examples, seq_length],
                dtype=tf.int32),
        "label_ids":
            tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
    })

    if is_training:
      d = d.repeat()
      d = d.shuffle(buffer_size=100)

    d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
    return d

  return input_fn


# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def convert_examples_to_features(examples, label_list, max_seq_length,
                                 tokenizer):
  """Convert a set of `InputExample`s to a list of `InputFeatures`."""

  features = []
  for (ex_index, example) in enumerate(examples):
    if ex_index % 10000 == 0:
      tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))

    feature = convert_single_example(ex_index, example, label_list,
                                     max_seq_length, tokenizer)

    features.append(feature)
  return features


def main(_):
  tf.logging.set_verbosity(tf.logging.INFO)

  processors = {
      "cola": ColaProcessor,
      "mnli": MnliProcessor,
      "mrpc": MrpcProcessor,
      "xnli": XnliProcessor,
      "agne": AgnewsProcessor,
  }

  tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
                                                FLAGS.init_checkpoint)

  if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict and not FLAGS.do_serve:
    raise ValueError(
        "At least one of `do_train`, `do_eval`, `do_predict` or `do_serve` "
        "must be True.")

  bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)

  if FLAGS.max_seq_length > bert_config.max_position_embeddings:
    raise ValueError(
        "Cannot use sequence length %d because the BERT model "
        "was only trained up to sequence length %d" %
        (FLAGS.max_seq_length, bert_config.max_position_embeddings))

  tf.gfile.MakeDirs(FLAGS.output_dir)

  task_name = FLAGS.task_name.lower()

  if task_name not in processors:
    raise ValueError("Task not found: %s" % (task_name))

  processor = processors[task_name]()

  label_list = processor.get_labels()

  tokenizer = tokenization.FullTokenizer(
      vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)

  tpu_cluster_resolver = None
  if FLAGS.use_tpu and FLAGS.tpu_name:
    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)

  is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
  run_config = tf.contrib.tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      master=FLAGS.master,
      model_dir=FLAGS.output_dir,
      save_checkpoints_steps=FLAGS.save_checkpoints_steps,
      tpu_config=tf.contrib.tpu.TPUConfig(
          iterations_per_loop=FLAGS.iterations_per_loop,
          num_shards=FLAGS.num_tpu_cores,
          per_host_input_for_training=is_per_host))

  train_examples = None
  num_train_steps = None
  num_warmup_steps = None
  if FLAGS.do_train:
    train_examples = processor.get_train_examples(FLAGS.data_dir)
    num_train_steps = int(
        len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
    num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
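  # For a concrete sense of the arithmetic above (numbers are illustrative):
  # CoLA has roughly 8,551 training examples, so with train_batch_size=32 and
  # num_train_epochs=3.0, num_train_steps = int(8551 / 32 * 3.0) = 801, and
  # with warmup_proportion=0.1, num_warmup_steps = int(801 * 0.1) = 80.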
  model_fn = model_fn_builder(
      bert_config=bert_config,
      num_labels=len(label_list),
      init_checkpoint=FLAGS.init_checkpoint,
      learning_rate=FLAGS.learning_rate,
      num_train_steps=num_train_steps,
      num_warmup_steps=num_warmup_steps,
      use_tpu=FLAGS.use_tpu,
      use_one_hot_embeddings=FLAGS.use_tpu,
      do_serve=FLAGS.do_serve)

  # If TPU is not available, this will fall back to normal Estimator on CPU
  # or GPU.
  estimator = tf.contrib.tpu.TPUEstimator(
      use_tpu=FLAGS.use_tpu,
      model_fn=model_fn,
      config=run_config,
      train_batch_size=FLAGS.train_batch_size,
      eval_batch_size=FLAGS.eval_batch_size,
      predict_batch_size=FLAGS.predict_batch_size)

  if FLAGS.do_train:
    train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
    file_based_convert_examples_to_features(
        train_examples, label_list, FLAGS.max_seq_length, tokenizer,
        train_file)
    tf.logging.info("***** Running training *****")
    tf.logging.info("  Num examples = %d", len(train_examples))
    tf.logging.info("  Batch size = %d", FLAGS.train_batch_size)
    tf.logging.info("  Num steps = %d", num_train_steps)
    train_input_fn = file_based_input_fn_builder(
        input_file=train_file,
        seq_length=FLAGS.max_seq_length,
        is_training=True,
        drop_remainder=True)
    estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)

  if FLAGS.do_eval:
    eval_examples = processor.get_dev_examples(FLAGS.data_dir)
    num_actual_eval_examples = len(eval_examples)
    if FLAGS.use_tpu:
      # TPU requires a fixed batch size for all batches, therefore the number
      # of examples must be a multiple of the batch size, or else examples
      # will get dropped. So we pad with fake examples which are ignored
      # later on. These do NOT count towards the metric (all tf.metrics
      # support a per-instance weight, and these get a weight of 0.0).
      while len(eval_examples) % FLAGS.eval_batch_size != 0:
        eval_examples.append(PaddingInputExample())

    eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
    file_based_convert_examples_to_features(
        eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)

    tf.logging.info("***** Running evaluation *****")
    tf.logging.info("  Num examples = %d (%d actual, %d padding)",
                    len(eval_examples), num_actual_eval_examples,
                    len(eval_examples) - num_actual_eval_examples)
    tf.logging.info("  Batch size = %d", FLAGS.eval_batch_size)

    # This tells the estimator to run through the entire set.
    eval_steps = None
    # However, if running eval on the TPU, you will need to specify the
    # number of steps.
    if FLAGS.use_tpu:
      assert len(eval_examples) % FLAGS.eval_batch_size == 0
      eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)

    eval_drop_remainder = True if FLAGS.use_tpu else False
    eval_input_fn = file_based_input_fn_builder(
        input_file=eval_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=eval_drop_remainder)

    result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)

    output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
    with tf.gfile.GFile(output_eval_file, "w") as writer:
      tf.logging.info("***** Eval results *****")
      for key in sorted(result.keys()):
        tf.logging.info("  %s = %s", key, str(result[key]))
        writer.write("%s = %s\n" % (key, str(result[key])))

  if FLAGS.do_predict:
    predict_examples = processor.get_test_examples(FLAGS.data_dir)
    num_actual_predict_examples = len(predict_examples)
    if FLAGS.use_tpu:
      # TPU requires a fixed batch size for all batches, therefore the number
      # of examples must be a multiple of the batch size, or else examples
      # will get dropped. So we pad with fake examples which are ignored
      # later on.
      while len(predict_examples) % FLAGS.predict_batch_size != 0:
        predict_examples.append(PaddingInputExample())

    predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
    file_based_convert_examples_to_features(predict_examples, label_list,
                                            FLAGS.max_seq_length, tokenizer,
                                            predict_file)

    tf.logging.info("***** Running prediction *****")
    tf.logging.info("  Num examples = %d (%d actual, %d padding)",
                    len(predict_examples), num_actual_predict_examples,
                    len(predict_examples) - num_actual_predict_examples)
    tf.logging.info("  Batch size = %d", FLAGS.predict_batch_size)

    predict_drop_remainder = True if FLAGS.use_tpu else False
    predict_input_fn = file_based_input_fn_builder(
        input_file=predict_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=predict_drop_remainder)

    result = estimator.predict(input_fn=predict_input_fn)

    output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
    with tf.gfile.GFile(output_predict_file, "w") as writer:
      num_written_lines = 0
      tf.logging.info("***** Predict results *****")
      for (i, prediction) in enumerate(result):
        probabilities = prediction["probabilities"]
        if i >= num_actual_predict_examples:
          break
        output_line = "\t".join(
            str(class_probability)
            for class_probability in probabilities) + "\n"
        writer.write(output_line)
        num_written_lines += 1
    assert num_written_lines == num_actual_predict_examples

  if FLAGS.do_serve:
    def serving_input_fn():
      with tf.variable_scope("foo"):
        feature_spec = {
            "input_ids": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64),
            "input_mask": tf.FixedLenFeature([FLAGS.max_seq_length],
                                             tf.int64),
            "segment_ids": tf.FixedLenFeature([FLAGS.max_seq_length],
                                              tf.int64),
            "label_ids": tf.FixedLenFeature([], tf.int64),
        }
        serialized_tf_example = tf.placeholder(dtype=tf.string,
                                               shape=[None],
                                               name='input_example_tensor')
        receiver_tensors = {'examples': serialized_tf_example}
        features = tf.parse_example(serialized_tf_example, feature_spec)
        return tf.estimator.export.ServingInputReceiver(features,
                                                        receiver_tensors)

    estimator._export_to_tpu = False  # this is important
    path = estimator.export_savedmodel('export_t', serving_input_fn)
    print(path)


if __name__ == "__main__":
  flags.mark_flag_as_required("data_dir")
  flags.mark_flag_as_required("task_name")
  flags.mark_flag_as_required("vocab_file")
  flags.mark_flag_as_required("bert_config_file")
  flags.mark_flag_as_required("output_dir")
  tf.app.run()
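# A rough, untested sketch of querying the model exported by --do_serve above
# (assumes a TF 1.x environment; the export directory and the timestamped
# subdirectory name are illustrative). The serving signature defined in
# serving_input_fn() expects serialized tf.Examples under the "examples" key:
#
#     import tensorflow as tf
#     predict_fn = tf.contrib.predictor.from_saved_model("export_t/<timestamp>")
#     example = tf.train.Example(features=tf.train.Features(feature={
#         "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=[0] * 128)),
#         "input_mask": tf.train.Feature(int64_list=tf.train.Int64List(value=[0] * 128)),
#         "segment_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=[0] * 128)),
#         "label_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=[0])),
#     }))
#     print(predict_fn({"examples": [example.SerializeToString()]}))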
[ "tensorflow.contrib.cluster_resolver.TPUClusterResolver", "tensorflow.metrics.accuracy", "tensorflow.FixedLenFeature", "tensorflow.nn.log_softmax", "tensorflow.reduce_sum", "tensorflow.gfile.GFile", "tensorflow.cast", "tensorflow.train.init_from_checkpoint", "tensorflow.gfile.MakeDirs", "tensorflow.to_int32", "tensorflow.contrib.tpu.TPUEstimatorSpec", "tensorflow.contrib.tpu.TPUEstimator", "tensorflow.data.TFRecordDataset", "tensorflow.truncated_normal_initializer", "tensorflow.python_io.TFRecordWriter", "tensorflow.logging.set_verbosity", "tensorflow.estimator.export.ServingInputReceiver", "tensorflow.trainable_variables", "tensorflow.parse_single_example", "tensorflow.argmax", "tensorflow.app.run", "tensorflow.nn.dropout", "tensorflow.metrics.mean", "tensorflow.matmul", "tensorflow.gfile.Open", "tensorflow.shape", "tensorflow.parse_example", "tensorflow.zeros_initializer", "tensorflow.placeholder", "tensorflow.logging.info", "tensorflow.one_hot", "tensorflow.train.Features", "tensorflow.contrib.tpu.TPUConfig", "tensorflow.nn.bias_add", "tensorflow.nn.softmax", "tensorflow.constant", "tensorflow.train.Scaffold", "tensorflow.reduce_mean", "tensorflow.flags.DEFINE_string", "tensorflow.variable_scope" ]
run_classifier.py
[(106, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""tpu_name"""', 'None', '"""The Cloud TPU to use for training. This should be either the name used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url."""'], {}), True, 'import tensorflow as tf\n'), (112, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""tpu_zone"""', 'None', '"""[Optional] GCE zone where the Cloud TPU is located in. If not specified, we will attempt to automatically detect the GCE project from metadata."""'], {}), True, 'import tensorflow as tf\n'), (118, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""gcp_project"""', 'None', '"""[Optional] Project name for the Cloud TPU-enabled project. If not specified, we will attempt to automatically detect the GCE project from metadata."""'], {}), True, 'import tensorflow as tf\n'), (124, 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""master"""', 'None', '"""[Optional] TensorFlow master URL."""'], {}), True, 'import tensorflow as tf\n'), (536, 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['output_file'], {}), True, 'import tensorflow as tf\n'), (555, 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), False, 'import collections\n'), (634, 'modeling.BertModel', 'modeling.BertModel', ([], {'config': 'bert_config', 'is_training': 'is_training', 'input_ids': 'input_ids', 'input_mask': 'input_mask', 'token_type_ids': 'segment_ids', 'use_one_hot_embeddings': 'use_one_hot_embeddings'}), False, 'import modeling\n'), (841, 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), True, 'import tensorflow as tf\n'), (851, 'tokenization.validate_case_matches_checkpoint', 'tokenization.validate_case_matches_checkpoint', (['FLAGS.do_lower_case', 'FLAGS.init_checkpoint'], {}), False, 'import tokenization\n'), (858, 'modeling.BertConfig.from_json_file', 'modeling.BertConfig.from_json_file', (['FLAGS.bert_config_file'], {}), False, 'import modeling\n'), (866, 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['FLAGS.output_dir'], {}), True, 'import tensorflow as tf\n'), (877, 'tokenization.FullTokenizer', 'tokenization.FullTokenizer', ([], {'vocab_file': 'FLAGS.vocab_file', 'do_lower_case': 'FLAGS.do_lower_case'}), False, 'import tokenization\n'), (919, 'tensorflow.contrib.tpu.TPUEstimator', 'tf.contrib.tpu.TPUEstimator', ([], {'use_tpu': 'FLAGS.use_tpu', 'model_fn': 'model_fn', 'config': 'run_config', 'train_batch_size': 'FLAGS.train_batch_size', 'eval_batch_size': 'FLAGS.eval_batch_size', 'predict_batch_size': 'FLAGS.predict_batch_size'}), True, 'import tensorflow as tf\n'), (1061, 'tensorflow.app.run', 'tf.app.run', ([], {}), True, 'import tensorflow as tf\n'), (340, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[1]'], {}), False, 'import tokenization\n'), (514, 'tensorflow.logging.info', 'tf.logging.info', (['"""*** Example ***"""'], {}), True, 'import tensorflow as tf\n'), (515, 'tensorflow.logging.info', 'tf.logging.info', (["('guid: %s' % example.guid)"], {}), True, 'import tensorflow as tf\n'), (521, 'tensorflow.logging.info', 'tf.logging.info', (["('label: %s (id = %d)' % (example.label, label_id))"], {}), True, 'import tensorflow as tf\n'), (571, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[seq_length]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (572, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[seq_length]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), 
(573, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[seq_length]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (574, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (575, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (580, 'tensorflow.parse_single_example', 'tf.parse_single_example', (['record', 'name_to_features'], {}), True, 'import tensorflow as tf\n'), (598, 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['input_file'], {}), True, 'import tensorflow as tf\n'), (658, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""loss"""'], {}), True, 'import tensorflow as tf\n'), (663, 'tensorflow.matmul', 'tf.matmul', (['output_layer', 'output_weights'], {'transpose_b': '(True)'}), True, 'import tensorflow as tf\n'), (664, 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['logits', 'output_bias'], {}), True, 'import tensorflow as tf\n'), (665, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (666, 'tensorflow.nn.log_softmax', 'tf.nn.log_softmax', (['logits'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (668, 'tensorflow.one_hot', 'tf.one_hot', (['labels'], {'depth': 'num_labels', 'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (671, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['per_example_loss'], {}), True, 'import tensorflow as tf\n'), (684, 'tensorflow.logging.info', 'tf.logging.info', (['"""*** Features ***"""'], {}), True, 'import tensorflow as tf\n'), (704, 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), True, 'import tensorflow as tf\n'), (720, 'tensorflow.logging.info', 'tf.logging.info', (['"""**** Trainable Variables ****"""'], {}), True, 'import tensorflow as tf\n'), (882, 'tensorflow.contrib.cluster_resolver.TPUClusterResolver', 'tf.contrib.cluster_resolver.TPUClusterResolver', (['FLAGS.tpu_name'], {'zone': 'FLAGS.tpu_zone', 'project': 'FLAGS.gcp_project'}), True, 'import tensorflow as tf\n'), (928, 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""train.tf_record"""'], {}), False, 'import os\n'), (931, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Running training *****"""'], {}), True, 'import tensorflow as tf\n'), (933, 'tensorflow.logging.info', 'tf.logging.info', (['""" Batch size = %d"""', 'FLAGS.train_batch_size'], {}), True, 'import tensorflow as tf\n'), (934, 'tensorflow.logging.info', 'tf.logging.info', (['""" Num steps = %d"""', 'num_train_steps'], {}), True, 'import tensorflow as tf\n'), (954, 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""eval.tf_record"""'], {}), False, 'import os\n'), (958, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Running evaluation *****"""'], {}), True, 'import tensorflow as tf\n'), (962, 'tensorflow.logging.info', 'tf.logging.info', (['""" Batch size = %d"""', 'FLAGS.eval_batch_size'], {}), True, 'import tensorflow as tf\n'), (981, 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""eval_results.txt"""'], {}), False, 'import os\n'), (999, 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""predict.tf_record"""'], {}), False, 'import os\n'), (1004, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Running prediction*****"""'], {}), True, 'import tensorflow as tf\n'), (1008, 'tensorflow.logging.info', 'tf.logging.info', (['""" Batch size = %d"""', 'FLAGS.predict_batch_size'], {}), True, 'import tensorflow as 
tf\n'), (1019, 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""test_results.tsv"""'], {}), False, 'import os\n'), (203, 'tensorflow.gfile.Open', 'tf.gfile.Open', (['input_file', '"""r"""'], {}), True, 'import tensorflow as tf\n'), (204, 'csv.reader', 'csv.reader', (['f'], {'delimiter': '"""\t"""', 'quotechar': 'quotechar'}), False, 'import csv\n'), (220, 'os.path.join', 'os.path.join', (['data_dir', '"""multinli"""', "('multinli.train.%s.tsv' % self.language)"], {}), False, 'import os\n'), (227, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[0]'], {}), False, 'import tokenization\n'), (228, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[1]'], {}), False, 'import tokenization\n'), (229, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[2]'], {}), False, 'import tokenization\n'), (238, 'os.path.join', 'os.path.join', (['data_dir', '"""xnli.dev.tsv"""'], {}), False, 'import os\n'), (244, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[0]'], {}), False, 'import tokenization\n'), (247, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[6]'], {}), False, 'import tokenization\n'), (248, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[7]'], {}), False, 'import tokenization\n'), (249, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[1]'], {}), False, 'import tokenization\n'), (289, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[8]'], {}), False, 'import tokenization\n'), (290, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[9]'], {}), False, 'import tokenization\n'), (344, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[-1]'], {}), False, 'import tokenization\n'), (378, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[3]'], {}), False, 'import tokenization\n'), (379, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[4]'], {}), False, 'import tokenization\n'), (562, 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'features'}), True, 'import tensorflow as tf\n'), (653, 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.02)'}), True, 'import tensorflow as tf\n'), (656, 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), True, 'import tensorflow as tf\n'), (661, 'tensorflow.nn.dropout', 'tf.nn.dropout', (['output_layer'], {'keep_prob': '(0.9)'}), True, 'import tensorflow as tf\n'), (670, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(one_hot_labels * log_probs)'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (686, 'tensorflow.logging.info', 'tf.logging.info', (["(' name = %s, shape = %s' % (name, features[name].shape))"], {}), True, 'import tensorflow as tf\n'), (694, 'tensorflow.cast', 'tf.cast', (["features['is_real_example']"], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (709, 'modeling.get_assignment_map_from_checkpoint', 'modeling.get_assignment_map_from_checkpoint', (['tvars', 'init_checkpoint'], {}), False, 'import modeling\n'), (725, 'tensorflow.logging.info', 'tf.logging.info', (['""" name = %s, shape = %s%s"""', 'var.name', 'var.shape', 'init_string'], {}), True, 'import tensorflow as tf\n'), (731, 'optimization.create_optimizer', 'optimization.create_optimizer', (['total_loss', 'learning_rate', 'num_train_steps', 
'num_warmup_steps', 'use_tpu'], {}), False, 'import optimization\n'), (734, 'tensorflow.contrib.tpu.TPUEstimatorSpec', 'tf.contrib.tpu.TPUEstimatorSpec', ([], {'mode': 'mode', 'loss': 'total_loss', 'train_op': 'train_op', 'scaffold_fn': 'scaffold_fn'}), True, 'import tensorflow as tf\n'), (891, 'tensorflow.contrib.tpu.TPUConfig', 'tf.contrib.tpu.TPUConfig', ([], {'iterations_per_loop': 'FLAGS.iterations_per_loop', 'num_shards': 'FLAGS.num_tpu_cores', 'per_host_input_for_training': 'is_per_host'}), True, 'import tensorflow as tf\n'), (982, 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['output_eval_file', '"""w"""'], {}), True, 'import tensorflow as tf\n'), (983, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Eval results *****"""'], {}), True, 'import tensorflow as tf\n'), (1020, 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['output_predict_file', '"""w"""'], {}), True, 'import tensorflow as tf\n'), (1022, 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Predict results *****"""'], {}), True, 'import tensorflow as tf\n'), (230, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['"""contradictory"""'], {}), False, 'import tokenization\n'), (231, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['"""contradiction"""'], {}), False, 'import tokenization\n'), (245, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['self.language'], {}), False, 'import tokenization\n'), (265, 'os.path.join', 'os.path.join', (['data_dir', '"""train.tsv"""'], {}), False, 'import os\n'), (270, 'os.path.join', 'os.path.join', (['data_dir', '"""dev_matched.tsv"""'], {}), False, 'import os\n'), (276, 'os.path.join', 'os.path.join', (['data_dir', '"""test_matched.tsv"""'], {}), False, 'import os\n'), (294, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[-1]'], {}), False, 'import tokenization\n'), (306, 'os.path.join', 'os.path.join', (['data_dir', '"""train.tsv"""'], {}), False, 'import os\n'), (311, 'os.path.join', 'os.path.join', (['data_dir', '"""dev.tsv"""'], {}), False, 'import os\n'), (317, 'os.path.join', 'os.path.join', (['data_dir', '"""test.tsv"""'], {}), False, 'import os\n'), (339, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[0]'], {}), False, 'import tokenization\n'), (355, 'os.path.join', 'os.path.join', (['data_dir', '"""train.tsv"""'], {}), False, 'import os\n'), (360, 'os.path.join', 'os.path.join', (['data_dir', '"""dev.tsv"""'], {}), False, 'import os\n'), (365, 'os.path.join', 'os.path.join', (['data_dir', '"""test.tsv"""'], {}), False, 'import os\n'), (383, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[0]'], {}), False, 'import tokenization\n'), (395, 'os.path.join', 'os.path.join', (['data_dir', '"""train.tsv"""'], {}), False, 'import os\n'), (400, 'os.path.join', 'os.path.join', (['data_dir', '"""dev.tsv"""'], {}), False, 'import os\n'), (405, 'os.path.join', 'os.path.join', (['data_dir', '"""test.tsv"""'], {}), False, 'import os\n'), (420, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[1]'], {}), False, 'import tokenization\n'), (423, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[3]'], {}), False, 'import tokenization\n'), (424, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[1]'], {}), False, 'import tokenization\n'), (587, 'tensorflow.to_int32', 'tf.to_int32', (['t'], {}), True, 'import tensorflow as tf\n'), (696, 
'tensorflow.shape', 'tf.shape', (['label_ids'], {}), True, 'import tensorflow as tf\n'), (753, 'tensorflow.contrib.tpu.TPUEstimatorSpec', 'tf.contrib.tpu.TPUEstimatorSpec', ([], {'mode': 'mode', 'loss': 'total_loss', 'eval_metrics': 'eval_metrics', 'scaffold_fn': 'scaffold_fn'}), True, 'import tensorflow as tf\n'), (759, 'tensorflow.contrib.tpu.TPUEstimatorSpec', 'tf.contrib.tpu.TPUEstimatorSpec', ([], {'mode': 'mode', 'predictions': "{'probabilities': probabilities}", 'scaffold_fn': 'scaffold_fn'}), True, 'import tensorflow as tf\n'), (795, 'tensorflow.constant', 'tf.constant', (['all_input_ids'], {'shape': '[num_examples, seq_length]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (799, 'tensorflow.constant', 'tf.constant', (['all_input_mask'], {'shape': '[num_examples, seq_length]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (804, 'tensorflow.constant', 'tf.constant', (['all_segment_ids'], {'shape': '[num_examples, seq_length]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (809, 'tensorflow.constant', 'tf.constant', (['all_label_ids'], {'shape': '[num_examples]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (1037, 'tensorflow.variable_scope', 'tf.variable_scope', (['"""foo"""'], {}), True, 'import tensorflow as tf\n'), (1044, 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.string', 'shape': '[None]', 'name': '"""input_example_tensor"""'}), True, 'import tensorflow as tf\n'), (1048, 'tensorflow.parse_example', 'tf.parse_example', (['serialized_tf_example', 'feature_spec'], {}), True, 'import tensorflow as tf\n'), (1049, 'tensorflow.estimator.export.ServingInputReceiver', 'tf.estimator.export.ServingInputReceiver', (['features', 'receiver_tensors'], {}), True, 'import tensorflow as tf\n'), (288, 'tokenization.convert_to_unicode', 'tokenization.convert_to_unicode', (['line[0]'], {}), False, 'import tokenization\n'), (713, 'tensorflow.train.init_from_checkpoint', 'tf.train.init_from_checkpoint', (['init_checkpoint', 'assignment_map'], {}), True, 'import tensorflow as tf\n'), (714, 'tensorflow.train.Scaffold', 'tf.train.Scaffold', ([], {}), True, 'import tensorflow as tf\n'), (718, 'tensorflow.train.init_from_checkpoint', 'tf.train.init_from_checkpoint', (['init_checkpoint', 'assignment_map'], {}), True, 'import tensorflow as tf\n'), (742, 'tensorflow.argmax', 'tf.argmax', (['logits'], {'axis': '(-1)', 'output_type': 'tf.int32'}), True, 'import tensorflow as tf\n'), (743, 'tensorflow.metrics.accuracy', 'tf.metrics.accuracy', ([], {'labels': 'label_ids', 'predictions': 'predictions', 'weights': 'is_real_example'}), True, 'import tensorflow as tf\n'), (745, 'tensorflow.metrics.mean', 'tf.metrics.mean', ([], {'values': 'per_example_loss', 'weights': 'is_real_example'}), True, 'import tensorflow as tf\n'), (1039, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[FLAGS.max_seq_length]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (1040, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[FLAGS.max_seq_length]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (1041, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[FLAGS.max_seq_length]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (1042, 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (517, 'tokenization.printable_text', 'tokenization.printable_text', (['x'], {}), False, 'import tokenization\n')]