text: stringlengths 75 – 104k
code_tokens: sequence
avg_line_len: float64, 7.91 – 980
score: float64, 0 – 0.18
def experiment(qnum, repetitions=100): """Execute the phase estimator circuit with multiple settings and show results. """ def example_gate(phi): """An example unitary 1-qubit gate U with an eigenvector |0> and an eigenvalue exp(2*Pi*i*phi) """ gate = cirq.SingleQubitMatrixGate( matrix=np.array([[np.exp(2*np.pi*1.0j*phi), 0], [0, 1]])) return gate print('Estimation with {} qubits.'.format(qnum)) print('Actual, Estimation (Raw binary)') errors = [] fold_func = lambda ms: ''.join(np.flip(ms, 0).astype(int).astype(str)) for phi in np.arange(0, 1, 0.1): result = run_estimate(example_gate(phi), qnum, repetitions) hist = result.histogram(key='phase', fold_func=fold_func) estimate_bin = hist.most_common(1)[0][0] estimate = (sum([float(s)*0.5**(order+1) for order, s in enumerate(estimate_bin)])) print('{:0.4f}, {:0.4f} ({})'.format(phi, estimate, estimate_bin)) errors.append((phi-estimate)**2) print('RMS Error: {:0.4f}\n'.format(np.sqrt(sum(errors)/len(errors))))
[ "def", "experiment", "(", "qnum", ",", "repetitions", "=", "100", ")", ":", "def", "example_gate", "(", "phi", ")", ":", "\"\"\"An example unitary 1-qubit gate U with an eigen vector |0> and an\n eigen value exp(2*Pi*i*phi)\n \"\"\"", "gate", "=", "cirq", ".", "SingleQubitMatrixGate", "(", "matrix", "=", "np", ".", "array", "(", "[", "[", "np", ".", "exp", "(", "2", "*", "np", ".", "pi", "*", "1.0j", "*", "phi", ")", ",", "0", "]", ",", "[", "0", ",", "1", "]", "]", ")", ")", "return", "gate", "print", "(", "'Estimation with {}qubits.'", ".", "format", "(", "qnum", ")", ")", "print", "(", "'Actual, Estimation (Raw binary)'", ")", "errors", "=", "[", "]", "fold_func", "=", "lambda", "ms", ":", "''", ".", "join", "(", "np", ".", "flip", "(", "ms", ",", "0", ")", ".", "astype", "(", "int", ")", ".", "astype", "(", "str", ")", ")", "for", "phi", "in", "np", ".", "arange", "(", "0", ",", "1", ",", "0.1", ")", ":", "result", "=", "run_estimate", "(", "example_gate", "(", "phi", ")", ",", "qnum", ",", "repetitions", ")", "hist", "=", "result", ".", "histogram", "(", "key", "=", "'phase'", ",", "fold_func", "=", "fold_func", ")", "estimate_bin", "=", "hist", ".", "most_common", "(", "1", ")", "[", "0", "]", "[", "0", "]", "estimate", "=", "(", "sum", "(", "[", "float", "(", "s", ")", "*", "0.5", "**", "(", "order", "+", "1", ")", "for", "order", ",", "s", "in", "enumerate", "(", "estimate_bin", ")", "]", ")", ")", "print", "(", "'{:0.4f}, {:0.4f} ({})'", ".", "format", "(", "phi", ",", "estimate", ",", "estimate_bin", ")", ")", "errors", ".", "append", "(", "(", "phi", "-", "estimate", ")", "**", "2", ")", "print", "(", "'RMS Error: {:0.4f}\\n'", ".", "format", "(", "np", ".", "sqrt", "(", "sum", "(", "errors", ")", "/", "len", "(", "errors", ")", ")", ")", ")" ]
41.185185
0.001757
def _update_rs_with_primary_from_member( sds, replica_set_name, server_description): """RS with known primary. Process a response from a non-primary. Pass in a dict of ServerDescriptions, current replica set name, and the ServerDescription we are processing. Returns new topology type. """ assert replica_set_name is not None if replica_set_name != server_description.replica_set_name: sds.pop(server_description.address) elif (server_description.me and server_description.address != server_description.me): sds.pop(server_description.address) # Had this member been the primary? return _check_has_primary(sds)
[ "def", "_update_rs_with_primary_from_member", "(", "sds", ",", "replica_set_name", ",", "server_description", ")", ":", "assert", "replica_set_name", "is", "not", "None", "if", "replica_set_name", "!=", "server_description", ".", "replica_set_name", ":", "sds", ".", "pop", "(", "server_description", ".", "address", ")", "elif", "(", "server_description", ".", "me", "and", "server_description", ".", "address", "!=", "server_description", ".", "me", ")", ":", "sds", ".", "pop", "(", "server_description", ".", "address", ")", "# Had this member been the primary?", "return", "_check_has_primary", "(", "sds", ")" ]
32.571429
0.00142
def set_widgets(self): """Set widgets on the Unit tab.""" self.clear_further_steps() # Set widgets purpose = self.parent.step_kw_purpose.selected_purpose() subcategory = self.parent.step_kw_subcategory.selected_subcategory() self.lblSelectUnit.setText( unit_question % (subcategory['name'], purpose['name'])) self.lblDescribeUnit.setText('') self.lstUnits.clear() subcat = self.parent.step_kw_subcategory.selected_subcategory()['key'] if purpose == layer_purpose_hazard: units_for_layer = hazard_units(subcat) else: units_for_layer = exposure_units(subcat) for unit_for_layer in units_for_layer: item = QListWidgetItem(unit_for_layer['name'], self.lstUnits) item.setData(QtCore.Qt.UserRole, unit_for_layer['key']) self.lstUnits.addItem(item) # Set values based on existing keywords (if already assigned) if self.parent.step_kw_purpose.\ selected_purpose() == layer_purpose_hazard: key = continuous_hazard_unit['key'] else: key = exposure_unit['key'] unit_id = self.parent.get_existing_keyword(key) if unit_id: units = [] for index in range(self.lstUnits.count()): item = self.lstUnits.item(index) units.append(item.data(QtCore.Qt.UserRole)) if unit_id in units: self.lstUnits.setCurrentRow(units.index(unit_id)) self.auto_select_one_item(self.lstUnits)
[ "def", "set_widgets", "(", "self", ")", ":", "self", ".", "clear_further_steps", "(", ")", "# Set widgets", "purpose", "=", "self", ".", "parent", ".", "step_kw_purpose", ".", "selected_purpose", "(", ")", "subcategory", "=", "self", ".", "parent", ".", "step_kw_subcategory", ".", "selected_subcategory", "(", ")", "self", ".", "lblSelectUnit", ".", "setText", "(", "unit_question", "%", "(", "subcategory", "[", "'name'", "]", ",", "purpose", "[", "'name'", "]", ")", ")", "self", ".", "lblDescribeUnit", ".", "setText", "(", "''", ")", "self", ".", "lstUnits", ".", "clear", "(", ")", "subcat", "=", "self", ".", "parent", ".", "step_kw_subcategory", ".", "selected_subcategory", "(", ")", "[", "'key'", "]", "if", "purpose", "==", "layer_purpose_hazard", ":", "units_for_layer", "=", "hazard_units", "(", "subcat", ")", "else", ":", "units_for_layer", "=", "exposure_units", "(", "subcat", ")", "for", "unit_for_layer", "in", "units_for_layer", ":", "item", "=", "QListWidgetItem", "(", "unit_for_layer", "[", "'name'", "]", ",", "self", ".", "lstUnits", ")", "item", ".", "setData", "(", "QtCore", ".", "Qt", ".", "UserRole", ",", "unit_for_layer", "[", "'key'", "]", ")", "self", ".", "lstUnits", ".", "addItem", "(", "item", ")", "# Set values based on existing keywords (if already assigned)", "if", "self", ".", "parent", ".", "step_kw_purpose", ".", "selected_purpose", "(", ")", "==", "layer_purpose_hazard", ":", "key", "=", "continuous_hazard_unit", "[", "'key'", "]", "else", ":", "key", "=", "exposure_unit", "[", "'key'", "]", "unit_id", "=", "self", ".", "parent", ".", "get_existing_keyword", "(", "key", ")", "if", "unit_id", ":", "units", "=", "[", "]", "for", "index", "in", "range", "(", "self", ".", "lstUnits", ".", "count", "(", ")", ")", ":", "item", "=", "self", ".", "lstUnits", ".", "item", "(", "index", ")", "units", ".", "append", "(", "item", ".", "data", "(", "QtCore", ".", "Qt", ".", "UserRole", ")", ")", "if", "unit_id", "in", "units", ":", "self", ".", "lstUnits", ".", "setCurrentRow", "(", "units", ".", "index", "(", "unit_id", ")", ")", "self", ".", "auto_select_one_item", "(", "self", ".", "lstUnits", ")" ]
43.305556
0.001255
def close(self): """Parse the buffered stream and, for each observation, write its JSON metadata to a .json file and its data array to a .data file.""" # Seek to the start of the buffer self.seek(0) while True: # Copy bytes from the buffer until we reach the end of the JSON brace_count = 0 quoted = False json_string = '' while True: # Read a character b = self.read(1) if b == b'': return c = b.decode() # If it is a \ then copy the next character if c == '\\': json_string += c json_string += self.read(1).decode() continue # If we are inside quotes we just need to detect a # closing quote if c == '"': if quoted: quoted = False else: quoted = True # Otherwise we count the braces if c == '{': brace_count += 1 elif c == '}': brace_count -= 1 # Copy the character into the JSON string json_string += c # If the brace count is zero we are done if brace_count == 0: break # Parse the JSON so that we can get the size of the data array meta = json.loads(json_string) n_channels = meta['data_cube']['n_channels'] n_sub_integrations = meta['data_cube']['n_sub_integrations'] # Save the JSON to a file f = open('{0}_{1}_{2}.json'.format( meta['metadata']['observation_id'], meta['metadata']['beam_id'], meta['metadata']['name']), 'w') f.write(json.dumps(meta)) f.close() # Read the data data = self.read(n_channels * n_sub_integrations) # Write it to a file f = open('{0}_{1}_{2}.data'.format( meta['metadata']['observation_id'], meta['metadata']['beam_id'], meta['metadata']['name']), 'wb') f.write(data) f.close()
[ "def", "close", "(", "self", ")", ":", "# Seek to the start of the buffer", "self", ".", "seek", "(", "0", ")", "while", "True", ":", "# Copy bytes from the buffer until we reach the end of the JSON", "brace_count", "=", "0", "quoted", "=", "False", "json_string", "=", "''", "while", "True", ":", "# Read a character", "b", "=", "self", ".", "read", "(", "1", ")", "if", "b", "==", "b''", ":", "return", "c", "=", "b", ".", "decode", "(", ")", "# If it is a \\ then copy the next character", "if", "c", "==", "'\\\\'", ":", "json_string", "+=", "c", "json_string", "+=", "self", ".", "read", "(", "1", ")", ".", "decode", "(", ")", "continue", "# If we are inside quotes we just need to detect a", "# closing quote", "if", "c", "==", "'\"'", ":", "if", "quoted", ":", "quoted", "=", "False", "else", ":", "quoted", "=", "True", "# Otherwise we count the braces", "if", "c", "==", "'{'", ":", "brace_count", "+=", "1", "elif", "c", "==", "'}'", ":", "brace_count", "-=", "1", "# Copy the character into the JSON string", "json_string", "+=", "c", "# If the brace count is zero we are done", "if", "brace_count", "==", "0", ":", "break", "# Parse the JSON so that we can get the size of the data array", "meta", "=", "json", ".", "loads", "(", "json_string", ")", "n_channels", "=", "meta", "[", "'data_cube'", "]", "[", "'n_channels'", "]", "n_sub_integrations", "=", "meta", "[", "'data_cube'", "]", "[", "'n_sub_integrations'", "]", "# Save the JSON to a file", "f", "=", "open", "(", "'{0}_{1}_{2}.json'", ".", "format", "(", "meta", "[", "'metadata'", "]", "[", "'observation_id'", "]", ",", "meta", "[", "'metadata'", "]", "[", "'beam_id'", "]", ",", "meta", "[", "'metadata'", "]", "[", "'name'", "]", ")", ",", "'w'", ")", "f", ".", "write", "(", "json", ".", "dumps", "(", "meta", ")", ")", "f", ".", "close", "(", ")", "# Read the data", "data", "=", "self", ".", "read", "(", "n_channels", "*", "n_sub_integrations", ")", "# Write it to a file", "f", "=", "open", "(", "'{0}_{1}_{2}.data'", ".", "format", "(", "meta", "[", "'metadata'", "]", "[", "'observation_id'", "]", ",", "meta", "[", "'metadata'", "]", "[", "'beam_id'", "]", ",", "meta", "[", "'metadata'", "]", "[", "'name'", "]", ")", ",", "'wb'", ")", "f", ".", "write", "(", "data", ")", "f", ".", "close", "(", ")" ]
31.185714
0.000888
def connect_patch_namespaced_pod_proxy_with_path(self, name, namespace, path, **kwargs): # noqa: E501 """connect_patch_namespaced_pod_proxy_with_path # noqa: E501 connect PATCH requests to proxy of Pod # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.connect_patch_namespaced_pod_proxy_with_path(name, namespace, path, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the PodProxyOptions (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str path: path to the resource (required) :param str path2: Path is the URL path to use for the current proxy request to pod. :return: str If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.connect_patch_namespaced_pod_proxy_with_path_with_http_info(name, namespace, path, **kwargs) # noqa: E501 else: (data) = self.connect_patch_namespaced_pod_proxy_with_path_with_http_info(name, namespace, path, **kwargs) # noqa: E501 return data
[ "def", "connect_patch_namespaced_pod_proxy_with_path", "(", "self", ",", "name", ",", "namespace", ",", "path", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "connect_patch_namespaced_pod_proxy_with_path_with_http_info", "(", "name", ",", "namespace", ",", "path", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "connect_patch_namespaced_pod_proxy_with_path_with_http_info", "(", "name", ",", "namespace", ",", "path", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
56.541667
0.001449
def _create_latent_variables(self): """ Creates the model's latent variables Returns ---------- None (changes model attributes) """ # Input layer for unit in range(self.units): self.latent_variables.add_z('Constant | Layer ' + str(1) + ' | Unit ' + str(unit+1), fam.Cauchy(0,1,transform=None), fam.Normal(0, 3)) for ar_term in range(self.ar): self.latent_variables.add_z('AR' + str(ar_term+1) + ' | Layer ' + str(1) + ' | Unit ' + str(unit+1), fam.Cauchy(0,1,transform=None), fam.Normal(0, 3)) for z in range(len(self.X_names)): self.latent_variables.add_z('Weight ' + self.X_names[z], fam.Cauchy(0, 1, transform=None), fam.Normal(0, 3)) # Hidden layers for layer in range(1, self.layers): for unit in range(self.units): for weight in range(self.units): self.latent_variables.add_z('Weight ' + str(weight+1) + ' | Layer ' + str(layer+1) + ' | Unit ' + str(unit+1), fam.Cauchy(0,1,transform=None), fam.Normal(0, 3)) # Output layer for weight in range(self.units): self.latent_variables.add_z('Output Weight ' + str(weight+1), fam.Cauchy(0,1,transform=None), fam.Normal(0, 3))
[ "def", "_create_latent_variables", "(", "self", ")", ":", "# Input layer", "for", "unit", "in", "range", "(", "self", ".", "units", ")", ":", "self", ".", "latent_variables", ".", "add_z", "(", "'Constant | Layer '", "+", "str", "(", "1", ")", "+", "' | Unit '", "+", "str", "(", "unit", "+", "1", ")", ",", "fam", ".", "Cauchy", "(", "0", ",", "1", ",", "transform", "=", "None", ")", ",", "fam", ".", "Normal", "(", "0", ",", "3", ")", ")", "for", "ar_term", "in", "range", "(", "self", ".", "ar", ")", ":", "self", ".", "latent_variables", ".", "add_z", "(", "'AR'", "+", "str", "(", "ar_term", "+", "1", ")", "+", "' | Layer '", "+", "str", "(", "1", ")", "+", "' | Unit '", "+", "str", "(", "unit", "+", "1", ")", ",", "fam", ".", "Cauchy", "(", "0", ",", "1", ",", "transform", "=", "None", ")", ",", "fam", ".", "Normal", "(", "0", ",", "3", ")", ")", "for", "z", "in", "range", "(", "len", "(", "self", ".", "X_names", ")", ")", ":", "self", ".", "latent_variables", ".", "add_z", "(", "'Weight '", "+", "self", ".", "X_names", "[", "z", "]", ",", "fam", ".", "Cauchy", "(", "0", ",", "1", ",", "transform", "=", "None", ")", ",", "fam", ".", "Normal", "(", "0", ",", "3", ")", ")", "# Hidden layers", "for", "layer", "in", "range", "(", "1", ",", "self", ".", "layers", ")", ":", "for", "unit", "in", "range", "(", "self", ".", "units", ")", ":", "for", "weight", "in", "range", "(", "self", ".", "units", ")", ":", "self", ".", "latent_variables", ".", "add_z", "(", "'Weight '", "+", "str", "(", "weight", "+", "1", ")", "+", "' | Layer '", "+", "str", "(", "layer", "+", "1", ")", "+", "' | Unit '", "+", "str", "(", "unit", "+", "1", ")", ",", "fam", ".", "Cauchy", "(", "0", ",", "1", ",", "transform", "=", "None", ")", ",", "fam", ".", "Normal", "(", "0", ",", "3", ")", ")", "# Output layer", "for", "weight", "in", "range", "(", "self", ".", "units", ")", ":", "self", ".", "latent_variables", ".", "add_z", "(", "'Output Weight '", "+", "str", "(", "weight", "+", "1", ")", ",", "fam", ".", "Cauchy", "(", "0", ",", "1", ",", "transform", "=", "None", ")", ",", "fam", ".", "Normal", "(", "0", ",", "3", ")", ")" ]
47.037037
0.011574
def byaxis_out(self): """Object to index along output dimensions. This is only valid for non-trivial `out_shape`. Examples -------- Indexing with integers or slices: >>> domain = odl.IntervalProd(0, 1) >>> fspace = odl.FunctionSpace(domain, out_dtype=(float, (2, 3, 4))) >>> fspace.byaxis_out[0] FunctionSpace(IntervalProd(0.0, 1.0), out_dtype=('float64', (2,))) >>> fspace.byaxis_out[1] FunctionSpace(IntervalProd(0.0, 1.0), out_dtype=('float64', (3,))) >>> fspace.byaxis_out[1:] FunctionSpace(IntervalProd(0.0, 1.0), out_dtype=('float64', (3, 4))) Lists can be used to stack spaces arbitrarily: >>> fspace.byaxis_out[[2, 1, 2]] FunctionSpace(IntervalProd(0.0, 1.0), out_dtype=('float64', (4, 3, 4))) """ space = self class FspaceByaxisOut(object): """Helper class for indexing by output axes.""" def __getitem__(self, indices): """Return ``self[indices]``. Parameters ---------- indices : index expression Object used to index the output components. Returns ------- space : `FunctionSpace` The resulting space with same domain and scalar output data type, but indexed output components. Raises ------ IndexError If this is a space of scalar-valued functions. """ try: iter(indices) except TypeError: newshape = space.out_shape[indices] else: newshape = tuple(space.out_shape[int(i)] for i in indices) dtype = (space.scalar_out_dtype, newshape) return FunctionSpace(space.domain, out_dtype=dtype) def __repr__(self): """Return ``repr(self)``.""" return repr(space) + '.byaxis_out' return FspaceByaxisOut()
[ "def", "byaxis_out", "(", "self", ")", ":", "space", "=", "self", "class", "FspaceByaxisOut", "(", "object", ")", ":", "\"\"\"Helper class for indexing by output axes.\"\"\"", "def", "__getitem__", "(", "self", ",", "indices", ")", ":", "\"\"\"Return ``self[indices]``.\n\n Parameters\n ----------\n indices : index expression\n Object used to index the output components.\n\n Returns\n -------\n space : `FunctionSpace`\n The resulting space with same domain and scalar output\n data type, but indexed output components.\n\n Raises\n ------\n IndexError\n If this is a space of scalar-valued functions.\n \"\"\"", "try", ":", "iter", "(", "indices", ")", "except", "TypeError", ":", "newshape", "=", "space", ".", "out_shape", "[", "indices", "]", "else", ":", "newshape", "=", "tuple", "(", "space", ".", "out_shape", "[", "int", "(", "i", ")", "]", "for", "i", "in", "indices", ")", "dtype", "=", "(", "space", ".", "scalar_out_dtype", ",", "newshape", ")", "return", "FunctionSpace", "(", "space", ".", "domain", ",", "out_dtype", "=", "dtype", ")", "def", "__repr__", "(", "self", ")", ":", "\"\"\"Return ``repr(self)``.\"\"\"", "return", "repr", "(", "space", ")", "+", "'.byaxis_out'", "return", "FspaceByaxisOut", "(", ")" ]
33.047619
0.000933
def get_item_bank_id_metadata(self): """Get the metadata for the item bank.""" metadata = dict(self._item_bank_id_metadata) metadata.update({'existing_id_values': self.my_osid_object_form._my_map['itemBankId']}) return Metadata(**metadata)
[ "def", "get_item_bank_id_metadata", "(", "self", ")", ":", "metadata", "=", "dict", "(", "self", ".", "_item_bank_id_metadata", ")", "metadata", ".", "update", "(", "{", "'existing_id_values'", ":", "self", ".", "my_osid_object_form", ".", "_my_map", "[", "'itemBankId'", "]", "}", ")", "return", "Metadata", "(", "*", "*", "metadata", ")" ]
52.4
0.011278
def concat_sheets(xl_path: str, sheetnames=None, add_tab_names=False): """ Return a pandas DataFrame with the concat'ed content of the `sheetnames` from the Excel file in `xl_path`. Parameters ---------- xl_path: str Path to the Excel file sheetnames: list of str List of existing sheet names of `xl_path`. If None, will use all sheets from `xl_path`. add_tab_names: bool If True will add a 'Tab' column which says from which tab the row comes from. Returns ------- df: pandas.DataFrame """ xl_path, choice = _check_xl_path(xl_path) if sheetnames is None: sheetnames = get_sheet_list(xl_path) sheets = pd.read_excel(xl_path, sheetname=sheetnames) if add_tab_names: for tab in sheets: sheets[tab]['Tab'] = [tab] * len(sheets[tab]) return pd.concat([sheets[tab] for tab in sheets])
[ "def", "concat_sheets", "(", "xl_path", ":", "str", ",", "sheetnames", "=", "None", ",", "add_tab_names", "=", "False", ")", ":", "xl_path", ",", "choice", "=", "_check_xl_path", "(", "xl_path", ")", "if", "sheetnames", "is", "None", ":", "sheetnames", "=", "get_sheet_list", "(", "xl_path", ")", "sheets", "=", "pd", ".", "read_excel", "(", "xl_path", ",", "sheetname", "=", "sheetnames", ")", "if", "add_tab_names", ":", "for", "tab", "in", "sheets", ":", "sheets", "[", "tab", "]", "[", "'Tab'", "]", "=", "[", "tab", "]", "*", "len", "(", "sheets", "[", "tab", "]", ")", "return", "pd", ".", "concat", "(", "[", "sheets", "[", "tab", "]", "for", "tab", "in", "sheets", "]", ")" ]
26.176471
0.001083
def update_bgp_speaker(self, bgp_speaker_id, body=None): """Update a BGP speaker.""" return self.put(self.bgp_speaker_path % bgp_speaker_id, body=body)
[ "def", "update_bgp_speaker", "(", "self", ",", "bgp_speaker_id", ",", "body", "=", "None", ")", ":", "return", "self", ".", "put", "(", "self", ".", "bgp_speaker_path", "%", "bgp_speaker_id", ",", "body", "=", "body", ")" ]
55
0.011976
def getCollectionClass(cls, name) : """Return the class object of a collection given its 'name'""" try : return cls.collectionClasses[name] except KeyError : raise KeyError( "There is no Collection Class of type: '%s'; currently supported values: [%s]" % (name, ', '.join(getCollectionClasses().keys())) )
[ "def", "getCollectionClass", "(", "cls", ",", "name", ")", ":", "try", ":", "return", "cls", ".", "collectionClasses", "[", "name", "]", "except", "KeyError", ":", "raise", "KeyError", "(", "\"There is no Collection Class of type: '%s'; currently supported values: [%s]\"", "%", "(", "name", ",", "', '", ".", "join", "(", "getCollectionClasses", "(", ")", ".", "keys", "(", ")", ")", ")", ")" ]
58
0.022663
def from_tuples_dict(pair_dict): '''pair_dict should be a dict mapping tuple (HET code, residue ID) -> (HET code, residue ID) e.g. {('MG ', 'A 204 ') : ('MG ', 'C 221 '), ...}. HET codes and residue IDs should respectively correspond to columns 17:20 and 21:27 of the PDB file. ''' lm = LigandMap() for k, v in pair_dict.iteritems(): lm.add(k[0], k[1], v[0], v[1]) return lm
[ "def", "from_tuples_dict", "(", "pair_dict", ")", ":", "lm", "=", "LigandMap", "(", ")", "for", "k", ",", "v", "in", "pair_dict", ".", "iteritems", "(", ")", ":", "lm", ".", "add", "(", "k", "[", "0", "]", ",", "k", "[", "1", "]", ",", "v", "[", "0", "]", ",", "v", "[", "1", "]", ")", "return", "lm" ]
53.75
0.009153
def _generate_route_signature( self, route, namespace, # pylint: disable=unused-argument route_args, extra_args, doc_list, task_type_name, func_suffix): """Generates route method signature for the given route.""" for name, _, typ in extra_args: route_args.append((name, typ)) deprecated = 'DEPRECATED: ' if route.deprecated else '' func_name = '{}{}'.format(fmt_route_func(route), func_suffix) self.emit(comment_prefix) if route.doc: route_doc = self.process_doc(route.doc, self._docf) else: route_doc = 'The {} route'.format(func_name) self.emit_wrapped_text( deprecated + route_doc, prefix=comment_prefix, width=120) self.emit(comment_prefix) for name, doc in doc_list: self.emit_wrapped_text( '@param {} {}'.format(name, doc if doc else undocumented), prefix=comment_prefix, width=120) self.emit(comment_prefix) output = ( '@return Through the response callback, the caller will ' + 'receive a `{}` object on success or a `{}` object on failure.') output = output.format( fmt_type(route.result_data_type, tag=False, no_ptr=True), fmt_type(route.error_data_type, tag=False, no_ptr=True)) self.emit_wrapped_text(output, prefix=comment_prefix, width=120) self.emit(comment_prefix) result_type_str = fmt_type(route.result_data_type) if not is_void_type( route.result_data_type) else 'DBNilObject *' error_type_str = fmt_type(route.error_data_type) if not is_void_type( route.error_data_type) else 'DBNilObject *' return_type = '{}<{}, {}> *'.format(task_type_name, result_type_str, error_type_str) deprecated = self._get_deprecation_warning(route) route_signature = fmt_signature( func=func_name, args=fmt_func_args_declaration(route_args), return_type='{}'.format(return_type)) self.emit('{}{};'.format(route_signature, deprecated)) self.emit()
[ "def", "_generate_route_signature", "(", "self", ",", "route", ",", "namespace", ",", "# pylint: disable=unused-argument", "route_args", ",", "extra_args", ",", "doc_list", ",", "task_type_name", ",", "func_suffix", ")", ":", "for", "name", ",", "_", ",", "typ", "in", "extra_args", ":", "route_args", ".", "append", "(", "(", "name", ",", "typ", ")", ")", "deprecated", "=", "'DEPRECATED: '", "if", "route", ".", "deprecated", "else", "''", "func_name", "=", "'{}{}'", ".", "format", "(", "fmt_route_func", "(", "route", ")", ",", "func_suffix", ")", "self", ".", "emit", "(", "comment_prefix", ")", "if", "route", ".", "doc", ":", "route_doc", "=", "self", ".", "process_doc", "(", "route", ".", "doc", ",", "self", ".", "_docf", ")", "else", ":", "route_doc", "=", "'The {} route'", ".", "format", "(", "func_name", ")", "self", ".", "emit_wrapped_text", "(", "deprecated", "+", "route_doc", ",", "prefix", "=", "comment_prefix", ",", "width", "=", "120", ")", "self", ".", "emit", "(", "comment_prefix", ")", "for", "name", ",", "doc", "in", "doc_list", ":", "self", ".", "emit_wrapped_text", "(", "'@param {} {}'", ".", "format", "(", "name", ",", "doc", "if", "doc", "else", "undocumented", ")", ",", "prefix", "=", "comment_prefix", ",", "width", "=", "120", ")", "self", ".", "emit", "(", "comment_prefix", ")", "output", "=", "(", "'@return Through the response callback, the caller will '", "+", "'receive a `{}` object on success or a `{}` object on failure.'", ")", "output", "=", "output", ".", "format", "(", "fmt_type", "(", "route", ".", "result_data_type", ",", "tag", "=", "False", ",", "no_ptr", "=", "True", ")", ",", "fmt_type", "(", "route", ".", "error_data_type", ",", "tag", "=", "False", ",", "no_ptr", "=", "True", ")", ")", "self", ".", "emit_wrapped_text", "(", "output", ",", "prefix", "=", "comment_prefix", ",", "width", "=", "120", ")", "self", ".", "emit", "(", "comment_prefix", ")", "result_type_str", "=", "fmt_type", "(", "route", ".", "result_data_type", ")", "if", "not", "is_void_type", "(", "route", ".", "result_data_type", ")", "else", "'DBNilObject *'", "error_type_str", "=", "fmt_type", "(", "route", ".", "error_data_type", ")", "if", "not", "is_void_type", "(", "route", ".", "error_data_type", ")", "else", "'DBNilObject *'", "return_type", "=", "'{}<{}, {}> *'", ".", "format", "(", "task_type_name", ",", "result_type_str", ",", "error_type_str", ")", "deprecated", "=", "self", ".", "_get_deprecation_warning", "(", "route", ")", "route_signature", "=", "fmt_signature", "(", "func", "=", "func_name", ",", "args", "=", "fmt_func_args_declaration", "(", "route_args", ")", ",", "return_type", "=", "'{}'", ".", "format", "(", "return_type", ")", ")", "self", ".", "emit", "(", "'{}{};'", ".", "format", "(", "route_signature", ",", "deprecated", ")", ")", "self", ".", "emit", "(", ")" ]
39.803571
0.000876
def latlng(arg): """Converts a lat/lon pair to a comma-separated string. For example: sydney = { "lat" : -33.8674869, "lng" : 151.2069902 } convert.latlng(sydney) # '-33.8674869,151.2069902' For convenience, also accepts lat/lon pair as a string, in which case it's returned unchanged. :param arg: The lat/lon pair. :type arg: string or dict or list or tuple """ if is_string(arg): return arg normalized = normalize_lat_lng(arg) return "%s,%s" % (format_float(normalized[0]), format_float(normalized[1]))
[ "def", "latlng", "(", "arg", ")", ":", "if", "is_string", "(", "arg", ")", ":", "return", "arg", "normalized", "=", "normalize_lat_lng", "(", "arg", ")", "return", "\"%s,%s\"", "%", "(", "format_float", "(", "normalized", "[", "0", "]", ")", ",", "format_float", "(", "normalized", "[", "1", "]", ")", ")" ]
23.625
0.001695
def is_identifier_position(rootpath): """Return whether the cursor is in identifier-position in a member declaration.""" if len(rootpath) >= 2 and is_tuple_member_node(rootpath[-2]) and is_identifier(rootpath[-1]): return True if len(rootpath) >= 1 and is_tuple_node(rootpath[-1]): # No deeper node than tuple? Must be identifier position, otherwise we'd have a TupleMemberNode. return True return False
[ "def", "is_identifier_position", "(", "rootpath", ")", ":", "if", "len", "(", "rootpath", ")", ">=", "2", "and", "is_tuple_member_node", "(", "rootpath", "[", "-", "2", "]", ")", "and", "is_identifier", "(", "rootpath", "[", "-", "1", "]", ")", ":", "return", "True", "if", "len", "(", "rootpath", ")", ">=", "1", "and", "is_tuple_node", "(", "rootpath", "[", "-", "1", "]", ")", ":", "# No deeper node than tuple? Must be identifier position, otherwise we'd have a TupleMemberNode.", "return", "True", "return", "False" ]
52
0.018913
def dict_diff(dicts): """ Subset dictionaries to keys which map to multiple values """ diff_keys = set() for k in union(set(d.keys()) for d in dicts): values = [] for d in dicts: if k not in d: diff_keys.add(k) break else: values.append(d[k]) if nunique(values) > 1: diff_keys.add(k) break return [dict_subset(d, diff_keys) for d in dicts]
[ "def", "dict_diff", "(", "dicts", ")", ":", "diff_keys", "=", "set", "(", ")", "for", "k", "in", "union", "(", "set", "(", "d", ".", "keys", "(", ")", ")", "for", "d", "in", "dicts", ")", ":", "values", "=", "[", "]", "for", "d", "in", "dicts", ":", "if", "k", "not", "in", "d", ":", "diff_keys", ".", "add", "(", "k", ")", "break", "else", ":", "values", ".", "append", "(", "d", "[", "k", "]", ")", "if", "nunique", "(", "values", ")", ">", "1", ":", "diff_keys", ".", "add", "(", "k", ")", "break", "return", "[", "dict_subset", "(", "d", ",", "diff_keys", ")", "for", "d", "in", "dicts", "]" ]
25.842105
0.001965
def from_devanagari(self, data): """A convenience method""" from indic_transliteration import sanscript return sanscript.transliterate(data=data, _from=sanscript.DEVANAGARI, _to=self.name)
[ "def", "from_devanagari", "(", "self", ",", "data", ")", ":", "from", "indic_transliteration", "import", "sanscript", "return", "sanscript", ".", "transliterate", "(", "data", "=", "data", ",", "_from", "=", "sanscript", ".", "DEVANAGARI", ",", "_to", "=", "self", ".", "name", ")" ]
52.25
0.014151
def pw( ctx, key_pattern, user_pattern, mode, strict_flag, user_flag, file, edit_subcommand, gen_subcommand, ): """Search for USER and KEY in GPG-encrypted password file.""" # install silent Ctrl-C handler def handle_sigint(*_): click.echo() ctx.exit(1) signal.signal(signal.SIGINT, handle_sigint) # invoke a subcommand? if gen_subcommand: length = int(key_pattern) if key_pattern else None generate_password(mode, length) return elif edit_subcommand: launch_editor(ctx, file) return # verify that database file is present if not os.path.exists(file): click.echo("error: password store not found at '%s'" % file, err=True) ctx.exit(1) # load database store = Store.load(file) # if no user query provided, split key query according to right-most "@" sign (since usernames are typically email addresses) if not user_pattern: user_pattern, _, key_pattern = key_pattern.rpartition("@") # search database results = store.search(key_pattern, user_pattern) results = list(results) # if strict flag is enabled, check that precisely a single record was found if strict_flag and len(results) != 1: click.echo( "error: multiple or no records found (but using --strict flag)", err=True ) ctx.exit(2) # raw mode? if mode == Mode.RAW: for entry in results: click.echo(entry.user if user_flag else entry.password) return # print results for idx, entry in enumerate(results): # start with key and user line = highlight_match(key_pattern, entry.key) if entry.user: line += ": " + highlight_match(user_pattern, entry.user) # add password or copy&paste success message if mode == Mode.ECHO and not user_flag: line += " | " + style_password(entry.password) elif mode == Mode.COPY and idx == 0: try: import pyperclip pyperclip.copy(entry.user if user_flag else entry.password) result = style_success( "*** %s COPIED TO CLIPBOARD ***" % ("USERNAME" if user_flag else "PASSWORD") ) except ImportError: result = style_error('*** PYTHON PACKAGE "PYPERCLIP" NOT FOUND ***') line += " | " + result # add notes if entry.notes: if idx == 0: line += "\n" line += "\n".join(" " + line for line in entry.notes.splitlines()) else: lines = entry.notes.splitlines() line += " | " + lines[0] if len(lines) > 1: line += " (...)" click.echo(line)
[ "def", "pw", "(", "ctx", ",", "key_pattern", ",", "user_pattern", ",", "mode", ",", "strict_flag", ",", "user_flag", ",", "file", ",", "edit_subcommand", ",", "gen_subcommand", ",", ")", ":", "# install silent Ctrl-C handler", "def", "handle_sigint", "(", "*", "_", ")", ":", "click", ".", "echo", "(", ")", "ctx", ".", "exit", "(", "1", ")", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "handle_sigint", ")", "# invoke a subcommand?", "if", "gen_subcommand", ":", "length", "=", "int", "(", "key_pattern", ")", "if", "key_pattern", "else", "None", "generate_password", "(", "mode", ",", "length", ")", "return", "elif", "edit_subcommand", ":", "launch_editor", "(", "ctx", ",", "file", ")", "return", "# verify that database file is present", "if", "not", "os", ".", "path", ".", "exists", "(", "file", ")", ":", "click", ".", "echo", "(", "\"error: password store not found at '%s'\"", "%", "file", ",", "err", "=", "True", ")", "ctx", ".", "exit", "(", "1", ")", "# load database", "store", "=", "Store", ".", "load", "(", "file", ")", "# if no user query provided, split key query according to right-most \"@\" sign (since usernames are typically email addresses)", "if", "not", "user_pattern", ":", "user_pattern", ",", "_", ",", "key_pattern", "=", "key_pattern", ".", "rpartition", "(", "\"@\"", ")", "# search database", "results", "=", "store", ".", "search", "(", "key_pattern", ",", "user_pattern", ")", "results", "=", "list", "(", "results", ")", "# if strict flag is enabled, check that precisely a single record was found", "if", "strict_flag", "and", "len", "(", "results", ")", "!=", "1", ":", "click", ".", "echo", "(", "\"error: multiple or no records found (but using --strict flag)\"", ",", "err", "=", "True", ")", "ctx", ".", "exit", "(", "2", ")", "# raw mode?", "if", "mode", "==", "Mode", ".", "RAW", ":", "for", "entry", "in", "results", ":", "click", ".", "echo", "(", "entry", ".", "user", "if", "user_flag", "else", "entry", ".", "password", ")", "return", "# print results", "for", "idx", ",", "entry", "in", "enumerate", "(", "results", ")", ":", "# start with key and user", "line", "=", "highlight_match", "(", "key_pattern", ",", "entry", ".", "key", ")", "if", "entry", ".", "user", ":", "line", "+=", "\": \"", "+", "highlight_match", "(", "user_pattern", ",", "entry", ".", "user", ")", "# add password or copy&paste sucess message", "if", "mode", "==", "Mode", ".", "ECHO", "and", "not", "user_flag", ":", "line", "+=", "\" | \"", "+", "style_password", "(", "entry", ".", "password", ")", "elif", "mode", "==", "Mode", ".", "COPY", "and", "idx", "==", "0", ":", "try", ":", "import", "pyperclip", "pyperclip", ".", "copy", "(", "entry", ".", "user", "if", "user_flag", "else", "entry", ".", "password", ")", "result", "=", "style_success", "(", "\"*** %s COPIED TO CLIPBOARD ***\"", "%", "(", "\"USERNAME\"", "if", "user_flag", "else", "\"PASSWORD\"", ")", ")", "except", "ImportError", ":", "result", "=", "style_error", "(", "'*** PYTHON PACKAGE \"PYPERCLIP\" NOT FOUND ***'", ")", "line", "+=", "\" | \"", "+", "result", "# add notes", "if", "entry", ".", "notes", ":", "if", "idx", "==", "0", ":", "line", "+=", "\"\\n\"", "line", "+=", "\"\\n\"", ".", "join", "(", "\" \"", "+", "line", "for", "line", "in", "entry", ".", "notes", ".", "splitlines", "(", ")", ")", "else", ":", "lines", "=", "entry", ".", "notes", ".", "splitlines", "(", ")", "line", "+=", "\" | \"", "+", "lines", "[", "0", "]", "if", "len", "(", "lines", ")", ">", "1", ":", "line", "+=", "\" (...)\"", "click", ".", 
"echo", "(", "line", ")" ]
30.163043
0.001745
def sigma_clipped_stats(data, mask=None, mask_value=None, sigma=3.0, sigma_lower=None, sigma_upper=None, maxiters=5, cenfunc='median', stdfunc='std', std_ddof=0, axis=None): """ Calculate sigma-clipped statistics on the provided data. Parameters ---------- data : array-like or `~numpy.ma.MaskedArray` Data array or object that can be converted to an array. mask : `numpy.ndarray` (bool), optional A boolean mask with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked pixels are excluded when computing the statistics. mask_value : float, optional A data value (e.g., ``0.0``) that is ignored when computing the statistics. ``mask_value`` will be masked in addition to any input ``mask``. sigma : float, optional The number of standard deviations to use for both the lower and upper clipping limit. These limits are overridden by ``sigma_lower`` and ``sigma_upper``, if input. The default is 3. sigma_lower : float or `None`, optional The number of standard deviations to use as the lower bound for the clipping limit. If `None` then the value of ``sigma`` is used. The default is `None`. sigma_upper : float or `None`, optional The number of standard deviations to use as the upper bound for the clipping limit. If `None` then the value of ``sigma`` is used. The default is `None`. maxiters : int or `None`, optional The maximum number of sigma-clipping iterations to perform or `None` to clip until convergence is achieved (i.e., iterate until the last iteration clips nothing). If convergence is achieved prior to ``maxiters`` iterations, the clipping iterations will stop. The default is 5. cenfunc : {'median', 'mean'} or callable, optional The statistic or callable function/object used to compute the center value for the clipping. If set to ``'median'`` or ``'mean'`` then having the optional `bottleneck`_ package installed will result in the best performance. If using a callable function/object and the ``axis`` keyword is used, then it must be a callable that can ignore NaNs (e.g. `numpy.nanmean`) and has an ``axis`` keyword to return an array with axis dimension(s) removed. The default is ``'median'``. .. _bottleneck: https://github.com/kwgoodman/bottleneck stdfunc : {'std'} or callable, optional The statistic or callable function/object used to compute the standard deviation about the center value. If set to ``'std'`` then having the optional `bottleneck`_ package installed will result in the best performance. If using a callable function/object and the ``axis`` keyword is used, then it must be a callable that can ignore NaNs (e.g. `numpy.nanstd`) and has an ``axis`` keyword to return an array with axis dimension(s) removed. The default is ``'std'``. std_ddof : int, optional The delta degrees of freedom for the standard deviation calculation. The divisor used in the calculation is ``N - std_ddof``, where ``N`` represents the number of elements. The default is 0. axis : `None` or int or tuple of int, optional The axis or axes along which to sigma clip the data. If `None`, then the flattened data will be used. ``axis`` is passed to the ``cenfunc`` and ``stdfunc``. The default is `None`. Returns ------- mean, median, stddev : float The mean, median, and standard deviation of the sigma-clipped data. See Also -------- SigmaClip, sigma_clip """ if mask is not None: data = np.ma.MaskedArray(data, mask) if mask_value is not None: data = np.ma.masked_values(data, mask_value) sigclip = SigmaClip(sigma=sigma, sigma_lower=sigma_lower, sigma_upper=sigma_upper, maxiters=maxiters, cenfunc=cenfunc, stdfunc=stdfunc) data_clipped = sigclip(data, axis=axis, masked=False, return_bounds=False, copy=False) if HAS_BOTTLENECK: mean = _nanmean(data_clipped, axis=axis) median = _nanmedian(data_clipped, axis=axis) std = _nanstd(data_clipped, ddof=std_ddof, axis=axis) else: # pragma: no cover mean = np.nanmean(data_clipped, axis=axis) median = np.nanmedian(data_clipped, axis=axis) std = np.nanstd(data_clipped, ddof=std_ddof, axis=axis) return mean, median, std
[ "def", "sigma_clipped_stats", "(", "data", ",", "mask", "=", "None", ",", "mask_value", "=", "None", ",", "sigma", "=", "3.0", ",", "sigma_lower", "=", "None", ",", "sigma_upper", "=", "None", ",", "maxiters", "=", "5", ",", "cenfunc", "=", "'median'", ",", "stdfunc", "=", "'std'", ",", "std_ddof", "=", "0", ",", "axis", "=", "None", ")", ":", "if", "mask", "is", "not", "None", ":", "data", "=", "np", ".", "ma", ".", "MaskedArray", "(", "data", ",", "mask", ")", "if", "mask_value", "is", "not", "None", ":", "data", "=", "np", ".", "ma", ".", "masked_values", "(", "data", ",", "mask_value", ")", "sigclip", "=", "SigmaClip", "(", "sigma", "=", "sigma", ",", "sigma_lower", "=", "sigma_lower", ",", "sigma_upper", "=", "sigma_upper", ",", "maxiters", "=", "maxiters", ",", "cenfunc", "=", "cenfunc", ",", "stdfunc", "=", "stdfunc", ")", "data_clipped", "=", "sigclip", "(", "data", ",", "axis", "=", "axis", ",", "masked", "=", "False", ",", "return_bounds", "=", "False", ",", "copy", "=", "False", ")", "if", "HAS_BOTTLENECK", ":", "mean", "=", "_nanmean", "(", "data_clipped", ",", "axis", "=", "axis", ")", "median", "=", "_nanmedian", "(", "data_clipped", ",", "axis", "=", "axis", ")", "std", "=", "_nanstd", "(", "data_clipped", ",", "ddof", "=", "std_ddof", ",", "axis", "=", "axis", ")", "else", ":", "# pragma: no cover", "mean", "=", "np", ".", "nanmean", "(", "data_clipped", ",", "axis", "=", "axis", ")", "median", "=", "np", ".", "nanmedian", "(", "data_clipped", ",", "axis", "=", "axis", ")", "std", "=", "np", ".", "nanstd", "(", "data_clipped", ",", "ddof", "=", "std_ddof", ",", "axis", "=", "axis", ")", "return", "mean", ",", "median", ",", "std" ]
42.627273
0.000208
def unbind(self, callback, event_name = None): """Unbind a callback from an event Params: callback (callable): Callback to unbind event_name (string): If None (default) this callback is removed from every event to which it's bound. If a name is given then it is only removed from that event, if it is bound to that event. """ if event_name is None: for name in self.handlers.keys(): self.unbind(callback, name) return if callback in self.handlers[event_name]: self.handlers[event_name].remove(callback)
[ "def", "unbind", "(", "self", ",", "callback", ",", "event_name", "=", "None", ")", ":", "if", "event_name", "is", "None", ":", "for", "name", "in", "self", ".", "handlers", ".", "keys", "(", ")", ":", "self", ".", "unbind", "(", "callback", ",", "name", ")", "return", "if", "callback", "in", "self", ".", "handlers", "[", "event_name", "]", ":", "self", ".", "handlers", "[", "event_name", "]", ".", "remove", "(", "callback", ")" ]
30.555556
0.03351
def apply_default_prefetch(input_source_or_dataflow, trainer): """ Apply a set of default rules to make a fast :class:`InputSource`. Args: input_source_or_dataflow(InputSource | DataFlow): trainer (Trainer): Returns: InputSource """ if not isinstance(input_source_or_dataflow, InputSource): # to mimic same behavior of the old trainer interface if type(trainer) == SimpleTrainer: input = FeedInput(input_source_or_dataflow) else: logger.info("Automatically applying QueueInput on the DataFlow.") input = QueueInput(input_source_or_dataflow) else: input = input_source_or_dataflow if hasattr(trainer, 'devices'): towers = trainer.devices if len(towers) > 1: # seem to only improve on >1 GPUs assert not isinstance(trainer, SimpleTrainer) if isinstance(input, FeedfreeInput) and \ not isinstance(input, (StagingInput, DummyConstantInput)): logger.info("Automatically applying StagingInput on the DataFlow.") input = StagingInput(input) return input
[ "def", "apply_default_prefetch", "(", "input_source_or_dataflow", ",", "trainer", ")", ":", "if", "not", "isinstance", "(", "input_source_or_dataflow", ",", "InputSource", ")", ":", "# to mimic same behavior of the old trainer interface", "if", "type", "(", "trainer", ")", "==", "SimpleTrainer", ":", "input", "=", "FeedInput", "(", "input_source_or_dataflow", ")", "else", ":", "logger", ".", "info", "(", "\"Automatically applying QueueInput on the DataFlow.\"", ")", "input", "=", "QueueInput", "(", "input_source_or_dataflow", ")", "else", ":", "input", "=", "input_source_or_dataflow", "if", "hasattr", "(", "trainer", ",", "'devices'", ")", ":", "towers", "=", "trainer", ".", "devices", "if", "len", "(", "towers", ")", ">", "1", ":", "# seem to only improve on >1 GPUs", "assert", "not", "isinstance", "(", "trainer", ",", "SimpleTrainer", ")", "if", "isinstance", "(", "input", ",", "FeedfreeInput", ")", "and", "not", "isinstance", "(", "input", ",", "(", "StagingInput", ",", "DummyConstantInput", ")", ")", ":", "logger", ".", "info", "(", "\"Automatically applying StagingInput on the DataFlow.\"", ")", "input", "=", "StagingInput", "(", "input", ")", "return", "input" ]
36.967742
0.001701
def _symbols(): """(Lazy)load list of all supported symbols (sorted) Look into `_data()` for all currency symbols, then sort by length and unicode-ord (A-Z is not as relevant as ֏). Returns: List[unicode]: Sorted list of possible currency symbols. """ global _SYMBOLS if _SYMBOLS is None: tmp = [(s, 'symbol') for s in _data()['symbol'].keys()] tmp += [(s, 'alpha3') for s in _data()['alpha3'].keys()] tmp += [(s.name, 'name') for s in _data()['alpha3'].values()] _SYMBOLS = sorted( tmp, key=lambda s: (len(s[0]), ord(s[0][0])), reverse=True) return _SYMBOLS
[ "def", "_symbols", "(", ")", ":", "global", "_SYMBOLS", "if", "_SYMBOLS", "is", "None", ":", "tmp", "=", "[", "(", "s", ",", "'symbol'", ")", "for", "s", "in", "_data", "(", ")", "[", "'symbol'", "]", ".", "keys", "(", ")", "]", "tmp", "+=", "[", "(", "s", ",", "'alpha3'", ")", "for", "s", "in", "_data", "(", ")", "[", "'alpha3'", "]", ".", "keys", "(", ")", "]", "tmp", "+=", "[", "(", "s", ".", "name", ",", "'name'", ")", "for", "s", "in", "_data", "(", ")", "[", "'alpha3'", "]", ".", "values", "(", ")", "]", "_SYMBOLS", "=", "sorted", "(", "tmp", ",", "key", "=", "lambda", "s", ":", "(", "len", "(", "s", "[", "0", "]", ")", ",", "ord", "(", "s", "[", "0", "]", "[", "0", "]", ")", ")", ",", "reverse", "=", "True", ")", "return", "_SYMBOLS" ]
32.5
0.001495
def serial_udb_extra_f8_send(self, sue_HEIGHT_TARGET_MAX, sue_HEIGHT_TARGET_MIN, sue_ALT_HOLD_THROTTLE_MIN, sue_ALT_HOLD_THROTTLE_MAX, sue_ALT_HOLD_PITCH_MIN, sue_ALT_HOLD_PITCH_MAX, sue_ALT_HOLD_PITCH_HIGH, force_mavlink1=False): ''' Backwards compatible version of SERIAL_UDB_EXTRA F8: format sue_HEIGHT_TARGET_MAX : Serial UDB Extra HEIGHT_TARGET_MAX (float) sue_HEIGHT_TARGET_MIN : Serial UDB Extra HEIGHT_TARGET_MIN (float) sue_ALT_HOLD_THROTTLE_MIN : Serial UDB Extra ALT_HOLD_THROTTLE_MIN (float) sue_ALT_HOLD_THROTTLE_MAX : Serial UDB Extra ALT_HOLD_THROTTLE_MAX (float) sue_ALT_HOLD_PITCH_MIN : Serial UDB Extra ALT_HOLD_PITCH_MIN (float) sue_ALT_HOLD_PITCH_MAX : Serial UDB Extra ALT_HOLD_PITCH_MAX (float) sue_ALT_HOLD_PITCH_HIGH : Serial UDB Extra ALT_HOLD_PITCH_HIGH (float) ''' return self.send(self.serial_udb_extra_f8_encode(sue_HEIGHT_TARGET_MAX, sue_HEIGHT_TARGET_MIN, sue_ALT_HOLD_THROTTLE_MIN, sue_ALT_HOLD_THROTTLE_MAX, sue_ALT_HOLD_PITCH_MIN, sue_ALT_HOLD_PITCH_MAX, sue_ALT_HOLD_PITCH_HIGH), force_mavlink1=force_mavlink1)
[ "def", "serial_udb_extra_f8_send", "(", "self", ",", "sue_HEIGHT_TARGET_MAX", ",", "sue_HEIGHT_TARGET_MIN", ",", "sue_ALT_HOLD_THROTTLE_MIN", ",", "sue_ALT_HOLD_THROTTLE_MAX", ",", "sue_ALT_HOLD_PITCH_MIN", ",", "sue_ALT_HOLD_PITCH_MAX", ",", "sue_ALT_HOLD_PITCH_HIGH", ",", "force_mavlink1", "=", "False", ")", ":", "return", "self", ".", "send", "(", "self", ".", "serial_udb_extra_f8_encode", "(", "sue_HEIGHT_TARGET_MAX", ",", "sue_HEIGHT_TARGET_MIN", ",", "sue_ALT_HOLD_THROTTLE_MIN", ",", "sue_ALT_HOLD_THROTTLE_MAX", ",", "sue_ALT_HOLD_PITCH_MIN", ",", "sue_ALT_HOLD_PITCH_MAX", ",", "sue_ALT_HOLD_PITCH_HIGH", ")", ",", "force_mavlink1", "=", "force_mavlink1", ")" ]
89.928571
0.008648
def sparse_angular_random_projection_split(inds, indptr, data, indices, rng_state): """Given a set of ``indices`` for data points from a sparse data set presented in csr sparse format as inds, indptr and data, create a random hyperplane to split the data, returning two arrays of indices that fall on either side of the hyperplane. This is the basis for a random projection tree, which simply uses this splitting recursively. This particular split uses cosine distance to determine the hyperplane and which side each data sample falls on. Parameters ---------- inds: array CSR format index array of the matrix indptr: array CSR format index pointer array of the matrix data: array CSR format data array of the matrix indices: array of shape (tree_node_size,) The indices of the elements in the ``data`` array that are to be split in the current operation. rng_state: array of int64, shape (3,) The internal state of the rng Returns ------- indices_left: array The elements of ``indices`` that fall on the "left" side of the random hyperplane. indices_right: array The elements of ``indices`` that fall on the "right" side of the random hyperplane. """ # Select two random points, set the hyperplane between them left_index = tau_rand_int(rng_state) % indices.shape[0] right_index = tau_rand_int(rng_state) % indices.shape[0] right_index += left_index == right_index right_index = right_index % indices.shape[0] left = indices[left_index] right = indices[right_index] left_inds = inds[indptr[left] : indptr[left + 1]] left_data = data[indptr[left] : indptr[left + 1]] right_inds = inds[indptr[right] : indptr[right + 1]] right_data = data[indptr[right] : indptr[right + 1]] left_norm = norm(left_data) right_norm = norm(right_data) if abs(left_norm) < EPS: left_norm = 1.0 if abs(right_norm) < EPS: right_norm = 1.0 # Compute the normal vector to the hyperplane (the vector between # the two points) normalized_left_data = left_data / left_norm normalized_right_data = right_data / right_norm hyperplane_inds, hyperplane_data = sparse_diff( left_inds, normalized_left_data, right_inds, normalized_right_data ) hyperplane_norm = norm(hyperplane_data) if abs(hyperplane_norm) < EPS: hyperplane_norm = 1.0 for d in range(hyperplane_data.shape[0]): hyperplane_data[d] = hyperplane_data[d] / hyperplane_norm # For each point compute the margin (project into normal vector) # If we are on lower side of the hyperplane put in one pile, otherwise # put it in the other pile (if we hit hyperplane on the nose, flip a coin) n_left = 0 n_right = 0 side = np.empty(indices.shape[0], np.int8) for i in range(indices.shape[0]): margin = 0.0 i_inds = inds[indptr[indices[i]] : indptr[indices[i] + 1]] i_data = data[indptr[indices[i]] : indptr[indices[i] + 1]] mul_inds, mul_data = sparse_mul( hyperplane_inds, hyperplane_data, i_inds, i_data ) for d in range(mul_data.shape[0]): margin += mul_data[d] if abs(margin) < EPS: side[i] = tau_rand_int(rng_state) % 2 if side[i] == 0: n_left += 1 else: n_right += 1 elif margin > 0: side[i] = 0 n_left += 1 else: side[i] = 1 n_right += 1 # Now that we have the counts allocate arrays indices_left = np.empty(n_left, dtype=np.int64) indices_right = np.empty(n_right, dtype=np.int64) # Populate the arrays with indices according to which side they fell on n_left = 0 n_right = 0 for i in range(side.shape[0]): if side[i] == 0: indices_left[n_left] = indices[i] n_left += 1 else: indices_right[n_right] = indices[i] n_right += 1 hyperplane = np.vstack((hyperplane_inds, hyperplane_data)) return indices_left, indices_right, hyperplane, None
[ "def", "sparse_angular_random_projection_split", "(", "inds", ",", "indptr", ",", "data", ",", "indices", ",", "rng_state", ")", ":", "# Select two random points, set the hyperplane between them", "left_index", "=", "tau_rand_int", "(", "rng_state", ")", "%", "indices", ".", "shape", "[", "0", "]", "right_index", "=", "tau_rand_int", "(", "rng_state", ")", "%", "indices", ".", "shape", "[", "0", "]", "right_index", "+=", "left_index", "==", "right_index", "right_index", "=", "right_index", "%", "indices", ".", "shape", "[", "0", "]", "left", "=", "indices", "[", "left_index", "]", "right", "=", "indices", "[", "right_index", "]", "left_inds", "=", "inds", "[", "indptr", "[", "left", "]", ":", "indptr", "[", "left", "+", "1", "]", "]", "left_data", "=", "data", "[", "indptr", "[", "left", "]", ":", "indptr", "[", "left", "+", "1", "]", "]", "right_inds", "=", "inds", "[", "indptr", "[", "right", "]", ":", "indptr", "[", "right", "+", "1", "]", "]", "right_data", "=", "data", "[", "indptr", "[", "right", "]", ":", "indptr", "[", "right", "+", "1", "]", "]", "left_norm", "=", "norm", "(", "left_data", ")", "right_norm", "=", "norm", "(", "right_data", ")", "if", "abs", "(", "left_norm", ")", "<", "EPS", ":", "left_norm", "=", "1.0", "if", "abs", "(", "right_norm", ")", "<", "EPS", ":", "right_norm", "=", "1.0", "# Compute the normal vector to the hyperplane (the vector between", "# the two points)", "normalized_left_data", "=", "left_data", "/", "left_norm", "normalized_right_data", "=", "right_data", "/", "right_norm", "hyperplane_inds", ",", "hyperplane_data", "=", "sparse_diff", "(", "left_inds", ",", "normalized_left_data", ",", "right_inds", ",", "normalized_right_data", ")", "hyperplane_norm", "=", "norm", "(", "hyperplane_data", ")", "if", "abs", "(", "hyperplane_norm", ")", "<", "EPS", ":", "hyperplane_norm", "=", "1.0", "for", "d", "in", "range", "(", "hyperplane_data", ".", "shape", "[", "0", "]", ")", ":", "hyperplane_data", "[", "d", "]", "=", "hyperplane_data", "[", "d", "]", "/", "hyperplane_norm", "# For each point compute the margin (project into normal vector)", "# If we are on lower side of the hyperplane put in one pile, otherwise", "# put it in the other pile (if we hit hyperplane on the nose, flip a coin)", "n_left", "=", "0", "n_right", "=", "0", "side", "=", "np", ".", "empty", "(", "indices", ".", "shape", "[", "0", "]", ",", "np", ".", "int8", ")", "for", "i", "in", "range", "(", "indices", ".", "shape", "[", "0", "]", ")", ":", "margin", "=", "0.0", "i_inds", "=", "inds", "[", "indptr", "[", "indices", "[", "i", "]", "]", ":", "indptr", "[", "indices", "[", "i", "]", "+", "1", "]", "]", "i_data", "=", "data", "[", "indptr", "[", "indices", "[", "i", "]", "]", ":", "indptr", "[", "indices", "[", "i", "]", "+", "1", "]", "]", "mul_inds", ",", "mul_data", "=", "sparse_mul", "(", "hyperplane_inds", ",", "hyperplane_data", ",", "i_inds", ",", "i_data", ")", "for", "d", "in", "range", "(", "mul_data", ".", "shape", "[", "0", "]", ")", ":", "margin", "+=", "mul_data", "[", "d", "]", "if", "abs", "(", "margin", ")", "<", "EPS", ":", "side", "[", "i", "]", "=", "tau_rand_int", "(", "rng_state", ")", "%", "2", "if", "side", "[", "i", "]", "==", "0", ":", "n_left", "+=", "1", "else", ":", "n_right", "+=", "1", "elif", "margin", ">", "0", ":", "side", "[", "i", "]", "=", "0", "n_left", "+=", "1", "else", ":", "side", "[", "i", "]", "=", "1", "n_right", "+=", "1", "# Now that we have the counts allocate arrays", "indices_left", "=", "np", ".", "empty", "(", "n_left", ",", 
"dtype", "=", "np", ".", "int64", ")", "indices_right", "=", "np", ".", "empty", "(", "n_right", ",", "dtype", "=", "np", ".", "int64", ")", "# Populate the arrays with indices according to which side they fell on", "n_left", "=", "0", "n_right", "=", "0", "for", "i", "in", "range", "(", "side", ".", "shape", "[", "0", "]", ")", ":", "if", "side", "[", "i", "]", "==", "0", ":", "indices_left", "[", "n_left", "]", "=", "indices", "[", "i", "]", "n_left", "+=", "1", "else", ":", "indices_right", "[", "n_right", "]", "=", "indices", "[", "i", "]", "n_right", "+=", "1", "hyperplane", "=", "np", ".", "vstack", "(", "(", "hyperplane_inds", ",", "hyperplane_data", ")", ")", "return", "indices_left", ",", "indices_right", ",", "hyperplane", ",", "None" ]
35.582609
0.001902
def localize(self, mode="r", perm=None, parent_perm=None, **kwargs): """ localize(mode="r", perm=None, parent_perm=None, skip_copy=False, is_tmp=None, **kwargs) """ if mode not in ("r", "w"): raise Exception("unknown mode '{}', use r or w".format(mode)) # get additional arguments skip_copy = kwargs.pop("skip_copy", False) is_tmp = kwargs.pop("is_tmp", mode == "w") if mode == "r": if is_tmp: # create a temporary target tmp = self.__class__(is_tmp=self.ext(n=1) or True) # always copy self.copy_to_local(tmp) # yield the copy try: yield tmp finally: tmp.remove() else: # simply yield yield self else: # write mode if is_tmp: # create a temporary target tmp = self.__class__(is_tmp=self.ext(n=1) or True) # copy when existing if not skip_copy and self.exists(): self.copy_to_local(tmp) # yield the copy try: yield tmp # move back again if tmp.exists(): tmp.move_to_local(self, dir_perm=parent_perm) self.chmod(perm) else: logger.warning("cannot move non-existing localized file target {!r}".format( self)) finally: tmp.remove() else: # create the parent dir self.parent.touch(perm=parent_perm) # simply yield yield self if self.exists(): self.chmod(perm)
[ "def", "localize", "(", "self", ",", "mode", "=", "\"r\"", ",", "perm", "=", "None", ",", "parent_perm", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "mode", "not", "in", "(", "\"r\"", ",", "\"w\"", ")", ":", "raise", "Exception", "(", "\"unknown mode '{}', use r or w\"", ".", "format", "(", "mode", ")", ")", "# get additional arguments", "skip_copy", "=", "kwargs", ".", "pop", "(", "\"skip_copy\"", ",", "False", ")", "is_tmp", "=", "kwargs", ".", "pop", "(", "\"is_tmp\"", ",", "mode", "==", "\"w\"", ")", "if", "mode", "==", "\"r\"", ":", "if", "is_tmp", ":", "# create a temporary target", "tmp", "=", "self", ".", "__class__", "(", "is_tmp", "=", "self", ".", "ext", "(", "n", "=", "1", ")", "or", "True", ")", "# always copy", "self", ".", "copy_to_local", "(", "tmp", ")", "# yield the copy", "try", ":", "yield", "tmp", "finally", ":", "tmp", ".", "remove", "(", ")", "else", ":", "# simply yield", "yield", "self", "else", ":", "# write mode", "if", "is_tmp", ":", "# create a temporary target", "tmp", "=", "self", ".", "__class__", "(", "is_tmp", "=", "self", ".", "ext", "(", "n", "=", "1", ")", "or", "True", ")", "# copy when existing", "if", "not", "skip_copy", "and", "self", ".", "exists", "(", ")", ":", "self", ".", "copy_to_local", "(", "tmp", ")", "# yield the copy", "try", ":", "yield", "tmp", "# move back again", "if", "tmp", ".", "exists", "(", ")", ":", "tmp", ".", "move_to_local", "(", "self", ",", "dir_perm", "=", "parent_perm", ")", "self", ".", "chmod", "(", "perm", ")", "else", ":", "logger", ".", "warning", "(", "\"cannot move non-existing localized file target {!r}\"", ".", "format", "(", "self", ")", ")", "finally", ":", "tmp", ".", "remove", "(", ")", "else", ":", "# create the parent dir", "self", ".", "parent", ".", "touch", "(", "perm", "=", "parent_perm", ")", "# simply yield", "yield", "self", "if", "self", ".", "exists", "(", ")", ":", "self", ".", "chmod", "(", "perm", ")" ]
31.87931
0.002099
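A standard-library sketch of the copy-out/copy-back pattern that localize() implements in write mode: edit a temporary local copy, then move it over the real file only when the block succeeds. This is an analogy, not the target class's actual machinery:

import contextlib
import os
import shutil
import tempfile

@contextlib.contextmanager
def localize_for_write(path):
    fd, tmp = tempfile.mkstemp()
    os.close(fd)
    if os.path.exists(path):       # seed the copy when the file already exists
        shutil.copy2(path, tmp)
    try:
        yield tmp                  # caller edits the temporary copy
        shutil.move(tmp, path)     # move the result back on success
    finally:
        if os.path.exists(tmp):
            os.remove(tmp)

with localize_for_write("demo.txt") as tmp:
    with open(tmp, "w") as f:
        f.write("hello")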
def _redis_notifier(state): """Notify of configuration update through redis. Arguments: state (_WaffleState): Object that contains reference to app and its configstore. """ tstamp = time.time() state._tstamp = tstamp conf = state.app.config # Notify timestamp r = redis.client.StrictRedis() r.publish(conf.get('WAFFLE_REDIS_CHANNEL', 'waffleconf'), tstamp)
[ "def", "_redis_notifier", "(", "state", ")", ":", "tstamp", "=", "time", ".", "time", "(", ")", "state", ".", "_tstamp", "=", "tstamp", "conf", "=", "state", ".", "app", ".", "config", "# Notify timestamp", "r", "=", "redis", ".", "client", ".", "StrictRedis", "(", ")", "r", ".", "publish", "(", "conf", ".", "get", "(", "'WAFFLE_REDIS_CHANNEL'", ",", "'waffleconf'", ")", ",", "tstamp", ")" ]
28.642857
0.002415
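The listening side of the notification above, as a minimal redis-py sketch; it assumes a reachable Redis server and the default 'waffleconf' channel used by the publisher:

import redis

r = redis.StrictRedis()
p = r.pubsub(ignore_subscribe_messages=True)
p.subscribe('waffleconf')
for message in p.listen():
    tstamp = float(message['data'])   # the timestamp published above
    print('configuration changed at', tstamp)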
def start(track_file, twitter_api_key, twitter_api_secret, twitter_access_token, twitter_access_token_secret, poll_interval=15, unfiltered=False, languages=None, debug=False, outfile=None): """Start the stream.""" listener = construct_listener(outfile) checker = BasicFileTermChecker(track_file, listener) auth = get_tweepy_auth(twitter_api_key, twitter_api_secret, twitter_access_token, twitter_access_token_secret) stream = DynamicTwitterStream(auth, listener, checker, unfiltered=unfiltered, languages=languages) set_terminate_listeners(stream) if debug: set_debug_listener(stream) begin_stream_loop(stream, poll_interval)
[ "def", "start", "(", "track_file", ",", "twitter_api_key", ",", "twitter_api_secret", ",", "twitter_access_token", ",", "twitter_access_token_secret", ",", "poll_interval", "=", "15", ",", "unfiltered", "=", "False", ",", "languages", "=", "None", ",", "debug", "=", "False", ",", "outfile", "=", "None", ")", ":", "listener", "=", "construct_listener", "(", "outfile", ")", "checker", "=", "BasicFileTermChecker", "(", "track_file", ",", "listener", ")", "auth", "=", "get_tweepy_auth", "(", "twitter_api_key", ",", "twitter_api_secret", ",", "twitter_access_token", ",", "twitter_access_token_secret", ")", "stream", "=", "DynamicTwitterStream", "(", "auth", ",", "listener", ",", "checker", ",", "unfiltered", "=", "unfiltered", ",", "languages", "=", "languages", ")", "set_terminate_listeners", "(", "stream", ")", "if", "debug", ":", "set_debug_listener", "(", "stream", ")", "begin_stream_loop", "(", "stream", ",", "poll_interval", ")" ]
31.346154
0.002381
def hash_from_file(filename): """ Compute the fuzzy hash of a file. Opens, reads, and hashes the contents of the file 'filename'. :param String|Bytes filename: The name of the file to be hashed :return: The fuzzy hash of the file :rtype: String :raises IOError: If Python is unable to read the file :raises InternalError: If the underlying library returns an internal error """ if not os.path.exists(filename): raise IOError("Path not found") if not os.path.isfile(filename): raise IOError("File not found") if not os.access(filename, os.R_OK): raise IOError("File is not readable") result = ffi.new("char[]", binding.lib.FUZZY_MAX_RESULT) if binding.lib.fuzzy_hash_filename(filename.encode("utf-8"), result) != 0: raise InternalError("Function returned an unexpected error code") return ffi.string(result).decode("ascii")
[ "def", "hash_from_file", "(", "filename", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "raise", "IOError", "(", "\"Path not found\"", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "raise", "IOError", "(", "\"File not found\"", ")", "if", "not", "os", ".", "access", "(", "filename", ",", "os", ".", "R_OK", ")", ":", "raise", "IOError", "(", "\"File is not readable\"", ")", "result", "=", "ffi", ".", "new", "(", "\"char[]\"", ",", "binding", ".", "lib", ".", "FUZZY_MAX_RESULT", ")", "if", "binding", ".", "lib", ".", "fuzzy_hash_filename", "(", "filename", ".", "encode", "(", "\"utf-8\"", ")", ",", "result", ")", "!=", "0", ":", "raise", "InternalError", "(", "\"Function returned an unexpected error code\"", ")", "return", "ffi", ".", "string", "(", "result", ")", ".", "decode", "(", "\"ascii\"", ")" ]
33.653846
0.001111
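A usage sketch for hash_from_file() above (it ships with ssdeep-style fuzzy-hashing bindings, so the exact digest depends on the library build):

import tempfile

with tempfile.NamedTemporaryFile(suffix='.bin', delete=False) as f:
    f.write(b'hello fuzzy hashing ' * 300)   # fuzzy hashes want non-trivial input
    path = f.name

digest = hash_from_file(path)
print(digest)   # a 'blocksize:hash1:hash2' style string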
def _iana_unassigned_port_ranges(): """ Returns unassigned port ranges according to IANA. """ page = urllib2.urlopen(IANA_DOWNLOAD_URL).read() xml = ElementTree.fromstring(page) records = xml.findall('{%s}record' % IANA_NS) for record in records: description = record.find('{%s}description' % IANA_NS).text if description == 'Unassigned': numbers = record.find('{%s}number' % IANA_NS).text yield numbers
[ "def", "_iana_unassigned_port_ranges", "(", ")", ":", "page", "=", "urllib2", ".", "urlopen", "(", "IANA_DOWNLOAD_URL", ")", ".", "read", "(", ")", "xml", "=", "ElementTree", ".", "fromstring", "(", "page", ")", "records", "=", "xml", ".", "findall", "(", "'{%s}record'", "%", "IANA_NS", ")", "for", "record", "in", "records", ":", "description", "=", "record", ".", "find", "(", "'{%s}description'", "%", "IANA_NS", ")", ".", "text", "if", "description", "==", "'Unassigned'", ":", "numbers", "=", "record", ".", "find", "(", "'{%s}number'", "%", "IANA_NS", ")", ".", "text", "yield", "numbers" ]
38.333333
0.002123
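The registry XML that the generator above walks looks roughly like the trimmed sample below (the namespace matches the '{%s}record' lookups; real IANA records carry more fields):

from xml.etree import ElementTree

IANA_NS = 'http://www.iana.org/assignments'
sample = b'''<?xml version="1.0"?>
<registry xmlns="http://www.iana.org/assignments">
  <record><description>Unassigned</description><number>4-5</number></record>
  <record><description>ssh</description><number>22</number></record>
</registry>'''

xml = ElementTree.fromstring(sample)
for record in xml.findall('{%s}record' % IANA_NS):
    if record.find('{%s}description' % IANA_NS).text == 'Unassigned':
        print(record.find('{%s}number' % IANA_NS).text)   # -> 4-5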
def random_shift(image, wsr=0.1, hsr=0.1): """Apply random horizontal and vertical shift to images. This is the default data-augmentation strategy used on CIFAR in Glow. Args: image: a 3-D Tensor wsr: Width shift range, as a float fraction of the width. hsr: Height shift range, as a float fraction of the height. Returns: images: images translated by the provided wsr and hsr. """ height, width, _ = common_layers.shape_list(image) width_range, height_range = wsr*width, hsr*height height_translations = tf.random_uniform((1,), -height_range, height_range) width_translations = tf.random_uniform((1,), -width_range, width_range) translations = tf.concat((height_translations, width_translations), axis=0) return tf.contrib.image.translate(image, translations=translations)
[ "def", "random_shift", "(", "image", ",", "wsr", "=", "0.1", ",", "hsr", "=", "0.1", ")", ":", "height", ",", "width", ",", "_", "=", "common_layers", ".", "shape_list", "(", "image", ")", "width_range", ",", "height_range", "=", "wsr", "*", "width", ",", "hsr", "*", "height", "height_translations", "=", "tf", ".", "random_uniform", "(", "(", "1", ",", ")", ",", "-", "height_range", ",", "height_range", ")", "width_translations", "=", "tf", ".", "random_uniform", "(", "(", "1", ",", ")", ",", "-", "width_range", ",", "width_range", ")", "translations", "=", "tf", ".", "concat", "(", "(", "height_translations", ",", "width_translations", ")", ",", "axis", "=", "0", ")", "return", "tf", ".", "contrib", ".", "image", ".", "translate", "(", "image", ",", "translations", "=", "translations", ")" ]
44.166667
0.009852
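Worked numbers for random_shift() above: on a 32x32 image with wsr=hsr=0.1, both translations are drawn uniformly from [-3.2, 3.2] pixels. A NumPy restatement of just the sampling step (the TF call then applies the translation):

import numpy as np

height, width = 32, 32
wsr, hsr = 0.1, 0.1
rng = np.random.default_rng(0)
dy = rng.uniform(-hsr * height, hsr * height)   # vertical shift in pixels
dx = rng.uniform(-wsr * width, wsr * width)     # horizontal shift in pixels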
def preprocess_input(features, target, train_config, preprocess_output_dir, model_type): """Perform some transformations after reading in the input tensors. Args: features: dict of feature_name to tensor target: tensor train_config: our training config object preprocess_output_dir: folder should contain the vocab files. model_type: the tf model type. Raises: ValueError: if wrong transforms are used Returns: New features dict and new target tensor. """ target_name = train_config['target_column'] key_name = train_config['key_column'] # Do the numerical transforms. # Numerical transforms supported for regression/classification # 1) num -> do nothing (identity, default) # 2) num -> scale to -1, 1 (scale) # 3) num -> scale to -a, a (scale with value parameter) with tf.name_scope('numerical_feature_preprocess'): if train_config['numerical_columns']: numerical_analysis_file = os.path.join(preprocess_output_dir, NUMERICAL_ANALYSIS) if not file_io.file_exists(numerical_analysis_file): raise ValueError('File %s not found in %s' % (NUMERICAL_ANALYSIS, preprocess_output_dir)) numerical_anlysis = json.loads( python_portable_string( file_io.read_file_to_string(numerical_analysis_file))) for name in train_config['numerical_columns']: if name == target_name or name == key_name: continue transform_config = train_config['transforms'].get(name, {}) transform_name = transform_config.get('transform', None) if transform_name == 'scale': value = float(transform_config.get('value', 1.0)) features[name] = _scale_tensor( features[name], range_min=numerical_anlysis[name]['min'], range_max=numerical_anlysis[name]['max'], scale_min=-value, scale_max=value) elif transform_name == 'identity' or transform_name is None: pass else: raise ValueError(('For numerical variables, only scale ' 'and identity are supported: ' 'Error for %s') % name) # Do target transform if it exists. if target is not None: with tf.name_scope('target_feature_preprocess'): if target_name in train_config['categorical_columns']: labels = train_config['vocab_stats'][target_name]['labels'] table = tf.contrib.lookup.string_to_index_table_from_tensor(labels) target = table.lookup(target) # target = tf.contrib.lookup.string_to_index(target, labels) # Do categorical transforms. Only apply vocab mapping. The real # transforms are done with tf learn column features. with tf.name_scope('categorical_feature_preprocess'): for name in train_config['categorical_columns']: if name == key_name or name == target_name: continue transform_config = train_config['transforms'].get(name, {}) transform_name = transform_config.get('transform', None) if is_dnn_model(model_type): if transform_name == 'embedding' or transform_name == 'one_hot' or transform_name is None: map_vocab = True else: raise ValueError('Unknown transform %s' % transform_name) elif is_linear_model(model_type): if (transform_name == 'one_hot' or transform_name is None): map_vocab = True elif transform_name == 'embedding': map_vocab = False else: raise ValueError('Unknown transform %s' % transform_name) if map_vocab: labels = train_config['vocab_stats'][name]['labels'] table = tf.contrib.lookup.string_to_index_table_from_tensor(labels) features[name] = table.lookup(features[name]) return features, target
[ "def", "preprocess_input", "(", "features", ",", "target", ",", "train_config", ",", "preprocess_output_dir", ",", "model_type", ")", ":", "target_name", "=", "train_config", "[", "'target_column'", "]", "key_name", "=", "train_config", "[", "'key_column'", "]", "# Do the numerical transforms.", "# Numerical transforms supported for regression/classification", "# 1) num -> do nothing (identity, default)", "# 2) num -> scale to -1, 1 (scale)", "# 3) num -> scale to -a, a (scale with value parameter)", "with", "tf", ".", "name_scope", "(", "'numerical_feature_preprocess'", ")", ":", "if", "train_config", "[", "'numerical_columns'", "]", ":", "numerical_analysis_file", "=", "os", ".", "path", ".", "join", "(", "preprocess_output_dir", ",", "NUMERICAL_ANALYSIS", ")", "if", "not", "file_io", ".", "file_exists", "(", "numerical_analysis_file", ")", ":", "raise", "ValueError", "(", "'File %s not found in %s'", "%", "(", "NUMERICAL_ANALYSIS", ",", "preprocess_output_dir", ")", ")", "numerical_anlysis", "=", "json", ".", "loads", "(", "python_portable_string", "(", "file_io", ".", "read_file_to_string", "(", "numerical_analysis_file", ")", ")", ")", "for", "name", "in", "train_config", "[", "'numerical_columns'", "]", ":", "if", "name", "==", "target_name", "or", "name", "==", "key_name", ":", "continue", "transform_config", "=", "train_config", "[", "'transforms'", "]", ".", "get", "(", "name", ",", "{", "}", ")", "transform_name", "=", "transform_config", ".", "get", "(", "'transform'", ",", "None", ")", "if", "transform_name", "==", "'scale'", ":", "value", "=", "float", "(", "transform_config", ".", "get", "(", "'value'", ",", "1.0", ")", ")", "features", "[", "name", "]", "=", "_scale_tensor", "(", "features", "[", "name", "]", ",", "range_min", "=", "numerical_anlysis", "[", "name", "]", "[", "'min'", "]", ",", "range_max", "=", "numerical_anlysis", "[", "name", "]", "[", "'max'", "]", ",", "scale_min", "=", "-", "value", ",", "scale_max", "=", "value", ")", "elif", "transform_name", "==", "'identity'", "or", "transform_name", "is", "None", ":", "pass", "else", ":", "raise", "ValueError", "(", "(", "'For numerical variables, only scale '", "'and identity are supported: '", "'Error for %s'", ")", "%", "name", ")", "# Do target transform if it exists.", "if", "target", "is", "not", "None", ":", "with", "tf", ".", "name_scope", "(", "'target_feature_preprocess'", ")", ":", "if", "target_name", "in", "train_config", "[", "'categorical_columns'", "]", ":", "labels", "=", "train_config", "[", "'vocab_stats'", "]", "[", "target_name", "]", "[", "'labels'", "]", "table", "=", "tf", ".", "contrib", ".", "lookup", ".", "string_to_index_table_from_tensor", "(", "labels", ")", "target", "=", "table", ".", "lookup", "(", "target", ")", "# target = tf.contrib.lookup.string_to_index(target, labels)", "# Do categorical transforms. Only apply vocab mapping. 
The real", "# transforms are done with tf learn column features.", "with", "tf", ".", "name_scope", "(", "'categorical_feature_preprocess'", ")", ":", "for", "name", "in", "train_config", "[", "'categorical_columns'", "]", ":", "if", "name", "==", "key_name", "or", "name", "==", "target_name", ":", "continue", "transform_config", "=", "train_config", "[", "'transforms'", "]", ".", "get", "(", "name", ",", "{", "}", ")", "transform_name", "=", "transform_config", ".", "get", "(", "'transform'", ",", "None", ")", "if", "is_dnn_model", "(", "model_type", ")", ":", "if", "transform_name", "==", "'embedding'", "or", "transform_name", "==", "'one_hot'", "or", "transform_name", "is", "None", ":", "map_vocab", "=", "True", "else", ":", "raise", "ValueError", "(", "'Unknown transform %s'", "%", "transform_name", ")", "elif", "is_linear_model", "(", "model_type", ")", ":", "if", "(", "transform_name", "==", "'one_hot'", "or", "transform_name", "is", "None", ")", ":", "map_vocab", "=", "True", "elif", "transform_name", "==", "'embedding'", ":", "map_vocab", "=", "False", "else", ":", "raise", "ValueError", "(", "'Unknown transform %s'", "%", "transform_name", ")", "if", "map_vocab", ":", "labels", "=", "train_config", "[", "'vocab_stats'", "]", "[", "name", "]", "[", "'labels'", "]", "table", "=", "tf", ".", "contrib", ".", "lookup", ".", "string_to_index_table_from_tensor", "(", "labels", ")", "features", "[", "name", "]", "=", "table", ".", "lookup", "(", "features", "[", "name", "]", ")", "return", "features", ",", "target" ]
40.094737
0.009736
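_scale_tensor() is referenced above but not shown; the usual linear map from [range_min, range_max] onto [scale_min, scale_max] would be (a sketch, not the project's definition):

def scale(x, range_min, range_max, scale_min, scale_max):
    # Linear map of [range_min, range_max] onto [scale_min, scale_max].
    frac = (x - range_min) / float(range_max - range_min)
    return scale_min + frac * (scale_max - scale_min)

assert scale(5.0, 0.0, 10.0, -1.0, 1.0) == 0.0    # midpoint -> 0
assert scale(10.0, 0.0, 10.0, -2.0, 2.0) == 2.0   # max -> +value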
def query_all(self): """ Query all records without limit and offset. """ return self.query_model(self.model, self.condition, order_by=self.order_by, group_by=self.group_by, having=self.having)
[ "def", "query_all", "(", "self", ")", ":", "return", "self", ".", "query_model", "(", "self", ".", "model", ",", "self", ".", "condition", ",", "order_by", "=", "self", ".", "order_by", ",", "group_by", "=", "self", ".", "group_by", ",", "having", "=", "self", ".", "having", ")" ]
42.666667
0.011494
def validate_is_document_type(option, value): """Validate the type of method arguments that expect a MongoDB document.""" if not isinstance(value, (collections.MutableMapping, RawBSONDocument)): raise TypeError("%s must be an instance of dict, bson.son.SON, " "bson.raw_bson.RawBSONDocument, or " "a type that inherits from " "collections.MutableMapping" % (option,))
[ "def", "validate_is_document_type", "(", "option", ",", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "(", "collections", ".", "MutableMapping", ",", "RawBSONDocument", ")", ")", ":", "raise", "TypeError", "(", "\"%s must be an instance of dict, bson.son.SON, \"", "\"bson.raw_bson.RawBSONDocument, or \"", "\"a type that inherits from \"", "\"collections.MutableMapping\"", "%", "(", "option", ",", ")", ")" ]
64.142857
0.002198
def set_setpoint(self, setpointvalue): """Set the setpoint. Args: setpointvalue (float): Setpoint [most often in degrees] """ _checkSetpointValue( setpointvalue, self.setpoint_max ) self.write_register( 4097, setpointvalue, 1)
[ "def", "set_setpoint", "(", "self", ",", "setpointvalue", ")", ":", "_checkSetpointValue", "(", "setpointvalue", ",", "self", ".", "setpoint_max", ")", "self", ".", "write_register", "(", "4097", ",", "setpointvalue", ",", "1", ")" ]
32.444444
0.023333
def _infer_decorator_callchain(node): """Detect decorator call chaining and see if the end result is a static or a classmethod. """ if not isinstance(node, FunctionDef): return None if not node.parent: return None try: result = next(node.infer_call_result(node.parent)) except exceptions.InferenceError: return None if isinstance(result, bases.Instance): result = result._proxied if isinstance(result, ClassDef): if result.is_subtype_of("%s.classmethod" % BUILTINS): return "classmethod" if result.is_subtype_of("%s.staticmethod" % BUILTINS): return "staticmethod" return None
[ "def", "_infer_decorator_callchain", "(", "node", ")", ":", "if", "not", "isinstance", "(", "node", ",", "FunctionDef", ")", ":", "return", "None", "if", "not", "node", ".", "parent", ":", "return", "None", "try", ":", "result", "=", "next", "(", "node", ".", "infer_call_result", "(", "node", ".", "parent", ")", ")", "except", "exceptions", ".", "InferenceError", ":", "return", "None", "if", "isinstance", "(", "result", ",", "bases", ".", "Instance", ")", ":", "result", "=", "result", ".", "_proxied", "if", "isinstance", "(", "result", ",", "ClassDef", ")", ":", "if", "result", ".", "is_subtype_of", "(", "\"%s.classmethod\"", "%", "BUILTINS", ")", ":", "return", "\"classmethod\"", "if", "result", ".", "is_subtype_of", "(", "\"%s.staticmethod\"", "%", "BUILTINS", ")", ":", "return", "\"staticmethod\"", "return", "None" ]
33.85
0.001437
def parse_extension_item_param( header: str, pos: int, header_name: str ) -> Tuple[ExtensionParameter, int]: """ Parse a single extension parameter from ``header`` at the given position. Return a ``(name, value)`` pair and the new position. Raise :exc:`~websockets.exceptions.InvalidHeaderFormat` on invalid inputs. """ # Extract parameter name. name, pos = parse_token(header, pos, header_name) pos = parse_OWS(header, pos) # Extract parameter value, if there is one. value: Optional[str] = None if peek_ahead(header, pos) == "=": pos = parse_OWS(header, pos + 1) if peek_ahead(header, pos) == '"': pos_before = pos # for proper error reporting below value, pos = parse_quoted_string(header, pos, header_name) # https://tools.ietf.org/html/rfc6455#section-9.1 says: the value # after quoted-string unescaping MUST conform to the 'token' ABNF. if _token_re.fullmatch(value) is None: raise InvalidHeaderFormat( header_name, "invalid quoted header content", header, pos_before ) else: value, pos = parse_token(header, pos, header_name) pos = parse_OWS(header, pos) return (name, value), pos
[ "def", "parse_extension_item_param", "(", "header", ":", "str", ",", "pos", ":", "int", ",", "header_name", ":", "str", ")", "->", "Tuple", "[", "ExtensionParameter", ",", "int", "]", ":", "# Extract parameter name.", "name", ",", "pos", "=", "parse_token", "(", "header", ",", "pos", ",", "header_name", ")", "pos", "=", "parse_OWS", "(", "header", ",", "pos", ")", "# Extract parameter value, if there is one.", "value", ":", "Optional", "[", "str", "]", "=", "None", "if", "peek_ahead", "(", "header", ",", "pos", ")", "==", "\"=\"", ":", "pos", "=", "parse_OWS", "(", "header", ",", "pos", "+", "1", ")", "if", "peek_ahead", "(", "header", ",", "pos", ")", "==", "'\"'", ":", "pos_before", "=", "pos", "# for proper error reporting below", "value", ",", "pos", "=", "parse_quoted_string", "(", "header", ",", "pos", ",", "header_name", ")", "# https://tools.ietf.org/html/rfc6455#section-9.1 says: the value", "# after quoted-string unescaping MUST conform to the 'token' ABNF.", "if", "_token_re", ".", "fullmatch", "(", "value", ")", "is", "None", ":", "raise", "InvalidHeaderFormat", "(", "header_name", ",", "\"invalid quoted header content\"", ",", "header", ",", "pos_before", ")", "else", ":", "value", ",", "pos", "=", "parse_token", "(", "header", ",", "pos", ",", "header_name", ")", "pos", "=", "parse_OWS", "(", "header", ",", "pos", ")", "return", "(", "name", ",", "value", ")", ",", "pos" ]
39.6875
0.001537
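Tracing parse_extension_item_param() above on concrete inputs (return positions are character offsets into the header string; worked out by hand from the code, so treat as illustrative):

header = 'client_max_window_bits=15'
# parse_extension_item_param(header, 0, 'Sec-WebSocket-Extensions')
#   -> (('client_max_window_bits', '15'), 25)
# and for a bare parameter with no '=' and no value:
# parse_extension_item_param('server_no_context_takeover', 0, ...)
#   -> (('server_no_context_takeover', None), 26)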
def pipe(self, f, *args, **kwargs): """Generic composition function to enable expression pipelining. Parameters ---------- f : function or (function, arg_name) tuple If the expression needs to be passed as anything other than the first argument to the function, pass a tuple with the argument name. For example, (f, 'data') if the function f expects a 'data' keyword args : positional arguments kwargs : keyword arguments Examples -------- >>> import ibis >>> t = ibis.table([('a', 'int64'), ('b', 'string')], name='t') >>> f = lambda a: (a + 1).name('a') >>> g = lambda a: (a * 2).name('a') >>> result1 = t.a.pipe(f).pipe(g) >>> result1 # doctest: +NORMALIZE_WHITESPACE ref_0 UnboundTable[table] name: t schema: a : int64 b : string a = Multiply[int64*] left: a = Add[int64*] left: a = Column[int64*] 'a' from table ref_0 right: Literal[int8] 1 right: Literal[int8] 2 >>> result2 = g(f(t.a)) # equivalent to the above >>> result1.equals(result2) True Returns ------- result : result type of passed function """ if isinstance(f, tuple): f, data_keyword = f kwargs = kwargs.copy() kwargs[data_keyword] = self return f(*args, **kwargs) else: return f(self, *args, **kwargs)
[ "def", "pipe", "(", "self", ",", "f", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "f", ",", "tuple", ")", ":", "f", ",", "data_keyword", "=", "f", "kwargs", "=", "kwargs", ".", "copy", "(", ")", "kwargs", "[", "data_keyword", "]", "=", "self", "return", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "return", "f", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
30.566038
0.001196
def get_sphinx_autodoc( self, depth=None, exclude=None, width=72, error=False, raised=False, no_comment=False, ): r""" Return exception list in `reStructuredText`_ auto-determining callable name. :param depth: Hierarchy levels to include in the exceptions list (overrides default **depth** argument; see :py:attr:`pexdoc.ExDoc.depth`). If None exceptions at all depths are included :type depth: non-negative integer or None :param exclude: List of (potentially partial) module and callable names to exclude from exceptions list (overrides default **exclude** argument, see :py:attr:`pexdoc.ExDoc.exclude`). If None all callables are included :type exclude: list of strings or None :param width: Maximum width of the lines of text (minimum 40) :type width: integer :param error: Flag that indicates whether an exception should be raised if the callable is not found in the callables exceptions database (True) or not (False) :type error: boolean :param raised: Flag that indicates whether only exceptions that were raised (and presumably caught) should be documented (True) or all registered exceptions should be documented (False) :type raised: boolean :param no_comment: Flag that indicates whether a `reStructuredText`_ comment labeling the callable (method, function or class property) should be printed (False) or not (True) before the exceptions documentation :type no_comment: boolean :raises: * RuntimeError (Argument \\`depth\\` is not valid) * RuntimeError (Argument \\`error\\` is not valid) * RuntimeError (Argument \\`exclude\\` is not valid) * RuntimeError (Argument \\`no_comment\\` is not valid) * RuntimeError (Argument \\`raised\\` is not valid) * RuntimeError (Argument \\`width\\` is not valid) * RuntimeError (Callable not found in exception list: *[name]*) * RuntimeError (Unable to determine callable name) """ # This code is cog-specific: cog code file name is the module # file name, a plus (+), and then the line number where the # cog function is frame = sys._getframe(1) index = frame.f_code.co_filename.rfind("+") fname = os.path.abspath(frame.f_code.co_filename[:index]) # Find name of callable based on module name and line number # within that module, then get the exceptions by using the # get_sphinx_doc() method with this information line_num = int(frame.f_code.co_filename[index + 1 :]) module_db = self._module_obj_db[fname] names = [callable_dict["name"] for callable_dict in module_db] line_nums = [callable_dict["line"] for callable_dict in module_db] name = names[bisect.bisect(line_nums, line_num) - 1] return self.get_sphinx_doc( name=name, depth=depth, exclude=exclude, width=width, error=error, raised=raised, no_comment=no_comment, )
[ "def", "get_sphinx_autodoc", "(", "self", ",", "depth", "=", "None", ",", "exclude", "=", "None", ",", "width", "=", "72", ",", "error", "=", "False", ",", "raised", "=", "False", ",", "no_comment", "=", "False", ",", ")", ":", "# This code is cog-specific: cog code file name is the module", "# file name, a plus (+), and then the line number where the", "# cog function is", "frame", "=", "sys", ".", "_getframe", "(", "1", ")", "index", "=", "frame", ".", "f_code", ".", "co_filename", ".", "rfind", "(", "\"+\"", ")", "fname", "=", "os", ".", "path", ".", "abspath", "(", "frame", ".", "f_code", ".", "co_filename", "[", ":", "index", "]", ")", "# Find name of callable based on module name and line number", "# within that module, then get the exceptions by using the", "# get_sphinx_doc() method with this information", "line_num", "=", "int", "(", "frame", ".", "f_code", ".", "co_filename", "[", "index", "+", "1", ":", "]", ")", "module_db", "=", "self", ".", "_module_obj_db", "[", "fname", "]", "names", "=", "[", "callable_dict", "[", "\"name\"", "]", "for", "callable_dict", "in", "module_db", "]", "line_nums", "=", "[", "callable_dict", "[", "\"line\"", "]", "for", "callable_dict", "in", "module_db", "]", "name", "=", "names", "[", "bisect", ".", "bisect", "(", "line_nums", ",", "line_num", ")", "-", "1", "]", "return", "self", ".", "get_sphinx_doc", "(", "name", "=", "name", ",", "depth", "=", "depth", ",", "exclude", "=", "exclude", ",", "width", "=", "width", ",", "error", "=", "error", ",", "raised", "=", "raised", ",", "no_comment", "=", "no_comment", ",", ")" ]
39.574713
0.001417
def UpdateUserCredentials(client_id, client_secret, refresh_token, adwords_manager_cid, developer_token): """Update the credentials associated with application user. Args: client_id: str Client Id retrieved from the developer's console. client_secret: str Client Secret retrieved from the developer's console. refresh_token: str Refresh token generated with the above client id/secret. adwords_manager_cid: str Customer Id for the AdWords manager account. developer_token: str Developer Token for the AdWords account. """ app_user = AppUser.query(AppUser.user == users.get_current_user()).fetch()[0] app_user.client_id = client_id app_user.client_secret = client_secret app_user.refresh_token = refresh_token app_user.adwords_manager_cid = adwords_manager_cid app_user.developer_token = developer_token app_user.put()
[ "def", "UpdateUserCredentials", "(", "client_id", ",", "client_secret", ",", "refresh_token", ",", "adwords_manager_cid", ",", "developer_token", ")", ":", "app_user", "=", "AppUser", ".", "query", "(", "AppUser", ".", "user", "==", "users", ".", "get_current_user", "(", ")", ")", ".", "fetch", "(", ")", "[", "0", "]", "app_user", ".", "client_id", "=", "client_id", "app_user", ".", "client_secret", "=", "client_secret", "app_user", ".", "refresh_token", "=", "refresh_token", "app_user", ".", "adwords_manager_cid", "=", "adwords_manager_cid", "app_user", ".", "developer_token", "=", "developer_token", "app_user", ".", "put", "(", ")" ]
43.35
0.010158
def relate_obs_ids_to_chosen_alts(obs_id_array, alt_id_array, choice_array): """ Creates a dictionary that relates each unique alternative id to the set of observations ids that chose the given alternative. Parameters ---------- obs_id_array : 1D ndarray of ints. Should be a long-format array of observation ids. Each element should correspond to the unique id of the unit of observation that corresponds to the given row of the long-format data. Note that each unit of observation may have more than one associated choice situation. alt_id_array : 1D ndarray of ints. Should be a long-format array of alternative ids. Each element should denote the unique id of the alternative that corresponds to the given row of the long format data. choice_array : 1D ndarray of ints. Each element should be either a one or a zero, indicating whether the alternative on the given row of the long format data was chosen or not. Returns ------- chosen_alts_to_obs_ids : dict. Each key will be a unique value from `alt_id_array`. Each key's value will be a 1D ndarray that contains the sorted, unique observation ids of those observational units that chose the given alternative. """ # Figure out which units of observation chose each alternative. chosen_alts_to_obs_ids = {} for alt_id in np.sort(np.unique(alt_id_array)): # Determine which observations chose the current alternative. selection_condition =\ np.where((alt_id_array == alt_id) & (choice_array == 1)) # Store the sorted, unique ids that chose the current alternative. chosen_alts_to_obs_ids[alt_id] =\ np.sort(np.unique(obs_id_array[selection_condition])) # Return the desired dictionary. return chosen_alts_to_obs_ids
[ "def", "relate_obs_ids_to_chosen_alts", "(", "obs_id_array", ",", "alt_id_array", ",", "choice_array", ")", ":", "# Figure out which units of observation chose each alternative.", "chosen_alts_to_obs_ids", "=", "{", "}", "for", "alt_id", "in", "np", ".", "sort", "(", "np", ".", "unique", "(", "alt_id_array", ")", ")", ":", "# Determine which observations chose the current alternative.", "selection_condition", "=", "np", ".", "where", "(", "(", "alt_id_array", "==", "alt_id", ")", "&", "(", "choice_array", "==", "1", ")", ")", "# Store the sorted, unique ids that chose the current alternative.", "chosen_alts_to_obs_ids", "[", "alt_id", "]", "=", "np", ".", "sort", "(", "np", ".", "unique", "(", "obs_id_array", "[", "selection_condition", "]", ")", ")", "# Return the desired dictionary.", "return", "chosen_alts_to_obs_ids" ]
44.651163
0.00051
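A tiny worked example for relate_obs_ids_to_chosen_alts() above:

import numpy as np

obs_ids = np.array([1, 1, 2, 2, 3, 3])
alt_ids = np.array([10, 20, 10, 20, 10, 20])
choices = np.array([1, 0, 0, 1, 1, 0])
result = relate_obs_ids_to_chosen_alts(obs_ids, alt_ids, choices)
# result == {10: array([1, 3]), 20: array([2])}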
def extract_mfd_params(src): """ Extracts the MFD parameters from an object """ tags = get_taglist(src) if "incrementalMFD" in tags: mfd_node = src.nodes[tags.index("incrementalMFD")] elif "truncGutenbergRichterMFD" in tags: mfd_node = src.nodes[tags.index("truncGutenbergRichterMFD")] elif "arbitraryMFD" in tags: mfd_node = src.nodes[tags.index("arbitraryMFD")] elif "YoungsCoppersmithMFD" in tags: mfd_node = src.nodes[tags.index("YoungsCoppersmithMFD")] else: raise ValueError("Source %s contains no supported MFD type!" % src.tag) data = [] rates = [] for key, param, vtype in MFD_PARAMS: if key in mfd_node.attrib and mfd_node.attrib[key] is not None: data.append((param, mfd_node.attrib[key])) else: data.append((param, None)) if "incrementalMFD" in mfd_node.tag or "arbitraryMFD" in mfd_node.tag: # Extract Rates rates = ~mfd_node.occurRates n_r = len(rates) if n_r > MAX_RATES: raise ValueError("Number of rates in source %s too large " "to be placed into shapefile" % src.tag) rate_dict = dict([(key, rates[i] if i < n_r else None) for i, (key, _) in enumerate(RATE_PARAMS)]) elif "YoungsCoppersmithMFD" in mfd_node.tag: rate_dict = dict([(key, mfd_node.attrib['characteristicRate']) for i, (key, _) in enumerate(RATE_PARAMS)]) else: rate_dict = dict([(key, None) for i, (key, _) in enumerate(RATE_PARAMS)]) return dict(data), rate_dict
[ "def", "extract_mfd_params", "(", "src", ")", ":", "tags", "=", "get_taglist", "(", "src", ")", "if", "\"incrementalMFD\"", "in", "tags", ":", "mfd_node", "=", "src", ".", "nodes", "[", "tags", ".", "index", "(", "\"incrementalMFD\"", ")", "]", "elif", "\"truncGutenbergRichterMFD\"", "in", "tags", ":", "mfd_node", "=", "src", ".", "nodes", "[", "tags", ".", "index", "(", "\"truncGutenbergRichterMFD\"", ")", "]", "elif", "\"arbitraryMFD\"", "in", "tags", ":", "mfd_node", "=", "src", ".", "nodes", "[", "tags", ".", "index", "(", "\"arbitraryMFD\"", ")", "]", "elif", "\"YoungsCoppersmithMFD\"", "in", "tags", ":", "mfd_node", "=", "src", ".", "nodes", "[", "tags", ".", "index", "(", "\"YoungsCoppersmithMFD\"", ")", "]", "else", ":", "raise", "ValueError", "(", "\"Source %s contains no supported MFD type!\"", "%", "src", ".", "tag", ")", "data", "=", "[", "]", "rates", "=", "[", "]", "for", "key", ",", "param", ",", "vtype", "in", "MFD_PARAMS", ":", "if", "key", "in", "mfd_node", ".", "attrib", "and", "mfd_node", ".", "attrib", "[", "key", "]", "is", "not", "None", ":", "data", ".", "append", "(", "(", "param", ",", "mfd_node", ".", "attrib", "[", "key", "]", ")", ")", "else", ":", "data", ".", "append", "(", "(", "param", ",", "None", ")", ")", "if", "(", "\"incrementalMFD\"", "or", "\"arbitraryMFD\"", ")", "in", "mfd_node", ".", "tag", ":", "# Extract Rates", "rates", "=", "~", "mfd_node", ".", "occurRates", "n_r", "=", "len", "(", "rates", ")", "if", "n_r", ">", "MAX_RATES", ":", "raise", "ValueError", "(", "\"Number of rates in source %s too large \"", "\"to be placed into shapefile\"", "%", "src", ".", "tag", ")", "rate_dict", "=", "dict", "(", "[", "(", "key", ",", "rates", "[", "i", "]", "if", "i", "<", "n_r", "else", "None", ")", "for", "i", ",", "(", "key", ",", "_", ")", "in", "enumerate", "(", "RATE_PARAMS", ")", "]", ")", "elif", "\"YoungsCoppersmithMFD\"", "in", "mfd_node", ".", "tag", ":", "rate_dict", "=", "dict", "(", "[", "(", "key", ",", "mfd_node", ".", "attrib", "[", "'characteristicRate'", "]", ")", "for", "i", ",", "(", "key", ",", "_", ")", "in", "enumerate", "(", "RATE_PARAMS", ")", "]", ")", "else", ":", "rate_dict", "=", "dict", "(", "[", "(", "key", ",", "None", ")", "for", "i", ",", "(", "key", ",", "_", ")", "in", "enumerate", "(", "RATE_PARAMS", ")", "]", ")", "return", "dict", "(", "data", ")", ",", "rate_dict" ]
42.473684
0.000606
def infer(args: argparse.Namespace) -> None: """ :args: An argparse.Namespace object. This is the function called when the 'infer' sub-command is passed as an argument to the CLI. """ try: last_tag = last_git_release_tag(git_tags()) except NoGitTagsException: print(SemVer(0, 1, 0)) exit(0) commit_log = git_commits_since_last_tag(last_tag) action = parse_commit_log(commit_log) last_ver = git_tag_to_semver(last_tag) if action == 'min': new_ver = last_ver.bump_minor() elif action == 'maj': new_ver = last_ver.bump_major() else: new_ver = last_ver.bump_patch() print(new_ver)
[ "def", "infer", "(", "args", ":", "argparse", ".", "Namespace", ")", "->", "None", ":", "try", ":", "last_tag", "=", "last_git_release_tag", "(", "git_tags", "(", ")", ")", "except", "NoGitTagsException", ":", "print", "(", "SemVer", "(", "0", ",", "1", ",", "0", ")", ")", "exit", "(", "0", ")", "commit_log", "=", "git_commits_since_last_tag", "(", "last_tag", ")", "action", "=", "parse_commit_log", "(", "commit_log", ")", "last_ver", "=", "git_tag_to_semver", "(", "last_tag", ")", "if", "action", "==", "'min'", ":", "new_ver", "=", "last_ver", ".", "bump_minor", "(", ")", "elif", "action", "==", "'maj'", ":", "new_ver", "=", "last_ver", ".", "bump_major", "(", ")", "else", ":", "new_ver", "=", "last_ver", ".", "bump_patch", "(", ")", "print", "(", "new_ver", ")" ]
25.384615
0.00146
def delete(gandi, webacc, vhost, backend, port): """ Delete a webaccelerator, a vhost or a backend """ result = [] if webacc: result = gandi.webacc.delete(webacc) if backend: backends = backend for backend in backends: if 'port' not in backend: if not port: backend['port'] = click.prompt('Please set a port for ' 'backends. If you want to ' 'use a different port for ' 'each backend, use `-b ' 'ip:port`', type=int) else: backend['port'] = port result = gandi.webacc.backend_remove(backend) if vhost: vhosts = vhost for vhost in vhosts: result = gandi.webacc.vhost_remove(vhost) return result
[ "def", "delete", "(", "gandi", ",", "webacc", ",", "vhost", ",", "backend", ",", "port", ")", ":", "result", "=", "[", "]", "if", "webacc", ":", "result", "=", "gandi", ".", "webacc", ".", "delete", "(", "webacc", ")", "if", "backend", ":", "backends", "=", "backend", "for", "backend", "in", "backends", ":", "if", "'port'", "not", "in", "backend", ":", "if", "not", "port", ":", "backend", "[", "'port'", "]", "=", "click", ".", "prompt", "(", "'Please set a port for '", "'backends. If you want to '", "' different port for '", "'each backend, use `-b '", "'ip:port`'", ",", "type", "=", "int", ")", "else", ":", "backend", "[", "'port'", "]", "=", "port", "result", "=", "gandi", ".", "webacc", ".", "backend_remove", "(", "backend", ")", "if", "vhost", ":", "vhosts", "=", "vhost", "for", "vhost", "in", "vhosts", ":", "result", "=", "gandi", ".", "webacc", ".", "vhost_remove", "(", "vhost", ")", "return", "result" ]
36.230769
0.001034
def threshold_gradients(self, grad_thresh): """Creates a new DepthImage by zeroing out all depths where the magnitude of the gradient at that point is greater than grad_thresh. Parameters ---------- grad_thresh : float A threshold for the gradient magnitude. Returns ------- :obj:`DepthImage` A new DepthImage created from the thresholding operation. """ data = np.copy(self._data) gx, gy = self.gradients() gradients = np.zeros([gx.shape[0], gx.shape[1], 2]) gradients[:, :, 0] = gx gradients[:, :, 1] = gy gradient_mags = np.linalg.norm(gradients, axis=2) ind = np.where(gradient_mags > grad_thresh) data[ind[0], ind[1]] = 0.0 return DepthImage(data, self._frame)
[ "def", "threshold_gradients", "(", "self", ",", "grad_thresh", ")", ":", "data", "=", "np", ".", "copy", "(", "self", ".", "_data", ")", "gx", ",", "gy", "=", "self", ".", "gradients", "(", ")", "gradients", "=", "np", ".", "zeros", "(", "[", "gx", ".", "shape", "[", "0", "]", ",", "gx", ".", "shape", "[", "1", "]", ",", "2", "]", ")", "gradients", "[", ":", ",", ":", ",", "0", "]", "=", "gx", "gradients", "[", ":", ",", ":", ",", "1", "]", "=", "gy", "gradient_mags", "=", "np", ".", "linalg", ".", "norm", "(", "gradients", ",", "axis", "=", "2", ")", "ind", "=", "np", ".", "where", "(", "gradient_mags", ">", "grad_thresh", ")", "data", "[", "ind", "[", "0", "]", ",", "ind", "[", "1", "]", "]", "=", "0.0", "return", "DepthImage", "(", "data", ",", "self", ".", "_frame", ")" ]
34.166667
0.002372
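The masking step above, restated as a plain NumPy sketch (np.gradient stands in for the class's gradients() helper, which is an assumption about its behaviour):

import numpy as np

depth = np.random.default_rng(0).uniform(0.5, 2.0, size=(48, 64))
gy, gx = np.gradient(depth)
mags = np.hypot(gx, gy)
depth[mags > 0.05] = 0.0   # zero depths at strong depth discontinuities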
def boards(self, startAt=0, maxResults=50, type=None, name=None, projectKeyOrID=None): """Get a list of board resources. :param startAt: The starting index of the returned boards. Base index: 0. :param maxResults: The maximum number of boards to return per page. Default: 50 :param type: Filters results to boards of the specified type. Valid values: scrum, kanban. :param name: Filters results to boards that match or partially match the specified name. :param projectKeyOrID: Filters results to boards that match the specified project key or ID. :rtype: ResultList[Board] When the old GreenHopper private API is used, paging is not enabled and all parameters are ignored. """ params = {} if type: params['type'] = type if name: params['name'] = name if projectKeyOrID: params['projectKeyOrId'] = projectKeyOrID if self._options['agile_rest_path'] == GreenHopperResource.GREENHOPPER_REST_PATH: # Old, private API did not support pagination, all records were present in response, # and no parameters were supported. if startAt or maxResults or params: warnings.warn('Old private GreenHopper API is used, all parameters will be ignored.', Warning) r_json = self._get_json('rapidviews/list', base=self.AGILE_BASE_URL) boards = [Board(self._options, self._session, raw_boards_json) for raw_boards_json in r_json['views']] return ResultList(boards, 0, len(boards), len(boards), True) else: return self._fetch_pages(Board, 'values', 'board', startAt, maxResults, params, base=self.AGILE_BASE_URL)
[ "def", "boards", "(", "self", ",", "startAt", "=", "0", ",", "maxResults", "=", "50", ",", "type", "=", "None", ",", "name", "=", "None", ",", "projectKeyOrID", "=", "None", ")", ":", "params", "=", "{", "}", "if", "type", ":", "params", "[", "'type'", "]", "=", "type", "if", "name", ":", "params", "[", "'name'", "]", "=", "name", "if", "projectKeyOrID", ":", "params", "[", "'projectKeyOrId'", "]", "=", "projectKeyOrID", "if", "self", ".", "_options", "[", "'agile_rest_path'", "]", "==", "GreenHopperResource", ".", "GREENHOPPER_REST_PATH", ":", "# Old, private API did not support pagination, all records were present in response,", "# and no parameters were supported.", "if", "startAt", "or", "maxResults", "or", "params", ":", "warnings", ".", "warn", "(", "'Old private GreenHopper API is used, all parameters will be ignored.'", ",", "Warning", ")", "r_json", "=", "self", ".", "_get_json", "(", "'rapidviews/list'", ",", "base", "=", "self", ".", "AGILE_BASE_URL", ")", "boards", "=", "[", "Board", "(", "self", ".", "_options", ",", "self", ".", "_session", ",", "raw_boards_json", ")", "for", "raw_boards_json", "in", "r_json", "[", "'views'", "]", "]", "return", "ResultList", "(", "boards", ",", "0", ",", "len", "(", "boards", ")", ",", "len", "(", "boards", ")", ",", "True", ")", "else", ":", "return", "self", ".", "_fetch_pages", "(", "Board", ",", "'values'", ",", "'board'", ",", "startAt", ",", "maxResults", ",", "params", ",", "base", "=", "self", ".", "AGILE_BASE_URL", ")" ]
55.451613
0.008576
def onepara(R): """Converts an ill-conditioned correlation matrix into a well-conditioned matrix with one common correlation coefficient Parameters: ----------- R : ndarray an ill-conditioned correlation matrix, e.g. oxyba.illcond_corrmat Return: ------- cmat : ndarray DxD matrix with +1 as diagonal elements and 1 common coefficient for all other relations. """ import numpy as np import warnings d = R.shape[0] if d < 2: raise Exception(( "More than one variable is required. " "Supply at least a 2x2 matrix.")) # the explicit solution x = (np.sum(R) - np.trace(R)) / (d**2 - d) if x < (-1. / (d - 1)) or x > 1: warnings.warn("No analytic solution found x={:.8f}".format(x)) return None else: C = np.eye(d) C[np.logical_not(C)] = x return C
[ "def", "onepara", "(", "R", ")", ":", "import", "numpy", "as", "np", "import", "warnings", "d", "=", "R", ".", "shape", "[", "0", "]", "if", "d", "<", "2", ":", "raise", "Exception", "(", "(", "\"More than one variable is required.\"", "\"Supply at least a 2x2 matrix.\"", ")", ")", "# the explicit solution", "x", "=", "(", "np", ".", "sum", "(", "R", ")", "+", "np", ".", "trace", "(", "R", ")", ")", "/", "(", "d", "**", "2", "-", "d", ")", "if", "x", "<", "(", "-", "1.", "/", "(", "d", "-", "1", ")", ")", "or", "x", ">", "1", ":", "warnings", ".", "warn", "(", "\"No analytic solution found x={:.8f}\"", ".", "format", "(", "x", ")", ")", "return", "None", "else", ":", "C", "=", "np", ".", "eye", "(", "d", ")", "C", "[", "np", ".", "logical_not", "(", "C", ")", "]", "=", "x", "return", "C" ]
23.631579
0.00107
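The closed form above is the least-squares fit of a single common coefficient: minimizing sum_{i!=j} (R_ij - x)^2 gives x as the mean off-diagonal entry, (sum(R) - trace(R)) / (d**2 - d). A quick numeric check:

import numpy as np

R = np.array([[1.0, 0.3, 0.7],
              [0.3, 1.0, 0.5],
              [0.7, 0.5, 1.0]])
d = R.shape[0]
x = (np.sum(R) - np.trace(R)) / (d**2 - d)
assert np.isclose(x, np.mean([0.3, 0.7, 0.5]))   # the off-diagonal mean, 0.5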
def _update_views_binary(self, result, other, binop): ''' @result: weldarray that is being updated @other: weldarray or scalar. (result binop other) @binop: str, operation to perform. FIXME: the common indexing pattern for parent/child in _update_view might be too expensive (uses if statements (unnecessary checks when updating child) and wouldn't be ideal to update a large parent). ''' update_str_template = '{e2}{binop}e' v = result._weldarray_view if isinstance(other, weldarray): lookup_ind = 'i-{st}'.format(st=v.start) # update the base array to include the context from other v.base_array.weldobj.update(other.weldobj) e2 = 'lookup({arr2},{i}L)'.format(arr2 = other.weldobj.weld_code, i = lookup_ind) else: # other is just a scalar. e2 = str(other) + DTYPE_SUFFIXES[result._weld_type.__str__()] update_str = update_str_template.format(e2 = e2, binop=binop) v.base_array._update_range(v.start, v.end, update_str)
[ "def", "_update_views_binary", "(", "self", ",", "result", ",", "other", ",", "binop", ")", ":", "update_str_template", "=", "'{e2}{binop}e'", "v", "=", "result", ".", "_weldarray_view", "if", "isinstance", "(", "other", ",", "weldarray", ")", ":", "lookup_ind", "=", "'i-{st}'", ".", "format", "(", "st", "=", "v", ".", "start", ")", "# update the base array to include the context from other", "v", ".", "base_array", ".", "weldobj", ".", "update", "(", "other", ".", "weldobj", ")", "e2", "=", "'lookup({arr2},{i}L)'", ".", "format", "(", "arr2", "=", "other", ".", "weldobj", ".", "weld_code", ",", "i", "=", "lookup_ind", ")", "else", ":", "# other is just a scalar.", "e2", "=", "str", "(", "other", ")", "+", "DTYPE_SUFFIXES", "[", "result", ".", "_weld_type", ".", "__str__", "(", ")", "]", "update_str", "=", "update_str_template", ".", "format", "(", "e2", "=", "e2", ",", "binop", "=", "binop", ")", "v", ".", "base_array", ".", "_update_range", "(", "v", ".", "start", ",", "v", ".", "end", ",", "update_str", ")" ]
47.173913
0.009937
def get_suffixes(): """Get a list of all the filename suffixes supported by libvips. Returns: [string] """ names = [] if at_least_libvips(8, 8): array = vips_lib.vips_foreign_get_suffixes() i = 0 while array[i] != ffi.NULL: name = _to_string(array[i]) if name not in names: names.append(name) glib_lib.g_free(array[i]) i += 1 glib_lib.g_free(array) return names
[ "def", "get_suffixes", "(", ")", ":", "names", "=", "[", "]", "if", "at_least_libvips", "(", "8", ",", "8", ")", ":", "array", "=", "vips_lib", ".", "vips_foreign_get_suffixes", "(", ")", "i", "=", "0", "while", "array", "[", "i", "]", "!=", "ffi", ".", "NULL", ":", "name", "=", "_to_string", "(", "array", "[", "i", "]", ")", "if", "name", "not", "in", "names", ":", "names", ".", "append", "(", "name", ")", "glib_lib", ".", "g_free", "(", "array", "[", "i", "]", ")", "i", "+=", "1", "glib_lib", ".", "g_free", "(", "array", ")", "return", "names" ]
21.5
0.002024
def cookieDomainForRequest(self, request): """ Pick a domain to use when setting cookies. @type request: L{nevow.inevow.IRequest} @param request: Request to determine cookie domain for @rtype: C{str} or C{None} @return: Domain name to use when setting cookies, or C{None} to indicate that only the domain in the request should be used """ host = request.getHeader('host') if host is None: # This is a malformed request that we cannot possibly handle # safely, fall back to the default behaviour. return None host = host.split(':')[0] for domain in self._domains: suffix = "." + domain if host == domain: # The request is for a domain which is directly recognized. if self._enableSubdomains: # Subdomains are enabled, so the suffix is returned to # enable the cookie for this domain and all its subdomains. return suffix # Subdomains are not enabled, so None is returned to allow the # default restriction, which will enable this cookie only for # the domain in the request, to apply. return None if self._enableSubdomains and host.endswith(suffix): # The request is for a subdomain of a directly recognized # domain and subdomains are enabled. Drop the unrecognized # subdomain portion and return the suffix to enable the cookie # for this domain and all its subdomains. return suffix if self._enableSubdomains: # No directly recognized domain matched the request. If subdomains # are enabled, prefix the request domain with "." to make the # cookie valid for that domain and all its subdomains. This # probably isn't extremely useful. Perhaps it shouldn't work this # way. return "." + host # Subdomains are disabled and the domain from the request was not # recognized. Return None to get the default behavior. return None
[ "def", "cookieDomainForRequest", "(", "self", ",", "request", ")", ":", "host", "=", "request", ".", "getHeader", "(", "'host'", ")", "if", "host", "is", "None", ":", "# This is a malformed request that we cannot possibly handle", "# safely, fall back to the default behaviour.", "return", "None", "host", "=", "host", ".", "split", "(", "':'", ")", "[", "0", "]", "for", "domain", "in", "self", ".", "_domains", ":", "suffix", "=", "\".\"", "+", "domain", "if", "host", "==", "domain", ":", "# The request is for a domain which is directly recognized.", "if", "self", ".", "_enableSubdomains", ":", "# Subdomains are enabled, so the suffix is returned to", "# enable the cookie for this domain and all its subdomains.", "return", "suffix", "# Subdomains are not enabled, so None is returned to allow the", "# default restriction, which will enable this cookie only for", "# the domain in the request, to apply.", "return", "None", "if", "self", ".", "_enableSubdomains", "and", "host", ".", "endswith", "(", "suffix", ")", ":", "# The request is for a subdomain of a directly recognized", "# domain and subdomains are enabled. Drop the unrecognized", "# subdomain portion and return the suffix to enable the cookie", "# for this domain and all its subdomains.", "return", "suffix", "if", "self", ".", "_enableSubdomains", ":", "# No directly recognized domain matched the request. If subdomains", "# are enabled, prefix the request domain with \".\" to make the", "# cookie valid for that domain and all its subdomains. This", "# probably isn't extremely useful. Perhaps it shouldn't work this", "# way.", "return", "\".\"", "+", "host", "# Subdomains are disabled and the domain from the request was not", "# recognized. Return None to get the default behavior.", "return", "None" ]
43.98
0.00089
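Behaviour of cookieDomainForRequest() above at a glance, traced by hand for a factory configured with domains=['example.com'] (illustrative, not test output):

#   Host header         subdomains enabled    subdomains disabled
#   example.com         '.example.com'        None (request domain only)
#   shop.example.com    '.example.com'        None
#   other.org           '.other.org'          None
#   (no Host header)    None                  None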
def _write_callback(connection_id, data_buffer, data_length_pointer): """ Callback called by Secure Transport to actually write to the socket :param connection_id: An integer identifying the connection :param data_buffer: A char pointer FFI type containing the data to write :param data_length_pointer: A size_t pointer FFI type of the amount of data to write. Will be overwritten with the amount of data actually written on return. :return: An integer status code of the result - 0 for success """ try: self = _connection_refs.get(connection_id) if not self: socket = _socket_refs.get(connection_id) else: socket = self._socket if not self and not socket: return 0 data_length = deref(data_length_pointer) data = bytes_from_buffer(data_buffer, data_length) if self and not self._done_handshake: self._client_hello += data error = None try: sent = socket.send(data) except (socket_.error) as e: error = e.errno if error is not None and error != errno.EAGAIN: if error == errno.ECONNRESET or error == errno.EPIPE: return SecurityConst.errSSLClosedNoNotify return SecurityConst.errSSLClosedAbort if sent != data_length: pointer_set(data_length_pointer, sent) return SecurityConst.errSSLWouldBlock return 0 except (KeyboardInterrupt) as e: self._exception = e return SecurityConst.errSSLPeerUserCancelled
[ "def", "_write_callback", "(", "connection_id", ",", "data_buffer", ",", "data_length_pointer", ")", ":", "try", ":", "self", "=", "_connection_refs", ".", "get", "(", "connection_id", ")", "if", "not", "self", ":", "socket", "=", "_socket_refs", ".", "get", "(", "connection_id", ")", "else", ":", "socket", "=", "self", ".", "_socket", "if", "not", "self", "and", "not", "socket", ":", "return", "0", "data_length", "=", "deref", "(", "data_length_pointer", ")", "data", "=", "bytes_from_buffer", "(", "data_buffer", ",", "data_length", ")", "if", "self", "and", "not", "self", ".", "_done_handshake", ":", "self", ".", "_client_hello", "+=", "data", "error", "=", "None", "try", ":", "sent", "=", "socket", ".", "send", "(", "data", ")", "except", "(", "socket_", ".", "error", ")", "as", "e", ":", "error", "=", "e", ".", "errno", "if", "error", "is", "not", "None", "and", "error", "!=", "errno", ".", "EAGAIN", ":", "if", "error", "==", "errno", ".", "ECONNRESET", "or", "error", "==", "errno", ".", "EPIPE", ":", "return", "SecurityConst", ".", "errSSLClosedNoNotify", "return", "SecurityConst", ".", "errSSLClosedAbort", "if", "sent", "!=", "data_length", ":", "pointer_set", "(", "data_length_pointer", ",", "sent", ")", "return", "SecurityConst", ".", "errSSLWouldBlock", "return", "0", "except", "(", "KeyboardInterrupt", ")", "as", "e", ":", "self", ".", "_exception", "=", "e", "return", "SecurityConst", ".", "errSSLPeerUserCancelled" ]
30.037736
0.000608
def plot_multitrack(multitrack, filename=None, mode='separate', track_label='name', preset='default', cmaps=None, xtick='auto', ytick='octave', xticklabel=True, yticklabel='auto', tick_loc=None, tick_direction='in', label='both', grid='both', grid_linestyle=':', grid_linewidth=.5): """ Plot the pianorolls or save a plot of them. Parameters ---------- filename : str The filename to which the plot is saved. If None, save nothing. mode : {'separate', 'stacked', 'hybrid'} A string that indicates the plotting mode to use. Defaults to 'separate'. - In 'separate' mode, all the tracks are plotted separately. - In 'stacked' mode, a color is assigned based on `cmaps` to the pianoroll of each track and the pianorolls are stacked and plotted as a colored image with RGB channels. - In 'hybrid' mode, the drum tracks are merged into a 'Drums' track, while the other tracks are merged into an 'Others' track, and the two merged tracks are then plotted separately. track_label : {'name', 'program', 'family', 'off'} A string that indicates what to use as labels to the track. When `mode` is 'hybrid', all options other than 'off' will label the two tracks with 'Drums' and 'Others'. preset : {'default', 'plain', 'frame'} A string that indicates the preset theme to use. - In 'default' preset, the ticks, grid and labels are on. - In 'frame' preset, the ticks and grid are both off. - In 'plain' preset, the x- and y-axis are both off. cmaps : tuple or list The `matplotlib.colors.Colormap` instances or colormap codes to use. - When `mode` is 'separate', each element will be passed to each call of :func:`matplotlib.pyplot.imshow`. Defaults to ('Blues', 'Oranges', 'Greens', 'Reds', 'Purples', 'Greys'). - When `mode` is 'stacked', a color is assigned based on `cmaps` to the pianoroll of each track. Defaults to ('hsv'). - When `mode` is 'hybrid', the first (second) element is used in the 'Drums' ('Others') track. Defaults to ('Blues', 'Greens'). xtick : {'auto', 'beat', 'step', 'off'} A string that indicates what to use as ticks along the x-axis. If 'auto' is given, automatically set to 'beat' if `beat_resolution` is also given, and to 'step' otherwise. Defaults to 'auto'. ytick : {'octave', 'pitch', 'off'} A string that indicates what to use as ticks along the y-axis. Defaults to 'octave'. xticklabel : bool Whether to add tick labels along the x-axis. Only effective when `xtick` is not 'off'. yticklabel : {'auto', 'name', 'number', 'off'} If 'name', use octave name and pitch name (key name when `is_drum` is True) as tick labels along the y-axis. If 'number', use pitch number. If 'auto', set to 'name' when `ytick` is 'octave' and 'number' when `ytick` is 'pitch'. Defaults to 'auto'. Only effective when `ytick` is not 'off'. tick_loc : tuple or list The locations to put the ticks. Available elements are 'bottom', 'top', 'left' and 'right'. Defaults to ('bottom', 'left'). tick_direction : {'in', 'out', 'inout'} A string that indicates where to put the ticks. Defaults to 'in'. Only effective when one of `xtick` and `ytick` is on. label : {'x', 'y', 'both', 'off'} A string that indicates whether to add labels to the x-axis and y-axis. Defaults to 'both'. grid : {'x', 'y', 'both', 'off'} A string that indicates whether to add grids to the x-axis, y-axis, both or neither. Defaults to 'both'. grid_linestyle : str Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linestyle' argument. grid_linewidth : float Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linewidth' argument.
Returns ------- fig : `matplotlib.figure.Figure` object A :class:`matplotlib.figure.Figure` object. axs : list List of :class:`matplotlib.axes.Axes` object. """ if not HAS_MATPLOTLIB: raise ImportError("matplotlib package is required for plotting " "supports.") def get_track_label(track_label, track=None): """Convenient function to get track labels""" if track_label == 'name': return track.name elif track_label == 'program': return pretty_midi.program_to_instrument_name(track.program) elif track_label == 'family': return pretty_midi.program_to_instrument_class(track.program) elif track is None: return track_label def add_tracklabel(ax, track_label, track=None): """Convenient function for adding track labels""" if not ax.get_ylabel(): return ax.set_ylabel(get_track_label(track_label, track) + '\n\n' + ax.get_ylabel()) multitrack.check_validity() if not multitrack.tracks: raise ValueError("There is no track to plot.") if mode not in ('separate', 'stacked', 'hybrid'): raise ValueError("`mode` must be one of {'separate', 'stacked', " "'hybrid'}.") if track_label not in ('name', 'program', 'family', 'off'): raise ValueError("`track_label` must be one of {'name', 'program', " "'family'}.") if cmaps is None: if mode == 'separate': cmaps = ('Blues', 'Oranges', 'Greens', 'Reds', 'Purples', 'Greys') elif mode == 'stacked': cmaps = ('hsv') else: cmaps = ('Blues', 'Greens') num_track = len(multitrack.tracks) downbeats = multitrack.get_downbeat_steps() if mode == 'separate': if num_track > 1: fig, axs = plt.subplots(num_track, sharex=True) else: fig, ax = plt.subplots() axs = [ax] for idx, track in enumerate(multitrack.tracks): now_xticklabel = xticklabel if idx < num_track else False plot_pianoroll(axs[idx], track.pianoroll, False, multitrack.beat_resolution, downbeats, preset=preset, cmap=cmaps[idx%len(cmaps)], xtick=xtick, ytick=ytick, xticklabel=now_xticklabel, yticklabel=yticklabel, tick_loc=tick_loc, tick_direction=tick_direction, label=label, grid=grid, grid_linestyle=grid_linestyle, grid_linewidth=grid_linewidth) if track_label != 'none': add_tracklabel(axs[idx], track_label, track) if num_track > 1: fig.subplots_adjust(hspace=0) if filename is not None: plt.savefig(filename) return (fig, axs) elif mode == 'stacked': is_all_drum = True for track in multitrack.tracks: if not track.is_drum: is_all_drum = False fig, ax = plt.subplots() stacked = multitrack.get_stacked_pianorolls() colormap = matplotlib.cm.get_cmap(cmaps[0]) cmatrix = colormap(np.arange(0, 1, 1 / num_track))[:, :3] recolored = np.matmul(stacked.reshape(-1, num_track), cmatrix) stacked = recolored.reshape(stacked.shape[:2] + (3, )) plot_pianoroll(ax, stacked, is_all_drum, multitrack.beat_resolution, downbeats, preset=preset, xtick=xtick, ytick=ytick, xticklabel=xticklabel, yticklabel=yticklabel, tick_loc=tick_loc, tick_direction=tick_direction, label=label, grid=grid, grid_linestyle=grid_linestyle, grid_linewidth=grid_linewidth) if track_label != 'none': patches = [Patch(color=cmatrix[idx], label=get_track_label(track_label, track)) for idx, track in enumerate(multitrack.tracks)] plt.legend(handles=patches) if filename is not None: plt.savefig(filename) return (fig, [ax]) elif mode == 'hybrid': drums = [i for i, track in enumerate(multitrack.tracks) if track.is_drum] others = [i for i in range(len(multitrack.tracks)) if i not in drums] merged_drums = multitrack.get_merged_pianoroll(drums) merged_others = multitrack.get_merged_pianoroll(others) fig, (ax1, ax2) = plt.subplots(2, sharex=True, sharey=True) plot_pianoroll(ax1, 
merged_drums, True, multitrack.beat_resolution, downbeats, preset=preset, cmap=cmaps[0], xtick=xtick, ytick=ytick, xticklabel=xticklabel, yticklabel=yticklabel, tick_loc=tick_loc, tick_direction=tick_direction, label=label, grid=grid, grid_linestyle=grid_linestyle, grid_linewidth=grid_linewidth) plot_pianoroll(ax2, merged_others, False, multitrack.beat_resolution, downbeats, preset=preset, cmap=cmaps[1], ytick=ytick, xticklabel=xticklabel, yticklabel=yticklabel, tick_loc=tick_loc, tick_direction=tick_direction, label=label, grid=grid, grid_linestyle=grid_linestyle, grid_linewidth=grid_linewidth) fig.subplots_adjust(hspace=0) if track_label != 'none': add_tracklabel(ax1, 'Drums') add_tracklabel(ax2, 'Others') if filename is not None: plt.savefig(filename) return (fig, [ax1, ax2])
[ "def", "plot_multitrack", "(", "multitrack", ",", "filename", "=", "None", ",", "mode", "=", "'separate'", ",", "track_label", "=", "'name'", ",", "preset", "=", "'default'", ",", "cmaps", "=", "None", ",", "xtick", "=", "'auto'", ",", "ytick", "=", "'octave'", ",", "xticklabel", "=", "True", ",", "yticklabel", "=", "'auto'", ",", "tick_loc", "=", "None", ",", "tick_direction", "=", "'in'", ",", "label", "=", "'both'", ",", "grid", "=", "'both'", ",", "grid_linestyle", "=", "':'", ",", "grid_linewidth", "=", ".5", ")", ":", "if", "not", "HAS_MATPLOTLIB", ":", "raise", "ImportError", "(", "\"matplotlib package is required for plotting \"", "\"supports.\"", ")", "def", "get_track_label", "(", "track_label", ",", "track", "=", "None", ")", ":", "\"\"\"Convenient function to get track labels\"\"\"", "if", "track_label", "==", "'name'", ":", "return", "track", ".", "name", "elif", "track_label", "==", "'program'", ":", "return", "pretty_midi", ".", "program_to_instrument_name", "(", "track", ".", "program", ")", "elif", "track_label", "==", "'family'", ":", "return", "pretty_midi", ".", "program_to_instrument_class", "(", "track", ".", "program", ")", "elif", "track", "is", "None", ":", "return", "track_label", "def", "add_tracklabel", "(", "ax", ",", "track_label", ",", "track", "=", "None", ")", ":", "\"\"\"Convenient function for adding track labels\"\"\"", "if", "not", "ax", ".", "get_ylabel", "(", ")", ":", "return", "ax", ".", "set_ylabel", "(", "get_track_label", "(", "track_label", ",", "track", ")", "+", "'\\n\\n'", "+", "ax", ".", "get_ylabel", "(", ")", ")", "multitrack", ".", "check_validity", "(", ")", "if", "not", "multitrack", ".", "tracks", ":", "raise", "ValueError", "(", "\"There is no track to plot.\"", ")", "if", "mode", "not", "in", "(", "'separate'", ",", "'stacked'", ",", "'hybrid'", ")", ":", "raise", "ValueError", "(", "\"`mode` must be one of {'separate', 'stacked', \"", "\"'hybrid'}.\"", ")", "if", "track_label", "not", "in", "(", "'name'", ",", "'program'", ",", "'family'", ",", "'off'", ")", ":", "raise", "ValueError", "(", "\"`track_label` must be one of {'name', 'program', \"", "\"'family'}.\"", ")", "if", "cmaps", "is", "None", ":", "if", "mode", "==", "'separate'", ":", "cmaps", "=", "(", "'Blues'", ",", "'Oranges'", ",", "'Greens'", ",", "'Reds'", ",", "'Purples'", ",", "'Greys'", ")", "elif", "mode", "==", "'stacked'", ":", "cmaps", "=", "(", "'hsv'", ")", "else", ":", "cmaps", "=", "(", "'Blues'", ",", "'Greens'", ")", "num_track", "=", "len", "(", "multitrack", ".", "tracks", ")", "downbeats", "=", "multitrack", ".", "get_downbeat_steps", "(", ")", "if", "mode", "==", "'separate'", ":", "if", "num_track", ">", "1", ":", "fig", ",", "axs", "=", "plt", ".", "subplots", "(", "num_track", ",", "sharex", "=", "True", ")", "else", ":", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", ")", "axs", "=", "[", "ax", "]", "for", "idx", ",", "track", "in", "enumerate", "(", "multitrack", ".", "tracks", ")", ":", "now_xticklabel", "=", "xticklabel", "if", "idx", "<", "num_track", "else", "False", "plot_pianoroll", "(", "axs", "[", "idx", "]", ",", "track", ".", "pianoroll", ",", "False", ",", "multitrack", ".", "beat_resolution", ",", "downbeats", ",", "preset", "=", "preset", ",", "cmap", "=", "cmaps", "[", "idx", "%", "len", "(", "cmaps", ")", "]", ",", "xtick", "=", "xtick", ",", "ytick", "=", "ytick", ",", "xticklabel", "=", "now_xticklabel", ",", "yticklabel", "=", "yticklabel", ",", "tick_loc", "=", "tick_loc", ",", "tick_direction", "=", "tick_direction", ",", 
"label", "=", "label", ",", "grid", "=", "grid", ",", "grid_linestyle", "=", "grid_linestyle", ",", "grid_linewidth", "=", "grid_linewidth", ")", "if", "track_label", "!=", "'none'", ":", "add_tracklabel", "(", "axs", "[", "idx", "]", ",", "track_label", ",", "track", ")", "if", "num_track", ">", "1", ":", "fig", ".", "subplots_adjust", "(", "hspace", "=", "0", ")", "if", "filename", "is", "not", "None", ":", "plt", ".", "savefig", "(", "filename", ")", "return", "(", "fig", ",", "axs", ")", "elif", "mode", "==", "'stacked'", ":", "is_all_drum", "=", "True", "for", "track", "in", "multitrack", ".", "tracks", ":", "if", "not", "track", ".", "is_drum", ":", "is_all_drum", "=", "False", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", ")", "stacked", "=", "multitrack", ".", "get_stacked_pianorolls", "(", ")", "colormap", "=", "matplotlib", ".", "cm", ".", "get_cmap", "(", "cmaps", "[", "0", "]", ")", "cmatrix", "=", "colormap", "(", "np", ".", "arange", "(", "0", ",", "1", ",", "1", "/", "num_track", ")", ")", "[", ":", ",", ":", "3", "]", "recolored", "=", "np", ".", "matmul", "(", "stacked", ".", "reshape", "(", "-", "1", ",", "num_track", ")", ",", "cmatrix", ")", "stacked", "=", "recolored", ".", "reshape", "(", "stacked", ".", "shape", "[", ":", "2", "]", "+", "(", "3", ",", ")", ")", "plot_pianoroll", "(", "ax", ",", "stacked", ",", "is_all_drum", ",", "multitrack", ".", "beat_resolution", ",", "downbeats", ",", "preset", "=", "preset", ",", "xtick", "=", "xtick", ",", "ytick", "=", "ytick", ",", "xticklabel", "=", "xticklabel", ",", "yticklabel", "=", "yticklabel", ",", "tick_loc", "=", "tick_loc", ",", "tick_direction", "=", "tick_direction", ",", "label", "=", "label", ",", "grid", "=", "grid", ",", "grid_linestyle", "=", "grid_linestyle", ",", "grid_linewidth", "=", "grid_linewidth", ")", "if", "track_label", "!=", "'none'", ":", "patches", "=", "[", "Patch", "(", "color", "=", "cmatrix", "[", "idx", "]", ",", "label", "=", "get_track_label", "(", "track_label", ",", "track", ")", ")", "for", "idx", ",", "track", "in", "enumerate", "(", "multitrack", ".", "tracks", ")", "]", "plt", ".", "legend", "(", "handles", "=", "patches", ")", "if", "filename", "is", "not", "None", ":", "plt", ".", "savefig", "(", "filename", ")", "return", "(", "fig", ",", "[", "ax", "]", ")", "elif", "mode", "==", "'hybrid'", ":", "drums", "=", "[", "i", "for", "i", ",", "track", "in", "enumerate", "(", "multitrack", ".", "tracks", ")", "if", "track", ".", "is_drum", "]", "others", "=", "[", "i", "for", "i", "in", "range", "(", "len", "(", "multitrack", ".", "tracks", ")", ")", "if", "i", "not", "in", "drums", "]", "merged_drums", "=", "multitrack", ".", "get_merged_pianoroll", "(", "drums", ")", "merged_others", "=", "multitrack", ".", "get_merged_pianoroll", "(", "others", ")", "fig", ",", "(", "ax1", ",", "ax2", ")", "=", "plt", ".", "subplots", "(", "2", ",", "sharex", "=", "True", ",", "sharey", "=", "True", ")", "plot_pianoroll", "(", "ax1", ",", "merged_drums", ",", "True", ",", "multitrack", ".", "beat_resolution", ",", "downbeats", ",", "preset", "=", "preset", ",", "cmap", "=", "cmaps", "[", "0", "]", ",", "xtick", "=", "xtick", ",", "ytick", "=", "ytick", ",", "xticklabel", "=", "xticklabel", ",", "yticklabel", "=", "yticklabel", ",", "tick_loc", "=", "tick_loc", ",", "tick_direction", "=", "tick_direction", ",", "label", "=", "label", ",", "grid", "=", "grid", ",", "grid_linestyle", "=", "grid_linestyle", ",", "grid_linewidth", "=", "grid_linewidth", ")", "plot_pianoroll", "(", "ax2", ",", 
"merged_others", ",", "False", ",", "multitrack", ".", "beat_resolution", ",", "downbeats", ",", "preset", "=", "preset", ",", "cmap", "=", "cmaps", "[", "1", "]", ",", "ytick", "=", "ytick", ",", "xticklabel", "=", "xticklabel", ",", "yticklabel", "=", "yticklabel", ",", "tick_loc", "=", "tick_loc", ",", "tick_direction", "=", "tick_direction", ",", "label", "=", "label", ",", "grid", "=", "grid", ",", "grid_linestyle", "=", "grid_linestyle", ",", "grid_linewidth", "=", "grid_linewidth", ")", "fig", ".", "subplots_adjust", "(", "hspace", "=", "0", ")", "if", "track_label", "!=", "'none'", ":", "add_tracklabel", "(", "ax1", ",", "'Drums'", ")", "add_tracklabel", "(", "ax2", ",", "'Others'", ")", "if", "filename", "is", "not", "None", ":", "plt", ".", "savefig", "(", "filename", ")", "return", "(", "fig", ",", "[", "ax1", ",", "ax2", "]", ")" ]
43.419643
0.000402
def set_bool_param(params, name, value):
    """
    Set a boolean parameter if applicable.

    :param dict params:
        A dict containing API call parameters.
    :param str name:
        The name of the parameter to set.
    :param bool value:
        The value of the parameter. If ``None``, the field will not be set.
        If ``True`` or ``False``, the relevant field in ``params`` will be
        set to ``'true'`` or ``'false'``. Any other value will raise a
        `ValueError`.

    :returns: ``None``
    """
    if value is None:
        return
    if value is True:
        params[name] = 'true'
    elif value is False:
        params[name] = 'false'
    else:
        raise ValueError("Parameter '%s' must be boolean or None, got %r." % (
            name, value))
[ "def", "set_bool_param", "(", "params", ",", "name", ",", "value", ")", ":", "if", "value", "is", "None", ":", "return", "if", "value", "is", "True", ":", "params", "[", "name", "]", "=", "'true'", "elif", "value", "is", "False", ":", "params", "[", "name", "]", "=", "'false'", "else", ":", "raise", "ValueError", "(", "\"Parameter '%s' must be boolean or None, got %r.\"", "%", "(", "name", ",", "value", ")", ")" ]
29.6
0.001309
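A minimal usage sketch of the contract described above (the `params` dict and key names are illustrative):

params = {}
set_bool_param(params, 'verbose', True)    # params == {'verbose': 'true'}
set_bool_param(params, 'dry_run', False)   # adds 'dry_run': 'false'
set_bool_param(params, 'unused', None)     # key is not added
set_bool_param(params, 'count', 3)         # raises ValueError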
def find_event(name):
    """Actually import the event represented by name

    Raises the `EventNotFoundError` if it's not possible to find the
    event class referred to by `name`.
    """
    try:
        module, klass = parse_event_name(name)
        return getattr(import_module(module), klass)
    except (ImportError, AttributeError):
        raise EventNotFoundError(
            ('Event "{}" not found. '
             'Make sure you have a class called "{}" inside the "{}" '
             'module.'.format(name, klass, module)))
[ "def", "find_event", "(", "name", ")", ":", "try", ":", "module", ",", "klass", "=", "parse_event_name", "(", "name", ")", "return", "getattr", "(", "import_module", "(", "module", ")", ",", "klass", ")", "except", "(", "ImportError", ",", "AttributeError", ")", ":", "raise", "EventNotFoundError", "(", "(", "'Event \"{}\" not found. '", "'Make sure you have a class called \"{}\" inside the \"{}\" '", "'module.'", ".", "format", "(", "name", ",", "klass", ",", "module", ")", ")", ")" ]
37.214286
0.001873
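A hedged usage sketch: the dotted name and event class are hypothetical, and `parse_event_name` is assumed to split 'myapp.events.UserCreated' into ('myapp.events', 'UserCreated'):

UserCreated = find_event('myapp.events.UserCreated')  # imports myapp.events, returns its UserCreated class
find_event('myapp.events.Missing')                    # raises EventNotFoundError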
def buff(self, target, buff, **kwargs):
    """
    Summon \a buff and apply it to \a target
    If keyword arguments are given, attempt to set the given
    values to the buff. Example:
    player.buff(target, health=random.randint(1, 5))
    NOTE: Any Card can buff any other Card. The controller of
    the Card that buffs the target becomes the controller of
    the buff.
    """
    ret = self.controller.card(buff, self)
    ret.source = self
    ret.apply(target)
    for k, v in kwargs.items():
        setattr(ret, k, v)
    return ret
[ "def", "buff", "(", "self", ",", "target", ",", "buff", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "self", ".", "controller", ".", "card", "(", "buff", ",", "self", ")", "ret", ".", "source", "=", "self", "ret", ".", "apply", "(", "target", ")", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "setattr", "(", "ret", ",", "k", ",", "v", ")", "return", "ret" ]
33.066667
0.031373
def _get_marX(self, attr_name, default):
    """
    Generalized method to get margin values.
    """
    if self.tcPr is None:
        return Emu(default)
    return Emu(int(self.tcPr.get(attr_name, default)))
[ "def", "_get_marX", "(", "self", ",", "attr_name", ",", "default", ")", ":", "if", "self", ".", "tcPr", "is", "None", ":", "return", "Emu", "(", "default", ")", "return", "Emu", "(", "int", "(", "self", ".", "tcPr", ".", "get", "(", "attr_name", ",", "default", ")", ")", ")" ]
32.571429
0.008547
def create_index(self, *fields):
    """Create an index on the specified field names

    An index on a field is a mapping between the values taken by the field
    and the sorted list of the ids of the records whose field is equal to
    this value

    For each indexed field, an attribute of self is created, an instance
    of the class Index (see above). Its name is the field name, with the
    prefix _ to avoid name conflicts
    """
    reset = False
    for f in fields:
        if f not in self.fields:
            raise NameError("%s is not a field name %s" % (f, self.fields))
        # initialize the indices
        if self.mode == "open" and f in self.indices:
            continue
        reset = True
        self.indices[f] = {}
        for _id, record in self.records.items():
            # use bisect to quickly insert the id in the list
            bisect.insort(self.indices[f].setdefault(record[f], []), _id)
        # create a new attribute of self, used to find the records
        # by this index
        setattr(self, '_' + f, Index(self, f))
    if reset:
        self.commit()
[ "def", "create_index", "(", "self", ",", "*", "fields", ")", ":", "reset", "=", "False", "for", "f", "in", "fields", ":", "if", "not", "f", "in", "self", ".", "fields", ":", "raise", "NameError", ",", "\"%s is not a field name %s\"", "%", "(", "f", ",", "self", ".", "fields", ")", "# initialize the indices\r", "if", "self", ".", "mode", "==", "\"open\"", "and", "f", "in", "self", ".", "indices", ":", "continue", "reset", "=", "True", "self", ".", "indices", "[", "f", "]", "=", "{", "}", "for", "_id", ",", "record", "in", "self", ".", "records", ".", "iteritems", "(", ")", ":", "# use bisect to quickly insert the id in the list\r", "bisect", ".", "insort", "(", "self", ".", "indices", "[", "f", "]", ".", "setdefault", "(", "record", "[", "f", "]", ",", "[", "]", ")", ",", "_id", ")", "# create a new attribute of self, used to find the records\r", "# by this index\r", "setattr", "(", "self", ",", "'_'", "+", "f", ",", "Index", "(", "self", ",", "f", ")", ")", "if", "reset", ":", "self", ".", "commit", "(", ")" ]
42.655172
0.014229
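A usage sketch under stated assumptions: `db` is an instance of the surrounding database class, records carry a 'name' field, and the generated Index attribute supports item access (as the docstring's description suggests):

db.create_index('name')   # builds the index and commits
ids = db._name['Alice']   # sorted ids of records whose 'name' equals 'Alice' (assumed Index API)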
def infer(query, replacements=None, root_type=None,
          libs=("stdcore", "stdmath")):
    """Determine the type of the query's output without actually running it.

    Arguments:
        query: A query object or string with the query.
        replacements: Build-time parameters to the query, either as dict or
            as an array (for positional interpolation).
        root_type: The types of variables to be supplied to the query
            inference.
        libs: What standard libraries should be taken into account for the
            inference.

    Returns:
        The type of the query's output, if it can be determined. If
        undecidable, returns efilter.protocol.AnyType.

        NOTE: The inference returns the type of a row in the results, not
        of the actual Python object returned by 'apply'. For example, if a
        query returns multiple rows, each one of which is an integer, the
        type of the output is considered to be int, not a collection of
        rows.

    Examples:
        infer("5 + 5") # -> INumber
        infer("SELECT * FROM people WHERE age > 10") # -> AnyType

        # If root_type implements the IStructured reflection API:
        infer("SELECT * FROM people WHERE age > 10", root_type=...) # -> dict
    """
    # Always make the scope stack start with stdcore.
    if root_type:
        type_scope = scope.ScopeStack(std_core.MODULE, root_type)
    else:
        type_scope = scope.ScopeStack(std_core.MODULE)

    stdcore_included = False
    for lib in libs:
        if lib == "stdcore":
            stdcore_included = True
            continue

        module = std_core.LibraryModule.ALL_MODULES.get(lib)
        if not module:
            raise TypeError("No standard library module %r." % lib)

        type_scope = scope.ScopeStack(module, type_scope)

    if not stdcore_included:
        raise TypeError("'stdcore' must always be included.")

    query = q.Query(query, params=replacements)
    return infer_type.infer_type(query, type_scope)
[ "def", "infer", "(", "query", ",", "replacements", "=", "None", ",", "root_type", "=", "None", ",", "libs", "=", "(", "\"stdcore\"", ",", "\"stdmath\"", ")", ")", ":", "# Always make the scope stack start with stdcore.", "if", "root_type", ":", "type_scope", "=", "scope", ".", "ScopeStack", "(", "std_core", ".", "MODULE", ",", "root_type", ")", "else", ":", "type_scope", "=", "scope", ".", "ScopeStack", "(", "std_core", ".", "MODULE", ")", "stdcore_included", "=", "False", "for", "lib", "in", "libs", ":", "if", "lib", "==", "\"stdcore\"", ":", "stdcore_included", "=", "True", "continue", "module", "=", "std_core", ".", "LibraryModule", ".", "ALL_MODULES", ".", "get", "(", "lib", ")", "if", "not", "module", ":", "raise", "TypeError", "(", "\"No standard library module %r.\"", "%", "lib", ")", "type_scope", "=", "scope", ".", "ScopeStack", "(", "module", ",", "type_scope", ")", "if", "not", "stdcore_included", ":", "raise", "TypeError", "(", "\"'stdcore' must always be included.\"", ")", "query", "=", "q", ".", "Query", "(", "query", ",", "params", "=", "replacements", ")", "return", "infer_type", ".", "infer_type", "(", "query", ",", "type_scope", ")" ]
37.596154
0.001994
def next_frame_sv2p():
    """SV2P model hparams."""
    hparams = basic_stochastic.next_frame_basic_stochastic()
    hparams.optimizer = "true_adam"
    hparams.learning_rate_schedule = "constant"
    hparams.learning_rate_constant = 1e-3
    hparams.video_num_input_frames = 1
    hparams.video_num_target_frames = 3
    hparams.batch_size = 16
    hparams.bottom = {
        "inputs": modalities.video_raw_bottom,
        "targets": modalities.video_raw_targets_bottom,
    }
    hparams.loss = {
        "targets": modalities.video_l2_raw_loss,
    }
    hparams.top = {
        "targets": modalities.video_raw_top,
    }
    hparams.video_modality_loss_cutoff = 0.0
    hparams.scheduled_sampling_mode = "count"
    hparams.scheduled_sampling_k = 900.0
    hparams.add_hparam("reward_prediction", True)
    hparams.add_hparam("reward_prediction_stop_gradient", False)
    hparams.add_hparam("reward_prediction_buffer_size", 0)
    hparams.add_hparam("model_options", "CDNA")
    hparams.add_hparam("num_masks", 10)
    hparams.add_hparam("multi_latent", False)
    hparams.add_hparam("relu_shift", 1e-12)
    hparams.add_hparam("dna_kernel_size", 5)
    hparams.add_hparam("upsample_method", "conv2d_transpose")
    hparams.add_hparam("reward_model", "basic")
    hparams.add_hparam("visualize_logits_histogram", True)
    return hparams
[ "def", "next_frame_sv2p", "(", ")", ":", "hparams", "=", "basic_stochastic", ".", "next_frame_basic_stochastic", "(", ")", "hparams", ".", "optimizer", "=", "\"true_adam\"", "hparams", ".", "learning_rate_schedule", "=", "\"constant\"", "hparams", ".", "learning_rate_constant", "=", "1e-3", "hparams", ".", "video_num_input_frames", "=", "1", "hparams", ".", "video_num_target_frames", "=", "3", "hparams", ".", "batch_size", "=", "16", "hparams", ".", "bottom", "=", "{", "\"inputs\"", ":", "modalities", ".", "video_raw_bottom", ",", "\"targets\"", ":", "modalities", ".", "video_raw_targets_bottom", ",", "}", "hparams", ".", "loss", "=", "{", "\"targets\"", ":", "modalities", ".", "video_l2_raw_loss", ",", "}", "hparams", ".", "top", "=", "{", "\"targets\"", ":", "modalities", ".", "video_raw_top", ",", "}", "hparams", ".", "video_modality_loss_cutoff", "=", "0.0", "hparams", ".", "scheduled_sampling_mode", "=", "\"count\"", "hparams", ".", "scheduled_sampling_k", "=", "900.0", "hparams", ".", "add_hparam", "(", "\"reward_prediction\"", ",", "True", ")", "hparams", ".", "add_hparam", "(", "\"reward_prediction_stop_gradient\"", ",", "False", ")", "hparams", ".", "add_hparam", "(", "\"reward_prediction_buffer_size\"", ",", "0", ")", "hparams", ".", "add_hparam", "(", "\"model_options\"", ",", "\"CDNA\"", ")", "hparams", ".", "add_hparam", "(", "\"num_masks\"", ",", "10", ")", "hparams", ".", "add_hparam", "(", "\"multi_latent\"", ",", "False", ")", "hparams", ".", "add_hparam", "(", "\"relu_shift\"", ",", "1e-12", ")", "hparams", ".", "add_hparam", "(", "\"dna_kernel_size\"", ",", "5", ")", "hparams", ".", "add_hparam", "(", "\"upsample_method\"", ",", "\"conv2d_transpose\"", ")", "hparams", ".", "add_hparam", "(", "\"reward_model\"", ",", "\"basic\"", ")", "hparams", ".", "add_hparam", "(", "\"visualize_logits_histogram\"", ",", "True", ")", "return", "hparams" ]
36.558824
0.02116
def dump_pmean(self, filename):
    """ Dump parameter means (``fit.pmean``) into file ``filename``.

    ``fit.dump_pmean(filename)`` saves the means of the best-fit
    parameter values (``fit.pmean``) from a ``nonlinear_fit`` called
    ``fit``. These values are recovered using
    ``p0 = nonlinear_fit.load_parameters(filename)``
    where ``p0``'s layout is the same as ``fit.pmean``.

    The saved values can be used to initialize a later fit
    (``nonlinear_fit`` parameter ``p0``).
    """
    warnings.warn(
        "nonlinear_fit.dump_pmean deprecated; use pickle.dump instead",
        DeprecationWarning,
    )
    with open(filename, "wb") as f:
        if self.p0.shape is not None:
            pickle.dump(numpy.array(self.pmean), f)
        else:
            pickle.dump(collections.OrderedDict(self.pmean), f)
[ "def", "dump_pmean", "(", "self", ",", "filename", ")", ":", "warnings", ".", "warn", "(", "\"nonlinear_fit.dump_pmean deprecated; use pickle.dump instead\"", ",", "DeprecationWarning", ",", ")", "with", "open", "(", "filename", ",", "\"wb\"", ")", "as", "f", ":", "if", "self", ".", "p0", ".", "shape", "is", "not", "None", ":", "pickle", ".", "dump", "(", "numpy", ".", "array", "(", "self", ".", "pmean", ")", ",", "f", ")", "else", ":", "pickle", ".", "dump", "(", "collections", ".", "OrderedDict", "(", "self", ".", "pmean", ")", ",", "f", ")" ]
44.25
0.002212
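Since the method is deprecated, the replacement suggested by its own warning looks like this (a sketch; `fit` is assumed to be a nonlinear_fit instance and the filename is illustrative):

import pickle
with open('pmean.p', 'wb') as f:
    pickle.dump(fit.pmean, f)   # what the deprecation message recommends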
def _update_cov_model(self, strata_to_update='all'):
    """
    strata_to_update : array-like or 'all'
        array containing stratum indices to update
    """
    if strata_to_update == 'all':
        strata_to_update = self.strata.indices_
    #: Otherwise assume strata_to_update is valid (no duplicates etc.)

    #: Update covariance matrices
    #: We usually update only one stratum at a time, so for loop is ok
    n_sampled = np.clip(self.strata._n_sampled, 2, np.inf)  #: clipping at 2 avoids undef. cov
    factor = n_sampled / (n_sampled - 1)
    for k in strata_to_update:
        TP = self._BB_TP.theta_[k]
        PP = self._BB_PP.theta_[k]
        P = self._BB_P.theta_[k]
        self.cov_model_[k, 0, 0] = factor[k] * TP * (1 - TP)
        self.cov_model_[k, 0, 1] = factor[k] * TP * (1 - PP)
        self.cov_model_[k, 0, 2] = factor[k] * TP * (1 - P)
        self.cov_model_[k, 1, 1] = factor[k] * PP * (1 - PP)
        self.cov_model_[k, 1, 2] = factor[k] * (TP - PP * P)
        self.cov_model_[k, 2, 2] = factor[k] * P * (1 - P)
        self.cov_model_[k, 1, 0] = self.cov_model_[k, 0, 1]
        self.cov_model_[k, 2, 0] = self.cov_model_[k, 0, 2]
        self.cov_model_[k, 2, 1] = self.cov_model_[k, 1, 2]
[ "def", "_update_cov_model", "(", "self", ",", "strata_to_update", "=", "'all'", ")", ":", "if", "strata_to_update", "==", "'all'", ":", "strata_to_update", "=", "self", ".", "strata", ".", "indices_", "#: Otherwise assume strata_to_update is valid (no duplicates etc.)", "#: Update covariance matrices", "#: We usually update only one stratum at a time, so for loop is ok", "n_sampled", "=", "np", ".", "clip", "(", "self", ".", "strata", ".", "_n_sampled", ",", "2", ",", "np", ".", "inf", ")", "#: adding 2 avoids undef. cov", "factor", "=", "n_sampled", "/", "(", "n_sampled", "-", "1", ")", "for", "k", "in", "strata_to_update", ":", "TP", "=", "self", ".", "_BB_TP", ".", "theta_", "[", "k", "]", "PP", "=", "self", ".", "_BB_PP", ".", "theta_", "[", "k", "]", "P", "=", "self", ".", "_BB_P", ".", "theta_", "[", "k", "]", "self", ".", "cov_model_", "[", "k", ",", "0", ",", "0", "]", "=", "factor", "[", "k", "]", "*", "TP", "*", "(", "1", "-", "TP", ")", "self", ".", "cov_model_", "[", "k", ",", "0", ",", "1", "]", "=", "factor", "[", "k", "]", "*", "TP", "*", "(", "1", "-", "PP", ")", "self", ".", "cov_model_", "[", "k", ",", "0", ",", "2", "]", "=", "factor", "[", "k", "]", "*", "TP", "*", "(", "1", "-", "P", ")", "self", ".", "cov_model_", "[", "k", ",", "1", ",", "1", "]", "=", "factor", "[", "k", "]", "*", "PP", "*", "(", "1", "-", "PP", ")", "self", ".", "cov_model_", "[", "k", ",", "1", ",", "2", "]", "=", "factor", "[", "k", "]", "*", "(", "TP", "-", "PP", "*", "P", ")", "self", ".", "cov_model_", "[", "k", ",", "2", ",", "2", "]", "=", "factor", "[", "k", "]", "*", "P", "*", "(", "1", "-", "P", ")", "self", ".", "cov_model_", "[", "k", ",", "1", ",", "0", "]", "=", "self", ".", "cov_model_", "[", "k", ",", "0", ",", "1", "]", "self", ".", "cov_model_", "[", "k", ",", "2", ",", "0", "]", "=", "self", ".", "cov_model_", "[", "k", ",", "0", ",", "2", "]", "self", ".", "cov_model_", "[", "k", ",", "2", ",", "1", "]", "=", "self", ".", "cov_model_", "[", "k", ",", "1", ",", "2", "]" ]
47.148148
0.021555
def run(self, lines):
    """Filter method"""
    ret = []
    for line in lines:
        ret.append(re.sub(r'^#', '#' + ('#' * self.offset), line))
    return ret
[ "def", "run", "(", "self", ",", "lines", ")", ":", "ret", "=", "[", "]", "for", "line", "in", "lines", ":", "ret", ".", "append", "(", "re", ".", "sub", "(", "r'^#'", ",", "'#'", "+", "(", "'#'", "*", "self", ".", "offset", ")", ",", "line", ")", ")", "return", "ret" ]
25.428571
0.01087
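A small illustration of the filter's effect on Markdown headings; the owner class name is hypothetical and `offset` is assumed to have been set to 1 on the instance:

f = HeaderOffsetFilter()   # hypothetical owner class of run()
f.offset = 1
f.run(['# Title', 'text', '## Section'])
# -> ['## Title', 'text', '### Section']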
def copy(self, items=None):
    """Return a new NGram object with the same settings, and
    referencing the same items.  Copy is shallow in that
    each item is not recursively copied.  Optionally specify
    alternate items to populate the copy.

    >>> from ngram import NGram
    >>> from copy import deepcopy
    >>> n = NGram(['eggs', 'spam'])
    >>> m = n.copy()
    >>> m.add('ham')
    >>> sorted(list(n))
    ['eggs', 'spam']
    >>> sorted(list(m))
    ['eggs', 'ham', 'spam']
    >>> p = n.copy(['foo', 'bar'])
    >>> sorted(list(p))
    ['bar', 'foo']
    """
    return NGram(items if items is not None else self,
                 self.threshold, self.warp, self._key,
                 self.N, self._pad_len, self._pad_char)
[ "def", "copy", "(", "self", ",", "items", "=", "None", ")", ":", "return", "NGram", "(", "items", "if", "items", "is", "not", "None", "else", "self", ",", "self", ".", "threshold", ",", "self", ".", "warp", ",", "self", ".", "_key", ",", "self", ".", "N", ",", "self", ".", "_pad_len", ",", "self", ".", "_pad_char", ")" ]
36.454545
0.00243
def push(self: BoardT, move: Move) -> None:
    """
    Updates the position with the given move and puts it onto the
    move stack.

    >>> import chess
    >>>
    >>> board = chess.Board()
    >>>
    >>> Nf3 = chess.Move.from_uci("g1f3")
    >>> board.push(Nf3)  # Make the move

    >>> board.pop()  # Unmake the last move
    Move.from_uci('g1f3')

    Null moves just increment the move counters, switch turns and forfeit
    en passant capturing.

    :warning: Moves are not checked for legality.
    """
    # Push move and remember board state.
    move = self._to_chess960(move)
    self.move_stack.append(self._from_chess960(
        self.chess960, move.from_square, move.to_square,
        move.promotion, move.drop))
    self._stack.append(self._board_state())

    # Reset en passant square.
    ep_square = self.ep_square
    self.ep_square = None

    # Increment move counters.
    self.halfmove_clock += 1
    if self.turn == BLACK:
        self.fullmove_number += 1

    # On a null move, simply swap turns and reset the en passant square.
    if not move:
        self.turn = not self.turn
        return

    # Drops.
    if move.drop:
        self._set_piece_at(move.to_square, move.drop, self.turn)
        self.turn = not self.turn
        return

    # Zero the half-move clock.
    if self.is_zeroing(move):
        self.halfmove_clock = 0

    from_bb = BB_SQUARES[move.from_square]
    to_bb = BB_SQUARES[move.to_square]

    promoted = bool(self.promoted & from_bb)
    piece_type = self._remove_piece_at(move.from_square)
    assert piece_type is not None, \
        "push() expects move to be pseudo-legal, but got {} in {}".format(
            move, self.fen())
    capture_square = move.to_square
    captured_piece_type = self.piece_type_at(capture_square)

    # Update castling rights.
    self.castling_rights = self.clean_castling_rights() & ~to_bb & ~from_bb
    if piece_type == KING and not promoted:
        if self.turn == WHITE:
            self.castling_rights &= ~BB_RANK_1
        else:
            self.castling_rights &= ~BB_RANK_8
    elif captured_piece_type == KING and not self.promoted & to_bb:
        if self.turn == WHITE and square_rank(move.to_square) == 7:
            self.castling_rights &= ~BB_RANK_8
        elif self.turn == BLACK and square_rank(move.to_square) == 0:
            self.castling_rights &= ~BB_RANK_1

    # Handle special pawn moves.
    if piece_type == PAWN:
        diff = move.to_square - move.from_square

        if diff == 16 and square_rank(move.from_square) == 1:
            self.ep_square = move.from_square + 8
        elif diff == -16 and square_rank(move.from_square) == 6:
            self.ep_square = move.from_square - 8
        elif move.to_square == ep_square and abs(diff) in [7, 9] and not captured_piece_type:
            # Remove pawns captured en passant.
            down = -8 if self.turn == WHITE else 8
            capture_square = ep_square + down
            captured_piece_type = self._remove_piece_at(capture_square)

    # Promotion.
    if move.promotion:
        promoted = True
        piece_type = move.promotion

    # Castling.
    castling = piece_type == KING and self.occupied_co[self.turn] & to_bb
    if castling:
        a_side = square_file(move.to_square) < square_file(move.from_square)

        self._remove_piece_at(move.from_square)
        self._remove_piece_at(move.to_square)

        if a_side:
            self._set_piece_at(C1 if self.turn == WHITE else C8, KING, self.turn)
            self._set_piece_at(D1 if self.turn == WHITE else D8, ROOK, self.turn)
        else:
            self._set_piece_at(G1 if self.turn == WHITE else G8, KING, self.turn)
            self._set_piece_at(F1 if self.turn == WHITE else F8, ROOK, self.turn)

    # Put the piece on the target square.
    if not castling:
        was_promoted = bool(self.promoted & to_bb)
        self._set_piece_at(move.to_square, piece_type, self.turn, promoted)

        if captured_piece_type:
            self._push_capture(move, capture_square, captured_piece_type, was_promoted)

    # Swap turn.
    self.turn = not self.turn
[ "def", "push", "(", "self", ":", "BoardT", ",", "move", ":", "Move", ")", "->", "None", ":", "# Push move and remember board state.", "move", "=", "self", ".", "_to_chess960", "(", "move", ")", "self", ".", "move_stack", ".", "append", "(", "self", ".", "_from_chess960", "(", "self", ".", "chess960", ",", "move", ".", "from_square", ",", "move", ".", "to_square", ",", "move", ".", "promotion", ",", "move", ".", "drop", ")", ")", "self", ".", "_stack", ".", "append", "(", "self", ".", "_board_state", "(", ")", ")", "# Reset en passant square.", "ep_square", "=", "self", ".", "ep_square", "self", ".", "ep_square", "=", "None", "# Increment move counters.", "self", ".", "halfmove_clock", "+=", "1", "if", "self", ".", "turn", "==", "BLACK", ":", "self", ".", "fullmove_number", "+=", "1", "# On a null move, simply swap turns and reset the en passant square.", "if", "not", "move", ":", "self", ".", "turn", "=", "not", "self", ".", "turn", "return", "# Drops.", "if", "move", ".", "drop", ":", "self", ".", "_set_piece_at", "(", "move", ".", "to_square", ",", "move", ".", "drop", ",", "self", ".", "turn", ")", "self", ".", "turn", "=", "not", "self", ".", "turn", "return", "# Zero the half-move clock.", "if", "self", ".", "is_zeroing", "(", "move", ")", ":", "self", ".", "halfmove_clock", "=", "0", "from_bb", "=", "BB_SQUARES", "[", "move", ".", "from_square", "]", "to_bb", "=", "BB_SQUARES", "[", "move", ".", "to_square", "]", "promoted", "=", "bool", "(", "self", ".", "promoted", "&", "from_bb", ")", "piece_type", "=", "self", ".", "_remove_piece_at", "(", "move", ".", "from_square", ")", "assert", "piece_type", "is", "not", "None", ",", "\"push() expects move to be pseudo-legal, but got {} in {}\"", ".", "format", "(", "move", ",", "self", ".", "fen", "(", ")", ")", "capture_square", "=", "move", ".", "to_square", "captured_piece_type", "=", "self", ".", "piece_type_at", "(", "capture_square", ")", "# Update castling rights.", "self", ".", "castling_rights", "=", "self", ".", "clean_castling_rights", "(", ")", "&", "~", "to_bb", "&", "~", "from_bb", "if", "piece_type", "==", "KING", "and", "not", "promoted", ":", "if", "self", ".", "turn", "==", "WHITE", ":", "self", ".", "castling_rights", "&=", "~", "BB_RANK_1", "else", ":", "self", ".", "castling_rights", "&=", "~", "BB_RANK_8", "elif", "captured_piece_type", "==", "KING", "and", "not", "self", ".", "promoted", "&", "to_bb", ":", "if", "self", ".", "turn", "==", "WHITE", "and", "square_rank", "(", "move", ".", "to_square", ")", "==", "7", ":", "self", ".", "castling_rights", "&=", "~", "BB_RANK_8", "elif", "self", ".", "turn", "==", "BLACK", "and", "square_rank", "(", "move", ".", "to_square", ")", "==", "0", ":", "self", ".", "castling_rights", "&=", "~", "BB_RANK_1", "# Handle special pawn moves.", "if", "piece_type", "==", "PAWN", ":", "diff", "=", "move", ".", "to_square", "-", "move", ".", "from_square", "if", "diff", "==", "16", "and", "square_rank", "(", "move", ".", "from_square", ")", "==", "1", ":", "self", ".", "ep_square", "=", "move", ".", "from_square", "+", "8", "elif", "diff", "==", "-", "16", "and", "square_rank", "(", "move", ".", "from_square", ")", "==", "6", ":", "self", ".", "ep_square", "=", "move", ".", "from_square", "-", "8", "elif", "move", ".", "to_square", "==", "ep_square", "and", "abs", "(", "diff", ")", "in", "[", "7", ",", "9", "]", "and", "not", "captured_piece_type", ":", "# Remove pawns captured en passant.", "down", "=", "-", "8", "if", "self", ".", "turn", "==", "WHITE", "else", "8", "capture_square", 
"=", "ep_square", "+", "down", "captured_piece_type", "=", "self", ".", "_remove_piece_at", "(", "capture_square", ")", "# Promotion.", "if", "move", ".", "promotion", ":", "promoted", "=", "True", "piece_type", "=", "move", ".", "promotion", "# Castling.", "castling", "=", "piece_type", "==", "KING", "and", "self", ".", "occupied_co", "[", "self", ".", "turn", "]", "&", "to_bb", "if", "castling", ":", "a_side", "=", "square_file", "(", "move", ".", "to_square", ")", "<", "square_file", "(", "move", ".", "from_square", ")", "self", ".", "_remove_piece_at", "(", "move", ".", "from_square", ")", "self", ".", "_remove_piece_at", "(", "move", ".", "to_square", ")", "if", "a_side", ":", "self", ".", "_set_piece_at", "(", "C1", "if", "self", ".", "turn", "==", "WHITE", "else", "C8", ",", "KING", ",", "self", ".", "turn", ")", "self", ".", "_set_piece_at", "(", "D1", "if", "self", ".", "turn", "==", "WHITE", "else", "D8", ",", "ROOK", ",", "self", ".", "turn", ")", "else", ":", "self", ".", "_set_piece_at", "(", "G1", "if", "self", ".", "turn", "==", "WHITE", "else", "G8", ",", "KING", ",", "self", ".", "turn", ")", "self", ".", "_set_piece_at", "(", "F1", "if", "self", ".", "turn", "==", "WHITE", "else", "F8", ",", "ROOK", ",", "self", ".", "turn", ")", "# Put the piece on the target square.", "if", "not", "castling", ":", "was_promoted", "=", "bool", "(", "self", ".", "promoted", "&", "to_bb", ")", "self", ".", "_set_piece_at", "(", "move", ".", "to_square", ",", "piece_type", ",", "self", ".", "turn", ",", "promoted", ")", "if", "captured_piece_type", ":", "self", ".", "_push_capture", "(", "move", ",", "capture_square", ",", "captured_piece_type", ",", "was_promoted", ")", "# Swap turn.", "self", ".", "turn", "=", "not", "self", ".", "turn" ]
37.86087
0.002462
def parse_JSON(self, JSON_string):
    """
    Parses an *NO2Index* instance out of raw JSON data. Only certain
    properties of the data are used: if these properties are not found or
    cannot be parsed, an error is issued.

    :param JSON_string: a raw JSON string
    :type JSON_string: str
    :returns: an *NO2Index* instance or ``None`` if no data is available
    :raises: *ParseResponseError* if it is impossible to find or parse the
        data needed to build the result, *APIResponseError* if the JSON
        string embeds an HTTP status error

    """
    if JSON_string is None:
        raise parse_response_error.ParseResponseError('JSON data is None')
    d = json.loads(JSON_string)
    try:
        # -- reference time (strip away Z and T on ISO8601 format)
        t = d['time'].replace('Z', '+00').replace('T', ' ')
        reference_time = timeformatutils._ISO8601_to_UNIXtime(t)

        # -- reception time (now)
        reception_time = timeutils.now('unix')

        # -- location
        lon = float(d['location']['longitude'])
        lat = float(d['location']['latitude'])
        place = location.Location(None, lon, lat, None)

        # -- NO2 samples
        no2_samples = [dict(label=key,
                            precision=d['data'][key]['precision'],
                            value=d['data'][key]['value'])
                       for key in d['data']]
    except KeyError:
        raise parse_response_error.ParseResponseError(
            ''.join([__name__, ': impossible to parse NO2Index']))
    return no2index.NO2Index(reference_time, place, None, no2_samples,
                             reception_time)
[ "def", "parse_JSON", "(", "self", ",", "JSON_string", ")", ":", "if", "JSON_string", "is", "None", ":", "raise", "parse_response_error", ".", "ParseResponseError", "(", "'JSON data is None'", ")", "d", "=", "json", ".", "loads", "(", "JSON_string", ")", "try", ":", "# -- reference time (strip away Z and T on ISO8601 format)", "t", "=", "d", "[", "'time'", "]", ".", "replace", "(", "'Z'", ",", "'+00'", ")", ".", "replace", "(", "'T'", ",", "' '", ")", "reference_time", "=", "timeformatutils", ".", "_ISO8601_to_UNIXtime", "(", "t", ")", "# -- reception time (now)", "reception_time", "=", "timeutils", ".", "now", "(", "'unix'", ")", "# -- location", "lon", "=", "float", "(", "d", "[", "'location'", "]", "[", "'longitude'", "]", ")", "lat", "=", "float", "(", "d", "[", "'location'", "]", "[", "'latitude'", "]", ")", "place", "=", "location", ".", "Location", "(", "None", ",", "lon", ",", "lat", ",", "None", ")", "# -- CO samples", "no2_samples", "=", "[", "dict", "(", "label", "=", "key", ",", "precision", "=", "d", "[", "'data'", "]", "[", "key", "]", "[", "'precision'", "]", ",", "value", "=", "d", "[", "'data'", "]", "[", "key", "]", "[", "'value'", "]", ")", "for", "key", "in", "d", "[", "'data'", "]", "]", "except", "KeyError", ":", "raise", "parse_response_error", ".", "ParseResponseError", "(", "''", ".", "join", "(", "[", "__name__", ",", "': impossible to parse NO2Index'", "]", ")", ")", "return", "no2index", ".", "NO2Index", "(", "reference_time", ",", "place", ",", "None", ",", "no2_samples", ",", "reception_time", ")" ]
42.243902
0.001693
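A minimal JSON payload carrying exactly the fields the parser reads (values are illustrative; `parser` is assumed to be an instance of the surrounding class):

raw = ('{"time": "2016-10-01T13:07:01Z", '
       '"location": {"latitude": 0.0, "longitude": 9.2359}, '
       '"data": {"no2": {"precision": 1.2e-08, "value": 8.1e-08}}}')
index = parser.parse_JSON(raw)   # -> NO2Index with one labelled sample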
def write_report(storage_dic, output_file, sample_id):
    """ Writes a report from multiple samples.

    Parameters
    ----------
    storage_dic : dict or :py:class:`OrderedDict`
        Storage containing the trimming statistics. See :py:func:`parse_log`
        for its generation.
    output_file : str
        Path where the output file will be generated.
    sample_id : str
        Id or name of the current sample.
    """
    with open(output_file, "w") as fh, open(".report.json", "w") as json_rep:

        # Write header
        fh.write("Sample,Total length,Total trimmed,%,5end Trim,3end Trim,"
                 "bad_reads\\n")

        # Write contents
        for sample, vals in storage_dic.items():
            fh.write("{},{}\\n".format(
                sample, ",".join([str(x) for x in vals.values()])))

            json_dic = {
                "tableRow": [{
                    "sample": sample_id,
                    "data": [
                        {"header": "trimmed",
                         "value": vals["total_trim_perc"],
                         "table": "qc",
                         "columnBar": True},
                    ]
                }],
                "plotData": [{
                    "sample": sample_id,
                    "data": {
                        "sparkline": vals["clean_len"]
                    }
                }],
                "badReads": vals["bad_reads"]
            }

            json_rep.write(json.dumps(json_dic, separators=(",", ":")))
[ "def", "write_report", "(", "storage_dic", ",", "output_file", ",", "sample_id", ")", ":", "with", "open", "(", "output_file", ",", "\"w\"", ")", "as", "fh", ",", "open", "(", "\".report.json\"", ",", "\"w\"", ")", "as", "json_rep", ":", "# Write header", "fh", ".", "write", "(", "\"Sample,Total length,Total trimmed,%,5end Trim,3end Trim,\"", "\"bad_reads\\\\n\"", ")", "# Write contents", "for", "sample", ",", "vals", "in", "storage_dic", ".", "items", "(", ")", ":", "fh", ".", "write", "(", "\"{},{}\\\\n\"", ".", "format", "(", "sample", ",", "\",\"", ".", "join", "(", "[", "str", "(", "x", ")", "for", "x", "in", "vals", ".", "values", "(", ")", "]", ")", ")", ")", "json_dic", "=", "{", "\"tableRow\"", ":", "[", "{", "\"sample\"", ":", "sample_id", ",", "\"data\"", ":", "[", "{", "\"header\"", ":", "\"trimmed\"", ",", "\"value\"", ":", "vals", "[", "\"total_trim_perc\"", "]", ",", "\"table\"", ":", "\"qc\"", ",", "\"columnBar\"", ":", "True", "}", ",", "]", "}", "]", ",", "\"plotData\"", ":", "[", "{", "\"sample\"", ":", "sample_id", ",", "\"data\"", ":", "{", "\"sparkline\"", ":", "vals", "[", "\"clean_len\"", "]", "}", "}", "]", ",", "\"badReads\"", ":", "vals", "[", "\"bad_reads\"", "]", "}", "json_rep", ".", "write", "(", "json", ".", "dumps", "(", "json_dic", ",", "separators", "=", "(", "\",\"", ",", "\":\"", ")", ")", ")" ]
33.545455
0.000658
def init_app(self, app, storage=None, cache=None, file_upload=None):
    """
    Initialize the engine.

    :param app: The app to use
    :type app: Object
    :param storage: The blog storage instance that implements the
        ``Storage`` class interface.
    :type storage: Object
    :param cache: (Optional) A Flask-Cache object to enable caching
    :type cache: Object
    :param file_upload: (Optional) A file-upload extension instance
    :type file_upload: Object
    """
    self.app = app
    self.config = self.app.config
    self.storage = storage or self.storage
    self.file_upload = file_upload or self.file_upload
    self.cache = cache or self.cache
    self._register_plugins(self.app, self.config)

    from .views import create_blueprint
    blog_app = create_blueprint(__name__, self)
    # external urls
    blueprint_created.send(self.app, engine=self, blueprint=blog_app)

    self.app.register_blueprint(
        blog_app, url_prefix=self.config.get("BLOGGING_URL_PREFIX"))

    self.app.extensions["FLASK_BLOGGING_ENGINE"] = self  # duplicate
    self.app.extensions["blogging"] = self

    self.principal = Principal(self.app)
    engine_initialised.send(self.app, engine=self)

    if self.config.get("BLOGGING_ALLOW_FILEUPLOAD", True):
        self.ffu = self.file_upload or FlaskFileUpload(app)
[ "def", "init_app", "(", "self", ",", "app", ",", "storage", "=", "None", ",", "cache", "=", "None", ",", "file_upload", "=", "None", ")", ":", "self", ".", "app", "=", "app", "self", ".", "config", "=", "self", ".", "app", ".", "config", "self", ".", "storage", "=", "storage", "or", "self", ".", "storage", "self", ".", "file_upload", "=", "file_upload", "or", "self", ".", "file_upload", "self", ".", "cache", "=", "cache", "or", "self", ".", "cache", "self", ".", "_register_plugins", "(", "self", ".", "app", ",", "self", ".", "config", ")", "from", ".", "views", "import", "create_blueprint", "blog_app", "=", "create_blueprint", "(", "__name__", ",", "self", ")", "# external urls", "blueprint_created", ".", "send", "(", "self", ".", "app", ",", "engine", "=", "self", ",", "blueprint", "=", "blog_app", ")", "self", ".", "app", ".", "register_blueprint", "(", "blog_app", ",", "url_prefix", "=", "self", ".", "config", ".", "get", "(", "\"BLOGGING_URL_PREFIX\"", ")", ")", "self", ".", "app", ".", "extensions", "[", "\"FLASK_BLOGGING_ENGINE\"", "]", "=", "self", "# duplicate", "self", ".", "app", ".", "extensions", "[", "\"blogging\"", "]", "=", "self", "self", ".", "principal", "=", "Principal", "(", "self", ".", "app", ")", "engine_initialised", ".", "send", "(", "self", ".", "app", ",", "engine", "=", "self", ")", "if", "self", ".", "config", ".", "get", "(", "\"BLOGGING_ALLOW_FILEUPLOAD\"", ",", "True", ")", ":", "self", ".", "ffu", "=", "self", ".", "file_upload", "or", "FlaskFileUpload", "(", "app", ")" ]
38.441176
0.001493
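A typical wiring sketch, assuming Flask-Blogging's usual names (BloggingEngine, SQLAStorage) from that project's documentation; `app` and `db` are an existing Flask app and SQLAlchemy handle:

engine = BloggingEngine()                         # assumed engine class
engine.init_app(app, storage=SQLAStorage(db=db))  # assumed storage class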
def conjugate(self):
    """Return the conjugate of the operator."""
    return Operator(
        np.conj(self.data), self.input_dims(), self.output_dims())
[ "def", "conjugate", "(", "self", ")", ":", "return", "Operator", "(", "np", ".", "conj", "(", "self", ".", "data", ")", ",", "self", ".", "input_dims", "(", ")", ",", "self", ".", "output_dims", "(", ")", ")" ]
41.25
0.011905
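A quick check of the behavior, assuming this is Qiskit's quantum_info Operator (which the signature matches): conjugating the Pauli-Y operator negates its imaginary entries.

import numpy as np
op = Operator(np.array([[0, -1j], [1j, 0]]))   # Pauli-Y
op.conjugate()                                  # data becomes [[0, 1j], [-1j, 0]]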
def get(self):
    """Return current profiler statistics."""
    sort = self.get_argument('sort', 'cum_time')
    count = self.get_argument('count', 20)
    strip_dirs = self.get_argument('strip_dirs', True)

    error = ''
    sorts = ('num_calls', 'cum_time', 'total_time', 'cum_time_per_call',
             'total_time_per_call')
    if sort not in sorts:
        error += "Invalid `sort` '%s', must be in %s." % (sort, sorts)
    try:
        count = int(count)
        if count <= 0:
            count = None
    except (ValueError, TypeError):
        error += "Can't cast `count` '%s' to int." % count
    strip_dirs = str(strip_dirs).lower() not in ('false', 'no', 'none',
                                                 'null', '0', '')

    if error:
        self.write({'error': error})
        self.set_status(400)
        self.finish()
        return

    try:
        statistics = get_profiler_statistics(sort, count, strip_dirs)
        self.write({'statistics': statistics})
        self.set_status(200)
    except TypeError:
        logger.exception('Error while retrieving profiler statistics')
        self.write({'error': 'No stats available. Start and stop the '
                             'profiler before trying to retrieve stats.'})
        self.set_status(404)
        self.finish()
[ "def", "get", "(", "self", ")", ":", "sort", "=", "self", ".", "get_argument", "(", "'sort'", ",", "'cum_time'", ")", "count", "=", "self", ".", "get_argument", "(", "'count'", ",", "20", ")", "strip_dirs", "=", "self", ".", "get_argument", "(", "'strip_dirs'", ",", "True", ")", "error", "=", "''", "sorts", "=", "(", "'num_calls'", ",", "'cum_time'", ",", "'total_time'", ",", "'cum_time_per_call'", ",", "'total_time_per_call'", ")", "if", "sort", "not", "in", "sorts", ":", "error", "+=", "\"Invalid `sort` '%s', must be in %s.\"", "%", "(", "sort", ",", "sorts", ")", "try", ":", "count", "=", "int", "(", "count", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "error", "+=", "\"Can't cast `count` '%s' to int.\"", "%", "count", "if", "count", "<=", "0", ":", "count", "=", "None", "strip_dirs", "=", "str", "(", "strip_dirs", ")", ".", "lower", "(", ")", "not", "in", "(", "'false'", ",", "'no'", ",", "'none'", ",", "'null'", ",", "'0'", ",", "''", ")", "if", "error", ":", "self", ".", "write", "(", "{", "'error'", ":", "error", "}", ")", "self", ".", "set_status", "(", "400", ")", "self", ".", "finish", "(", ")", "return", "try", ":", "statistics", "=", "get_profiler_statistics", "(", "sort", ",", "count", ",", "strip_dirs", ")", "self", ".", "write", "(", "{", "'statistics'", ":", "statistics", "}", ")", "self", ".", "set_status", "(", "200", ")", "except", "TypeError", ":", "logger", ".", "exception", "(", "'Error while retrieving profiler statistics'", ")", "self", ".", "write", "(", "{", "'error'", ":", "'No stats available. Start and stop the profiler before trying to retrieve stats.'", "}", ")", "self", ".", "set_status", "(", "404", ")", "self", ".", "finish", "(", ")" ]
38.742857
0.002158
def match(license):
    '''Returns True if given license field is correct

    Taken from rpmlint. It's named match() to mimic a compiled regexp.'''
    if license not in VALID_LICENSES:
        for l1 in _split_license(license):
            if l1 in VALID_LICENSES:
                continue
            for l2 in _split_license(l1):
                if l2 not in VALID_LICENSES:
                    return False
    return True
[ "def", "match", "(", "license", ")", ":", "if", "license", "not", "in", "VALID_LICENSES", ":", "for", "l1", "in", "_split_license", "(", "license", ")", ":", "if", "l1", "in", "VALID_LICENSES", ":", "continue", "for", "l2", "in", "_split_license", "(", "l1", ")", ":", "if", "l2", "not", "in", "VALID_LICENSES", ":", "return", "False", "valid_license", "=", "False", "return", "True" ]
32.857143
0.002114
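A hedged illustration; the license strings and the exact splitting behavior of `_split_license` on 'and'/'or' expressions are assumptions:

match('GPLv2')                   # True iff 'GPLv2' is in VALID_LICENSES
match('GPLv2 and (MIT or BSD)')  # True only if every split sub-license validates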
def ShowMessage(self, title, message, filename=None, data=None,
                data_base64=None, messageicon=None, time=10000):
    '''
    Shows a balloon above icon in system tray
    :param title: Title shown in balloon
    :param message: Message to be displayed
    :param filename: Optional icon filename
    :param data: Optional in-ram icon
    :param data_base64: Optional base64 icon
    :param messageicon: Optional pre-built icon to show in the balloon
    :param time: How long to display message in milliseconds
    :return: self
    '''
    qicon = None
    if filename is not None:
        qicon = QIcon(filename)
    elif data is not None:
        ba = QtCore.QByteArray.fromRawData(data)
        pixmap = QtGui.QPixmap()
        pixmap.loadFromData(ba)
        qicon = QIcon(pixmap)
    elif data_base64 is not None:
        ba = QtCore.QByteArray.fromBase64(data_base64)
        pixmap = QtGui.QPixmap()
        pixmap.loadFromData(ba)
        qicon = QIcon(pixmap)

    if qicon is not None:
        self.TrayIcon.showMessage(title, message, qicon, time)
    elif messageicon is not None:
        self.TrayIcon.showMessage(title, message, messageicon, time)
    else:
        self.TrayIcon.showMessage(title, message, QIcon(), time)
    self.LastMessage = message
    self.LastTitle = title
    return self
[ "def", "ShowMessage", "(", "self", ",", "title", ",", "message", ",", "filename", "=", "None", ",", "data", "=", "None", ",", "data_base64", "=", "None", ",", "messageicon", "=", "None", ",", "time", "=", "10000", ")", ":", "qicon", "=", "None", "if", "filename", "is", "not", "None", ":", "qicon", "=", "QIcon", "(", "filename", ")", "elif", "data", "is", "not", "None", ":", "ba", "=", "QtCore", ".", "QByteArray", ".", "fromRawData", "(", "data", ")", "pixmap", "=", "QtGui", ".", "QPixmap", "(", ")", "pixmap", ".", "loadFromData", "(", "ba", ")", "qicon", "=", "QIcon", "(", "pixmap", ")", "elif", "data_base64", "is", "not", "None", ":", "ba", "=", "QtCore", ".", "QByteArray", ".", "fromBase64", "(", "data_base64", ")", "pixmap", "=", "QtGui", ".", "QPixmap", "(", ")", "pixmap", ".", "loadFromData", "(", "ba", ")", "qicon", "=", "QIcon", "(", "pixmap", ")", "if", "qicon", "is", "not", "None", ":", "self", ".", "TrayIcon", ".", "showMessage", "(", "title", ",", "message", ",", "qicon", ",", "time", ")", "elif", "messageicon", "is", "not", "None", ":", "self", ".", "TrayIcon", ".", "showMessage", "(", "title", ",", "message", ",", "messageicon", ",", "time", ")", "else", ":", "self", ".", "TrayIcon", ".", "showMessage", "(", "title", ",", "message", ",", "QIcon", "(", ")", ",", "time", ")", "self", ".", "LastMessage", "=", "message", "self", ".", "LastTitle", "=", "title", "return", "self" ]
38.028571
0.002198
def get_parameter(self, parameter):
    "Return a dict for given parameter"
    parameter = self._get_parameter_name(parameter)
    return self._parameters[parameter]
[ "def", "get_parameter", "(", "self", ",", "parameter", ")", ":", "parameter", "=", "self", ".", "_get_parameter_name", "(", "parameter", ")", "return", "self", ".", "_parameters", "[", "parameter", "]" ]
43.75
0.011236
def issue(self, issue_instance_id):
    """Select an issue.

    Parameters:
        issue_instance_id: int    id of the issue instance to select

    Note: We are selecting issue instances, even though the command is
    called issue.
    """
    with self.db.make_session() as session:
        selected_issue = (
            session.query(IssueInstance)
            .filter(IssueInstance.id == issue_instance_id)
            .scalar()
        )
        if selected_issue is None:
            self.warning(
                f"Issue {issue_instance_id} doesn't exist. "
                "Type 'issues' for available issues."
            )
            return

        self.sources = self._get_leaves_issue_instance(
            session, issue_instance_id, SharedTextKind.SOURCE
        )
        self.sinks = self._get_leaves_issue_instance(
            session, issue_instance_id, SharedTextKind.SINK
        )

        self.current_issue_instance_id = int(selected_issue.id)
        self.current_frame_id = -1
        self.current_trace_frame_index = 1  # first one after the source

        print(f"Set issue to {issue_instance_id}.")
        if int(selected_issue.run_id) != self.current_run_id:
            self.current_run_id = int(selected_issue.run_id)
            print(f"Set run to {self.current_run_id}.")
        print()

        self._generate_trace_from_issue()
        self.show()
[ "def", "issue", "(", "self", ",", "issue_instance_id", ")", ":", "with", "self", ".", "db", ".", "make_session", "(", ")", "as", "session", ":", "selected_issue", "=", "(", "session", ".", "query", "(", "IssueInstance", ")", ".", "filter", "(", "IssueInstance", ".", "id", "==", "issue_instance_id", ")", ".", "scalar", "(", ")", ")", "if", "selected_issue", "is", "None", ":", "self", ".", "warning", "(", "f\"Issue {issue_instance_id} doesn't exist. \"", "\"Type 'issues' for available issues.\"", ")", "return", "self", ".", "sources", "=", "self", ".", "_get_leaves_issue_instance", "(", "session", ",", "issue_instance_id", ",", "SharedTextKind", ".", "SOURCE", ")", "self", ".", "sinks", "=", "self", ".", "_get_leaves_issue_instance", "(", "session", ",", "issue_instance_id", ",", "SharedTextKind", ".", "SINK", ")", "self", ".", "current_issue_instance_id", "=", "int", "(", "selected_issue", ".", "id", ")", "self", ".", "current_frame_id", "=", "-", "1", "self", ".", "current_trace_frame_index", "=", "1", "# first one after the source", "print", "(", "f\"Set issue to {issue_instance_id}.\"", ")", "if", "int", "(", "selected_issue", ".", "run_id", ")", "!=", "self", ".", "current_run_id", ":", "self", ".", "current_run_id", "=", "int", "(", "selected_issue", ".", "run_id", ")", "print", "(", "f\"Set run to {self.current_run_id}.\"", ")", "print", "(", ")", "self", ".", "_generate_trace_from_issue", "(", ")", "self", ".", "show", "(", ")" ]
33.674419
0.002013
def get_all(self, cat):
    """
    If data can't be found in the cache, it will be fetched from the db,
    parsed and stored to the cache for each lang_code.

    :param cat: cat of catalog data
    :return:
    """
    return (self._get_from_local_cache(cat) or self._get_from_cache(cat)
            or self._get_from_db(cat))
[ "def", "get_all", "(", "self", ",", "cat", ")", ":", "return", "self", ".", "_get_from_local_cache", "(", "cat", ")", "or", "self", ".", "_get_from_cache", "(", "cat", ")", "or", "self", ".", "_get_from_db", "(", "cat", ")" ]
36.111111
0.009009
def read_interactions(path, comments="#", directed=False, delimiter=None,
                      nodetype=None, timestamptype=None, encoding='utf-8',
                      keys=False):
    """Read a DyNetx graph from interaction list format.

    Parameters
    ----------
    path : basestring
        The file to read the interaction list from.

    delimiter : character
        Column delimiter
    """
    ids = None
    lines = (line.decode(encoding) for line in path)
    if keys:
        ids = read_ids(path.name, delimiter=delimiter,
                       timestamptype=timestamptype)

    return parse_interactions(lines, comments=comments, directed=directed,
                              delimiter=delimiter, nodetype=nodetype,
                              timestamptype=timestamptype, keys=ids)
[ "def", "read_interactions", "(", "path", ",", "comments", "=", "\"#\"", ",", "directed", "=", "False", ",", "delimiter", "=", "None", ",", "nodetype", "=", "None", ",", "timestamptype", "=", "None", ",", "encoding", "=", "'utf-8'", ",", "keys", "=", "False", ")", ":", "ids", "=", "None", "lines", "=", "(", "line", ".", "decode", "(", "encoding", ")", "for", "line", "in", "path", ")", "if", "keys", ":", "ids", "=", "read_ids", "(", "path", ".", "name", ",", "delimiter", "=", "delimiter", ",", "timestamptype", "=", "timestamptype", ")", "return", "parse_interactions", "(", "lines", ",", "comments", "=", "comments", ",", "directed", "=", "directed", ",", "delimiter", "=", "delimiter", ",", "nodetype", "=", "nodetype", ",", "timestamptype", "=", "timestamptype", ",", "keys", "=", "ids", ")" ]
34.190476
0.00813
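A hedged usage sketch: the function iterates `path` for byte lines and reads `path.name`, so pass a file opened in binary mode (the filename and column layout are assumptions).

with open('interactions.txt', 'rb') as f:   # hypothetical edge list: "u v t" per line
    g = read_interactions(f, directed=False, nodetype=int, timestamptype=int)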
def create_framework(
    bundles,
    properties=None,
    auto_start=False,
    wait_for_stop=False,
    auto_delete=False,
):
    # type: (Union[list, tuple], dict, bool, bool, bool) -> Framework
    """
    Creates a Pelix framework, installs the given bundles and returns its
    instance reference.
    If *auto_start* is True, the framework will be started once all bundles
    have been installed.
    If *wait_for_stop* is True, the method will return only once the
    framework has stopped. This requires *auto_start* to be True.
    If *auto_delete* is True, the framework will be deleted once it has
    stopped, and the method will return None.
    This requires *wait_for_stop* and *auto_start* to be True.

    :param bundles: Bundles to initially install (shouldn't be empty if
                    *wait_for_stop* is True)
    :param properties: Optional framework properties
    :param auto_start: If True, the framework will be started immediately
    :param wait_for_stop: If True, the method will return only once the
                          framework has stopped
    :param auto_delete: If True, deletes the framework once it has stopped.
    :return: The framework instance
    :raise ValueError: Only one framework can run at a time
    """
    # Test if a framework already exists
    if FrameworkFactory.is_framework_running(None):
        raise ValueError("A framework is already running")

    # Create the framework
    framework = FrameworkFactory.get_framework(properties)

    # Install bundles
    context = framework.get_bundle_context()
    for bundle in bundles:
        context.install_bundle(bundle)

    if auto_start:
        # Automatically start the framework
        framework.start()

        if wait_for_stop:
            # Wait for the framework to stop
            try:
                framework.wait_for_stop(None)
            except KeyboardInterrupt:
                # Stop keyboard interruptions
                if framework.get_state() == Bundle.ACTIVE:
                    framework.stop()

            if auto_delete:
                # Delete the framework
                FrameworkFactory.delete_framework(framework)
                framework = None

    return framework
[ "def", "create_framework", "(", "bundles", ",", "properties", "=", "None", ",", "auto_start", "=", "False", ",", "wait_for_stop", "=", "False", ",", "auto_delete", "=", "False", ",", ")", ":", "# type: (Union[list, tuple], dict, bool, bool, bool) -> Framework", "# Test if a framework already exists", "if", "FrameworkFactory", ".", "is_framework_running", "(", "None", ")", ":", "raise", "ValueError", "(", "\"A framework is already running\"", ")", "# Create the framework", "framework", "=", "FrameworkFactory", ".", "get_framework", "(", "properties", ")", "# Install bundles", "context", "=", "framework", ".", "get_bundle_context", "(", ")", "for", "bundle", "in", "bundles", ":", "context", ".", "install_bundle", "(", "bundle", ")", "if", "auto_start", ":", "# Automatically start the framework", "framework", ".", "start", "(", ")", "if", "wait_for_stop", ":", "# Wait for the framework to stop", "try", ":", "framework", ".", "wait_for_stop", "(", "None", ")", "except", "KeyboardInterrupt", ":", "# Stop keyboard interruptions", "if", "framework", ".", "get_state", "(", ")", "==", "Bundle", ".", "ACTIVE", ":", "framework", ".", "stop", "(", ")", "if", "auto_delete", ":", "# Delete the framework", "FrameworkFactory", ".", "delete_framework", "(", "framework", ")", "framework", "=", "None", "return", "framework" ]
36.35
0.000446
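A hedged usage sketch, assuming this is `pelix.framework.create_framework` from iPOPO; the bundle names are standard iPOPO bundles, but the property is a placeholder.

framework = create_framework(
    ('pelix.ipopo.core', 'pelix.shell.core'),    # bundles installed before start
    properties={'example.property': 'value'},    # hypothetical framework property
    auto_start=True,
)
framework.wait_for_stop()   # block until the framework is stopped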
def _find_detections(cum_net_resp, nodes, threshold, thresh_type,
                     samp_rate, realstations, length):
    """
    Find detections within the cumulative network response.

    :type cum_net_resp: numpy.ndarray
    :param cum_net_resp: Array of cumulative network response for nodes
    :type nodes: list
    :param nodes: Nodes associated with the source of energy in the \
        cum_net_resp
    :type threshold: float
    :param threshold: Threshold value
    :type thresh_type: str
    :param thresh_type: Either MAD (Median Absolute Deviation) or abs \
        (absolute) or RMS (Root Mean Squared)
    :type samp_rate: float
    :param samp_rate: Sampling rate in Hz
    :type realstations: list
    :param realstations:
        List of stations used to make the cumulative network response, will
        be reported in the
        :class:`eqcorrscan.core.match_filter.Detection`
    :type length: float
    :param length: Maximum length of peak to look for in seconds

    :returns:
        Detections as :class:`eqcorrscan.core.match_filter.Detection`
        objects.
    :rtype: list
    """
    cum_net_resp = np.nan_to_num(cum_net_resp)  # Force no NaNs
    if np.isnan(cum_net_resp).any():
        raise ValueError("Nans present")
    print('Median of data is: ' + str(np.median(cum_net_resp)))
    print('RMS of data is: ' + str(np.sqrt(np.mean(np.square(cum_net_resp)))))
    print('MAD of data is: ' + str(np.median(np.abs(cum_net_resp))))
    if thresh_type == 'MAD':
        thresh = (np.median(np.abs(cum_net_resp)) * threshold)
    elif thresh_type == 'abs':
        thresh = threshold
    elif thresh_type == 'RMS':
        thresh = _rms(cum_net_resp) * threshold
    print('Threshold is set to: ' + str(thresh))
    print('Max of data is: ' + str(max(cum_net_resp)))
    peaks = findpeaks.find_peaks2_short(cum_net_resp, thresh,
                                        length * samp_rate, debug=0)
    detections = []
    if peaks:
        for peak in peaks:
            node = nodes[peak[1]]
            detections.append(
                Detection(template_name=str(node[0]) + '_' + str(node[1]) +
                          '_' + str(node[2]),
                          detect_time=peak[1] / samp_rate,
                          no_chans=len(realstations), detect_val=peak[0],
                          threshold=thresh, typeofdet='brightness',
                          chans=realstations,
                          id=str(node[0]) + '_' + str(node[1]) + '_' +
                          str(node[2]) + str(peak[1] / samp_rate),
                          threshold_type=thresh_type,
                          threshold_input=threshold))
    else:
        detections = []
    print('I have found ' + str(len(peaks)) + ' possible detections')
    return detections
[ "def", "_find_detections", "(", "cum_net_resp", ",", "nodes", ",", "threshold", ",", "thresh_type", ",", "samp_rate", ",", "realstations", ",", "length", ")", ":", "cum_net_resp", "=", "np", ".", "nan_to_num", "(", "cum_net_resp", ")", "# Force no NaNs", "if", "np", ".", "isnan", "(", "cum_net_resp", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "\"Nans present\"", ")", "print", "(", "'Mean of data is: '", "+", "str", "(", "np", ".", "median", "(", "cum_net_resp", ")", ")", ")", "print", "(", "'RMS of data is: '", "+", "str", "(", "np", ".", "sqrt", "(", "np", ".", "mean", "(", "np", ".", "square", "(", "cum_net_resp", ")", ")", ")", ")", ")", "print", "(", "'MAD of data is: '", "+", "str", "(", "np", ".", "median", "(", "np", ".", "abs", "(", "cum_net_resp", ")", ")", ")", ")", "if", "thresh_type", "==", "'MAD'", ":", "thresh", "=", "(", "np", ".", "median", "(", "np", ".", "abs", "(", "cum_net_resp", ")", ")", "*", "threshold", ")", "elif", "thresh_type", "==", "'abs'", ":", "thresh", "=", "threshold", "elif", "thresh_type", "==", "'RMS'", ":", "thresh", "=", "_rms", "(", "cum_net_resp", ")", "*", "threshold", "print", "(", "'Threshold is set to: '", "+", "str", "(", "thresh", ")", ")", "print", "(", "'Max of data is: '", "+", "str", "(", "max", "(", "cum_net_resp", ")", ")", ")", "peaks", "=", "findpeaks", ".", "find_peaks2_short", "(", "cum_net_resp", ",", "thresh", ",", "length", "*", "samp_rate", ",", "debug", "=", "0", ")", "detections", "=", "[", "]", "if", "peaks", ":", "for", "peak", "in", "peaks", ":", "node", "=", "nodes", "[", "peak", "[", "1", "]", "]", "detections", ".", "append", "(", "Detection", "(", "template_name", "=", "str", "(", "node", "[", "0", "]", ")", "+", "'_'", "+", "str", "(", "node", "[", "1", "]", ")", "+", "'_'", "+", "str", "(", "node", "[", "2", "]", ")", ",", "detect_time", "=", "peak", "[", "1", "]", "/", "samp_rate", ",", "no_chans", "=", "len", "(", "realstations", ")", ",", "detect_val", "=", "peak", "[", "0", "]", ",", "threshold", "=", "thresh", ",", "typeofdet", "=", "'brightness'", ",", "chans", "=", "realstations", ",", "id", "=", "str", "(", "node", "[", "0", "]", ")", "+", "'_'", "+", "str", "(", "node", "[", "1", "]", ")", "+", "'_'", "+", "str", "(", "node", "[", "2", "]", ")", "+", "str", "(", "peak", "[", "1", "]", "/", "samp_rate", ")", ",", "threshold_type", "=", "thresh_type", ",", "threshold_input", "=", "threshold", ")", ")", "else", ":", "detections", "=", "[", "]", "print", "(", "'I have found '", "+", "str", "(", "len", "(", "peaks", ")", ")", "+", "' possible detections'", ")", "return", "detections" ]
43.253968
0.000359
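A standalone sketch of the three threshold modes used above (here 'MAD' follows the code and means the median absolute value, not deviations from the median):

import numpy as np

cum_net_resp = np.random.randn(1000)   # dummy network response
threshold = 8.0
thresholds = {
    'MAD': np.median(np.abs(cum_net_resp)) * threshold,
    'abs': threshold,
    'RMS': np.sqrt(np.mean(np.square(cum_net_resp))) * threshold,
}
print(thresholds)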
def send_keys(self, keys): """Send keys to the device.""" try: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.settimeout(self._timeout) sock.connect((self._ip, self._port['cmd'])) # mandatory dance version_info = sock.recv(15) sock.send(version_info) sock.recv(2) sock.send(bytes.fromhex('01')) sock.recv(4) sock.recv(24) # send our command now! for key in keys: if key in self._keys: sock.send(bytes.fromhex("04 01 00 00 00 00 " + self._keys[key])) sock.send(bytes.fromhex("04 00 00 00 00 00 " + self._keys[key])) sock.close() except socket.error: raise
[ "def", "send_keys", "(", "self", ",", "keys", ")", ":", "try", ":", "sock", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", "sock", ".", "settimeout", "(", "self", ".", "_timeout", ")", "sock", ".", "connect", "(", "(", "self", ".", "_ip", ",", "self", ".", "_port", "[", "'cmd'", "]", ")", ")", "# mandatory dance", "version_info", "=", "sock", ".", "recv", "(", "15", ")", "sock", ".", "send", "(", "version_info", ")", "sock", ".", "recv", "(", "2", ")", "sock", ".", "send", "(", "bytes", ".", "fromhex", "(", "'01'", ")", ")", "sock", ".", "recv", "(", "4", ")", "sock", ".", "recv", "(", "24", ")", "# send our command now!", "for", "key", "in", "keys", ":", "if", "key", "in", "self", ".", "_keys", ":", "sock", ".", "send", "(", "bytes", ".", "fromhex", "(", "\"04 01 00 00 00 00 \"", "+", "self", ".", "_keys", "[", "key", "]", ")", ")", "sock", ".", "send", "(", "bytes", ".", "fromhex", "(", "\"04 00 00 00 00 00 \"", "+", "self", ".", "_keys", "[", "key", "]", ")", ")", "sock", ".", "close", "(", ")", "except", "socket", ".", "error", ":", "raise" ]
38.391304
0.00221
def _ta_plot(self,study,periods=14,column=None,include=True,str='{name}({period})',detail=False,
			 theme=None,sharing=None,filename='',asFigure=False,**iplot_kwargs):
	"""
	Generates a Technical Study Chart

	Parameters:
	-----------
			study : string
				Technical Study to be charted
					sma - 'Simple Moving Average'
					rsi - 'Relative Strength Index'
			periods : int
				Number of periods
			column : string
				Name of the column on which the study will be done
			include : bool
				Indicates if the input column(s) should be included in the chart
			str : string
				Label factory for studies
				The following wildcards can be used:
					{name} : Name of the column
					{study} : Name of the study
					{period} : Period used
				Examples:
					'study: {study} - period: {period}'
			detail : bool
				If True the supporting data/calculations are included in the chart
			study_colors : string or [string]
				Colors to be used for the studies

		Study Specific Parameters
		-------------------------
		RSI
			rsi_upper : int (0,100]
				Level for the upper rsi band
				default : 70
			rsi_lower : int (0,100]
				Level for the lower rsi band
				default : 30
		CCI
			cci_upper : int
				Level for the upper cci band
				default : 100
			cci_lower : int
				Level for the lower cci band
				default : -100
		BOLL
			boll_std : int or float
				Number of standard deviations
		MACD
			fast_period : int
				Number of periods for the fast moving average
			slow_period : int
				Number of periods for the slow moving average
			signal_period : int
				Number of periods for the signal
		CORREL
			how : string
				Method for the correlation calculation
					values
					pct_cht
					diff
	"""
	if 'columns' in iplot_kwargs:
		column=iplot_kwargs.pop('columns')
	if 'period' in iplot_kwargs:
		periods=iplot_kwargs.pop('period')
	if 'world_readable' in iplot_kwargs:
		sharing=iplot_kwargs.pop('world_readable')
	if 'study_color' in iplot_kwargs:
		iplot_kwargs['study_colors']=iplot_kwargs.pop('study_color')
	if sharing is None:
		sharing = auth.get_config_file()['sharing']
	if isinstance(sharing,bool):
		if sharing:
			sharing='public'
		else:
			sharing='private'
	iplot_kwargs['sharing']=sharing
	if theme is None:
		theme = iplot_kwargs.pop('study_theme',auth.get_config_file()['theme'])
	if not filename:
		if 'title' in iplot_kwargs:
			filename=iplot_kwargs['title']
		else:
			filename='Plotly Playground {0}'.format(time.strftime("%Y-%m-%d %H:%M:%S"))

	def get_subplots(figures):
		shape=(len(figures),1)
		layout=tools.get_base_layout(figures)
		subplots=tools.subplots(figures,shape=shape,shared_xaxes=True,base_layout=layout)
		if len(figures)==2:
			subplots['layout']['yaxis']['domain']=[.27,1.0]
			subplots['layout']['yaxis2']['domain']=[0,.25]
		return subplots

	def get_study(df,func,iplot_kwargs,iplot_study_kwargs,str=None,include=False,column=None,inset=False):
		df=df.copy()
		if inset:
			if not column:
				if isinstance(df,pd.DataFrame):
					column=df.keys().tolist()
				else:
					df=pd.DataFrame(df)
					column=df.keys().tolist()
		if 'legend' in iplot_kwargs:
			iplot_study_kwargs['legend']=iplot_kwargs['legend']
		fig_0=df.figure(**iplot_kwargs)
		df_ta=func(df,column=column,include=False,str=str,**study_kwargs)
		kind=iplot_kwargs['kind'] if 'kind' in iplot_kwargs else ''
		iplot_study_kwargs['kind']='scatter'
		iplot_study_kwargs['colors']=iplot_study_kwargs.get('colors',['blue','green','red'] if study=='dmi' else 'blue')
		fig_1=df_ta.figure(theme=theme,**iplot_study_kwargs)
		if kind in ['candle','ohlc']:
			for i in fig_1['data']:
				i['x']=[pd.Timestamp(_) for _ in i['x']]
		if inset:
			figure=tools.merge_figures([fig_0,fig_1]) if include else fig_1
		else:
			figure=get_subplots([fig_0,fig_1]) if include else fig_1
		return figure

	study_kwargs={}
	iplot_study_kwargs={}
	study_kwargs=check_kwargs(iplot_kwargs,__TA_KWARGS,{},clean_origin=True)
	iplot_study_kwargs=kwargs_from_keyword(iplot_kwargs,{},'study')
	study_kwargs.update({'periods':periods})

	ta_func = eval('ta.{0}'.format(study))

	inset=study in ('sma','boll','ema','atr','ptps')
	figure=get_study(self,ta_func,iplot_kwargs,iplot_study_kwargs,include=include,
					 column=column,str=str,inset=inset)

	## Add Bands
	if study in ('rsi','cci'):
		bands= {'rsi':(30,70), 'cci':(-100,100)}
		_upper=study_kwargs.get('{0}_upper'.format(study),bands[study][1])
		_lower=study_kwargs.get('{0}_lower'.format(study),bands[study][0])
		yref='y2' if include else 'y1'
		shapes=[tools.get_shape(y=i,yref=yref,color=j,dash='dash') for (i,j) in [(_lower,'green'),(_upper,'red')]]
		figure['layout']['shapes']=shapes

	# if study=='rsi':
	# 	rsi_upper=study_kwargs.get('rsi_upper',70)
	# 	rsi_lower=study_kwargs.get('rsi_lower',30)
	# 	yref='y2' if include else 'y1'
	# 	shapes=[tools.get_shape(y=i,yref=yref,color=j,dash='dash') for (i,j) in [(rsi_lower,'green'),(rsi_upper,'red')]]
	# 	figure['layout']['shapes']=shapes

	# if study=='cci':
	# 	cci_upper=study_kwargs.get('cci_upper',100)
	# 	cci_lower=study_kwargs.get('cci_lower',-100)
	# 	yref='y2' if include else 'y1'
	# 	shapes=[tools.get_shape(y=i,yref=yref,color=j,dash='dash') for (i,j) in [(cci_lower,'green'),(cci_upper,'red')]]
	# 	figure['layout']['shapes']=shapes

	## Exports
	if asFigure:
		return figure
	else:
		return iplot(figure,sharing=sharing,filename=filename)
[ "def", "_ta_plot", "(", "self", ",", "study", ",", "periods", "=", "14", ",", "column", "=", "None", ",", "include", "=", "True", ",", "str", "=", "'{name}({period})'", ",", "detail", "=", "False", ",", "theme", "=", "None", ",", "sharing", "=", "None", ",", "filename", "=", "''", ",", "asFigure", "=", "False", ",", "*", "*", "iplot_kwargs", ")", ":", "if", "'columns'", "in", "iplot_kwargs", ":", "column", "=", "iplot_kwargs", ".", "pop", "(", "'columns'", ")", "if", "'period'", "in", "iplot_kwargs", ":", "periods", "=", "iplot_kwargs", ".", "pop", "(", "'period'", ")", "if", "'world_readable'", "in", "iplot_kwargs", ":", "sharing", "=", "iplot_kwargs", ".", "pop", "(", "'world_readable'", ")", "if", "'study_color'", "in", "iplot_kwargs", ":", "iplot_kwargs", "[", "'study_colors'", "]", "=", "iplot_kwargs", ".", "pop", "(", "'study_color'", ")", "if", "sharing", "is", "None", ":", "sharing", "=", "auth", ".", "get_config_file", "(", ")", "[", "'sharing'", "]", "if", "isinstance", "(", "sharing", ",", "bool", ")", ":", "if", "sharing", ":", "sharing", "=", "'public'", "else", ":", "sharing", "=", "'private'", "iplot_kwargs", "[", "'sharing'", "]", "=", "sharing", "if", "theme", "is", "None", ":", "theme", "=", "iplot_kwargs", ".", "pop", "(", "'study_theme'", ",", "auth", ".", "get_config_file", "(", ")", "[", "'theme'", "]", ")", "if", "not", "filename", ":", "if", "'title'", "in", "iplot_kwargs", ":", "filename", "=", "iplot_kwargs", "[", "'title'", "]", "else", ":", "filename", "=", "'Plotly Playground {0}'", ".", "format", "(", "time", ".", "strftime", "(", "\"%Y-%m-%d %H:%M:%S\"", ")", ")", "def", "get_subplots", "(", "figures", ")", ":", "shape", "=", "(", "len", "(", "figures", ")", ",", "1", ")", "layout", "=", "tools", ".", "get_base_layout", "(", "figures", ")", "subplots", "=", "tools", ".", "subplots", "(", "figures", ",", "shape", "=", "shape", ",", "shared_xaxes", "=", "True", ",", "base_layout", "=", "layout", ")", "if", "len", "(", "figures", ")", "==", "2", ":", "subplots", "[", "'layout'", "]", "[", "'yaxis'", "]", "[", "'domain'", "]", "=", "[", ".27", ",", "1.0", "]", "subplots", "[", "'layout'", "]", "[", "'yaxis2'", "]", "[", "'domain'", "]", "=", "[", "0", ",", ".25", "]", "return", "subplots", "def", "get_study", "(", "df", ",", "func", ",", "iplot_kwargs", ",", "iplot_study_kwargs", ",", "str", "=", "None", ",", "include", "=", "False", ",", "column", "=", "None", ",", "inset", "=", "False", ")", ":", "df", "=", "df", ".", "copy", "(", ")", "if", "inset", ":", "if", "not", "column", ":", "if", "isinstance", "(", "df", ",", "pd", ".", "DataFrame", ")", ":", "column", "=", "df", ".", "keys", "(", ")", ".", "tolist", "(", ")", "else", ":", "df", "=", "pd", ".", "DataFrame", "(", "df", ")", "column", "=", "df", ".", "keys", "(", ")", ".", "tolist", "(", ")", "if", "'legend'", "in", "iplot_kwargs", ":", "iplot_study_kwargs", "[", "'legend'", "]", "=", "iplot_kwargs", "[", "'legend'", "]", "fig_0", "=", "df", ".", "figure", "(", "*", "*", "iplot_kwargs", ")", "df_ta", "=", "func", "(", "df", ",", "column", "=", "column", ",", "include", "=", "False", ",", "str", "=", "str", ",", "*", "*", "study_kwargs", ")", "kind", "=", "iplot_kwargs", "[", "'kind'", "]", "if", "'kind'", "in", "iplot_kwargs", "else", "''", "iplot_study_kwargs", "[", "'kind'", "]", "=", "'scatter'", "iplot_study_kwargs", "[", "'colors'", "]", "=", "iplot_study_kwargs", ".", "get", "(", "'colors'", ",", "[", "'blue'", ",", "'green'", ",", "'red'", "]", "if", "study", "==", "'dmi'", "else", "'blue'", 
")", "fig_1", "=", "df_ta", ".", "figure", "(", "theme", "=", "theme", ",", "*", "*", "iplot_study_kwargs", ")", "if", "kind", "in", "[", "'candle'", ",", "'ohlc'", "]", ":", "for", "i", "in", "fig_1", "[", "'data'", "]", ":", "i", "[", "'x'", "]", "=", "[", "pd", ".", "Timestamp", "(", "_", ")", "for", "_", "in", "i", "[", "'x'", "]", "]", "if", "inset", ":", "figure", "=", "tools", ".", "merge_figures", "(", "[", "fig_0", ",", "fig_1", "]", ")", "if", "include", "else", "fig_1", "else", ":", "figure", "=", "get_subplots", "(", "[", "fig_0", ",", "fig_1", "]", ")", "if", "include", "else", "fig_1", "return", "figure", "study_kwargs", "=", "{", "}", "iplot_study_kwargs", "=", "{", "}", "study_kwargs", "=", "check_kwargs", "(", "iplot_kwargs", ",", "__TA_KWARGS", ",", "{", "}", ",", "clean_origin", "=", "True", ")", "iplot_study_kwargs", "=", "kwargs_from_keyword", "(", "iplot_kwargs", ",", "{", "}", ",", "'study'", ")", "study_kwargs", ".", "update", "(", "{", "'periods'", ":", "periods", "}", ")", "ta_func", "=", "eval", "(", "'ta.{0}'", ".", "format", "(", "study", ")", ")", "inset", "=", "study", "in", "(", "'sma'", ",", "'boll'", ",", "'ema'", ",", "'atr'", ",", "'ptps'", ")", "figure", "=", "get_study", "(", "self", ",", "ta_func", ",", "iplot_kwargs", ",", "iplot_study_kwargs", ",", "include", "=", "include", ",", "column", "=", "column", ",", "str", "=", "str", ",", "inset", "=", "inset", ")", "## Add Bands", "if", "study", "in", "(", "'rsi'", ",", "'cci'", ")", ":", "bands", "=", "{", "'rsi'", ":", "(", "30", ",", "70", ")", ",", "'cci'", ":", "(", "-", "100", ",", "100", ")", "}", "_upper", "=", "study_kwargs", ".", "get", "(", "'{0}_upper'", ".", "format", "(", "study", ")", ",", "bands", "[", "study", "]", "[", "0", "]", ")", "_lower", "=", "study_kwargs", ".", "get", "(", "'{0}_lower'", ".", "format", "(", "study", ")", ",", "bands", "[", "study", "]", "[", "1", "]", ")", "yref", "=", "'y2'", "if", "include", "else", "'y1'", "shapes", "=", "[", "tools", ".", "get_shape", "(", "y", "=", "i", ",", "yref", "=", "yref", ",", "color", "=", "j", ",", "dash", "=", "'dash'", ")", "for", "(", "i", ",", "j", ")", "in", "[", "(", "_lower", ",", "'green'", ")", ",", "(", "_upper", ",", "'red'", ")", "]", "]", "figure", "[", "'layout'", "]", "[", "'shapes'", "]", "=", "shapes", "# if study=='rsi':", "# \trsi_upper=study_kwargs.get('rsi_upper',70)", "# \trsi_lower=study_kwargs.get('rsi_lower',30)", "# \tyref='y2' if include else 'y1'", "# \tshapes=[tools.get_shape(y=i,yref=yref,color=j,dash='dash') for (i,j) in [(rsi_lower,'green'),(rsi_upper,'red')]]", "# \tfigure['layout']['shapes']=shapes", "# if study=='cci':", "# \tcci_upper=study_kwargs.get('cci_upper',100)", "# \tcci_lower=study_kwargs.get('cci_lower',-100)", "# \tyref='y2' if include else 'y1'", "# \tshapes=[tools.get_shape(y=i,yref=yref,color=j,dash='dash') for (i,j) in [(cci_lower,'green'),(cci_upper,'red')]]", "# \tfigure['layout']['shapes']=shapes", "## Exports", "if", "asFigure", ":", "return", "figure", "else", ":", "return", "iplot", "(", "figure", ",", "sharing", "=", "sharing", ",", "filename", "=", "filename", ")" ]
29.772727
0.055771
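A hedged usage sketch, assuming this method is bound to DataFrames as `ta_plot` (cufflinks style); `cf.datagen.lines` generates sample time-series data.

import cufflinks as cf

df = cf.datagen.lines(1)   # one random walk, datetime-indexed
fig = df.ta_plot(study='sma', periods=14, asFigure=True)   # return the figure instead of plotting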
def framers(self, value): """ Set the framers in use for the connection. The framer states will be reset next time their respective framer is used. """ # Handle sequence values if isinstance(value, collections.Sequence): if len(value) != 2: raise ValueError('need exactly 2 values to unpack') elif (not isinstance(value[0], framers.Framer) or not isinstance(value[1], framers.Framer)): raise ValueError("framer must be an instance of " "tendril.Framer") self._send_framer, self._recv_framer = value # If we have a single value, assume it's a framer else: if not isinstance(value, framers.Framer): raise ValueError("framer must be an instance of " "tendril.Framer") self._send_framer = value self._recv_framer = value
[ "def", "framers", "(", "self", ",", "value", ")", ":", "# Handle sequence values", "if", "isinstance", "(", "value", ",", "collections", ".", "Sequence", ")", ":", "if", "len", "(", "value", ")", "!=", "2", ":", "raise", "ValueError", "(", "'need exactly 2 values to unpack'", ")", "elif", "(", "not", "isinstance", "(", "value", "[", "0", "]", ",", "framers", ".", "Framer", ")", "or", "not", "isinstance", "(", "value", "[", "1", "]", ",", "framers", ".", "Framer", ")", ")", ":", "raise", "ValueError", "(", "\"framer must be an instance of \"", "\"tendril.Framer\"", ")", "self", ".", "_send_framer", ",", "self", ".", "_recv_framer", "=", "value", "# If we have a single value, assume it's a framer", "else", ":", "if", "not", "isinstance", "(", "value", ",", "framers", ".", "Framer", ")", ":", "raise", "ValueError", "(", "\"framer must be an instance of \"", "\"tendril.Framer\"", ")", "self", ".", "_send_framer", "=", "value", "self", ".", "_recv_framer", "=", "value" ]
38.6
0.002022
def urljoin_safe(base_url, url, allow_fragments=True): '''urljoin with warning log on error. Returns: str, None''' try: return wpull.url.urljoin( base_url, url, allow_fragments=allow_fragments ) except ValueError as error: _logger.warning(__( _('Unable to parse URL ‘{url}’: {error}.'), url=url, error=error ))
[ "def", "urljoin_safe", "(", "base_url", ",", "url", ",", "allow_fragments", "=", "True", ")", ":", "try", ":", "return", "wpull", ".", "url", ".", "urljoin", "(", "base_url", ",", "url", ",", "allow_fragments", "=", "allow_fragments", ")", "except", "ValueError", "as", "error", ":", "_logger", ".", "warning", "(", "__", "(", "_", "(", "'Unable to parse URL ‘{url}’: {error}.'),", "", "", "url", "=", "url", ",", "error", "=", "error", ")", ")" ]
27.857143
0.002481
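Hypothetical usage: on success the joined URL is returned; on a parse error a warning is logged and the function implicitly returns None.

print(urljoin_safe('http://example.com/a/', 'b.html'))   # http://example.com/a/b.html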
def t_EQUAL(self, t): r"\=" t.endlexpos = t.lexpos + len(t.value) return t
[ "def", "t_EQUAL", "(", "self", ",", "t", ")", ":", "t", ".", "endlexpos", "=", "t", ".", "lexpos", "+", "len", "(", "t", ".", "value", ")", "return", "t" ]
23.75
0.020408
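Under PLY's conventions the docstring is the token's regular expression; below is a minimal standalone lexer with the same rule (assumes the `ply` package; `endlexpos` is a custom attribute, not part of PLY itself).

import ply.lex as lex

tokens = ('EQUAL',)

def t_EQUAL(t):
    r"\="
    t.endlexpos = t.lexpos + len(t.value)   # same bookkeeping as above
    return t

def t_error(t):
    t.lexer.skip(1)   # skip anything that isn't '='

lexer = lex.lex()
lexer.input('a = b')
print([tok.type for tok in lexer])   # ['EQUAL']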
def exec_command(command, **kwargs):
    """
    Executes the given command and sends the output to the console

    :param str|list command:
    :kwargs:
        * `shell` (``bool`` = False) --
        * `stdin` (``*`` = None) --
        * `stdout` (``*`` = None) --
        * `stderr` (``*`` = None) --
    :return: CommandReturnValue
    """
    shell = kwargs.get('shell', False)
    stdin = kwargs.get('stdin', None)
    stdout = kwargs.get('stdout', None)
    stderr = kwargs.get('stderr', None)

    kwargs.update(shell=shell)
    kwargs.update(stdin=stdin)
    kwargs.update(stdout=stdout)
    kwargs.update(stderr=stderr)

    if not isinstance(command, list):
        command = shlex.split(command)

    return_value = subprocess.call(command, **kwargs)

    return CommandReturnValue(return_value=return_value, stdin=stdin, stdout=stdout, stderr=stderr)
[ "def", "exec_command", "(", "command", ",", "*", "*", "kwargs", ")", ":", "shell", "=", "kwargs", ".", "get", "(", "'shell'", ",", "False", ")", "stdin", "=", "kwargs", ".", "get", "(", "'stdin'", ",", "None", ")", "stdout", "=", "kwargs", ".", "get", "(", "'stdout'", ",", "None", ")", "stderr", "=", "kwargs", ".", "get", "(", "'stderr'", ",", "None", ")", "kwargs", ".", "update", "(", "shell", "=", "shell", ")", "kwargs", ".", "update", "(", "stdin", "=", "stdin", ")", "kwargs", ".", "update", "(", "stdout", "=", "stdout", ")", "kwargs", ".", "update", "(", "stderr", "=", "stderr", ")", "if", "not", "isinstance", "(", "command", ",", "list", ")", ":", "command", "=", "shlex", ".", "split", "(", "command", ")", "return_value", "=", "subprocess", ".", "call", "(", "command", ",", "*", "*", "kwargs", ")", "return", "CommandReturnValue", "(", "return_value", "=", "return_value", ",", "stdin", "=", "stdin", ",", "stdout", "=", "stdout", ",", "stderr", "=", "stderr", ")" ]
27.558824
0.001031
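Hypothetical usage (CommandReturnValue is assumed to expose the keyword arguments it was constructed with):

result = exec_command('echo hello')   # split via shlex, run via subprocess.call
print(result.return_value)            # 0 on success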
def _should_skip_travis_event(on_travis_push, on_travis_pr, on_travis_api,
                              on_travis_cron):
    """Detect if the upload should be skipped based on the
    ``TRAVIS_EVENT_TYPE`` environment variable.

    Returns
    -------
    should_skip : `bool`
        True if the upload should be skipped based on the combination of
        ``TRAVIS_EVENT_TYPE`` and user settings.
    """
    travis_event = os.getenv('TRAVIS_EVENT_TYPE')
    if travis_event is None:
        raise click.UsageError(
            'Using --travis but the TRAVIS_EVENT_TYPE '
            'environment variable is not detected.')

    if travis_event == 'push' and on_travis_push is False:
        click.echo('Skipping upload on Travis push event.')
        return True
    elif travis_event == 'pull_request' and on_travis_pr is False:
        click.echo('Skipping upload on Travis pull request event.')
        return True
    elif travis_event == 'api' and on_travis_api is False:
        click.echo('Skipping upload on Travis API event.')
        return True
    elif travis_event == 'cron' and on_travis_cron is False:
        click.echo('Skipping upload on Travis cron event.')
        return True
    else:
        return False
[ "def", "_should_skip_travis_event", "(", "on_travis_push", ",", "on_travis_pr", ",", "on_travis_api", ",", "on_travis_cron", ")", ":", "travis_event", "=", "os", ".", "getenv", "(", "'TRAVIS_EVENT_TYPE'", ")", "if", "travis_event", "is", "None", ":", "raise", "click", ".", "UsageError", "(", "'Using --travis but the TRAVIS_EVENT_TYPE '", "'environment variable is not detected.'", ")", "if", "travis_event", "==", "'push'", "and", "on_travis_push", "is", "False", ":", "click", ".", "echo", "(", "'Skipping upload on Travis push event.'", ")", "return", "True", "elif", "travis_event", "==", "'pull_request'", "and", "on_travis_pr", "is", "False", ":", "click", ".", "echo", "(", "'Skipping upload on Travis pull request event.'", ")", "return", "True", "elif", "travis_event", "==", "'api'", "and", "on_travis_api", "is", "False", ":", "click", ".", "echo", "(", "'Skipping upload on Travis pull request event.'", ")", "return", "True", "elif", "travis_event", "==", "'cron'", "and", "on_travis_cron", "is", "False", ":", "click", ".", "echo", "(", "'Skipping upload on Travis cron event.'", ")", "return", "True", "else", ":", "return", "False" ]
39.096774
0.000805
def pack_own(self, serializer): """ Same as pack but uses a user-defined serializer function to convert items into longstr. """ return Zframe(lib.zhashx_pack_own(self._as_parameter_, serializer), True)
[ "def", "pack_own", "(", "self", ",", "serializer", ")", ":", "return", "Zframe", "(", "lib", ".", "zhashx_pack_own", "(", "self", ".", "_as_parameter_", ",", "serializer", ")", ",", "True", ")" ]
38
0.017167
def _gauc(ref_lca, est_lca, transitive, window): '''Generalized area under the curve (GAUC) This function computes the normalized recall score for correctly ordering triples ``(q, i, j)`` where frames ``(q, i)`` are closer than ``(q, j)`` in the reference annotation. Parameters ---------- ref_lca : scipy.sparse est_lca : scipy.sparse The least common ancestor matrices for the reference and estimated annotations transitive : bool If True, then transitive comparisons are counted, meaning that ``(q, i)`` and ``(q, j)`` can differ by any number of levels. If False, then ``(q, i)`` and ``(q, j)`` can differ by exactly one level. window : number or None The maximum number of frames to consider for each query. If `None`, then all frames are considered. Returns ------- score : number [0, 1] The percentage of reference triples correctly ordered by the estimation. Raises ------ ValueError If ``ref_lca`` and ``est_lca`` have different shapes ''' # Make sure we have the right number of frames if ref_lca.shape != est_lca.shape: raise ValueError('Estimated and reference hierarchies ' 'must have the same shape.') # How many frames? n = ref_lca.shape[0] # By default, the window covers the entire track if window is None: window = n # Initialize the score score = 0.0 # Iterate over query frames num_frames = 0 for query in range(n): # Find all pairs i,j such that ref_lca[q, i] > ref_lca[q, j] results = slice(max(0, query - window), min(n, query + window)) ref_score = ref_lca[query, results] est_score = est_lca[query, results] # Densify the results ref_score = ref_score.toarray().squeeze() est_score = est_score.toarray().squeeze() # Don't count the query as a result # when query < window, query itself is the index within the slice # otherwise, query is located at the center of the slice, window # (this also holds when the slice goes off the end of the array.) idx = min(query, window) ref_score = np.concatenate((ref_score[:idx], ref_score[idx+1:])) est_score = np.concatenate((est_score[:idx], est_score[idx+1:])) inversions, normalizer = _compare_frame_rankings(ref_score, est_score, transitive=transitive) if normalizer: score += 1.0 - inversions / float(normalizer) num_frames += 1 # Normalize by the number of frames counted. # If no frames are counted, take the convention 0/0 -> 0 if num_frames: score /= float(num_frames) else: score = 0.0 return score
[ "def", "_gauc", "(", "ref_lca", ",", "est_lca", ",", "transitive", ",", "window", ")", ":", "# Make sure we have the right number of frames", "if", "ref_lca", ".", "shape", "!=", "est_lca", ".", "shape", ":", "raise", "ValueError", "(", "'Estimated and reference hierarchies '", "'must have the same shape.'", ")", "# How many frames?", "n", "=", "ref_lca", ".", "shape", "[", "0", "]", "# By default, the window covers the entire track", "if", "window", "is", "None", ":", "window", "=", "n", "# Initialize the score", "score", "=", "0.0", "# Iterate over query frames", "num_frames", "=", "0", "for", "query", "in", "range", "(", "n", ")", ":", "# Find all pairs i,j such that ref_lca[q, i] > ref_lca[q, j]", "results", "=", "slice", "(", "max", "(", "0", ",", "query", "-", "window", ")", ",", "min", "(", "n", ",", "query", "+", "window", ")", ")", "ref_score", "=", "ref_lca", "[", "query", ",", "results", "]", "est_score", "=", "est_lca", "[", "query", ",", "results", "]", "# Densify the results", "ref_score", "=", "ref_score", ".", "toarray", "(", ")", ".", "squeeze", "(", ")", "est_score", "=", "est_score", ".", "toarray", "(", ")", ".", "squeeze", "(", ")", "# Don't count the query as a result", "# when query < window, query itself is the index within the slice", "# otherwise, query is located at the center of the slice, window", "# (this also holds when the slice goes off the end of the array.)", "idx", "=", "min", "(", "query", ",", "window", ")", "ref_score", "=", "np", ".", "concatenate", "(", "(", "ref_score", "[", ":", "idx", "]", ",", "ref_score", "[", "idx", "+", "1", ":", "]", ")", ")", "est_score", "=", "np", ".", "concatenate", "(", "(", "est_score", "[", ":", "idx", "]", ",", "est_score", "[", "idx", "+", "1", ":", "]", ")", ")", "inversions", ",", "normalizer", "=", "_compare_frame_rankings", "(", "ref_score", ",", "est_score", ",", "transitive", "=", "transitive", ")", "if", "normalizer", ":", "score", "+=", "1.0", "-", "inversions", "/", "float", "(", "normalizer", ")", "num_frames", "+=", "1", "# Normalize by the number of frames counted.", "# If no frames are counted, take the convention 0/0 -> 0", "if", "num_frames", ":", "score", "/=", "float", "(", "num_frames", ")", "else", ":", "score", "=", "0.0", "return", "score" ]
30.659341
0.000347
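A standalone check of the self-exclusion indexing used above: within the windowed slice, the query frame always sits at index min(query, window).

import numpy as np

n, window = 10, 3
for query in range(n):
    results = slice(max(0, query - window), min(n, query + window))
    idx = min(query, window)
    # the query frame is dropped at this index before counting inversions
    assert np.arange(n)[results][idx] == query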
def _match(self, **kwargs): """Method which indicates if the object matches specified criteria. Match accepts criteria as kwargs and looks them up on attributes. Actual matching is performed with fnmatch, so shell-like wildcards work within match strings. Examples: obj._match(AXTitle='Terminal*') obj._match(AXRole='TextField', AXRoleDescription='search text field') """ for k in kwargs.keys(): try: val = getattr(self, k) except _a11y.Error: return False # Not all values may be strings (e.g. size, position) if sys.version_info[:2] <= (2, 6): if isinstance(val, basestring): if not fnmatch.fnmatch(unicode(val), kwargs[k]): return False else: if val != kwargs[k]: return False elif sys.version_info[0] == 3: if isinstance(val, str): if not fnmatch.fnmatch(val, str(kwargs[k])): return False else: if val != kwargs[k]: return False else: if isinstance(val, str) or isinstance(val, unicode): if not fnmatch.fnmatch(val, kwargs[k]): return False else: if val != kwargs[k]: return False return True
[ "def", "_match", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "k", "in", "kwargs", ".", "keys", "(", ")", ":", "try", ":", "val", "=", "getattr", "(", "self", ",", "k", ")", "except", "_a11y", ".", "Error", ":", "return", "False", "# Not all values may be strings (e.g. size, position)", "if", "sys", ".", "version_info", "[", ":", "2", "]", "<=", "(", "2", ",", "6", ")", ":", "if", "isinstance", "(", "val", ",", "basestring", ")", ":", "if", "not", "fnmatch", ".", "fnmatch", "(", "unicode", "(", "val", ")", ",", "kwargs", "[", "k", "]", ")", ":", "return", "False", "else", ":", "if", "val", "!=", "kwargs", "[", "k", "]", ":", "return", "False", "elif", "sys", ".", "version_info", "[", "0", "]", "==", "3", ":", "if", "isinstance", "(", "val", ",", "str", ")", ":", "if", "not", "fnmatch", ".", "fnmatch", "(", "val", ",", "str", "(", "kwargs", "[", "k", "]", ")", ")", ":", "return", "False", "else", ":", "if", "val", "!=", "kwargs", "[", "k", "]", ":", "return", "False", "else", ":", "if", "isinstance", "(", "val", ",", "str", ")", "or", "isinstance", "(", "val", ",", "unicode", ")", ":", "if", "not", "fnmatch", ".", "fnmatch", "(", "val", ",", "kwargs", "[", "k", "]", ")", ":", "return", "False", "else", ":", "if", "val", "!=", "kwargs", "[", "k", "]", ":", "return", "False", "return", "True" ]
39.421053
0.001303
def set_charset(self, charset): """Changes the <meta> charset tag (default charset in init is UTF-8).""" self.head.charset.attr(charset=charset) return self
[ "def", "set_charset", "(", "self", ",", "charset", ")", ":", "self", ".", "head", ".", "charset", ".", "attr", "(", "charset", "=", "charset", ")", "return", "self" ]
44.25
0.016667
def main(): """ Run the CLI. """ parser = argparse.ArgumentParser( description='Search artists, lyrics, and songs!' ) parser.add_argument( 'artist', help='Specify an artist name (Default: Taylor Swift)', default='Taylor Swift', nargs='?', ) parser.add_argument( '-s', '--song', help='Given artist name, specify a song name', required=False, ) parser.add_argument( '-l', '--lyrics', help='Search for song by lyrics', required=False, ) args = parser.parse_args() if args.lyrics: song = Song.find_song(args.lyrics) else: if args.song: song = Song( title=args.song, artist=args.artist, ) else: artist = Artist(args.artist) if artist.songs: song = random.choice(artist.songs) else: print('Couldn\'t find any songs by artist {}!' .format(args.artist)) sys.exit(1) print(song.format())
[ "def", "main", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Search artists, lyrics, and songs!'", ")", "parser", ".", "add_argument", "(", "'artist'", ",", "help", "=", "'Specify an artist name (Default: Taylor Swift)'", ",", "default", "=", "'Taylor Swift'", ",", "nargs", "=", "'?'", ",", ")", "parser", ".", "add_argument", "(", "'-s'", ",", "'--song'", ",", "help", "=", "'Given artist name, specify a song name'", ",", "required", "=", "False", ",", ")", "parser", ".", "add_argument", "(", "'-l'", ",", "'--lyrics'", ",", "help", "=", "'Search for song by lyrics'", ",", "required", "=", "False", ",", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "if", "args", ".", "lyrics", ":", "song", "=", "Song", ".", "find_song", "(", "args", ".", "lyrics", ")", "else", ":", "if", "args", ".", "song", ":", "song", "=", "Song", "(", "title", "=", "args", ".", "song", ",", "artist", "=", "args", ".", "artist", ",", ")", "else", ":", "artist", "=", "Artist", "(", "args", ".", "artist", ")", "if", "artist", ".", "songs", ":", "song", "=", "random", ".", "choice", "(", "artist", ".", "songs", ")", "else", ":", "print", "(", "'Couldn\\'t find any songs by artist {}!'", ".", "format", "(", "args", ".", "artist", ")", ")", "sys", ".", "exit", "(", "1", ")", "print", "(", "song", ".", "format", "(", ")", ")" ]
24.930233
0.000898
def normalize(self):
        """
        Sum the values in the Counter, then create a new Counter in which each
        value (keyed as before) equals the original value divided by the sum
        of all the original values (sometimes referred to as the
        normalization constant).
        https://en.wikipedia.org/wiki/Normalization_(statistics)
        """
        total = sum(self.values())
        stats = {k: (v / float(total)) for k, v in self.items()}

        return StatsCounter(stats)
[ "def", "normalize", "(", "self", ")", ":", "total", "=", "sum", "(", "self", ".", "values", "(", ")", ")", "stats", "=", "{", "k", ":", "(", "v", "/", "float", "(", "total", ")", ")", "for", "k", ",", "v", "in", "self", ".", "items", "(", ")", "}", "return", "StatsCounter", "(", "stats", ")" ]
38
0.036403
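The same computation on a plain collections.Counter (StatsCounter is assumed to be a Counter subclass):

from collections import Counter

counts = Counter({'a': 2, 'b': 6})
total = sum(counts.values())
print({k: v / float(total) for k, v in counts.items()})   # {'a': 0.25, 'b': 0.75}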
def KeyPress(self, key, n=1, delay=0):
        """Press key n times.

        :param key: the key to press
        :param n: number of times to press the key
        """
        self._delay(delay)
        cmd = Command("KeyPress", 'KeyPress "%s", %s' % (key, n))
        self.add(cmd)
[ "def", "KeyPress", "(", "self", ",", "key", ",", "n", "=", "1", ",", "delay", "=", "0", ")", ":", "self", ".", "_delay", "(", "delay", ")", "cmd", "=", "Command", "(", "\"KeyPress\"", ",", "'KeyPress \"%s\", %s'", "%", "(", "key", ",", "n", ")", ")", "self", ".", "add", "(", "cmd", ")" ]
28.888889
0.011194
def preprocess(net, image):
    '''
    convert to the Caffe input image layout: channels first (C x H x W),
    RGB reordered to BGR, minus the network's mean image
    '''
    return np.float32(np.rollaxis(image, 2)[::-1]) - net.transformer.mean["data"]
[ "def", "preprocess", "(", "net", ",", "image", ")", ":", "return", "np", ".", "float32", "(", "np", ".", "rollaxis", "(", "image", ",", "2", ")", "[", ":", ":", "-", "1", "]", ")", "-", "net", ".", "transformer", ".", "mean", "[", "\"data\"", "]" ]
32.2
0.012121
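The one-liner step by step, on a dummy image (the mean subtraction is omitted since it needs a Caffe net):

import numpy as np

image = np.random.rand(224, 224, 3)   # H x W x RGB
chw = np.rollaxis(image, 2)           # -> 3 x H x W, channels first
bgr = chw[::-1]                       # reverse channel axis: RGB -> BGR
print(bgr.shape)                      # (3, 224, 224)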
def comments_2(self, value=None): """Corresponds to IDD Field `comments_2` Args: value (str): value for IDD Field `comments_2` if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value """ if value is not None: try: value = str(value) except ValueError: raise ValueError('value {} need to be of type str ' 'for field `comments_2`'.format(value)) if ',' in value: raise ValueError('value should not contain a comma ' 'for field `comments_2`') self._comments_2 = value
[ "def", "comments_2", "(", "self", ",", "value", "=", "None", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "str", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type str '", "'for field `comments_2`'", ".", "format", "(", "value", ")", ")", "if", "','", "in", "value", ":", "raise", "ValueError", "(", "'value should not contain a comma '", "'for field `comments_2`'", ")", "self", ".", "_comments_2", "=", "value" ]
34.826087
0.00243
def set_device_name(self, newname):
        """ Sets the internal device name (not the announced Bluetooth name).
        Requires a UTF-8 encoded string. """
        return self.write(request.SetDeviceName(self.seq, *self.prep_str(newname)))
[ "def", "set_device_name", "(", "self", ",", "newname", ")", ":", "return", "self", ".", "write", "(", "request", ".", "SetDeviceName", "(", "self", ".", "seq", ",", "*", "self", ".", "prep_str", "(", "newname", ")", ")", ")" ]
57.5
0.012876
def get_id(opts, cache_minion_id=False): ''' Guess the id of the minion. If CONFIG_DIR/minion_id exists, use the cached minion ID from that file. If no minion id is configured, use multiple sources to find a FQDN. If no FQDN is found you may get an ip address. Returns two values: the detected ID, and a boolean value noting whether or not an IP address is being used for the ID. ''' if opts['root_dir'] is None: root_dir = salt.syspaths.ROOT_DIR else: root_dir = opts['root_dir'] config_dir = salt.syspaths.CONFIG_DIR if config_dir.startswith(salt.syspaths.ROOT_DIR): config_dir = config_dir.split(salt.syspaths.ROOT_DIR, 1)[-1] # Check for cached minion ID id_cache = os.path.join(root_dir, config_dir.lstrip(os.path.sep), 'minion_id') if opts.get('minion_id_caching', True): try: with salt.utils.files.fopen(id_cache) as idf: name = salt.utils.stringutils.to_unicode(idf.readline().strip()) bname = salt.utils.stringutils.to_bytes(name) if bname.startswith(codecs.BOM): # Remove BOM if exists name = salt.utils.stringutils.to_str(bname.replace(codecs.BOM, '', 1)) if name and name != 'localhost': log.debug('Using cached minion ID from %s: %s', id_cache, name) return name, False except (IOError, OSError): pass if '__role' in opts and opts.get('__role') == 'minion': log.debug( 'Guessing ID. The id can be explicitly set in %s', os.path.join(salt.syspaths.CONFIG_DIR, 'minion') ) if opts.get('id_function'): newid = call_id_function(opts) else: newid = salt.utils.network.generate_minion_id() if opts.get('minion_id_lowercase'): newid = newid.lower() log.debug('Changed minion id %s to lowercase.', newid) # Optionally remove one or many domains in a generated minion id if opts.get('minion_id_remove_domain'): newid = remove_domain_from_fqdn(opts, newid) if '__role' in opts and opts.get('__role') == 'minion': if opts.get('id_function'): log.debug( 'Found minion id from external function %s: %s', opts['id_function'], newid ) else: log.debug('Found minion id from generate_minion_id(): %s', newid) if cache_minion_id and opts.get('minion_id_caching', True): _cache_id(newid, id_cache) is_ipv4 = salt.utils.network.is_ipv4(newid) return newid, is_ipv4
[ "def", "get_id", "(", "opts", ",", "cache_minion_id", "=", "False", ")", ":", "if", "opts", "[", "'root_dir'", "]", "is", "None", ":", "root_dir", "=", "salt", ".", "syspaths", ".", "ROOT_DIR", "else", ":", "root_dir", "=", "opts", "[", "'root_dir'", "]", "config_dir", "=", "salt", ".", "syspaths", ".", "CONFIG_DIR", "if", "config_dir", ".", "startswith", "(", "salt", ".", "syspaths", ".", "ROOT_DIR", ")", ":", "config_dir", "=", "config_dir", ".", "split", "(", "salt", ".", "syspaths", ".", "ROOT_DIR", ",", "1", ")", "[", "-", "1", "]", "# Check for cached minion ID", "id_cache", "=", "os", ".", "path", ".", "join", "(", "root_dir", ",", "config_dir", ".", "lstrip", "(", "os", ".", "path", ".", "sep", ")", ",", "'minion_id'", ")", "if", "opts", ".", "get", "(", "'minion_id_caching'", ",", "True", ")", ":", "try", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "id_cache", ")", "as", "idf", ":", "name", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_unicode", "(", "idf", ".", "readline", "(", ")", ".", "strip", "(", ")", ")", "bname", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_bytes", "(", "name", ")", "if", "bname", ".", "startswith", "(", "codecs", ".", "BOM", ")", ":", "# Remove BOM if exists", "name", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_str", "(", "bname", ".", "replace", "(", "codecs", ".", "BOM", ",", "''", ",", "1", ")", ")", "if", "name", "and", "name", "!=", "'localhost'", ":", "log", ".", "debug", "(", "'Using cached minion ID from %s: %s'", ",", "id_cache", ",", "name", ")", "return", "name", ",", "False", "except", "(", "IOError", ",", "OSError", ")", ":", "pass", "if", "'__role'", "in", "opts", "and", "opts", ".", "get", "(", "'__role'", ")", "==", "'minion'", ":", "log", ".", "debug", "(", "'Guessing ID. The id can be explicitly set in %s'", ",", "os", ".", "path", ".", "join", "(", "salt", ".", "syspaths", ".", "CONFIG_DIR", ",", "'minion'", ")", ")", "if", "opts", ".", "get", "(", "'id_function'", ")", ":", "newid", "=", "call_id_function", "(", "opts", ")", "else", ":", "newid", "=", "salt", ".", "utils", ".", "network", ".", "generate_minion_id", "(", ")", "if", "opts", ".", "get", "(", "'minion_id_lowercase'", ")", ":", "newid", "=", "newid", ".", "lower", "(", ")", "log", ".", "debug", "(", "'Changed minion id %s to lowercase.'", ",", "newid", ")", "# Optionally remove one or many domains in a generated minion id", "if", "opts", ".", "get", "(", "'minion_id_remove_domain'", ")", ":", "newid", "=", "remove_domain_from_fqdn", "(", "opts", ",", "newid", ")", "if", "'__role'", "in", "opts", "and", "opts", ".", "get", "(", "'__role'", ")", "==", "'minion'", ":", "if", "opts", ".", "get", "(", "'id_function'", ")", ":", "log", ".", "debug", "(", "'Found minion id from external function %s: %s'", ",", "opts", "[", "'id_function'", "]", ",", "newid", ")", "else", ":", "log", ".", "debug", "(", "'Found minion id from generate_minion_id(): %s'", ",", "newid", ")", "if", "cache_minion_id", "and", "opts", ".", "get", "(", "'minion_id_caching'", ",", "True", ")", ":", "_cache_id", "(", "newid", ",", "id_cache", ")", "is_ipv4", "=", "salt", ".", "utils", ".", "network", ".", "is_ipv4", "(", "newid", ")", "return", "newid", ",", "is_ipv4" ]
38.294118
0.001123
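A standalone sketch of the BOM-stripping read used for the cached minion ID:

import codecs

raw = codecs.BOM + b'minion-01\n'           # simulated cache file content
if raw.startswith(codecs.BOM):
    raw = raw.replace(codecs.BOM, b'', 1)   # drop the byte-order mark once
print(raw.strip().decode('utf-8'))          # minion-01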
def PHASE(angle, qubit):
    """Produces the PHASE gate::

        PHASE(phi) = [[1, 0], [0, exp(1j * phi)]]

    This is the same as the RZ gate.

    :param angle: The angle to rotate around the z-axis on the bloch sphere.
    :param qubit: The qubit to apply the gate to.
    :returns: A Gate object.
    """
    return Gate(name="PHASE", params=[angle], qubits=[unpack_qubit(qubit)])
[ "def", "PHASE", "(", "angle", ",", "qubit", ")", ":", "return", "Gate", "(", "name", "=", "\"PHASE\"", ",", "params", "=", "[", "angle", "]", ",", "qubits", "=", "[", "unpack_qubit", "(", "qubit", ")", "]", ")" ]
30.307692
0.002463
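A hedged usage sketch, assuming the pyQuil API this appears to come from:

import numpy as np
from pyquil import Program
from pyquil.gates import PHASE

p = Program(PHASE(np.pi / 2, 0))   # PHASE(pi/2) applied to qubit 0
print(p)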
def follow(self, something, follow=True):
        """Follow or unfollow a user, question, topic, or collection.

        :param Author/Question/Topic something: the object to (un)follow
        :param bool follow: True --> follow, False --> unfollow
        :return: True on success, False on failure
        :rtype: bool
        """
        from .question import Question
        from .topic import Topic
        from .collection import Collection
        if isinstance(something, Author):
            if something.url == self.url:
                return False
            data = {
                '_xsrf': something.xsrf,
                'method': '	follow_member' if follow else 'unfollow_member',
                'params': json.dumps({'hash_id': something.hash_id})
            }
            res = self._session.post(Follow_Author_Url, data=data)
            return res.json()['r'] == 0
        elif isinstance(something, Question):
            data = {
                '_xsrf': something.xsrf,
                'method': 'follow_question' if follow else 'unfollow_question',
                'params': json.dumps({'question_id': str(something.qid)})
            }
            res = self._session.post(Follow_Question_Url, data=data)
            return res.json()['r'] == 0
        elif isinstance(something, Topic):
            data = {
                '_xsrf': something.xsrf,
                'method': 'follow_topic' if follow else 'unfollow_topic',
                'params': json.dumps({'topic_id': something.tid})
            }
            res = self._session.post(Follow_Topic_Url, data=data)
            return res.json()['r'] == 0
        elif isinstance(something, Collection):
            data = {
                '_xsrf': something.xsrf,
                'favlist_id': something.cid
            }
            res = self._session.post(
                Follow_Collection_Url if follow else Unfollow_Collection_Url,
                data=data)
            return res.json()['r'] == 0
        else:
            raise ValueError('argument something need to be '
                             'zhihu.Author, zhihu.Question'
                             ', Zhihu.Topic or Zhihu.Collection object.')
[ "def", "follow", "(", "self", ",", "something", ",", "follow", "=", "True", ")", ":", "from", ".", "question", "import", "Question", "from", ".", "topic", "import", "Topic", "from", ".", "collection", "import", "Collection", "if", "isinstance", "(", "something", ",", "Author", ")", ":", "if", "something", ".", "url", "==", "self", ".", "url", ":", "return", "False", "data", "=", "{", "'_xsrf'", ":", "something", ".", "xsrf", ",", "'method'", ":", "'\tfollow_member'", "if", "follow", "else", "'unfollow_member'", ",", "'params'", ":", "json", ".", "dumps", "(", "{", "'hash_id'", ":", "something", ".", "hash_id", "}", ")", "}", "res", "=", "self", ".", "_session", ".", "post", "(", "Follow_Author_Url", ",", "data", "=", "data", ")", "return", "res", ".", "json", "(", ")", "[", "'r'", "]", "==", "0", "elif", "isinstance", "(", "something", ",", "Question", ")", ":", "data", "=", "{", "'_xsrf'", ":", "something", ".", "xsrf", ",", "'method'", ":", "'follow_question'", "if", "follow", "else", "'unfollow_question'", ",", "'params'", ":", "json", ".", "dumps", "(", "{", "'question_id'", ":", "str", "(", "something", ".", "qid", ")", "}", ")", "}", "res", "=", "self", ".", "_session", ".", "post", "(", "Follow_Question_Url", ",", "data", "=", "data", ")", "return", "res", ".", "json", "(", ")", "[", "'r'", "]", "==", "0", "elif", "isinstance", "(", "something", ",", "Topic", ")", ":", "data", "=", "{", "'_xsrf'", ":", "something", ".", "xsrf", ",", "'method'", ":", "'follow_topic'", "if", "follow", "else", "'unfollow_topic'", ",", "'params'", ":", "json", ".", "dumps", "(", "{", "'topic_id'", ":", "something", ".", "tid", "}", ")", "}", "res", "=", "self", ".", "_session", ".", "post", "(", "Follow_Topic_Url", ",", "data", "=", "data", ")", "return", "res", ".", "json", "(", ")", "[", "'r'", "]", "==", "0", "elif", "isinstance", "(", "something", ",", "Collection", ")", ":", "data", "=", "{", "'_xsrf'", ":", "something", ".", "xsrf", ",", "'favlist_id'", ":", "something", ".", "cid", "}", "res", "=", "self", ".", "_session", ".", "post", "(", "Follow_Collection_Url", "if", "follow", "else", "Unfollow_Collection_Url", ",", "data", "=", "data", ")", "return", "res", ".", "json", "(", ")", "[", "'r'", "]", "==", "0", "else", ":", "raise", "ValueError", "(", "'argument something need to be '", "'zhihu.Author, zhihu.Question'", "', Zhihu.Topic or Zhihu.Collection object.'", ")" ]
41.26
0.000947
def name(dtype): """Returns the string name for this `dtype`.""" dtype = tf.as_dtype(dtype) if hasattr(dtype, 'name'): return dtype.name if hasattr(dtype, '__name__'): return dtype.__name__ return str(dtype)
[ "def", "name", "(", "dtype", ")", ":", "dtype", "=", "tf", ".", "as_dtype", "(", "dtype", ")", "if", "hasattr", "(", "dtype", ",", "'name'", ")", ":", "return", "dtype", ".", "name", "if", "hasattr", "(", "dtype", ",", "'__name__'", ")", ":", "return", "dtype", ".", "__name__", "return", "str", "(", "dtype", ")" ]
27.25
0.026667
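Hypothetical usage; the fallbacks cover dtype-like objects without a `name` and plain Python types:

import tensorflow as tf

print(name(tf.float32))   # 'float32'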
def forwards(apps, schema_editor): """ Change all Movie objects into Work objects, and their associated data into WorkRole and WorkSelection models, then delete the Movie. """ Movie = apps.get_model('spectator_events', 'Movie') Work = apps.get_model('spectator_events', 'Work') WorkRole = apps.get_model('spectator_events', 'WorkRole') WorkSelection = apps.get_model('spectator_events', 'WorkSelection') for m in Movie.objects.all(): work = Work.objects.create( kind='movie', title=m.title, title_sort=m.title_sort, year=m.year, imdb_id=m.imdb_id ) for role in m.roles.all(): WorkRole.objects.create( creator=role.creator, work=work, role_name=role.role_name, role_order=role.role_order ) for selection in m.events.all(): WorkSelection.objects.create( event=selection.event, work=work, order=selection.order ) m.delete()
[ "def", "forwards", "(", "apps", ",", "schema_editor", ")", ":", "Movie", "=", "apps", ".", "get_model", "(", "'spectator_events'", ",", "'Movie'", ")", "Work", "=", "apps", ".", "get_model", "(", "'spectator_events'", ",", "'Work'", ")", "WorkRole", "=", "apps", ".", "get_model", "(", "'spectator_events'", ",", "'WorkRole'", ")", "WorkSelection", "=", "apps", ".", "get_model", "(", "'spectator_events'", ",", "'WorkSelection'", ")", "for", "m", "in", "Movie", ".", "objects", ".", "all", "(", ")", ":", "work", "=", "Work", ".", "objects", ".", "create", "(", "kind", "=", "'movie'", ",", "title", "=", "m", ".", "title", ",", "title_sort", "=", "m", ".", "title_sort", ",", "year", "=", "m", ".", "year", ",", "imdb_id", "=", "m", ".", "imdb_id", ")", "for", "role", "in", "m", ".", "roles", ".", "all", "(", ")", ":", "WorkRole", ".", "objects", ".", "create", "(", "creator", "=", "role", ".", "creator", ",", "work", "=", "work", ",", "role_name", "=", "role", ".", "role_name", ",", "role_order", "=", "role", ".", "role_order", ")", "for", "selection", "in", "m", ".", "events", ".", "all", "(", ")", ":", "WorkSelection", ".", "objects", ".", "create", "(", "event", "=", "selection", ".", "event", ",", "work", "=", "work", ",", "order", "=", "selection", ".", "order", ")", "m", ".", "delete", "(", ")" ]
30.166667
0.000892
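How a function like this is typically wired into a Django data migration, as a hedged sketch (the dependency name is a placeholder):

from django.db import migrations

class Migration(migrations.Migration):

    dependencies = [
        ('spectator_events', '0001_initial'),   # hypothetical previous migration
    ]

    operations = [
        migrations.RunPython(forwards, migrations.RunPython.noop),
    ]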
def hook(self, name): """ Return a decorator that attaches a callback to a hook. """ def wrapper(func): self.hooks.add(name, func) return func return wrapper
[ "def", "hook", "(", "self", ",", "name", ")", ":", "def", "wrapper", "(", "func", ")", ":", "self", ".", "hooks", ".", "add", "(", "name", ",", "func", ")", "return", "func", "return", "wrapper" ]
33.333333
0.009756
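Hypothetical usage, assuming `app` is an instance of the class above (this mirrors Bottle's hook API):

@app.hook('before_request')
def prepare():
    print('about to handle a request')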
def INT(cpu, op0): """ Calls to interrupt procedure. The INT n instruction generates a call to the interrupt or exception handler specified with the destination operand. The INT n instruction is the general mnemonic for executing a software-generated call to an interrupt handler. The INTO instruction is a special mnemonic for calling overflow exception (#OF), interrupt vector number 4. The overflow interrupt checks the OF flag in the EFLAGS register and calls the overflow interrupt handler if the OF flag is set to 1. :param cpu: current CPU. :param op0: destination operand. """ if op0.read() != 0x80: logger.warning("Unsupported interrupt") raise Interruption(op0.read())
[ "def", "INT", "(", "cpu", ",", "op0", ")", ":", "if", "op0", ".", "read", "(", ")", "!=", "0x80", ":", "logger", ".", "warning", "(", "\"Unsupported interrupt\"", ")", "raise", "Interruption", "(", "op0", ".", "read", "(", ")", ")" ]
45.941176
0.008783