Dataset columns: code (string, 64 to 7.01k chars), docstring (string, 2 to 15.8k chars), text (string, 144 to 19.2k chars; the text field is an "### Input: <docstring> ### Response: <code>" concatenation of the other two columns).
#vtb def system_generate_batch_inputs(input_params={}, always_retry=True, **kwargs): return DXHTTPRequest('/system/generateBatchInputs', input_params, always_retry=always_retry, **kwargs)
Invokes the /system/generateBatchInputs API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Search#API-method:-/system/generateBatchInputs
#vtb def create_dagrun(self, run_id, state, execution_date, start_date=None, external_trigger=False, conf=None, session=None): return self.get_dag().create_dagrun(run_id=run_id, state=state, execution_date=execution_date, start_date=start_date, external_trigger=external_trigger, conf=conf, session=session)
Creates a dag run from this dag including the tasks associated with this dag. Returns the dag run. :param run_id: defines the run id for this dag run :type run_id: str :param execution_date: the execution date of this dag run :type execution_date: datetime.datetime :param state: the state of the dag run :type state: airflow.utils.state.State :param start_date: the date this dag run should be evaluated :type start_date: datetime.datetime :param external_trigger: whether this dag run is externally triggered :type external_trigger: bool :param session: database session :type session: sqlalchemy.orm.session.Session
#vtb def resolved_packages(self): if (self.status != SolverStatus.solved): return None final_phase = self.phase_stack[-1] return final_phase._get_solved_variants()
Return a list of PackageVariant objects, or None if the resolve did not complete or was unsuccessful.
#vtb def _advapi32_encrypt(certificate_or_public_key, data, rsa_oaep_padding=False): flags = 0 if rsa_oaep_padding: flags = Advapi32Const.CRYPT_OAEP out_len = new(advapi32, 'DWORD *', len(data)) res = advapi32.CryptEncrypt( certificate_or_public_key.ex_key_handle, null(), True, flags, null(), out_len, 0 ) handle_error(res) buffer_len = deref(out_len) buffer = buffer_from_bytes(buffer_len) write_to_buffer(buffer, data) pointer_set(out_len, len(data)) res = advapi32.CryptEncrypt( certificate_or_public_key.ex_key_handle, null(), True, flags, buffer, out_len, buffer_len ) handle_error(res) return bytes_from_buffer(buffer, deref(out_len))[::-1]
Encrypts a value using an RSA public key via CryptoAPI :param certificate_or_public_key: A Certificate or PublicKey instance to encrypt with :param data: A byte string of the data to encrypt :param rsa_oaep_padding: If OAEP padding should be used instead of PKCS#1 v1.5 :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: A byte string of the ciphertext
#vtb def add(request, kind, method, *args): request.session.setdefault(_key_name(kind), []).append({ "method": method, "args": args })
add(request, "mixpanel", "track", "purchase", {order: "1234", amount: "100"}) add(request, "google", "push", ["_addTrans", "1234", "Gondor", "100"])
#vtb def main(bot): greenlet = spawn(bot.run) try: greenlet.join() except KeyboardInterrupt: print log.info("Killed by user, disconnecting...") bot.disconnect() finally: greenlet.kill()
Entry point for the command line launcher. :param bot: the IRC bot to run :type bot: :class:`fatbotslim.irc.bot.IRC`
#vtb def text(what="sentence", *args, **kwargs): if what == "character": return character(*args, **kwargs) elif what == "characters": return characters(*args, **kwargs) elif what == "word": return word(*args, **kwargs) elif what == "words": return words(*args, **kwargs) elif what == "sentence": return sentence(*args, **kwargs) elif what == "sentences": return sentences(*args, **kwargs) elif what == "paragraph": return paragraph(*args, **kwargs) elif what == "paragraphs": return paragraphs(*args, **kwargs) elif what == "title": return title(*args, **kwargs) else: raise NameError()
An aggregator for all above defined public methods.
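A quick usage sketch for the aggregator above; the keyword arguments shown are hypothetical and are simply forwarded to the underlying generator functions:

    text()                          # one random sentence (the default)
    text("word")                    # a single random word
    text("paragraphs", quantity=2)  # hypothetical kwarg, passed through to paragraphs()
    text("nonsense")                # raises NameError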
#vtb def repair_broken_bonds(self, slab, bonds): for pair in bonds.keys(): blength = bonds[pair] cn_dict = {} for i, el in enumerate(pair): cnlist = [] for site in self.oriented_unit_cell: poly_coord = 0 if site.species_string == el: for nn in self.oriented_unit_cell.get_neighbors( site, blength): if nn[0].species_string == pair[i-1]: poly_coord += 1 cnlist.append(poly_coord) cn_dict[el] = cnlist if max(cn_dict[pair[0]]) > max(cn_dict[pair[1]]): element1, element2 = pair else: element2, element1 = pair for i, site in enumerate(slab): if site.species_string == element1: poly_coord = 0 for neighbor in slab.get_neighbors(site, blength): poly_coord += 1 if neighbor[0].species_string == element2 else 0 if poly_coord not in cn_dict[element1]: slab = self.move_to_other_side(slab, [i]) neighbors = slab.get_neighbors(slab[i], blength, include_index=True) tomove = [nn[2] for nn in neighbors if nn[0].species_string == element2] tomove.append(i) slab = self.move_to_other_side(slab, tomove) return slab
This method will find undercoordinated atoms due to slab cleaving specified by the bonds parameter and move them to the other surface to make sure the bond is kept intact. In a future release of surface.py, the ghost_sites will be used to tell us what the repaired bonds should look like. Arg: slab (structure): A structure object representing a slab. bonds ({(specie1, specie2): max_bond_dist}: bonds are specified as a dict of tuples: float of specie1, specie2 and the max bonding distance. For example, PO4 groups may be defined as {("P", "O"): 3}. Returns: (Slab) A Slab object with a particular shifted oriented unit cell.
#vtb def get_account(self, account): try: return self.get_account_by_cookie(account.account_cookie) except: QA_util_log_info('Cannot find account with cookie %s' % account.account_cookie) return None
check whether the account is in the portfolio dict or not :param account: QA_Account :return: the QA_Account if it is in the dict, None if not
#vtb def _await_descriptor_upload(tor_protocol, onion, progress, await_all_uploads): if progress: if await_all_uploads: msg = "Completed descriptor uploads" else: msg = "At least one descriptor uploaded" try: progress(100.0, "wait_descriptor", msg) except Exception: log.err()
Internal helper. :param tor_protocol: ITorControlProtocol instance :param onion: IOnionService instance :param progress: a progress callback, or None :returns: a Deferred that fires once we've detected at least one descriptor upload for the service (as detected by listening for HS_DESC events)
#vtb def eval_nonagg_call(self, exp): "helper for eval_callx; evaluator for CallX that consumes a single value" args=self.eval(exp.args) if exp.f=='coalesce': a,b=args return b if a is None else a elif exp.f=='unnest': return self.eval(exp.args)[0] elif exp.f in ('to_tsquery','to_tsvector'): return set(self.eval(exp.args.children[0]).split()) else: raise NotImplementedError('unhandled function', exp.f)
helper for eval_callx; evaluator for CallX that consumes a single value
#vtb def _add_spin_magnitudes(self, structure): for idx, site in enumerate(structure): if getattr(site.specie, '_properties', None): spin = site.specie._properties.get('spin', None) sign = int(spin) if spin else 0 if spin: new_properties = site.specie._properties.copy() sp = str(site.specie).split(",")[0] new_properties.update({ 'spin': sign * self.mag_species_spin.get(sp, 0) }) new_specie = Specie(site.specie.symbol, getattr(site.specie, 'oxi_state', None), new_properties) structure.replace(idx, new_specie, properties=site.properties) logger.debug('Structure with spin magnitudes:\n{}'.format(str(structure))) return structure
Replaces Spin.up/Spin.down with spin magnitudes specified by mag_species_spin. :param structure: :return:
#vtb def _migrate_subresource(subresource, parent, migrations): for key, doc in getattr(parent, subresource.parent_key, {}).items(): for migration in migrations['migrations']: instance = migration(subresource(id=key, **doc)) parent._resource['doc_version'] = unicode(migration.version) instance = _migrate_subresources( instance, migrations['subresources'] ) doc = instance._resource doc.pop('id', None) doc.pop(instance.resource_type + '_id', None) getattr(parent, subresource.parent_key)[key] = doc return parent
Migrate a resource's subresource :param subresource: the perch.SubResource instance :param parent: the parent perch.Document instance :param migrations: the migrations for a resource
#vtb def gen_age(output, ascii_props=False, append=False, prefix=""): obj = {} all_chars = ALL_ASCII if ascii_props else ALL_CHARS with codecs.open(os.path.join(HOME, 'unicodedata', UNIVERSION, 'DerivedAge.txt'), 'r', 'utf-8') as uf: for line in uf: if not line.startswith('#'): data = line.split('#')[0].split(';') if len(data) < 2: continue span = create_span([int(i, 16) for i in data[0].strip().split('..')], is_bytes=ascii_props) name = format_name(data[1]) if name not in obj: obj[name] = [] if span is None: continue obj[name].extend(span) unassigned = set() for x in obj.values(): unassigned |= set(x) obj['na'] = list(all_chars - unassigned) for name in list(obj.keys()): s = set(obj[name]) obj[name] = sorted(s) char2range(obj, is_bytes=ascii_props) with codecs.open(output, 'a' if append else 'w', 'utf-8') as f: if not append: f.write(HEADER) f.write('%s_age = {\n' % prefix) count = len(obj) - 1 i = 0 for k1, v1 in sorted(obj.items()): f.write('    "%s": "%s"' % (k1, v1)) if i == count: f.write('\n}\n') else: f.write(',\n') i += 1
Generate `age` property.
#vtb def make_success_redirect(self): new_authorization_code = AuthorizationCode.objects.create( user=self.user, client=self.client, redirect_uri=(self.redirect_uri if self.request_redirect_uri else None) ) new_authorization_code.scopes = self.valid_scope_objects new_authorization_code.save() response_params = {'code': new_authorization_code.value} if self.state is not None: response_params['state'] = self.state return HttpResponseRedirect( update_parameters(self.redirect_uri, response_params))
Return a Django ``HttpResponseRedirect`` describing the request success. The custom authorization endpoint should return the result of this method when the user grants the Client's authorization request. The request is assumed to have successfully been vetted by the :py:meth:`validate` method.
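For illustration, with a hypothetical redirect URI of https://client.example.com/callback and a state of xyz, the redirect produced above takes the standard authorization-code form:

    https://client.example.com/callback?code=<new_authorization_code.value>&state=xyz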
#vtb def short_dask_repr(array, show_dtype=True): chunksize = tuple(c[0] for c in array.chunks) if show_dtype: return 'dask.array<shape={}, dtype={}, chunksize={}>'.format( array.shape, array.dtype, chunksize) else: return 'dask.array<shape={}, chunksize={}>'.format( array.shape, chunksize)
Similar to dask.array.DataArray.__repr__, but without redundant information that's already printed by the repr function of the xarray wrapper.
#vtb def add(repo, args, targetdir, execute=False, generator=False, includes=[], script=False, source=None): if not execute: files = add_files(args=args, targetdir=targetdir, source=source, script=script, generator=generator) else: files = run_executable(repo, args, includes) if files is None or len(files) == 0: return repo filtered_files = [] package = repo.package for h in files: found = False for i, r in enumerate(package['resources']): if h['relativepath'] == r['relativepath']: found = True if h['sha256'] == r['sha256']: change = False for attr in ['source']: if h[attr] != r[attr]: r[attr] = h[attr] change = True if change: filtered_files.append(h) continue else: filtered_files.append(h) package['resources'][i] = h break if not found: filtered_files.append(h) package['resources'].append(h) if len(filtered_files) == 0: return 0 repo.manager.add_files(repo, filtered_files) rootdir = repo.rootdir with cd(rootdir): datapath = "datapackage.json" with open(datapath, 'w') as fd: fd.write(json.dumps(package, indent=4)) return len(filtered_files)
Add files to the repository by explicitly specifying them or by specifying a pattern over files accessed during execution of an executable. Parameters ---------- repo: Repository args: files or command line (a) If simply adding files, then the list of files that must be added (including any additional arguments to be passed to git) (b) If files to be added are the output of a command line, then args is the command line targetdir: Target directory to store the files execute: Args are not files to be added but scripts that must be run. includes: patterns used to select files to add script: Is this a script? generator: Is this a generator source: Link to the original source of the data
#vtb def build_ast_schema( document_ast: DocumentNode, assume_valid: bool = False, assume_valid_sdl: bool = False, ) -> GraphQLSchema: if not isinstance(document_ast, DocumentNode): raise TypeError("Must provide a Document AST.") if not (assume_valid or assume_valid_sdl): from ..validation.validate import assert_valid_sdl assert_valid_sdl(document_ast) schema_def: Optional[SchemaDefinitionNode] = None type_defs: List[TypeDefinitionNode] = [] directive_defs: List[DirectiveDefinitionNode] = [] append_directive_def = directive_defs.append for def_ in document_ast.definitions: if isinstance(def_, SchemaDefinitionNode): schema_def = def_ elif isinstance(def_, TypeDefinitionNode): def_ = cast(TypeDefinitionNode, def_) type_defs.append(def_) elif isinstance(def_, DirectiveDefinitionNode): append_directive_def(def_) def resolve_type(type_name: str) -> GraphQLNamedType: type_ = type_map.get(type_name) if not type_: raise TypeError(f"Type '{type_name}' not found in document.") return type_ ast_builder = ASTDefinitionBuilder( assume_valid=assume_valid, resolve_type=resolve_type ) type_map = {node.name.value: ast_builder.build_type(node) for node in type_defs} if schema_def: operation_types = get_operation_types(schema_def) else: operation_types = { OperationType.QUERY: "Query", OperationType.MUTATION: "Mutation", OperationType.SUBSCRIPTION: "Subscription", } directives = [ ast_builder.build_directive(directive_def) for directive_def in directive_defs ] if not any(directive.name == "skip" for directive in directives): directives.append(GraphQLSkipDirective) if not any(directive.name == "include" for directive in directives): directives.append(GraphQLIncludeDirective) if not any(directive.name == "deprecated" for directive in directives): directives.append(GraphQLDeprecatedDirective) query_type = operation_types.get(OperationType.QUERY) mutation_type = operation_types.get(OperationType.MUTATION) subscription_type = operation_types.get(OperationType.SUBSCRIPTION) return GraphQLSchema( query=cast(GraphQLObjectType, type_map.get(query_type)) if query_type else None, mutation=cast(GraphQLObjectType, type_map.get(mutation_type)) if mutation_type else None, subscription=cast(GraphQLObjectType, type_map.get(subscription_type)) if subscription_type else None, types=list(type_map.values()), directives=directives, ast_node=schema_def, assume_valid=assume_valid, )
Build a GraphQL Schema from a given AST. This takes the ast of a schema document produced by the parse function in src/language/parser.py. If no schema definition is provided, then it will look for types named Query and Mutation. Given that AST it constructs a GraphQLSchema. The resulting schema has no resolve methods, so execution will use default resolvers. When building a schema from a GraphQL service's introspection result, it might be safe to assume the schema is valid. Set `assume_valid` to True to assume the produced schema is valid. Set `assume_valid_sdl` to True to assume it is already a valid SDL document.
#vtb def variable_declaration(self): self._process(Nature.LET) node = VariableDeclaration(assignment=self.assignment()) self._process(Nature.SEMI) return node
variable_declaration: 'let' assignment ';'
#vtb def DEBUG(msg, *args, **kwargs): logger = getLogger("DEBUG") if len(logger.handlers) == 0: logger.addHandler(StreamHandler()) logger.propagate = False logger.setLevel(logging.DEBUG) logger.DEV(msg, *args, **kwargs)
temporary logger during development that is always on
#vtb def AddProperty(self, interface, name, value): if not interface: interface = self.interface if name in self.props.get(interface, {}): raise dbus.exceptions.DBusException('property %s already exists' % name, name=self.interface + '.PropertyExists') if not (isinstance(value, dbus.Dictionary) or isinstance(value, dbus.Array)): value = copy.copy(value) self.props.setdefault(interface, {})[name] = value
Add property to this object interface: D-Bus interface to add this to. For convenience you can specify '' here to add the property to the object's main interface (as specified on construction). name: Property name. value: Property value.
#vtb def ensure_table_strings(table): for row in range(len(table)): for column in range(len(table[row])): table[row][column] = str(table[row][column]) return table
Force each cell in the table to be a string Parameters ---------- table : list of lists Returns ------- table : list of lists of str
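A minimal check of ensure_table_strings; the output follows directly from str() being applied to every cell:

    table = [["name", "count"], ["alpha", 3], ["beta", None]]
    ensure_table_strings(table)
    # [['name', 'count'], ['alpha', '3'], ['beta', 'None']]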
#vtb def verbose(self): log = copy.copy(self) log._is_verbose = True return log
Make it the verbose log. A verbose log is only shown when the user wants to see more logs. It works as:: log.verbose.warn('this is a verbose warn') log.verbose.info('this is a verbose info')
#vtb def GET_AUTH(self, courseid, taskid): if not id_checker(taskid): raise Exception("Invalid task id") self.get_course_and_check_rights(courseid, allow_all_staff=False) request = web.input() if request.get("action") == "download" and request.get("path") is not None: return self.action_download(courseid, taskid, request.get("path")) elif request.get("action") == "delete" and request.get("path") is not None: return self.action_delete(courseid, taskid, request.get("path")) elif request.get("action") == "rename" and request.get("path") is not None and request.get("new_path") is not None: return self.action_rename(courseid, taskid, request.get("path"), request.get("new_path")) elif request.get("action") == "create" and request.get("path") is not None: return self.action_create(courseid, taskid, request.get("path")) elif request.get("action") == "edit" and request.get("path") is not None: return self.action_edit(courseid, taskid, request.get("path")) else: return self.show_tab_file(courseid, taskid)
Edit a task
#vtb def latitude(self, dms: bool = False) -> Union[str, float]: return self._get_fs('lt', dms)
Generate a random value of latitude. :param dms: DMS format. :return: Value of latitude.
#vtb def parse_package_string(path): parts = path.split('.') if parts[-1][0].isupper(): return ".".join(parts[:-1]), parts[-1] return path, ""
Parse the effect package string. Can contain the package python path or path to effect class in an effect package. Examples:: # Path to effect package examples.cubes # Path to effect class examples.cubes.Cubes Args: path: python path to effect package. May also include effect class name. Returns: tuple: (package_path, effect_class)
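Worked examples of the two return forms (the class-name test is simply whether the last dotted component starts with an uppercase letter):

    parse_package_string("examples.cubes")        # ('examples.cubes', '')
    parse_package_string("examples.cubes.Cubes")  # ('examples.cubes', 'Cubes')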
#vtb def visit_pass(self, node, parent): return nodes.Pass(node.lineno, node.col_offset, parent)
visit a Pass node by returning a fresh instance of it
#vtb def remove_overlaps(self, ufos, glyph_filter=lambda g: len(g)): from booleanOperations import union, BooleanOperationsError for ufo in ufos: font_name = self._font_name(ufo) logger.info("Removing overlaps for " + font_name) for glyph in ufo: if not glyph_filter(glyph): continue contours = list(glyph) glyph.clearContours() try: union(contours, glyph.getPointPen()) except BooleanOperationsError: logger.error( "Failed to remove overlaps for %s: %r", font_name, glyph.name ) raise
Remove overlaps in UFOs' glyphs' contours.
#vtb def run(arguments: List[str], execution_directory: str=None, execution_environment: Dict=None) -> str: process = subprocess.Popen( arguments, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, cwd=execution_directory, env=execution_environment) out, error = process.communicate() stdout = out.decode(_DATA_ENCODING).rstrip() if process.returncode == _SUCCESS_RETURN_CODE: return stdout else: raise RunException(stdout, error.decode(_DATA_ENCODING).rstrip(), arguments, execution_directory)
Runs the given arguments from the given directory (if given, else resorts to the (undefined) current directory). :param arguments: the CLI arguments to run :param execution_directory: the directory to execute the arguments in :param execution_environment: the environment to execute in :return: what is written to stdout following execution :exception RunException: called if the execution has a non-zero return code
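A minimal usage sketch, assuming a POSIX system where echo and ls are on the PATH:

    assert run(["echo", "hello"]) == "hello"
    listing = run(["ls"], execution_directory="/tmp")  # run from another directory
    run(["ls", "/no/such/dir"])                        # raises RunException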
#vtb def get_pmap_from_nrml(oqparam, fname): hcurves_by_imt = {} oqparam.hazard_imtls = imtls = {} for hcurves in nrml.read(fname): imt = hcurves['IMT'] oqparam.investigation_time = hcurves['investigationTime'] if imt == 'SA': imt += '(%s)' % hcurves['saPeriod'] imtls[imt] = ~hcurves.IMLs data = sorted((~node.Point.pos, ~node.poEs) for node in hcurves[1:]) hcurves_by_imt[imt] = numpy.array([d[1] for d in data]) lons, lats = [], [] for xy, poes in data: lons.append(xy[0]) lats.append(xy[1]) mesh = geo.Mesh(numpy.array(lons), numpy.array(lats)) num_levels = sum(len(v) for v in imtls.values()) array = numpy.zeros((len(mesh), num_levels)) imtls = DictArray(imtls) for imt_ in hcurves_by_imt: array[:, imtls(imt_)] = hcurves_by_imt[imt_] return mesh, ProbabilityMap.from_array(array, range(len(mesh)))
:param oqparam: an :class:`openquake.commonlib.oqvalidation.OqParam` instance :param fname: an XML file containing hazard curves :returns: site mesh, curve array
#vtb def tell(self): pos = ctypes.c_size_t() check_call(_LIB.MXRecordIOReaderTell(self.handle, ctypes.byref(pos))) return pos.value
Returns the current position of read head.
#vtb def _add_post_data(self, request: Request): if self._item_session.url_record.post_data: data = wpull.string.to_bytes(self._item_session.url_record.post_data) else: data = wpull.string.to_bytes( self._processor.fetch_params.post_data ) request.method = 'POST' request.fields['Content-Type'] = 'application/x-www-form-urlencoded' request.fields['Content-Length'] = str(len(data)) _logger.debug('Posting data {0}.', data) if not request.body: request.body = Body(io.BytesIO()) with wpull.util.reset_file_offset(request.body): request.body.write(data)
Add data to the payload.
#vtb def scan_processes_fast(self): new_pids = set( win32.EnumProcesses() ) old_pids = set( compat.iterkeys(self.__processDict) ) our_pid = win32.GetCurrentProcessId() if our_pid in new_pids: new_pids.remove(our_pid) if our_pid in old_pids: old_pids.remove(our_pid) for pid in new_pids.difference(old_pids): self._add_process( Process(pid) ) for pid in old_pids.difference(new_pids): self._del_process(pid)
Populates the snapshot with running processes. Only the PID is retrieved for each process. Dead processes are removed. Threads and modules of living processes are ignored. Typically you don't need to call this method directly, if unsure use L{scan} instead. @note: This method uses the PSAPI. It may be faster for scanning, but some information may be missing, outdated or slower to obtain. This could be a good tradeoff under some circumstances.
#vtb def get_uvec(vec): l = np.linalg.norm(vec) if l < 1e-8: return vec return vec / l
Gets a unit vector parallel to input vector
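A quick check with numpy; vectors with near-zero norm are returned unchanged to avoid dividing by (almost) zero:

    import numpy as np
    get_uvec(np.array([3.0, 4.0, 0.0]))  # array([0.6, 0.8, 0. ])
    get_uvec(np.zeros(3))                # array([0., 0., 0.]), returned as-is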
#vtb def add_ne(self, ne): ne_id = self.get_element_id(ne) ne_label = 'ne:' + ne.attrib['type'] self.add_node(ne_id, layers={self.ns, self.ns + ':ne'}, attr_dict=self.element_attribs_to_dict(ne), label=ne_label) for child in ne.iterchildren(): child_id = self.get_element_id(child) self.add_edge(ne_id, child_id, layers={self.ns, self.ns + ':ne'}, edge_type=dg.EdgeTypes.spanning_relation, label=ne_label)
Parameters ---------- ne : etree.Element etree representation of a <ne> element (marks a text span -- (one or more <node> or <word> elements) as a named entity) Example ------- <ne xml:id="ne_23" type="PER"> <word xml:id="s3_2" form="Ute" pos="NE" morph="nsf" lemma="Ute" func="-" parent="s3_501" dephead="s3_1" deprel="APP"/> <word xml:id="s3_3" form="Wedemeier" pos="NE" morph="nsf" lemma="Wedemeier" func="-" parent="s3_501" dephead="s3_2" deprel="APP"/> </ne>
#vtb def delete(self, id, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.delete_with_http_info(id, **kwargs) else: (data) = self.delete_with_http_info(id, **kwargs) return data
Deletes an existing License. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: License id (required) :return: None If the method is called asynchronously, returns the request thread.
#vtb def _in_version(self, *versions): "Returns true if this frame is in any of the specified versions of ID3." for version in versions: if (self._version == version or (isinstance(self._version, collections.Container) and version in self._version)): return True return False
Returns true if this frame is in any of the specified versions of ID3.
#vtb def generous_parse_uri(uri): parse_result = urlparse(uri) if parse_result.scheme == '': abspath = os.path.abspath(parse_result.path) if IS_WINDOWS: abspath = windows_to_unix_path(abspath) fixed_uri = "file://{}".format(abspath) parse_result = urlparse(fixed_uri) return parse_result
Return a urlparse.ParseResult object with the results of parsing the given URI. This has the same properties as the result of parse_uri. When passed a relative path, it determines the absolute path, sets the scheme to file, the netloc to localhost and returns a parse of the result.
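A usage sketch; the relative-path branch fires whenever the string carries no scheme, and the exact absolute path naturally depends on the working directory:

    result = generous_parse_uri("my_dataset")
    result.scheme  # 'file'
    result.path    # e.g. '/home/user/my_dataset' on Unix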
#vtb def reconstruct_interval(experiment_id): start, end = map(lambda x: udatetime.utcfromtimestamp(x / 1000.0), map(float, experiment_id.split("-"))) from ..time_interval import TimeInterval return TimeInterval(start, end)
Reverse the construct_experiment_id operation :param experiment_id: The experiment id :return: time interval
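A sketch assuming the id format produced by construct_experiment_id, i.e. two millisecond UTC timestamps joined by a hyphen:

    interval = reconstruct_interval("1469553600000.0-1469560800000.0")
    # a TimeInterval from the first instant to the second (two hours apart here)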
#vtb def html(theme_name='readthedocs'): os.environ['SPHINX_THEME'] = theme(theme_name) api() man() clean() local("cd docs; make html") local("fab security.check") local("touch docs/build/html/.nojekyll")
build the doc locally and view
#vtb def bulk_call(self, call_params): path = '/' + self.api_version + '/BulkCall/' method = 'POST' return self.request(path, method, call_params)
REST BulkCalls Helper
#vtb def predict(self, X): val = numpy.dot(X, self.coef_) if hasattr(self, "intercept_"): val += self.intercept_ if self.rank_ratio == 1: val *= -1 else: val = numpy.exp(val) return val
Rank samples according to survival times Lower ranks indicate shorter survival, higher ranks longer survival. Parameters ---------- X : array-like, shape = (n_samples, n_features) The input samples. Returns ------- y : ndarray, shape = (n_samples,) Predicted ranks.
#vtb def items(self): pipe = self.conn.pipeline() pipe.lrange(Q_STORAGE_ITEMS, 0, -1) pipe.ltrim(Q_STORAGE_ITEMS, 1, 0) items = pipe.execute()[0] for item in items: item = pickle.loads(item) yield item
Get the items fetched by the jobs.
#vtb async def dump_blob(elem, elem_type=None): elem_is_blob = isinstance(elem, x.BlobType) data = getattr(elem, x.BlobType.DATA_ATTR) if elem_is_blob else elem if data is None or len(data) == 0: return b'' if isinstance(data, (bytes, bytearray, list)): return base64.b16encode(bytes(data)) else: raise ValueError('Unknown blob type')
Dumps blob message. Supports both blob and raw value. :param writer: :param elem: :param elem_type: :param params: :return:
#vtb def from_shapefile(cls, shapefile, *args, **kwargs): reader = Reader(shapefile) return cls.from_records(reader.records(), *args, **kwargs)
Loads a shapefile from disk and optionally merges it with a dataset. See ``from_records`` for full signature. Parameters ---------- records: list of cartopy.io.shapereader.Record Iterator containing Records. dataset: holoviews.Dataset Any HoloViews Dataset type. on: str or list or dict A mapping between the attribute names in the records and the dimensions in the dataset. value: str The value dimension in the dataset the values will be drawn from. index: str or list One or more dimensions in the dataset the Shapes will be indexed by. drop_missing: boolean Whether to drop shapes which are missing from the provided dataset. Returns ------- shapes: Polygons or Path object A Polygons or Path object containing the geometries
#vtb def similar(self, **kwargs): path = self._get_id_path('similar') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
Get the similar TV series for a specific TV series id. Args: page: (optional) Minimum value of 1. Expected value is an integer. language: (optional) ISO 639-1 code. append_to_response: (optional) Comma separated, any TV method. Returns: A dict representation of the JSON returned from the API.
#vtb
def _apply_dvs_capability(capability_spec, capability_dict):
    # Dict keys restored to mirror the attributes they set; the string
    # literals were stripped from the source.
    if 'operation_supported' in capability_dict:
        capability_spec.dvsOperationSupported = \
            capability_dict['operation_supported']
    if 'port_operation_supported' in capability_dict:
        capability_spec.dvPortOperationSupported = \
            capability_dict['port_operation_supported']
    if 'portgroup_operation_supported' in capability_dict:
        capability_spec.dvPortGroupOperationSupported = \
            capability_dict['portgroup_operation_supported']
Applies the values of the capability_dict dictionary to a DVS capability object (vim.vim.DVSCapability)
#vtb
def parse_manifest(path_to_manifest):
    # File mode, group names, and require()/ValueError messages are
    # reconstructed; the string literals were stripped from the source.
    bam_re = r"^(?P<uuid>\S+)\s(?P<url>\S+[bsc][r]?am)"
    fq_re = r"^(?P<uuid>\S+)\s(?P<url>\S+)\s(?P<paired_url>\S+)?\s?(?P<rg_line>@RG\S+)"
    samples = []
    with open(path_to_manifest, 'r') as f:
        for line in f.readlines():
            line = line.strip()
            if line.startswith('#'):
                continue
            bam_match = re.match(bam_re, line)
            fastq_match = re.match(fq_re, line)
            if bam_match:
                uuid = bam_match.group('uuid')
                url = bam_match.group('url')
                paired_url = None
                rg_line = None
                require('.bam' in url.lower(),
                        'Expected .bam extension for sample {}: {}'.format(uuid, url))
            elif fastq_match:
                uuid = fastq_match.group('uuid')
                url = fastq_match.group('url')
                paired_url = fastq_match.group('paired_url')
                rg_line = fastq_match.group('rg_line')
                require('.fq' in url.lower() or '.fastq' in url.lower(),
                        'Expected .fq/.fastq extension for sample {}: {}'.format(uuid, url))
            else:
                raise ValueError('Could not parse line in %s: %s' % (f.name, line))
            require(urlparse(url).scheme, 'URL "{}" is missing a scheme'.format(url))
            samples.append(GermlineSample(uuid, url, paired_url, rg_line))
    return samples
Parses manifest file for Toil Germline Pipeline :param str path_to_manifest: Path to sample manifest file :return: List of GermlineSample namedtuples :rtype: list[GermlineSample]
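An illustrative manifest (hypothetical UUIDs and URLs) showing the two line shapes the regexes above accept:

# uuid      url                          [paired_url]                 [rg_line]
sample-1    s3://bucket/sample-1.bam
sample-2    s3://bucket/s2_R1.fastq.gz   s3://bucket/s2_R2.fastq.gz   @RG\tID:s2\tSM:sample-2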
#vtb
def rnd_date(start=date(1970, 1, 1), end=None, **kwargs):
    if end is None:
        end = date.today()
    start = parser.parse_date(start)
    end = parser.parse_date(end)
    _assert_correct_start_end(start, end)
    return _rnd_date(start, end)
Generate a random date between ``start`` and ``end``. :param start: Left bound :type start: string or datetime.date, (default date(1970, 1, 1)) :param end: Right bound :type end: string or datetime.date, (default date.today()) :return: a datetime.date object
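A quick usage sketch; both string and datetime.date bounds are accepted because parse_date normalizes them:

from datetime import date
d = rnd_date('2014-06-01', date(2014, 6, 30))
assert date(2014, 6, 1) <= d <= date(2014, 6, 30)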
#vtb
def close(self):
    self._execute_plugin_hooks_sync(hook='close')  # hook name is an assumption; literal was stripped
    if not self.session.closed:
        ensure_future(self.session.close(), loop=self.loop)
Close service client and its plugins.
#vtb
def xcom_pull(self, task_ids=None, dag_id=None, key=XCOM_RETURN_KEY,
              include_prior_dates=False):
    if dag_id is None:
        dag_id = self.dag_id
    pull_fn = functools.partial(
        XCom.get_one,
        execution_date=self.execution_date,
        key=key,
        dag_id=dag_id,
        include_prior_dates=include_prior_dates)
    if is_container(task_ids):
        return tuple(pull_fn(task_id=t) for t in task_ids)
    else:
        return pull_fn(task_id=task_ids)
Pull XComs that optionally meet certain criteria. The default value for `key` limits the search to XComs that were returned by other tasks (as opposed to those that were pushed manually). To remove this filter, pass key=None (or any desired value). If a single task_id string is provided, the result is the value of the most recent matching XCom from that task_id. If multiple task_ids are provided, a tuple of matching values is returned. None is returned whenever no matches are found. :param key: A key for the XCom. If provided, only XComs with matching keys will be returned. The default key is 'return_value', also available as a constant XCOM_RETURN_KEY. This key is automatically given to XComs returned by tasks (as opposed to being pushed manually). To remove the filter, pass key=None. :type key: str :param task_ids: Only XComs from tasks with matching ids will be pulled. Can pass None to remove the filter. :type task_ids: str or iterable of strings (representing task_ids) :param dag_id: If provided, only pulls XComs from this DAG. If None (default), the DAG of the calling task is used. :type dag_id: str :param include_prior_dates: If False, only XComs from the current execution_date are returned. If True, XComs from previous dates are returned as well. :type include_prior_dates: bool
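A usage sketch inside a downstream task callable (task ids hypothetical):

def downstream(**context):
    ti = context['ti']
    single = ti.xcom_pull(task_ids='extract')             # latest 'return_value' from one task
    several = ti.xcom_pull(task_ids=['extract', 'load'])  # tuple, one entry per task id
    return single, several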
#vtb
def getAnalystName(self):
    # 'portal_membership' / 'fullname' are the standard Plone tool and member
    # property names; the literals were stripped.
    mtool = getToolByName(self, 'portal_membership')
    analyst = self.getAnalyst().strip()
    analyst_member = mtool.getMemberById(analyst)
    if analyst_member is not None:
        return analyst_member.getProperty('fullname')
    return analyst
Returns the name of the currently assigned analyst
#vtb
def seek(self, pos):
    if (pos > self.file_size) or (pos < 0):
        raise Exception("Unable to seek - position out of file!")
    self.file.seek(pos)
Move to new input file position. If position is negative or out of file, raise Exception.
#vtb
def to_list(self):
    # Empty-string defaults and the comma join for genes are assumptions;
    # the literals were stripped.
    src = self._source or ''
    coll = self._collection or ''
    desc = self._description or ''
    l = [self._id, src, coll, self._name,
         ','.join(sorted(self._genes)), desc]
    return l
Converts the GeneSet object to a flat list of strings. Note: see also :meth:`from_list`. Parameters ---------- Returns ------- list of str The data from the GeneSet object as a flat list.
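The flat shape, with hypothetical values, is [id, source, collection, name, comma-joined genes, description]:

row = ['GS1', 'MSigDB', 'hallmark', 'APOPTOSIS', 'BAX,CASP3,TP53', 'programmed cell death']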
#vtb
def spectral_registration(data, target, initial_guess=(0.0, 0.0), frequency_range=None):
    data = data.squeeze()
    target = target.squeeze()
    if type(frequency_range) is tuple:
        # parenthesised: & binds tighter than < in Python
        spectral_weights = ((frequency_range[0] < data.frequency_axis())
                            & (data.frequency_axis() < frequency_range[1]))
    else:
        spectral_weights = frequency_range

    def residual(input_vector):
        transformed_data = transform_fid(data, input_vector[0], input_vector[1])
        residual_data = transformed_data - target
        if frequency_range is not None:
            spectrum = residual_data.spectrum()
            # weight the spectrum (not the time-domain residual), then drop
            # the zero-weighted points before transforming back
            weighted_spectrum = spectrum * spectral_weights
            weighted_spectrum = weighted_spectrum[weighted_spectrum != 0]
            residual_data = numpy.fft.ifft(numpy.fft.ifftshift(weighted_spectrum))
        return_vector = numpy.zeros(len(residual_data) * 2)
        return_vector[:len(residual_data)] = residual_data.real
        return_vector[len(residual_data):] = residual_data.imag
        return return_vector

    out = scipy.optimize.leastsq(residual, initial_guess)
    return -out[0][0], -out[0][1]
Performs the spectral registration method to calculate the frequency and phase shifts between the input data and the reference spectrum target. The frequency range over which the two spectra are compared can be specified to exclude regions where the spectra differ. :param data: :param target: :param initial_guess: :param frequency_range: :return:
#vtb
def callback(self, request, **kwargs):
    # Session keys and the "#TH#" separator token are assumptions; the string
    # literals were stripped from the source.
    access_token = request.session['oauth_token'] + "#TH#"
    access_token += str(request.session['oauth_token_secret'])
    kwargs = {'access_token': access_token}
    return super(ServiceGithub, self).callback(request, **kwargs)
Called from the Service when the user accept to activate it :param request: request object :return: callback url :rtype: string , path to the template
#vtb
def custom_object_prefix_lax(instance):
    # 'type'/'id' keys and the "x-" prefix in the message are restored from
    # STIX 2 conventions; the check code name is an assumption.
    if (instance['type'] not in enums.TYPES and
            instance['type'] not in enums.RESERVED_OBJECTS and
            not CUSTOM_TYPE_LAX_PREFIX_RE.match(instance['type'])):
        yield JSONError("Custom object type '%s' should start with 'x-' in "
                        "order to be compatible with future versions of the "
                        "STIX 2 specification." % instance['type'],
                        instance['id'], 'custom-prefix-lax')
Ensure custom objects follow lenient naming style conventions for forward-compatibility.
#vtb
def enable_key(self):
    print("This command will enable a disabled key.")
    apiKeyID = input("API Key ID: ")
    try:
        key = self._curl_bitmex("/apiKey/enable",
                                postdict={"apiKeyID": apiKeyID})
        print("Key with ID %s enabled." % key["id"])
    except Exception:  # narrowed from the original bare except
        print("Unable to enable key, please try again.")
        self.enable_key()
Enable an existing API Key.
#vtb
def split_path(path):
    "convenience routine for splitting a path into a list of components."
    if isinstance(path, (tuple, list)):
        result = path
    elif path == "/":
        result = []
    else:
        if not path.startswith("/") or path.endswith("/"):
            raise DBusError(DBUS.ERROR_INVALID_ARGS, "invalid path %s" % repr(path))
        result = path.split("/")[1:]
    return result
convenience routine for splitting a path into a list of components.
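A behaviour sketch of the rules above:

assert split_path('/') == []
assert split_path('/org/example') == ['org', 'example']
assert split_path(('org', 'example')) == ('org', 'example')   # sequences pass through
# 'org/example' (no leading slash) raises DBusError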
#vtb
def replyToComment(self, repo_user, repo_name, pull_number, body, in_reply_to):
    return self.api.makeRequest(
        ["repos", repo_user, repo_name, "pulls", str(pull_number), "comments"],
        method="POST",
        data=dict(body=body, in_reply_to=in_reply_to))
POST /repos/:owner/:repo/pulls/:number/comments Like create, but reply to an existing comment. :param body: The text of the comment. :param in_reply_to: The comment ID to reply to.
#vtb
def _produce_return(self, cursor):
    self.callback(self._row_generator(cursor), *self.cb_args)
    return None
Calls callback once with generator. :rtype: None
#vtb
def init(banner, hidden, backup):
    manage_file = HIDDEN_MANAGE_FILE if hidden else MANAGE_FILE
    if os.path.exists(manage_file):
        # Prompt text, backup filename, and the banner key path are
        # assumptions; the string literals were stripped from the source.
        if not click.confirm('Rewrite {}?'.format(manage_file)):
            return
        if backup:
            bck = '{}.bck'.format(manage_file)
            with open(manage_file, 'r') as source, open(bck, 'w') as bck_file:
                bck_file.write(source.read())
    with open(manage_file, 'w') as output:
        data = default_manage_dict
        if banner:
            data['shell']['banner']['message'] = banner
        output.write(yaml.dump(data, default_flow_style=False))
Initialize a manage shell in current directory $ manage init --banner="My awesome app shell" initializing manage... creating manage.yml
#vtb
def rc_params(usetex=None):
    rcp = GWPY_RCPARAMS.copy()
    if usetex:
        rcp.update(GWPY_TEX_RCPARAMS)
    return rcp
Returns a new `matplotlib.RcParams` with updated GWpy parameters The updated parameters are globally stored as `gwpy.plot.rc.GWPY_RCPARAMS`, with the updated TeX parameters as `gwpy.plot.rc.GWPY_TEX_RCPARAMS`. .. note:: This function doesn't apply the new `RcParams` in any way, just creates something that can be used to set `matplotlib.rcParams`. Parameters ---------- usetex : `bool`, `None` value to set for `text.usetex`; if `None` determine automatically using the ``GWPY_USETEX`` environment variable, and whether `tex` is available on the system. If `True` is given (or determined) a number of other parameters are updated to improve TeX formatting. Examples -------- >>> import matplotlib >>> from gwpy.plot.rc import rc_params as gwpy_rc_params() >>> matplotlib.rcParams.update(gwpy_rc_params(usetex=False))
#vtb
def calc_allowedremoterelieve_v1(self):
    flu = self.sequences.fluxes.fastaccess
    log = self.sequences.logs.fastaccess
    flu.allowedremoterelieve = log.loggedallowedremoterelieve[0]
Get the allowed remote relieve of the last simulation step. Required log sequence: |LoggedAllowedRemoteRelieve| Calculated flux sequence: |AllowedRemoteRelieve| Basic equation: :math:`AllowedRemoteRelieve = LoggedAllowedRemoteRelieve` Example: >>> from hydpy.models.dam import * >>> parameterstep() >>> logs.loggedallowedremoterelieve = 2.0 >>> model.calc_allowedremoterelieve_v1() >>> fluxes.allowedremoterelieve allowedremoterelieve(2.0)
#vtb
def validate(self, generator, axesToMove=None, **kwargs):
    iterations = 10
    for k, default in self._block.configure.defaults.items():
        if k not in kwargs:
            kwargs[k] = default
    params = ConfigureParams(generator, axesToMove, **kwargs)
    part_contexts = self.create_part_contexts()
    status_part_info = self.run_hooks(
        ReportStatusHook(p, c) for p, c in part_contexts.items())
    while iterations > 0:
        iterations -= 1
        validate_part_info = self.run_hooks(
            ValidateHook(p, c, status_part_info, **kwargs)
            for p, c, kwargs in self._part_params(part_contexts, params))
        tweaks = ParameterTweakInfo.filter_values(validate_part_info)
        if tweaks:
            for tweak in tweaks:
                deserialized = self._block.configure.takes.elements[
                    tweak.parameter].validate(tweak.value)
                setattr(params, tweak.parameter, deserialized)
                self.log.debug(
                    "Tweaking %s to %s", tweak.parameter, deserialized)
        else:
            return params
    raise ValueError("Could not get a consistent set of parameters")
Validate configuration parameters and return validated parameters. Doesn't take device state into account so can be run in any state
#vtb
def get_current_cmus():
    # Command string and tag names are restored from context ("cmus-remote -Q"
    # emits "tag <key> <value>" lines); the literals were stripped.
    result = subprocess.run('cmus-remote -Q'.split(), check=True,
                            stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
    info = {}
    for line in result.stdout.decode().split('\n'):
        line = line.split(' ')
        if line[0] != 'tag':
            continue
        key = line[1]
        if key in ['album', 'title', 'artist', 'albumartist'] and \
                key not in info:
            info[key] = ' '.join(line[2:])
    if 'albumartist' in info:
        info['artist'] = info['albumartist']
        del info['albumartist']
    return Song(**info)
Get the current song from cmus.
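For reference, the kind of `cmus-remote -Q` output the loop walks; only the "tag" lines are kept (sample output, not captured from a real session):

status playing
tag artist Miles Davis
tag album Kind of Blue
tag title So What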
#vtb
def validate_unique_items(value, **kwargs):
    counter = collections.Counter((
        json.dumps(v, sort_keys=True) for v in value
    ))
    dupes = [json.loads(v) for v, count in counter.items() if count > 1]
    if dupes:
        # Message key path is an assumption; the literals were stripped.
        raise ValidationError(
            MESSAGES['unique_items']['invalid'].format(repr(dupes)),
        )
Validator for ARRAY types to enforce that all array items must be unique.
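A usage sketch: canonical JSON (sort_keys=True) means dicts that differ only in key order still count as duplicates.

validate_unique_items([1, 2, 3])                 # passes silently
try:
    validate_unique_items([{'a': 1, 'b': 2}, {'b': 2, 'a': 1}])
except ValidationError as err:
    print(err)                                   # reports the repeated item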
#vtb
def _extract_inner_match(self, candidate, offset):
    for possible_inner_match in _INNER_MATCHES:
        group_match = possible_inner_match.search(candidate)
        is_first_match = True
        while group_match and self._max_tries > 0:
            if is_first_match:
                group = self._trim_after_first_match(_UNWANTED_END_CHAR_PATTERN,
                                                     candidate[:group_match.start()])
                match = self._parse_and_verify(group, offset)
                if match is not None:
                    return match
                self._max_tries -= 1
                is_first_match = False
            group = self._trim_after_first_match(_UNWANTED_END_CHAR_PATTERN,
                                                 group_match.group(1))
            match = self._parse_and_verify(group, offset + group_match.start(1))
            if match is not None:
                return match
            self._max_tries -= 1
            group_match = possible_inner_match.search(candidate, group_match.start() + 1)
    return None
Attempts to extract a match from candidate if the whole candidate does not qualify as a match. Arguments: candidate -- The candidate text that might contain a phone number offset -- The current offset of candidate within text Returns the match found, None if none can be found
#vtb
def _send_rpc_response(self, *packets):
    if len(packets) == 0:
        return
    handle, payload = packets[0]
    try:
        self._send_notification(handle, payload)
    except bable_interface.BaBLEException as err:
        if err.packet.status == 'Rejected':  # status name inferred (temporary error); literal stripped
            time.sleep(0.05)
            self._defer(self._send_rpc_response, list(packets))
        else:
            self._audit('ErrorSendingRPCResponse')  # audit event name is an assumption
            self._logger.exception("Error while sending RPC response, handle=%s, payload=%s",
                                   handle, payload)
        return
    if len(packets) > 1:
        self._defer(self._send_rpc_response, list(packets[1:]))
Send an RPC response. It is executed in the baBLE working thread: should not be blocking. The RPC response is notified in one or two packets depending on whether or not response data is included. If there is a temporary error sending one of the packets it is retried automatically. If there is a permanent error, it is logged and the response is abandoned.
#vtb
def stat(path, user=None):
    host, port, path_ = split(path, user)
    fs = hdfs_fs.hdfs(host, port, user)
    retval = StatResult(fs.get_path_info(path_))
    if not host:
        _update_stat(retval, path_)
    fs.close()
    return retval
Performs the equivalent of :func:`os.stat` on ``path``, returning a :class:`StatResult` object.
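A usage sketch (hypothetical HDFS URI); the result exposes the familiar os.stat fields:

info = stat('hdfs://namenode:8020/user/alice/data.txt')
print(info.st_size, info.st_mtime)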
#vtb
def _merge_keys(kwargs):
    # Key names are taken from the docstring ('Type'/'Config') and the CLI
    # option names; the 'none' default driver is an assumption.
    log_driver = kwargs.pop('log_driver', helpers.NOTSET)
    log_opt = kwargs.pop('log_opt', helpers.NOTSET)
    if 'log_config' not in kwargs:
        if log_driver is not helpers.NOTSET \
                or log_opt is not helpers.NOTSET:
            kwargs['log_config'] = {
                'Type': log_driver if log_driver is not helpers.NOTSET else 'none',
                'Config': log_opt if log_opt is not helpers.NOTSET else {}
            }
The log_config is a mixture of the CLI options --log-driver and --log-opt (which we support in Salt as log_driver and log_opt, respectively), but it must be submitted to the host config in the format {'Type': log_driver, 'Config': log_opt}. So, we need to construct this argument to be passed to the API from those two arguments.
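A behaviour sketch, assuming the key names restored above; the function mutates kwargs in place:

kwargs = {'log_driver': 'json-file', 'log_opt': {'max-size': '10m'}}
_merge_keys(kwargs)
assert kwargs['log_config'] == {'Type': 'json-file', 'Config': {'max-size': '10m'}}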
#vtb
def _priority(s):
    if type(s) in (list, tuple, set, frozenset):
        return ITERABLE
    if type(s) is dict:
        return DICT
    if issubclass(type(s), type):
        return TYPE
    if hasattr(s, "validate"):
        return VALIDATOR
    if callable(s):
        return CALLABLE
    else:
        return COMPARABLE
Return priority for a given object.
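A dispatch sketch, assuming the usual constant ordering (ITERABLE > DICT > TYPE > VALIDATOR > CALLABLE > COMPARABLE):

assert _priority([1, 2]) == ITERABLE     # sequence schemas first
assert _priority({'k': int}) == DICT
assert _priority(int) == TYPE
assert _priority(len) == CALLABLE        # plain callables near the end
assert _priority(42) == COMPARABLE       # literal comparison last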
#vtb
def permuted_copy(self, partition=None):
    def take(n, iterable):
        return [next(iterable) for _ in range(n)]
    if partition is None:
        partition = Partition([1] * len(self))
    index_tuples = partition.get_membership()
    alignments = []
    for ix in index_tuples:
        concat = Concatenation(self, ix)
        sites = concat.alignment.get_sites()
        random.shuffle(sites)
        d = dict(zip(concat.alignment.get_names(),
                     [iter(x) for x in zip(*sites)]))
        new_seqs = [[(k, ''.join(take(l, d[k]))) for k in d]
                    for l in concat.lengths]
        for seqs, datatype, name in zip(new_seqs, concat.datatypes, concat.names):
            alignment = Alignment(seqs, datatype)
            alignment.name = name
            alignments.append(alignment)
    return self.__class__(records=sorted(alignments,
                                         key=lambda x: SORT_KEY(x.name)))
Return a copy of the collection with all alignment columns permuted
#vtb
def bounds(self, thr=0):
    min_lat = float("inf")
    min_lon = float("inf")
    max_lat = -float("inf")
    max_lon = -float("inf")
    for segment in self.segments:
        milat, milon, malat, malon = segment.bounds(thr=thr)
        min_lat = min(milat, min_lat)
        min_lon = min(milon, min_lon)
        max_lat = max(malat, max_lat)
        max_lon = max(malon, max_lon)
    return min_lat, min_lon, max_lat, max_lon
Gets the bounds of this track, aggregated over its segments Returns: (float, float, float, float): Bounds, with min latitude, min longitude, max latitude and max longitude
#vtb
def getRoutes(self):
    routes = []
    try:
        out = subprocess.Popen([routeCmd, "-n"],
                               stdout=subprocess.PIPE).communicate()[0]
    except Exception:
        # error message text is an assumption (the flattened source formatted
        # it with ipCmd, though routeCmd is the command actually run)
        raise Exception('Execution of command %s failed.' % routeCmd)
    lines = out.splitlines()
    if len(lines) > 1:
        headers = [col.lower() for col in lines[1].split()]
        for line in lines[2:]:
            routes.append(dict(zip(headers, line.split())))
    return routes
Get routing table. @return: List of routes.
#vtb
def verify_multi(self, otp_list, max_time_window=DEFAULT_MAX_TIME_WINDOW,
                 sl=None, timeout=None):
    otps = []
    for otp in otp_list:
        otps.append(OTP(otp, self.translate_otp))
    if len(otp_list) < 2:
        raise ValueError('otp_list needs to contain at least two OTPs')
    device_ids = set()
    for otp in otps:
        device_ids.add(otp.device_id)
    if len(device_ids) != 1:
        raise Exception('OTPs contain different device ids')
    # The verification loop was truncated in the source; this sketch of the
    # documented behaviour verifies each OTP, collects server timestamps and
    # rejects the set when the first-to-last spread exceeds max_time_window.
    for otp in otps:
        response = self.verify(otp.otp, True, sl, timeout, return_response=True)
        if not response:
            return False
        otp.timestamp = int(response['timestamp'])
    delta = (otps[-1].timestamp - otps[0].timestamp) / 8  # 8 Hz timestamp counter
    if delta < 0:
        raise Exception('delta is smaller than zero; were the OTPs submitted in order?')
    if delta > max_time_window:
        raise Exception('delta is bigger than max_time_window (%s seconds)'
                        % (max_time_window))
    return True
Verify a provided list of OTPs. :param max_time_window: Maximum number of seconds which can pass between the first and last OTP generation for the OTP to still be considered valid. :type max_time_window: ``int``
#vtb
def mount_share_at_path(share_path, mount_path):
    sh_url = CFURLCreateWithString(None, share_path, None)
    mo_url = CFURLCreateWithString(None, mount_path, None)
    open_options = {NetFS.kNAUIOptionKey: NetFS.kNAUIOptionNoUI}
    mount_options = {NetFS.kNetFSAllowSubMountsKey: True,
                     NetFS.kNetFSMountAtMountDirKey: True}
    result, output = NetFS.NetFSMountURLSync(sh_url, mo_url, None, None,
                                             open_options, mount_options, None)
    if result != 0:
        # error text reconstructed from the format arguments
        raise Exception('Error mounting url "%s" at path "%s": %s'
                        % (share_path, mount_path, output))
    return str(output[0])
Mounts a share at the specified path Args: share_path: String URL with all auth info to connect to file share. mount_path: Path to mount share on. Returns: The mount point or raises an error
#vtb
def get_syllable_count(self, syllables: List[str]) -> int:
    tmp_syllables = copy.deepcopy(syllables)
    return len(string_utils.remove_blank_spaces(
        string_utils.move_consonant_right(tmp_syllables,
                                          self._find_solo_consonant(tmp_syllables))))
Counts the number of syllable groups that would occur after elision. Often we will want to preserve the position and separation of syllables so that they can be used to reconstitute a line, and apply stresses to the original word positions. However, we also want to be able to count the number of syllables accurately. :param syllables: :return: >>> syllabifier = Syllabifier() >>> print(syllabifier.get_syllable_count([ ... 'Jām', 'tūm', 'c', 'au', 'sus', 'es', 'u', 'nus', 'I', 'ta', 'lo', 'rum'])) 11
#vtb
def signFix(val, width):
    if val > 0:
        msb = 1 << (width - 1)
        if val & msb:
            val -= mask(width) + 1
    return val
Interpret an unsigned int of the given bit width as a signed (two's-complement) value: if the sign bit is set, return the equivalent negative int with the same bits set
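A worked example at width=4, assuming mask(width) is the usual all-ones helper:

def mask(width):
    return (1 << width) - 1

assert signFix(0b1111, 4) == -1   # sign bit set: 15 - 16
assert signFix(0b0111, 4) == 7    # sign bit clear: unchanged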
#vtb
def update(self, modelID, modelParams, modelParamsHash, metricResult,
           completed, completionReason, matured, numRecords):
    # Dict keys ('particleState', 'swarmId', 'genIdx', 'id') and entry field
    # names are restored from context; the string literals were stripped.
    assert (modelParamsHash is not None)
    if completed:
        matured = True
    if metricResult is not None and matured and \
            completionReason in [ClientJobsDAO.CMPL_REASON_EOF,
                                 ClientJobsDAO.CMPL_REASON_STOPPED]:
        if self._hsObj._maximize:
            errScore = -1 * metricResult
        else:
            errScore = metricResult
        if errScore < self._bestResult:
            self._bestResult = errScore
            self._bestModelID = modelID
            self._hsObj.logger.info("New best model after %d evaluations: errScore "
                                    "%g on model %s" % (len(self._allResults),
                                                        self._bestResult,
                                                        self._bestModelID))
    else:
        errScore = numpy.inf
    if completed and completionReason in [ClientJobsDAO.CMPL_REASON_ORPHAN]:
        errScore = numpy.inf
        hidden = True
    else:
        hidden = False
    if completed:
        self._completedModels.add(modelID)
        self._numCompletedModels = len(self._completedModels)
        if completionReason == ClientJobsDAO.CMPL_REASON_ERROR:
            self._errModels.add(modelID)
            self._numErrModels = len(self._errModels)
    wasHidden = False
    if modelID not in self._modelIDToIdx:
        assert (modelParams is not None)
        entry = dict(modelID=modelID, modelParams=modelParams,
                     modelParamsHash=modelParamsHash, errScore=errScore,
                     completed=completed, matured=matured,
                     numRecords=numRecords, hidden=hidden)
        self._allResults.append(entry)
        entryIdx = len(self._allResults) - 1
        self._modelIDToIdx[modelID] = entryIdx
        self._paramsHashToIndexes[modelParamsHash] = entryIdx
        swarmId = modelParams['particleState']['swarmId']
        if not hidden:
            if swarmId in self._swarmIdToIndexes:
                self._swarmIdToIndexes[swarmId].append(entryIdx)
            else:
                self._swarmIdToIndexes[swarmId] = [entryIdx]
            genIdx = modelParams['particleState']['genIdx']
            numPsEntry = self._swarmNumParticlesPerGeneration.get(swarmId, [0])
            while genIdx >= len(numPsEntry):
                numPsEntry.append(0)
            numPsEntry[genIdx] += 1
            self._swarmNumParticlesPerGeneration[swarmId] = numPsEntry
    else:
        entryIdx = self._modelIDToIdx.get(modelID, None)
        assert (entryIdx is not None)
        entry = self._allResults[entryIdx]
        wasHidden = entry['hidden']
        if entry['modelParamsHash'] != modelParamsHash:
            self._paramsHashToIndexes.pop(entry['modelParamsHash'])
            self._paramsHashToIndexes[modelParamsHash] = entryIdx
            entry['modelParamsHash'] = modelParamsHash
        modelParams = entry['modelParams']
        swarmId = modelParams['particleState']['swarmId']
        genIdx = modelParams['particleState']['genIdx']
        if hidden and not wasHidden:
            assert (entryIdx in self._swarmIdToIndexes[swarmId])
            self._swarmIdToIndexes[swarmId].remove(entryIdx)
            self._swarmNumParticlesPerGeneration[swarmId][genIdx] -= 1
        entry['errScore'] = errScore
        entry['completed'] = completed
        entry['matured'] = matured
        entry['numRecords'] = numRecords
        entry['hidden'] = hidden
    particleId = modelParams['particleState']['id']
    genIdx = modelParams['particleState']['genIdx']
    if matured and not hidden:
        (oldResult, pos) = self._particleBest.get(particleId, (numpy.inf, None))
        if errScore < oldResult:
            pos = Particle.getPositionFromState(modelParams['particleState'])
            self._particleBest[particleId] = (errScore, pos)
    prevGenIdx = self._particleLatestGenIdx.get(particleId, -1)
    if not hidden and genIdx > prevGenIdx:
        self._particleLatestGenIdx[particleId] = genIdx
    elif hidden and not wasHidden and genIdx == prevGenIdx:
        self._particleLatestGenIdx[particleId] = genIdx - 1
    if not hidden:
        swarmId = modelParams['particleState']['swarmId']
        if not swarmId in self._swarmBestOverall:
            self._swarmBestOverall[swarmId] = []
        bestScores = self._swarmBestOverall[swarmId]
        while genIdx >= len(bestScores):
            bestScores.append((None, numpy.inf))
        if errScore < bestScores[genIdx][1]:
            bestScores[genIdx] = (modelID, errScore)
    if not hidden:
        key = (swarmId, genIdx)
        if not key in self._maturedSwarmGens:
            self._modifiedSwarmGens.add(key)
    return errScore
Insert a new entry or update an existing one. If this is an update of an existing entry, then modelParams will be None Parameters: -------------------------------------------------------------------- modelID: globally unique modelID of this model modelParams: params dict for this model, or None if this is just an update of a model that it already previously reported on. See the comments for the createModels() method for a description of this dict. modelParamsHash: hash of the modelParams dict, generated by the worker that put it into the model database. metricResult: value on the optimizeMetric for this model. May be None if we have no results yet. completed: True if the model has completed evaluation, False if it is still running (and these are online results) completionReason: One of the ClientJobsDAO.CMPL_REASON_XXX equates matured: True if this model has matured numRecords: Number of records that have been processed so far by this model. retval: Canonicalized result on the optimize metric
#vtb
def check_email_status(mx_resolver, recipient_address, sender_address,
                       smtp_timeout=10, helo_hostname=None):
    # The ret dict key names and the extended-status regex are assumptions;
    # the string literals were stripped from the source.
    domain = recipient_address[recipient_address.find('@') + 1:]
    if helo_hostname is None:
        helo_hostname = domain
    ret = {'status': 101, 'extended_status': None,
           'message': "The server is unable to connect."}
    records = []
    try:
        records = mx_resolver.get_mx_records(helo_hostname)
    except socket.gaierror:
        ret['status'] = 512
        ret['extended_status'] = "5.1.2 Domain name address resolution failed in MX lookup."
    smtp = smtplib.SMTP(timeout=smtp_timeout)
    for mx in records:
        try:
            connection_status, connection_message = smtp.connect(mx.exchange)
            if connection_status == 220:
                smtp.helo(domain)
                smtp.mail(sender_address)
                status, message = smtp.rcpt(recipient_address)
                ret['status'] = status
                # extended status code such as "5.1.1"; note smtp.rcpt()
                # returns bytes on Python 3
                pattern = re.compile(r'(\d+\.\d+\.\d+)')
                matches = re.match(pattern, message)
                if matches:
                    ret['extended_status'] = matches.group(1)
                ret['message'] = message
                smtp.quit()
                break
        except smtplib.SMTPConnectError:
            ret['status'] = 111
            ret['message'] = "Connection refused or unable to open an SMTP stream."
        except smtplib.SMTPServerDisconnected:
            ret['status'] = 111
            ret['message'] = "SMTP Server disconnected"
        except socket.gaierror:
            ret['status'] = 512
            ret['extended_status'] = "5.1.2 Domain name address resolution failed."
    return ret
Checks if an email might be valid by getting the status from the SMTP server. :param mx_resolver: MXResolver :param recipient_address: string :param sender_address: string :param smtp_timeout: integer :param helo_hostname: string :return: dict
#vtb
def run_breiman2():
    x, y = build_sample_ace_problem_breiman2(500)
    ace_solver = ace.ACESolver()
    ace_solver.specify_data_set(x, y)
    ace_solver.solve()
    try:
        plt = ace.plot_transforms(ace_solver, None)
    except ImportError:
        pass
    plt.subplot(1, 2, 1)
    phi = numpy.sin(2.0 * numpy.pi * x[0])
    # plot labels, legend location, and output filename are assumptions;
    # the string literals were stripped from the source
    plt.plot(x[0], phi, label='analytic')
    plt.legend()
    plt.subplot(1, 2, 2)
    y = numpy.exp(phi)
    plt.plot(y, phi, label='analytic')
    plt.legend(loc='best')
    plt.savefig('breiman2.png')
    return ace_solver
Run Breiman's other sample problem.
#vtb
def lstm_seq2seq_internal_attention_bid_encoder(inputs, targets, hparams, train):
    with tf.variable_scope("lstm_seq2seq_attention_bid_encoder"):
        inputs_length = common_layers.length_from_embedding(inputs)
        inputs = common_layers.flatten4d3d(inputs)
        encoder_outputs, final_encoder_state = lstm_bid_encoder(
            inputs, inputs_length, hparams, train, "encoder")
        shifted_targets = common_layers.shift_right(targets)
        targets_length = common_layers.length_from_embedding(shifted_targets) + 1
        hparams_decoder = copy.copy(hparams)
        hparams_decoder.hidden_size = 2 * hparams.hidden_size
        decoder_outputs = lstm_attention_decoder(
            common_layers.flatten4d3d(shifted_targets), hparams_decoder, train,
            "decoder", final_encoder_state, encoder_outputs,
            inputs_length, targets_length)
        return tf.expand_dims(decoder_outputs, axis=2)
LSTM seq2seq model with attention, main step used for training.
#vtb
def get_keyboard_mapping_unchecked(conn):
    mn, mx = get_min_max_keycode()
    return conn.core.GetKeyboardMappingUnchecked(mn, mx - mn + 1)
Return an unchecked keyboard mapping cookie that can be used to fetch the table of keysyms in the current X environment. :rtype: xcb.xproto.GetKeyboardMappingCookie
#vtb
def tempo_account_get_customers(self, query=None, count_accounts=None):
    # Param keys and endpoint path are reconstructed from the Tempo REST API;
    # the literals were stripped.
    params = {}
    if query is not None:
        params['query'] = query
    if count_accounts is not None:
        params['countAccounts'] = count_accounts
    url = 'rest/tempo-accounts/1/customer'
    return self.get(url, params=params)
Gets all Attributes, or those whose key or name contains a specific substring. Attributes can be a Category or Customer. :param query: OPTIONAL: query for search :param count_accounts: bool OPTIONAL: provide how many associated Accounts with Customer :return: list of customers
#vtb
def line_intersection_2D(abarg, cdarg):
    ((x1, y1), (x2, y2)) = abarg
    ((x3, y3), (x4, y4)) = cdarg
    dx12 = (x1 - x2)
    dx34 = (x3 - x4)
    dy12 = (y1 - y2)
    dy34 = (y3 - y4)
    denom = dx12*dy34 - dy12*dx34
    unit = np.isclose(denom, 0)
    if unit is True:
        return (np.nan, np.nan)
    denom = unit + denom
    q12 = (x1*y2 - y1*x2) / denom
    q34 = (x3*y4 - y3*x4) / denom
    xi = q12*dx34 - q34*dx12
    yi = q12*dy34 - q34*dy12
    if unit is False:
        return (xi, yi)
    elif unit is True:
        return (np.nan, np.nan)
    else:
        xi = np.asarray(xi)
        yi = np.asarray(yi)
        xi[unit] = np.nan
        yi[unit] = np.nan
        return (xi, yi)
line_intersection((a, b), (c, d)) yields the intersection point between the lines that pass through the given pairs of points. If any lines are parallel, (numpy.nan, numpy.nan) is returned; note that a, b, c, and d can all be 2 x n matrices of x and y coordinate row-vectors.
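A worked example: the diagonals of the unit square cross at (0.5, 0.5), while parallel lines yield NaNs (results come back as numpy values):

xi, yi = line_intersection_2D(((0, 0), (1, 1)), ((0, 1), (1, 0)))
print(float(xi), float(yi))    # 0.5 0.5
xi, yi = line_intersection_2D(((0, 0), (1, 0)), ((0, 1), (1, 1)))
print(np.isnan(xi).all())      # True -- parallel lines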
#vtb def pexpect_monkeypatch(): if pexpect.__version__[:3] >= '2.1': return def __del__(self): if not self.closed: try: self.close() except AttributeError: pass pexpect.spawn.__del__ = __del__
Patch pexpect to prevent unhandled exceptions at VM teardown. Calling this function will monkeypatch the pexpect.spawn class and modify its __del__ method to make it more robust in the face of failures that can occur if it is called when the Python VM is shutting down. Since Python may fire __del__ methods arbitrarily late, it's possible for them to execute during the teardown of the Python VM itself. At this point, various builtin modules have been reset to None. Thus, the call to self.close() will trigger an exception because it tries to call os.close(), and os is now None.
### Input: Patch pexpect to prevent unhandled exceptions at VM teardown. Calling this function will monkeypatch the pexpect.spawn class and modify its __del__ method to make it more robust in the face of failures that can occur if it is called when the Python VM is shutting down. Since Python may fire __del__ methods arbitrarily late, it's possible for them to execute during the teardown of the Python VM itself. At this point, various builtin modules have been reset to None. Thus, the call to self.close() will trigger an exception because it tries to call os.close(), and os is now None. ### Response: #vtb def pexpect_monkeypatch(): if pexpect.__version__[:3] >= '2.1': return def __del__(self): if not self.closed: try: self.close() except AttributeError: pass pexpect.spawn.__del__ = __del__
#vtb def get(self): tasks = self._get_avaliable_tasks() if not tasks: return None name, data = tasks[0] self._client.kv.delete(name) return data
Get a task from the queue.
### Input: Get a task from the queue. ### Response: #vtb def get(self): tasks = self._get_avaliable_tasks() if not tasks: return None name, data = tasks[0] self._client.kv.delete(name) return data
#vtb def do_alias(self, arg): args = arg.split() if len(args) == 0: keys = sorted(self.aliases.keys()) for alias in keys: self.message("%s = %s" % (alias, self.aliases[alias])) return if args[0] in self.aliases and len(args) == 1: self.message("%s = %s" % (args[0], self.aliases[args[0]])) else: self.aliases[args[0]] = ' '.join(args[1:])
alias [name [command [parameter parameter ...] ]] Create an alias called 'name' that executes 'command'. The command must *not* be enclosed in quotes. Replaceable parameters can be indicated by %1, %2, and so on, while %* is replaced by all the parameters. If no command is given, the current alias for name is shown. If no name is given, all aliases are listed. Aliases may be nested and can contain anything that can be legally typed at the pdb prompt. Note! You *can* override internal pdb commands with aliases! Those internal commands are then hidden until the alias is removed. Aliasing is recursively applied to the first word of the command line; all other words in the line are left alone. As an example, here are two useful aliases (especially when placed in the .pdbrc file): # Print instance variables (usage "pi classInst") alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k]) # Print instance variables in self alias ps pi self
### Input: alias [name [command [parameter parameter ...] ]] Create an alias called 'name' that executes 'command'. The command must *not* be enclosed in quotes. Replaceable parameters can be indicated by %1, %2, and so on, while %* is replaced by all the parameters. If no command is given, the current alias for name is shown. If no name is given, all aliases are listed. Aliases may be nested and can contain anything that can be legally typed at the pdb prompt. Note! You *can* override internal pdb commands with aliases! Those internal commands are then hidden until the alias is removed. Aliasing is recursively applied to the first word of the command line; all other words in the line are left alone. As an example, here are two useful aliases (especially when placed in the .pdbrc file): # Print instance variables (usage "pi classInst") alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k]) # Print instance variables in self alias ps pi self ### Response: #vtb def do_alias(self, arg): args = arg.split() if len(args) == 0: keys = sorted(self.aliases.keys()) for alias in keys: self.message("%s = %s" % (alias, self.aliases[alias])) return if args[0] in self.aliases and len(args) == 1: self.message("%s = %s" % (args[0], self.aliases[args[0]])) else: self.aliases[args[0]] = ' '.join(args[1:])
#vtb def _set_datapath(self, datapath): if datapath: self._datapath = datapath.rstrip(os.sep) self._fifo = int(stat.S_ISFIFO(os.stat(self.datapath).st_mode)) else: self._datapath = None self._fifo = False
Set a datapath.
### Input: Set a datapath. ### Response: #vtb def _set_datapath(self, datapath): if datapath: self._datapath = datapath.rstrip(os.sep) self._fifo = int(stat.S_ISFIFO(os.stat(self.datapath).st_mode)) else: self._datapath = None self._fifo = False
#vtb def count_snps(mat): snps = np.zeros(4, dtype=np.uint32) snps[0] = np.uint32(\ mat[0, 5] + mat[0, 10] + mat[0, 15] + \ mat[5, 0] + mat[5, 10] + mat[5, 15] + \ mat[10, 0] + mat[10, 5] + mat[10, 15] + \ mat[15, 0] + mat[15, 5] + mat[15, 10]) for i in range(16): if i % 5: snps[1] += mat[i, i] snps[2] = mat[1, 4] + mat[2, 8] + mat[3, 12] +\ mat[4, 1] + mat[6, 9] + mat[7, 13] +\ mat[8, 2] + mat[9, 6] + mat[11, 14] +\ mat[12, 3] + mat[13, 7] + mat[14, 11] snps[3] = (mat.sum() - np.diag(mat).sum()) - snps[2] return snps
get dstats from the count array and return as a float tuple
### Input: get dstats from the count array and return as a float tuple ### Response: #vtb def count_snps(mat): snps = np.zeros(4, dtype=np.uint32) snps[0] = np.uint32(\ mat[0, 5] + mat[0, 10] + mat[0, 15] + \ mat[5, 0] + mat[5, 10] + mat[5, 15] + \ mat[10, 0] + mat[10, 5] + mat[10, 15] + \ mat[15, 0] + mat[15, 5] + mat[15, 10]) for i in range(16): if i % 5: snps[1] += mat[i, i] snps[2] = mat[1, 4] + mat[2, 8] + mat[3, 12] +\ mat[4, 1] + mat[6, 9] + mat[7, 13] +\ mat[8, 2] + mat[9, 6] + mat[11, 14] +\ mat[12, 3] + mat[13, 7] + mat[14, 11] snps[3] = (mat.sum() - np.diag(mat).sum()) - snps[2] return snps
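An illustrative call for count_snps; the comments note which counter each seeded cell feeds, assuming the 16x16 matrix is the 4x4-flattened base-pair layout the index arithmetic implies:
import numpy as np
mat = np.zeros((16, 16), dtype=np.uint32)
mat[0, 5] = 3  # one of the cells enumerated in the snps[0] sum
mat[1, 1] = 2  # diagonal cell away from positions 0, 5, 10, 15 -> snps[1]
mat[1, 4] = 1  # one of the explicitly paired cells -> snps[2]
print(count_snps(mat))  # [3 2 1 3]; the remaining off-diagonal mass lands in snps[3]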
#vtb def lhistogram (inlist,numbins=10,defaultreallimits=None,printextras=0): if (defaultreallimits != None): if type(defaultreallimits) not in [ListType,TupleType] or len(defaultreallimits)==1: lowerreallimit = defaultreallimits upperreallimit = 1.000001 * max(inlist) else: lowerreallimit = defaultreallimits[0] upperreallimit = defaultreallimits[1] binsize = (upperreallimit-lowerreallimit)/float(numbins) else: estbinwidth=(max(inlist)-min(inlist))/float(numbins) + 1 binsize = ((max(inlist)-min(inlist)+estbinwidth))/float(numbins) lowerreallimit = min(inlist) - binsize/2 bins = [0]*(numbins) extrapoints = 0 for num in inlist: try: if (num-lowerreallimit) < 0: extrapoints = extrapoints + 1 else: bintoincrement = int((num-lowerreallimit)/float(binsize)) bins[bintoincrement] = bins[bintoincrement] + 1 except: extrapoints = extrapoints + 1 if (extrapoints > 0 and printextras == 1): print('Points outside given histogram range =',extrapoints) return (bins, lowerreallimit, binsize, extrapoints)
Returns (i) a list of histogram bin counts, (ii) the smallest value of the histogram binning, and (iii) the bin width (the last 2 are not necessarily integers). Default number of bins is 10. If no sequence object is given for defaultreallimits, the routine picks (usually non-pretty) bins spanning all the numbers in the inlist. Usage: lhistogram (inlist, numbins=10, defaultreallimits=None,suppressoutput=0) Returns: list of bin values, lowerreallimit, binsize, extrapoints
### Input: Returns (i) a list of histogram bin counts, (ii) the smallest value of the histogram binning, and (iii) the bin width (the last 2 are not necessarily integers). Default number of bins is 10. If no sequence object is given for defaultreallimits, the routine picks (usually non-pretty) bins spanning all the numbers in the inlist. Usage: lhistogram (inlist, numbins=10, defaultreallimits=None,suppressoutput=0) Returns: list of bin values, lowerreallimit, binsize, extrapoints ### Response: #vtb def lhistogram (inlist,numbins=10,defaultreallimits=None,printextras=0): if (defaultreallimits != None): if type(defaultreallimits) not in [ListType,TupleType] or len(defaultreallimits)==1: lowerreallimit = defaultreallimits upperreallimit = 1.000001 * max(inlist) else: lowerreallimit = defaultreallimits[0] upperreallimit = defaultreallimits[1] binsize = (upperreallimit-lowerreallimit)/float(numbins) else: estbinwidth=(max(inlist)-min(inlist))/float(numbins) + 1 binsize = ((max(inlist)-min(inlist)+estbinwidth))/float(numbins) lowerreallimit = min(inlist) - binsize/2 bins = [0]*(numbins) extrapoints = 0 for num in inlist: try: if (num-lowerreallimit) < 0: extrapoints = extrapoints + 1 else: bintoincrement = int((num-lowerreallimit)/float(binsize)) bins[bintoincrement] = bins[bintoincrement] + 1 except: extrapoints = extrapoints + 1 if (extrapoints > 0 and printextras == 1): print('Points outside given histogram range =',extrapoints) return (bins, lowerreallimit, binsize, extrapoints)
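A quick check of lhistogram as reconstructed above; ListType and TupleType come from the Python 2 types module, so the sketch defines Python 3 stand-ins (they are only consulted when defaultreallimits is given):
ListType, TupleType = list, tuple  # legacy names from the Python 2 types module
data = [1, 2, 2, 3, 3, 3, 4, 9]
print(lhistogram(data, numbins=4))  # ([3, 4, 0, 1], -0.375, 2.75, 0)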
#vtb def get_pip(mov=None, api=None, name=None): if mov is None and api is None: logger.error("need at least one of those") raise ValueError() elif mov is not None and api is not None: logger.error("mov and api are exclusive") raise ValueError() if api is not None: if name is None: logger.error("need a name") raise ValueError() mov = api.new_mov(name) mov.open() if mov is not None: mov._check_open() try: logger.debug(len(Glob().theCollector.collection)) pip = Glob().theCollector.collection['pip'] if name is not None: pip_res = pip[name] elif mov is not None: pip_res = pip[mov.product] logger.debug("pip found in the collection") return pip_res except KeyError: logger.debug("pip not found in the collection") records = [] intervals = [10, 20, 30] def _check_price(interval=10): timeout = time.time() + interval while time.time() < timeout: records.append(mov.get_price()) time.sleep(0.5) for interval in intervals: _check_price(interval) if min(records) == max(records): logger.debug("no variation in %d seconds" % interval) if interval == intervals[-1]: raise TimeoutError("no variation") else: break for price in records: if 'best_price' not in locals(): best_price = price if len(str(price)) > len(str(best_price)): logger.debug("found new best_price %f" % price) best_price = price pip = get_number_unit(best_price) Glob().pipHandler.add_val({mov.product: pip}) return pip
get value of pip
### Input: get value of pip ### Response: #vtb def get_pip(mov=None, api=None, name=None): if mov is None and api is None: logger.error("need at least one of those") raise ValueError() elif mov is not None and api is not None: logger.error("mov and api are exclusive") raise ValueError() if api is not None: if name is None: logger.error("need a name") raise ValueError() mov = api.new_mov(name) mov.open() if mov is not None: mov._check_open() try: logger.debug(len(Glob().theCollector.collection)) pip = Glob().theCollector.collection['pip'] if name is not None: pip_res = pip[name] elif mov is not None: pip_res = pip[mov.product] logger.debug("pip found in the collection") return pip_res except KeyError: logger.debug("pip not found in the collection") records = [] intervals = [10, 20, 30] def _check_price(interval=10): timeout = time.time() + interval while time.time() < timeout: records.append(mov.get_price()) time.sleep(0.5) for interval in intervals: _check_price(interval) if min(records) == max(records): logger.debug("no variation in %d seconds" % interval) if interval == intervals[-1]: raise TimeoutError("no variation") else: break for price in records: if 'best_price' not in locals(): best_price = price if len(str(price)) > len(str(best_price)): logger.debug("found new best_price %f" % price) best_price = price pip = get_number_unit(best_price) Glob().pipHandler.add_val({mov.product: pip}) return pip
#vtb def _dict_contents(self, use_dict=None, as_class=dict): if _debug: Object._debug("dict_contents use_dict=%r as_class=%r", use_dict, as_class) if use_dict is None: use_dict = as_class() klasses = list(self.__class__.__mro__) klasses.reverse() property_names = [] properties_seen = set() for c in klasses: for prop in getattr(c, 'properties', []): if prop.identifier not in properties_seen: property_names.append(prop.identifier) properties_seen.add(prop.identifier) for property_name in property_names: property_value = self._properties.get(property_name).ReadProperty(self) if property_value is None: continue if hasattr(property_value, "dict_contents"): property_value = property_value.dict_contents(as_class=as_class) use_dict.__setitem__(property_name, property_value) return use_dict
Return the contents of an object as a dict.
### Input: Return the contents of an object as a dict. ### Response: #vtb def _dict_contents(self, use_dict=None, as_class=dict): if _debug: Object._debug("dict_contents use_dict=%r as_class=%r", use_dict, as_class) if use_dict is None: use_dict = as_class() klasses = list(self.__class__.__mro__) klasses.reverse() property_names = [] properties_seen = set() for c in klasses: for prop in getattr(c, 'properties', []): if prop.identifier not in properties_seen: property_names.append(prop.identifier) properties_seen.add(prop.identifier) for property_name in property_names: property_value = self._properties.get(property_name).ReadProperty(self) if property_value is None: continue if hasattr(property_value, "dict_contents"): property_value = property_value.dict_contents(as_class=as_class) use_dict.__setitem__(property_name, property_value) return use_dict
#vtb def strip_docstrings(tokens): stack = [] state = 'wait_string' for t in tokens: typ = t[0] if state == 'wait_string': if typ in (tokenize.NL, tokenize.COMMENT): yield t elif typ in (tokenize.DEDENT, tokenize.INDENT, tokenize.STRING): stack.append(t) elif typ == tokenize.NEWLINE: stack.append(t) start_line, end_line = stack[0][2][0], stack[-1][3][0]+1 for i in range(start_line, end_line): yield tokenize.NL, '\n', (i, 0), (i,1), '\n' for t in stack: if t[0] in (tokenize.DEDENT, tokenize.INDENT): yield t[0], t[1], (i+1, t[2][1]), (i+1, t[3][1]), t[4] del stack[:] else: stack.append(t) for t in stack: yield t del stack[:] state = 'wait_newline' elif state == 'wait_newline': if typ == tokenize.NEWLINE: state = 'wait_string' yield t
Replace docstring tokens with NL tokens in a `tokenize` stream. Any STRING token not part of an expression is deemed a docstring. Indented docstrings are not yet recognised.
### Input: Replace docstring tokens with NL tokens in a `tokenize` stream. Any STRING token not part of an expression is deemed a docstring. Indented docstrings are not yet recognised. ### Response: #vtb def strip_docstrings(tokens): stack = [] state = 'wait_string' for t in tokens: typ = t[0] if state == 'wait_string': if typ in (tokenize.NL, tokenize.COMMENT): yield t elif typ in (tokenize.DEDENT, tokenize.INDENT, tokenize.STRING): stack.append(t) elif typ == tokenize.NEWLINE: stack.append(t) start_line, end_line = stack[0][2][0], stack[-1][3][0]+1 for i in range(start_line, end_line): yield tokenize.NL, '\n', (i, 0), (i,1), '\n' for t in stack: if t[0] in (tokenize.DEDENT, tokenize.INDENT): yield t[0], t[1], (i+1, t[2][1]), (i+1, t[3][1]), t[4] del stack[:] else: stack.append(t) for t in stack: yield t del stack[:] state = 'wait_newline' elif state == 'wait_newline': if typ == tokenize.NEWLINE: state = 'wait_string' yield t
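A small demonstration of strip_docstrings with the string-based tokenizer; the state names above are reconstructed placeholders (any two distinct markers behave the same), and the STRING token for the docstring vanishes from the stream:
import io, tokenize
src = 'def f():\n    "docstring"\n    return 1\n'
toks = strip_docstrings(tokenize.generate_tokens(io.StringIO(src).readline))
print([tokenize.tok_name[t[0]] for t in toks])  # no STRING entry remains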
#vtb def get_brandings(self): connection = Connection(self.token) connection.set_url(self.production, self.BRANDINGS_URL) return connection.get_request()
Get all account brandings @return List of brandings
### Input: Get all account brandings @return List of brandings ### Response: #vtb def get_brandings(self): connection = Connection(self.token) connection.set_url(self.production, self.BRANDINGS_URL) return connection.get_request()
#vtb def CopyVcardFields(new_vcard, auth_vcard, field_names): for field in field_names: value_list = auth_vcard.contents.get(field) new_vcard = SetVcardField(new_vcard, field, value_list) return new_vcard
Copy vCard field values from an authoritative vCard into a new one.
### Input: Copy vCard field values from an authoritative vCard into a new one. ### Response: #vtb def CopyVcardFields(new_vcard, auth_vcard, field_names): for field in field_names: value_list = auth_vcard.contents.get(field) new_vcard = SetVcardField(new_vcard, field, value_list) return new_vcard
#vtb def F_(self, X): if self._interpol: if not hasattr(self, '_F_interp'): if self._lookup: x = self._x_lookup F_x = self._f_lookup else: x = np.linspace(0, self._max_interp_X, self._num_interp_X) F_x = self._F(x) self._F_interp = interp.interp1d(x, F_x, kind='linear', axis=-1, copy=False, bounds_error=False, fill_value=0, assume_sorted=True) return self._F_interp(X) else: return self._F(X)
computes h() :param X: :return:
### Input: computes h() :param X: :return: ### Response: #vtb def F_(self, X): if self._interpol: if not hasattr(self, '_F_interp'): if self._lookup: x = self._x_lookup F_x = self._f_lookup else: x = np.linspace(0, self._max_interp_X, self._num_interp_X) F_x = self._F(x) self._F_interp = interp.interp1d(x, F_x, kind='linear', axis=-1, copy=False, bounds_error=False, fill_value=0, assume_sorted=True) return self._F_interp(X) else: return self._F(X)
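The entry above lazily builds a scipy interpolator on first call and reuses it afterwards; here is a self-contained sketch of the same caching pattern, with a hypothetical square-root profile standing in for the real _F:
import numpy as np
from scipy import interpolate as interp

class Profile:
    def _F(self, x):
        return np.sqrt(x)  # hypothetical stand-in for the real profile function
    def F_(self, X):
        if not hasattr(self, '_F_interp'):  # build the lookup table only once
            x = np.linspace(0, 10, 1001)
            self._F_interp = interp.interp1d(x, self._F(x), kind='linear', bounds_error=False, fill_value=0)
        return self._F_interp(X)

print(Profile().F_([0.25, 4.0]))  # [0.5 2. ]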
#vtb def write_info (self, url_data): sep = u"<br/>"+os.linesep text = sep.join(cgi.escape(x) for x in url_data.info) self.writeln(u'<tr><td valign="bottom">' + self.part("info")+ u"</td><td>"+text+u"</td></tr>")
Write url_data.info.
### Input: Write url_data.info. ### Response: #vtb def write_info (self, url_data): sep = u"<br/>"+os.linesep text = sep.join(cgi.escape(x) for x in url_data.info) self.writeln(u'<tr><td valign="bottom">' + self.part("info")+ u"</td><td>"+text+u"</td></tr>")
#vtb def status(deps=DEPENDENCIES, linesep=os.linesep): maxwidth = 0 col1 = [] col2 = [] for dependency in deps: title1 = dependency.modname title1 += ' ' + dependency.required_version col1.append(title1) maxwidth = max([maxwidth, len(title1)]) col2.append(dependency.get_installed_version()) text = "" for index in range(len(deps)): text += col1[index].ljust(maxwidth) + ':  ' + col2[index] + linesep return text[:-1]
Return a status of dependencies
### Input: Return a status of dependencies ### Response: #vtb def status(deps=DEPENDENCIES, linesep=os.linesep): maxwidth = 0 col1 = [] col2 = [] for dependency in deps: title1 = dependency.modname title1 += ' ' + dependency.required_version col1.append(title1) maxwidth = max([maxwidth, len(title1)]) col2.append(dependency.get_installed_version()) text = "" for index in range(len(deps)): text += col1[index].ljust(maxwidth) + ':  ' + col2[index] + linesep return text[:-1]
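A runnable sketch for status; the Dep class is a hypothetical stand-in for whatever dependency objects the surrounding module defines, and DEPENDENCIES must exist (even as an empty list) before the def above is evaluated, since it is read as a default argument:
import os
DEPENDENCIES = []  # placeholder so the default argument resolves

class Dep:
    def __init__(self, modname, required_version, installed):
        self.modname = modname
        self.required_version = required_version
        self._installed = installed
    def get_installed_version(self):
        return self._installed

print(status([Dep('numpy', '>=1.7', '1.24.0'), Dep('scipy', '>=0.17', '1.10.1')]))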