#vtb
def add_media_description(self, media_description):
    if self.get_media_descriptions_metadata().is_read_only():
        raise NoAccess()
    # the original key literal was lost; 'media_descriptions' is a best-guess reconstruction
    self.add_or_replace_value('media_descriptions', media_description)
Adds a media_description. arg: media_description (displayText): the new media_description raise: InvalidArgument - ``media_description`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` raise: NullArgument - ``media_description`` is ``null`` *compliance: mandatory -- This method must be implemented.*
#vtb def inferObjectsWithRandomMovements(self): for objectName, objectFeatures in self.objects.iteritems(): self.reset() inferred = False prevTouchSequence = None for _ in xrange(4): while True: touchSequence = list(objectFeatures) random.shuffle(touchSequence) if prevTouchSequence is not None: if touchSequence[0] == prevTouchSequence[-1]: continue break for i, feature in enumerate(touchSequence): locationOnObject = (feature["top"] + feature["height"]/2, feature["left"] + feature["width"]/2) self.move(objectName, locationOnObject) featureName = feature["name"] featureSDR = self.features[featureName] self.sense(featureSDR, learn=False) inferred = ( set(self.objectLayer.getActiveCells()) == set(self.objectRepresentations[objectName]) and set(self.inputLayer.getActiveCells()) == set(self.inputRepresentations[(objectName, locationOnObject, featureName)]) and set(self.getActiveLocationCells()) == set(self.locationRepresentations[(objectName, locationOnObject)])) if inferred: break prevTouchSequence = touchSequence if inferred: break
Infer each object without any location input.
#vtb
def _element_to_bson(key, value, check_keys, opts):
    if not isinstance(key, string_type):
        raise InvalidDocument("documents must have only string keys, "
                              "key was %r" % (key,))
    if check_keys:
        if key.startswith("$"):
            raise InvalidDocument("key %r must not start with '$'" % (key,))
        if "." in key:
            raise InvalidDocument("key %r must not contain '.'" % (key,))
    name = _make_name(key)
    return _name_value_to_bson(name, value, check_keys, opts)
Encode a single key, value pair.
#vtb
def get_doc(self, objtxt):
    if self._reading:
        return
    wait_loop = QEventLoop()
    self.sig_got_reply.connect(wait_loop.quit)
    # '%s' placeholder restored so the string formatting actually receives objtxt
    self.silent_exec_method("get_ipython().kernel.get_doc('%s')" % objtxt)
    wait_loop.exec_()
    self.sig_got_reply.disconnect(wait_loop.quit)
    wait_loop = None
    return self._kernel_reply
Get object documentation dictionary
#vtb
def mod_watch(name, **kwargs):
    # dictionary keys and the comment text were lost in extraction and are reconstructed here
    sfun = kwargs.pop('sfun', None)
    mapfun = {'purged': purged,
              'latest': latest,
              'removed': removed,
              'installed': installed}
    if sfun in mapfun:
        return mapfun[sfun](name, **kwargs)
    return {'name': name,
            'changes': {},
            'comment': 'pkg.{0} does not work with the watch requisite'.format(sfun),
            'result': False}
Install/reinstall a package based on a watch requisite .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered.
#vtb def loadNetworkbyName(self, name, callback=None, errback=None): import ns1.ipam network = ns1.ipam.Network(self.config, name=name) return network.load(callback=callback, errback=errback)
Load an existing Network by name into a high level Network object :param str name: Name of an existing Network
#vtb def _set_hw_state(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=hw_state.hw_state, is_container=, presence=False, yang_name="hw-state", rest_name="hw-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None}}, namespace=, defining_module=, yang_type=, is_config=True) except (TypeError, ValueError): raise ValueError({ : , : "container", : , }) self.__hw_state = t if hasattr(self, ): self._set()
Setter method for hw_state, mapped from YANG variable /hw_state (container) If this variable is read-only (config: false) in the source YANG file, then _set_hw_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_hw_state() directly. YANG Description: HW Route Info
#vtb def getPotential(self, columnIndex, potential): assert(columnIndex < self._numColumns) potential[:] = self._potentialPools[columnIndex]
:param columnIndex: (int) column index to get potential for. :param potential: (list) will be overwritten with column potentials. Must match the number of inputs.
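A call-pattern sketch only: `sp` stands in for an already-initialized pooler instance (assumed, not shown here), and the input count is taken to be 10 for illustration.

import numpy as np

potential = np.zeros(10)       # buffer length must match the pooler's number of inputs
sp.getPotential(2, potential)  # column 2's potential pool is written into the buffer in place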
#vtb def name(self, new_name): if self.script_file: content = self.startup_script content = content.replace(self._name, new_name) escaped_name = new_name.replace(, ) content = re.sub(r"^set pcname .+$", "set pcname " + escaped_name, content, flags=re.MULTILINE) self.startup_script = content super(VPCSVM, VPCSVM).name.__set__(self, new_name)
Sets the name of this VPCS VM. :param new_name: name
#vtb def put(self, url, html, cache_info=None): key = hashlib.md5(url).hexdigest() try: self._cache_set(key, html) except: self.exception("Failed to write cache") return self.update(url, cache_info)
Put response into cache :param url: Url to cache :type url: str | unicode :param html: HTML content of url :type html: str | unicode :param cache_info: Cache Info (default: None) :type cache_info: floscraper.models.CacheInfo :rtype: None
#vtb
def simulate_leapfrog(config_func: Callable, accel_func: Callable, t0: date, t1: date, steps_per_day: int):
    N: int = (t1 - t0).days * steps_per_day
    q0, v0 = config_func(t0)
    dims: int = q0.shape[1]
    dt = float(day2sec) / float(steps_per_day)
    dt2: float = dt * dt
    q: np.ndarray = np.zeros((N, dims))
    v: np.ndarray = np.zeros((N, dims))
    q[0, :] = q0
    v[0, :] = v0
    a: np.ndarray = np.zeros((N, dims))
    a[0, :] = accel_func(q[0])
    # progress message reconstructed; the original f-string text was lost
    print(f'Leapfrog integration: {N} steps')
    for i in tqdm(range(N-1)):
        q[i+1, :] = q[i, :] + v[i, :] * dt + 0.5 * a[i, :] * dt2
        a[i+1, :] = accel_func(q[i+1])
        v[i+1, :] = v[i, :] + 0.5 * (a[i, :] + a[i+1, :]) * dt
    return q, v
Simulate the earth-sun system from t0 to t1 using Leapfrog Integration. INPUTS: config_func: function taking a date or date range and returning position and velocity of bodies accel_func: function taking positions of the bodies and returning their accelerations t0: start date of the simulation; a python date t1: end date of the simulation (exclusive); a python date dt: time step in days. num_bodies: the number of celestial bodies in the simulation
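To make the kick-drift update above concrete, here is a minimal, self-contained sketch of the same velocity-Verlet scheme applied to a simple harmonic oscillator (plain Python, assumed step size and horizon; not the author's solar-system setup):

dt, steps = 0.01, 1000
x, v = 1.0, 0.0                       # initial position and velocity
a = -x                                # acceleration for x'' = -x
for _ in range(steps):
    x = x + v * dt + 0.5 * a * dt**2  # drift: advance the position
    a_new = -x                        # re-evaluate acceleration at the new position
    v = v + 0.5 * (a + a_new) * dt    # kick: average of old and new acceleration
    a = a_new
print(x, v, 0.5 * (v**2 + x**2))      # energy stays near 0.5, showing the scheme's stability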
#vtb def print_about(self): filepath = os.path.join(self.suite_path, "bin", self.tool_name) print "Tool: %s" % self.tool_name print "Path: %s" % filepath print "Suite: %s" % self.suite_path msg = "%s (%r)" % (self.context.load_path, self.context_name) print "Context: %s" % msg variants = self.context.get_tool_variants(self.tool_name) if variants: if len(variants) > 1: self._print_conflicting(variants) else: variant = iter(variants).next() print "Package: %s" % variant.qualified_package_name return 0
Print an info message about the tool.
#vtb
def search(query, team=None):
    if team is None:
        team = _find_logged_in_team()
    if team is not None:
        session = _get_session(team)
        response = session.get("%s/api/search/" % get_registry_url(team), params=dict(q=query))
        print("* Packages in team %s" % team)
        packages = response.json()['packages']  # key reconstructed; the original literal was lost
        for pkg in packages:
            print(("%s:" % team) + ("%(owner)s/%(name)s" % pkg))
        if len(packages) == 0:
            print("(No results)")
    print("* Packages in public cloud")
    public_session = _get_session(None)
    response = public_session.get("%s/api/search/" % get_registry_url(None), params=dict(q=query))
    packages = response.json()['packages']
    for pkg in packages:
        print("%(owner)s/%(name)s" % pkg)
    if len(packages) == 0:
        print("(No results)")
Search for packages
#vtb
def messages(self):
    if self._messages is None:
        self._messages = MessageList(
            self._version,
            session_sid=self._solution['sid'],  # key reconstructed; the original literal was lost
        )
    return self._messages
Access the messages :returns: twilio.rest.messaging.v1.session.message.MessageList :rtype: twilio.rest.messaging.v1.session.message.MessageList
#vtb
def dumps(number):
    if not isinstance(number, integer_types):
        raise TypeError('number must be an integer')  # error message reconstructed
    if number < 0:
        return '-' + dumps(-number)
    value = ''
    while number != 0:
        number, index = divmod(number, len(alphabet))
        value = alphabet[index] + value
    return value or '0'
Dumps an integer into a base36 string. :param number: the 10-based integer. :returns: the base36 string.
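A short usage sketch of the function above, assuming the module-level `alphabet` is the conventional 0-9a-z string (not shown in the snippet):

alphabet = '0123456789abcdefghijklmnopqrstuvwxyz'  # assumed constant

print(dumps(0))      # '0'
print(dumps(35))     # 'z'
print(dumps(36))     # '10'
print(dumps(-1295))  # '-zz'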
#vtb def reset_stats(self): scores = list(itertools.chain.from_iterable([v.total_scores for v in self._runners])) for v in self._runners: v.total_scores.clear() try: return np.mean(scores), np.max(scores) except Exception: logger.exception("Cannot compute total scores in EnvRunner.") return None, None
Returns: mean, max: two stats of the runners, to be added to backend
#vtb async def dump_variant(self, elem, elem_type=None, params=None, obj=None): fvalue = None if isinstance(elem, x.VariantType) or elem_type.WRAPS_VALUE: try: self.tracker.push_variant(elem.variant_elem_type) fvalue = { elem.variant_elem: await self._dump_field(getattr(elem, elem.variant_elem), elem.variant_elem_type, obj=obj) } self.tracker.pop() except Exception as e: raise helpers.ArchiveException(e, tracker=self.tracker) from e else: try: fdef = elem_type.find_fdef(elem_type.f_specs(), elem) self.tracker.push_variant(fdef[1]) fvalue = { fdef[0]: await self._dump_field(elem, fdef[1], obj=obj) } self.tracker.pop() except Exception as e: raise helpers.ArchiveException(e, tracker=self.tracker) from e return fvalue
Dumps variant type to the writer. Supports both wrapped and raw variant. :param elem: :param elem_type: :param params: :param obj: :return:
#vtb def ParseFileObject(self, parser_mediator, file_object): file_offset = 0 file_size = file_object.get_size() record_map = self._GetDataTypeMap() while file_offset < file_size: try: pls_record, record_data_size = self._ReadStructureFromFileObject( file_object, file_offset, record_map) except (ValueError, errors.ParseError) as exception: if file_offset == 0: raise errors.UnableToParseFile() parser_mediator.ProduceExtractionWarning(( ).format(file_offset, exception)) break if file_offset == 0 and not self._VerifyRecord(pls_record): raise errors.UnableToParseFile() event_data = PlsRecallEventData() event_data.database_name = pls_record.database_name.rstrip() event_data.sequence_number = pls_record.sequence_number event_data.offset = file_offset event_data.query = pls_record.query.rstrip() event_data.username = pls_record.username.rstrip() date_time = dfdatetime_delphi_date_time.DelphiDateTime( timestamp=pls_record.last_written_time) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data) file_offset += record_data_size
Parses a PLSRecall.dat file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): a file-like object. Raises: UnableToParseFile: when the file cannot be parsed.
#vtb def filter_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) def visible(app): return VisibleFilter(app, conf) return visible
Returns a WSGI filter app for use with paste.deploy.
#vtb async def echo_all(app, message): for address in app.kv.get_prefix().values(): host, port = address.decode().split() port = int(port) await tcp_echo_client(message, loop, host, port)
Send and recieve a message from all running echo servers
#vtb def label(self): with self.selenium.context(self.selenium.CONTEXT_CHROME): return self.root.get_attribute("label")
Provide access to the notification label. Returns: str: The notification label
#vtb def get_setup_version(reponame): from param.version import Version return Version.setup_version(os.path.dirname(__file__),reponame,archive_commit="$Format:%h$")
Use autover to get up to date version.
#vtb def gssa(model, maxit=100, tol=1e-8, initial_dr=None, verbose=False, n_sim=10000, deg=3, damp=0.1, seed=42): if deg < 0 or deg > 5: raise ValueError("deg must be in [1, 5]") if damp < 0 or damp > 1: raise ValueError("damp must be in [0, 1]") t1 = time.time() g = model.__original_functions__[] g_gu = model.__original_gufunctions__[] h_gu = model.__original_gufunctions__[] d_gu = model.__original_gufunctions__[] p = model.calibration[] n_s = len(model.symbols["states"]) n_x = len(model.symbols["controls"]) n_z = len(model.symbols["expectations"]) n_eps = len(model.symbols["shocks"]) s0 = model.calibration["states"] x0 = model.calibration["controls"] if initial_dr is None: drp = approximate_controls(model) else: drp = initial_dr distrib = model.get_distribution() nodes, weights = distrib.discretize() np.random.seed(seed) distrib = model.get_distribution() sigma = distrib.sigma epsilon = np.random.multivariate_normal(np.zeros(n_eps), sigma, n_sim) init_sim = simulate(model, drp, horizon=n_sim, return_array=True, forcing_shocks=epsilon) s_sim = init_sim[:, 0, 0:n_s] x_sim = init_sim[:, 0, n_s:n_s + n_x] Phi_sim = complete_polynomial(s_sim.T, deg).T coefs = np.ascontiguousarray(lstsq(Phi_sim, x_sim)[0]) z_sim = np.empty((n_sim, n_z)) S = np.empty_like(s_sim) X = np.empty_like(x_sim) H = np.empty_like(z_sim) new_x = np.empty_like(x_sim) s_sim[0, :] = s0 x_sim[0, :] = x0 Phi_t = np.empty(n_complete(n_s, deg)) @jit(nopython=True) def simulate_states_controls(s, x, Phi_t, coefs): for t in range(1, n_sim): g(s[t - 1, :], x[t - 1, :], epsilon[t, :], p, s[t, :]) _complete_poly_impl_vec(s[t, :], deg, Phi_t) x[t, :] = Phi_t @coefs it = 0 err = 10.0 err_0 = 10 if verbose: headline = headline = headline.format(, , , ) stars = * len(headline) print(stars) print(headline) print(stars) fmt_str = while err > tol and it <= maxit: t_start = time.time() simulate_states_controls(s_sim, x_sim, Phi_t, coefs) z_sim[:, :] = 0.0 for i in range(weights.shape[0]): e = nodes[i, :] g_gu(s_sim, x_sim, e, p, S) _complete_poly_impl(S.T, deg, Phi_sim.T) np.dot(Phi_sim, coefs, out=X) h_gu(S, X, p, H) z_sim += weights[i] * H d_gu(s_sim, z_sim, p, new_x) _complete_poly_impl(s_sim.T, deg, Phi_sim.T) new_coefs = np.ascontiguousarray(lstsq(Phi_sim, new_x)[0]) err = (abs(new_x - x_sim).max()) x_sim[:, :] = new_x coefs = (1 - damp) * new_coefs + damp * coefs if verbose: err_SA = err / err_0 err_0 = err t_finish = time.time() elapsed = t_finish - t_start if verbose: print(fmt_str.format(it, err, err_SA, elapsed)) it += 1 if it == maxit: warnings.warn(UserWarning("Maximum number of iterations reached")) t2 = time.time() if verbose: print(stars) print(.format(t2 - t1)) print(stars) cp = CompletePolynomial(deg, len(s0)) cp.fit_values(s_sim, x_sim) return cp
Sketch of algorithm: 0. Choose levels for the initial states and the simulation length (n_sim) 1. Obtain an initial decision rule -- here using first order perturbation 2. Draw a sequence of innovations epsilon 3. Iterate on the following steps: - Use the epsilons, initial states, and proposed decision rule to simulate model forward. Will leave us with time series of states and controls - Evaluate expectations using quadrature - Use direct response to get alternative proposal for controls - Regress updated controls on the simulated states to get proposal coefficients. New coefficients are convex combination of previous coefficients and proposal coefficients. Weights controlled by damp, where damp is the weight on the old coefficients. This should be fairly low to increase chances of convergence. - Check difference between the simulated series of controls and the direct response version of controls
#vtb def on_scenario_directory_radio_toggled(self, flag): if flag: self.output_directory.setText(self.source_directory.text()) self.output_directory_chooser.setEnabled(not flag)
Autoconnect slot activated when scenario_directory_radio is checked. :param flag: Flag indicating whether the checkbox was toggled on or off. :type flag: bool
#vtb def __build_parser_for_fileobject_and_desiredtype(self, obj_on_filesystem: PersistedObject, object_typ: Type[T], logger: Logger = None) -> Parser: object_type = get_base_generic_type(object_typ) matching, no_type_match_but_ext_match, no_ext_match_but_type_match, no_match = \ self.find_all_matching_parsers(strict=self.is_strict, desired_type=object_type, required_ext=obj_on_filesystem.ext) matching_parsers = matching[0] + matching[1] + matching[2] if len(matching_parsers) == 0: if len(no_ext_match_but_type_match) > 0: raise NoParserFoundForObjectExt.create(obj_on_filesystem, object_type, set([ext_ for ext_set in [p.supported_exts for p in no_ext_match_but_type_match] for ext_ in ext_set])) else: raise NoParserFoundForObjectType.create(obj_on_filesystem, object_type, set([typ_ for typ_set in [p.supported_types for p in no_type_match_but_ext_match] for typ_ in typ_set])) elif len(matching_parsers) == 1: return matching_parsers[0] else: return CascadingParser(list(reversed(matching_parsers)))
Builds from the registry, a parser to parse object obj_on_filesystem as an object of type object_type. To do that, it iterates through all registered parsers in the list in reverse order (last inserted first), and checks if they support the provided object format (single or multifile) and type. If several parsers match, it returns a cascadingparser that will try them in order. :param obj_on_filesystem: :param object_typ: :param logger: :return:
#vtb def _t_of_e(self, a0=None, t_start=None, f0=None, ef=None, t_obs=5.0): if ef is None: ef = np.ones_like(self.e0)*0.0000001 beta = 64.0/5.0*self.m1*self.m2*(self.m1+self.m2) e_vals = np.asarray([np.linspace(ef[i], self.e0[i], self.num_points) for i in range(len(self.e0))]) integrand = self._find_integrand(e_vals) integral = np.asarray([np.trapz(integrand[:, i:], x=e_vals[:, i:]) for i in range(e_vals.shape[1])]).T if a0 is None and f0 is None: a0 = (19./12.*t_start*beta*1/integral[:, 0])**(1./4.) * self._f_e(e_vals[:, -1]) elif a0 is None: a0 = ((self.m1 + self.m2)/self.f0**2)**(1./3.) c0 = self._c0_func(a0, self.e0) a_vals = c0[:, np.newaxis]*self._f_e(e_vals) delta_t = 12./19*c0[:, np.newaxis]**4/beta[:, np.newaxis]*integral return e_vals, a_vals, delta_t
Rearranged versions of Peters equations This function calculates the semi-major axis and eccentricity over time.
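For context, the orbit-averaged Peters (1964) equations that this routine rearranges are, in the geometrised units implied by the beta = (64/5) m_1 m_2 (m_1 + m_2) factor above:

\left\langle \frac{da}{dt} \right\rangle = -\frac{64}{5}\,\frac{m_1 m_2 (m_1+m_2)}{a^3 (1-e^2)^{7/2}}\left(1 + \frac{73}{24}e^2 + \frac{37}{96}e^4\right)

\left\langle \frac{de}{dt} \right\rangle = -\frac{304}{15}\,\frac{m_1 m_2 (m_1+m_2)}{a^4 (1-e^2)^{5/2}}\,e\left(1 + \frac{121}{304}e^2\right)

together with the closed-form coupling a(e) = c_0\, e^{12/19} (1-e^2)^{-1} \left(1 + \frac{121}{304}e^2\right)^{870/2299}, which is presumably what `_f_e` and `_c0_func` implement.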
#vtb def set_outflow_BC(self, pores, mode=): r mode = self._parse_mode(mode, allowed=[, , ], single=True) pores = self._parse_indices(pores) network = self.project.network phase = self.project.phases()[self.settings[]] throats = network.find_neighbor_throats(pores=pores) C12 = network[][throats] P12 = phase[self.settings[]][C12] gh = phase[self.settings[]][throats] Q12 = -gh * np.diff(P12, axis=1).squeeze() Qp = np.zeros(self.Np) np.add.at(Qp, C12[:, 0], -Q12) np.add.at(Qp, C12[:, 1], Q12) if ( not in self.keys()) or (mode == ): self[] = np.nan self[][pores] = Qp[pores]
r""" Adds outflow boundary condition to the selected pores. Outflow condition simply means that the gradient of the solved quantity does not change, i.e. is 0.
#vtb def dims(x): if isinstance(x, tf.TensorShape): return x.dims r = tf.TensorShape(x).dims return None if r is None else list(map(tf.compat.dimension_value, r))
Returns a list of dimension sizes, or `None` if `rank` is unknown. For more details, see `help(tf.TensorShape.dims)`. Args: x: object representing a shape; convertible to `tf.TensorShape`. Returns: shape_as_list: list of sizes or `None` values representing each dimensions size if known. A size is `tf.Dimension` if input is a `tf.TensorShape` and an `int` otherwise.
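A brief usage sketch of the helper above (requires TensorFlow; outputs are indicative):

import tensorflow as tf

print(dims(tf.TensorShape([3, None, 2])))  # sizes 3, None, 2 (tf.Dimension entries for a TensorShape input)
print(dims([4, 5]))                        # [4, 5] as plain ints
print(dims(tf.TensorShape(None)))          # None -- rank unknown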
#vtb def msgmerge(self, locale_file, po_string): cmd = "msgmerge -q %s -" % locale_file p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (msg, err) = p.communicate(input=po_string) if err: logging.warning("%s \nfile: %s\npostring: %s" % (err, locale_file, po_string)) return msg
Runs msgmerge on a locale_file and po_string
#vtb def build_board_checkers(): grd = Grid(8,8, ["B","W"]) for c in range(4): grd.set_tile(0,(c*2) - 1, "B") grd.set_tile(1,(c*2) - 0, "B") grd.set_tile(6,(c*2) + 1, "W") grd.set_tile(7,(c*2) - 0, "W") print(grd) return grd
builds a checkers starting board

Printing Grid
0 B 0 B 0 B 0 B
B 0 B 0 B 0 B 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 W 0 W 0 W 0 W
W 0 W 0 W 0 W 0
#vtb def merge_with(self, other): other = as_shape(other) if self._dims is None: return other else: try: self.assert_same_rank(other) new_dims = [] for i, dim in enumerate(self._dims): new_dims.append(dim.merge_with(other[i])) return TensorShape(new_dims) except ValueError: raise ValueError("Shapes %s and %s are not convertible" % (self, other))
Returns a `TensorShape` combining the information in `self` and `other`. The dimensions in `self` and `other` are merged elementwise, according to the rules defined for `Dimension.merge_with()`. Args: other: Another `TensorShape`. Returns: A `TensorShape` containing the combined information of `self` and `other`. Raises: ValueError: If `self` and `other` are not convertible.
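A small sketch of the merge semantics, assuming the TensorShape/Dimension classes defined in this module are importable; the shapes are illustrative only:

a = TensorShape([2, None, 3])
b = TensorShape([None, 5, 3])
print(a.merge_with(b))          # (2, 5, 3): each unknown dimension adopts the known size

try:
    TensorShape([2]).merge_with(TensorShape([3]))
except ValueError as exc:
    print(exc)                  # incompatible sizes are reported as not convertible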
#vtb def _post(url, headers={}, data=None, files=None): try: response = requests.post(url, headers=headers, data=data, files=files, verify=VERIFY_SSL) return _process_response(response) except requests.exceptions.RequestException as e: _log_and_raise_exception(, e)
Tries to POST data to an endpoint
#vtb
def is_entailed_by(self, other):
    for (s_key, s_val) in self:
        if s_key in other:
            if not hasattr(other[s_key], 'implies'):
                raise Exception("Cell for %s is missing implies()" % s_key)
            if not other[s_key].implies(s_val):
                return False
        else:
            return False
    return True
Given two beliefstates, returns True iff the calling instance implies the other beliefstate, meaning it contains at least the same structure (for all structures) and all values (for all defined values). Inverse of `entails`. Note: this only compares the items in the DictCell, not `pos`, `environment_variables` or `deferred_effects`.
#vtb def request(self, action, data={}, headers={}, method=): headers = { "Authorization": "Bearer " + self.token, "Content-Type": "application/json", "X-Version": "1", "Accept": "application/json" } return Transport.request(self, action, data, headers, method)
Append the REST headers to every request
#vtb def process_inlines(parser, token): args = token.split_contents() if not len(args) in (2, 4, 6): raise template.TemplateSyntaxError("%r tag requires either 1, 3 or 5 arguments." % args[0]) var_name = args[1] ALLOWED_ARGS = [, ] kwargs = { : None } if len(args) > 2: tuples = zip(*[args[2:][i::2] for i in range(2)]) for k,v in tuples: if not k in ALLOWED_ARGS: raise template.TemplateSyntaxError("%r tag options arguments must be one of %s." % (args[0], .join(ALLOWED_ARGS))) if k == : kwargs[] = v if k == : kwargs[] = v return InlinesNode(var_name, **kwargs)
Searches through the provided content and applies inlines where ever they are found. Syntax:: {% process_inlines entry.body [in template_dir] [as varname] } Examples:: {% process_inlines entry.body %} {% process_inlines entry.body as body %} {% process_inlines entry.body in 'inlines/sidebar' %} {% process_inlines entry.body in 'inlines/sidebar' as body %}
#vtb def amount(self): return sum(self.get_compound_amount(c) for c in self.material.compounds)
Determine the sum of mole amounts of all the compounds. :returns: Amount. [kmol]
#vtb
def overlay_gateway_sflow_sflow_vlan_action(self, **kwargs):
    # kwargs keys reconstructed from the element names; the original literals were lost
    config = ET.Element("config")
    overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    name_key = ET.SubElement(overlay_gateway, "name")
    name_key.text = kwargs.pop('name')
    sflow = ET.SubElement(overlay_gateway, "sflow")
    sflow_profile_name_key = ET.SubElement(sflow, "sflow-profile-name")
    sflow_profile_name_key.text = kwargs.pop('sflow_profile_name')
    sflow_vlan_action = ET.SubElement(sflow, "sflow-vlan-action")
    sflow_vlan_action.text = kwargs.pop('sflow_vlan_action')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
#vtb def main(): import colorama import argparse import logging import sys import os parser = argparse.ArgumentParser(prog="gulpless", description="Simple build system.") parser.add_argument("-v", "--version", action="version", version="%(prog)s 0.7.6") parser.add_argument("-d", "--directory", action="store", default=os.getcwd(), help="Look for `build.py` in this folder (defaults to " "the current directory)") parser.add_argument("mode", action="store", choices=["build", "interactive"], default="interactive", metavar="mode", nargs="?", help="If `interactive` (the default), will wait for " "filesystem events and attempt to keep the input " "and output folders in sync. If `build`, it will " "attempt to build all updated files, then exit.") args = parser.parse_args() os.chdir(args.directory) sys.path.append(os.getcwd()) if os.environ.get("TERM") == "cygwin": del os.environ["TERM"] colorama.init() os.environ["TERM"] = "cygwin" else: colorama.init() try: old, sys.dont_write_bytecode = sys.dont_write_bytecode, True import build except ImportError: sys.exit("No `build.py` found in current folder.") finally: sys.dont_write_bytecode = old try: logging.basicConfig(level=build.LOGGING, format="%(message)s") except AttributeError: logging.basicConfig(level=logging.INFO, format="%(message)s") reactor = Reactor(build.SRC, build.DEST) for handler in build.HANDLERS: reactor.add_handler(handler) reactor.run(args.mode == "build")
Entry point for command line usage.
#vtb
def members(name, members_list, root=None):
    # the command template and execution-module key were lost in extraction; this
    # reconstruction assumes gpasswd and the standard cmd.retcode wrapper
    cmd = 'gpasswd --members {0} {1}'.format(members_list, name)
    retcode = __salt__['cmd.retcode'](cmd, python_shell=False)
    return not retcode
Replaces members of the group with a provided list. CLI Example: salt '*' group.members foo 'user1,user2,user3,...' Replaces a membership list for a local group 'foo'. foo:x:1234:user1,user2,user3,...
#vtb def to_dict(self, save_data=True): input_dict = super(SparseGP, self).to_dict(save_data) input_dict["class"] = "GPy.core.SparseGP" input_dict["Z"] = self.Z.tolist() return input_dict
Convert the object into a json serializable dictionary. :param boolean save_data: if true, it adds the training data self.X and self.Y to the dictionary :return dict: json serializable dictionary containing the needed information to instantiate the object
#vtb def acl_show(self, msg, args): name = args[0] if len(args) > 0 else None if name is None: return "%s: The following ACLs are defined: %s" % (msg.user, .join(self._acl.keys())) if name not in self._acl: return "Sorry, couldn%s\n%s, allow, deny']) ])
Show current allow and deny blocks for the given acl.
#vtb
def extract_ipv4(roster_order, ipv4):
    # string literals reconstructed from context ('local'/'private'/'public' roster types)
    for ip_type in roster_order:
        for ip_ in ipv4:
            if ':' in ip_:
                # skip anything that is not a plain IPv4 address
                continue
            if not salt.utils.validate.net.ipv4_addr(ip_):
                continue
            if ip_type == 'local' and ip_.startswith('127.'):
                return ip_
            elif ip_type == 'private' and not salt.utils.cloud.is_public_ip(ip_):
                return ip_
            elif ip_type == 'public' and salt.utils.cloud.is_public_ip(ip_):
                return ip_
    return None
Extract the preferred IP address from the ipv4 grain
#vtb def assign_tip_labels_and_colors(self): "assign tip labels based on user provided kwargs" if self.style.tip_labels_colors: if self.ttree._fixed_order: if isinstance(self.style.tip_labels_colors, (list, np.ndarray)): cols = np.array(self.style.tip_labels_colors) orde = cols[self.ttree._fixed_idx] self.style.tip_labels_colors = list(orde) if self.style.tip_labels is False: self.style.tip_labels_style["-toyplot-anchor-shift"] = "0px" self.tip_labels = ["" for i in self.ttree.get_tip_labels()] else: if not self.style.tip_labels_style["-toyplot-anchor-shift"]: self.style.tip_labels_style["-toyplot-anchor-shift"] = "15px" if isinstance(self.style.tip_labels, list): self.tip_labels = self.style.tip_labels else: if self.ttree._fixed_order: self.tip_labels = self.ttree._fixed_order else: self.tip_labels = self.ttree.get_tip_labels()
assign tip labels based on user provided kwargs
#vtb def setup_handlers(): __grains__ = salt.loader.grains(__opts__) __salt__ = salt.loader.minion_mods(__opts__) if not in __opts__: log.debug(sentry_handler\) return False options = {} dsn = get_config_value() if dsn is not None: try: from raven.transport import TransportRegistry, default_transports from raven.utils.urlparse import urlparse transport_registry = TransportRegistry(default_transports) url = urlparse(dsn) if not transport_registry.supported_scheme(url.scheme): raise ValueError(.format(url.scheme)) except ValueError as exc: log.info( , exc ) if not dsn: for key in (, , , ): config_value = get_config_value(key) if config_value is None and key not in options: log.debug( sentry_handler\ %s\ , key ) return elif config_value is None: continue options[key] = config_value options.update({ : get_config_value(), : get_config_value(), : get_config_value(, ()), : get_config_value(, ()), : get_config_value(), : get_config_value(), : get_config_value(), : get_config_value(, 1), : get_config_value(), : dsn }) client = raven.Client(**options) context = get_config_value() context_dict = {} if context is not None: for tag in context: try: tag_value = __grains__[tag] except KeyError: log.debug(%s\, tag) continue if tag_value: context_dict[tag] = tag_value if context_dict: client.context.merge({: context_dict}) try: handler = SentryHandler(client) exclude_patterns = get_config_value(, None) if exclude_patterns: filter_regexes = [re.compile(pattern) for pattern in exclude_patterns] class FilterExcludedMessages(object): @staticmethod def filter(record): m = record.getMessage() return not any(regex.search(m) for regex in filter_regexes) handler.addFilter(FilterExcludedMessages()) handler.setLevel(LOG_LEVELS[get_config_value(, )]) return handler except ValueError as exc: log.debug(, exc_info=True)
sets up the sentry handler
### Input: sets up the sentry handler ### Response: #vtb def setup_handlers(): __grains__ = salt.loader.grains(__opts__) __salt__ = salt.loader.minion_mods(__opts__) if not in __opts__: log.debug(sentry_handler\) return False options = {} dsn = get_config_value() if dsn is not None: try: from raven.transport import TransportRegistry, default_transports from raven.utils.urlparse import urlparse transport_registry = TransportRegistry(default_transports) url = urlparse(dsn) if not transport_registry.supported_scheme(url.scheme): raise ValueError(.format(url.scheme)) except ValueError as exc: log.info( , exc ) if not dsn: for key in (, , , ): config_value = get_config_value(key) if config_value is None and key not in options: log.debug( sentry_handler\ %s\ , key ) return elif config_value is None: continue options[key] = config_value options.update({ : get_config_value(), : get_config_value(), : get_config_value(, ()), : get_config_value(, ()), : get_config_value(), : get_config_value(), : get_config_value(), : get_config_value(, 1), : get_config_value(), : dsn }) client = raven.Client(**options) context = get_config_value() context_dict = {} if context is not None: for tag in context: try: tag_value = __grains__[tag] except KeyError: log.debug(%s\, tag) continue if tag_value: context_dict[tag] = tag_value if context_dict: client.context.merge({: context_dict}) try: handler = SentryHandler(client) exclude_patterns = get_config_value(, None) if exclude_patterns: filter_regexes = [re.compile(pattern) for pattern in exclude_patterns] class FilterExcludedMessages(object): @staticmethod def filter(record): m = record.getMessage() return not any(regex.search(m) for regex in filter_regexes) handler.addFilter(FilterExcludedMessages()) handler.setLevel(LOG_LEVELS[get_config_value(, )]) return handler except ValueError as exc: log.debug(, exc_info=True)
#vtb def delete_contacts( self, ids: List[int] ): contacts = [] for i in ids: try: input_user = self.resolve_peer(i) except PeerIdInvalid: continue else: if isinstance(input_user, types.InputPeerUser): contacts.append(input_user) return self.send( functions.contacts.DeleteContacts( id=contacts ) )
Use this method to delete contacts from your Telegram address book. Args: ids (List of ``int``): A list of unique identifiers for the target users. Can be an ID (int), a username (string) or phone number (string). Returns: True on success. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
### Input: Use this method to delete contacts from your Telegram address book. Args: ids (List of ``int``): A list of unique identifiers for the target users. Can be an ID (int), a username (string) or phone number (string). Returns: True on success. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. ### Response: #vtb def delete_contacts( self, ids: List[int] ): contacts = [] for i in ids: try: input_user = self.resolve_peer(i) except PeerIdInvalid: continue else: if isinstance(input_user, types.InputPeerUser): contacts.append(input_user) return self.send( functions.contacts.DeleteContacts( id=contacts ) )
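A minimal usage sketch for the method above; the session name and user IDs are placeholders, not taken from the source:
from pyrogram import Client

app = Client("my_account")  # hypothetical session name

with app:
    # remove two contacts by numeric user ID (illustrative values)
    app.delete_contacts([123456789, 987654321])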
#vtb def detached_signature_for(plaintext_str, keys): ctx = gpg.core.Context(armor=True) ctx.signers = keys (sigblob, sign_result) = ctx.sign(plaintext_str, mode=gpg.constants.SIG_MODE_DETACH) return sign_result.signatures, sigblob
Signs the given plaintext string and returns the detached signature. A detached signature in GPG speak is a separate blob of data containing a signature for the specified plaintext. :param bytes plaintext_str: bytestring to sign :param keys: list of one or more keys to sign with. :type keys: list[gpg.gpgme._gpgme_key] :returns: A list of signatures and the signed blob of data :rtype: tuple[list[gpg.results.NewSignature], str]
### Input: Signs the given plaintext string and returns the detached signature. A detached signature in GPG speak is a separate blob of data containing a signature for the specified plaintext. :param bytes plaintext_str: bytestring to sign :param keys: list of one or more keys to sign with. :type keys: list[gpg.gpgme._gpgme_key] :returns: A list of signatures and the signed blob of data :rtype: tuple[list[gpg.results.NewSignature], str] ### Response: #vtb def detached_signature_for(plaintext_str, keys): ctx = gpg.core.Context(armor=True) ctx.signers = keys (sigblob, sign_result) = ctx.sign(plaintext_str, mode=gpg.constants.SIG_MODE_DETACH) return sign_result.signatures, sigblob
#vtb def rename_variables(expression: Expression, renaming: Dict[str, str]) -> Expression: if isinstance(expression, Operation): if hasattr(expression, ): variable_name = renaming.get(expression.variable_name, expression.variable_name) return create_operation_expression( expression, [rename_variables(o, renaming) for o in op_iter(expression)], variable_name=variable_name ) operands = [rename_variables(o, renaming) for o in op_iter(expression)] return create_operation_expression(expression, operands) elif isinstance(expression, Expression): expression = expression.__copy__() expression.variable_name = renaming.get(expression.variable_name, expression.variable_name) return expression
Rename the variables in the expression according to the given dictionary. Args: expression: The expression in which the variables are renamed. renaming: The renaming dictionary. Maps old variable names to new ones. Variable names not occurring in the dictionary are left unchanged. Returns: The expression with renamed variables.
### Input: Rename the variables in the expression according to the given dictionary. Args: expression: The expression in which the variables are renamed. renaming: The renaming dictionary. Maps old variable names to new ones. Variable names not occurring in the dictionary are left unchanged. Returns: The expression with renamed variables. ### Response: #vtb def rename_variables(expression: Expression, renaming: Dict[str, str]) -> Expression: if isinstance(expression, Operation): if hasattr(expression, ): variable_name = renaming.get(expression.variable_name, expression.variable_name) return create_operation_expression( expression, [rename_variables(o, renaming) for o in op_iter(expression)], variable_name=variable_name ) operands = [rename_variables(o, renaming) for o in op_iter(expression)] return create_operation_expression(expression, operands) elif isinstance(expression, Expression): expression = expression.__copy__() expression.variable_name = renaming.get(expression.variable_name, expression.variable_name) return expression
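A hedged usage sketch; the MatchPy constructors below (Operation.new, Wildcard.dot, Symbol) are assumed from that library's public API and are not part of the source entry:
from matchpy import Operation, Arity, Symbol, Wildcard  # assumed imports

f = Operation.new('f', Arity.binary)        # a toy binary operation
expr = f(Wildcard.dot('x'), Symbol('a'))    # f(x_, a)

renamed = rename_variables(expr, {'x': 'y'})  # -> f(y_, a)
# 'a' is a plain symbol rather than a variable, so it is left unchanged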
#vtb def unwrap(self): if self.algorithm == : return self[].parsed key_type = self.algorithm.upper() a_an = if key_type == else raise ValueError(unwrap( , a_an, key_type ))
Unwraps an RSA public key into an RSAPublicKey object. Does not support DSA or EC public keys since they do not have an unwrapped form. :return: An RSAPublicKey object
### Input: Unwraps an RSA public key into an RSAPublicKey object. Does not support DSA or EC public keys since they do not have an unwrapped form. :return: An RSAPublicKey object ### Response: #vtb def unwrap(self): if self.algorithm == : return self[].parsed key_type = self.algorithm.upper() a_an = if key_type == else raise ValueError(unwrap( , a_an, key_type ))
#vtb def index_all(self): self.logger.debug(, self.record_path) with self.db.connection(): for json_path in sorted(self.find_record_files()): self.index_record(json_path)
Index all records under :attr:`record_path`.
### Input: Index all records under :attr:`record_path`. ### Response: #vtb def index_all(self): self.logger.debug(, self.record_path) with self.db.connection(): for json_path in sorted(self.find_record_files()): self.index_record(json_path)
#vtb def decorate_class_method(func, classkey=None, skipmain=False): global __CLASSTYPE_ATTRIBUTES__ assert classkey is not None, __CLASSTYPE_ATTRIBUTES__[classkey].append(func) return func
Will inject all decorated functions as methods of classkey classkey is some identifying string, tuple, or object func can also be a tuple
### Input: Will inject all decorated functions as methods of classkey classkey is some identifying string, tuple, or object func can also be a tuple ### Response: #vtb def decorate_class_method(func, classkey=None, skipmain=False): global __CLASSTYPE_ATTRIBUTES__ assert classkey is not None, __CLASSTYPE_ATTRIBUTES__[classkey].append(func) return func
#vtb def cli(): ch = logging.StreamHandler() ch.setFormatter(logging.Formatter( , datefmt="%Y-%m-%d %H:%M:%S" )) logger.addHandler(ch) import argparse parser = argparse.ArgumentParser(description="Search for hosts with a \ response to that matches ") parser.add_argument(, help=) parser.add_argument(, , help=, default=) parser.add_argument(, , help=, dest=, default=) parser.add_argument(, , help=, action=) args = parser.parse_args() print() result = survey(**vars(args)) print(.format(len(result), if len(result)!=1 else , if args.pattern else , args.pattern, args.network)) for x in result: print(x.hostname)
Command line interface
### Input: Command line interface ### Response: #vtb def cli(): ch = logging.StreamHandler() ch.setFormatter(logging.Formatter( , datefmt="%Y-%m-%d %H:%M:%S" )) logger.addHandler(ch) import argparse parser = argparse.ArgumentParser(description="Search for hosts with a \ response to that matches ") parser.add_argument(, help=) parser.add_argument(, , help=, default=) parser.add_argument(, , help=, dest=, default=) parser.add_argument(, , help=, action=) args = parser.parse_args() print() result = survey(**vars(args)) print(.format(len(result), if len(result)!=1 else , if args.pattern else , args.pattern, args.network)) for x in result: print(x.hostname)
#vtb def p_element_list(self, p): if len(p) == 3: p[0] = p[1] + [p[2]] else: p[1].extend(p[3]) p[1].append(p[4]) p[0] = p[1]
element_list : elision_opt assignment_expr | element_list COMMA elision_opt assignment_expr
### Input: element_list : elision_opt assignment_expr | element_list COMMA elision_opt assignment_expr ### Response: #vtb def p_element_list(self, p): if len(p) == 3: p[0] = p[1] + [p[2]] else: p[1].extend(p[3]) p[1].append(p[4]) p[0] = p[1]
#vtb def pagure_specific_project_tag_filter(config, message, tags=None, *args, **kw): if not pagure_catchall(config, message): return False tags = tags.split() if tags else [] tags = [tag.strip() for tag in tags if tag and tag.strip()] project_tags = set() project_tags.update(message.get(, {}).get(, [])) project_tags.update( message.get(, {}).get(, {}).get(, [])) project_tags.update( message.get(, {}).get(, {}).get(, [])) valid = len(project_tags.intersection(set(tags))) > 0 return valid
Particular pagure project tags Adding this rule allows you to get notifications for one or more `pagure.io <https://pagure.io>`_ projects having the specified tags. Specify multiple tags by separating them with a comma ','.
### Input: Particular pagure project tags Adding this rule allows you to get notifications for one or more `pagure.io <https://pagure.io>`_ projects having the specified tags. Specify multiple tags by separating them with a comma ','. ### Response: #vtb def pagure_specific_project_tag_filter(config, message, tags=None, *args, **kw): if not pagure_catchall(config, message): return False tags = tags.split() if tags else [] tags = [tag.strip() for tag in tags if tag and tag.strip()] project_tags = set() project_tags.update(message.get(, {}).get(, [])) project_tags.update( message.get(, {}).get(, {}).get(, [])) project_tags.update( message.get(, {}).get(, {}).get(, [])) valid = len(project_tags.intersection(set(tags))) > 0 return valid
#vtb def createEncoder(): consumption_encoder = ScalarEncoder(21, 0.0, 100.0, n=50, name="consumption", clipInput=True) time_encoder = DateEncoder(timeOfDay=(21, 9.5), name="timestamp_timeOfDay") encoder = MultiEncoder() encoder.addEncoder("consumption", consumption_encoder) encoder.addEncoder("timestamp", time_encoder) return encoder
Create the encoder instance for our test and return it.
### Input: Create the encoder instance for our test and return it. ### Response: #vtb def createEncoder(): consumption_encoder = ScalarEncoder(21, 0.0, 100.0, n=50, name="consumption", clipInput=True) time_encoder = DateEncoder(timeOfDay=(21, 9.5), name="timestamp_timeOfDay") encoder = MultiEncoder() encoder.addEncoder("consumption", consumption_encoder) encoder.addEncoder("timestamp", time_encoder) return encoder
#vtb def Create(path, password, generate_default_key=True): wallet = UserWallet(path=path, passwordKey=password, create=True) if generate_default_key: wallet.CreateKey() return wallet
Create a new user wallet. Args: path (str): A path indicating where to create or open the wallet e.g. "/Wallets/mywallet". password (str): a password of at least 10 characters to secure the wallet with. Returns: UserWallet: a UserWallet instance.
### Input: Create a new user wallet. Args: path (str): A path indicating where to create or open the wallet e.g. "/Wallets/mywallet". password (str): a password of at least 10 characters to secure the wallet with. Returns: UserWallet: a UserWallet instance. ### Response: #vtb def Create(path, password, generate_default_key=True): wallet = UserWallet(path=path, passwordKey=password, create=True) if generate_default_key: wallet.CreateKey() return wallet
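A hedged usage sketch; the path and password are placeholders, and the import path is assumed from neo-python rather than taken from the source entry:
from neo.Implementations.Wallets.peewee.UserWallet import UserWallet  # assumed import path

wallet = UserWallet.Create(path="/Wallets/mywallet",    # placeholder path
                           password="m1nimum10chars")   # at least 10 characters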
#vtb def to_dict(self): input_dict = super(Add, self)._save_to_input_dict() input_dict["class"] = str("GPy.kern.Add") return input_dict
Convert the object into a json serializable dictionary. Note: It uses the private method _save_to_input_dict of the parent. :return dict: json serializable dictionary containing the needed information to instantiate the object
### Input: Convert the object into a json serializable dictionary. Note: It uses the private method _save_to_input_dict of the parent. :return dict: json serializable dictionary containing the needed information to instantiate the object ### Response: #vtb def to_dict(self): input_dict = super(Add, self)._save_to_input_dict() input_dict["class"] = str("GPy.kern.Add") return input_dict
#vtb def prune_old_authorization_codes(): from .compat import now from .models import AuthorizationCode AuthorizationCode.objects.with_expiration_before(now()).delete()
Removes all unused and expired authorization codes from the database.
### Input: Removes all unused and expired authorization codes from the database. ### Response: #vtb def prune_old_authorization_codes(): from .compat import now from .models import AuthorizationCode AuthorizationCode.objects.with_expiration_before(now()).delete()
#vtb async def storm(self, text, opts=None): async for mesg in self.cell.streamstorm(text, opts, user=self.user): yield mesg
Evaluate a storm query and yield result messages. Yields: ((str,dict)): Storm messages.
### Input: Evaluate a storm query and yield result messages. Yields: ((str,dict)): Storm messages. ### Response: #vtb async def storm(self, text, opts=None): async for mesg in self.cell.streamstorm(text, opts, user=self.user): yield mesg
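A hedged sketch of consuming the message stream; the query text and the surrounding api handle are illustrative, and the (type, data) message shape follows the Yields description above:
import asyncio

async def show_nodes(api):
    # api is assumed to expose the storm() coroutine shown above
    async for mesg in api.storm('inet:ipv4'):
        if mesg[0] == 'node':        # each message is a (str, dict) pair
            print(mesg[1])

# asyncio.get_event_loop().run_until_complete(show_nodes(api))  # run with a real api handle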
#vtb def get_token_issuer(token): try: unverified = decode_token(token) if not in unverified: raise TokenIssuerError return unverified.get() except jwt.DecodeError: raise TokenDecodeError
Issuer of a token is the identifier used to recover the secret. Need to extract this from the token to ensure we can proceed to the signature validation stage. Does not check validity of the token. :param token: signed JWT token :return issuer: iss field of the JWT token :raises TokenIssuerError: if iss field not present :raises TokenDecodeError: if token does not conform to JWT spec
### Input: Issuer of a token is the identifier used to recover the secret. Need to extract this from the token to ensure we can proceed to the signature validation stage. Does not check validity of the token. :param token: signed JWT token :return issuer: iss field of the JWT token :raises TokenIssuerError: if iss field not present :raises TokenDecodeError: if token does not conform to JWT spec ### Response: #vtb def get_token_issuer(token): try: unverified = decode_token(token) if not in unverified: raise TokenIssuerError return unverified.get() except jwt.DecodeError: raise TokenDecodeError
#vtb def get_verb_phrases(sentence_doc): pattern = r verb_phrases = textacy.extract.pos_regex_matches(sentence_doc, pattern) result = [] for vp in verb_phrases: word_numbers = [] first_word = vp.start x = first_word if len(vp) > 1: for verb_or_adverb in vp: if not verb_or_adverb.pos_ == : word_numbers.append(x) x += 1 else: word_numbers.append(first_word) if ( (word_numbers[0] - 1) < 0) or (sentence_doc[word_numbers[0] - 1].text.lower() != ): result.append(word_numbers) return result
Returns an object like [(1), (5,6,7)], where this means 2 verb phrases: a single verb at index 1, and another verb phrase at 5,6,7. - Adverbs are not included. - Infinitive phrases (and verb phrases that are subsets of infinitive phrases) are not included
### Input: Returns an object like [(1), (5,6,7)], where this means 2 verb phrases: a single verb at index 1, and another verb phrase at 5,6,7. - Adverbs are not included. - Infinitive phrases (and verb phrases that are subsets of infinitive phrases) are not included ### Response: #vtb def get_verb_phrases(sentence_doc): pattern = r verb_phrases = textacy.extract.pos_regex_matches(sentence_doc, pattern) result = [] for vp in verb_phrases: word_numbers = [] first_word = vp.start x = first_word if len(vp) > 1: for verb_or_adverb in vp: if not verb_or_adverb.pos_ == : word_numbers.append(x) x += 1 else: word_numbers.append(first_word) if ( (word_numbers[0] - 1) < 0) or (sentence_doc[word_numbers[0] - 1].text.lower() != ): result.append(word_numbers) return result
#vtb def run(path, code=None, params=None, **meta): if in params: ignore_decorators = params[] else: ignore_decorators = None check_source_args = (code, path, ignore_decorators) if THIRD_ARG else (code, path) return [{ : e.line, : (e.message[0:4] + e.message[5:] if e.message[4] == else e.message), : , : e.code } for e in PyDocChecker().check_source(*check_source_args)]
pydocstyle code checking. :return list: List of errors.
### Input: pydocstyle code checking. :return list: List of errors. ### Response: #vtb def run(path, code=None, params=None, **meta): if in params: ignore_decorators = params[] else: ignore_decorators = None check_source_args = (code, path, ignore_decorators) if THIRD_ARG else (code, path) return [{ : e.line, : (e.message[0:4] + e.message[5:] if e.message[4] == else e.message), : , : e.code } for e in PyDocChecker().check_source(*check_source_args)]
#vtb def hazards_for_layer(layer_geometry_key): result = [] for hazard in hazard_all: if layer_geometry_key in hazard.get(): result.append(hazard) return sorted(result, key=lambda k: k[])
Get hazard categories from layer_geometry_key. :param layer_geometry_key: The geometry id :type layer_geometry_key: str :returns: List of hazards :rtype: list
### Input: Get hazard categories from layer_geometry_key. :param layer_geometry_key: The geometry id :type layer_geometry_key: str :returns: List of hazards :rtype: list ### Response: #vtb def hazards_for_layer(layer_geometry_key): result = [] for hazard in hazard_all: if layer_geometry_key in hazard.get(): result.append(hazard) return sorted(result, key=lambda k: k[])
#vtb def flo(string): callers_locals = {} frame = inspect.currentframe() try: outerframe = frame.f_back callers_locals = outerframe.f_locals finally: del frame return string.format(**callers_locals)
Return the string given by param formatted with the caller's locals.
### Input: Return the string given by param formatted with the caller's locals. ### Response: #vtb def flo(string): callers_locals = {} frame = inspect.currentframe() try: outerframe = frame.f_back callers_locals = outerframe.f_locals finally: del frame return string.format(**callers_locals)
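A small usage example; it behaves like str.format() fed with the caller's local variables (the values below are invented):
host = 'example.org'
port = 8080
url = flo('https://{host}:{port}/index.html')
# url == 'https://example.org:8080/index.html'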
#vtb def clip_foreign(network): foreign_buses = network.buses[network.buses.country_code != ] network.buses = network.buses.drop( network.buses.loc[foreign_buses.index].index) network.lines = network.lines.drop(network.lines[ (network.lines[].isin(network.buses.index) == False) | (network.lines[].isin(network.buses.index) == False)].index) network.links = network.links.drop(network.links[ (network.links[].isin(network.buses.index) == False) | (network.links[].isin(network.buses.index) == False)].index) network.transformers = network.transformers.drop(network.transformers[ (network.transformers[].isin(network.buses.index) == False) | (network.transformers[].isin(network. buses.index) == False)].index) network.generators = network.generators.drop(network.generators[ (network.generators[].isin(network.buses.index) == False)].index) network.loads = network.loads.drop(network.loads[ (network.loads[].isin(network.buses.index) == False)].index) network.storage_units = network.storage_units.drop(network.storage_units[ (network.storage_units[].isin(network. buses.index) == False)].index) components = [, , , , , ] for g in components: h = g + nw = getattr(network, h) for i in nw.keys(): cols = [j for j in getattr( nw, i).columns if j not in getattr(network, g).index] for k in cols: del getattr(nw, i)[k] return network
Delete all components and timelines located outside of Germany. Add transborder flows divided by country of origin as network.foreign_trade. Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA Returns ------- network : :class:`pypsa.Network Overall container of PyPSA
### Input: Delete all components and timelines located outside of Germany. Add transborder flows divided by country of origin as network.foreign_trade. Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA Returns ------- network : :class:`pypsa.Network Overall container of PyPSA ### Response: #vtb def clip_foreign(network): foreign_buses = network.buses[network.buses.country_code != ] network.buses = network.buses.drop( network.buses.loc[foreign_buses.index].index) network.lines = network.lines.drop(network.lines[ (network.lines[].isin(network.buses.index) == False) | (network.lines[].isin(network.buses.index) == False)].index) network.links = network.links.drop(network.links[ (network.links[].isin(network.buses.index) == False) | (network.links[].isin(network.buses.index) == False)].index) network.transformers = network.transformers.drop(network.transformers[ (network.transformers[].isin(network.buses.index) == False) | (network.transformers[].isin(network. buses.index) == False)].index) network.generators = network.generators.drop(network.generators[ (network.generators[].isin(network.buses.index) == False)].index) network.loads = network.loads.drop(network.loads[ (network.loads[].isin(network.buses.index) == False)].index) network.storage_units = network.storage_units.drop(network.storage_units[ (network.storage_units[].isin(network. buses.index) == False)].index) components = [, , , , , ] for g in components: h = g + nw = getattr(network, h) for i in nw.keys(): cols = [j for j in getattr( nw, i).columns if j not in getattr(network, g).index] for k in cols: del getattr(nw, i)[k] return network
#vtb def solve(succ, orien, i, direc): assert orien[i] is not None j = succ[i][direc] if j is None: return False if j == len(orien) - 1: return True if orien[j] is None: for x in [0, 1]: orien[j] = x if solve(succ, orien, j, reflex[direc][x]): return True orien[j] = None return False else: return solve(succ, orien, j, reflex[direc][orien[j]])
Can a laser leaving mirror i in direction direc reach the exit? :param i: mirror index :param direc: direction leaving mirror i :param orien: orien[i]=orientation of mirror i :param succ: succ[i][direc]=successor mirror reached when leaving i in direction direc
### Input: Can a laser leaving mirror i in direction direc reach the exit? :param i: mirror index :param direc: direction leaving mirror i :param orien: orien[i]=orientation of mirror i :param succ: succ[i][direc]=successor mirror reached when leaving i in direction direc ### Response: #vtb def solve(succ, orien, i, direc): assert orien[i] is not None j = succ[i][direc] if j is None: return False if j == len(orien) - 1: return True if orien[j] is None: for x in [0, 1]: orien[j] = x if solve(succ, orien, j, reflex[direc][x]): return True orien[j] = None return False else: return solve(succ, orien, j, reflex[direc][orien[j]])
#vtb def generator_checker_py2(gen, gen_type, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check): initialized = False sn = None while True: a = gen.send(sn) if initialized or not a is None: if not gen_type.__args__[0] is Any and \ not _isinstance(a, gen_type.__args__[0], bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check): tpa = deep_type(a) msg = _make_generator_error_message(tpa, gen, gen_type.__args__[0], ) _raise_typecheck_error(msg, True, a, tpa, gen_type.__args__[0]) initialized = True sn = yield a if not gen_type.__args__[1] is Any and \ not _isinstance(sn, gen_type.__args__[1], bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check): tpsn = deep_type(sn) msg = _make_generator_error_message(tpsn, gen, gen_type.__args__[1], ) _raise_typecheck_error(msg, False, sn, tpsn, gen_type.__args__[1])
Builds a typechecking wrapper around a Python 2 style generator object.
### Input: Builds a typechecking wrapper around a Python 2 style generator object. ### Response: #vtb def generator_checker_py2(gen, gen_type, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check): initialized = False sn = None while True: a = gen.send(sn) if initialized or not a is None: if not gen_type.__args__[0] is Any and \ not _isinstance(a, gen_type.__args__[0], bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check): tpa = deep_type(a) msg = _make_generator_error_message(tpa, gen, gen_type.__args__[0], ) _raise_typecheck_error(msg, True, a, tpa, gen_type.__args__[0]) initialized = True sn = yield a if not gen_type.__args__[1] is Any and \ not _isinstance(sn, gen_type.__args__[1], bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check): tpsn = deep_type(sn) msg = _make_generator_error_message(tpsn, gen, gen_type.__args__[1], ) _raise_typecheck_error(msg, False, sn, tpsn, gen_type.__args__[1])
#vtb def process_remote_sources(self): unpacked_sources = self.context.products.get_data(UnpackedArchives) remote_sources_targets = self.context.targets(predicate=lambda t: isinstance(t, RemoteSources)) if not remote_sources_targets: return snapshot_specs = [] filespecs = [] unpack_dirs = [] for target in remote_sources_targets: unpacked_archive = unpacked_sources[target.sources_target] sources = unpacked_archive.found_files rel_unpack_dir = unpacked_archive.rel_unpack_dir self.context.log.debug( .format(target, rel_unpack_dir, sources)) sources_in_dir = tuple(os.path.join(rel_unpack_dir, source) for source in sources) snapshot_specs.append(PathGlobsAndRoot( PathGlobs(sources_in_dir), get_buildroot(), )) filespecs.append({: sources_in_dir}) unpack_dirs.append(rel_unpack_dir) snapshots = self.context._scheduler.capture_snapshots(tuple(snapshot_specs)) for target, snapshot, filespec, rel_unpack_dir in \ zip(remote_sources_targets, snapshots, filespecs, unpack_dirs): synthetic_target = self.context.add_new_target( address=Address(os.path.relpath(self.workdir, get_buildroot()), target.id), target_type=target.destination_target_type, dependencies=target.dependencies, sources=EagerFilesetWithSpec(rel_unpack_dir, filespec, snapshot), derived_from=target, **target.destination_target_args ) self.context.log.debug(.format(synthetic_target)) for dependent in self.context.build_graph.dependents_of(target.address): self.context.build_graph.inject_dependency(dependent, synthetic_target.address)
Create synthetic targets with populated sources from remote_sources targets.
### Input: Create synthetic targets with populated sources from remote_sources targets. ### Response: #vtb def process_remote_sources(self): unpacked_sources = self.context.products.get_data(UnpackedArchives) remote_sources_targets = self.context.targets(predicate=lambda t: isinstance(t, RemoteSources)) if not remote_sources_targets: return snapshot_specs = [] filespecs = [] unpack_dirs = [] for target in remote_sources_targets: unpacked_archive = unpacked_sources[target.sources_target] sources = unpacked_archive.found_files rel_unpack_dir = unpacked_archive.rel_unpack_dir self.context.log.debug( .format(target, rel_unpack_dir, sources)) sources_in_dir = tuple(os.path.join(rel_unpack_dir, source) for source in sources) snapshot_specs.append(PathGlobsAndRoot( PathGlobs(sources_in_dir), get_buildroot(), )) filespecs.append({: sources_in_dir}) unpack_dirs.append(rel_unpack_dir) snapshots = self.context._scheduler.capture_snapshots(tuple(snapshot_specs)) for target, snapshot, filespec, rel_unpack_dir in \ zip(remote_sources_targets, snapshots, filespecs, unpack_dirs): synthetic_target = self.context.add_new_target( address=Address(os.path.relpath(self.workdir, get_buildroot()), target.id), target_type=target.destination_target_type, dependencies=target.dependencies, sources=EagerFilesetWithSpec(rel_unpack_dir, filespec, snapshot), derived_from=target, **target.destination_target_args ) self.context.log.debug(.format(synthetic_target)) for dependent in self.context.build_graph.dependents_of(target.address): self.context.build_graph.inject_dependency(dependent, synthetic_target.address)
#vtb def add_volume_bricks(name, bricks): * volinfo = info() if name not in volinfo: log.error(, name) return False new_bricks = [] cmd = .format(name) if isinstance(bricks, six.string_types): bricks = [bricks] volume_bricks = [x[] for x in volinfo[name][].values()] for brick in bricks: if brick in volume_bricks: log.debug( , brick, name) else: new_bricks.append(brick) if new_bricks: for brick in new_bricks: cmd += .format(brick) return _gluster(cmd) return True
Add brick(s) to an existing volume name Volume name bricks List of bricks to add to the volume CLI Example: .. code-block:: bash salt '*' glusterfs.add_volume_bricks <volume> <bricks>
### Input: Add brick(s) to an existing volume name Volume name bricks List of bricks to add to the volume CLI Example: .. code-block:: bash salt '*' glusterfs.add_volume_bricks <volume> <bricks> ### Response: #vtb def add_volume_bricks(name, bricks): * volinfo = info() if name not in volinfo: log.error(, name) return False new_bricks = [] cmd = .format(name) if isinstance(bricks, six.string_types): bricks = [bricks] volume_bricks = [x[] for x in volinfo[name][].values()] for brick in bricks: if brick in volume_bricks: log.debug( , brick, name) else: new_bricks.append(brick) if new_bricks: for brick in new_bricks: cmd += .format(brick) return _gluster(cmd) return True
#vtb def flat_list_to_polymer(atom_list, atom_group_s=4): atom_labels = [, , , , ] atom_elements = [, , , , ] atoms_coords = [atom_list[x:x + atom_group_s] for x in range(0, len(atom_list), atom_group_s)] atoms = [[Atom(x[0], x[1]) for x in zip(y, atom_elements)] for y in atoms_coords] if atom_group_s == 5: monomers = [Residue(OrderedDict(zip(atom_labels, x)), ) for x in atoms] elif atom_group_s == 4: monomers = [Residue(OrderedDict(zip(atom_labels, x)), ) for x in atoms] else: raise ValueError( ) polymer = Polypeptide(monomers=monomers) return polymer
Takes a flat list of atomic coordinates and converts it to a `Polymer`. Parameters ---------- atom_list : [Atom] Flat list of coordinates. atom_group_s : int, optional Size of atom groups. Returns ------- polymer : Polypeptide `Polymer` object containing atom coords converted `Monomers`. Raises ------ ValueError Raised if `atom_group_s` != 4 or 5
### Input: Takes a flat list of atomic coordinates and converts it to a `Polymer`. Parameters ---------- atom_list : [Atom] Flat list of coordinates. atom_group_s : int, optional Size of atom groups. Returns ------- polymer : Polypeptide `Polymer` object containing atom coords converted `Monomers`. Raises ------ ValueError Raised if `atom_group_s` != 4 or 5 ### Response: #vtb def flat_list_to_polymer(atom_list, atom_group_s=4): atom_labels = [, , , , ] atom_elements = [, , , , ] atoms_coords = [atom_list[x:x + atom_group_s] for x in range(0, len(atom_list), atom_group_s)] atoms = [[Atom(x[0], x[1]) for x in zip(y, atom_elements)] for y in atoms_coords] if atom_group_s == 5: monomers = [Residue(OrderedDict(zip(atom_labels, x)), ) for x in atoms] elif atom_group_s == 4: monomers = [Residue(OrderedDict(zip(atom_labels, x)), ) for x in atoms] else: raise ValueError( ) polymer = Polypeptide(monomers=monomers) return polymer
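A hedged usage sketch, assuming each list element is an (x, y, z) coordinate tuple; the coordinates are invented for illustration:
# two residues' worth of backbone coordinates (N, CA, C, O for each residue)
coords = [(0.0, 0.0, 0.0), (1.5, 0.0, 0.0), (2.2, 1.3, 0.0), (3.4, 1.4, 0.1),
          (2.5, 2.4, 0.8), (3.9, 2.6, 1.2), (4.6, 3.9, 1.0), (5.8, 4.0, 1.2)]
polypeptide = flat_list_to_polymer(coords, atom_group_s=4)
# expected: a Polypeptide holding 2 Residues, one per group of four atoms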
#vtb def open_zarr(store, group=None, synchronizer=None, chunks=, decode_cf=True, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables=None, consolidated=False, overwrite_encoded_chunks=False, **kwargs): if in kwargs: auto_chunk = kwargs.pop() if auto_chunk: chunks = else: chunks = None warnings.warn("auto_chunk is deprecated. Use chunks= instead.", FutureWarning, stacklevel=2) if kwargs: raise TypeError("open_zarr() got unexpected keyword arguments " + ",".join(kwargs.keys())) if not isinstance(chunks, (int, dict)): if chunks != and chunks is not None: raise ValueError("chunks must be an int, dict, , or None. " "Instead found %s. " % chunks) if not decode_cf: mask_and_scale = False decode_times = False concat_characters = False decode_coords = False def maybe_decode_store(store, lock=False): ds = conventions.decode_cf( store, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables) return ds mode = zarr_store = ZarrStore.open_group(store, mode=mode, synchronizer=synchronizer, group=group, consolidated=consolidated) ds = maybe_decode_store(zarr_store) if not chunks: return ds if isinstance(chunks, int): chunks = dict.fromkeys(ds.dims, chunks) if isinstance(chunks, tuple) and len(chunks) == len(ds.dims): chunks = dict(zip(ds.dims, chunks)) def get_chunk(name, var, chunks): chunk_spec = dict(zip(var.dims, var.encoding.get())) return var else: return var variables = OrderedDict([(k, maybe_chunk(k, v, chunks)) for k, v in ds.variables.items()]) return ds._replace_vars_and_dims(variables)
Load and decode a dataset from a Zarr store. .. note:: Experimental The Zarr backend is new and experimental. Please report any unexpected behavior via github issues. The `store` object should be a valid store for a Zarr group. `store` variables must contain dimension metadata encoded in the `_ARRAY_DIMENSIONS` attribute. Parameters ---------- store : MutableMapping or str A MutableMapping where a Zarr Group has been stored or a path to a directory in file system where a Zarr DirectoryStore has been stored. synchronizer : object, optional Array synchronizer provided to zarr group : str, obtional Group path. (a.k.a. `path` in zarr terminology.) chunks : int or dict or tuple or {None, 'auto'}, optional Chunk sizes along each dimension, e.g., ``5`` or ``{'x': 5, 'y': 5}``. If `chunks='auto'`, dask chunks are created based on the variable's zarr chunks. If `chunks=None`, zarr array data will lazily convert to numpy arrays upon access. This accepts all the chunk specifications as Dask does. overwrite_encoded_chunks: bool, optional Whether to drop the zarr chunks encoded for each variable when a dataset is loaded with specified chunk sizes (default: False) decode_cf : bool, optional Whether to decode these variables, assuming they were saved according to CF conventions. mask_and_scale : bool, optional If True, replace array values equal to `_FillValue` with NA and scale values according to the formula `original_values * scale_factor + add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are taken from variable attributes (if they exist). If the `_FillValue` or `missing_value` attribute contains multiple values a warning will be issued and all array values matching one of the multiple values will be replaced by NA. decode_times : bool, optional If True, decode times encoded in the standard NetCDF datetime format into datetime objects. Otherwise, leave them encoded as numbers. concat_characters : bool, optional If True, concatenate along the last dimension of character arrays to form string arrays. Dimensions will only be concatenated over (and removed) if they have no corresponding variable and if they are only used as the last dimension of character arrays. decode_coords : bool, optional If True, decode the 'coordinates' attribute to identify coordinates in the resulting dataset. drop_variables : string or iterable, optional A variable or list of variables to exclude from being parsed from the dataset. This may be useful to drop variables with problems or inconsistent values. consolidated : bool, optional Whether to open the store using zarr's consolidated metadata capability. Only works for stores that have already been consolidated. Returns ------- dataset : Dataset The newly created dataset. See Also -------- open_dataset References ---------- http://zarr.readthedocs.io/
### Input: Load and decode a dataset from a Zarr store. .. note:: Experimental The Zarr backend is new and experimental. Please report any unexpected behavior via github issues. The `store` object should be a valid store for a Zarr group. `store` variables must contain dimension metadata encoded in the `_ARRAY_DIMENSIONS` attribute. Parameters ---------- store : MutableMapping or str A MutableMapping where a Zarr Group has been stored or a path to a directory in file system where a Zarr DirectoryStore has been stored. synchronizer : object, optional Array synchronizer provided to zarr group : str, obtional Group path. (a.k.a. `path` in zarr terminology.) chunks : int or dict or tuple or {None, 'auto'}, optional Chunk sizes along each dimension, e.g., ``5`` or ``{'x': 5, 'y': 5}``. If `chunks='auto'`, dask chunks are created based on the variable's zarr chunks. If `chunks=None`, zarr array data will lazily convert to numpy arrays upon access. This accepts all the chunk specifications as Dask does. overwrite_encoded_chunks: bool, optional Whether to drop the zarr chunks encoded for each variable when a dataset is loaded with specified chunk sizes (default: False) decode_cf : bool, optional Whether to decode these variables, assuming they were saved according to CF conventions. mask_and_scale : bool, optional If True, replace array values equal to `_FillValue` with NA and scale values according to the formula `original_values * scale_factor + add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are taken from variable attributes (if they exist). If the `_FillValue` or `missing_value` attribute contains multiple values a warning will be issued and all array values matching one of the multiple values will be replaced by NA. decode_times : bool, optional If True, decode times encoded in the standard NetCDF datetime format into datetime objects. Otherwise, leave them encoded as numbers. concat_characters : bool, optional If True, concatenate along the last dimension of character arrays to form string arrays. Dimensions will only be concatenated over (and removed) if they have no corresponding variable and if they are only used as the last dimension of character arrays. decode_coords : bool, optional If True, decode the 'coordinates' attribute to identify coordinates in the resulting dataset. drop_variables : string or iterable, optional A variable or list of variables to exclude from being parsed from the dataset. This may be useful to drop variables with problems or inconsistent values. consolidated : bool, optional Whether to open the store using zarr's consolidated metadata capability. Only works for stores that have already been consolidated. Returns ------- dataset : Dataset The newly created dataset. See Also -------- open_dataset References ---------- http://zarr.readthedocs.io/ ### Response: #vtb def open_zarr(store, group=None, synchronizer=None, chunks=, decode_cf=True, mask_and_scale=True, decode_times=True, concat_characters=True, decode_coords=True, drop_variables=None, consolidated=False, overwrite_encoded_chunks=False, **kwargs): if in kwargs: auto_chunk = kwargs.pop() if auto_chunk: chunks = else: chunks = None warnings.warn("auto_chunk is deprecated. Use chunks= instead.", FutureWarning, stacklevel=2) if kwargs: raise TypeError("open_zarr() got unexpected keyword arguments " + ",".join(kwargs.keys())) if not isinstance(chunks, (int, dict)): if chunks != and chunks is not None: raise ValueError("chunks must be an int, dict, , or None. " "Instead found %s. 
" % chunks) if not decode_cf: mask_and_scale = False decode_times = False concat_characters = False decode_coords = False def maybe_decode_store(store, lock=False): ds = conventions.decode_cf( store, mask_and_scale=mask_and_scale, decode_times=decode_times, concat_characters=concat_characters, decode_coords=decode_coords, drop_variables=drop_variables) return ds mode = zarr_store = ZarrStore.open_group(store, mode=mode, synchronizer=synchronizer, group=group, consolidated=consolidated) ds = maybe_decode_store(zarr_store) if not chunks: return ds if isinstance(chunks, int): chunks = dict.fromkeys(ds.dims, chunks) if isinstance(chunks, tuple) and len(chunks) == len(ds.dims): chunks = dict(zip(ds.dims, chunks)) def get_chunk(name, var, chunks): chunk_spec = dict(zip(var.dims, var.encoding.get())) return var else: return var variables = OrderedDict([(k, maybe_chunk(k, v, chunks)) for k, v in ds.variables.items()]) return ds._replace_vars_and_dims(variables)
#vtb def module_can_run_parallel(test_module: unittest.TestSuite) -> bool: for test_class in test_module: raise TestClassNotIterable() for test_case in test_class: return not getattr(sys.modules[test_case.__module__], "__no_parallel__", False)
Checks if a given module of tests can be run in parallel or not :param test_module: the module to run :return: True if the module can be run in parallel, False otherwise
### Input: Checks if a given module of tests can be run in parallel or not :param test_module: the module to run :return: True if the module can be run in parallel, False otherwise ### Response: #vtb def module_can_run_parallel(test_module: unittest.TestSuite) -> bool: for test_class in test_module: raise TestClassNotIterable() for test_case in test_class: return not getattr(sys.modules[test_case.__module__], "__no_parallel__", False)
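For context, a test module opts out of parallel execution by defining the module-level flag the function checks; this file is a hedged illustration, not part of the source:
# tests/test_serial_only.py (hypothetical)
import unittest

__no_parallel__ = True   # module_can_run_parallel() returns False for this module

class SerialOnlyTest(unittest.TestCase):
    def test_uses_shared_resource(self):
        self.assertTrue(True)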
#vtb def _generate_struct(self, struct_type, extra_parameters=None, nameOverride=None): extra_parameters = extra_parameters if extra_parameters is not None else [] self._emit_jsdoc_header(struct_type.doc) self.emit( % ( nameOverride if nameOverride else fmt_type_name(struct_type) ) ) if struct_type.is_member_of_enumerated_subtypes_tree(): if struct_type.has_enumerated_subtypes(): tag_values = [] for tags, _ in struct_type.get_all_subtypes_with_tags(): for tag in tags: tag_values.append( % tag) jsdoc_tag_union = fmt_jsdoc_union(tag_values) txt = % \ jsdoc_tag_union self.emit_wrapped_text(txt) else: parent = struct_type.parent_type while not parent.has_enumerated_subtypes(): parent = parent.parent_type for subtype in parent.get_enumerated_subtypes(): if subtype.data_type == struct_type: txt = %s\ \ \ \ % \ subtype.name self.emit_wrapped_text(txt) break for param_name, param_type, param_docstring in extra_parameters: param_docstring = % param_docstring if param_docstring else self.emit_wrapped_text( % ( param_type, param_name, param_docstring, ), prefix=, ) for field in struct_type.all_fields: field_doc = + field.doc if field.doc else field_type, nullable, _ = unwrap(field.data_type) field_js_type = fmt_type(field_type) field_name = + field.name + if nullable else field.name self.emit_wrapped_text( % ( field_js_type, field_name, self.process_doc(field_doc, self._docf), ), prefix=, ) self.emit()
Emits a JSDoc @typedef for a struct.
### Input: Emits a JSDoc @typedef for a struct. ### Response: #vtb def _generate_struct(self, struct_type, extra_parameters=None, nameOverride=None): extra_parameters = extra_parameters if extra_parameters is not None else [] self._emit_jsdoc_header(struct_type.doc) self.emit( % ( nameOverride if nameOverride else fmt_type_name(struct_type) ) ) if struct_type.is_member_of_enumerated_subtypes_tree(): if struct_type.has_enumerated_subtypes(): tag_values = [] for tags, _ in struct_type.get_all_subtypes_with_tags(): for tag in tags: tag_values.append( % tag) jsdoc_tag_union = fmt_jsdoc_union(tag_values) txt = % \ jsdoc_tag_union self.emit_wrapped_text(txt) else: parent = struct_type.parent_type while not parent.has_enumerated_subtypes(): parent = parent.parent_type for subtype in parent.get_enumerated_subtypes(): if subtype.data_type == struct_type: txt = %s\ \ \ \ % \ subtype.name self.emit_wrapped_text(txt) break for param_name, param_type, param_docstring in extra_parameters: param_docstring = % param_docstring if param_docstring else self.emit_wrapped_text( % ( param_type, param_name, param_docstring, ), prefix=, ) for field in struct_type.all_fields: field_doc = + field.doc if field.doc else field_type, nullable, _ = unwrap(field.data_type) field_js_type = fmt_type(field_type) field_name = + field.name + if nullable else field.name self.emit_wrapped_text( % ( field_js_type, field_name, self.process_doc(field_doc, self._docf), ), prefix=, ) self.emit()
#vtb def add_ms1_quant_from_top3_mzidtsv(proteins, psms, headerfields, protcol): if not protcol: protcol = mzidtsvdata.HEADER_MASTER_PROT top_ms1_psms = generate_top_psms(psms, protcol) for protein in proteins: prot_acc = protein[prottabledata.HEADER_PROTEIN] prec_area = calculate_protein_precursor_quant(top_ms1_psms, prot_acc) outprotein = {k: v for k, v in protein.items()} outprotein[headerfields[][ prottabledata.HEADER_AREA][None]] = str(prec_area) yield outprotein
Collects PSMs with the highest precursor quant values, adds the sum of the top 3 of these to a protein table
### Input: Collects PSMs with the highest precursor quant values, adds the sum of the top 3 of these to a protein table ### Response: #vtb def add_ms1_quant_from_top3_mzidtsv(proteins, psms, headerfields, protcol): if not protcol: protcol = mzidtsvdata.HEADER_MASTER_PROT top_ms1_psms = generate_top_psms(psms, protcol) for protein in proteins: prot_acc = protein[prottabledata.HEADER_PROTEIN] prec_area = calculate_protein_precursor_quant(top_ms1_psms, prot_acc) outprotein = {k: v for k, v in protein.items()} outprotein[headerfields[][ prottabledata.HEADER_AREA][None]] = str(prec_area) yield outprotein
#vtb def print_status(self, repo): print(" {0}{1}{2}".format(repo, " " * (19 - len(repo)), self.st))
Print status
### Input: Print status ### Response: #vtb def print_status(self, repo): print(" {0}{1}{2}".format(repo, " " * (19 - len(repo)), self.st))
#vtb def analyze(self, output_folder=".", auto_remove=False): if auto_remove: try: shutil.rmtree(output_folder) except: pass try: mkdir(output_folder) except: pass tokens = [token for sublist in self.sentences for token in sublist] df = pd.DataFrame(tokens) log = u"" log += u"Sentences : {}\n".format(len(self.sentences)) n = df.shape[1] log += self._analyze_first_token(df, 0, output_folder) for i in range(1, n): log += self._analyze_field(df, i, output_folder) print(log) stat_file = join(output_folder, "stats.txt") write(stat_file, log)
:type auto_remove: boolean :param boolean auto_remove: auto remove previous files in analyze folder
### Input: :type auto_remove: boolean :param boolean auto_remove: auto remove previous files in analyze folder ### Response: #vtb def analyze(self, output_folder=".", auto_remove=False): if auto_remove: try: shutil.rmtree(output_folder) except: pass try: mkdir(output_folder) except: pass tokens = [token for sublist in self.sentences for token in sublist] df = pd.DataFrame(tokens) log = u"" log += u"Sentences : {}\n".format(len(self.sentences)) n = df.shape[1] log += self._analyze_first_token(df, 0, output_folder) for i in range(1, n): log += self._analyze_field(df, i, output_folder) print(log) stat_file = join(output_folder, "stats.txt") write(stat_file, log)
#vtb def enable_gtk3(self, app=None): from pydev_ipython.inputhookgtk3 import create_inputhook_gtk3 self.set_inputhook(create_inputhook_gtk3(self._stdin_file)) self._current_gui = GUI_GTK
Enable event loop integration with Gtk3 (gir bindings). Parameters ---------- app : ignored Ignored, it's only a placeholder to keep the call signature of all gui activation methods consistent, which simplifies the logic of supporting magics. Notes ----- This method sets the PyOS_InputHook for Gtk3, which allows Gtk3 to integrate with terminal based applications like IPython.
### Input: Enable event loop integration with Gtk3 (gir bindings). Parameters ---------- app : ignored Ignored, it's only a placeholder to keep the call signature of all gui activation methods consistent, which simplifies the logic of supporting magics. Notes ----- This method sets the PyOS_InputHook for Gtk3, which allows Gtk3 to integrate with terminal based applications like IPython. ### Response: #vtb def enable_gtk3(self, app=None): from pydev_ipython.inputhookgtk3 import create_inputhook_gtk3 self.set_inputhook(create_inputhook_gtk3(self._stdin_file)) self._current_gui = GUI_GTK
#vtb def _clear(self, pipe=None): redis = self.redis if pipe is None else pipe redis.delete(self.key)
Helper for clear operations. :param pipe: Redis pipe in case update is performed as a part of transaction. :type pipe: :class:`redis.client.StrictPipeline` or :class:`redis.client.StrictRedis`
### Input: Helper for clear operations. :param pipe: Redis pipe in case update is performed as a part of transaction. :type pipe: :class:`redis.client.StrictPipeline` or :class:`redis.client.StrictRedis` ### Response: #vtb def _clear(self, pipe=None): redis = self.redis if pipe is None else pipe redis.delete(self.key)
#vtb def send_command_ack(self, device_id, action): yield from self._ready_to_send.acquire() acknowledgement = None try: self._command_ack.clear() self.send_command(device_id, action) log.debug() try: yield from asyncio.wait_for(self._command_ack.wait(), TIMEOUT.seconds, loop=self.loop) log.debug() except concurrent.futures._base.TimeoutError: acknowledgement = {: False, : } log.warning() else: acknowledgement = self._last_ack.get(, False) finally: self._ready_to_send.release() return acknowledgement
Send command, wait for gateway to respond with acknowledgment.
### Input: Send command, wait for gateway to respond with acknowledgment. ### Response: #vtb def send_command_ack(self, device_id, action): yield from self._ready_to_send.acquire() acknowledgement = None try: self._command_ack.clear() self.send_command(device_id, action) log.debug() try: yield from asyncio.wait_for(self._command_ack.wait(), TIMEOUT.seconds, loop=self.loop) log.debug() except concurrent.futures._base.TimeoutError: acknowledgement = {: False, : } log.warning() else: acknowledgement = self._last_ack.get(, False) finally: self._ready_to_send.release() return acknowledgement
#vtb def _stripe_object_to_refunds(cls, target_cls, data, charge): refunds = data.get("refunds") if not refunds: return [] refund_objs = [] for refund_data in refunds.get("data", []): item, _ = target_cls._get_or_create_from_stripe_object(refund_data, refetch=False) refund_objs.append(item) return refund_objs
Retrieves Refunds for a charge :param target_cls: The target class to instantiate per invoice item. :type target_cls: ``Refund`` :param data: The data dictionary received from the Stripe API. :type data: dict :param charge: The charge object that refunds are for. :type charge: ``djstripe.models.Charge`` :return:
### Input: Retrieves Refunds for a charge :param target_cls: The target class to instantiate per invoice item. :type target_cls: ``Refund`` :param data: The data dictionary received from the Stripe API. :type data: dict :param charge: The charge object that refunds are for. :type charge: ``djstripe.models.Charge`` :return: ### Response: #vtb def _stripe_object_to_refunds(cls, target_cls, data, charge): refunds = data.get("refunds") if not refunds: return [] refund_objs = [] for refund_data in refunds.get("data", []): item, _ = target_cls._get_or_create_from_stripe_object(refund_data, refetch=False) refund_objs.append(item) return refund_objs
#vtb def is_valid_catalog(catalog, validator=None): catalog = readers.read_catalog(catalog) if not validator: if hasattr(catalog, "validator"): validator = catalog.validator else: validator = create_validator() jsonschema_res = validator.is_valid(catalog) custom_errors = iter_custom_errors(catalog) return jsonschema_res and len(list(custom_errors)) == 0
Validates that a `data.json` file complies with the defined schema. Checks that the data.json has all the required fields and that both the required and optional fields follow the structure defined in the schema. Args: catalog (str or dict): Catalog (dict, JSON or XLSX) to be validated. Returns: bool: True if the data.json complies with the schema, otherwise False.
### Input: Validates that a `data.json` file complies with the defined schema. Checks that the data.json has all the required fields and that both the required and optional fields follow the structure defined in the schema. Args: catalog (str or dict): Catalog (dict, JSON or XLSX) to be validated. Returns: bool: True if the data.json complies with the schema, otherwise False. ### Response: #vtb def is_valid_catalog(catalog, validator=None): catalog = readers.read_catalog(catalog) if not validator: if hasattr(catalog, "validator"): validator = catalog.validator else: validator = create_validator() jsonschema_res = validator.is_valid(catalog) custom_errors = iter_custom_errors(catalog) return jsonschema_res and len(list(custom_errors)) == 0
#vtb def _write_wrapper(self, name): io_attr = getattr(self._io, name) def write_wrapper(*args, **kwargs): ret_value = io_attr(*args, **kwargs) if not IS_PY2: return ret_value return write_wrapper
Wrap write() to adapt return value for Python 2. Returns: Wrapper which is described below.
### Input: Wrap write() to adapt return value for Python 2. Returns: Wrapper which is described below. ### Response: #vtb def _write_wrapper(self, name): io_attr = getattr(self._io, name) def write_wrapper(*args, **kwargs): ret_value = io_attr(*args, **kwargs) if not IS_PY2: return ret_value return write_wrapper
#vtb def advance_job_status(namespace: str, job: Job, duration: float, err: Optional[Exception]): duration = human_duration(duration) if not err: job.status = JobStatus.SUCCEEDED logger.info(, job, duration) return if job.should_retry: job.status = JobStatus.NOT_SET job.retries += 1 if isinstance(err, RetryException) and err.at is not None: job.at = err.at else: job.at = (datetime.now(timezone.utc) + exponential_backoff(job.retries)) signals.job_schedule_retry.send(namespace, job=job, err=err) log_args = ( job.retries, job.max_retries + 1, job, duration, human_duration( (job.at - datetime.now(tz=timezone.utc)).total_seconds() ) ) if isinstance(err, RetryException): logger.info( , *log_args) else: logger.warning( , *log_args) return job.status = JobStatus.FAILED signals.job_failed.send(namespace, job=job, err=err) logger.error( , job.max_retries + 1, job.max_retries + 1, job, duration, exc_info=err )
Advance the status of a job depending on its execution. This function is called after a job has been executed. It calculates its next status and calls the appropriate signals.
### Input: Advance the status of a job depending on its execution. This function is called after a job has been executed. It calculates its next status and calls the appropriate signals. ### Response: #vtb def advance_job_status(namespace: str, job: Job, duration: float, err: Optional[Exception]): duration = human_duration(duration) if not err: job.status = JobStatus.SUCCEEDED logger.info(, job, duration) return if job.should_retry: job.status = JobStatus.NOT_SET job.retries += 1 if isinstance(err, RetryException) and err.at is not None: job.at = err.at else: job.at = (datetime.now(timezone.utc) + exponential_backoff(job.retries)) signals.job_schedule_retry.send(namespace, job=job, err=err) log_args = ( job.retries, job.max_retries + 1, job, duration, human_duration( (job.at - datetime.now(tz=timezone.utc)).total_seconds() ) ) if isinstance(err, RetryException): logger.info( , *log_args) else: logger.warning( , *log_args) return job.status = JobStatus.FAILED signals.job_failed.send(namespace, job=job, err=err) logger.error( , job.max_retries + 1, job.max_retries + 1, job, duration, exc_info=err )
#vtb def pipes(stream, *transformers): for transformer in transformers: stream = stream.pipe(transformer) return stream
Pipe several transformers end to end.
### Input: Pipe several transformers end to end. ### Response: #vtb def pipes(stream, *transformers): for transformer in transformers: stream = stream.pipe(transformer) return stream
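A usage sketch for pipes; source_stream, normalize and deduplicate are stand-ins for whatever stream and transformer objects the surrounding library provides, not names from the record:

combined = pipes(source_stream, normalize, deduplicate)
# equivalent to: source_stream.pipe(normalize).pipe(deduplicate)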
#vtb def delete(queue, items): with _conn(commit=True) as cur: if isinstance(items, dict): cmd = str().format( queue, salt.utils.json.dumps(items)) log.debug(, cmd) cur.execute(cmd) return True if isinstance(items, list): items = [(salt.utils.json.dumps(el),) for el in items] cmd = .format(queue) log.debug(, cmd) cur.executemany(cmd, items) return True
Delete an item or items from a queue
### Input: Delete an item or items from a queue ### Response: #vtb def delete(queue, items): with _conn(commit=True) as cur: if isinstance(items, dict): cmd = str().format( queue, salt.utils.json.dumps(items)) log.debug(, cmd) cur.execute(cmd) return True if isinstance(items, list): items = [(salt.utils.json.dumps(el),) for el in items] cmd = .format(queue) log.debug(, cmd) cur.executemany(cmd, items) return True
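Hedged call shapes matching the two branches of delete above; the queue name and item values are invented for illustration:

delete("myqueue", {"fun": "test.ping"})            # a single item given as a dict
delete("myqueue", ["item-1", "item-2", "item-3"])  # several items given as a list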
#vtb def aggregate(self, aggregates=None, drilldowns=None, cuts=None, order=None, page=None, page_size=None, page_max=None): def prep(cuts, drilldowns=False, aggregates=False, columns=None): q = select(columns) bindings = [] cuts, q, bindings = Cuts(self).apply(q, bindings, cuts) attributes = None if drilldowns is not False: attributes, q, bindings = Drilldowns(self).apply( q, bindings, drilldowns ) if aggregates is not False: aggregates, q, bindings = Aggregates(self).apply( q, bindings, aggregates ) q = self.restrict_joins(q, bindings) return q, bindings, attributes, aggregates, cuts count = count_results(self, prep(cuts, drilldowns=drilldowns, columns=[1])[0]) summary = first_result(self, prep(cuts, aggregates=aggregates)[0].limit(1)) q, bindings, attributes, aggregates, cuts = \ prep(cuts, drilldowns=drilldowns, aggregates=aggregates) page, q = Pagination(self).apply(q, page, page_size, page_max) ordering, q, bindings = Ordering(self).apply(q, bindings, order) q = self.restrict_joins(q, bindings) cells = list(generate_results(self, q)) return { : count, : cells, : summary, : cuts, : aggregates, : attributes, : ordering, : page[], : page[] }
Main aggregation function. This is used to compute a given set of aggregates, grouped by a given set of drilldown dimensions (i.e. dividers). The query can also be filtered and sorted.
### Input: Main aggregation function. This is used to compute a given set of aggregates, grouped by a given set of drilldown dimensions (i.e. dividers). The query can also be filtered and sorted. ### Response: #vtb def aggregate(self, aggregates=None, drilldowns=None, cuts=None, order=None, page=None, page_size=None, page_max=None): def prep(cuts, drilldowns=False, aggregates=False, columns=None): q = select(columns) bindings = [] cuts, q, bindings = Cuts(self).apply(q, bindings, cuts) attributes = None if drilldowns is not False: attributes, q, bindings = Drilldowns(self).apply( q, bindings, drilldowns ) if aggregates is not False: aggregates, q, bindings = Aggregates(self).apply( q, bindings, aggregates ) q = self.restrict_joins(q, bindings) return q, bindings, attributes, aggregates, cuts count = count_results(self, prep(cuts, drilldowns=drilldowns, columns=[1])[0]) summary = first_result(self, prep(cuts, aggregates=aggregates)[0].limit(1)) q, bindings, attributes, aggregates, cuts = \ prep(cuts, drilldowns=drilldowns, aggregates=aggregates) page, q = Pagination(self).apply(q, page, page_size, page_max) ordering, q, bindings = Ordering(self).apply(q, bindings, order) q = self.restrict_joins(q, bindings) cells = list(generate_results(self, q)) return { : count, : cells, : summary, : cuts, : aggregates, : attributes, : ordering, : page[], : page[] }
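An illustrative call of aggregate on an object exposing it (called cube here); every aggregate, drilldown and cut value, and the exact cut syntax, are assumptions for the sketch rather than facts from the record:

result = cube.aggregate(
    aggregates="amount.sum",   # hypothetical measure aggregate
    drilldowns="year",         # hypothetical dimension to group by
    cuts="year:2015",          # hypothetical filter, syntax assumed
    page=1,
    page_size=50,
)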
#vtb def add_size_info (self): if self.headers and "Content-Length" in self.headers and \ "Transfer-Encoding" not in self.headers: try: self.size = int(self.getheader("Content-Length")) except (ValueError, OverflowError): pass else: self.size = -1
Get size of URL content from HTTP header.
### Input: Get size of URL content from HTTP header. ### Response: #vtb def add_size_info (self): if self.headers and "Content-Length" in self.headers and \ "Transfer-Encoding" not in self.headers: try: self.size = int(self.getheader("Content-Length")) except (ValueError, OverflowError): pass else: self.size = -1
#vtb def get_parameter_dict(self, include_frozen=False): return OrderedDict(zip( self.get_parameter_names(include_frozen=include_frozen), self.get_parameter_vector(include_frozen=include_frozen), ))
Get an ordered dictionary of the parameters Args: include_frozen (Optional[bool]): Should the frozen parameters be included in the returned value? (default: ``False``)
### Input: Get an ordered dictionary of the parameters Args: include_frozen (Optional[bool]): Should the frozen parameters be included in the returned value? (default: ``False``) ### Response: #vtb def get_parameter_dict(self, include_frozen=False): return OrderedDict(zip( self.get_parameter_names(include_frozen=include_frozen), self.get_parameter_vector(include_frozen=include_frozen), ))
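A short sketch of reading parameters back from an object that exposes get_parameter_dict (called model here purely for illustration):

params = model.get_parameter_dict()
for name, value in params.items():
    print(name, value)
# frozen parameters can be included as well
all_params = model.get_parameter_dict(include_frozen=True)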
#vtb def _other_dpss_method(N, NW, Kmax): from scipy import linalg as la Kmax = int(Kmax) W = float(NW)/N ab = np.zeros((2,N), ) nidx = np.arange(N) ab[0,1:] = nidx[1:]*(N-nidx[1:])/2. ab[1] = ((N-1-2*nidx)/2.)**2 * np.cos(2*np.pi*W) l,v = la.eig_banded(ab, select=, select_range=(N-Kmax, N-1)) dpss = v.transpose()[::-1] fix_symmetric = (dpss[0::2].sum(axis=1) < 0) for i, f in enumerate(fix_symmetric): if f: dpss[2*i] *= -1 fix_skew = (dpss[1::2,1] < 0) for i, f in enumerate(fix_skew): if f: dpss[2*i+1] *= -1 acvs = _autocov(dpss, debias=False) * N r = 4*W*np.sinc(2*W*nidx) r[0] = 2*W eigvals = np.dot(acvs, r) return dpss, eigvals
Returns the Discrete Prolate Spheroidal Sequences of orders [0,Kmax-1] for a given frequency-spacing multiple NW and sequence length N. See the dpss function, which is the official version. This version is independent of the C code and relies on a Scipy function. However, it is slower by a factor of 3. Tridiagonal form of DPSS calculation from:
### Input: Returns the Discrete Prolate Spheroidal Sequences of orders [0,Kmax-1] for a given frequency-spacing multiple NW and sequence length N. See the dpss function, which is the official version. This version is independent of the C code and relies on a Scipy function. However, it is slower by a factor of 3. Tridiagonal form of DPSS calculation from: ### Response: #vtb def _other_dpss_method(N, NW, Kmax): from scipy import linalg as la Kmax = int(Kmax) W = float(NW)/N ab = np.zeros((2,N), ) nidx = np.arange(N) ab[0,1:] = nidx[1:]*(N-nidx[1:])/2. ab[1] = ((N-1-2*nidx)/2.)**2 * np.cos(2*np.pi*W) l,v = la.eig_banded(ab, select=, select_range=(N-Kmax, N-1)) dpss = v.transpose()[::-1] fix_symmetric = (dpss[0::2].sum(axis=1) < 0) for i, f in enumerate(fix_symmetric): if f: dpss[2*i] *= -1 fix_skew = (dpss[1::2,1] < 0) for i, f in enumerate(fix_skew): if f: dpss[2*i+1] *= -1 acvs = _autocov(dpss, debias=False) * N r = 4*W*np.sinc(2*W*nidx) r[0] = 2*W eigvals = np.dot(acvs, r) return dpss, eigvals
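A minimal sketch of calling the DPSS helper above; the window length, time-bandwidth product and taper count are arbitrary illustration values, and it assumes numpy is available as np in the module, as the code implies:

tapers, eigvals = _other_dpss_method(N=256, NW=2.5, Kmax=4)
print(tapers.shape)  # (4, 256): one taper per row
print(eigvals)       # concentration values for the four tapers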
#vtb def attach_http_service(cls, http_service: HTTPService): if cls._http_service is None: cls._http_service = http_service cls._set_bus(http_service) else: warnings.warn()
Attaches a service for hosting :param http_service: A HTTPService instance
### Input: Attaches a service for hosting :param http_service: A HTTPService instance ### Response: #vtb def attach_http_service(cls, http_service: HTTPService): if cls._http_service is None: cls._http_service = http_service cls._set_bus(http_service) else: warnings.warn()
#vtb def open(self, url): cache = self.cache() id = self.mangle(url, ) d = cache.get(id) if d is None: d = self.fn(url, self.options) cache.put(id, d) else: d.options = self.options for imp in d.imports: imp.imported.options = self.options return d
Open a WSDL at the specified I{url}. First, an attempt is made to retrieve the WSDL from the I{object cache}. After being unpickled from the cache, the I{options} attribute is restored. If not found, it is downloaded and instantiated using the I{fn} constructor and added to the cache for the next open(). @param url: A WSDL url. @type url: str. @return: The WSDL object. @rtype: I{Definitions}
### Input: Open a WSDL at the specified I{url}. First, an attempt is made to retrieve the WSDL from the I{object cache}. After being unpickled from the cache, the I{options} attribute is restored. If not found, it is downloaded and instantiated using the I{fn} constructor and added to the cache for the next open(). @param url: A WSDL url. @type url: str. @return: The WSDL object. @rtype: I{Definitions} ### Response: #vtb def open(self, url): cache = self.cache() id = self.mangle(url, ) d = cache.get(id) if d is None: d = self.fn(url, self.options) cache.put(id, d) else: d.options = self.options for imp in d.imports: imp.imported.options = self.options return d
#vtb def parse_sections(self, offset): self.sections = [] for i in xrange(self.FILE_HEADER.NumberOfSections): section = SectionStructure( self.__IMAGE_SECTION_HEADER_format__, pe=self ) if not section: break section_offset = offset + section.sizeof() * i section.set_file_offset(section_offset) section.__unpack__(self.__data__[section_offset : section_offset + section.sizeof()]) self.__structures__.append(section) if section.SizeOfRawData > len(self.__data__): self.__warnings.append( ( % i) + ) if adjust_FileAlignment( section.PointerToRawData, self.OPTIONAL_HEADER.FileAlignment ) > len(self.__data__): self.__warnings.append( ( % i) + ) if section.Misc_VirtualSize > 0x10000000: self.__warnings.append( ( % i) + ) if adjust_SectionAlignment( section.VirtualAddress, self.OPTIONAL_HEADER.SectionAlignment, self.OPTIONAL_HEADER.FileAlignment ) > 0x10000000: self.__warnings.append( ( % i) + ) if ( self.OPTIONAL_HEADER.FileAlignment != 0 and ( section.PointerToRawData % self.OPTIONAL_HEADER.FileAlignment) != 0): self.__warnings.append( ( % i) + + structures is a multiple of FileAlignment, this might imply the file is trying to confuse tools which parse this incorrectlyIMAGE_SCN_s flags according the the Characteristics member set_flags(section, section.Characteristics, section_flags) if ( section.__dict__.get(, False) and section.__dict__.get(, False) ): self.__warnings.append( ( % i) + + ) self.sections.append(section) if self.FILE_HEADER.NumberOfSections > 0 and self.sections: return offset + self.sections[0].sizeof()*self.FILE_HEADER.NumberOfSections else: return offset
Fetch the PE file sections. The sections will be readily available in the "sections" attribute. Its attributes will contain all the section information plus "data" a buffer containing the section's data. The "Characteristics" member will be processed and attributes representing the section characteristics (with the 'IMAGE_SCN_' string trimmed from the constant's names) will be added to the section instance. Refer to the SectionStructure class for additional info.
### Input: Fetch the PE file sections. The sections will be readily available in the "sections" attribute. Its attributes will contain all the section information plus "data" a buffer containing the section's data. The "Characteristics" member will be processed and attributes representing the section characteristics (with the 'IMAGE_SCN_' string trimmed from the constant's names) will be added to the section instance. Refer to the SectionStructure class for additional info. ### Response: #vtb def parse_sections(self, offset): self.sections = [] for i in xrange(self.FILE_HEADER.NumberOfSections): section = SectionStructure( self.__IMAGE_SECTION_HEADER_format__, pe=self ) if not section: break section_offset = offset + section.sizeof() * i section.set_file_offset(section_offset) section.__unpack__(self.__data__[section_offset : section_offset + section.sizeof()]) self.__structures__.append(section) if section.SizeOfRawData > len(self.__data__): self.__warnings.append( ( % i) + ) if adjust_FileAlignment( section.PointerToRawData, self.OPTIONAL_HEADER.FileAlignment ) > len(self.__data__): self.__warnings.append( ( % i) + ) if section.Misc_VirtualSize > 0x10000000: self.__warnings.append( ( % i) + ) if adjust_SectionAlignment( section.VirtualAddress, self.OPTIONAL_HEADER.SectionAlignment, self.OPTIONAL_HEADER.FileAlignment ) > 0x10000000: self.__warnings.append( ( % i) + ) if ( self.OPTIONAL_HEADER.FileAlignment != 0 and ( section.PointerToRawData % self.OPTIONAL_HEADER.FileAlignment) != 0): self.__warnings.append( ( % i) + + structures is a multiple of FileAlignment, this might imply the file is trying to confuse tools which parse this incorrectlyIMAGE_SCN_s flags according the the Characteristics member set_flags(section, section.Characteristics, section_flags) if ( section.__dict__.get(, False) and section.__dict__.get(, False) ): self.__warnings.append( ( % i) + + ) self.sections.append(section) if self.FILE_HEADER.NumberOfSections > 0 and self.sections: return offset + self.sections[0].sizeof()*self.FILE_HEADER.NumberOfSections else: return offset
#vtb def visit(self, visitor, predicate=None, **kw): predicate = predicate or bool for n in self.walk(**kw): if predicate(n): visitor(n)
Apply a function to matching nodes in the (sub)tree rooted at self. :param visitor: A callable accepting a Node object as its single argument. :param predicate: A callable accepting a Node object as its single argument and returning a boolean signaling whether the Node matches; if `None`, all nodes match. :param kw: Additional keyword arguments are passed through to self.walk.
### Input: Apply a function to matching nodes in the (sub)tree rooted at self. :param visitor: A callable accepting a Node object as its single argument. :param predicate: A callable accepting a Node object as its single argument and returning a boolean signaling whether the Node matches; if `None`, all nodes match. :param kw: Additional keyword arguments are passed through to self.walk. ### Response: #vtb def visit(self, visitor, predicate=None, **kw): predicate = predicate or bool for n in self.walk(**kw): if predicate(n): visitor(n)
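A sketch of visit: print the name of every leaf in the subtree rooted at tree. The tree object and its name/is_leaf attributes are assumptions for the example, not part of the record:

tree.visit(
    lambda n: print(n.name),
    predicate=lambda n: n.is_leaf,
)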
#vtb def sample_categorical(prob, rng): ret = numpy.empty(prob.shape[0], dtype=numpy.float32) for ind in range(prob.shape[0]): ret[ind] = numpy.searchsorted(numpy.cumsum(prob[ind]), rng.rand()).clip(min=0.0, max=prob.shape[ 1] - 0.5) return ret
Sample from independent categorical distributions Each batch is an independent categorical distribution. Parameters ---------- prob : numpy.ndarray Probability of the categorical distribution. Shape --> (batch_num, category_num) rng : numpy.random.RandomState Returns ------- ret : numpy.ndarray Sampling result. Shape --> (batch_num,)
### Input: Sample from independent categorical distributions Each batch is an independent categorical distribution. Parameters ---------- prob : numpy.ndarray Probability of the categorical distribution. Shape --> (batch_num, category_num) rng : numpy.random.RandomState Returns ------- ret : numpy.ndarray Sampling result. Shape --> (batch_num,) ### Response: #vtb def sample_categorical(prob, rng): ret = numpy.empty(prob.shape[0], dtype=numpy.float32) for ind in range(prob.shape[0]): ret[ind] = numpy.searchsorted(numpy.cumsum(prob[ind]), rng.rand()).clip(min=0.0, max=prob.shape[ 1] - 0.5) return ret
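A runnable sketch of sample_categorical with a batch of two 3-way distributions (numpy assumed imported as np):

rng = np.random.RandomState(0)
prob = np.array([[0.2, 0.5, 0.3],
                 [0.7, 0.2, 0.1]], dtype=np.float32)
samples = sample_categorical(prob, rng)
print(samples)  # shape (2,): one sampled category index per row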
#vtb def get_module_verbosity_flags(*labels): verbose_prefix_list = [, , ] veryverbose_prefix_list = [, , ] verbose_flags = tuple( [prefix + lbl for prefix, lbl in itertools.product(verbose_prefix_list, labels)]) veryverbose_flags = tuple( [prefix + lbl for prefix, lbl in itertools.product(veryverbose_prefix_list, labels)]) veryverbose_module = get_argflag(veryverbose_flags) or VERYVERBOSE verbose_module = (get_argflag(verbose_flags) or veryverbose_module or VERBOSE) if veryverbose_module: verbose_module = 2 return verbose_module, veryverbose_module
Checks for standard flags for enabling module-specific verbosity.
### Input: Checks for standard flags for enabling module-specific verbosity. ### Response: #vtb def get_module_verbosity_flags(*labels): verbose_prefix_list = [, , ] veryverbose_prefix_list = [, , ] verbose_flags = tuple( [prefix + lbl for prefix, lbl in itertools.product(verbose_prefix_list, labels)]) veryverbose_flags = tuple( [prefix + lbl for prefix, lbl in itertools.product(veryverbose_prefix_list, labels)]) veryverbose_module = get_argflag(veryverbose_flags) or VERYVERBOSE verbose_module = (get_argflag(verbose_flags) or veryverbose_module or VERBOSE) if veryverbose_module: verbose_module = 2 return verbose_module, veryverbose_module
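A sketch of how a module might consume get_module_verbosity_flags for a hypothetical "parse" label; the concrete command-line flag prefixes are elided in the record above, so they are deliberately not spelled out here:

verbose, veryverbose = get_module_verbosity_flags("parse")
if veryverbose:
    print("very verbose parse logging enabled")
elif verbose:
    print("verbose parse logging enabled")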
#vtb def from_export(cls, endpoint): assert isinstance(endpoint, ExportEndpoint) properties = endpoint.get_properties() properties[pelix.remote.PROP_ENDPOINT_ID] = endpoint.uid properties[pelix.remote.PROP_IMPORTED_CONFIGS] = endpoint.configurations properties[ pelix.remote.PROP_EXPORTED_INTERFACES ] = endpoint.specifications for key in ( pelix.remote.PROP_EXPORTED_CONFIGS, pelix.remote.PROP_EXPORTED_INTERFACES, pelix.remote.PROP_EXPORTED_INTENTS, pelix.remote.PROP_EXPORTED_INTENTS_EXTRA, ): try: del properties[key] except KeyError: pass properties[pelix.remote.PROP_ENDPOINT_NAME] = endpoint.name properties[ pelix.remote.PROP_ENDPOINT_FRAMEWORK_UUID ] = endpoint.framework return EndpointDescription(None, properties)
Converts an ExportEndpoint bean to an EndpointDescription :param endpoint: An ExportEndpoint bean :return: An EndpointDescription bean
### Input: Converts an ExportEndpoint bean to an EndpointDescription :param endpoint: An ExportEndpoint bean :return: An EndpointDescription bean ### Response: #vtb def from_export(cls, endpoint): assert isinstance(endpoint, ExportEndpoint) properties = endpoint.get_properties() properties[pelix.remote.PROP_ENDPOINT_ID] = endpoint.uid properties[pelix.remote.PROP_IMPORTED_CONFIGS] = endpoint.configurations properties[ pelix.remote.PROP_EXPORTED_INTERFACES ] = endpoint.specifications for key in ( pelix.remote.PROP_EXPORTED_CONFIGS, pelix.remote.PROP_EXPORTED_INTERFACES, pelix.remote.PROP_EXPORTED_INTENTS, pelix.remote.PROP_EXPORTED_INTENTS_EXTRA, ): try: del properties[key] except KeyError: pass properties[pelix.remote.PROP_ENDPOINT_NAME] = endpoint.name properties[ pelix.remote.PROP_ENDPOINT_FRAMEWORK_UUID ] = endpoint.framework return EndpointDescription(None, properties)
#vtb def addHydrogens(molecule, usedPyroles=None): for atom in molecule.atoms: if atom.has_explicit_hcount: atom.hcount = atom.explicit_hcount continue if atom.valences: for valence in atom.valences: hcount = max(0, int(valence - atom.sumBondOrders() + atom.charge)) if hcount >= 0: break else: if usedPyroles and not usedPyroles.has_key(atom.handle): raise PinkyError("Valence error in atom %s"%molecule.atoms.index(atom)) pass atom.hcount = hcount return molecule
(molecule) -> add implicit hydrogens to a molecule. If the atom has specified valences and the atom must be charged then a Valence Error is raised
### Input: (molecule) -> add implicit hydrogens to a molecule. If the atom has specified valences and the atom must be charged then a Valence Error is raised ### Response: #vtb def addHydrogens(molecule, usedPyroles=None): for atom in molecule.atoms: if atom.has_explicit_hcount: atom.hcount = atom.explicit_hcount continue if atom.valences: for valence in atom.valences: hcount = max(0, int(valence - atom.sumBondOrders() + atom.charge)) if hcount >= 0: break else: if usedPyroles and not usedPyroles.has_key(atom.handle): raise PinkyError("Valence error in atom %s"%molecule.atoms.index(atom)) pass atom.hcount = hcount return molecule
#vtb def _set_load_interval(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={: []}, int_size=32), restriction_dict={: [u]}), default=RestrictedClassType(base_type=long, restriction_dict={: []}, int_size=32)(300), is_leaf=True, yang_name="load-interval", rest_name="load-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: None, u: u, u: u, u: None}}, namespace=, defining_module=, yang_type=, is_config=True) except (TypeError, ValueError): raise ValueError({ : , : "uint32", : , }) self.__load_interval = t if hasattr(self, ): self._set()
Setter method for load_interval, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/policy/load_interval (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_load_interval is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_load_interval() directly.
### Input: Setter method for load_interval, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/policy/load_interval (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_load_interval is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_load_interval() directly. ### Response: #vtb def _set_load_interval(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={: []}, int_size=32), restriction_dict={: [u]}), default=RestrictedClassType(base_type=long, restriction_dict={: []}, int_size=32)(300), is_leaf=True, yang_name="load-interval", rest_name="load-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: None, u: u, u: u, u: None}}, namespace=, defining_module=, yang_type=, is_config=True) except (TypeError, ValueError): raise ValueError({ : , : "uint32", : , }) self.__load_interval = t if hasattr(self, ): self._set()
#vtb def cd_to(path, mkdir=False): def cd_to_decorator(func): @functools.wraps(func) def _cd_and_exec(*args, **kwargs): with cd(path, mkdir): return func(*args, **kwargs) return _cd_and_exec return cd_to_decorator
Make a decorator that works like cd, but wraps a function so it runs inside the given path. Usage:: >>> @cd_to("/") ... def say_where(): ... print(os.getcwd()) ... >>> say_where() /
### Input: Make a decorator that works like cd, but wraps a function so it runs inside the given path. Usage:: >>> @cd_to("/") ... def say_where(): ... print(os.getcwd()) ... >>> say_where() / ### Response: #vtb def cd_to(path, mkdir=False): def cd_to_decorator(func): @functools.wraps(func) def _cd_and_exec(*args, **kwargs): with cd(path, mkdir): return func(*args, **kwargs) return _cd_and_exec return cd_to_decorator
#vtb def sparql_query(self, query, flush=None, limit=None): return self.find_statements(query, language=, type=, flush=flush, limit=limit)
Run a SPARQL query. :param query: SPARQL query string :rtype: list of dictionaries
### Input: Run a SPARQL query. :param query: SPARQL query string :rtype: list of dictionaries ### Response: #vtb def sparql_query(self, query, flush=None, limit=None): return self.find_statements(query, language=, type=, flush=flush, limit=limit)
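A usage sketch for sparql_query; repo stands for the object exposing the method, and the query is a generic SPARQL example rather than one taken from the record:

query = (
    "SELECT ?subject ?title WHERE { "
    "?subject <http://purl.org/dc/elements/1.1/title> ?title } LIMIT 10"
)
for row in repo.sparql_query(query):
    print(row)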
#vtb def _preprocess(self, struct1, struct2, niggli=True): struct1 = struct1.copy() struct2 = struct2.copy() if niggli: struct1 = struct1.get_reduced_structure(reduction_algo="niggli") struct2 = struct2.get_reduced_structure(reduction_algo="niggli") if self._primitive_cell: struct1 = struct1.get_primitive_structure() struct2 = struct2.get_primitive_structure() if self._supercell: fu, s1_supercell = self._get_supercell_size(struct1, struct2) else: fu, s1_supercell = 1, True mult = fu if s1_supercell else 1/fu if self._scale: ratio = (struct2.volume / (struct1.volume * mult)) ** (1 / 6) nl1 = Lattice(struct1.lattice.matrix * ratio) struct1.lattice = nl1 nl2 = Lattice(struct2.lattice.matrix / ratio) struct2.lattice = nl2 return struct1, struct2, fu, s1_supercell
Rescales, finds the reduced structures (primitive and niggli), and finds fu, the supercell size to make struct1 comparable to s2
### Input: Rescales, finds the reduced structures (primitive and niggli), and finds fu, the supercell size to make struct1 comparable to s2 ### Response: #vtb def _preprocess(self, struct1, struct2, niggli=True): struct1 = struct1.copy() struct2 = struct2.copy() if niggli: struct1 = struct1.get_reduced_structure(reduction_algo="niggli") struct2 = struct2.get_reduced_structure(reduction_algo="niggli") if self._primitive_cell: struct1 = struct1.get_primitive_structure() struct2 = struct2.get_primitive_structure() if self._supercell: fu, s1_supercell = self._get_supercell_size(struct1, struct2) else: fu, s1_supercell = 1, True mult = fu if s1_supercell else 1/fu if self._scale: ratio = (struct2.volume / (struct1.volume * mult)) ** (1 / 6) nl1 = Lattice(struct1.lattice.matrix * ratio) struct1.lattice = nl1 nl2 = Lattice(struct2.lattice.matrix / ratio) struct2.lattice = nl2 return struct1, struct2, fu, s1_supercell