Dataset columns (string lengths):
- code: 64 to 7.01k
- docstring: 2 to 15.8k
- text: 144 to 19.2k
def _augment_observation_files(self, e):
    e.file_records = [self._augment_file(f) for f in e.file_records]
    return e
Augment all the file records in an event :internal:
def Backup(self, duration=0):
    total = 0
    duration_total = duration * 4
    children = self.GetChildrenIndexes()
    notes = 0
    for voice in children:
        v = self.GetChild(voice)
        indexes = v.GetChildrenIndexes()
        if len(indexes) > 1:
            indexes.reverse()
        for index in indexes:
            notes += 1
            note = v.GetChild(index)
            if hasattr(note, "duration"):
                total += note.duration
                if total >= duration_total:
                    break
        gap = [v.GetChild(i).duration
               for i in range(0, self.index - notes)
               if hasattr(v.GetChild(i), "duration")]
        previous = 0
        for item in gap:
            if item == previous:
                self.gap -= previous
                item = item / 2
            self.gap += item
            previous = item
    self.index -= notes
Method to use when a backup tag is encountered in MusicXML. Moves back in the bar by <duration>. :param duration: :return:
def pickle_encode(session_dict):
    "Returns the given session dictionary pickled and encoded as a string."
    pickled = pickle.dumps(session_dict, pickle.HIGHEST_PROTOCOL)
    # base64.encodestring was removed in Python 3.9; encodebytes is equivalent
    return base64.encodebytes(pickled + get_query_hash(pickled).encode())
Returns the given session dictionary pickled and encoded as a string.
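A quick standalone round trip of the recipe. The `get_query_hash` stand-in below is an assumption (the real helper is project-specific); SHA-1 is used only so the split offset of the 40-character digest is known:

import base64
import hashlib
import pickle

def get_query_hash(data):  # stand-in for the project's helper (assumption)
    return hashlib.sha1(data).hexdigest()

session = {"user_id": 42, "items": ["a", "b"]}
pickled = pickle.dumps(session, pickle.HIGHEST_PROTOCOL)
encoded = base64.encodebytes(pickled + get_query_hash(pickled).encode())

# decoding reverses the steps: base64-decode, split off the digest, verify, unpickle
decoded = base64.decodebytes(encoded)
payload, digest = decoded[:-40], decoded[-40:]
assert digest.decode() == get_query_hash(payload)
assert pickle.loads(payload) == session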
def transform(self, y):
    if isinstance(y, pd.DataFrame):
        # .ix is long deprecated; .iloc is the positional equivalent
        x = y.iloc[:, 0]
        y = y.iloc[:, 1]
    else:
        x = y[:, 0]
        y = y[:, 1]
    # the comparison literals were lost in extraction; the names below are
    # inferred from the numpy call on each branch
    if self.transform_type == 'add':
        return pd.DataFrame(np.add(x, y))
    elif self.transform_type == 'subtract':
        return pd.DataFrame(np.subtract(x, y))
    elif self.transform_type == 'multiply':
        return pd.DataFrame(np.multiply(x, y))
    elif self.transform_type == 'divide':
        return pd.DataFrame(np.divide(x, y))
    elif self.transform_type == 'remainder':
        return pd.DataFrame(np.remainder(x, y))
    elif self.transform_type == 'power':
        return pd.DataFrame(x ** y)
Transform features per specified math function. :param y: :return:
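To make the two-column contract concrete, here are the 'add' and 'multiply' branches in isolation (toy data; column names are hypothetical):

import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [4.0, 5.0, 6.0]})
x, y = df.iloc[:, 0], df.iloc[:, 1]
print(pd.DataFrame(np.add(x, y)))       # 5.0, 7.0, 9.0
print(pd.DataFrame(np.multiply(x, y)))  # 4.0, 10.0, 18.0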
def find_harpoon_options(self, configuration, args_dict):
    d = lambda r: {} if r in (None, "", NotSpecified) else r
    return MergedOptions.using(
        # lookup key assumed from the parallel args_dict lookup below
        dict(d(configuration.get("harpoon")).items()),
        dict(d(args_dict.get("harpoon")).items()),
    ).as_dict()
Return all the harpoon options.
def hash_file_contents(requirements_option: RequirementsOptions, path: Path) -> str:
    return hashlib.sha256(path.read_bytes() + bytes(
        requirements_option.name + arca.__version__, "utf-8"
    )).hexdigest()
Returns a SHA256 hash of the contents of ``path`` combined with the Arca version.
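The hashing recipe is easy to verify standalone; here the option name and library version are literal stand-ins for `requirements_option.name` and `arca.__version__`:

import hashlib
from pathlib import Path

def hash_with_salt(path: Path, option_name: str, version: str) -> str:
    # same recipe: file bytes + option name + version, SHA-256 hex digest
    return hashlib.sha256(
        path.read_bytes() + bytes(option_name + version, "utf-8")).hexdigest()

p = Path("requirements.txt")
p.write_text("requests==2.31.0\n")
print(hash_with_salt(p, "requirements_txt", "1.0.0"))
# the digest changes if the file, the option name, or the version changes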
def is_valid_camel(cls, input_string, strcmp=None, ignore=''):
    if not input_string:
        return False
    input_string = ''.join([c for c in input_string if c.isalpha()])
    matches = cls._get_regex_search(input_string,
                                    cls.REGEX_CAMEL.format(SEP=cls.REGEX_SEPARATORS),
                                    match_index=0,
                                    ignore=ignore)
    if matches or input_string == strcmp:
        if strcmp:
            index = input_string.find(strcmp) - 1
            is_camel = strcmp[0].isupper() and input_string[index].islower()
            is_input = strcmp == input_string
            is_start = index + 1 == 0
            return is_camel or is_input or is_start
        return True
    elif len(input_string) == 1:
        return True
    return False
Checks to see if an input string is valid for use in camel casing. This assumes that all-lowercase strings are not valid camel-case situations and that no camel string can be just a capitalized word. Took ideas from here: http://stackoverflow.com/questions/29916065/how-to-do-camelcase-split-in-python

:param input_string: str, input word
:param strcmp: str, force detection on a substring just in case it is undetectable (e.g. part of a section of text that's all lowercase)
:param ignore: str, what kind of string to ignore in the regex search
:return: bool, whether it is valid or not
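A self-contained sketch of the core camel test the method performs; the pattern below is an illustrative stand-in, not the project's `REGEX_CAMEL`:

import re

# at least one lower-to-upper transition, e.g. "camelCase"
CAMEL_RE = re.compile(r"[a-z][A-Z]")

for word in ("camelCase", "Capitalized", "lowercase", "XMLHttpRequest"):
    print(word, bool(CAMEL_RE.search(word)))
# camelCase True, Capitalized False, lowercase False, XMLHttpRequest True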
def ring_is_planar(ring, r_atoms):
    normals = []
    for a in r_atoms:
        adj = pybel.ob.OBAtomAtomIter(a.OBAtom)
        n_coords = [pybel.Atom(neigh).coords for neigh in adj if ring.IsMember(neigh)]
        vec1, vec2 = vector(a.coords, n_coords[0]), vector(a.coords, n_coords[1])
        normals.append(np.cross(vec1, vec2))
    for n1, n2 in itertools.product(normals, repeat=2):
        arom_angle = vecangle(n1, n2)
        if all([arom_angle > config.AROMATIC_PLANARITY,
                arom_angle < 180.0 - config.AROMATIC_PLANARITY]):
            return False
    return True
Given a set of ring atoms, check if the ring is sufficiently planar to be considered aromatic
def coil_combine(data, w_idx=[1, 2, 3], coil_dim=2, sampling_rate=5000.):
    w_data, w_supp_data = separate_signals(data, w_idx)
    fft_w = np.fft.fftshift(fft.fft(w_data))
    fft_w_supp = np.fft.fftshift(fft.fft(w_supp_data))
    freqs_w = np.linspace(-sampling_rate / 2.0, sampling_rate / 2.0,
                          w_data.shape[-1])
    bounds = [(None, None), (0, None), (0, None), (-np.pi, np.pi),
              (None, None), (None, None)]
    n_params = len(bounds)
    params = np.zeros(fft_w.shape[:-1] + (n_params,))
    for repeat in range(w_data.shape[0]):
        for echo in range(w_data.shape[1]):
            for coil in range(w_data.shape[2]):
                sig = fft_w[repeat, echo, coil]
                params[repeat, echo, coil] = _do_lorentzian_fit(freqs_w, sig, bounds)
    area_w = params[..., 1]
    s = np.mean(area_w.reshape(-1, area_w.shape[-1]), 0)
    n = np.var(area_w.reshape(-1, area_w.shape[-1]), 0)
    amp_weight = s / n
    amp_weight = amp_weight / np.sum(amp_weight)
    phase_param = params[..., 3]
    zero_phi_w = np.mean(phase_param.reshape(-1, phase_param.shape[-1]), 0)
    weight = amp_weight * np.exp(-1j * zero_phi_w)
    na = np.newaxis
    weighted_w_data = np.mean(np.fft.ifft(np.fft.fftshift(
        weight[na, na, :, na] * fft_w)), coil_dim)
    weighted_w_supp_data = np.mean(np.fft.ifft(np.fft.fftshift(
        weight[na, na, :, na] * fft_w_supp)), coil_dim)

    def normalize_this(x):
        return x * (x.shape[-1] / (np.sum(np.abs(x))))

    weighted_w_data = normalize_this(weighted_w_data)
    weighted_w_supp_data = normalize_this(weighted_w_supp_data)
    return weighted_w_data.squeeze(), weighted_w_supp_data.squeeze()
Combine data across coils based on the amplitude of the water peak, according to:

.. math::

    X = \sum_{i}{w_i S_i}

where :math:`X` is the resulting combined signal, :math:`S_i` are the individual coil signals, and the weights :math:`w_i` are calculated as:

.. math::

    w_i = mean(S_i) / var(S_i)

following [Hall2013]_. In addition, we apply a phase correction so that the phases of the signals from each coil are all 0.

Parameters
----------
data : float array
    The data as it comes from the scanner, with shape (transients, echos, coils, time points).
w_idx : list
    The indices of the non-water-suppressed transients. By default we take the 2nd-4th transients; the first is dropped because it tends to differ from the rest.
coil_dim : int
    The dimension on which the coils are represented. Default: 2.
sampling_rate : float
    The sampling rate in Hz. Default: 5000.

References
----------
.. [Hall2013] Emma L. Hall, Mary C. Stephenson, Darren Price, Peter G. Morris (2013). Methodology for improved detection of low concentration metabolites in MRS: Optimised combination of signals from multi-element coil arrays. Neuroimage 86: 35-42.
.. [Wald1997] Wald, L. and Wright, S. (1997). Theory and application of array coils in MR spectroscopy. NMR in Biomedicine, 10: 394-410.
.. [Keeler2005] Keeler, J (2005). Understanding NMR spectroscopy, second edition. Wiley (West Sussex, UK).
def GetSOAPHeaders(self, create_method):
    header = create_method(self._SOAP_HEADER_CLASS)
    header.networkCode = self._ad_manager_client.network_code
    # the join separator was lost in extraction; '' is assumed, since
    # GenerateLibSig conventionally carries its own leading space
    header.applicationName = ''.join([
        self._ad_manager_client.application_name,
        googleads.common.GenerateLibSig(self._PRODUCT_SIG)])
    return header
Returns the SOAP headers required for request authorization.

Args:
  create_method: The SOAP library specific method used to instantiate SOAP objects.

Returns:
  A SOAP object containing the headers.
def immutable_worker(worker, state, pre_state, created):
    return WorkerData._make(chain(
        (getattr(worker, f) for f in WORKER_OWN_FIELDS),
        (state, pre_state, created),
        (worker.heartbeats[-1] if worker.heartbeats else None,),
    ))
Converts to an immutable slots class to handle internally.
def _get_asam_configuration(driver_url=''):
    # Option keys and log messages below were stripped in extraction and are
    # reconstructed best-effort; the URL templates survived intact.
    asam_config = __opts__['asam'] if 'asam' in __opts__ else None
    if asam_config:
        try:
            for asam_server, service_config in six.iteritems(asam_config):
                username = service_config.get('username', None)
                password = service_config.get('password', None)
                protocol = service_config.get('protocol', 'http')
                port = service_config.get('port', 3451)

                if not username or not password:
                    log.error('Username or password has not been specified in '
                              'the master configuration for %s', asam_server)
                    return False

                ret = {
                    'platform_edit_url': "{0}://{1}:{2}/config/PlatformEdit.html".format(protocol, asam_server, port),
                    'platform_config_url': "{0}://{1}:{2}/config/PlatformConfig.html".format(protocol, asam_server, port),
                    'platformset_edit_url': "{0}://{1}:{2}/config/PlatformSetEdit.html".format(protocol, asam_server, port),
                    'platformset_config_url': "{0}://{1}:{2}/config/PlatformSetConfig.html".format(protocol, asam_server, port),
                    'username': username,
                    'password': password,
                }

                if (not driver_url) or (driver_url == asam_server):
                    return ret
        except Exception as exc:
            log.error('Exception encountered: %s', exc)
            return False

    if driver_url:
        log.error('Configuration for %s has not been specified in the master '
                  'configuration', driver_url)
        return False
    return False
Return the configuration read from the master configuration file or directory
def _increment(self, n=1):
    if self._cur_position >= self.num_tokens - 1:
        self._cur_position = self.num_tokens - 1
        self._finished = True
    else:
        self._cur_position += n
Move forward n tokens in the stream.
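A minimal stream to exercise the cursor logic; the surrounding class shape is assumed from the method alone:

class TokenStream:
    def __init__(self, tokens):
        self.tokens = list(tokens)
        self.num_tokens = len(self.tokens)
        self._cur_position = 0
        self._finished = False

    def _increment(self, n=1):
        if self._cur_position >= self.num_tokens - 1:
            self._cur_position = self.num_tokens - 1
            self._finished = True
        else:
            self._cur_position += n

s = TokenStream(["a", "b", "c"])
s._increment(2)
assert s._cur_position == 2 and not s._finished
s._increment()  # already at the last token: clamp and mark finished
assert s._finished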
def _get_result_constructor(self):
    if not self._values_list:
        return self.model._construct_instance
    elif self._flat_values_list:
        key = self._only_fields[0]
        return lambda row: row[key]
    else:
        return lambda row: [row[f] for f in self._only_fields]
Returns a function that will be used to instantiate query results
def save_diskspace(fname, reason, config):
    if config["algorithm"].get("save_diskspace", False):
        for ext in ["", ".bai"]:
            if os.path.exists(fname + ext):
                with open(fname + ext, "w") as out_handle:
                    out_handle.write("File removed to save disk space: %s" % reason)
Overwrite a file in place with a short message to save disk. This keeps files as a sanity check on processes working, but saves disk by replacing them with a short message.
def cast(self, method, args={}, declare=None, retry=None,
         retry_policy=None, type=None, exchange=None, **props):
    retry = self.retry if retry is None else retry
    # dict keys and message strings below were lost in extraction;
    # the names used here are best-effort reconstructions
    body = {'class': self.name, 'method': method, 'args': args}
    _retry_policy = self.retry_policy
    if retry_policy:
        _retry_policy = dict(_retry_policy, **retry_policy)
    if type and type not in self.types:
        raise ValueError('Unsupported type: {0}'.format(type))
    elif not type:
        type = ACTOR_TYPE.DIRECT
    props.setdefault('routing_key', self.routing_key)
    props.setdefault('serializer', self.serializer)
    exchange = exchange or self.type_to_exchange[type]()
    declare = (maybe_list(declare) or []) + [exchange]
    with producers[self._connection].acquire(block=True) as producer:
        # use the merged policy computed above
        return producer.publish(body, exchange=exchange, declare=declare,
                                retry=retry, retry_policy=_retry_policy,
                                **props)
Send a message to the actor, discarding replies.
def pass_to_pipeline_if_article(self, response, source_domain, original_url,
                                rss_title=None):
    if self.helper.heuristics.is_article(response, original_url):
        # forward rss_title instead of hard-coding None
        return self.pass_to_pipeline(response, source_domain,
                                     rss_title=rss_title)
Responsible for passing a NewscrawlerItem to the pipeline if the response contains an article.

:param obj response: the scrapy response to work on
:param str source_domain: the response's domain as set for the crawler
:param str original_url: the url set in the json file
:param str rss_title: the title extracted by an rssCrawler
:return NewscrawlerItem: NewscrawlerItem to pass to the pipeline
def update_service(self, service_id, **kwargs):
    body = self._formdata(kwargs, FastlyService.FIELDS)
    content = self._fetch("/service/%s" % service_id, method="PUT", body=body)
    return FastlyService(self, content)
Update a service.
def get_template_name(self):
    template = self.get_template()
    page_templates = settings.get_page_templates()
    for t in page_templates:
        if t[0] == template:
            return t[1]
    return template
Get the template name of this page if defined, or that of a closer parent with a defined template, or :data:`pages.settings.PAGE_DEFAULT_TEMPLATE` otherwise.
def score(package_path):
    python_files = find_files(package_path, '*.py')  # pattern lost in extraction; assumed
    total_counter = Counter()
    for python_file in python_files:
        output = run_pylint(python_file)
        counter = parse_pylint_output(output)
        total_counter += counter
    score_value = 0
    # iterate over (stat, count) pairs; the original enumerate() confused
    # the iteration index with the message count
    for stat, count in total_counter.items():
        score_value += SCORING_VALUES[stat] * count
    return score_value / 5
Runs pylint on a package and returns a score. Lower score is better.

:param package_path: path of the package to score
:return: the numeric score
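The scoring fold is a weighted sum over pylint message-category counts; standalone, with illustrative weights rather than the project's `SCORING_VALUES`:

from collections import Counter

SCORING_VALUES = {"error": 5, "warning": 1, "convention": 0.1}  # illustrative
total_counter = Counter(error=2, warning=7, convention=30)

score_value = sum(SCORING_VALUES[stat] * count
                  for stat, count in total_counter.items())
print(score_value / 5)  # (2*5 + 7*1 + 30*0.1) / 5 = 4.0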
def replace_vcf_info(keyword, annotation, variant_line=None, variant_dict=None):
    # separators and keys below were lost in extraction; they are filled in
    # from VCF conventions (tab-separated columns, INFO in column 8,
    # ';'-separated 'key=value' entries, '.' for an empty field)
    new_info = '{0}={1}'.format(keyword, annotation)
    logger.debug("Replacing the variant information {0}".format(new_info))
    fixed_variant = None
    new_info_list = []
    if variant_line:
        logger.debug("Adding information to a variant line")
        splitted_variant = variant_line.rstrip('\n').split('\t')
        logger.debug("Adding information to splitted variant line")
        old_info = splitted_variant[7]
        if old_info == '.':
            new_info_string = new_info
        else:
            splitted_info_string = old_info.split(';')
            for info in splitted_info_string:
                splitted_info_entry = info.split('=')
                if splitted_info_entry[0] == keyword:
                    new_info_list.append(new_info)
                else:
                    new_info_list.append(info)
            new_info_string = ';'.join(new_info_list)
        splitted_variant[7] = new_info_string
        fixed_variant = '\t'.join(splitted_variant)
    elif variant_dict:
        logger.debug("Adding information to a variant dict")
        old_info = variant_dict['INFO']
        if old_info == '.':
            variant_dict['INFO'] = new_info
        else:
            for info in old_info.split(';'):
                splitted_info_entry = info.split('=')
                if splitted_info_entry[0] == keyword:
                    new_info_list.append(new_info)
                else:
                    new_info_list.append(info)
            new_info_string = ';'.join(new_info_list)
            variant_dict['INFO'] = new_info_string
        fixed_variant = variant_dict
    return fixed_variant
Replace the information of an INFO field of a VCF variant line or a variant dict.

Arguments:
    variant_line (str): A vcf formatted variant line
    variant_dict (dict): A variant dictionary
    keyword (str): The info field key
    annotation (str): If the annotation is a key-value pair, this is the string that represents the value

Returns:
    variant_line (str): An annotated variant line
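A worked example of the line branch on a toy VCF record (tab-separated fields, INFO in column 8, ';'-separated 'key=value' entries):

line = "1\t100\trs1\tA\tG\t50\tPASS\tDP=10;CADD=1.2"
fields = line.split("\t")
entries = [e.split("=") for e in fields[7].split(";")]  # toy data: all key=value
new_entries = ["CADD=24.0" if k == "CADD" else "=".join((k, v))
               for k, v in entries]
fields[7] = ";".join(new_entries)
print("\t".join(fields))
# 1  100  rs1  A  G  50  PASS  DP=10;CADD=24.0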
def primitive(self, primitive):
    # dict keys lost in extraction; inferred from the attribute names
    self.entry_number = primitive['entry_number']
    self.item_hash = primitive['item_hash']
    self.timestamp = primitive['timestamp']
Entry from Python primitive.
def interpolate_exe(self, testString):
    # delimiters reconstructed from the ${which:...} format the docstring gives
    testString = testString.strip()
    if not (testString.startswith('${') and testString.endswith('}')):
        return testString
    newString = testString
    testString = testString[2:-1]
    testList = testString.split(':')
    if len(testList) == 2:
        if testList[0] == 'which':
            newString = distutils.spawn.find_executable(testList[1])
            if not newString:
                errmsg = "Cannot find exe %s in your path " % (testList[1])
                errmsg += "and you specified ${which:%s}." % (testList[1])
                raise ValueError(errmsg)
    return newString
Replace testString with a path to an executable based on the format.

If this looks like ${which:lalapps_tmpltbank} it will return the equivalent of which(lalapps_tmpltbank). Otherwise it will return an unchanged string.

Parameters
----------
testString : string
    The input string

Returns
-------
newString : string
    The output string.
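The substitution in action; `shutil.which` is used here as the modern equivalent of `distutils.spawn.find_executable` (distutils is removed in Python 3.12):

import shutil

test = "${which:ls}"
tag, exe = test[2:-1].split(":")  # ("which", "ls")
if tag == "which":
    print(shutil.which(exe))      # e.g. /bin/ls, or None if not on PATH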
def client_get(self, url, **kwargs):
    response = requests.get(self.make_url(url), headers=self.headers)
    if not response.ok:
        # message template lost in extraction; reconstructed
        raise Exception('GET request failed: {status} {reason}'.format(
            status=response.status_code, reason=response.reason))
    return response.json()
Send GET request with given url.
def fstab_present(name, fs_file, fs_vfstype, fs_mntops='defaults',
                  fs_freq=0, fs_passno=0, mount_by=None,
                  config='/etc/fstab', mount=True, match_on='auto'):
    # String literals below were stripped in extraction; defaults, dict keys,
    # execution-module names, and messages are reconstructed best-effort.
    ret = {'name': name,
           'result': False,
           'changes': {},
           'comment': []}

    if fs_mntops == 'defaults':
        if __grains__['os'] in ['MacOS', 'Darwin']:
            fs_mntops = 'noowners'
        elif __grains__['os'] == 'AIX':
            fs_mntops = ''

    if config == '/etc/fstab':
        if __grains__['os'] in ['MacOS', 'Darwin']:
            config = '/etc/auto_salt'
        elif __grains__['os'] == 'AIX':
            config = '/etc/filesystems'

    if not fs_file == '/':
        fs_file = fs_file.rstrip('/')

    fs_spec = _convert_to(name, mount_by)
    if not fs_spec:
        msg = 'Device {} cannot be converted to {}'
        ret['comment'].append(msg.format(name, mount_by))
        return ret

    if __opts__['test']:
        if __grains__['os'] in ['MacOS', 'Darwin']:
            out = __salt__['mount.set_automaster'](name=fs_file, device=fs_spec,
                                                   fstype=fs_vfstype, opts=fs_mntops,
                                                   config=config, test=True)
        elif __grains__['os'] == 'AIX':
            out = __salt__['mount.set_filesystems'](name=fs_file, device=fs_spec,
                                                    fstype=fs_vfstype, opts=fs_mntops,
                                                    mount=mount, config=config,
                                                    test=True, match_on=match_on)
        else:
            out = __salt__['mount.set_fstab'](name=fs_file, device=fs_spec,
                                              fstype=fs_vfstype, opts=fs_mntops,
                                              dump=fs_freq, pass_num=fs_passno,
                                              config=config, test=True,
                                              match_on=match_on)
        ret['result'] = None
        if out == 'present':
            msg = '{} entry is already in {}.'
            ret['comment'].append(msg.format(fs_file, config))
        elif out == 'new':
            msg = '{} entry will be written in {}.'
            ret['comment'].append(msg.format(fs_file, config))
        elif out == 'change':
            msg = '{} entry will be updated in {}.'
            ret['comment'].append(msg.format(fs_file, config))
        else:
            ret['result'] = False
            msg = '{} entry cannot be created in {}: {}.'
            ret['comment'].append(msg.format(fs_file, config, out))
        return ret

    if __grains__['os'] in ['MacOS', 'Darwin']:
        out = __salt__['mount.set_automaster'](name=fs_file, device=fs_spec,
                                               fstype=fs_vfstype, opts=fs_mntops,
                                               config=config)
    elif __grains__['os'] == 'AIX':
        out = __salt__['mount.set_filesystems'](name=fs_file, device=fs_spec,
                                                fstype=fs_vfstype, opts=fs_mntops,
                                                mount=mount, config=config,
                                                match_on=match_on)
    else:
        out = __salt__['mount.set_fstab'](name=fs_file, device=fs_spec,
                                          fstype=fs_vfstype, opts=fs_mntops,
                                          dump=fs_freq, pass_num=fs_passno,
                                          config=config, match_on=match_on)

    ret['result'] = True
    if out == 'present':
        msg = '{} entry was already in {}.'
        ret['comment'].append(msg.format(fs_file, config))
    elif out == 'new':
        ret['changes']['persist'] = out
        msg = '{} entry added in {}.'
        ret['comment'].append(msg.format(fs_file, config))
    elif out == 'change':
        ret['changes']['persist'] = out
        msg = '{} entry updated in {}.'
        ret['comment'].append(msg.format(fs_file, config))
    else:
        ret['result'] = False
        msg = '{} entry cannot be changed in {}: {}.'
        ret['comment'].append(msg.format(fs_file, config, out))
    return ret
Makes sure that a fstab mount point is present.

name
    The name of the block device. Can be any valid fs_spec value.
fs_file
    Mount point (target) for the filesystem.
fs_vfstype
    The type of the filesystem (e.g. ext4, xfs, btrfs, ...).
fs_mntops
    The mount options associated with the filesystem. Default is ``defaults``.
fs_freq
    Field used by dump to determine which filesystems need to be dumped. Default is ``0``.
fs_passno
    Field used by fsck to determine the order in which filesystem checks are done at boot time. Default is ``0``.
mount_by
    Select the final value for fs_spec. Can be [``None``, ``device``, ``label``, ``uuid``, ``partlabel``, ``partuuid``]. If ``None``, the value for fs_spec will be the parameter ``name``; otherwise the correct value is looked up based on the device name. For example, for ``uuid`` the value for fs_spec will be of the form 'UUID=xxx' instead of the device name set in ``name``.
config
    Place where the fstab file lives. Default is ``/etc/fstab``.
mount
    Set if the mount should be mounted immediately. Default is ``True``.
match_on
    A name or list of fstab properties on which this state should be applied. Default is ``auto``, a special value indicating to guess based on fstype. In general, ``auto`` matches on name for recognized special devices, and on device otherwise.
def do_PROPPATCH(self, environ, start_response):
    path = environ["PATH_INFO"]
    res = self._davProvider.get_resource_inst(path, environ)

    environ.setdefault("HTTP_DEPTH", "0")
    if environ["HTTP_DEPTH"] != "0":
        self._fail(HTTP_BAD_REQUEST, "Depth must be '0'.")
    if res is None:
        self._fail(HTTP_NOT_FOUND)

    self._evaluate_if_headers(res, environ)
    self._check_write_permission(res, "0", environ)

    requestEL = util.parse_xml_body(environ)
    if requestEL.tag != "{DAV:}propertyupdate":
        self._fail(HTTP_BAD_REQUEST)

    propupdatelist = []
    for ppnode in requestEL:
        propupdatemethod = None
        if ppnode.tag == "{DAV:}remove":
            propupdatemethod = "remove"
        elif ppnode.tag == "{DAV:}set":
            propupdatemethod = "set"
        else:
            # quoted element names in the messages were stripped in
            # extraction and are reconstructed
            self._fail(HTTP_BAD_REQUEST,
                       "Unknown tag (expected 'set' or 'remove').")
        for propnode in ppnode:
            if propnode.tag != "{DAV:}prop":
                self._fail(HTTP_BAD_REQUEST, "Unknown tag (expected 'prop').")
            for propertynode in propnode:
                propvalue = None
                if propupdatemethod == "remove":
                    propvalue = None
                    if len(propertynode) > 0:
                        self._fail(HTTP_BAD_REQUEST,
                                   "prop element must be empty for 'remove'.")
                else:
                    propvalue = propertynode
                propupdatelist.append((propertynode.tag, propvalue))

    # Dry-run pass: check that every update would succeed
    successflag = True
    writeresultlist = []
    for (name, propvalue) in propupdatelist:
        try:
            res.set_property_value(name, propvalue, dry_run=True)
        except Exception as e:
            writeresult = as_DAVError(e)
        else:
            writeresult = "200 OK"
        writeresultlist.append((name, writeresult))
        successflag = successflag and writeresult == "200 OK"

    propResponseList = []
    responsedescription = []
    if not successflag:
        # all-or-nothing: report failed dependencies and change nothing
        for (name, result) in writeresultlist:
            if result == "200 OK":
                result = DAVError(HTTP_FAILED_DEPENDENCY)
            elif isinstance(result, DAVError):
                responsedescription.append(result.get_user_info())
            propResponseList.append((name, result))
    else:
        for (name, propvalue) in propupdatelist:
            try:
                res.set_property_value(name, propvalue, dry_run=False)
                propResponseList.append((name, None))
            except Exception as e:
                e = as_DAVError(e)
                propResponseList.append((name, e))
                responsedescription.append(e.get_user_info())

    multistatusEL = xml_tools.make_multistatus_el()
    href = res.get_href()
    util.add_property_response(multistatusEL, href, propResponseList)
    if responsedescription:
        etree.SubElement(
            multistatusEL, "{DAV:}responsedescription"
        ).text = "\n".join(responsedescription)
    return util.send_multi_status_response(environ, start_response, multistatusEL)
Handle PROPPATCH request to set or remove a property. @see http://www.webdav.org/specs/rfc4918.html#METHOD_PROPPATCH
def _add_image_part(self, image):
    partname = self._next_image_partname(image.ext)
    image_part = ImagePart.from_image(image, partname)
    self.append(image_part)
    return image_part
Return an |ImagePart| instance newly created from image and appended to the collection.
def generate_ucsm_handle(hostname, username, password):
    ucs_handle = UcsHandle()
    try:
        success = ucs_handle.Login(hostname, username, password)
    except UcsException as e:
        # the original %-format used a named placeholder with a bare value;
        # a dict is required for named substitution
        print("Cisco client exception: %(msg)s" % {'msg': e.message})
        raise exception.UcsConnectionError(message=e.message)
    return success, ucs_handle
Creates UCS Manager handle object and establishes a session with UCS Manager.

:param hostname: UCS Manager hostname or IP-address
:param username: Username to login to UCS Manager
:param password: Login user password
:raises UcsConnectionError: In case of error.
def recursive_cov(self, cov, length, mean, chain, scaling=1, epsilon=0):
    n = length + len(chain)
    k = length
    new_mean = self.recursive_mean(mean, length, chain)
    t0 = k * np.outer(mean, mean)
    t1 = np.dot(chain.T, chain)
    t2 = n * np.outer(new_mean, new_mean)
    t3 = epsilon * np.eye(cov.shape[0])
    new_cov = (k - 1) / (n - 1.) * cov + scaling / (n - 1.) * (t0 + t1 - t2 + t3)
    return new_cov, new_mean
r"""Compute the covariance recursively. Return the new covariance and the new mean. .. math:: C_k & = \frac{1}{k-1} (\sum_{i=1}^k x_i x_i^T - k\bar{x_k}\bar{x_k}^T) C_n & = \frac{1}{n-1} (\sum_{i=1}^k x_i x_i^T + \sum_{i=k+1}^n x_i x_i^T - n\bar{x_n}\bar{x_n}^T) & = \frac{1}{n-1} ((k-1)C_k + k\bar{x_k}\bar{x_k}^T + \sum_{i=k+1}^n x_i x_i^T - n\bar{x_n}\bar{x_n}^T) :Parameters: - cov : matrix Previous covariance matrix. - length : int Length of chain used to compute the previous covariance. - mean : array Previous mean. - chain : array Sample used to update covariance. - scaling : float Scaling parameter - epsilon : float Set to a small value to avoid singular matrices.
### Input: r"""Compute the covariance recursively. Return the new covariance and the new mean. .. math:: C_k & = \frac{1}{k-1} (\sum_{i=1}^k x_i x_i^T - k\bar{x_k}\bar{x_k}^T) C_n & = \frac{1}{n-1} (\sum_{i=1}^k x_i x_i^T + \sum_{i=k+1}^n x_i x_i^T - n\bar{x_n}\bar{x_n}^T) & = \frac{1}{n-1} ((k-1)C_k + k\bar{x_k}\bar{x_k}^T + \sum_{i=k+1}^n x_i x_i^T - n\bar{x_n}\bar{x_n}^T) :Parameters: - cov : matrix Previous covariance matrix. - length : int Length of chain used to compute the previous covariance. - mean : array Previous mean. - chain : array Sample used to update covariance. - scaling : float Scaling parameter - epsilon : float Set to a small value to avoid singular matrices. ### Response: #vtb def recursive_cov(self, cov, length, mean, chain, scaling=1, epsilon=0): r n = length + len(chain) k = length new_mean = self.recursive_mean(mean, length, chain) t0 = k * np.outer(mean, mean) t1 = np.dot(chain.T, chain) t2 = n * np.outer(new_mean, new_mean) t3 = epsilon * np.eye(cov.shape[0]) new_cov = ( k - 1) / ( n - 1.) * cov + scaling / ( n - 1.) * ( t0 + t1 - t2 + t3) return new_cov, new_mean
def get_next_step(self):
    if self.parent.is_selected_layer_keywordless:
        self.parent.parent_step = self
        self.parent.existing_keywords = None
        self.parent.set_mode_label_to_keywords_creation()
        new_step = self.parent.step_kw_purpose
    else:
        if layers_intersect(self.parent.hazard_layer,
                            self.parent.exposure_layer):
            new_step = self.parent.step_fc_agglayer_origin
        else:
            new_step = self.parent.step_fc_disjoint_layers
    return new_step
Find the proper step when user clicks the Next button. :returns: The step to be switched to :rtype: WizardStep instance or None
def printer(self, message, color_level='info'):
    # the option key and default level were lost in extraction;
    # 'colorized' and 'info' are assumptions
    if self.job_args.get('colorized'):
        print(cloud_utils.return_colorized(msg=message, color=color_level))
    else:
        print(message)
Print messages and log them. :param message: item to print to screen
def _get_col_dimstr(tdim, is_string=False):
    # the string literals ('', ',' and the 'array[...]' templates) were lost
    # in extraction; the values below are best-effort reconstructions
    dimstr = ''
    if tdim is None:
        dimstr = 'array[bad TDIM]'
    else:
        if is_string:
            if len(tdim) > 1:
                dimstr = [str(d) for d in tdim[1:]]
        else:
            if len(tdim) > 1 or tdim[0] > 1:
                dimstr = [str(d) for d in tdim]
        if dimstr != '':
            dimstr = ','.join(dimstr)
            dimstr = 'array[%s]' % dimstr
    return dimstr
not for variable length
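With the reconstruction above, the helper behaves like this (the exact output text depends on the reconstructed literals, so treat it as indicative):

print(_get_col_dimstr((3, 4)))                   # 'array[3,4]'
print(_get_col_dimstr((20, 5), is_string=True))  # 'array[5]' (dim 0 is the string width)
print(_get_col_dimstr((1,)))                     # '' (scalar column)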
def show_disk(name=None, kwargs=None, call=None):
    # the kwarg key and log message were lost in extraction; 'disk_name'
    # matches the CLI examples in the docstring
    if not kwargs or 'disk_name' not in kwargs:
        log.error('Must specify disk_name.')
        return False
    conn = get_conn()
    return _expand_disk(conn.ex_get_volume(kwargs['disk_name']))
Show the details of an existing disk.

CLI Example:

.. code-block:: bash

    salt-cloud -a show_disk myinstance disk_name=mydisk
    salt-cloud -f show_disk gce disk_name=mydisk
def get_environment(self):
    # Command strings, substring tests, and dict keys were stripped in
    # extraction; they are reconstructed best-effort from the standard
    # NAPALM environment schema and IOS-style command output.
    environment = {}
    cpu_cmd = 'show processes cpu'
    mem_cmd = 'show memory statistics'
    temp_cmd = 'show env temperature status'

    output = self._send_command(cpu_cmd)
    environment.setdefault('cpu', {})
    environment['cpu'][0] = {}
    environment['cpu'][0]['%usage'] = 0.0
    for line in output.splitlines():
        if 'CPU utilization' in line:
            # 1-minute average, as noted in the docstring
            cpu_regex = r'^.*one minute: (\d+)%.*$'
            match = re.search(cpu_regex, line)
            environment['cpu'][0]['%usage'] = float(match.group(1))
            break

    output = self._send_command(mem_cmd)
    for line in output.splitlines():
        if 'Processor' in line:
            _, _, _, proc_used_mem, proc_free_mem = line.split()[:5]
        elif 'I/O' in line or 'io' in line:
            _, _, _, io_used_mem, io_free_mem = line.split()[:5]
    used_mem = int(proc_used_mem) + int(io_used_mem)
    free_mem = int(proc_free_mem) + int(io_free_mem)
    environment.setdefault('memory', {})
    environment['memory']['used_ram'] = used_mem
    environment['memory']['available_ram'] = free_mem

    environment.setdefault('temperature', {})
    output = self._send_command(temp_cmd)
    if 'Invalid input' not in output:
        for line in output.splitlines():
            if 'System Temperature Value' in line:
                system_temp = float(line.split(':')[1].split()[0])
            elif 'Yellow Threshold' in line:
                system_temp_alert = float(line.split(':')[1].split()[0])
            elif 'Red Threshold' in line:
                system_temp_crit = float(line.split(':')[1].split()[0])
        env_value = {'is_alert': system_temp >= system_temp_alert,
                     'is_critical': system_temp >= system_temp_crit,
                     'temperature': system_temp}
        environment['temperature']['system'] = env_value
    else:
        env_value = {'is_alert': False, 'is_critical': False,
                     'temperature': -1.0}
        environment['temperature']['invalid'] = env_value

    # power and fans are not implemented; return placeholder values
    environment.setdefault('power', {})
    environment['power']['invalid'] = {'status': True, 'capacity': -1.0,
                                       'output': -1.0}
    environment.setdefault('fans', {})
    environment['fans']['invalid'] = {'status': True}
    return environment
Get environment facts.

* power and fan are currently not implemented
* cpu is using the 1-minute average
* cpu is hard-coded to cpu0 (i.e. only a single CPU)
def loadScopeGroupbyID(self, id, callback=None, errback=None):
    import ns1.ipam
    scope_group = ns1.ipam.Scopegroup(self.config, id=id)
    return scope_group.load(callback=callback, errback=errback)
Load an existing Scope Group by ID into a high level Scope Group object :param int id: id of an existing ScopeGroup
def mean(self, values, axis=0, weights=None, dtype=None):
    values = np.asarray(values)
    if weights is None:
        result = self.reduce(values, axis=axis, dtype=dtype)
        shape = [1] * values.ndim
        shape[axis] = self.groups
        weights = self.count.reshape(shape)
    else:
        weights = np.asarray(weights)
        result = self.reduce(values * weights, axis=axis, dtype=dtype)
        weights = self.reduce(weights, axis=axis, dtype=dtype)
    return self.unique, result / weights
Compute the mean over each group.

Parameters
----------
values : array_like, [keys, ...]
    values to take average of per group
axis : int, optional
    alternative reduction axis for values
weights : ndarray, [keys, ...], optional
    weight to use for each value
dtype : output dtype

Returns
-------
unique : ndarray, [groups]
    unique keys
reduced : ndarray, [groups, ...]
    value array, reduced over groups
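The weighted branch reduces to a per-group weighted average; a standalone numpy equivalent (the class's `reduce` performs the within-group sums):

import numpy as np

keys = np.array([0, 0, 1, 1, 1])
values = np.array([1.0, 3.0, 2.0, 4.0, 6.0])
weights = np.array([1.0, 1.0, 2.0, 1.0, 1.0])

unique = np.unique(keys)
wsums = np.array([np.sum(values[keys == k] * weights[keys == k]) for k in unique])
wtot = np.array([np.sum(weights[keys == k]) for k in unique])
print(unique, wsums / wtot)  # [0 1] [2.  3.5]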
def create(self, name, data):
    # the URL template and response key were stripped in extraction;
    # the values below are best-effort reconstructions
    return self._update('/job-binary-internals/%s' % urlparse.quote(name.encode()),
                        data, 'job_binary_internal', dump_json=False)
Create a Job Binary Internal. :param str data: raw data of script text
def update(request, ident, stateless=False, **kwargs):
    dash_app, app = DashApp.locate_item(ident, stateless)
    request_body = json.loads(request.body.decode('utf-8'))

    if app.use_dash_dispatch():
        # endpoint name assumed; the original literal was lost in extraction
        view_func = app.locate_endpoint_function('dash-update-component')
        import flask
        with app.test_request_context():
            flask.request._cached_json = (request_body,
                                          flask.request._cached_json[True])
            resp = view_func()
    else:
        app_state = request.session.get("django_plotly_dash", dict())
        # arg_map keys reconstructed best-effort; originals were stripped
        arg_map = {'dash_app_name': ident,
                   'dash_app': dash_app,
                   'user': request.user,
                   'session_state': app_state}
        resp = app.dispatch_with_args(request_body, arg_map)
        request.session['django_plotly_dash'] = app_state
        dash_app.handle_current_state()

    # the comparison literal was lost; an empty-body check fits the
    # empty-response branch below
    if str(resp) == '':
        return HttpResponse("")

    try:
        rdata = resp.data
        rtype = resp.mimetype
    except Exception:
        rdata = resp
        rtype = "application/json"
    return HttpResponse(rdata, content_type=rtype)
Generate the update JSON response.
#vtb def processes(self, processes): if self._processes > 1: self._pool.close() self._pool.join() self._pool = multiprocessing.Pool(processes) else: self._pool = None self._logger.log(, .format( processes ))
Set the number of concurrent processes the ABC will utilize for fitness function evaluation; if <= 1, single process is used Args: processes (int): number of concurrent processes
### Input: Set the number of concurrent processes the ABC will utilize for fitness function evaluation; if <= 1, single process is used Args: processes (int): number of concurrent processes ### Response: #vtb def processes(self, processes): if self._processes > 1: self._pool.close() self._pool.join() self._pool = multiprocessing.Pool(processes) else: self._pool = None self._logger.log(, .format( processes ))
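A self-contained sketch of the same teardown-then-recreate pattern; the class and method names here are illustrative, not taken from the ABC library:

import multiprocessing

class Evaluator:
    def __init__(self):
        self._pool = None

    def set_processes(self, processes):
        if self._pool is not None:   # drain the old pool before replacing it
            self._pool.close()
            self._pool.join()
        # a pool only pays off with more than one worker process
        self._pool = multiprocessing.Pool(processes) if processes > 1 else None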
#vtb def load_path(path, overrides=None, **kwargs): f = open(path, 'r') content = ''.join(f.readlines()) f.close() if not isinstance(content, str): raise AssertionError("Expected content to be of type str but it is "+str(type(content))) return load(content, **kwargs)
Convenience function for loading a YAML configuration from a file. Parameters ---------- path : str The path to the file to load on disk. overrides : dict, optional A dictionary containing overrides to apply. The location of the override is specified in the key as a dot-delimited path to the desired parameter, e.g. "model.corruptor.corruption_level". Returns ------- graph : dict or object The dictionary or object (if the top-level element specified a Python object to instantiate). Notes ----- Other keyword arguments are passed on to `yaml.load`.
### Input: Convenience function for loading a YAML configuration from a file. Parameters ---------- path : str The path to the file to load on disk. overrides : dict, optional A dictionary containing overrides to apply. The location of the override is specified in the key as a dot-delimited path to the desired parameter, e.g. "model.corruptor.corruption_level". Returns ------- graph : dict or object The dictionary or object (if the top-level element specified a Python object to instantiate). Notes ----- Other keyword arguments are passed on to `yaml.load`. ### Response: #vtb def load_path(path, overrides=None, **kwargs): f = open(path, 'r') content = ''.join(f.readlines()) f.close() if not isinstance(content, str): raise AssertionError("Expected content to be of type str but it is "+str(type(content))) return load(content, **kwargs)
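For comparison, a plain-PyYAML sketch of the same file-to-config step using a context manager; note this will not resolve any library-specific YAML tags the load() above may support:

import yaml

with open("config.yaml") as f:   # the context manager closes the file for us
    graph = yaml.safe_load(f)    # a dict, or a scalar/list for simple documents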
#vtb def stop_app(self): try: if self._conn: finally: self.clear_host_port()
Overrides superclass.
### Input: Overrides superclass. ### Response: #vtb def stop_app(self): try: if self._conn: finally: self.clear_host_port()
#vtb def get_evidence_by_hash(self, evidence_hash: str) -> Optional[Evidence]: return self.session.query(Evidence).filter(Evidence.sha512 == evidence_hash).one_or_none()
Look up an evidence by its hash.
### Input: Look up an evidence by its hash. ### Response: #vtb def get_evidence_by_hash(self, evidence_hash: str) -> Optional[Evidence]: return self.session.query(Evidence).filter(Evidence.sha512 == evidence_hash).one_or_none()
#vtb def reduce_data_frame (df, chunk_slicers, avg_cols=(), uavg_cols=(), minmax_cols=(), nchunk_colname=, uncert_prefix=, min_points_per_chunk=3): subds = [df.iloc[idx] for idx in chunk_slicers] subds = [sd for sd in subds if sd.shape[0] >= min_points_per_chunk] chunked = df.__class__ ({nchunk_colname: np.zeros (len (subds), dtype=np.int)}) uncert_col_name = lambda c: uncert_prefix + c for i, subd in enumerate (subds): label = chunked.index[i] chunked.loc[label,nchunk_colname] = subd.shape[0] for col in avg_cols: chunked.loc[label,col] = subd[col].mean () for col in uavg_cols: ucol = uncert_col_name (col) v, u = weighted_mean (subd[col], subd[ucol]) chunked.loc[label,col] = v chunked.loc[label,ucol] = u for col in minmax_cols: chunked.loc[label, +col] = subd[col].min () chunked.loc[label, +col] = subd[col].max () return chunked
Reduce" a DataFrame by collapsing rows in grouped chunks. Returns another DataFrame with similar columns but fewer rows. Arguments: df The input :class:`pandas.DataFrame`. chunk_slicers An iterable that returns values that are used to slice *df* with its :meth:`pandas.DataFrame.iloc` indexer. An example value might be the generator returned from :func:`slice_evenly_with_gaps`. avg_cols An iterable of names of columns that are to be reduced by taking the mean. uavg_cols An iterable of names of columns that are to be reduced by taking a weighted mean. minmax_cols An iterable of names of columns that are to be reduced by reporting minimum and maximum values. nchunk_colname The name of a column to create reporting the number of rows contributing to each chunk. uncert_prefix The column name prefix for locating uncertainty estimates. By default, the uncertainty on the column ``"temp"`` is given in the column ``"utemp"``. min_points_per_chunk Require at least this many rows in each chunk. Smaller chunks are discarded. Returns a new :class:`pandas.DataFrame`.
### Input: Reduce" a DataFrame by collapsing rows in grouped chunks. Returns another DataFrame with similar columns but fewer rows. Arguments: df The input :class:`pandas.DataFrame`. chunk_slicers An iterable that returns values that are used to slice *df* with its :meth:`pandas.DataFrame.iloc` indexer. An example value might be the generator returned from :func:`slice_evenly_with_gaps`. avg_cols An iterable of names of columns that are to be reduced by taking the mean. uavg_cols An iterable of names of columns that are to be reduced by taking a weighted mean. minmax_cols An iterable of names of columns that are to be reduced by reporting minimum and maximum values. nchunk_colname The name of a column to create reporting the number of rows contributing to each chunk. uncert_prefix The column name prefix for locating uncertainty estimates. By default, the uncertainty on the column ``"temp"`` is given in the column ``"utemp"``. min_points_per_chunk Require at least this many rows in each chunk. Smaller chunks are discarded. Returns a new :class:`pandas.DataFrame`. ### Response: #vtb def reduce_data_frame (df, chunk_slicers, avg_cols=(), uavg_cols=(), minmax_cols=(), nchunk_colname=, uncert_prefix=, min_points_per_chunk=3): subds = [df.iloc[idx] for idx in chunk_slicers] subds = [sd for sd in subds if sd.shape[0] >= min_points_per_chunk] chunked = df.__class__ ({nchunk_colname: np.zeros (len (subds), dtype=np.int)}) uncert_col_name = lambda c: uncert_prefix + c for i, subd in enumerate (subds): label = chunked.index[i] chunked.loc[label,nchunk_colname] = subd.shape[0] for col in avg_cols: chunked.loc[label,col] = subd[col].mean () for col in uavg_cols: ucol = uncert_col_name (col) v, u = weighted_mean (subd[col], subd[ucol]) chunked.loc[label,col] = v chunked.loc[label,ucol] = u for col in minmax_cols: chunked.loc[label, +col] = subd[col].min () chunked.loc[label, +col] = subd[col].max () return chunked
#vtb def invoke_ssh_shell(cls, *args, **kwargs): pty = kwargs.pop('pty', True) echo = kwargs.pop('echo', False) client = cls.connect_ssh(*args, **kwargs) f = client.invoke_shell(pty=pty, echo=echo) f.client = client return f
invoke_ssh_shell(arguments..., pty=True, echo=False) Start a new shell on a remote server. It first calls :meth:`Flow.connect_ssh` using all positional and keyword arguments, then calls :meth:`SSHClient.invoke_shell` with the pty / echo options. Args: arguments...: The options for the SSH connection. pty(bool): Request a pseudo-terminal from the server. echo(bool): Whether to echo read/written data to stdout by default. Returns: :class:`Flow`: A Flow instance initialised with the SSH channel.
### Input: invoke_ssh_shell(arguments..., pty=True, echo=False) Start a new shell on a remote server. It first calls :meth:`Flow.connect_ssh` using all positional and keyword arguments, then calls :meth:`SSHClient.invoke_shell` with the pty / echo options. Args: arguments...: The options for the SSH connection. pty(bool): Request a pseudo-terminal from the server. echo(bool): Whether to echo read/written data to stdout by default. Returns: :class:`Flow`: A Flow instance initialised with the SSH channel. ### Response: #vtb def invoke_ssh_shell(cls, *args, **kwargs): pty = kwargs.pop('pty', True) echo = kwargs.pop('echo', False) client = cls.connect_ssh(*args, **kwargs) f = client.invoke_shell(pty=pty, echo=echo) f.client = client return f
#vtb def post_fork_child(self): entry_point = .format(__name__) exec_env = combined_dict(os.environ, dict(PANTS_ENTRYPOINT=entry_point)) cmd = [sys.executable] + sys.argv self._logger.debug(.format(entry_point, .join(cmd))) os.spawnve(os.P_NOWAIT, sys.executable, cmd, env=exec_env)
Post-fork() child callback for ProcessManager.daemon_spawn().
### Input: Post-fork() child callback for ProcessManager.daemon_spawn(). ### Response: #vtb def post_fork_child(self): entry_point = .format(__name__) exec_env = combined_dict(os.environ, dict(PANTS_ENTRYPOINT=entry_point)) cmd = [sys.executable] + sys.argv self._logger.debug(.format(entry_point, .join(cmd))) os.spawnve(os.P_NOWAIT, sys.executable, cmd, env=exec_env)
#vtb def indication(self, pdu): if _debug: StreamToPacket._debug("indication %r", pdu) for packet in self.packetize(pdu, self.downstreamBuffer): self.request(packet)
Message going downstream.
### Input: Message going downstream. ### Response: #vtb def indication(self, pdu): if _debug: StreamToPacket._debug("indication %r", pdu) for packet in self.packetize(pdu, self.downstreamBuffer): self.request(packet)
#vtb def assignMgtKey(self, CorpNum, MgtKeyType, ItemKey, MgtKey, UserID=None): if MgtKeyType == None or MgtKeyType == "": raise PopbillException(-99999999, "세금계산서 발행유형이 입력되지 않았습니다.") if ItemKey == None or ItemKey == "": raise PopbillException(-99999999, "아이템키가 입력되지 않았습니다.") if MgtKey == None or MgtKey == "": raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.") postDate = "MgtKey=" + MgtKey return self._httppost( + ItemKey + + MgtKeyType, postDate, CorpNum, UserID, "", "application/x-www-form-urlencoded; charset=utf-8")
Assign a management key. args CorpNum : Popbill member's business registration number MgtKeyType : tax invoice type, SELL-sales, BUY-purchase, TRUSTEE-trustee ItemKey : item key (can be looked up via the Search API) MgtKey : partner management key to assign to the tax invoice UserID : Popbill member ID return processing result, consisting of code and message raise PopbillException
### Input: Assign a management key. args CorpNum : Popbill member's business registration number MgtKeyType : tax invoice type, SELL-sales, BUY-purchase, TRUSTEE-trustee ItemKey : item key (can be looked up via the Search API) MgtKey : partner management key to assign to the tax invoice UserID : Popbill member ID return processing result, consisting of code and message raise PopbillException ### Response: #vtb def assignMgtKey(self, CorpNum, MgtKeyType, ItemKey, MgtKey, UserID=None): if MgtKeyType == None or MgtKeyType == "": raise PopbillException(-99999999, "세금계산서 발행유형이 입력되지 않았습니다.") if ItemKey == None or ItemKey == "": raise PopbillException(-99999999, "아이템키가 입력되지 않았습니다.") if MgtKey == None or MgtKey == "": raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.") postDate = "MgtKey=" + MgtKey return self._httppost( + ItemKey + + MgtKeyType, postDate, CorpNum, UserID, "", "application/x-www-form-urlencoded; charset=utf-8")
#vtb def get_conditional_probs(self, source=None): c_probs = np.zeros((self.m * (self.k + 1), self.k)) mu = self.mu.detach().clone().numpy() for i in range(self.m): mu_i = mu[i * self.k : (i + 1) * self.k, :] c_probs[i * (self.k + 1) + 1 : (i + 1) * (self.k + 1), :] = mu_i c_probs[i * (self.k + 1), :] = 1 - mu_i.sum(axis=0) c_probs = np.clip(c_probs, 0.01, 0.99) if source is not None: return c_probs[source * (self.k + 1) : (source + 1) * (self.k + 1)] else: return c_probs
Returns the full conditional probabilities table as a numpy array, where row i*(k+1) + ly is the conditional probabilities of source i emitting label ly (including abstains 0), conditioned on different values of Y, i.e.: c_probs[i*(k+1) + ly, y] = P(\lambda_i = ly | Y = y) Note that this simply involves inferring the kth row by law of total probability and adding in to mu. If `source` is not None, returns only the corresponding block.
### Input: Returns the full conditional probabilities table as a numpy array, where row i*(k+1) + ly is the conditional probabilities of source i emitting label ly (including abstains 0), conditioned on different values of Y, i.e.: c_probs[i*(k+1) + ly, y] = P(\lambda_i = ly | Y = y) Note that this simply involves inferring the kth row by law of total probability and adding in to mu. If `source` is not None, returns only the corresponding block. ### Response: #vtb def get_conditional_probs(self, source=None): c_probs = np.zeros((self.m * (self.k + 1), self.k)) mu = self.mu.detach().clone().numpy() for i in range(self.m): mu_i = mu[i * self.k : (i + 1) * self.k, :] c_probs[i * (self.k + 1) + 1 : (i + 1) * (self.k + 1), :] = mu_i c_probs[i * (self.k + 1), :] = 1 - mu_i.sum(axis=0) c_probs = np.clip(c_probs, 0.01, 0.99) if source is not None: return c_probs[source * (self.k + 1) : (source + 1) * (self.k + 1)] else: return c_probs
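The law-of-total-probability step in isolation: given a k-by-k block of P(lambda = ly | Y = y) for the non-abstain labels, the abstain row is one minus each column sum. A small NumPy check:

import numpy as np
mu_i = np.array([[0.7, 0.2],    # P(lambda=1 | Y=1), P(lambda=1 | Y=2)
                 [0.1, 0.6]])   # P(lambda=2 | Y=1), P(lambda=2 | Y=2)
abstain = 1 - mu_i.sum(axis=0)  # P(lambda=0 | Y=y), one entry per y
print(abstain)                  # [0.2 0.2]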
#vtb def _build_register_function(universe: bool, in_place: bool): def register(func): return _register_function(func.__name__, func, universe, in_place) return register
Build a decorator function to tag transformation functions. :param universe: Does the first positional argument of this function correspond to a universe graph? :param in_place: Does this function return a new graph, or just modify it in-place?
### Input: Build a decorator function to tag transformation functions. :param universe: Does the first positional argument of this function correspond to a universe graph? :param in_place: Does this function return a new graph, or just modify it in-place? ### Response: #vtb def _build_register_function(universe: bool, in_place: bool): def register(func): return _register_function(func.__name__, func, universe, in_place) return register
#vtb def p_expr_usr(p): if p[2].type_ == TYPE.string: p[0] = make_builtin(p.lineno(1), , p[2], type_=TYPE.uinteger) else: p[0] = make_builtin(p.lineno(1), , make_typecast(TYPE.uinteger, p[2], p.lineno(1)), type_=TYPE.uinteger)
bexpr : USR bexpr %prec UMINUS
### Input: bexpr : USR bexpr %prec UMINUS ### Response: #vtb def p_expr_usr(p): if p[2].type_ == TYPE.string: p[0] = make_builtin(p.lineno(1), , p[2], type_=TYPE.uinteger) else: p[0] = make_builtin(p.lineno(1), , make_typecast(TYPE.uinteger, p[2], p.lineno(1)), type_=TYPE.uinteger)
#vtb def WaitHotKeyReleased(hotkey: tuple) -> None: mod = {ModifierKey.Alt: Keys.VK_MENU, ModifierKey.Control: Keys.VK_CONTROL, ModifierKey.Shift: Keys.VK_SHIFT, ModifierKey.Win: Keys.VK_LWIN } while True: time.sleep(0.05) if IsKeyPressed(hotkey[1]): continue for k, v in mod.items(): if k & hotkey[0]: if IsKeyPressed(v): break else: break
hotkey: tuple, two ints tuple(modifierKey, key)
### Input: hotkey: tuple, two ints tuple(modifierKey, key) ### Response: #vtb def WaitHotKeyReleased(hotkey: tuple) -> None: mod = {ModifierKey.Alt: Keys.VK_MENU, ModifierKey.Control: Keys.VK_CONTROL, ModifierKey.Shift: Keys.VK_SHIFT, ModifierKey.Win: Keys.VK_LWIN } while True: time.sleep(0.05) if IsKeyPressed(hotkey[1]): continue for k, v in mod.items(): if k & hotkey[0]: if IsKeyPressed(v): break else: break
#vtb def _separate(self): if self.total_free_space is None: return 0 else: sepa = self.default_column_space if self.default_column_space_remainder > 0: sepa += 1 self.default_column_space_remainder -= 1 logger.debug("remainder: %d, separator: %d", self.default_column_space_remainder, sepa) return sepa
Get the width of the separator for the current column. :return: int
### Input: Get the width of the separator for the current column. :return: int ### Response: #vtb def _separate(self): if self.total_free_space is None: return 0 else: sepa = self.default_column_space if self.default_column_space_remainder > 0: sepa += 1 self.default_column_space_remainder -= 1 logger.debug("remainder: %d, separator: %d", self.default_column_space_remainder, sepa) return sepa
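The underlying trick, distributing leftover width one extra space per separator until the remainder runs out, sketched standalone:

def separator_widths(n_separators, base_space, remainder):
    widths = []
    for _ in range(n_separators):
        extra = 1 if remainder > 0 else 0  # first `remainder` separators grow by one
        remainder -= extra
        widths.append(base_space + extra)
    return widths

print(separator_widths(4, 2, 3))  # [3, 3, 3, 2]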
#vtb def p_expression_sla(self, p): p[0] = Sll(p[1], p[3], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
expression : expression LSHIFTA expression
### Input: expression : expression LSHIFTA expression ### Response: #vtb def p_expression_sla(self, p): p[0] = Sll(p[1], p[3], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
#vtb def _cb_inform_interface_change(self, msg): self._logger.debug(, msg) self._interface_changed.set()
Update the sensors and requests available.
### Input: Update the sensors and requests available. ### Response: #vtb def _cb_inform_interface_change(self, msg): self._logger.debug(, msg) self._interface_changed.set()
#vtb def to_dict(mapreduce_yaml): all_configs = [] for config in mapreduce_yaml.mapreduce: out = { "name": config.name, "mapper_input_reader": config.mapper.input_reader, "mapper_handler": config.mapper.handler, } if config.mapper.params_validator: out["mapper_params_validator"] = config.mapper.params_validator if config.mapper.params: param_defaults = {} for param in config.mapper.params: param_defaults[param.name] = param.default or param.value out["mapper_params"] = param_defaults if config.params: param_defaults = {} for param in config.params: param_defaults[param.name] = param.default or param.value out["params"] = param_defaults if config.mapper.output_writer: out["mapper_output_writer"] = config.mapper.output_writer all_configs.append(out) return all_configs
Converts a MapReduceYaml file into a JSON-encodable dictionary. For use in user-visible UI and internal methods for interfacing with user code (like param validation). Args: mapreduce_yaml: The Python representation of the mapreduce.yaml document. Returns: A list of configuration dictionaries.
### Input: Converts a MapReduceYaml file into a JSON-encodable dictionary. For use in user-visible UI and internal methods for interfacing with user code (like param validation). Args: mapreduce_yaml: The Python representation of the mapreduce.yaml document. Returns: A list of configuration dictionaries. ### Response: #vtb def to_dict(mapreduce_yaml): all_configs = [] for config in mapreduce_yaml.mapreduce: out = { "name": config.name, "mapper_input_reader": config.mapper.input_reader, "mapper_handler": config.mapper.handler, } if config.mapper.params_validator: out["mapper_params_validator"] = config.mapper.params_validator if config.mapper.params: param_defaults = {} for param in config.mapper.params: param_defaults[param.name] = param.default or param.value out["mapper_params"] = param_defaults if config.params: param_defaults = {} for param in config.params: param_defaults[param.name] = param.default or param.value out["params"] = param_defaults if config.mapper.output_writer: out["mapper_output_writer"] = config.mapper.output_writer all_configs.append(out) return all_configs
#vtb def explained_variance(pred:Tensor, targ:Tensor)->Rank0Tensor: "Explained variance between `pred` and `targ`." pred,targ = flatten_check(pred,targ) var_pct = torch.var(targ - pred) / torch.var(targ) return 1 - var_pct
Explained variance between `pred` and `targ`.
### Input: Explained variance between `pred` and `targ`. ### Response: #vtb def explained_variance(pred:Tensor, targ:Tensor)->Rank0Tensor: "Explained variance between `pred` and `targ`." pred,targ = flatten_check(pred,targ) var_pct = torch.var(targ - pred) / torch.var(targ) return 1 - var_pct
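A quick numeric check of the closed form EV = 1 - Var(targ - pred) / Var(targ):

import torch
targ = torch.tensor([1.0, 2.0, 3.0, 4.0])
pred = torch.tensor([1.1, 1.9, 3.2, 3.8])
print(1 - torch.var(targ - pred) / torch.var(targ))  # close to 1 for a good fit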
#vtb def tracefunc_xml(func): funcname = meta_util_six.get_funcname(func) def wrp_tracefunc2(*args, **kwargs): verbose = kwargs.get('verbose', True) if verbose: print('<%s>' % (funcname,)) with util_print.Indenter(): ret = func(*args, **kwargs) if verbose: print('</%s>' % (funcname,)) return ret wrp_tracefunc2_ = ignores_exc_tb(wrp_tracefunc2) wrp_tracefunc2_ = preserve_sig(wrp_tracefunc2_, func) return wrp_tracefunc2_
Causes output of function to be printed in an XML style block
### Input: Causes output of function to be printed in an XML style block ### Response: #vtb def tracefunc_xml(func): funcname = meta_util_six.get_funcname(func) def wrp_tracefunc2(*args, **kwargs): verbose = kwargs.get('verbose', True) if verbose: print('<%s>' % (funcname,)) with util_print.Indenter(): ret = func(*args, **kwargs) if verbose: print('</%s>' % (funcname,)) return ret wrp_tracefunc2_ = ignores_exc_tb(wrp_tracefunc2) wrp_tracefunc2_ = preserve_sig(wrp_tracefunc2_, func) return wrp_tracefunc2_
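A self-contained sketch of the same idea without the utool helpers (ignores_exc_tb, preserve_sig, Indenter), bracketing a call with open and close tags:

import functools

def trace_xml(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        print("<%s>" % func.__name__)   # opening tag before the call
        result = func(*args, **kwargs)
        print("</%s>" % func.__name__)  # closing tag after the call
        return result
    return wrapper

@trace_xml
def greet():
    print("hello")

greet()  # prints <greet>, hello, </greet> on separate lines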
#vtb def tokenize_sents(string): string = six.text_type(string) spans = [] for match in re.finditer(, string): spans.append(match) spans_count = len(spans) rez = [] off = 0 for i in range(spans_count): tok = string[spans[i].start():spans[i].end()] if i == spans_count - 1: rez.append(string[off:spans[i].end()]) elif tok[-1] in [, , , , ]: tok1 = tok[re.search(, tok).start()-1] next_tok = string[spans[i + 1].start():spans[i + 1].end()] if (next_tok[0].isupper() and not tok1.isupper() and not (tok[-1] != or tok1[0] == or tok in ABBRS)): rez.append(string[off:spans[i].end()]) off = spans[i + 1].start() return rez
Tokenize input text to sentences. :param string: Text to tokenize :type string: str or unicode :return: sentences :rtype: list of strings
### Input: Tokenize input text to sentences. :param string: Text to tokenize :type string: str or unicode :return: sentences :rtype: list of strings ### Response: #vtb def tokenize_sents(string): string = six.text_type(string) spans = [] for match in re.finditer(, string): spans.append(match) spans_count = len(spans) rez = [] off = 0 for i in range(spans_count): tok = string[spans[i].start():spans[i].end()] if i == spans_count - 1: rez.append(string[off:spans[i].end()]) elif tok[-1] in [, , , , ]: tok1 = tok[re.search(, tok).start()-1] next_tok = string[spans[i + 1].start():spans[i + 1].end()] if (next_tok[0].isupper() and not tok1.isupper() and not (tok[-1] != or tok1[0] == or tok in ABBRS)): rez.append(string[off:spans[i].end()]) off = spans[i + 1].start() return rez
#vtb def turb_ice(turbice: [str], unit: str = ) -> str: if not turbice: return if turbice[0][0] == : conditions = TURBULANCE_CONDITIONS elif turbice[0][0] == : conditions = ICING_CONDITIONS else: return split = [] for item in turbice: if len(item) == 6: split.append([item[1:2], item[2:5], item[5]]) for i in reversed(range(len(split) - 1)): if split[i][2] == and split[i][0] == split[i + 1][0] \ and int(split[i + 1][1]) == (int(split[i][1]) + int(split[i][2]) * 10): split[i][2] = str(int(split[i][2]) + int(split[i + 1][2])) split.pop(i + 1) return .join([.format( conditions=conditions[item[0]], low_alt=int(item[1]) * 100, high_alt=int(item[1]) * 100 + int(item[2]) * 1000, unit=unit) for item in split])
Translate the list of turbulence or icing into a readable sentence. Ex: Occasional moderate turbulence in clouds from 3000ft to 14000ft
### Input: Translate the list of turbulence or icing into a readable sentence. Ex: Occasional moderate turbulence in clouds from 3000ft to 14000ft ### Response: #vtb def turb_ice(turbice: [str], unit: str = ) -> str: if not turbice: return if turbice[0][0] == : conditions = TURBULANCE_CONDITIONS elif turbice[0][0] == : conditions = ICING_CONDITIONS else: return split = [] for item in turbice: if len(item) == 6: split.append([item[1:2], item[2:5], item[5]]) for i in reversed(range(len(split) - 1)): if split[i][2] == and split[i][0] == split[i + 1][0] \ and int(split[i + 1][1]) == (int(split[i][1]) + int(split[i][2]) * 10): split[i][2] = str(int(split[i][2]) + int(split[i + 1][2])) split.pop(i + 1) return .join([.format( conditions=conditions[item[0]], low_alt=int(item[1]) * 100, high_alt=int(item[1]) * 100 + int(item[2]) * 1000, unit=unit) for item in split])
#vtb def by_position(self, position): try: return self.filter_by(position=position).one() except sa.orm.exc.NoResultFound: return None
Like `.get()`, but by position number.
### Input: Like `.get()`, but by position number. ### Response: #vtb def by_position(self, position): try: return self.filter_by(position=position).one() except sa.orm.exc.NoResultFound: return None
#vtb def get_tables(self): url = self.build_url(self._endpoints.get()) response = self.session.get(url) if not response: return [] data = response.json() return [self.table_constructor(parent=self, **{self._cloud_data_key: table}) for table in data.get(, [])]
Returns a collection of this worksheet's tables
### Input: Returns a collection of this worksheet's tables ### Response: #vtb def get_tables(self): url = self.build_url(self._endpoints.get()) response = self.session.get(url) if not response: return [] data = response.json() return [self.table_constructor(parent=self, **{self._cloud_data_key: table}) for table in data.get(, [])]
#vtb def to_vcf(self, path, rename=None, number=None, description=None, fill=None, write_header=True): write_vcf(path, callset=self, rename=rename, number=number, description=description, fill=fill, write_header=write_header)
r"""Write to a variant call format (VCF) file. Parameters ---------- path : string File path. rename : dict, optional Rename these columns in the VCF. number : dict, optional Override the number specified in INFO headers. description : dict, optional Descriptions for the INFO and FILTER headers. fill : dict, optional Fill values used for missing data in the table. write_header : bool, optional If True write VCF header. Examples -------- Setup a variant table to write out:: >>> import allel >>> chrom = [b'chr1', b'chr1', b'chr2', b'chr2', b'chr3'] >>> pos = [2, 6, 3, 8, 1] >>> ids = ['a', 'b', 'c', 'd', 'e'] >>> ref = [b'A', b'C', b'T', b'G', b'N'] >>> alt = [(b'T', b'.'), ... (b'G', b'.'), ... (b'A', b'C'), ... (b'C', b'A'), ... (b'X', b'.')] >>> qual = [1.2, 2.3, 3.4, 4.5, 5.6] >>> filter_qd = [True, True, True, False, False] >>> filter_dp = [True, False, True, False, False] >>> dp = [12, 23, 34, 45, 56] >>> qd = [12.3, 23.4, 34.5, 45.6, 56.7] >>> flg = [True, False, True, False, True] >>> ac = [(1, -1), (3, -1), (5, 6), (7, 8), (9, -1)] >>> xx = [(1.2, 2.3), (3.4, 4.5), (5.6, 6.7), (7.8, 8.9), ... (9.0, 9.9)] >>> columns = [chrom, pos, ids, ref, alt, qual, filter_dp, ... filter_qd, dp, qd, flg, ac, xx] >>> records = list(zip(*columns)) >>> dtype = [('CHROM', 'S4'), ... ('POS', 'u4'), ... ('ID', 'S1'), ... ('REF', 'S1'), ... ('ALT', ('S1', 2)), ... ('qual', 'f4'), ... ('filter_dp', bool), ... ('filter_qd', bool), ... ('dp', int), ... ('qd', float), ... ('flg', bool), ... ('ac', (int, 2)), ... ('xx', (float, 2))] >>> vt = allel.VariantTable(records, dtype=dtype) Now write out to VCF and inspect the result:: >>> rename = {'dp': 'DP', 'qd': 'QD', 'filter_qd': 'QD'} >>> fill = {'ALT': b'.', 'ac': -1} >>> number = {'ac': 'A'} >>> description = {'ac': 'Allele counts', 'filter_dp': 'Low depth'} >>> vt.to_vcf('example.vcf', rename=rename, fill=fill, ... number=number, description=description) >>> print(open('example.vcf').read()) ##fileformat=VCFv4.1 ##fileDate=... ##source=... ##INFO=<ID=DP,Number=1,Type=Integer,Description=""> ##INFO=<ID=QD,Number=1,Type=Float,Description=""> ##INFO=<ID=ac,Number=A,Type=Integer,Description="Allele counts"> ##INFO=<ID=flg,Number=0,Type=Flag,Description=""> ##INFO=<ID=xx,Number=2,Type=Float,Description=""> ##FILTER=<ID=QD,Description=""> ##FILTER=<ID=dp,Description="Low depth"> #CHROM POS ID REF ALT QUAL FILTER INFO chr1 2 a A T 1.2 QD;dp DP=12;QD=12.3;ac=1;flg;xx=... chr1 6 b C G 2.3 QD DP=23;QD=23.4;ac=3;xx=3.4,4.5 chr2 3 c T A,C 3.4 QD;dp DP=34;QD=34.5;ac=5,6;flg;x... chr2 8 d G C,A 4.5 PASS DP=45;QD=45.6;ac=7,8;xx=7... chr3 1 e N X 5.6 PASS DP=56;QD=56.7;ac=9;flg;xx=...
### Input: r"""Write to a variant call format (VCF) file. Parameters ---------- path : string File path. rename : dict, optional Rename these columns in the VCF. number : dict, optional Override the number specified in INFO headers. description : dict, optional Descriptions for the INFO and FILTER headers. fill : dict, optional Fill values used for missing data in the table. write_header : bool, optional If True write VCF header. Examples -------- Setup a variant table to write out:: >>> import allel >>> chrom = [b'chr1', b'chr1', b'chr2', b'chr2', b'chr3'] >>> pos = [2, 6, 3, 8, 1] >>> ids = ['a', 'b', 'c', 'd', 'e'] >>> ref = [b'A', b'C', b'T', b'G', b'N'] >>> alt = [(b'T', b'.'), ... (b'G', b'.'), ... (b'A', b'C'), ... (b'C', b'A'), ... (b'X', b'.')] >>> qual = [1.2, 2.3, 3.4, 4.5, 5.6] >>> filter_qd = [True, True, True, False, False] >>> filter_dp = [True, False, True, False, False] >>> dp = [12, 23, 34, 45, 56] >>> qd = [12.3, 23.4, 34.5, 45.6, 56.7] >>> flg = [True, False, True, False, True] >>> ac = [(1, -1), (3, -1), (5, 6), (7, 8), (9, -1)] >>> xx = [(1.2, 2.3), (3.4, 4.5), (5.6, 6.7), (7.8, 8.9), ... (9.0, 9.9)] >>> columns = [chrom, pos, ids, ref, alt, qual, filter_dp, ... filter_qd, dp, qd, flg, ac, xx] >>> records = list(zip(*columns)) >>> dtype = [('CHROM', 'S4'), ... ('POS', 'u4'), ... ('ID', 'S1'), ... ('REF', 'S1'), ... ('ALT', ('S1', 2)), ... ('qual', 'f4'), ... ('filter_dp', bool), ... ('filter_qd', bool), ... ('dp', int), ... ('qd', float), ... ('flg', bool), ... ('ac', (int, 2)), ... ('xx', (float, 2))] >>> vt = allel.VariantTable(records, dtype=dtype) Now write out to VCF and inspect the result:: >>> rename = {'dp': 'DP', 'qd': 'QD', 'filter_qd': 'QD'} >>> fill = {'ALT': b'.', 'ac': -1} >>> number = {'ac': 'A'} >>> description = {'ac': 'Allele counts', 'filter_dp': 'Low depth'} >>> vt.to_vcf('example.vcf', rename=rename, fill=fill, ... number=number, description=description) >>> print(open('example.vcf').read()) ##fileformat=VCFv4.1 ##fileDate=... ##source=... ##INFO=<ID=DP,Number=1,Type=Integer,Description=""> ##INFO=<ID=QD,Number=1,Type=Float,Description=""> ##INFO=<ID=ac,Number=A,Type=Integer,Description="Allele counts"> ##INFO=<ID=flg,Number=0,Type=Flag,Description=""> ##INFO=<ID=xx,Number=2,Type=Float,Description=""> ##FILTER=<ID=QD,Description=""> ##FILTER=<ID=dp,Description="Low depth"> #CHROM POS ID REF ALT QUAL FILTER INFO chr1 2 a A T 1.2 QD;dp DP=12;QD=12.3;ac=1;flg;xx=... chr1 6 b C G 2.3 QD DP=23;QD=23.4;ac=3;xx=3.4,4.5 chr2 3 c T A,C 3.4 QD;dp DP=34;QD=34.5;ac=5,6;flg;x... chr2 8 d G C,A 4.5 PASS DP=45;QD=45.6;ac=7,8;xx=7... chr3 1 e N X 5.6 PASS DP=56;QD=56.7;ac=9;flg;xx=... ### Response: #vtb def to_vcf(self, path, rename=None, number=None, description=None, fill=None, write_header=True): r write_vcf(path, callset=self, rename=rename, number=number, description=description, fill=fill, write_header=write_header)
#vtb def scale_dtype(arr, dtype): max_int = np.iinfo(dtype).max return (arr * max_int).astype(dtype)
Convert an array from 0..1 to dtype, scaling up linearly
### Input: Convert an array from 0..1 to dtype, scaling up linearly ### Response: #vtb def scale_dtype(arr, dtype): max_int = np.iinfo(dtype).max return (arr * max_int).astype(dtype)
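Usage check, grounded in the function as written; astype truncates toward zero, so 0.5 lands on 127 rather than 128:

import numpy as np
print(scale_dtype(np.array([0.0, 0.5, 1.0]), np.uint8))  # [  0 127 255]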
#vtb def index(): institute_objs = user_institutes(store, current_user) institutes_count = ((institute_obj, store.cases(collaborator=institute_obj[]).count()) for institute_obj in institute_objs if institute_obj) return dict(institutes=institutes_count)
Display a list of all user institutes.
### Input: Display a list of all user institutes. ### Response: #vtb def index(): institute_objs = user_institutes(store, current_user) institutes_count = ((institute_obj, store.cases(collaborator=institute_obj[]).count()) for institute_obj in institute_objs if institute_obj) return dict(institutes=institutes_count)
#vtb def conv_elems_1d(x, factor, out_depth=None): out_depth = out_depth or x.get_shape().as_list()[-1] x = tf.expand_dims(x, 1) x = layers().Conv2D( filters=out_depth, kernel_size=(1, factor), strides=(1, factor), padding="valid", data_format="channels_last", )(x) x = tf.squeeze(x, 1) return x
Decrease the length and change the dimensionality. Merge/restore/compress factor positions of dim depth of the input into a single position of dim out_depth. This is basically just a strided convolution without overlap between strides. The original length has to be divisible by factor. Args: x (tf.Tensor): shape [batch_size, length, depth] factor (int): Length compression factor. out_depth (int): Output depth Returns: tf.Tensor: shape [batch_size, length//factor, out_depth]
### Input: Decrease the length and change the dimensionality. Merge/restore/compress factor positions of dim depth of the input into a single position of dim out_depth. This is basically just a strided convolution without overlap between strides. The original length has to be divisible by factor. Args: x (tf.Tensor): shape [batch_size, length, depth] factor (int): Length compression factor. out_depth (int): Output depth Returns: tf.Tensor: shape [batch_size, length//factor, out_depth] ### Response: #vtb def conv_elems_1d(x, factor, out_depth=None): out_depth = out_depth or x.get_shape().as_list()[-1] x = tf.expand_dims(x, 1) x = layers().Conv2D( filters=out_depth, kernel_size=(1, factor), strides=(1, factor), padding="valid", data_format="channels_last", )(x) x = tf.squeeze(x, 1) return x
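Because the stride equals the kernel width, the linear part of the op is equivalent to folding `factor` neighbouring positions into the channel axis and applying one dense map; a NumPy sketch of that view (the weight matrix here is a stand-in for the conv kernel, bias omitted):

import numpy as np
batch, length, depth, factor, out_depth = 2, 8, 4, 2, 6
x = np.random.rand(batch, length, depth)
w = np.random.rand(factor * depth, out_depth)
folded = x.reshape(batch, length // factor, factor * depth)
print((folded @ w).shape)  # (2, 4, 6): length halved, depth remapped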
#vtb def _make_request(session, url, argument=None, params=None, raw=False): if not params: params = {} params[] = session.auth.key try: if argument: request_url = .format(session.auth.base_url, VOOBLY_API_URL, url, argument) else: request_url = .format(VOOBLY_API_URL, url) resp = session.get(request_url, params=params) except RequestException: raise VooblyError() if resp.text == : raise VooblyError() elif resp.text == : raise VooblyError() elif not resp.text: raise VooblyError() if raw: return resp.text try: return tablib.Dataset().load(resp.text).dict except UnsupportedFormat: raise VooblyError(.format(resp.text))
Make a request to API endpoint.
### Input: Make a request to API endpoint. ### Response: #vtb def _make_request(session, url, argument=None, params=None, raw=False): if not params: params = {} params[] = session.auth.key try: if argument: request_url = .format(session.auth.base_url, VOOBLY_API_URL, url, argument) else: request_url = .format(VOOBLY_API_URL, url) resp = session.get(request_url, params=params) except RequestException: raise VooblyError() if resp.text == : raise VooblyError() elif resp.text == : raise VooblyError() elif not resp.text: raise VooblyError() if raw: return resp.text try: return tablib.Dataset().load(resp.text).dict except UnsupportedFormat: raise VooblyError(.format(resp.text))
#vtb def tokenize(self, s, pattern=None, active=None): if pattern is None: if self.tokenize_pattern is None: pattern = r'[ \t]+' else: pattern = self.tokenize_pattern if active is None: active = self.active return self.group.tokenize(s, pattern=pattern, active=active)
Rewrite and tokenize the input string *s*. Args: s (str): the input string to process pattern (str, optional): the regular expression pattern on which to split tokens; defaults to `[ \t]+` active (optional): a collection of external module names that may be applied if called Returns: a :class:`~delphin.tokens.YyTokenLattice` containing the tokens and their characterization information
### Input: Rewrite and tokenize the input string *s*. Args: s (str): the input string to process pattern (str, optional): the regular expression pattern on which to split tokens; defaults to `[ \t]+` active (optional): a collection of external module names that may be applied if called Returns: a :class:`~delphin.tokens.YyTokenLattice` containing the tokens and their characterization information ### Response: #vtb def tokenize(self, s, pattern=None, active=None): if pattern is None: if self.tokenize_pattern is None: pattern = r'[ \t]+' else: pattern = self.tokenize_pattern if active is None: active = self.active return self.group.tokenize(s, pattern=pattern, active=active)
#vtb def conv_block(name, x, mid_channels, dilations=None, activation="relu", dropout=0.0): with tf.variable_scope(name, reuse=tf.AUTO_REUSE): x_shape = common_layers.shape_list(x) is_2d = len(x_shape) == 4 num_steps = x_shape[1] if is_2d: first_filter = [3, 3] second_filter = [1, 1] else: if num_steps == 1: first_filter = [1, 3, 3] else: first_filter = [2, 3, 3] second_filter = [1, 1, 1] x = conv("1_1", x, output_channels=mid_channels, filter_size=first_filter, dilations=dilations) x = tf.nn.relu(x) x = get_dropout(x, rate=dropout) if activation == "relu": x = conv("1_2", x, output_channels=mid_channels, filter_size=second_filter, dilations=dilations) x = tf.nn.relu(x) elif activation == "gatu": x_tanh = conv("1_tanh", x, output_channels=mid_channels, filter_size=second_filter, dilations=dilations) x_sigm = conv("1_sigm", x, output_channels=mid_channels, filter_size=second_filter, dilations=dilations) x = tf.nn.tanh(x_tanh) * tf.nn.sigmoid(x_sigm) x = get_dropout(x, rate=dropout) return x
2 layer conv block used in the affine coupling layer. Args: name: variable scope. x: 4-D or 5-D Tensor. mid_channels: Output channels of the second layer. dilations: Optional, list of integers. activation: relu or gatu. If relu, the second layer is relu(W*x) If gatu, the second layer is tanh(W1*x) * sigmoid(W2*x) dropout: Dropout probability. Returns: x: 4-D Tensor: Output activations.
### Input: 2 layer conv block used in the affine coupling layer. Args: name: variable scope. x: 4-D or 5-D Tensor. mid_channels: Output channels of the second layer. dilations: Optional, list of integers. activation: relu or gatu. If relu, the second layer is relu(W*x) If gatu, the second layer is tanh(W1*x) * sigmoid(W2*x) dropout: Dropout probability. Returns: x: 4-D Tensor: Output activations. ### Response: #vtb def conv_block(name, x, mid_channels, dilations=None, activation="relu", dropout=0.0): with tf.variable_scope(name, reuse=tf.AUTO_REUSE): x_shape = common_layers.shape_list(x) is_2d = len(x_shape) == 4 num_steps = x_shape[1] if is_2d: first_filter = [3, 3] second_filter = [1, 1] else: if num_steps == 1: first_filter = [1, 3, 3] else: first_filter = [2, 3, 3] second_filter = [1, 1, 1] x = conv("1_1", x, output_channels=mid_channels, filter_size=first_filter, dilations=dilations) x = tf.nn.relu(x) x = get_dropout(x, rate=dropout) if activation == "relu": x = conv("1_2", x, output_channels=mid_channels, filter_size=second_filter, dilations=dilations) x = tf.nn.relu(x) elif activation == "gatu": x_tanh = conv("1_tanh", x, output_channels=mid_channels, filter_size=second_filter, dilations=dilations) x_sigm = conv("1_sigm", x, output_channels=mid_channels, filter_size=second_filter, dilations=dilations) x = tf.nn.tanh(x_tanh) * tf.nn.sigmoid(x_sigm) x = get_dropout(x, rate=dropout) return x
#vtb def inverse_transform(self, maps): out = {} xi1 = conversions.primary_xi( maps[parameters.mass1], maps[parameters.mass2], maps[parameters.spin1x], maps[parameters.spin1y], maps[parameters.spin2x], maps[parameters.spin2y]) xi2 = conversions.secondary_xi( maps[parameters.mass1], maps[parameters.mass2], maps[parameters.spin1x], maps[parameters.spin1y], maps[parameters.spin2x], maps[parameters.spin2y]) out["phi_a"] = conversions.phi_a( maps[parameters.mass1], maps[parameters.mass2], maps[parameters.spin1x], maps[parameters.spin1y], maps[parameters.spin2x], maps[parameters.spin2y]) out["phi_s"] = conversions.phi_s( maps[parameters.spin1x], maps[parameters.spin1y], maps[parameters.spin2x], maps[parameters.spin2y]) if isinstance(xi1, numpy.ndarray): mass1, mass2 = map(numpy.array, [maps[parameters.mass1], maps[parameters.mass2]]) mask_mass1_gte_mass2 = mass1 >= mass2 mask_mass1_lt_mass2 = mass1 < mass2 out["xi1"] = numpy.concatenate(( xi1[mask_mass1_gte_mass2], xi2[mask_mass1_lt_mass2])) out["xi2"] = numpy.concatenate(( xi1[mask_mass1_gte_mass2], xi2[mask_mass1_lt_mass2])) elif maps["mass1"] > maps["mass2"]: out["xi1"] = xi1 out["xi2"] = xi2 else: out["xi1"] = xi2 out["xi2"] = xi1 return self.format_output(maps, out)
This function transforms from component masses and cartesian spins to mass-weighted spin parameters perpendicular with the angular momentum. Parameters ---------- maps : a mapping object Returns ------- out : dict A dict with key as parameter name and value as numpy.array or float of transformed values.
### Input: This function transforms from component masses and cartesian spins to mass-weighted spin parameters perpendicular with the angular momentum. Parameters ---------- maps : a mapping object Returns ------- out : dict A dict with key as parameter name and value as numpy.array or float of transformed values. ### Response: #vtb def inverse_transform(self, maps): out = {} xi1 = conversions.primary_xi( maps[parameters.mass1], maps[parameters.mass2], maps[parameters.spin1x], maps[parameters.spin1y], maps[parameters.spin2x], maps[parameters.spin2y]) xi2 = conversions.secondary_xi( maps[parameters.mass1], maps[parameters.mass2], maps[parameters.spin1x], maps[parameters.spin1y], maps[parameters.spin2x], maps[parameters.spin2y]) out["phi_a"] = conversions.phi_a( maps[parameters.mass1], maps[parameters.mass2], maps[parameters.spin1x], maps[parameters.spin1y], maps[parameters.spin2x], maps[parameters.spin2y]) out["phi_s"] = conversions.phi_s( maps[parameters.spin1x], maps[parameters.spin1y], maps[parameters.spin2x], maps[parameters.spin2y]) if isinstance(xi1, numpy.ndarray): mass1, mass2 = map(numpy.array, [maps[parameters.mass1], maps[parameters.mass2]]) mask_mass1_gte_mass2 = mass1 >= mass2 mask_mass1_lt_mass2 = mass1 < mass2 out["xi1"] = numpy.concatenate(( xi1[mask_mass1_gte_mass2], xi2[mask_mass1_lt_mass2])) out["xi2"] = numpy.concatenate(( xi1[mask_mass1_gte_mass2], xi2[mask_mass1_lt_mass2])) elif maps["mass1"] > maps["mass2"]: out["xi1"] = xi1 out["xi2"] = xi2 else: out["xi1"] = xi2 out["xi2"] = xi1 return self.format_output(maps, out)
#vtb def run(*args): import os import sys import argparse import pkg_resources parser = argparse.ArgumentParser(prog=, description=) parser.add_argument(, help=) parser.add_argument(, dest=, action=, help=) parser.add_argument(, dest=, action=, help=) ver = pkg_resources.require()[0].version parser.add_argument(, action=, version= % ver, help=) if args: clargs, remaining = parser.parse_known_args(args=args) else: clargs, remaining = parser.parse_known_args() if clargs.verbose: logger.setLevel(logging.DEBUG) args, kwargs = parse(remaining) shovel = Shovel() shovel.extend(Task.clear()) for path in [ os.path.expanduser(), os.path.expanduser()]: if os.path.exists(path): shovel.read(path, os.path.expanduser()) shovel_home = os.environ.get() if shovel_home and os.path.exists(shovel_home): shovel.read(shovel_home, shovel_home) for path in [, ]: if os.path.exists(path): shovel.read(path) if clargs.method == : print(help.shovel_help(shovel, *args, **kwargs)) elif clargs.method == : tasks = list(v for _, v in shovel.items()) if not tasks: print() else: names = list(t.fullname for t in tasks) docs = list(t.doc for t in tasks) width = 80 import shutil try: width, _ = shutil.get_terminal_size(fallback=(0, width)) except AttributeError: pass format = % ( max(len(name) for name in names), width) for name, doc in zip(names, docs): print(format % (name, doc)) elif clargs.method: try: tasks = shovel.tasks(clargs.method) except KeyError: print( % clargs.method, file=sys.stderr) exit(1) if len(tasks) > 1: print( % clargs.method, file=sys.stderr) for task in tasks: print( % task.fullname, file=sys.stderr) exit(2) task = tasks[0] if clargs.dryRun: print(task.dry(*args, **kwargs)) else: task(*args, **kwargs)
Run the normal shovel functionality
### Input: Run the normal shovel functionality ### Response: #vtb def run(*args): import os import sys import argparse import pkg_resources parser = argparse.ArgumentParser(prog=, description=) parser.add_argument(, help=) parser.add_argument(, dest=, action=, help=) parser.add_argument(, dest=, action=, help=) ver = pkg_resources.require()[0].version parser.add_argument(, action=, version= % ver, help=) if args: clargs, remaining = parser.parse_known_args(args=args) else: clargs, remaining = parser.parse_known_args() if clargs.verbose: logger.setLevel(logging.DEBUG) args, kwargs = parse(remaining) shovel = Shovel() shovel.extend(Task.clear()) for path in [ os.path.expanduser(), os.path.expanduser()]: if os.path.exists(path): shovel.read(path, os.path.expanduser()) shovel_home = os.environ.get() if shovel_home and os.path.exists(shovel_home): shovel.read(shovel_home, shovel_home) for path in [, ]: if os.path.exists(path): shovel.read(path) if clargs.method == : print(help.shovel_help(shovel, *args, **kwargs)) elif clargs.method == : tasks = list(v for _, v in shovel.items()) if not tasks: print() else: names = list(t.fullname for t in tasks) docs = list(t.doc for t in tasks) width = 80 import shutil try: width, _ = shutil.get_terminal_size(fallback=(0, width)) except AttributeError: pass format = % ( max(len(name) for name in names), width) for name, doc in zip(names, docs): print(format % (name, doc)) elif clargs.method: try: tasks = shovel.tasks(clargs.method) except KeyError: print( % clargs.method, file=sys.stderr) exit(1) if len(tasks) > 1: print( % clargs.method, file=sys.stderr) for task in tasks: print( % task.fullname, file=sys.stderr) exit(2) task = tasks[0] if clargs.dryRun: print(task.dry(*args, **kwargs)) else: task(*args, **kwargs)
#vtb def remove(self, first, count): if first < 0 or count < 1: return new_range = [] last = first + count - 1 for r in self.__range: if first <= r.last and r.first <= last: if r.first < first: new_range.append(IdRange(r.first, first-r.first)) if last < r.last: new_range.append(IdRange(last+1, r.last-last)) else: new_range.append(r) self.__range = new_range
Remove a range of count consecutive ids starting at id first from all the ranges in the set.
### Input: Remove a range of count consecutive ids starting at id first from all the ranges in the set. ### Response: #vtb def remove(self, first, count): if first < 0 or count < 1: return new_range = [] last = first + count - 1 for r in self.__range: if first <= r.last and r.first <= last: if r.first < first: new_range.append(IdRange(r.first, first-r.first)) if last < r.last: new_range.append(IdRange(last+1, r.last-last)) else: new_range.append(r) self.__range = new_range
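The same clipping logic on a simpler representation, removing ids [first, first+count) from inclusive (lo, hi) tuples:

def remove_range(ranges, first, count):
    last = first + count - 1
    out = []
    for lo, hi in ranges:
        if first <= hi and lo <= last:  # overlap: keep the pieces outside it
            if lo < first:
                out.append((lo, first - 1))
            if last < hi:
                out.append((last + 1, hi))
        else:
            out.append((lo, hi))
    return out

print(remove_range([(0, 9)], 3, 4))  # [(0, 2), (7, 9)]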
#vtb def _get_current_object(self): if not hasattr(self.__local, '__release_local__'): return self.__local() try: return getattr(self.__local, self.__name__) except AttributeError: raise RuntimeError('no object bound to %s' % self.__name__)
Return the current object. This is useful if you want the real object behind the proxy at a time for performance reasons or because you want to pass the object into a different context.
### Input: Return the current object. This is useful if you want the real object behind the proxy at a time for performance reasons or because you want to pass the object into a different context. ### Response: #vtb def _get_current_object(self): if not hasattr(self.__local, '__release_local__'): return self.__local() try: return getattr(self.__local, self.__name__) except AttributeError: raise RuntimeError('no object bound to %s' % self.__name__)
#vtb def days_in_month(year, month): eom = _days_per_month[month - 1] if is_leap_year(year) and month == 2: eom += 1 return eom
returns number of days for the given year and month :param int year: calendar year :param int month: calendar month :return int:
### Input: returns number of days for the given year and month :param int year: calendar year :param int month: calendar month :return int: ### Response: #vtb def days_in_month(year, month): eom = _days_per_month[month - 1] if is_leap_year(year) and month == 2: eom += 1 return eom
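The standard library exposes the same lookup; a quick cross-check against calendar.monthrange, whose second element is the day count and which applies the identical leap-year rule:

import calendar
print(calendar.monthrange(2020, 2)[1])  # 29: 2020 is a leap year
print(calendar.monthrange(2021, 2)[1])  # 28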
#vtb def get_map_values(self, lons, lats, ibin=None): pix_idxs = self.get_pixel_indices(lons, lats, ibin) idxs = copy.copy(pix_idxs) m = np.empty_like(idxs[0], dtype=bool) m.fill(True) for i, p in enumerate(pix_idxs): m &= (pix_idxs[i] >= 0) & (pix_idxs[i] < self._npix[i]) idxs[i][~m] = 0 vals = self.counts.T[idxs] vals[~m] = np.nan return vals
Return the map values corresponding to a set of coordinates. Parameters ---------- lons : array-like 'Longitudes' (RA or GLON) lats : array-like 'Latitudes' (DEC or GLAT) ibin : int or array-like Extract data only for a given energy bin. None -> extract data for all bins Returns ---------- vals : numpy.ndarray((n)) Values of pixels in the flattened map, np.nan used to flag coords outside of map
### Input: Return the map values corresponding to a set of coordinates. Parameters ---------- lons : array-like 'Longitudes' (RA or GLON) lats : array-like 'Latitudes' (DEC or GLAT) ibin : int or array-like Extract data only for a given energy bin. None -> extract data for all bins Returns ---------- vals : numpy.ndarray((n)) Values of pixels in the flattened map, np.nan used to flag coords outside of map ### Response: #vtb def get_map_values(self, lons, lats, ibin=None): pix_idxs = self.get_pixel_indices(lons, lats, ibin) idxs = copy.copy(pix_idxs) m = np.empty_like(idxs[0], dtype=bool) m.fill(True) for i, p in enumerate(pix_idxs): m &= (pix_idxs[i] >= 0) & (pix_idxs[i] < self._npix[i]) idxs[i][~m] = 0 vals = self.counts.T[idxs] vals[~m] = np.nan return vals
#vtb def _add_url(self, chunk): if in chunk: return chunk public_path = chunk.get() if public_path: chunk[] = public_path else: fullpath = posixpath.join(self.state.static_view_path, chunk[]) chunk[] = self._request.static_url(fullpath) return chunk
Add a 'url' property to a chunk and return it
### Input: Add a 'url' property to a chunk and return it ### Response: #vtb def _add_url(self, chunk): if in chunk: return chunk public_path = chunk.get() if public_path: chunk[] = public_path else: fullpath = posixpath.join(self.state.static_view_path, chunk[]) chunk[] = self._request.static_url(fullpath) return chunk
#vtb def chunks(iterable, n): for i in range(0, len(iterable), n): yield iterable[i:i + n]
Yield successive n-sized chunks from iterable object. https://stackoverflow.com/a/312464
### Input: Yield successive n-sized chunks from iterable object. https://stackoverflow.com/a/312464 ### Response: #vtb def chunks(iterable, n): for i in range(0, len(iterable), n): yield iterable[i:i + n]
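Usage example; the final chunk is simply shorter when the length is not a multiple of n:

print(list(chunks([1, 2, 3, 4, 5], 2)))  # [[1, 2], [3, 4], [5]]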
#vtb def unmodified_isinstance(*bases): class UnmodifiedIsInstance(type): if sys.version_info[0] == 2 and sys.version_info[1] <= 6: @classmethod def __instancecheck__(cls, instance): if cls.__name__ in (str(base.__name__) for base in bases): return isinstance(instance, bases) subclass = getattr(instance, , None) subtype = type(instance) instance_type = getattr(abc, , None) if not instance_type: class test_object: pass instance_type = type(test_object) if subtype is instance_type: subtype = subclass if subtype is subclass or subclass is None: return cls.__subclasscheck__(subtype) return (cls.__subclasscheck__(subclass) or cls.__subclasscheck__(subtype)) else: @classmethod def __instancecheck__(cls, instance): if cls.__name__ in (str(base.__name__) for base in bases): return isinstance(instance, bases) return type.__instancecheck__(cls, instance) return with_metaclass(UnmodifiedIsInstance, *bases)
When called in the form MyOverrideClass(unmodified_isinstance(BuiltInClass)) it allows isinstance calls against passed-in built-in instances to pass even if they are not a subclass
### Input: When called in the form MyOverrideClass(unmodified_isinstance(BuiltInClass)) it allows isinstance calls against passed-in built-in instances to pass even if they are not a subclass ### Response: #vtb def unmodified_isinstance(*bases): class UnmodifiedIsInstance(type): if sys.version_info[0] == 2 and sys.version_info[1] <= 6: @classmethod def __instancecheck__(cls, instance): if cls.__name__ in (str(base.__name__) for base in bases): return isinstance(instance, bases) subclass = getattr(instance, , None) subtype = type(instance) instance_type = getattr(abc, , None) if not instance_type: class test_object: pass instance_type = type(test_object) if subtype is instance_type: subtype = subclass if subtype is subclass or subclass is None: return cls.__subclasscheck__(subtype) return (cls.__subclasscheck__(subclass) or cls.__subclasscheck__(subtype)) else: @classmethod def __instancecheck__(cls, instance): if cls.__name__ in (str(base.__name__) for base in bases): return isinstance(instance, bases) return type.__instancecheck__(cls, instance) return with_metaclass(UnmodifiedIsInstance, *bases)
#vtb def write(self, buffer): if type(buffer) == type(0): buffer = chr(buffer) elif not isinstance(buffer, bytes): buffer = buffer.encode(self.encoding) if IAC in buffer: buffer = buffer.replace(IAC, IAC+IAC) self.msg("send %s", repr(buffer)) self.sock.send(buffer)
Write a string to the socket, doubling any IAC characters. Can block if the connection is blocked. May raise socket.error if the connection is closed.
### Input: Write a string to the socket, doubling any IAC characters. Can block if the connection is blocked. May raise socket.error if the connection is closed. ### Response: #vtb def write(self, buffer): if type(buffer) == type(0): buffer = chr(buffer) elif not isinstance(buffer, bytes): buffer = buffer.encode(self.encoding) if IAC in buffer: buffer = buffer.replace(IAC, IAC+IAC) self.msg("send %s", repr(buffer)) self.sock.send(buffer)
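The escaping step by itself: IAC (byte 255) introduces telnet commands, so a literal 255 in the payload must be doubled before sending. A minimal sketch:

IAC = bytes([255])
payload = b"abc" + IAC + b"def"
print(payload.replace(IAC, IAC + IAC))  # b'abc\xff\xffdef'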
#vtb def first_interval_starting(self, start: datetime.datetime) -> \ Optional[Interval]: for i in self.intervals: if i.start == start: return i return None
Returns our first interval that starts with the ``start`` parameter, or ``None``.
### Input: Returns our first interval that starts with the ``start`` parameter, or ``None``. ### Response: #vtb def first_interval_starting(self, start: datetime.datetime) -> \ Optional[Interval]: for i in self.intervals: if i.start == start: return i return None
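An equivalent standalone one-liner using next() with a default, shown for contrast with the explicit loop:

def first_interval_starting(intervals, start):
    return next((i for i in intervals if i.start == start), None)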
#vtb def _convert(cls, record): if not record: return {} converted_dict = {} for field in cls.conversion: key = field[0] if len(field) >= 2 and field[1]: converted_key = field[1] else: converted_key = key if len(field) >= 3 and field[2]: conversion_method = field[2] else: conversion_method = cls.default_conversion_method if len(field) >= 4: converter = field[3] else: converter = None try: value = conversion_method(record[key]) except KeyError: continue if converter: value = converter._convert_internal(value) if converted_key is APPEND: if isinstance(value, list): for v in value: converted_dict.update(v) else: converted_dict.update(value) else: converted_dict[converted_key] = value return converted_dict
Core method of the converter. Converts a single dictionary into another dictionary.
### Input: Core method of the converter. Converts a single dictionary into another dictionary. ### Response: #vtb def _convert(cls, record): if not record: return {} converted_dict = {} for field in cls.conversion: key = field[0] if len(field) >= 2 and field[1]: converted_key = field[1] else: converted_key = key if len(field) >= 3 and field[2]: conversion_method = field[2] else: conversion_method = cls.default_conversion_method if len(field) >= 4: converter = field[3] else: converter = None try: value = conversion_method(record[key]) except KeyError: continue if converter: value = converter._convert_internal(value) if converted_key is APPEND: if isinstance(value, list): for v in value: converted_dict.update(v) else: converted_dict.update(value) else: converted_dict[converted_key] = value return converted_dict
#vtb def _set_link_error_disable(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=link_error_disable.link_error_disable, is_container=, presence=False, yang_name="link-error-disable", rest_name="link-error-disable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: u, u: None, u: None, u: None, u: None}}, namespace=, defining_module=, yang_type=, is_config=True) except (TypeError, ValueError): raise ValueError({ : , : "container", : , }) self.__link_error_disable = t if hasattr(self, ): self._set()
Setter method for link_error_disable, mapped from YANG variable /interface/ethernet/link_error_disable (container) If this variable is read-only (config: false) in the source YANG file, then _set_link_error_disable is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_link_error_disable() directly.
### Input: Setter method for link_error_disable, mapped from YANG variable /interface/ethernet/link_error_disable (container) If this variable is read-only (config: false) in the source YANG file, then _set_link_error_disable is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_link_error_disable() directly. ### Response: #vtb def _set_link_error_disable(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=link_error_disable.link_error_disable, is_container=, presence=False, yang_name="link-error-disable", rest_name="link-error-disable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: u, u: None, u: None, u: None, u: None}}, namespace=, defining_module=, yang_type=, is_config=True) except (TypeError, ValueError): raise ValueError({ : , : "container", : , }) self.__link_error_disable = t if hasattr(self, ): self._set()
#vtb def _locate_free_sectors(self, ignore_chunk=None): sectors = self._sectors(ignore_chunk=ignore_chunk) return [not i for i in sectors]
Return a list of booleans, indicating the free sectors.
#vtb def handleGetValue(self, topContainer):
    value = self.__referenceDict if self.__referenceDict is not None else topContainer
    for key in self.__dictKeyChain:
        value = value[key]
    return value
This method overrides ValueGetterBase's "pure virtual" method. It returns the referenced value. The derived class is NOT responsible for fully resolving the referenced value in the event the value resolves to another ValueGetterBase-based instance -- this is handled automatically within the ValueGetterBase implementation.

topContainer: The top-level container (dict, tuple, or list [sub-]instance) within whose context the value-getter is applied. If self.__referenceDict is None, then topContainer will be used as the reference dictionary for resolving our dictionary key chain.

Returns: The value referenced by this instance (which may be another value-getter instance)
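The key-chain resolution above reduces to a simple loop; a standalone illustration with made-up values:

top_container = {'model': {'params': {'lr': 0.01}}}
key_chain = ['model', 'params', 'lr']

value = top_container  # used when self.__referenceDict is None
for key in key_chain:
    value = value[key]
print(value)  # 0.01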
#vtb def installed(name, cyg_arch='x86_64', mirrors=None):
    # NOTE: the string literals below were stripped from the source and have
    # been restored from standard Salt state conventions; the comment texts
    # are plausible reconstructions, not guaranteed originals.
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
    if cyg_arch not in ['x86', 'x86_64']:
        ret['result'] = False
        ret['comment'] = 'The \'cyg_arch\' argument must be one of \'x86\' or \'x86_64\''
        return ret
    LOG.debug('mirrors: %s', mirrors)
    if not __salt__['cyg.check_valid_package'](name, cyg_arch=cyg_arch, mirrors=mirrors):
        ret['result'] = False
        ret['comment'] = 'Invalid package name.'
        return ret
    pkgs = __salt__['cyg.list'](name, cyg_arch)
    if name in pkgs:
        ret['result'] = True
        ret['comment'] = 'Package is already installed.'
        return ret
    if __opts__['test']:
        ret['comment'] = 'The package {0} would have been installed'.format(name)
        return ret
    if __salt__['cyg.install'](name, cyg_arch=cyg_arch, mirrors=mirrors):
        ret['result'] = True
        ret['changes'][name] = 'Installed'
        ret['comment'] = 'Package was successfully installed'
    else:
        ret['result'] = False
        ret['comment'] = 'Could not install package.'
    return ret
Make sure that a package is installed.

name
    The name of the package to install

cyg_arch : x86_64
    The cygwin architecture to install the package into.
    Current options are x86 and x86_64

mirrors : None
    List of mirrors to check.
    None will use a default mirror (kernel.org)

CLI Example:

.. code-block:: yaml

    rsync:
      cyg.installed:
        - mirrors:
          - http://mirror/without/public/key: ""
          - http://mirror/with/public/key: http://url/of/public/key
#vtb def loads(cls, s):
    with closing(StringIO(s)) as fileobj:
        return cls.load(fileobj)
Load an instance of this class from YAML.
#vtb def cmd_func(self, command: str) -> Optional[Callable]:
    func_name = self.cmd_func_name(command)
    if func_name:
        return getattr(self, func_name)
Get the function for a command.

:param command: the name of the command
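A self-contained sketch of this lookup pattern; the source does not show cmd_func_name, so the do_-prefix convention below is an assumption:

from typing import Callable, Optional

class Shell:
    def cmd_func_name(self, command: str) -> Optional[str]:
        name = 'do_' + command  # hypothetical naming scheme
        return name if callable(getattr(self, name, None)) else None

    def cmd_func(self, command: str) -> Optional[Callable]:
        func_name = self.cmd_func_name(command)
        if func_name:
            return getattr(self, func_name)

    def do_greet(self) -> str:
        return 'hello'

shell = Shell()
print(shell.cmd_func('greet')())  # hello
print(shell.cmd_func('missing'))  # None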
#vtb def get_vcs_root(path):
    previous_path = path
    while get_vcs_info(path) is None:
        path = abspardir(path)
        if path == previous_path:
            return
        else:
            previous_path = path
    return osp.abspath(path)
Return VCS root directory path.

Return None if path is not within a supported VCS repository.
#vtb def _do_multipart_upload(self, stream, metadata, size, num_retries):
    data = stream.read(size)
    if len(data) < size:
        msg = _READ_LESS_THAN_SIZE.format(size, len(data))
        raise ValueError(msg)
    headers = _get_upload_headers(self._connection.USER_AGENT)
    upload_url = _MULTIPART_URL_TEMPLATE.format(project=self.project)
    upload = MultipartUpload(upload_url, headers=headers)
    if num_retries is not None:
        upload._retry_strategy = resumable_media.RetryStrategy(
            max_retries=num_retries
        )
    response = upload.transmit(self._http, data, metadata, _GENERIC_CONTENT_TYPE)
    return response
Perform a multipart upload.

:type stream: IO[bytes]
:param stream: A bytes IO object open for reading.

:type metadata: dict
:param metadata: The metadata associated with the upload.

:type size: int
:param size:
    The number of bytes to be uploaded (which will be read from
    ``stream``). If not provided, the upload will be concluded once
    ``stream`` is exhausted (or :data:`None`).

:type num_retries: int
:param num_retries:
    Number of upload retries. (Deprecated: This argument will be removed
    in a future release.)

:rtype: :class:`~requests.Response`
:returns:
    The "200 OK" response object returned after the multipart upload
    request.
:raises:
    :exc:`ValueError` if the ``stream`` has fewer than ``size`` bytes
    remaining.
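The size guard at the top is the part callers most often trip over; a minimal standalone illustration (the error text here is illustrative, not the library's exact _READ_LESS_THAN_SIZE message):

import io

stream = io.BytesIO(b'abc')
size = 5
data = stream.read(size)
if len(data) < size:
    raise ValueError('%d bytes requested, only %d available' % (size, len(data)))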
#vtb def to_dict(self):
    rv = {'code': self.code}
    if not self.is_native():
        rv['issuer'] = self.issuer
        rv['type'] = self.type
    else:
        rv['type'] = 'native'
    return rv
Generate a dict for this object's attributes.

:return: A dict representing an :class:`Asset`
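Usage sketch, assuming this method belongs to a stellar-base style Asset with a (code, issuer) constructor; the import path and the issuer value are placeholders:

from stellar_base.asset import Asset  # import path assumed

Asset('XLM').to_dict()
# {'code': 'XLM', 'type': 'native'}
Asset('USD', 'GA...ISSUER').to_dict()
# {'code': 'USD', 'issuer': 'GA...ISSUER', 'type': <value of Asset.type>}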
#vtb def cli(debug, cache, incremental):
    settings.HTTP_CACHE = cache
    settings.INCREMENTAL = incremental
    settings.DEBUG = debug
    if settings.DEBUG:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
    init_memorious()
Crawler framework for documents and structured scrapers.
#vtb def ensure_dir(self, mode=0o777):
    if not self.exists() or not self.isdir():
        os.makedirs(self, mode)
Make sure the directory exists, create if necessary.
#vtb def feed_interval_get(feed_id, parameters):
    val = cache.get(getkey(
        T_INTERVAL, key=feed_interval_key(feed_id, parameters)
    ))
    return val if isinstance(val, tuple) else (val, None)
Get adaptive interval between checks for a feed.
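The trailing isinstance check normalises legacy cache entries that stored a bare interval instead of a tuple; a quick illustration with a made-up value:

val = 300  # an old-style cached interval (not a tuple)
normalised = val if isinstance(val, tuple) else (val, None)
print(normalised)  # (300, None)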
#vtb def impulse_noise(x, severity=1):
    c = [.03, .06, .09, 0.17, 0.27][severity - 1]
    x = tfds.core.lazy_imports.skimage.util.random_noise(
        np.array(x) / 255., mode='s&p', amount=c)
    x_clip = np.clip(x, 0, 1) * 255
    return around_and_astype(x_clip)
Impulse noise corruption to images.

Args:
  x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
  severity: integer, severity of corruption.

Returns:
  numpy array, image with uint8 pixels in [0,255]. Added impulse noise.
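mode='s&p' is skimage's salt-and-pepper noise; a rough numpy-only approximation (not the library's exact sampling) showing what amount=c controls:

import numpy as np

def impulse_noise_sketch(img_uint8, amount=0.03, seed=None):
    rng = np.random.default_rng(seed)
    x = img_uint8.astype(np.float64) / 255.
    mask = rng.random(x.shape)
    x[mask < amount / 2] = 0.      # pepper: darkest value
    x[mask > 1 - amount / 2] = 1.  # salt: brightest value
    return np.uint8(np.clip(x, 0, 1) * 255)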
#vtb def _open(self, name=None, fileobj=None, mymap=None, block=None):
    if block is not None:
        if not name:
            name = self.unpack_from(block)
        if fileobj:
            fileobj.close()
        return self
    if mymap is not None:
        block = mymap
    elif fileobj:
        try:
            mymap = mmap.mmap(fileobj.fileno(), 0, mmap.MAP_SHARED, mmap.PROT_READ)
        except Exception:
            mymap = 0
        block = fileobj.read()
    elif name:
        fileobj = io.open(os.path.normpath(os.path.expanduser(name)), 'rb')
    else:
        assert False
    return self._open(name=name, fileobj=fileobj, mymap=mymap, block=block)
The _open function takes some form of file identifier and creates an
:py:class:`CpioFile` instance from it.

:param :py:class:`str` name: a file name
:param :py:class:`file` fileobj: if given, this overrides *name*
:param :py:class:`mmap.mmap` mymap: if given, this overrides *fileobj*
:param :py:class:`bytes` block: file contents in a block of memory,
    (if given, this overrides *mymap*)

The file to be used can be specified in any of four different forms,
(in reverse precedence):

#. a file name
#. :py:class:`file` object
#. :py:mod:`mmap.mmap`, or
#. a block of memory
#vtb def vectorize(self, sentence_list):
    test_observed_arr = self.__setup_dataset(sentence_list, self.__token_master_list, self.__seq_len)
    inferenced_arr = self.__rbm.inference(
        test_observed_arr,
        training_count=1,
        r_batch_size=-1
    )
    return inferenced_arr
Args:
    sentence_list: The list of tokenized sentences.
        [[`token`, `token`, `token`, ...],
         [`token`, `token`, `token`, ...],
         [`token`, `token`, `token`, ...]]

Returns:
    `np.ndarray` of tokens.
    [vector of token, vector of token, vector of token]
#vtb def obj(self):
    if not getattr(self, '_obj', None):
        self._obj = self.get_object()
        if self._obj is None and not self.allow_none:
            self.return_error(404)
    return self._obj
Returns the value of :meth:`ObjectMixin.get_object` and sets a private property called _obj. This property ensures the logic around allow_none is enforced across Endpoints using the Object interface.

:raises: :class:`werkzeug.exceptions.BadRequest`
:returns: The result of :meth:`ObjectMixin.get_object`
#vtb def relabel_non_zero(label_image, start=1):
    if start <= 0:
        # error message restored per medpy convention; original literal stripped
        raise ArgumentError('The starting value can not be 0 or lower.')
    l = list(scipy.unique(label_image))
    if 0 in l:
        l.remove(0)
    mapping = dict()
    mapping[0] = 0
    for key, item in zip(l, list(range(start, len(l) + start))):
        mapping[key] = item
    return relabel_map(label_image, mapping)
r"""
Relabel the regions of a label image.

Re-processes the labels to make them consecutive, starting from start.
Keeps all zero (0) labels, as they are considered background.

Parameters
----------
label_image : array_like
    A nD label map.
start : integer
    The id of the first label to assign

Returns
-------
relabel_map : ndarray
    The relabelled label map.

See also
--------
relabel
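A quick check of the mapping described above (medpy import path assumed):

import numpy as np
from medpy.filter import relabel_non_zero  # import path assumed

label_image = np.array([[0, 2, 2],
                        [7, 7, 0],
                        [7, 4, 4]])
relabel_non_zero(label_image, start=3)
# unique non-zero labels [2, 4, 7] map to [3, 4, 5]; zeros stay zero:
# array([[0, 3, 3],
#        [5, 5, 0],
#        [5, 4, 4]])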
#vtb def register(self, request, **cleaned_data):
    if Site._meta.installed:
        site = Site.objects.get_current()
    else:
        site = RequestSite(request)
    create_user = RegistrationProfile.objects.create_inactive_user
    # NOTE: the cleaned_data keys were stripped from the source; the names
    # below are inferred from the keyword arguments they feed and from
    # django-registration conventions ('password1' for the password field).
    new_user = create_user(
        cleaned_data['username'],
        cleaned_data['email'],
        cleaned_data['password1'],
        site,
        send_email=False
    )
    new_user.first_name = cleaned_data['first_name']
    new_user.last_name = cleaned_data['last_name']
    new_user.save()
    user_info = UserInfo(
        user=new_user,
        company=cleaned_data['company'],
        function=cleaned_data['function'],
        address=cleaned_data['address'],
        postal_code=cleaned_data['postal_code'],
        city=cleaned_data['city'],
        country=cleaned_data['country'],
        phone=cleaned_data['phone'],
    )
    user_info.save()
    send_activation_email(new_user, site, user_info)
    send_activation_pending_email(new_user, site, user_info)
    signals.user_registered.send(sender=self.__class__,
                                 user=new_user,
                                 request=request)
    return new_user
Given a username, email address and password, register a new user account, which will initially be inactive.

Along with the new ``User`` object, a new ``registration.models.RegistrationProfile`` will be created, tied to that ``User``, containing the activation key which will be used for this account.

Two emails will be sent. The first one goes to the admin and should contain an activation link and a summary of the new user's details. The second one goes to the user, informing them that their request is pending.

After the ``User`` and ``RegistrationProfile`` are created and the activation email is sent, the signal ``registration.signals.user_registered`` will be sent, with the new ``User`` as the keyword argument ``user`` and the class of this backend as the sender.
#vtb def _join_factory(cls, gap, pad):
    if issubclass(cls, dict):
        def _join(data):
            out = cls()
            data = list(data)
            while data:
                tsd = data.pop(0)
                out.append(tsd, gap=gap, pad=pad)
                del tsd
            return out
    else:
        from .. import TimeSeriesBaseList

        def _join(arrays):
            list_ = TimeSeriesBaseList(*arrays)
            return list_.join(pad=pad, gap=gap)
    return _join
Build a joiner for the given cls and the given padding options.
#vtb def chi2(T1, T2):
    rs2 = T2.sum(axis=1)
    rs1 = T1.sum(axis=1)
    rs2nz = rs2 > 0
    rs1nz = rs1 > 0
    dof1 = sum(rs1nz)
    dof2 = sum(rs2nz)
    rs2 = rs2 + (rs2 == 0)
    dof = (dof1 - 1) * (dof2 - 1)
    p = np.diag(1 / rs2) * np.matrix(T2)
    E = np.diag(rs1) * np.matrix(p)
    num = T1 - E
    num = np.multiply(num, num)
    E = E + (E == 0)
    chi2 = num / E
    chi2 = chi2.sum()
    pvalue = 1 - stats.chi2.cdf(chi2, dof)
    return chi2, pvalue, dof
chi-squared test of difference between two transition matrices.

Parameters
----------
T1 : array
    (k, k), matrix of transitions (counts).
T2 : array
    (k, k), matrix of transitions (counts) to use to form the
    probabilities under the null.

Returns
-------
: tuple
    (3 elements). (chi2 value, pvalue, degrees of freedom).

Examples
--------
>>> import libpysal
>>> from giddy.markov import Spatial_Markov, chi2
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> years = list(range(1929, 2010))
>>> pci = np.array([f.by_col[str(y)] for y in years]).transpose()
>>> rpci = pci/(pci.mean(axis=0))
>>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
>>> w.transform='r'
>>> sm = Spatial_Markov(rpci, w, fixed=True)
>>> T1 = sm.T[0]
>>> T1
array([[562.,  22.,   1.,   0.],
       [ 12., 201.,  22.,   0.],
       [  0.,  17.,  97.,   4.],
       [  0.,   0.,   3.,  19.]])
>>> T2 = sm.transitions
>>> T2
array([[884.,  77.,   4.,   0.],
       [ 68., 794.,  87.,   3.],
       [  1.,  92., 815.,  51.],
       [  1.,   0.,  60., 903.]])
>>> chi2(T1,T2)
(23.39728441473295, 0.005363116704861337, 9)

Notes
-----
Second matrix is used to form the probabilities under the null.
Marginal sums from first matrix are distributed across these
probabilities under the null. In other words the observed transitions
are taken from T1 while the expected transitions are formed as follows

.. math::

    E_{i,j} = \left(\sum_{k} T1_{i,k}\right) \frac{T2_{i,j}}{\sum_{k} T2_{i,k}}

Degrees of freedom corrected for any rows in either T1 or T2 that have
zero total transitions.