Columns:
  text: string (lengths 75 to 104k)
  code_tokens: sequence
  avg_line_len: float64 (range 7.91 to 980)
  score: float64 (range 0 to 0.18)
def astuple(self, encoding=None): """ Return a tuple suitable for import into a database. Attributes field and extra field jsonified into strings. The order of fields is such that they can be supplied as arguments for the query defined in :attr:`gffutils.constants._INSERT`. If `encoding` is not None, then convert string fields to unicode using the provided encoding. Returns ------- Tuple """ if not encoding: return ( self.id, self.seqid, self.source, self.featuretype, self.start, self.end, self.score, self.strand, self.frame, helpers._jsonify(self.attributes), helpers._jsonify(self.extra), self.calc_bin() ) return ( self.id.decode(encoding), self.seqid.decode(encoding), self.source.decode(encoding), self.featuretype.decode(encoding), self.start, self.end, self.score.decode(encoding), self.strand.decode(encoding), self.frame.decode(encoding), helpers._jsonify(self.attributes).decode(encoding), helpers._jsonify(self.extra).decode(encoding), self.calc_bin() )
[ "def", "astuple", "(", "self", ",", "encoding", "=", "None", ")", ":", "if", "not", "encoding", ":", "return", "(", "self", ".", "id", ",", "self", ".", "seqid", ",", "self", ".", "source", ",", "self", ".", "featuretype", ",", "self", ".", "start", ",", "self", ".", "end", ",", "self", ".", "score", ",", "self", ".", "strand", ",", "self", ".", "frame", ",", "helpers", ".", "_jsonify", "(", "self", ".", "attributes", ")", ",", "helpers", ".", "_jsonify", "(", "self", ".", "extra", ")", ",", "self", ".", "calc_bin", "(", ")", ")", "return", "(", "self", ".", "id", ".", "decode", "(", "encoding", ")", ",", "self", ".", "seqid", ".", "decode", "(", "encoding", ")", ",", "self", ".", "source", ".", "decode", "(", "encoding", ")", ",", "self", ".", "featuretype", ".", "decode", "(", "encoding", ")", ",", "self", ".", "start", ",", "self", ".", "end", ",", "self", ".", "score", ".", "decode", "(", "encoding", ")", ",", "self", ".", "strand", ".", "decode", "(", "encoding", ")", ",", "self", ".", "frame", ".", "decode", "(", "encoding", ")", ",", "helpers", ".", "_jsonify", "(", "self", ".", "attributes", ")", ".", "decode", "(", "encoding", ")", ",", "helpers", ".", "_jsonify", "(", "self", ".", "extra", ")", ".", "decode", "(", "encoding", ")", ",", "self", ".", "calc_bin", "(", ")", ")" ]
40.533333
0.001606
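The `astuple()` method above comes from gffutils' Feature class. A minimal standalone sketch of the same pattern (JSON-encode the mapping column and optionally decode byte-string fields before binding the row to an SQL INSERT); the field values below are hypothetical:

```python
import json

def as_db_tuple(fields, attributes, encoding=None):
    # JSON-encode the dict-like column and optionally decode byte strings,
    # mirroring the encoding branch in astuple() above.
    row = list(fields) + [json.dumps(attributes)]
    if encoding:
        row = [c.decode(encoding) if isinstance(c, bytes) else c for c in row]
    return tuple(row)

# Hypothetical values standing in for Feature fields:
print(as_db_tuple([b"gene1", b"chr1", 100, 200], {"ID": ["gene1"]}, encoding="utf-8"))
```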
def step1_get_authorize_url(self, redirect_uri=None, state=None): """Returns a URI to redirect to the provider. Args: redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob' for a non-web-based application, or a URI that handles the callback from the authorization server. This parameter is deprecated, please move to passing the redirect_uri in via the constructor. state: string, Opaque state string which is passed through the OAuth2 flow and returned to the client as a query parameter in the callback. Returns: A URI as a string to redirect the user to begin the authorization flow. """ if redirect_uri is not None: logger.warning(( 'The redirect_uri parameter for ' 'OAuth2WebServerFlow.step1_get_authorize_url is deprecated. ' 'Please move to passing the redirect_uri in via the ' 'constructor.')) self.redirect_uri = redirect_uri if self.redirect_uri is None: raise ValueError('The value of redirect_uri must not be None.') query_params = { 'client_id': self.client_id, 'redirect_uri': self.redirect_uri, 'scope': self.scope, } if state is not None: query_params['state'] = state if self.login_hint is not None: query_params['login_hint'] = self.login_hint if self._pkce: if not self.code_verifier: self.code_verifier = _pkce.code_verifier() challenge = _pkce.code_challenge(self.code_verifier) query_params['code_challenge'] = challenge query_params['code_challenge_method'] = 'S256' query_params.update(self.params) return _helpers.update_query_params(self.auth_uri, query_params)
[ "def", "step1_get_authorize_url", "(", "self", ",", "redirect_uri", "=", "None", ",", "state", "=", "None", ")", ":", "if", "redirect_uri", "is", "not", "None", ":", "logger", ".", "warning", "(", "(", "'The redirect_uri parameter for '", "'OAuth2WebServerFlow.step1_get_authorize_url is deprecated. '", "'Please move to passing the redirect_uri in via the '", "'constructor.'", ")", ")", "self", ".", "redirect_uri", "=", "redirect_uri", "if", "self", ".", "redirect_uri", "is", "None", ":", "raise", "ValueError", "(", "'The value of redirect_uri must not be None.'", ")", "query_params", "=", "{", "'client_id'", ":", "self", ".", "client_id", ",", "'redirect_uri'", ":", "self", ".", "redirect_uri", ",", "'scope'", ":", "self", ".", "scope", ",", "}", "if", "state", "is", "not", "None", ":", "query_params", "[", "'state'", "]", "=", "state", "if", "self", ".", "login_hint", "is", "not", "None", ":", "query_params", "[", "'login_hint'", "]", "=", "self", ".", "login_hint", "if", "self", ".", "_pkce", ":", "if", "not", "self", ".", "code_verifier", ":", "self", ".", "code_verifier", "=", "_pkce", ".", "code_verifier", "(", ")", "challenge", "=", "_pkce", ".", "code_challenge", "(", "self", ".", "code_verifier", ")", "query_params", "[", "'code_challenge'", "]", "=", "challenge", "query_params", "[", "'code_challenge_method'", "]", "=", "'S256'", "query_params", ".", "update", "(", "self", ".", "params", ")", "return", "_helpers", ".", "update_query_params", "(", "self", ".", "auth_uri", ",", "query_params", ")" ]
43.043478
0.000988
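The method delegates the final step to a `_helpers.update_query_params` helper. A rough standard-library equivalent of that step (merging the OAuth parameters into the auth URI's query string), with placeholder client values:

```python
from urllib.parse import parse_qsl, urlencode, urlsplit, urlunsplit

def update_query_params(uri, params):
    # Merge params into the existing query string of uri (assumed helper behaviour).
    parts = urlsplit(uri)
    query = dict(parse_qsl(parts.query))
    query.update(params)
    return urlunsplit(parts._replace(query=urlencode(query)))

print(update_query_params(
    "https://accounts.google.com/o/oauth2/auth",
    {"client_id": "CLIENT_ID", "redirect_uri": "urn:ietf:wg:oauth:2.0:oob", "scope": "email"},
))
```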
def filter(self, *args, **kwargs): """ See :py:meth:`nornir.core.inventory.Inventory.filter` Returns: :obj:`Nornir`: A new object with same configuration as ``self`` but filtered inventory. """ b = Nornir(**self.__dict__) b.inventory = self.inventory.filter(*args, **kwargs) return b
[ "def", "filter", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "b", "=", "Nornir", "(", "*", "*", "self", ".", "__dict__", ")", "b", ".", "inventory", "=", "self", ".", "inventory", ".", "filter", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "b" ]
34.3
0.008523
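A hedged usage sketch, assuming the nornir package is installed and an inventory is described by a local `config.yaml`; `filter()` returns a new Nornir object, so the original inventory stays untouched:

```python
from nornir import InitNornir

nr = InitNornir(config_file="config.yaml")   # hypothetical config path
ios_only = nr.filter(platform="ios")         # new object, same configuration
print(len(ios_only.inventory.hosts), "of", len(nr.inventory.hosts), "hosts kept")
```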
def parseWord(word): """ Split given attribute word to key, value pair. Values are casted to python equivalents. :param word: API word. :returns: Key, value pair. """ mapping = {'yes': True, 'true': True, 'no': False, 'false': False} _, key, value = word.split('=', 2) try: value = int(value) except ValueError: value = mapping.get(value, value) return (key, value)
[ "def", "parseWord", "(", "word", ")", ":", "mapping", "=", "{", "'yes'", ":", "True", ",", "'true'", ":", "True", ",", "'no'", ":", "False", ",", "'false'", ":", "False", "}", "_", ",", "key", ",", "value", "=", "word", ".", "split", "(", "'='", ",", "2", ")", "try", ":", "value", "=", "int", "(", "value", ")", "except", "ValueError", ":", "value", "=", "mapping", ".", "get", "(", "value", ",", "value", ")", "return", "(", "key", ",", "value", ")" ]
25.75
0.002342
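Assuming `parseWord` is in scope, a quick check of the key/value splitting and type casting (API words carry a leading '='):

```python
print(parseWord("=mtu=1500"))     # ('mtu', 1500)      -> int cast succeeds
print(parseWord("=disabled=no"))  # ('disabled', False) -> mapped to a boolean
```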
def trigger_on_off(request, trigger_id): """ enable/disable the status of the trigger then go back home :param request: request object :param trigger_id: the trigger ID to switch the status to True or False :type request: HttpRequest object :type trigger_id: int :return render :rtype HttpResponse """ now = arrow.utcnow().to(settings.TIME_ZONE).format('YYYY-MM-DD HH:mm:ssZZ') trigger = get_object_or_404(TriggerService, pk=trigger_id) if trigger.status: title = 'disabled' title_trigger = _('Set this trigger on') btn = 'success' trigger.status = False else: title = _('Edit your service') title_trigger = _('Set this trigger off') btn = 'primary' trigger.status = True # set the trigger to the current date when the # the trigger is back online trigger.date_triggered = now trigger.save() return render(request, 'triggers/trigger_line.html', {'trigger': trigger, 'title': title, 'title_trigger': title_trigger, 'btn': btn, 'fire': settings.DJANGO_TH.get('fire', False) } )
[ "def", "trigger_on_off", "(", "request", ",", "trigger_id", ")", ":", "now", "=", "arrow", ".", "utcnow", "(", ")", ".", "to", "(", "settings", ".", "TIME_ZONE", ")", ".", "format", "(", "'YYYY-MM-DD HH:mm:ssZZ'", ")", "trigger", "=", "get_object_or_404", "(", "TriggerService", ",", "pk", "=", "trigger_id", ")", "if", "trigger", ".", "status", ":", "title", "=", "'disabled'", "title_trigger", "=", "_", "(", "'Set this trigger on'", ")", "btn", "=", "'success'", "trigger", ".", "status", "=", "False", "else", ":", "title", "=", "_", "(", "'Edit your service'", ")", "title_trigger", "=", "_", "(", "'Set this trigger off'", ")", "btn", "=", "'primary'", "trigger", ".", "status", "=", "True", "# set the trigger to the current date when the", "# the trigger is back online", "trigger", ".", "date_triggered", "=", "now", "trigger", ".", "save", "(", ")", "return", "render", "(", "request", ",", "'triggers/trigger_line.html'", ",", "{", "'trigger'", ":", "trigger", ",", "'title'", ":", "title", ",", "'title_trigger'", ":", "title_trigger", ",", "'btn'", ":", "btn", ",", "'fire'", ":", "settings", ".", "DJANGO_TH", ".", "get", "(", "'fire'", ",", "False", ")", "}", ")" ]
37.228571
0.000748
def lines_diff(before_lines, after_lines, check_modified=False): '''Diff the lines in two strings. Parameters ---------- before_lines : iterable Iterable containing lines used as the baseline version. after_lines : iterable Iterable containing lines to be compared against the baseline. Returns ------- diff_result : A list of dictionaries containing diff information. ''' before_comps = [ LineComparator(line, check_modified=check_modified) for line in before_lines ] after_comps = [ LineComparator(line, check_modified=check_modified) for line in after_lines ] diff_result = diff( before_comps, after_comps, check_modified=check_modified ) return diff_result
[ "def", "lines_diff", "(", "before_lines", ",", "after_lines", ",", "check_modified", "=", "False", ")", ":", "before_comps", "=", "[", "LineComparator", "(", "line", ",", "check_modified", "=", "check_modified", ")", "for", "line", "in", "before_lines", "]", "after_comps", "=", "[", "LineComparator", "(", "line", ",", "check_modified", "=", "check_modified", ")", "for", "line", "in", "after_lines", "]", "diff_result", "=", "diff", "(", "before_comps", ",", "after_comps", ",", "check_modified", "=", "check_modified", ")", "return", "diff_result" ]
27.535714
0.001253
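`LineComparator` and `diff` come from the surrounding package; as a rough standard-library analogue of the same idea (diffing two iterables of lines), `difflib` can be used:

```python
import difflib

before = ["alpha", "beta", "gamma"]
after = ["alpha", "beta two", "gamma", "delta"]
# unified_diff yields the changed lines between the two sequences
print("\n".join(difflib.unified_diff(before, after, lineterm="")))
```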
def get_residue_mapping(self): """this function maps the chain and res ids "A 234" to values from [1-N]""" resid_list = self.aa_resids() # resid_set = set(resid_list) # resid_lst1 = list(resid_set) # resid_lst1.sort() map_res_id = {} x = 1 for resid in resid_list: # map_res_id[ int(resid[1:].strip()) ] = x map_res_id[ resid ] = x x+=1 return map_res_id
[ "def", "get_residue_mapping", "(", "self", ")", ":", "resid_list", "=", "self", ".", "aa_resids", "(", ")", "# resid_set = set(resid_list)", "# resid_lst1 = list(resid_set)", "# resid_lst1.sort()", "map_res_id", "=", "{", "}", "x", "=", "1", "for", "resid", "in", "resid_list", ":", "# map_res_id[ int(resid[1:].strip()) ] = x", "map_res_id", "[", "resid", "]", "=", "x", "x", "+=", "1", "return", "map_res_id" ]
30.066667
0.012903
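The loop above simply numbers the residue ids 1..N in order; a compact sketch of the same mapping with hypothetical ids:

```python
resid_list = ["A 231", "A 232", "A 234"]   # hypothetical chain/residue ids
map_res_id = {resid: i for i, resid in enumerate(resid_list, start=1)}
print(map_res_id)   # {'A 231': 1, 'A 232': 2, 'A 234': 3}
```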
def get_title(brain_or_object): """Get the Title for this object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :returns: Title :rtype: string """ if is_brain(brain_or_object) and base_hasattr(brain_or_object, "Title"): return brain_or_object.Title return get_object(brain_or_object).Title()
[ "def", "get_title", "(", "brain_or_object", ")", ":", "if", "is_brain", "(", "brain_or_object", ")", "and", "base_hasattr", "(", "brain_or_object", ",", "\"Title\"", ")", ":", "return", "brain_or_object", ".", "Title", "return", "get_object", "(", "brain_or_object", ")", ".", "Title", "(", ")" ]
37.363636
0.002375
def _chk_truncate(self): ''' Checks whether the frame should be truncated. If so, slices the frame up. ''' # Column of which first element is used to determine width of a dot col self.tr_size_col = -1 # Cut the data to the information actually printed max_cols = self.max_cols max_rows = self.max_rows if max_cols == 0 or max_rows == 0: # assume we are in the terminal (why else = 0) (w, h) = get_terminal_size() self.w = w self.h = h if self.max_rows == 0: dot_row = 1 prompt_row = 1 if self.show_dimensions: show_dimension_rows = 3 n_add_rows = self.header + dot_row + show_dimension_rows + prompt_row max_rows_adj = self.h - n_add_rows # rows available to fill with actual data self.max_rows_adj = max_rows_adj # Format only rows and columns that could potentially fit the screen if max_cols == 0 and len(self.frame.columns) > w: max_cols = w if max_rows == 0 and len(self.frame) > h: max_rows = h if not hasattr(self, 'max_rows_adj'): self.max_rows_adj = max_rows if not hasattr(self, 'max_cols_adj'): self.max_cols_adj = max_cols max_cols_adj = self.max_cols_adj max_rows_adj = self.max_rows_adj truncate_h = max_cols_adj and (len(self.columns) > max_cols_adj) truncate_v = max_rows_adj and (len(self.frame) > max_rows_adj) frame = self.frame if truncate_h: if max_cols_adj == 0: col_num = len(frame.columns) elif max_cols_adj == 1: frame = frame[:, :max_cols] col_num = max_cols else: col_num = (max_cols_adj // 2) frame = frame[:, :col_num].concat(frame[:, -col_num:], axis=1) self.tr_col_num = col_num if truncate_v: if max_rows_adj == 0: row_num = len(frame) if max_rows_adj == 1: row_num = max_rows frame = frame[:max_rows, :] else: row_num = max_rows_adj // 2 frame = frame[:row_num, :].concat(frame[-row_num:, :]) self.tr_row_num = row_num self.tr_frame = frame self.truncate_h = truncate_h self.truncate_v = truncate_v self.is_truncated = self.truncate_h or self.truncate_v
[ "def", "_chk_truncate", "(", "self", ")", ":", "# Column of which first element is used to determine width of a dot col", "self", ".", "tr_size_col", "=", "-", "1", "# Cut the data to the information actually printed", "max_cols", "=", "self", ".", "max_cols", "max_rows", "=", "self", ".", "max_rows", "if", "max_cols", "==", "0", "or", "max_rows", "==", "0", ":", "# assume we are in the terminal (why else = 0)", "(", "w", ",", "h", ")", "=", "get_terminal_size", "(", ")", "self", ".", "w", "=", "w", "self", ".", "h", "=", "h", "if", "self", ".", "max_rows", "==", "0", ":", "dot_row", "=", "1", "prompt_row", "=", "1", "if", "self", ".", "show_dimensions", ":", "show_dimension_rows", "=", "3", "n_add_rows", "=", "self", ".", "header", "+", "dot_row", "+", "show_dimension_rows", "+", "prompt_row", "max_rows_adj", "=", "self", ".", "h", "-", "n_add_rows", "# rows available to fill with actual data", "self", ".", "max_rows_adj", "=", "max_rows_adj", "# Format only rows and columns that could potentially fit the screen", "if", "max_cols", "==", "0", "and", "len", "(", "self", ".", "frame", ".", "columns", ")", ">", "w", ":", "max_cols", "=", "w", "if", "max_rows", "==", "0", "and", "len", "(", "self", ".", "frame", ")", ">", "h", ":", "max_rows", "=", "h", "if", "not", "hasattr", "(", "self", ",", "'max_rows_adj'", ")", ":", "self", ".", "max_rows_adj", "=", "max_rows", "if", "not", "hasattr", "(", "self", ",", "'max_cols_adj'", ")", ":", "self", ".", "max_cols_adj", "=", "max_cols", "max_cols_adj", "=", "self", ".", "max_cols_adj", "max_rows_adj", "=", "self", ".", "max_rows_adj", "truncate_h", "=", "max_cols_adj", "and", "(", "len", "(", "self", ".", "columns", ")", ">", "max_cols_adj", ")", "truncate_v", "=", "max_rows_adj", "and", "(", "len", "(", "self", ".", "frame", ")", ">", "max_rows_adj", ")", "frame", "=", "self", ".", "frame", "if", "truncate_h", ":", "if", "max_cols_adj", "==", "0", ":", "col_num", "=", "len", "(", "frame", ".", "columns", ")", "elif", "max_cols_adj", "==", "1", ":", "frame", "=", "frame", "[", ":", ",", ":", "max_cols", "]", "col_num", "=", "max_cols", "else", ":", "col_num", "=", "(", "max_cols_adj", "//", "2", ")", "frame", "=", "frame", "[", ":", ",", ":", "col_num", "]", ".", "concat", "(", "frame", "[", ":", ",", "-", "col_num", ":", "]", ",", "axis", "=", "1", ")", "self", ".", "tr_col_num", "=", "col_num", "if", "truncate_v", ":", "if", "max_rows_adj", "==", "0", ":", "row_num", "=", "len", "(", "frame", ")", "if", "max_rows_adj", "==", "1", ":", "row_num", "=", "max_rows", "frame", "=", "frame", "[", ":", "max_rows", ",", ":", "]", "else", ":", "row_num", "=", "max_rows_adj", "//", "2", "frame", "=", "frame", "[", ":", "row_num", ",", ":", "]", ".", "concat", "(", "frame", "[", "-", "row_num", ":", ",", ":", "]", ")", "self", ".", "tr_row_num", "=", "row_num", "self", ".", "tr_frame", "=", "frame", "self", ".", "truncate_h", "=", "truncate_h", "self", ".", "truncate_v", "=", "truncate_v", "self", ".", "is_truncated", "=", "self", ".", "truncate_h", "or", "self", ".", "truncate_v" ]
36.594203
0.002314
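The truncation above keeps the leading and trailing halves of the rows or columns and drops the middle; a minimal sketch of that rule on a plain list:

```python
def truncate_middle(rows, max_rows):
    # Keep the first and last max_rows // 2 entries, as in the frame slicing above.
    if max_rows and len(rows) > max_rows:
        keep = max_rows // 2
        return rows[:keep] + rows[-keep:]
    return rows

print(truncate_middle(list(range(10)), 4))   # [0, 1, 8, 9]
```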
def create_statement_inspection_table(sts: List[Influence]): """ Display an HTML representation of a table with INDRA statements to manually inspect for validity. Args: sts: A list of INDRA statements to be manually inspected for validity. """ columns = [ "un_groundings", "subj_polarity", "obj_polarity", "Sentence", "Source API", ] polarity_to_str = lambda x: "+" if x == 1 else "-" if x == -1 else "None" l = [] for s in sts: subj_un_grounding = s.subj.db_refs["UN"][0][0].split("/")[-1] obj_un_grounding = s.obj.db_refs["UN"][0][0].split("/")[-1] subj_polarity = s.subj_delta["polarity"] obj_polarity = s.obj_delta["polarity"] subj_adjectives = s.subj_delta["adjectives"] for e in s.evidence: l.append( ( (subj_un_grounding, obj_un_grounding), subj_polarity, obj_polarity, e.text, e.source_api, ) ) df = pd.DataFrame(l, columns=columns) df = df.pivot_table(index=["un_groundings", "Source API", "Sentence"]) def hover(hover_color="#ffff99"): return dict( selector="tr:hover", props=[("background-color", "%s" % hover_color)], ) styles = [ hover(), dict(props=[("font-size", "100%"), ("font-family", "Gill Sans")]), ] return df.style.set_table_styles(styles)
[ "def", "create_statement_inspection_table", "(", "sts", ":", "List", "[", "Influence", "]", ")", ":", "columns", "=", "[", "\"un_groundings\"", ",", "\"subj_polarity\"", ",", "\"obj_polarity\"", ",", "\"Sentence\"", ",", "\"Source API\"", ",", "]", "polarity_to_str", "=", "lambda", "x", ":", "\"+\"", "if", "x", "==", "1", "else", "\"-\"", "if", "x", "==", "-", "1", "else", "\"None\"", "l", "=", "[", "]", "for", "s", "in", "sts", ":", "subj_un_grounding", "=", "s", ".", "subj", ".", "db_refs", "[", "\"UN\"", "]", "[", "0", "]", "[", "0", "]", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]", "obj_un_grounding", "=", "s", ".", "obj", ".", "db_refs", "[", "\"UN\"", "]", "[", "0", "]", "[", "0", "]", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]", "subj_polarity", "=", "s", ".", "subj_delta", "[", "\"polarity\"", "]", "obj_polarity", "=", "s", ".", "obj_delta", "[", "\"polarity\"", "]", "subj_adjectives", "=", "s", ".", "subj_delta", "[", "\"adjectives\"", "]", "for", "e", "in", "s", ".", "evidence", ":", "l", ".", "append", "(", "(", "(", "subj_un_grounding", ",", "obj_un_grounding", ")", ",", "subj_polarity", ",", "obj_polarity", ",", "e", ".", "text", ",", "e", ".", "source_api", ",", ")", ")", "df", "=", "pd", ".", "DataFrame", "(", "l", ",", "columns", "=", "columns", ")", "df", "=", "df", ".", "pivot_table", "(", "index", "=", "[", "\"un_groundings\"", ",", "\"Source API\"", ",", "\"Sentence\"", "]", ")", "def", "hover", "(", "hover_color", "=", "\"#ffff99\"", ")", ":", "return", "dict", "(", "selector", "=", "\"tr:hover\"", ",", "props", "=", "[", "(", "\"background-color\"", ",", "\"%s\"", "%", "hover_color", ")", "]", ",", ")", "styles", "=", "[", "hover", "(", ")", ",", "dict", "(", "props", "=", "[", "(", "\"font-size\"", ",", "\"100%\"", ")", ",", "(", "\"font-family\"", ",", "\"Gill Sans\"", ")", "]", ")", ",", "]", "return", "df", ".", "style", ".", "set_table_styles", "(", "styles", ")" ]
29.74
0.001953
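The styling at the end relies on pandas' Styler API; a small self-contained illustration of `set_table_styles` with the same hover rule (the DataFrame contents are made up):

```python
import pandas as pd

df = pd.DataFrame({"Sentence": ["Rain causes floods"], "Source API": ["eidos"]})
styles = [dict(selector="tr:hover", props=[("background-color", "#ffff99")])]
styled = df.style.set_table_styles(styles)   # Styler object, rendered as HTML in notebooks
print(type(styled).__name__)                 # Styler
```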
def save_figures(block, block_vars, gallery_conf): """Save all open figures of the example code-block. Parameters ---------- block : tuple A tuple containing the (label, content, line_number) of the block. block_vars : dict Dict of block variables. gallery_conf : dict Contains the configuration of Sphinx-Gallery Returns ------- images_rst : str rst code to embed the images in the document. """ image_path_iterator = block_vars['image_path_iterator'] all_rst = u'' prev_count = len(image_path_iterator) for scraper in gallery_conf['image_scrapers']: rst = scraper(block, block_vars, gallery_conf) if not isinstance(rst, basestring): raise TypeError('rst from scraper %r was not a string, ' 'got type %s:\n%r' % (scraper, type(rst), rst)) n_new = len(image_path_iterator) - prev_count for ii in range(n_new): current_path, _ = _find_image_ext( image_path_iterator.paths[prev_count + ii]) if not os.path.isfile(current_path): raise RuntimeError('Scraper %s did not produce expected image:' '\n%s' % (scraper, current_path)) all_rst += rst return all_rst
[ "def", "save_figures", "(", "block", ",", "block_vars", ",", "gallery_conf", ")", ":", "image_path_iterator", "=", "block_vars", "[", "'image_path_iterator'", "]", "all_rst", "=", "u''", "prev_count", "=", "len", "(", "image_path_iterator", ")", "for", "scraper", "in", "gallery_conf", "[", "'image_scrapers'", "]", ":", "rst", "=", "scraper", "(", "block", ",", "block_vars", ",", "gallery_conf", ")", "if", "not", "isinstance", "(", "rst", ",", "basestring", ")", ":", "raise", "TypeError", "(", "'rst from scraper %r was not a string, '", "'got type %s:\\n%r'", "%", "(", "scraper", ",", "type", "(", "rst", ")", ",", "rst", ")", ")", "n_new", "=", "len", "(", "image_path_iterator", ")", "-", "prev_count", "for", "ii", "in", "range", "(", "n_new", ")", ":", "current_path", ",", "_", "=", "_find_image_ext", "(", "image_path_iterator", ".", "paths", "[", "prev_count", "+", "ii", "]", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "current_path", ")", ":", "raise", "RuntimeError", "(", "'Scraper %s did not produce expected image:'", "'\\n%s'", "%", "(", "scraper", ",", "current_path", ")", ")", "all_rst", "+=", "rst", "return", "all_rst" ]
37.485714
0.000743
def hdbscan(X, min_cluster_size=5, min_samples=None, alpha=1.0, metric='minkowski', p=2, leaf_size=40, algorithm='best', memory=Memory(cachedir=None, verbose=0), approx_min_span_tree=True, gen_min_span_tree=False, core_dist_n_jobs=4, cluster_selection_method='eom', allow_single_cluster=False, match_reference_implementation=False, **kwargs): """Perform HDBSCAN clustering from a vector array or distance matrix. Parameters ---------- X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \ array of shape (n_samples, n_samples) A feature array, or array of distances between samples if ``metric='precomputed'``. min_cluster_size : int, optional (default=5) The minimum number of samples in a group for that group to be considered a cluster; groupings smaller than this size will be left as noise. min_samples : int, optional (default=None) The number of samples in a neighborhood for a point to be considered as a core point. This includes the point itself. defaults to the min_cluster_size. alpha : float, optional (default=1.0) A distance scaling parameter as used in robust single linkage. See [2]_ for more information. metric : string or callable, optional (default='minkowski') The metric to use when calculating distance between instances in a feature array. If metric is a string or callable, it must be one of the options allowed by metrics.pairwise.pairwise_distances for its metric parameter. If metric is "precomputed", X is assumed to be a distance matrix and must be square. p : int, optional (default=2) p value to use if using the minkowski metric. leaf_size : int, optional (default=40) Leaf size for trees responsible for fast nearest neighbour queries. algorithm : string, optional (default='best') Exactly which algorithm to use; hdbscan has variants specialised for different characteristics of the data. By default this is set to ``best`` which chooses the "best" algorithm given the nature of the data. You can force other options if you believe you know better. Options are: * ``best`` * ``generic`` * ``prims_kdtree`` * ``prims_balltree`` * ``boruvka_kdtree`` * ``boruvka_balltree`` memory : instance of joblib.Memory or string, optional Used to cache the output of the computation of the tree. By default, no caching is done. If a string is given, it is the path to the caching directory. approx_min_span_tree : bool, optional (default=True) Whether to accept an only approximate minimum spanning tree. For some algorithms this can provide a significant speedup, but the resulting clustering may be of marginally lower quality. If you are willing to sacrifice speed for correctness you may want to explore this; in general this should be left at the default True. gen_min_span_tree : bool, optional (default=False) Whether to generate the minimum spanning tree for later analysis. core_dist_n_jobs : int, optional (default=4) Number of parallel jobs to run in core distance computations (if supported by the specific algorithm). For ``core_dist_n_jobs`` below -1, (n_cpus + 1 + core_dist_n_jobs) are used. cluster_selection_method : string, optional (default='eom') The method used to select clusters from the condensed tree. The standard approach for HDBSCAN* is to use an Excess of Mass algorithm to find the most persistent clusters. Alternatively you can instead select the clusters at the leaves of the tree -- this provides the most fine grained and homogeneous clusters. 
Options are: * ``eom`` * ``leaf`` allow_single_cluster : bool, optional (default=False) By default HDBSCAN* will not produce a single cluster, setting this to t=True will override this and allow single cluster results in the case that you feel this is a valid result for your dataset. (default False) match_reference_implementation : bool, optional (default=False) There exist some interpretational differences between this HDBSCAN* implementation and the original authors reference implementation in Java. This can result in very minor differences in clustering results. Setting this flag to True will, at a some performance cost, ensure that the clustering results match the reference implementation. **kwargs : optional Arguments passed to the distance metric Returns ------- labels : ndarray, shape (n_samples, ) Cluster labels for each point. Noisy samples are given the label -1. probabilities : ndarray, shape (n_samples, ) Cluster membership strengths for each point. Noisy samples are assigned 0. cluster_persistence : array, shape (n_clusters, ) A score of how persistent each cluster is. A score of 1.0 represents a perfectly stable cluster that persists over all distance scales, while a score of 0.0 represents a perfectly ephemeral cluster. These scores can be guage the relative coherence of the clusters output by the algorithm. condensed_tree : record array The condensed cluster hierarchy used to generate clusters. single_linkage_tree : ndarray, shape (n_samples - 1, 4) The single linkage tree produced during clustering in scipy hierarchical clustering format (see http://docs.scipy.org/doc/scipy/reference/cluster.hierarchy.html). min_spanning_tree : ndarray, shape (n_samples - 1, 3) The minimum spanning as an edgelist. If gen_min_span_tree was False this will be None. References ---------- .. [1] Campello, R. J., Moulavi, D., & Sander, J. (2013, April). Density-based clustering based on hierarchical density estimates. In Pacific-Asia Conference on Knowledge Discovery and Data Mining (pp. 160-172). Springer Berlin Heidelberg. .. [2] Chaudhuri, K., & Dasgupta, S. (2010). Rates of convergence for the cluster tree. In Advances in Neural Information Processing Systems (pp. 343-351). 
""" if min_samples is None: min_samples = min_cluster_size if type(min_samples) is not int or type(min_cluster_size) is not int: raise ValueError('Min samples and min cluster size must be integers!') if min_samples <= 0 or min_cluster_size <= 0: raise ValueError('Min samples and Min cluster size must be positive' ' integers') if min_cluster_size == 1: raise ValueError('Min cluster size must be greater than one') if not isinstance(alpha, float) or alpha <= 0.0: raise ValueError('Alpha must be a positive float value greater than' ' 0!') if leaf_size < 1: raise ValueError('Leaf size must be greater than 0!') if metric == 'minkowski': if p is None: raise TypeError('Minkowski metric given but no p value supplied!') if p < 0: raise ValueError('Minkowski metric with negative p value is not' ' defined!') if match_reference_implementation: min_samples = min_samples - 1 min_cluster_size = min_cluster_size + 1 approx_min_span_tree = False if cluster_selection_method not in ('eom', 'leaf'): raise ValueError('Invalid Cluster Selection Method: %s\n' 'Should be one of: "eom", "leaf"\n') # Checks input and converts to an nd-array where possible if metric != 'precomputed' or issparse(X): X = check_array(X, accept_sparse='csr') else: # Only non-sparse, precomputed distance matrices are handled here # and thereby allowed to contain numpy.inf for missing distances check_precomputed_distance_matrix(X) # Python 2 and 3 compliant string_type checking if isinstance(memory, six.string_types): memory = Memory(cachedir=memory, verbose=0) size = X.shape[0] min_samples = min(size - 1, min_samples) if min_samples == 0: min_samples = 1 if algorithm != 'best': if metric != 'precomputed' and issparse(X) and metric != 'generic': raise ValueError("Sparse data matrices only support algorithm 'generic'.") if algorithm == 'generic': (single_linkage_tree, result_min_span_tree) = memory.cache( _hdbscan_generic)(X, min_samples, alpha, metric, p, leaf_size, gen_min_span_tree, **kwargs) elif algorithm == 'prims_kdtree': if metric not in KDTree.valid_metrics: raise ValueError("Cannot use Prim's with KDTree for this" " metric!") (single_linkage_tree, result_min_span_tree) = memory.cache( _hdbscan_prims_kdtree)(X, min_samples, alpha, metric, p, leaf_size, gen_min_span_tree, **kwargs) elif algorithm == 'prims_balltree': if metric not in BallTree.valid_metrics: raise ValueError("Cannot use Prim's with BallTree for this" " metric!") (single_linkage_tree, result_min_span_tree) = memory.cache( _hdbscan_prims_balltree)(X, min_samples, alpha, metric, p, leaf_size, gen_min_span_tree, **kwargs) elif algorithm == 'boruvka_kdtree': if metric not in BallTree.valid_metrics: raise ValueError("Cannot use Boruvka with KDTree for this" " metric!") (single_linkage_tree, result_min_span_tree) = memory.cache( _hdbscan_boruvka_kdtree)(X, min_samples, alpha, metric, p, leaf_size, approx_min_span_tree, gen_min_span_tree, core_dist_n_jobs, **kwargs) elif algorithm == 'boruvka_balltree': if metric not in BallTree.valid_metrics: raise ValueError("Cannot use Boruvka with BallTree for this" " metric!") (single_linkage_tree, result_min_span_tree) = memory.cache( _hdbscan_boruvka_balltree)(X, min_samples, alpha, metric, p, leaf_size, approx_min_span_tree, gen_min_span_tree, core_dist_n_jobs, **kwargs) else: raise TypeError('Unknown algorithm type %s specified' % algorithm) else: if issparse(X) or metric not in FAST_METRICS: # We can't do much with sparse matrices ... 
(single_linkage_tree, result_min_span_tree) = memory.cache( _hdbscan_generic)(X, min_samples, alpha, metric, p, leaf_size, gen_min_span_tree, **kwargs) elif metric in KDTree.valid_metrics: # TO DO: Need heuristic to decide when to go to boruvka; # still debugging for now if X.shape[1] > 60: (single_linkage_tree, result_min_span_tree) = memory.cache( _hdbscan_prims_kdtree)(X, min_samples, alpha, metric, p, leaf_size, gen_min_span_tree, **kwargs) else: (single_linkage_tree, result_min_span_tree) = memory.cache( _hdbscan_boruvka_kdtree)(X, min_samples, alpha, metric, p, leaf_size, approx_min_span_tree, gen_min_span_tree, core_dist_n_jobs, **kwargs) else: # Metric is a valid BallTree metric # TO DO: Need heuristic to decide when to go to boruvka; # still debugging for now if X.shape[1] > 60: (single_linkage_tree, result_min_span_tree) = memory.cache( _hdbscan_prims_balltree)(X, min_samples, alpha, metric, p, leaf_size, gen_min_span_tree, **kwargs) else: (single_linkage_tree, result_min_span_tree) = memory.cache( _hdbscan_boruvka_balltree)(X, min_samples, alpha, metric, p, leaf_size, approx_min_span_tree, gen_min_span_tree, core_dist_n_jobs, **kwargs) return _tree_to_labels(X, single_linkage_tree, min_cluster_size, cluster_selection_method, allow_single_cluster, match_reference_implementation) + \ (result_min_span_tree,)
[ "def", "hdbscan", "(", "X", ",", "min_cluster_size", "=", "5", ",", "min_samples", "=", "None", ",", "alpha", "=", "1.0", ",", "metric", "=", "'minkowski'", ",", "p", "=", "2", ",", "leaf_size", "=", "40", ",", "algorithm", "=", "'best'", ",", "memory", "=", "Memory", "(", "cachedir", "=", "None", ",", "verbose", "=", "0", ")", ",", "approx_min_span_tree", "=", "True", ",", "gen_min_span_tree", "=", "False", ",", "core_dist_n_jobs", "=", "4", ",", "cluster_selection_method", "=", "'eom'", ",", "allow_single_cluster", "=", "False", ",", "match_reference_implementation", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "min_samples", "is", "None", ":", "min_samples", "=", "min_cluster_size", "if", "type", "(", "min_samples", ")", "is", "not", "int", "or", "type", "(", "min_cluster_size", ")", "is", "not", "int", ":", "raise", "ValueError", "(", "'Min samples and min cluster size must be integers!'", ")", "if", "min_samples", "<=", "0", "or", "min_cluster_size", "<=", "0", ":", "raise", "ValueError", "(", "'Min samples and Min cluster size must be positive'", "' integers'", ")", "if", "min_cluster_size", "==", "1", ":", "raise", "ValueError", "(", "'Min cluster size must be greater than one'", ")", "if", "not", "isinstance", "(", "alpha", ",", "float", ")", "or", "alpha", "<=", "0.0", ":", "raise", "ValueError", "(", "'Alpha must be a positive float value greater than'", "' 0!'", ")", "if", "leaf_size", "<", "1", ":", "raise", "ValueError", "(", "'Leaf size must be greater than 0!'", ")", "if", "metric", "==", "'minkowski'", ":", "if", "p", "is", "None", ":", "raise", "TypeError", "(", "'Minkowski metric given but no p value supplied!'", ")", "if", "p", "<", "0", ":", "raise", "ValueError", "(", "'Minkowski metric with negative p value is not'", "' defined!'", ")", "if", "match_reference_implementation", ":", "min_samples", "=", "min_samples", "-", "1", "min_cluster_size", "=", "min_cluster_size", "+", "1", "approx_min_span_tree", "=", "False", "if", "cluster_selection_method", "not", "in", "(", "'eom'", ",", "'leaf'", ")", ":", "raise", "ValueError", "(", "'Invalid Cluster Selection Method: %s\\n'", "'Should be one of: \"eom\", \"leaf\"\\n'", ")", "# Checks input and converts to an nd-array where possible", "if", "metric", "!=", "'precomputed'", "or", "issparse", "(", "X", ")", ":", "X", "=", "check_array", "(", "X", ",", "accept_sparse", "=", "'csr'", ")", "else", ":", "# Only non-sparse, precomputed distance matrices are handled here", "# and thereby allowed to contain numpy.inf for missing distances", "check_precomputed_distance_matrix", "(", "X", ")", "# Python 2 and 3 compliant string_type checking", "if", "isinstance", "(", "memory", ",", "six", ".", "string_types", ")", ":", "memory", "=", "Memory", "(", "cachedir", "=", "memory", ",", "verbose", "=", "0", ")", "size", "=", "X", ".", "shape", "[", "0", "]", "min_samples", "=", "min", "(", "size", "-", "1", ",", "min_samples", ")", "if", "min_samples", "==", "0", ":", "min_samples", "=", "1", "if", "algorithm", "!=", "'best'", ":", "if", "metric", "!=", "'precomputed'", "and", "issparse", "(", "X", ")", "and", "metric", "!=", "'generic'", ":", "raise", "ValueError", "(", "\"Sparse data matrices only support algorithm 'generic'.\"", ")", "if", "algorithm", "==", "'generic'", ":", "(", "single_linkage_tree", ",", "result_min_span_tree", ")", "=", "memory", ".", "cache", "(", "_hdbscan_generic", ")", "(", "X", ",", "min_samples", ",", "alpha", ",", "metric", ",", "p", ",", "leaf_size", ",", "gen_min_span_tree", ",", "*", "*", 
"kwargs", ")", "elif", "algorithm", "==", "'prims_kdtree'", ":", "if", "metric", "not", "in", "KDTree", ".", "valid_metrics", ":", "raise", "ValueError", "(", "\"Cannot use Prim's with KDTree for this\"", "\" metric!\"", ")", "(", "single_linkage_tree", ",", "result_min_span_tree", ")", "=", "memory", ".", "cache", "(", "_hdbscan_prims_kdtree", ")", "(", "X", ",", "min_samples", ",", "alpha", ",", "metric", ",", "p", ",", "leaf_size", ",", "gen_min_span_tree", ",", "*", "*", "kwargs", ")", "elif", "algorithm", "==", "'prims_balltree'", ":", "if", "metric", "not", "in", "BallTree", ".", "valid_metrics", ":", "raise", "ValueError", "(", "\"Cannot use Prim's with BallTree for this\"", "\" metric!\"", ")", "(", "single_linkage_tree", ",", "result_min_span_tree", ")", "=", "memory", ".", "cache", "(", "_hdbscan_prims_balltree", ")", "(", "X", ",", "min_samples", ",", "alpha", ",", "metric", ",", "p", ",", "leaf_size", ",", "gen_min_span_tree", ",", "*", "*", "kwargs", ")", "elif", "algorithm", "==", "'boruvka_kdtree'", ":", "if", "metric", "not", "in", "BallTree", ".", "valid_metrics", ":", "raise", "ValueError", "(", "\"Cannot use Boruvka with KDTree for this\"", "\" metric!\"", ")", "(", "single_linkage_tree", ",", "result_min_span_tree", ")", "=", "memory", ".", "cache", "(", "_hdbscan_boruvka_kdtree", ")", "(", "X", ",", "min_samples", ",", "alpha", ",", "metric", ",", "p", ",", "leaf_size", ",", "approx_min_span_tree", ",", "gen_min_span_tree", ",", "core_dist_n_jobs", ",", "*", "*", "kwargs", ")", "elif", "algorithm", "==", "'boruvka_balltree'", ":", "if", "metric", "not", "in", "BallTree", ".", "valid_metrics", ":", "raise", "ValueError", "(", "\"Cannot use Boruvka with BallTree for this\"", "\" metric!\"", ")", "(", "single_linkage_tree", ",", "result_min_span_tree", ")", "=", "memory", ".", "cache", "(", "_hdbscan_boruvka_balltree", ")", "(", "X", ",", "min_samples", ",", "alpha", ",", "metric", ",", "p", ",", "leaf_size", ",", "approx_min_span_tree", ",", "gen_min_span_tree", ",", "core_dist_n_jobs", ",", "*", "*", "kwargs", ")", "else", ":", "raise", "TypeError", "(", "'Unknown algorithm type %s specified'", "%", "algorithm", ")", "else", ":", "if", "issparse", "(", "X", ")", "or", "metric", "not", "in", "FAST_METRICS", ":", "# We can't do much with sparse matrices ...", "(", "single_linkage_tree", ",", "result_min_span_tree", ")", "=", "memory", ".", "cache", "(", "_hdbscan_generic", ")", "(", "X", ",", "min_samples", ",", "alpha", ",", "metric", ",", "p", ",", "leaf_size", ",", "gen_min_span_tree", ",", "*", "*", "kwargs", ")", "elif", "metric", "in", "KDTree", ".", "valid_metrics", ":", "# TO DO: Need heuristic to decide when to go to boruvka;", "# still debugging for now", "if", "X", ".", "shape", "[", "1", "]", ">", "60", ":", "(", "single_linkage_tree", ",", "result_min_span_tree", ")", "=", "memory", ".", "cache", "(", "_hdbscan_prims_kdtree", ")", "(", "X", ",", "min_samples", ",", "alpha", ",", "metric", ",", "p", ",", "leaf_size", ",", "gen_min_span_tree", ",", "*", "*", "kwargs", ")", "else", ":", "(", "single_linkage_tree", ",", "result_min_span_tree", ")", "=", "memory", ".", "cache", "(", "_hdbscan_boruvka_kdtree", ")", "(", "X", ",", "min_samples", ",", "alpha", ",", "metric", ",", "p", ",", "leaf_size", ",", "approx_min_span_tree", ",", "gen_min_span_tree", ",", "core_dist_n_jobs", ",", "*", "*", "kwargs", ")", "else", ":", "# Metric is a valid BallTree metric", "# TO DO: Need heuristic to decide when to go to boruvka;", "# still debugging for now", "if", "X", ".", 
"shape", "[", "1", "]", ">", "60", ":", "(", "single_linkage_tree", ",", "result_min_span_tree", ")", "=", "memory", ".", "cache", "(", "_hdbscan_prims_balltree", ")", "(", "X", ",", "min_samples", ",", "alpha", ",", "metric", ",", "p", ",", "leaf_size", ",", "gen_min_span_tree", ",", "*", "*", "kwargs", ")", "else", ":", "(", "single_linkage_tree", ",", "result_min_span_tree", ")", "=", "memory", ".", "cache", "(", "_hdbscan_boruvka_balltree", ")", "(", "X", ",", "min_samples", ",", "alpha", ",", "metric", ",", "p", ",", "leaf_size", ",", "approx_min_span_tree", ",", "gen_min_span_tree", ",", "core_dist_n_jobs", ",", "*", "*", "kwargs", ")", "return", "_tree_to_labels", "(", "X", ",", "single_linkage_tree", ",", "min_cluster_size", ",", "cluster_selection_method", ",", "allow_single_cluster", ",", "match_reference_implementation", ")", "+", "(", "result_min_span_tree", ",", ")" ]
46.367698
0.00029
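A hedged usage sketch of the functional API above, assuming the `hdbscan` package and NumPy are installed; the data are two synthetic blobs, and the six return values follow the docstring:

```python
import numpy as np
from hdbscan import hdbscan   # functional entry point shown above

rng = np.random.RandomState(0)
X = np.vstack([rng.normal(0, 0.3, (50, 2)), rng.normal(5, 0.3, (50, 2))])
labels, probs, persistence, ctree, sl_tree, mst = hdbscan(X, min_cluster_size=5)
print(np.unique(labels))   # two cluster labels, plus -1 for any noise points
```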
def _get_securitygroupname_id(securitygroupname_list): ''' Returns the SecurityGroupId of a SecurityGroupName to use ''' securitygroupid_set = set() if not isinstance(securitygroupname_list, list): securitygroupname_list = [securitygroupname_list] params = {'Action': 'DescribeSecurityGroups'} for sg in aws.query(params, location=get_location(), provider=get_provider(), opts=__opts__, sigver='4'): if sg['groupName'] in securitygroupname_list: log.debug( 'AWS SecurityGroup ID of %s is %s', sg['groupName'], sg['groupId'] ) securitygroupid_set.add(sg['groupId']) return list(securitygroupid_set)
[ "def", "_get_securitygroupname_id", "(", "securitygroupname_list", ")", ":", "securitygroupid_set", "=", "set", "(", ")", "if", "not", "isinstance", "(", "securitygroupname_list", ",", "list", ")", ":", "securitygroupname_list", "=", "[", "securitygroupname_list", "]", "params", "=", "{", "'Action'", ":", "'DescribeSecurityGroups'", "}", "for", "sg", "in", "aws", ".", "query", "(", "params", ",", "location", "=", "get_location", "(", ")", ",", "provider", "=", "get_provider", "(", ")", ",", "opts", "=", "__opts__", ",", "sigver", "=", "'4'", ")", ":", "if", "sg", "[", "'groupName'", "]", "in", "securitygroupname_list", ":", "log", ".", "debug", "(", "'AWS SecurityGroup ID of %s is %s'", ",", "sg", "[", "'groupName'", "]", ",", "sg", "[", "'groupId'", "]", ")", "securitygroupid_set", ".", "add", "(", "sg", "[", "'groupId'", "]", ")", "return", "list", "(", "securitygroupid_set", ")" ]
42.411765
0.001357
def search_tags_as_filters(tags): """Get different tags as dicts ready to use as dropdown lists.""" # set dicts actions = {} contacts = {} formats = {} inspire = {} keywords = {} licenses = {} md_types = dict() owners = defaultdict(str) srs = {} unused = {} # 0/1 values compliance = 0 type_dataset = 0 # parsing tags print(len(tags.keys())) i = 0 for tag in sorted(tags.keys()): i += 1 # actions if tag.startswith("action"): actions[tags.get(tag, tag)] = tag continue # compliance INSPIRE elif tag.startswith("conformity"): compliance = 1 continue # contacts elif tag.startswith("contact"): contacts[tags.get(tag)] = tag continue # formats elif tag.startswith("format"): formats[tags.get(tag)] = tag continue # INSPIRE themes elif tag.startswith("keyword:inspire"): inspire[tags.get(tag)] = tag continue # keywords elif tag.startswith("keyword:isogeo"): keywords[tags.get(tag)] = tag continue # licenses elif tag.startswith("license"): licenses[tags.get(tag)] = tag continue # owners elif tag.startswith("owner"): owners[tags.get(tag)] = tag continue # SRS elif tag.startswith("coordinate-system"): srs[tags.get(tag)] = tag continue # types elif tag.startswith("type"): md_types[tags.get(tag)] = tag if tag in ("type:vector-dataset", "type:raster-dataset"): type_dataset += 1 else: pass continue # ignored tags else: unused[tags.get(tag)] = tag continue # override API tags to allow all datasets filter - see # if type_dataset == 2: md_types["Donnée"] = "type:dataset" else: pass # printing # print("There are:" # "\n{} actions" # "\n{} contacts" # "\n{} formats" # "\n{} INSPIRE themes" # "\n{} keywords" # "\n{} licenses" # "\n{} owners" # "\n{} SRS" # "\n{} types" # "\n{} unused".format(len(actions), # len(contacts), # len(formats), # len(inspire), # len(keywords), # len(licenses), # len(owners), # len(srs), # len(md_types), # len(unused) # )) # storing dicts tags_parsed = { "actions": actions, "compliance": compliance, "contacts": contacts, "formats": formats, "inspire": inspire, "keywords": keywords, "licenses": licenses, "owners": owners, "srs": srs, "types": md_types, "unused": unused, } # method ending return tags_parsed
[ "def", "search_tags_as_filters", "(", "tags", ")", ":", "# set dicts", "actions", "=", "{", "}", "contacts", "=", "{", "}", "formats", "=", "{", "}", "inspire", "=", "{", "}", "keywords", "=", "{", "}", "licenses", "=", "{", "}", "md_types", "=", "dict", "(", ")", "owners", "=", "defaultdict", "(", "str", ")", "srs", "=", "{", "}", "unused", "=", "{", "}", "# 0/1 values", "compliance", "=", "0", "type_dataset", "=", "0", "# parsing tags", "print", "(", "len", "(", "tags", ".", "keys", "(", ")", ")", ")", "i", "=", "0", "for", "tag", "in", "sorted", "(", "tags", ".", "keys", "(", ")", ")", ":", "i", "+=", "1", "# actions", "if", "tag", ".", "startswith", "(", "\"action\"", ")", ":", "actions", "[", "tags", ".", "get", "(", "tag", ",", "tag", ")", "]", "=", "tag", "continue", "# compliance INSPIRE", "elif", "tag", ".", "startswith", "(", "\"conformity\"", ")", ":", "compliance", "=", "1", "continue", "# contacts", "elif", "tag", ".", "startswith", "(", "\"contact\"", ")", ":", "contacts", "[", "tags", ".", "get", "(", "tag", ")", "]", "=", "tag", "continue", "# formats", "elif", "tag", ".", "startswith", "(", "\"format\"", ")", ":", "formats", "[", "tags", ".", "get", "(", "tag", ")", "]", "=", "tag", "continue", "# INSPIRE themes", "elif", "tag", ".", "startswith", "(", "\"keyword:inspire\"", ")", ":", "inspire", "[", "tags", ".", "get", "(", "tag", ")", "]", "=", "tag", "continue", "# keywords", "elif", "tag", ".", "startswith", "(", "\"keyword:isogeo\"", ")", ":", "keywords", "[", "tags", ".", "get", "(", "tag", ")", "]", "=", "tag", "continue", "# licenses", "elif", "tag", ".", "startswith", "(", "\"license\"", ")", ":", "licenses", "[", "tags", ".", "get", "(", "tag", ")", "]", "=", "tag", "continue", "# owners", "elif", "tag", ".", "startswith", "(", "\"owner\"", ")", ":", "owners", "[", "tags", ".", "get", "(", "tag", ")", "]", "=", "tag", "continue", "# SRS", "elif", "tag", ".", "startswith", "(", "\"coordinate-system\"", ")", ":", "srs", "[", "tags", ".", "get", "(", "tag", ")", "]", "=", "tag", "continue", "# types", "elif", "tag", ".", "startswith", "(", "\"type\"", ")", ":", "md_types", "[", "tags", ".", "get", "(", "tag", ")", "]", "=", "tag", "if", "tag", "in", "(", "\"type:vector-dataset\"", ",", "\"type:raster-dataset\"", ")", ":", "type_dataset", "+=", "1", "else", ":", "pass", "continue", "# ignored tags", "else", ":", "unused", "[", "tags", ".", "get", "(", "tag", ")", "]", "=", "tag", "continue", "# override API tags to allow all datasets filter - see #", "if", "type_dataset", "==", "2", ":", "md_types", "[", "\"Donnée\"]", " ", " ", "type:dataset\"", "else", ":", "pass", "# printing", "# print(\"There are:\"", "# \"\\n{} actions\"", "# \"\\n{} contacts\"", "# \"\\n{} formats\"", "# \"\\n{} INSPIRE themes\"", "# \"\\n{} keywords\"", "# \"\\n{} licenses\"", "# \"\\n{} owners\"", "# \"\\n{} SRS\"", "# \"\\n{} types\"", "# \"\\n{} unused\".format(len(actions),", "# len(contacts),", "# len(formats),", "# len(inspire),", "# len(keywords),", "# len(licenses),", "# len(owners),", "# len(srs),", "# len(md_types),", "# len(unused)", "# ))", "# storing dicts", "tags_parsed", "=", "{", "\"actions\"", ":", "actions", ",", "\"compliance\"", ":", "compliance", ",", "\"contacts\"", ":", "contacts", ",", "\"formats\"", ":", "formats", ",", "\"inspire\"", ":", "inspire", ",", "\"keywords\"", ":", "keywords", ",", "\"licenses\"", ":", "licenses", ",", "\"owners\"", ":", "owners", ",", "\"srs\"", ":", "srs", ",", "\"types\"", ":", "md_types", ",", "\"unused\"", ":", "unused", ",", "}", "# 
method ending", "return", "tags_parsed" ]
27.736842
0.000305
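The function is essentially a prefix dispatch over the tag keys; a minimal illustration of one branch with hypothetical Isogeo-style tags:

```python
tags = {
    "owner:32f7e95ec4e94ca3bc1afda960003882": "Isogeo Team",   # hypothetical ids
    "keyword:isogeo:roads": "roads",
    "type:vector-dataset": "Dataset (vector)",
}
# Same label-to-tag orientation as owners[tags.get(tag)] = tag above
owners = {label: tag for tag, label in tags.items() if tag.startswith("owner")}
print(owners)   # {'Isogeo Team': 'owner:32f7e95ec4e94ca3bc1afda960003882'}
```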
def get_easyui_context(self, **kwargs): """ Initialize an empty context """ context = {} queryset = self.get_queryset() limit_queryset = self.get_limit_queryset() data = model_serialize(limit_queryset, self.extra_fields, self.remove_fields) count = queryset.count() # In the data returned to the datagrid, total is the total row count and rows is the queried result set context.update(rows=data) context.update(total=count) return context
[ "def", "get_easyui_context", "(", "self", ",", "*", "*", "kwargs", ")", ":", "context", "=", "{", "}", "queryset", "=", "self", ".", "get_queryset", "(", ")", "limit_queryset", "=", "self", ".", "get_limit_queryset", "(", ")", "data", "=", "model_serialize", "(", "limit_queryset", ",", "self", ".", "extra_fields", ",", "self", ".", "remove_fields", ")", "count", "=", "queryset", ".", "count", "(", ")", "# datagrid 返回的数据中,total是总的行数,rows是查询到的结果集", "context", ".", "update", "(", "rows", "=", "data", ")", "context", ".", "update", "(", "total", "=", "count", ")", "return", "context" ]
34.461538
0.008696

def older_message(m, lastm): '''return true if m is older than lastm by timestamp''' atts = {'time_boot_ms' : 1.0e-3, 'time_unix_usec' : 1.0e-6, 'time_usec' : 1.0e-6} for a in atts.keys(): if hasattr(m, a): mul = atts[a] t1 = getattr(m, a) * mul t2 = getattr(lastm, a) * mul if t2 >= t1 and t2 - t1 < 60: return True return False
[ "def", "older_message", "(", "m", ",", "lastm", ")", ":", "atts", "=", "{", "'time_boot_ms'", ":", "1.0e-3", ",", "'time_unix_usec'", ":", "1.0e-6", ",", "'time_usec'", ":", "1.0e-6", "}", "for", "a", "in", "atts", ".", "keys", "(", ")", ":", "if", "hasattr", "(", "m", ",", "a", ")", ":", "mul", "=", "atts", "[", "a", "]", "t1", "=", "m", ".", "getattr", "(", "a", ")", "*", "mul", "t2", "=", "lastm", ".", "getattr", "(", "a", ")", "*", "mul", "if", "t2", ">=", "t1", "and", "t2", "-", "t1", "<", "60", ":", "return", "True", "return", "False" ]
32.846154
0.009112
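Assuming `older_message` is defined with the standard `getattr(m, a)` spelling, a self-contained check with a stub message class (the microsecond values are hypothetical):

```python
class Msg:
    def __init__(self, time_usec):
        self.time_usec = time_usec   # microseconds, as in MAVLink messages

# A message 5 seconds older than the last one is reported as older:
print(older_message(Msg(time_usec=10_000_000), Msg(time_usec=15_000_000)))   # True
```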
def generate(self): '''Generate the whole static site. Iterates through all existing s2 pages, rendering and writing them (and copying all common files along). It also generates the toc, a sitemap, and the atom feed etc. (in the future it should handle tags and categories) ''' if self._dirs['base'] == None or not self._tree_ready: #there's NO base here or up the chain raise ValueError #cannot generate! # wipe www dir & recreate self._wipe_www_dir()#copy common files #shutil.copytree(self.dirs['common'], # os.path.join(self.dirs['www'],"common")) slist = glob.glob(os.path.join(self.dirs['common'],"*")) for fo in slist: rfn = os.path.split(fo)[1] if os.path.isdir(fo): shutil.copytree(fo, os.path.join(self.dirs['www'], rfn)) else: shutil.copy(fo, self.dirs['www']) # init atom file title = self.site_config['site_title'] if title == '': title = "<No title>" feed = AtomFeed(title=title, subtitle=self.site_config['site_subtitle'], feed_url= os.path.join( self.site_config['site_url'],"atom.xml"), url=self.site_config['site_url'], author=self.site_config['default_author']) themes_to_copy = [] # full paths! generated_page_info = [] for slug in self._pages_to_generate(): #this list of pages is in reverse chrono order p = s2page.Page(self, slug, isslug=True) generated_page_info.append( {'slug': p.slug, 'title':p.title, 'date': p.creation_date, 'in_toc': p.in_toc }) t = p.theme_path if not t in themes_to_copy: themes_to_copy.append(t) # wipe destination. self._wipe_www_page(slug) pg_content = p.generate() #generate page # add atom entry try: cdd = datetime.strptime(p.creation_date, '%Y-%m-%d') # feed.add needs the dat in datetime format except: print "Wrong date format in page '%s'. It should be YYYY-MM-DD."%p.slug print "Site Generation stopped!! correct the date and generate again." self._wipe_www_dir() sys.exit() feed.add(title= p.title, content=pg_content, content_type="html", author=p.author, url=os.path.join( self.site_config['site_url'],"atom.xml") , updated=cdd) # copy themes wthemesdir = os.path.join(self.dirs['www'],"themes") os.mkdir(wthemesdir) for d in themes_to_copy: dname = os.path.split(d)[1] destpath = os.path.join(wthemesdir, dname) shutil.copytree(d, destpath) # delete tpl files ttr = glob.glob(os.path.join(destpath,"*tpl")) for f in ttr: os.remove(f) # write atom file atomfile= codecs.open(os.path.join(self.dirs['www'],"atom.xml"), "w", encoding="utf-8", errors="xmlcharrefreplace") atomfile.write(feed.to_string()) atomfile.close() # create front page/s #print "generated_page_info for gf ",generated_page_info ff = self.site_config['fixed_frontpage'] if ff != None and ff != '': self._set_fixed_frontpage(ff) else: self.generate_front(generated_page_info) self._generate_site_map(generated_page_info)
[ "def", "generate", "(", "self", ")", ":", "if", "self", ".", "_dirs", "[", "'base'", "]", "==", "None", "or", "not", "self", ".", "_tree_ready", ":", "#there's NO base here or up the chain", "raise", "ValueError", "#cannot generate!", "# wipe www dir & recreate", "self", ".", "_wipe_www_dir", "(", ")", "#copy common files", "#shutil.copytree(self.dirs['common'],", "# os.path.join(self.dirs['www'],\"common\"))", "slist", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "self", ".", "dirs", "[", "'common'", "]", ",", "\"*\"", ")", ")", "for", "fo", "in", "slist", ":", "rfn", "=", "os", ".", "path", ".", "split", "(", "fo", ")", "[", "1", "]", "if", "os", ".", "path", ".", "isdir", "(", "fo", ")", ":", "shutil", ".", "copytree", "(", "fo", ",", "os", ".", "path", ".", "join", "(", "self", ".", "dirs", "[", "'www'", "]", ",", "rfn", ")", ")", "else", ":", "shutil", ".", "copy", "(", "fo", ",", "self", ".", "dirs", "[", "'www'", "]", ")", "# init atom file", "title", "=", "self", ".", "site_config", "[", "'site_title'", "]", "if", "title", "==", "''", ":", "title", "=", "\"<No title>\"", "feed", "=", "AtomFeed", "(", "title", "=", "title", ",", "subtitle", "=", "self", ".", "site_config", "[", "'site_subtitle'", "]", ",", "feed_url", "=", "os", ".", "path", ".", "join", "(", "self", ".", "site_config", "[", "'site_url'", "]", ",", "\"atom.xml\"", ")", ",", "url", "=", "self", ".", "site_config", "[", "'site_url'", "]", ",", "author", "=", "self", ".", "site_config", "[", "'default_author'", "]", ")", "themes_to_copy", "=", "[", "]", "# full paths!", "generated_page_info", "=", "[", "]", "for", "slug", "in", "self", ".", "_pages_to_generate", "(", ")", ":", "#this list of pages is in reverse chrono order", "p", "=", "s2page", ".", "Page", "(", "self", ",", "slug", ",", "isslug", "=", "True", ")", "generated_page_info", ".", "append", "(", "{", "'slug'", ":", "p", ".", "slug", ",", "'title'", ":", "p", ".", "title", ",", "'date'", ":", "p", ".", "creation_date", ",", "'in_toc'", ":", "p", ".", "in_toc", "}", ")", "t", "=", "p", ".", "theme_path", "if", "not", "t", "in", "themes_to_copy", ":", "themes_to_copy", ".", "append", "(", "t", ")", "# wipe destination.", "self", ".", "_wipe_www_page", "(", "slug", ")", "pg_content", "=", "p", ".", "generate", "(", ")", "#generate page", "# add atom entry", "try", ":", "cdd", "=", "datetime", ".", "strptime", "(", "p", ".", "creation_date", ",", "'%Y-%m-%d'", ")", "# feed.add needs the dat in datetime format", "except", ":", "print", "\"Wrong date format in page '%s'. It should be YYYY-MM-DD.\"", "%", "p", ".", "slug", "print", "\"Site Generation stopped!! 
correct the date and generate again.\"", "self", ".", "_wipe_www_dir", "(", ")", "sys", ".", "exit", "(", ")", "feed", ".", "add", "(", "title", "=", "p", ".", "title", ",", "content", "=", "pg_content", ",", "content_type", "=", "\"html\"", ",", "author", "=", "p", ".", "author", ",", "url", "=", "os", ".", "path", ".", "join", "(", "self", ".", "site_config", "[", "'site_url'", "]", ",", "\"atom.xml\"", ")", ",", "updated", "=", "cdd", ")", "# copy themes", "wthemesdir", "=", "os", ".", "path", ".", "join", "(", "self", ".", "dirs", "[", "'www'", "]", ",", "\"themes\"", ")", "os", ".", "mkdir", "(", "wthemesdir", ")", "for", "d", "in", "themes_to_copy", ":", "dname", "=", "os", ".", "path", ".", "split", "(", "d", ")", "[", "1", "]", "destpath", "=", "os", ".", "path", ".", "join", "(", "wthemesdir", ",", "dname", ")", "shutil", ".", "copytree", "(", "d", ",", "destpath", ")", "# delete tpl files", "ttr", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "destpath", ",", "\"*tpl\"", ")", ")", "for", "f", "in", "ttr", ":", "os", ".", "remove", "(", "f", ")", "# write atom file", "atomfile", "=", "codecs", ".", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "dirs", "[", "'www'", "]", ",", "\"atom.xml\"", ")", ",", "\"w\"", ",", "encoding", "=", "\"utf-8\"", ",", "errors", "=", "\"xmlcharrefreplace\"", ")", "atomfile", ".", "write", "(", "feed", ".", "to_string", "(", ")", ")", "atomfile", ".", "close", "(", ")", "# create front page/s", "#print \"generated_page_info for gf \",generated_page_info", "ff", "=", "self", ".", "site_config", "[", "'fixed_frontpage'", "]", "if", "ff", "!=", "None", "and", "ff", "!=", "''", ":", "self", ".", "_set_fixed_frontpage", "(", "ff", ")", "else", ":", "self", ".", "generate_front", "(", "generated_page_info", ")", "self", ".", "_generate_site_map", "(", "generated_page_info", ")" ]
41.111111
0.011876
def generic_insert_module(module_name, args, **kwargs):
    """
    In general we have an initial template and then insert new data,
    so we don't repeat the schema for each module
    :param module_name: String with module name
    :param **kwargs: Args to be rendered in template
    """
    file = create_or_open(
        '{}.py'.format(module_name),
        os.path.join(
            BASE_TEMPLATES_DIR,
            '{}_initial.py.tmpl'.format(module_name)
        ),
        args
    )
    render_template_with_args_in_file(
        file,
        os.path.join(
            BASE_TEMPLATES_DIR,
            '{}.py.tmpl'.format(module_name)
        ),
        **kwargs
    )
    file.close()
[ "def", "generic_insert_module", "(", "module_name", ",", "args", ",", "*", "*", "kwargs", ")", ":", "file", "=", "create_or_open", "(", "'{}.py'", ".", "format", "(", "module_name", ")", ",", "os", ".", "path", ".", "join", "(", "BASE_TEMPLATES_DIR", ",", "'{}_initial.py.tmpl'", ".", "format", "(", "module_name", ")", ")", ",", "args", ")", "render_template_with_args_in_file", "(", "file", ",", "os", ".", "path", ".", "join", "(", "BASE_TEMPLATES_DIR", ",", "'{}.py.tmpl'", ".", "format", "(", "module_name", ")", ")", ",", "*", "*", "kwargs", ")", "file", ".", "close", "(", ")" ]
28.541667
0.012712
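The `generic_insert_module` record above seeds a module file from a `<name>_initial` template and then renders a second template with the caller's keyword arguments. The helpers `create_or_open` and `render_template_with_args_in_file` are not shown in this record, so the sketch below approximates the same two-step flow with only the standard library; the template directory and file names are illustrative assumptions.

import os
from string import Template

def insert_module_sketch(module_name, templates_dir, **kwargs):
    # seed the file from the "<name>_initial" template, then append the body
    # template rendered with the caller's keyword arguments
    initial_path = os.path.join(templates_dir, "{}_initial.py.tmpl".format(module_name))
    body_path = os.path.join(templates_dir, "{}.py.tmpl".format(module_name))
    with open(initial_path) as f:
        header = Template(f.read()).safe_substitute(**kwargs)
    with open(body_path) as f:
        body = Template(f.read()).safe_substitute(**kwargs)
    with open("{}.py".format(module_name), "w") as out:
        out.write(header)
        out.write(body)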
def calculate_infixop(self, node, previous, next_node): """Create new node for infixop""" previous_position = (previous.last_line, previous.last_col - 1) position = (next_node.first_line, next_node.first_col + 1) possible = [] for ch in OPERATORS[node.__class__]: try: pos = self.operators[ch].find_previous(position) if previous_position < pos[1] < position: possible.append(pos) except KeyError: pass if not possible: raise ValueError("not a single {} between {} and {}".format( OPERATORS[node.__class__], previous_position, position)) return NodeWithPosition( *min(possible, key=lambda x: tuple(map(sub, position, x[0]))) )
[ "def", "calculate_infixop", "(", "self", ",", "node", ",", "previous", ",", "next_node", ")", ":", "previous_position", "=", "(", "previous", ".", "last_line", ",", "previous", ".", "last_col", "-", "1", ")", "position", "=", "(", "next_node", ".", "first_line", ",", "next_node", ".", "first_col", "+", "1", ")", "possible", "=", "[", "]", "for", "ch", "in", "OPERATORS", "[", "node", ".", "__class__", "]", ":", "try", ":", "pos", "=", "self", ".", "operators", "[", "ch", "]", ".", "find_previous", "(", "position", ")", "if", "previous_position", "<", "pos", "[", "1", "]", "<", "position", ":", "possible", ".", "append", "(", "pos", ")", "except", "KeyError", ":", "pass", "if", "not", "possible", ":", "raise", "ValueError", "(", "\"not a single {} between {} and {}\"", ".", "format", "(", "OPERATORS", "[", "node", ".", "__class__", "]", ",", "previous_position", ",", "position", ")", ")", "return", "NodeWithPosition", "(", "*", "min", "(", "possible", ",", "key", "=", "lambda", "x", ":", "tuple", "(", "map", "(", "sub", ",", "position", ",", "x", "[", "0", "]", ")", ")", ")", ")" ]
40.25
0.002427
def nla_get_u64(nla): """Return value of 64 bit integer attribute as an int(). https://github.com/thom311/libnl/blob/libnl3_2_25/lib/attr.c#L649 Positional arguments: nla -- 64 bit integer attribute (nlattr class instance). Returns: Payload as an int(). """ tmp = c_uint64(0) if nla and nla_len(nla) >= sizeof(tmp): tmp = c_uint64.from_buffer(nla_data(nla)[:SIZEOF_U64]) return int(tmp.value)
[ "def", "nla_get_u64", "(", "nla", ")", ":", "tmp", "=", "c_uint64", "(", "0", ")", "if", "nla", "and", "nla_len", "(", "nla", ")", ">=", "sizeof", "(", "tmp", ")", ":", "tmp", "=", "c_uint64", ".", "from_buffer", "(", "nla_data", "(", "nla", ")", "[", ":", "SIZEOF_U64", "]", ")", "return", "int", "(", "tmp", ".", "value", ")" ]
28.6
0.002257
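The `nla_get_u64` record converts the first eight payload bytes of a netlink attribute into an integer via ctypes. A standalone illustration of just that conversion, with a made-up payload buffer standing in for `nla_data(nla)`:

# Reinterpret the first 8 payload bytes as a native-endian unsigned 64-bit int.
from ctypes import c_uint64, sizeof

payload = bytearray(b"\x01\x00\x00\x00\x00\x00\x00\x00extra")  # pretend attribute payload
if len(payload) >= sizeof(c_uint64):
    value = int(c_uint64.from_buffer(payload[:8]).value)
else:
    value = 0
print(value)  # 1 on little-endian hosts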
def set_min_level_to_mail(self, level):
    """Allow changing the mail level after creation
    """
    self.min_log_level_to_mail = level
    handler_class = AlkiviEmailHandler
    self._set_min_level(handler_class, level)
[ "def", "set_min_level_to_mail", "(", "self", ",", "level", ")", ":", "self", ".", "min_log_level_to_mail", "=", "level", "handler_class", "=", "AlkiviEmailHandler", "self", ".", "_set_min_level", "(", "handler_class", ",", "level", ")" ]
39.166667
0.008333
async def open(self): """Register with the publisher.""" self.store.register(self) while not self.finished: message = await self.messages.get() await self.publish(message)
[ "async", "def", "open", "(", "self", ")", ":", "self", ".", "store", ".", "register", "(", "self", ")", "while", "not", "self", ".", "finished", ":", "message", "=", "await", "self", ".", "messages", ".", "get", "(", ")", "await", "self", ".", "publish", "(", "message", ")" ]
36.5
0.008929
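The `open` coroutine above registers a subscriber and then drains its message queue until `finished` is set. A minimal, self-contained asyncio sketch of the same drain loop, with a `None` sentinel standing in for the `finished` flag and a plain list standing in for `publish`:

import asyncio

async def consumer(queue, received):
    while True:
        msg = await queue.get()
        if msg is None:          # sentinel ends the loop (stands in for `finished`)
            break
        received.append(msg)     # stands in for `await self.publish(message)`

async def main():
    queue, received = asyncio.Queue(), []
    task = asyncio.create_task(consumer(queue, received))
    for m in ("a", "b", None):
        await queue.put(m)
    await task
    print(received)              # ['a', 'b']

asyncio.run(main())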
def writefits(self, filename, clobber=True, trimzero=True, binned=False, precision=None, hkeys=None): """Write the spectrum to a FITS table. Primary header in EXT 0. ``FILENAME``, ``ORIGIN``, and any extra keyword(s) from ``hkeys`` will also be added. Table header and data are in EXT 1. The table has 2 columns, i.e., ``WAVELENGTH`` and ``FLUX``. Data are stored in user units. Its header also will have these additional keywords: * ``EXPR`` - Description of the spectrum. * ``TDISP1`` and ``TDISP2`` - Columns display format, always "G15.7". * ``GRFTABLE`` and ``CMPTABLE`` - Graph and component table names to use with associated observation mode. These are only added if applicable. If data is already double-precision but user explicitly set output precision to single, ``pysynphot.spectrum.syn_epsilon`` defines the allowed minimum wavelength separation. This limit (:math:`3.2 \\times 10^{-4}`) was taken from IRAF STSDAS SYNPHOT FAQ. Values equal or smaller than this limit are considered as the same, and duplicates are ignored, resulting in data loss. In the way that this comparison is coded, when such precision clash happens, even when no duplicates are detected, the last row is always omitted (also data loss). Therefore, it is *not* recommended for user to force single-precision when the data is in double-precision. Parameters ---------- filename : str Output filename. clobber : bool Overwrite existing file. Default is `True`. trimzero : bool Trim off duplicate rows with flux values of zero from both ends of the spectrum. This keeps one row of zero-flux at each end, if it exists; However, it does not add a zero-flux row if it does not. Default is `True`. binned : bool Write ``self.binwave`` and ``self.binflux`` (binned) dataset, instead of ``self.wave`` and ``self.flux`` (native). Using this option when object does not have binned data will cause an exception to be raised. Default is `False`. precision : {'s', 'd', `None`} Write data out in single (``'s'``) or double (``'d'``) precision. Default is `None`, which will enforce native precision from ``self.flux``. hkeys : dict Additional keyword(s) to be added to primary FITS header, in the format of ``{keyword:(value,comment)}``. """ pcodes={'d':'D', 's':'E'} if precision is None: precision = self.flux.dtype.char _precision = precision.lower()[0] pcodes = {'d':'D','s':'E','f':'E'} if clobber: try: os.remove(filename) except OSError: pass if binned: wave = self.binwave flux = self.binflux else: wave = self.wave flux = self.flux # Add a check for single/double precision clash, so # that if written out in single precision, the wavelength table # will still be sorted with no duplicates # The value of epsilon is taken from the Synphot FAQ. 
if wave.dtype == N.float64 and _precision == 's': idx = N.where(abs(wave[1:]-wave[:-1]) > syn_epsilon) else: idx = N.where(wave) #=> idx=[:] wave = wave[idx] flux = flux[idx] first, last = 0, len(flux) if trimzero: # Keep one zero at each end nz = flux.nonzero()[0] try: first = max(nz[0] - 1, first) last = min(nz[-1] + 2, last) except IndexError: pass # Construct the columns and HDUlist cw = pyfits.Column(name='WAVELENGTH', array=wave[first:last], unit=self.waveunits.name, format=pcodes[_precision]) cf = pyfits.Column(name='FLUX', array=flux[first:last], unit=self.fluxunits.name, format=pcodes[_precision]) # Make the primary header hdu = pyfits.PrimaryHDU() hdulist = pyfits.HDUList([hdu]) # User-provided keys are written to the primary header # so are filename and origin bkeys = dict(filename=(os.path.basename(filename), 'name of file'), origin=('pysynphot', 'Version (%s, %s)' % (__version__, __svn_revision__))) # User-values if present may override default values if hkeys is not None: bkeys.update(hkeys) # Now update the primary header for key, val in bkeys.items(): hdu.header[key] = val # Make the extension HDU cols = pyfits.ColDefs([cw, cf]) hdu = pyfits.BinTableHDU.from_columns(cols) # There are some standard keywords that should be added # to the extension header. bkeys = dict(expr=(str(self), 'pysyn expression'), tdisp1=('G15.7',), tdisp2=('G15.7',)) try: bkeys['grftable'] = (self.bandpass.obsmode.gtname,) bkeys['cmptable'] = (self.bandpass.obsmode.ctname,) except AttributeError: pass # Not all spectra have these for key, val in bkeys.items(): hdu.header[key] = val # Add the header to the list, and write the file hdulist.append(hdu) hdulist.writeto(filename)
[ "def", "writefits", "(", "self", ",", "filename", ",", "clobber", "=", "True", ",", "trimzero", "=", "True", ",", "binned", "=", "False", ",", "precision", "=", "None", ",", "hkeys", "=", "None", ")", ":", "pcodes", "=", "{", "'d'", ":", "'D'", ",", "'s'", ":", "'E'", "}", "if", "precision", "is", "None", ":", "precision", "=", "self", ".", "flux", ".", "dtype", ".", "char", "_precision", "=", "precision", ".", "lower", "(", ")", "[", "0", "]", "pcodes", "=", "{", "'d'", ":", "'D'", ",", "'s'", ":", "'E'", ",", "'f'", ":", "'E'", "}", "if", "clobber", ":", "try", ":", "os", ".", "remove", "(", "filename", ")", "except", "OSError", ":", "pass", "if", "binned", ":", "wave", "=", "self", ".", "binwave", "flux", "=", "self", ".", "binflux", "else", ":", "wave", "=", "self", ".", "wave", "flux", "=", "self", ".", "flux", "# Add a check for single/double precision clash, so", "# that if written out in single precision, the wavelength table", "# will still be sorted with no duplicates", "# The value of epsilon is taken from the Synphot FAQ.", "if", "wave", ".", "dtype", "==", "N", ".", "float64", "and", "_precision", "==", "'s'", ":", "idx", "=", "N", ".", "where", "(", "abs", "(", "wave", "[", "1", ":", "]", "-", "wave", "[", ":", "-", "1", "]", ")", ">", "syn_epsilon", ")", "else", ":", "idx", "=", "N", ".", "where", "(", "wave", ")", "#=> idx=[:]", "wave", "=", "wave", "[", "idx", "]", "flux", "=", "flux", "[", "idx", "]", "first", ",", "last", "=", "0", ",", "len", "(", "flux", ")", "if", "trimzero", ":", "# Keep one zero at each end", "nz", "=", "flux", ".", "nonzero", "(", ")", "[", "0", "]", "try", ":", "first", "=", "max", "(", "nz", "[", "0", "]", "-", "1", ",", "first", ")", "last", "=", "min", "(", "nz", "[", "-", "1", "]", "+", "2", ",", "last", ")", "except", "IndexError", ":", "pass", "# Construct the columns and HDUlist", "cw", "=", "pyfits", ".", "Column", "(", "name", "=", "'WAVELENGTH'", ",", "array", "=", "wave", "[", "first", ":", "last", "]", ",", "unit", "=", "self", ".", "waveunits", ".", "name", ",", "format", "=", "pcodes", "[", "_precision", "]", ")", "cf", "=", "pyfits", ".", "Column", "(", "name", "=", "'FLUX'", ",", "array", "=", "flux", "[", "first", ":", "last", "]", ",", "unit", "=", "self", ".", "fluxunits", ".", "name", ",", "format", "=", "pcodes", "[", "_precision", "]", ")", "# Make the primary header", "hdu", "=", "pyfits", ".", "PrimaryHDU", "(", ")", "hdulist", "=", "pyfits", ".", "HDUList", "(", "[", "hdu", "]", ")", "# User-provided keys are written to the primary header", "# so are filename and origin", "bkeys", "=", "dict", "(", "filename", "=", "(", "os", ".", "path", ".", "basename", "(", "filename", ")", ",", "'name of file'", ")", ",", "origin", "=", "(", "'pysynphot'", ",", "'Version (%s, %s)'", "%", "(", "__version__", ",", "__svn_revision__", ")", ")", ")", "# User-values if present may override default values", "if", "hkeys", "is", "not", "None", ":", "bkeys", ".", "update", "(", "hkeys", ")", "# Now update the primary header", "for", "key", ",", "val", "in", "bkeys", ".", "items", "(", ")", ":", "hdu", ".", "header", "[", "key", "]", "=", "val", "# Make the extension HDU", "cols", "=", "pyfits", ".", "ColDefs", "(", "[", "cw", ",", "cf", "]", ")", "hdu", "=", "pyfits", ".", "BinTableHDU", ".", "from_columns", "(", "cols", ")", "# There are some standard keywords that should be added", "# to the extension header.", "bkeys", "=", "dict", "(", "expr", "=", "(", "str", "(", "self", ")", ",", "'pysyn expression'", ")", ",", "tdisp1", "=", "(", 
"'G15.7'", ",", ")", ",", "tdisp2", "=", "(", "'G15.7'", ",", ")", ")", "try", ":", "bkeys", "[", "'grftable'", "]", "=", "(", "self", ".", "bandpass", ".", "obsmode", ".", "gtname", ",", ")", "bkeys", "[", "'cmptable'", "]", "=", "(", "self", ".", "bandpass", ".", "obsmode", ".", "ctname", ",", ")", "except", "AttributeError", ":", "pass", "# Not all spectra have these", "for", "key", ",", "val", "in", "bkeys", ".", "items", "(", ")", ":", "hdu", ".", "header", "[", "key", "]", "=", "val", "# Add the header to the list, and write the file", "hdulist", ".", "append", "(", "hdu", ")", "hdulist", ".", "writeto", "(", "filename", ")" ]
37.013072
0.002236
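The heart of `writefits` is building two FITS table columns and writing a primary HDU plus a binary table extension. A short sketch of those steps using `astropy.io.fits` (the maintained successor to the `pyfits` module used in the record); the column units, header keyword, and output path are illustrative:

import numpy as np
from astropy.io import fits

wave = np.linspace(1000.0, 11000.0, 100)
flux = np.ones_like(wave)

# Two double-precision ("D") columns, mirroring WAVELENGTH/FLUX in the record.
cw = fits.Column(name="WAVELENGTH", array=wave, unit="angstrom", format="D")
cf = fits.Column(name="FLUX", array=flux, unit="flam", format="D")
table = fits.BinTableHDU.from_columns(fits.ColDefs([cw, cf]))
table.header["EXPR"] = "flat spectrum"  # extension-level metadata, as in the record

fits.HDUList([fits.PrimaryHDU(), table]).writeto("spectrum.fits", overwrite=True)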
def upload_from_shared_memory(self, location, bbox, order='F', cutout_bbox=None): """ Upload from a shared memory array. https://github.com/seung-lab/cloud-volume/wiki/Advanced-Topic:-Shared-Memory tip: If you want to use slice notation, np.s_[...] will help in a pinch. MEMORY LIFECYCLE WARNING: You are responsible for managing the lifecycle of the shared memory. CloudVolume will merely read from it, it will not unlink the memory automatically. To fully clear the shared memory you must unlink the location and close any mmap file handles. You can use `cloudvolume.sharedmemory.unlink(...)` to help you unlink the shared memory file. EXPERT MODE WARNING: If you aren't sure you need this function (e.g. to relieve memory pressure or improve performance in some way) you should use the ordinary upload method of vol[:] = img. A typical use case is transferring arrays between different processes without making copies. For reference, this feature was created for uploading a 62 GB array that originated in Julia. Required: location: (str) Shared memory location e.g. 'cloudvolume-shm-RANDOM-STRING' This typically corresponds to a file in `/dev/shm` or `/run/shm/`. It can also be a file if you're using that for mmap. bbox: (Bbox or list of slices) the bounding box the shared array represents. For instance if you have a 1024x1024x128 volume and you're uploading only a 512x512x64 corner touching the origin, your Bbox would be `Bbox( (0,0,0), (512,512,64) )`. Optional: cutout_bbox: (bbox or list of slices) If you only want to upload a section of the array, give the bbox in volume coordinates (not image coordinates) that should be cut out. For example, if you only want to upload 256x256x32 of the upper rightmost corner of the above example but the entire 512x512x64 array is stored in memory, you would provide: `Bbox( (256, 256, 32), (512, 512, 64) )` By default, just upload the entire image. Returns: void """ def tobbox(x): if type(x) == Bbox: return x return Bbox.from_slices(x) bbox = tobbox(bbox) cutout_bbox = tobbox(cutout_bbox) if cutout_bbox else bbox.clone() if not bbox.contains_bbox(cutout_bbox): raise exceptions.AlignmentError(""" The provided cutout is not wholly contained in the given array. Bbox: {} Cutout: {} """.format(bbox, cutout_bbox)) if self.autocrop: cutout_bbox = Bbox.intersection(cutout_bbox, self.bounds) if cutout_bbox.subvoxel(): return shape = list(bbox.size3()) + [ self.num_channels ] mmap_handle, shared_image = sharedmemory.ndarray( location=location, shape=shape, dtype=self.dtype, order=order, readonly=True) delta_box = cutout_bbox.clone() - bbox.minpt cutout_image = shared_image[ delta_box.to_slices() ] txrx.upload_image(self, cutout_image, cutout_bbox.minpt, parallel=self.parallel, manual_shared_memory_id=location, manual_shared_memory_bbox=bbox, manual_shared_memory_order=order) mmap_handle.close()
[ "def", "upload_from_shared_memory", "(", "self", ",", "location", ",", "bbox", ",", "order", "=", "'F'", ",", "cutout_bbox", "=", "None", ")", ":", "def", "tobbox", "(", "x", ")", ":", "if", "type", "(", "x", ")", "==", "Bbox", ":", "return", "x", "return", "Bbox", ".", "from_slices", "(", "x", ")", "bbox", "=", "tobbox", "(", "bbox", ")", "cutout_bbox", "=", "tobbox", "(", "cutout_bbox", ")", "if", "cutout_bbox", "else", "bbox", ".", "clone", "(", ")", "if", "not", "bbox", ".", "contains_bbox", "(", "cutout_bbox", ")", ":", "raise", "exceptions", ".", "AlignmentError", "(", "\"\"\"\n The provided cutout is not wholly contained in the given array. \n Bbox: {}\n Cutout: {}\n \"\"\"", ".", "format", "(", "bbox", ",", "cutout_bbox", ")", ")", "if", "self", ".", "autocrop", ":", "cutout_bbox", "=", "Bbox", ".", "intersection", "(", "cutout_bbox", ",", "self", ".", "bounds", ")", "if", "cutout_bbox", ".", "subvoxel", "(", ")", ":", "return", "shape", "=", "list", "(", "bbox", ".", "size3", "(", ")", ")", "+", "[", "self", ".", "num_channels", "]", "mmap_handle", ",", "shared_image", "=", "sharedmemory", ".", "ndarray", "(", "location", "=", "location", ",", "shape", "=", "shape", ",", "dtype", "=", "self", ".", "dtype", ",", "order", "=", "order", ",", "readonly", "=", "True", ")", "delta_box", "=", "cutout_bbox", ".", "clone", "(", ")", "-", "bbox", ".", "minpt", "cutout_image", "=", "shared_image", "[", "delta_box", ".", "to_slices", "(", ")", "]", "txrx", ".", "upload_image", "(", "self", ",", "cutout_image", ",", "cutout_bbox", ".", "minpt", ",", "parallel", "=", "self", ".", "parallel", ",", "manual_shared_memory_id", "=", "location", ",", "manual_shared_memory_bbox", "=", "bbox", ",", "manual_shared_memory_order", "=", "order", ")", "mmap_handle", ".", "close", "(", ")" ]
45.652174
0.014916
def angle(v1,v2, cos=False): """ Find the angle between two vectors. :param cos: If True, the cosine of the angle will be returned. False by default. """ n = (norm(v1)*norm(v2)) _ = dot(v1,v2)/n return _ if cos else N.arccos(_)
[ "def", "angle", "(", "v1", ",", "v2", ",", "cos", "=", "False", ")", ":", "n", "=", "(", "norm", "(", "v1", ")", "*", "norm", "(", "v2", ")", ")", "_", "=", "dot", "(", "v1", ",", "v2", ")", "/", "n", "return", "_", "if", "cos", "else", "N", ".", "arccos", "(", "_", ")" ]
25.1
0.011538
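A quick numeric check of the formula used by `angle`, i.e. arccos(v1·v2 / (|v1||v2|)), written directly with NumPy:

import numpy as np

v1, v2 = np.array([1.0, 0.0]), np.array([1.0, 1.0])
cos_theta = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
print(np.degrees(np.arccos(cos_theta)))  # 45.0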
def to_csc(self): """Convert Dataset to scipy's Compressed Sparse Column matrix.""" self._X_train = csc_matrix(self._X_train) self._X_test = csc_matrix(self._X_test)
[ "def", "to_csc", "(", "self", ")", ":", "self", ".", "_X_train", "=", "csc_matrix", "(", "self", ".", "_X_train", ")", "self", ".", "_X_test", "=", "csc_matrix", "(", "self", ".", "_X_test", ")" ]
46.5
0.010582
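`to_csc` simply re-wraps the train/test matrices as `scipy.sparse.csc_matrix`. The conversion in isolation, showing the column-oriented storage that makes column slicing cheap (the dense array is made up):

import numpy as np
from scipy.sparse import csc_matrix

dense = np.array([[0, 1, 0], [2, 0, 3]])
sparse = csc_matrix(dense)                          # stores data column by column
print(sparse.nnz, sparse[:, 1].toarray().ravel())   # 3 [1 0]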
def search_user_group_for_facets(self, **kwargs): # noqa: E501 """Lists the values of one or more facets over the customer's user groups # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.search_user_group_for_facets(async_req=True) >>> result = thread.get() :param async_req bool :param FacetsSearchRequestContainer body: :return: ResponseContainerFacetsResponseContainer If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.search_user_group_for_facets_with_http_info(**kwargs) # noqa: E501 else: (data) = self.search_user_group_for_facets_with_http_info(**kwargs) # noqa: E501 return data
[ "def", "search_user_group_for_facets", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "search_user_group_for_facets_with_http_info", "(", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "search_user_group_for_facets_with_http_info", "(", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
46
0.002028
def generate_signing_key(args): """ Generate an ECDSA signing key for signing secure boot images (post-bootloader) """ if os.path.exists(args.keyfile): raise esptool.FatalError("ERROR: Key file %s already exists" % args.keyfile) sk = ecdsa.SigningKey.generate(curve=ecdsa.NIST256p) with open(args.keyfile, "wb") as f: f.write(sk.to_pem()) print("ECDSA NIST256p private key in PEM format written to %s" % args.keyfile)
[ "def", "generate_signing_key", "(", "args", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "args", ".", "keyfile", ")", ":", "raise", "esptool", ".", "FatalError", "(", "\"ERROR: Key file %s already exists\"", "%", "args", ".", "keyfile", ")", "sk", "=", "ecdsa", ".", "SigningKey", ".", "generate", "(", "curve", "=", "ecdsa", ".", "NIST256p", ")", "with", "open", "(", "args", ".", "keyfile", ",", "\"wb\"", ")", "as", "f", ":", "f", ".", "write", "(", "sk", ".", "to_pem", "(", ")", ")", "print", "(", "\"ECDSA NIST256p private key in PEM format written to %s\"", "%", "args", ".", "keyfile", ")" ]
55.75
0.00883
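The key-generation call in `generate_signing_key` comes from the `python-ecdsa` package. The same two calls in isolation, without the file handling (printing only the first PEM line):

import ecdsa

sk = ecdsa.SigningKey.generate(curve=ecdsa.NIST256p)  # NIST P-256 signing key
pem = sk.to_pem()                                     # bytes in PEM format
print(pem.decode().splitlines()[0])                   # -----BEGIN EC PRIVATE KEY-----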
def get_day():
    """Return the weekday index requested on the command line, defaulting to today"""
    day = datetime.datetime.today().weekday()
    if len(sys.argv) == 3:
        if sys.argv[2] == "mon":
            day = 0
        elif sys.argv[2] == "tue":
            day = 1
        elif sys.argv[2] == "wed":
            day = 2
        elif sys.argv[2] == "thur":
            day = 3
        elif sys.argv[2] == "fri":
            day = 4
        else:
            day = 5
    if day > 4:
        print("There is no information about the menu today.")
        exit(5)
    return day
[ "def", "get_day", "(", ")", ":", "day", "=", "datetime", ".", "datetime", ".", "today", "(", ")", ".", "weekday", "(", ")", "if", "len", "(", "sys", ".", "argv", ")", "==", "3", ":", "if", "sys", ".", "argv", "[", "2", "]", "==", "\"mon\"", ":", "day", "=", "0", "elif", "sys", ".", "argv", "[", "2", "]", "==", "\"tue\"", ":", "day", "=", "1", "elif", "sys", ".", "argv", "[", "2", "]", "==", "\"wed\"", ":", "day", "=", "2", "elif", "sys", ".", "argv", "[", "2", "]", "==", "\"thur\"", ":", "day", "=", "3", "elif", "sys", ".", "argv", "[", "2", "]", "==", "\"fri\"", ":", "day", "=", "4", "else", ":", "day", "=", "5", "if", "day", ">", "4", ":", "print", "(", "\"There is no information about the menu today.\"", ")", "exit", "(", "5", ")", "return", "day" ]
26.75
0.001805
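The chain of `elif` branches in `get_day` is a fixed mapping from command-line strings to weekday indices. The same mapping expressed as a lookup table, in a small sketch that takes `argv` as a parameter instead of reading `sys.argv`; note the original treats any unrecognized value as day 5:

import datetime

DAYS = {"mon": 0, "tue": 1, "wed": 2, "thur": 3, "fri": 4}

def pick_day(argv):
    day = datetime.datetime.today().weekday()
    if len(argv) == 3:
        day = DAYS.get(argv[2], 5)  # unknown names fall through to 5, as in the original
    return day

print(pick_day(["prog", "menu", "wed"]))  # 2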
def _IAC_parser(self, buf, network_reader, network_writer, connection): """ Processes and removes any Telnet commands from the buffer. :param buf: buffer :returns: buffer minus Telnet commands """ skip_to = 0 while True: # Locate an IAC to process iac_loc = buf.find(IAC, skip_to) if iac_loc < 0: break # Get the TELNET command iac_cmd = bytearray([IAC]) try: iac_cmd.append(buf[iac_loc + 1]) except IndexError: d = yield from network_reader.read(1) buf.extend(d) iac_cmd.append(buf[iac_loc + 1]) # Is this just a 2-byte TELNET command? if iac_cmd[1] not in [WILL, WONT, DO, DONT, SB]: if iac_cmd[1] == AYT: log.debug("Telnet server received Are-You-There (AYT)") network_writer.write(b'\r\nYour Are-You-There received. I am here.\r\n') elif iac_cmd[1] == IAC: # It's data, not an IAC iac_cmd.pop() # This prevents the 0xff from being # interrupted as yet another IAC skip_to = iac_loc + 1 log.debug("Received IAC IAC") elif iac_cmd[1] == NOP: pass else: log.debug("Unhandled telnet command: " "{0:#x} {1:#x}".format(*iac_cmd)) elif iac_cmd[1] == SB: # starts negotiation commands negotiation = [] for pos in range(2, self.MAX_NEGOTIATION_READ): op = yield from self._read(iac_cmd, buf, iac_loc + pos, network_reader) negotiation.append(op) if op == SE: # ends negotiation commands break # SE command is followed by IAC, remove the last two operations from stack self._negotiate(negotiation[0:-2], connection) # This must be a 3-byte TELNET command else: try: iac_cmd.append(buf[iac_loc + 2]) except IndexError: d = yield from network_reader.read(1) buf.extend(d) iac_cmd.append(buf[iac_loc + 2]) # We do ECHO, SGA, and BINARY. Period. if iac_cmd[1] == DO: if iac_cmd[2] not in [ECHO, SGA, BINARY]: network_writer.write(bytes([IAC, WONT, iac_cmd[2]])) log.debug("Telnet WON'T {:#x}".format(iac_cmd[2])) else: if iac_cmd[2] == SGA: if self._binary: network_writer.write(bytes([IAC, WILL, iac_cmd[2]])) else: network_writer.write(bytes([IAC, WONT, iac_cmd[2]])) log.debug("Telnet WON'T {:#x}".format(iac_cmd[2])) elif iac_cmd[1] == DONT: log.debug("Unhandled DONT telnet command: " "{0:#x} {1:#x} {2:#x}".format(*iac_cmd)) elif iac_cmd[1] == WILL: if iac_cmd[2] not in [BINARY, NAWS]: log.debug("Unhandled WILL telnet command: " "{0:#x} {1:#x} {2:#x}".format(*iac_cmd)) elif iac_cmd[1] == WONT: log.debug("Unhandled WONT telnet command: " "{0:#x} {1:#x} {2:#x}".format(*iac_cmd)) else: log.debug("Unhandled telnet command: " "{0:#x} {1:#x} {2:#x}".format(*iac_cmd)) # Remove the entire TELNET command from the buffer buf = buf.replace(iac_cmd, b'', 1) yield from network_writer.drain() # Return the new copy of the buffer, minus telnet commands return buf
[ "def", "_IAC_parser", "(", "self", ",", "buf", ",", "network_reader", ",", "network_writer", ",", "connection", ")", ":", "skip_to", "=", "0", "while", "True", ":", "# Locate an IAC to process", "iac_loc", "=", "buf", ".", "find", "(", "IAC", ",", "skip_to", ")", "if", "iac_loc", "<", "0", ":", "break", "# Get the TELNET command", "iac_cmd", "=", "bytearray", "(", "[", "IAC", "]", ")", "try", ":", "iac_cmd", ".", "append", "(", "buf", "[", "iac_loc", "+", "1", "]", ")", "except", "IndexError", ":", "d", "=", "yield", "from", "network_reader", ".", "read", "(", "1", ")", "buf", ".", "extend", "(", "d", ")", "iac_cmd", ".", "append", "(", "buf", "[", "iac_loc", "+", "1", "]", ")", "# Is this just a 2-byte TELNET command?", "if", "iac_cmd", "[", "1", "]", "not", "in", "[", "WILL", ",", "WONT", ",", "DO", ",", "DONT", ",", "SB", "]", ":", "if", "iac_cmd", "[", "1", "]", "==", "AYT", ":", "log", ".", "debug", "(", "\"Telnet server received Are-You-There (AYT)\"", ")", "network_writer", ".", "write", "(", "b'\\r\\nYour Are-You-There received. I am here.\\r\\n'", ")", "elif", "iac_cmd", "[", "1", "]", "==", "IAC", ":", "# It's data, not an IAC", "iac_cmd", ".", "pop", "(", ")", "# This prevents the 0xff from being", "# interrupted as yet another IAC", "skip_to", "=", "iac_loc", "+", "1", "log", ".", "debug", "(", "\"Received IAC IAC\"", ")", "elif", "iac_cmd", "[", "1", "]", "==", "NOP", ":", "pass", "else", ":", "log", ".", "debug", "(", "\"Unhandled telnet command: \"", "\"{0:#x} {1:#x}\"", ".", "format", "(", "*", "iac_cmd", ")", ")", "elif", "iac_cmd", "[", "1", "]", "==", "SB", ":", "# starts negotiation commands", "negotiation", "=", "[", "]", "for", "pos", "in", "range", "(", "2", ",", "self", ".", "MAX_NEGOTIATION_READ", ")", ":", "op", "=", "yield", "from", "self", ".", "_read", "(", "iac_cmd", ",", "buf", ",", "iac_loc", "+", "pos", ",", "network_reader", ")", "negotiation", ".", "append", "(", "op", ")", "if", "op", "==", "SE", ":", "# ends negotiation commands", "break", "# SE command is followed by IAC, remove the last two operations from stack", "self", ".", "_negotiate", "(", "negotiation", "[", "0", ":", "-", "2", "]", ",", "connection", ")", "# This must be a 3-byte TELNET command", "else", ":", "try", ":", "iac_cmd", ".", "append", "(", "buf", "[", "iac_loc", "+", "2", "]", ")", "except", "IndexError", ":", "d", "=", "yield", "from", "network_reader", ".", "read", "(", "1", ")", "buf", ".", "extend", "(", "d", ")", "iac_cmd", ".", "append", "(", "buf", "[", "iac_loc", "+", "2", "]", ")", "# We do ECHO, SGA, and BINARY. 
Period.", "if", "iac_cmd", "[", "1", "]", "==", "DO", ":", "if", "iac_cmd", "[", "2", "]", "not", "in", "[", "ECHO", ",", "SGA", ",", "BINARY", "]", ":", "network_writer", ".", "write", "(", "bytes", "(", "[", "IAC", ",", "WONT", ",", "iac_cmd", "[", "2", "]", "]", ")", ")", "log", ".", "debug", "(", "\"Telnet WON'T {:#x}\"", ".", "format", "(", "iac_cmd", "[", "2", "]", ")", ")", "else", ":", "if", "iac_cmd", "[", "2", "]", "==", "SGA", ":", "if", "self", ".", "_binary", ":", "network_writer", ".", "write", "(", "bytes", "(", "[", "IAC", ",", "WILL", ",", "iac_cmd", "[", "2", "]", "]", ")", ")", "else", ":", "network_writer", ".", "write", "(", "bytes", "(", "[", "IAC", ",", "WONT", ",", "iac_cmd", "[", "2", "]", "]", ")", ")", "log", ".", "debug", "(", "\"Telnet WON'T {:#x}\"", ".", "format", "(", "iac_cmd", "[", "2", "]", ")", ")", "elif", "iac_cmd", "[", "1", "]", "==", "DONT", ":", "log", ".", "debug", "(", "\"Unhandled DONT telnet command: \"", "\"{0:#x} {1:#x} {2:#x}\"", ".", "format", "(", "*", "iac_cmd", ")", ")", "elif", "iac_cmd", "[", "1", "]", "==", "WILL", ":", "if", "iac_cmd", "[", "2", "]", "not", "in", "[", "BINARY", ",", "NAWS", "]", ":", "log", ".", "debug", "(", "\"Unhandled WILL telnet command: \"", "\"{0:#x} {1:#x} {2:#x}\"", ".", "format", "(", "*", "iac_cmd", ")", ")", "elif", "iac_cmd", "[", "1", "]", "==", "WONT", ":", "log", ".", "debug", "(", "\"Unhandled WONT telnet command: \"", "\"{0:#x} {1:#x} {2:#x}\"", ".", "format", "(", "*", "iac_cmd", ")", ")", "else", ":", "log", ".", "debug", "(", "\"Unhandled telnet command: \"", "\"{0:#x} {1:#x} {2:#x}\"", ".", "format", "(", "*", "iac_cmd", ")", ")", "# Remove the entire TELNET command from the buffer", "buf", "=", "buf", ".", "replace", "(", "iac_cmd", ",", "b''", ",", "1", ")", "yield", "from", "network_writer", ".", "drain", "(", ")", "# Return the new copy of the buffer, minus telnet commands", "return", "buf" ]
42.375
0.002162
def search_dashboard_deleted_entities(self, **kwargs): # noqa: E501 """Search over a customer's deleted dashboards # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.search_dashboard_deleted_entities(async_req=True) >>> result = thread.get() :param async_req bool :param SortableSearchRequest body: :return: ResponseContainerPagedDashboard If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.search_dashboard_deleted_entities_with_http_info(**kwargs) # noqa: E501 else: (data) = self.search_dashboard_deleted_entities_with_http_info(**kwargs) # noqa: E501 return data
[ "def", "search_dashboard_deleted_entities", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "search_dashboard_deleted_entities_with_http_info", "(", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "search_dashboard_deleted_entities_with_http_info", "(", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
44.904762
0.002077
def DyStrData(cls, name, regx, index = 0):
    ''' Set a dynamic global value extracted from the string data of the response
    @param name: glob parameter name
    @param regx: re._pattern_type
        e.g. DyStrData("a",re.compile('123'))
    '''
    text = Web.PageSource()
    if not text:
        return
    if not isinstance(regx, re._pattern_type):
        raise Exception("DyStrData need the arg which have compiled the regular expression.")
    values = regx.findall(text)
    result = ""
    if len(values)>index:
        result = values[index]
    cls.glob.update({name:result})
[ "def", "DyStrData", "(", "cls", ",", "name", ",", "regx", ",", "index", "=", "0", ")", ":", "text", "=", "Web", ".", "PageSource", "(", ")", "if", "not", "text", ":", "return", "if", "not", "isinstance", "(", "regx", ",", "re", ".", "_pattern_type", ")", ":", "raise", "Exception", "(", "\"DyStrData need the arg which have compiled the regular expression.\"", ")", "values", "=", "regx", ".", "findall", "(", "text", ")", "result", "=", "\"\"", "if", "len", "(", "values", ")", ">", "index", ":", "result", "=", "values", "[", "index", "]", "cls", ".", "glob", ".", "update", "(", "{", "name", ":", "result", "}", ")" ]
36.777778
0.014728
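The extraction step inside `DyStrData` is just `findall` plus an index with an empty-string fallback. That step on its own, with an invented page text:

import re

text = "order id: 123, backup id: 456"
regx = re.compile(r"\d+")
values = regx.findall(text)
result = values[1] if len(values) > 1 else ""  # index=1, "" when there is no match
print(result)  # 456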
def enqueue(self, message, *, delay=None): """Enqueue a message. Parameters: message(Message): The message to enqueue. delay(int): The minimum amount of time, in milliseconds, to delay the message by. Raises: QueueNotFound: If the queue the message is being enqueued on doesn't exist. """ queue_name = message.queue_name if delay is not None: queue_name = dq_name(queue_name) message_eta = current_millis() + delay message = message.copy( queue_name=queue_name, options={ "eta": message_eta, }, ) if queue_name not in self.queues: raise QueueNotFound(queue_name) self.emit_before("enqueue", message, delay) self.queues[queue_name].put(message.encode()) self.emit_after("enqueue", message, delay) return message
[ "def", "enqueue", "(", "self", ",", "message", ",", "*", ",", "delay", "=", "None", ")", ":", "queue_name", "=", "message", ".", "queue_name", "if", "delay", "is", "not", "None", ":", "queue_name", "=", "dq_name", "(", "queue_name", ")", "message_eta", "=", "current_millis", "(", ")", "+", "delay", "message", "=", "message", ".", "copy", "(", "queue_name", "=", "queue_name", ",", "options", "=", "{", "\"eta\"", ":", "message_eta", ",", "}", ",", ")", "if", "queue_name", "not", "in", "self", ".", "queues", ":", "raise", "QueueNotFound", "(", "queue_name", ")", "self", ".", "emit_before", "(", "\"enqueue\"", ",", "message", ",", "delay", ")", "self", ".", "queues", "[", "queue_name", "]", ".", "put", "(", "message", ".", "encode", "(", ")", ")", "self", ".", "emit_after", "(", "\"enqueue\"", ",", "message", ",", "delay", ")", "return", "message" ]
31.833333
0.002033
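When `enqueue` is given a delay, it reroutes the message to a delay queue (`dq_name`) and stamps an absolute ETA in milliseconds. A sketch of just that rerouting; `dq_name` below uses a made-up suffix, since the real helper's naming convention is not shown in this record:

import time

def current_millis():
    return int(time.time() * 1000)

def dq_name(queue_name):
    # hypothetical delay-queue naming; the library's own dq_name may differ
    return queue_name + ".delayed"

queue_name, delay = "emails", 5000
target_queue = dq_name(queue_name)        # delayed messages go to the delay queue
eta = current_millis() + delay            # absolute time the message becomes due
print(target_queue, eta > current_millis())  # emails.delayed True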
def delete(self, custom_field, params={}, **options): """A specific, existing custom field can be deleted by making a DELETE request on the URL for that custom field. Returns an empty data record. Parameters ---------- custom_field : {Id} Globally unique identifier for the custom field. """ path = "/custom_fields/%s" % (custom_field) return self.client.delete(path, params, **options)
[ "def", "delete", "(", "self", ",", "custom_field", ",", "params", "=", "{", "}", ",", "*", "*", "options", ")", ":", "path", "=", "\"/custom_fields/%s\"", "%", "(", "custom_field", ")", "return", "self", ".", "client", ".", "delete", "(", "path", ",", "params", ",", "*", "*", "options", ")" ]
41
0.010846
def from_df(cls, path, df:DataFrame, dep_var:str, valid_idx:Collection[int], procs:OptTabTfms=None, cat_names:OptStrList=None, cont_names:OptStrList=None, classes:Collection=None, test_df=None, bs:int=64, val_bs:int=None, num_workers:int=defaults.cpus, dl_tfms:Optional[Collection[Callable]]=None, device:torch.device=None, collate_fn:Callable=data_collate, no_check:bool=False)->DataBunch: "Create a `DataBunch` from `df` and `valid_idx` with `dep_var`. `kwargs` are passed to `DataBunch.create`." cat_names = ifnone(cat_names, []).copy() cont_names = ifnone(cont_names, list(set(df)-set(cat_names)-{dep_var})) procs = listify(procs) src = (TabularList.from_df(df, path=path, cat_names=cat_names, cont_names=cont_names, procs=procs) .split_by_idx(valid_idx)) src = src.label_from_df(cols=dep_var) if classes is None else src.label_from_df(cols=dep_var, classes=classes) if test_df is not None: src.add_test(TabularList.from_df(test_df, cat_names=cat_names, cont_names=cont_names, processor = src.train.x.processor)) return src.databunch(path=path, bs=bs, val_bs=val_bs, num_workers=num_workers, device=device, collate_fn=collate_fn, no_check=no_check)
[ "def", "from_df", "(", "cls", ",", "path", ",", "df", ":", "DataFrame", ",", "dep_var", ":", "str", ",", "valid_idx", ":", "Collection", "[", "int", "]", ",", "procs", ":", "OptTabTfms", "=", "None", ",", "cat_names", ":", "OptStrList", "=", "None", ",", "cont_names", ":", "OptStrList", "=", "None", ",", "classes", ":", "Collection", "=", "None", ",", "test_df", "=", "None", ",", "bs", ":", "int", "=", "64", ",", "val_bs", ":", "int", "=", "None", ",", "num_workers", ":", "int", "=", "defaults", ".", "cpus", ",", "dl_tfms", ":", "Optional", "[", "Collection", "[", "Callable", "]", "]", "=", "None", ",", "device", ":", "torch", ".", "device", "=", "None", ",", "collate_fn", ":", "Callable", "=", "data_collate", ",", "no_check", ":", "bool", "=", "False", ")", "->", "DataBunch", ":", "cat_names", "=", "ifnone", "(", "cat_names", ",", "[", "]", ")", ".", "copy", "(", ")", "cont_names", "=", "ifnone", "(", "cont_names", ",", "list", "(", "set", "(", "df", ")", "-", "set", "(", "cat_names", ")", "-", "{", "dep_var", "}", ")", ")", "procs", "=", "listify", "(", "procs", ")", "src", "=", "(", "TabularList", ".", "from_df", "(", "df", ",", "path", "=", "path", ",", "cat_names", "=", "cat_names", ",", "cont_names", "=", "cont_names", ",", "procs", "=", "procs", ")", ".", "split_by_idx", "(", "valid_idx", ")", ")", "src", "=", "src", ".", "label_from_df", "(", "cols", "=", "dep_var", ")", "if", "classes", "is", "None", "else", "src", ".", "label_from_df", "(", "cols", "=", "dep_var", ",", "classes", "=", "classes", ")", "if", "test_df", "is", "not", "None", ":", "src", ".", "add_test", "(", "TabularList", ".", "from_df", "(", "test_df", ",", "cat_names", "=", "cat_names", ",", "cont_names", "=", "cont_names", ",", "processor", "=", "src", ".", "train", ".", "x", ".", "processor", ")", ")", "return", "src", ".", "databunch", "(", "path", "=", "path", ",", "bs", "=", "bs", ",", "val_bs", "=", "val_bs", ",", "num_workers", "=", "num_workers", ",", "device", "=", "device", ",", "collate_fn", "=", "collate_fn", ",", "no_check", "=", "no_check", ")" ]
91.6
0.042507
def registration_error(self, stanza): """Handle in-band registration error. [client only] :Parameters: - `stanza`: the error stanza received or `None` on timeout. :Types: - `stanza`: `pyxmpp.stanza.Stanza`""" self.lock.acquire() try: err=stanza.get_error() ae=err.xpath_eval("e:*",{"e":"jabber:iq:auth:error"}) if ae: ae=ae[0].name else: ae=err.get_condition().name raise RegistrationError("Authentication error condition: %s" % (ae,)) finally: self.lock.release()
[ "def", "registration_error", "(", "self", ",", "stanza", ")", ":", "self", ".", "lock", ".", "acquire", "(", ")", "try", ":", "err", "=", "stanza", ".", "get_error", "(", ")", "ae", "=", "err", ".", "xpath_eval", "(", "\"e:*\"", ",", "{", "\"e\"", ":", "\"jabber:iq:auth:error\"", "}", ")", "if", "ae", ":", "ae", "=", "ae", "[", "0", "]", ".", "name", "else", ":", "ae", "=", "err", ".", "get_condition", "(", ")", ".", "name", "raise", "RegistrationError", "(", "\"Authentication error condition: %s\"", "%", "(", "ae", ",", ")", ")", "finally", ":", "self", ".", "lock", ".", "release", "(", ")" ]
31.55
0.013846
def _annotate_groups(self): """ Annotate the objects belonging to separate (non-connected) graphs with individual indices. """ g = {} for x in self.metadata: g[x.id] = x idx = 0 for x in self.metadata: if not hasattr(x, 'group'): x.group = idx idx += 1 neighbors = set() for e in self.edges: if e.src == x.id: neighbors.add(e.dst) if e.dst == x.id: neighbors.add(e.src) for nb in neighbors: g[nb].group = min(x.group, getattr(g[nb], 'group', idx)) # Assign the edges to the respective groups. Both "ends" of the edge # should share the same group so just use the first object's group. for e in self.edges: e.group = g[e.src].group self._max_group = idx
[ "def", "_annotate_groups", "(", "self", ")", ":", "g", "=", "{", "}", "for", "x", "in", "self", ".", "metadata", ":", "g", "[", "x", ".", "id", "]", "=", "x", "idx", "=", "0", "for", "x", "in", "self", ".", "metadata", ":", "if", "not", "hasattr", "(", "x", ",", "'group'", ")", ":", "x", ".", "group", "=", "idx", "idx", "+=", "1", "neighbors", "=", "set", "(", ")", "for", "e", "in", "self", ".", "edges", ":", "if", "e", ".", "src", "==", "x", ".", "id", ":", "neighbors", ".", "add", "(", "e", ".", "dst", ")", "if", "e", ".", "dst", "==", "x", ".", "id", ":", "neighbors", ".", "add", "(", "e", ".", "src", ")", "for", "nb", "in", "neighbors", ":", "g", "[", "nb", "]", ".", "group", "=", "min", "(", "x", ".", "group", ",", "getattr", "(", "g", "[", "nb", "]", ",", "'group'", ",", "idx", ")", ")", "# Assign the edges to the respective groups. Both \"ends\" of the edge", "# should share the same group so just use the first object's group.", "for", "e", "in", "self", ".", "edges", ":", "e", ".", "group", "=", "g", "[", "e", ".", "src", "]", ".", "group", "self", ".", "_max_group", "=", "idx" ]
31.551724
0.002121
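`_annotate_groups` assigns a group index to every object so that objects connected by edges share an index. A plain BFS over an adjacency map is one standard way to compute such connected components; the node ids and edges below are invented for illustration:

from collections import defaultdict, deque

edges = [("a", "b"), ("c", "d")]
nodes = ["a", "b", "c", "d", "e"]

adj = defaultdict(set)
for src, dst in edges:      # undirected adjacency
    adj[src].add(dst)
    adj[dst].add(src)

group, next_group = {}, 0
for start in nodes:
    if start in group:
        continue
    queue = deque([start])  # BFS from each unvisited node
    group[start] = next_group
    while queue:
        node = queue.popleft()
        for nb in adj[node]:
            if nb not in group:
                group[nb] = next_group
                queue.append(nb)
    next_group += 1

print(group)  # {'a': 0, 'b': 0, 'c': 1, 'd': 1, 'e': 2}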
def delete_collection_mutating_webhook_configuration(self, **kwargs): # noqa: E501 """delete_collection_mutating_webhook_configuration # noqa: E501 delete collection of MutatingWebhookConfiguration # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_mutating_webhook_configuration(async_req=True) >>> result = thread.get() :param async_req bool :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. 
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_collection_mutating_webhook_configuration_with_http_info(**kwargs) # noqa: E501 else: (data) = self.delete_collection_mutating_webhook_configuration_with_http_info(**kwargs) # noqa: E501 return data
[ "def", "delete_collection_mutating_webhook_configuration", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "delete_collection_mutating_webhook_configuration_with_http_info", "(", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "delete_collection_mutating_webhook_configuration_with_http_info", "(", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
167.137931
0.00041
def flag_inner_classes(obj):
    """
    Mutates any attributes on ``obj`` which are classes, adding a link back to ``obj``.

    Adds a convenience accessor which instantiates ``obj`` and then calls its ``setup``
    method.

    Recurses on those objects as well.
    """
    for tup in class_members(obj):
        tup[1]._parent = obj
        tup[1]._parent_inst = None
        tup[1].__getattr__ = my_getattr
        flag_inner_classes(tup[1])
[ "def", "flag_inner_classes", "(", "obj", ")", ":", "for", "tup", "in", "class_members", "(", "obj", ")", ":", "tup", "[", "1", "]", ".", "_parent", "=", "obj", "tup", "[", "1", "]", ".", "_parent_inst", "=", "None", "tup", "[", "1", "]", ".", "__getattr__", "=", "my_getattr", "flag_inner_classes", "(", "tup", "[", "1", "]", ")" ]
30.357143
0.002283
def save(self): """ :return: save this team on Ariane server (create or update) """ LOGGER.debug("Team.save") post_payload = {} consolidated_osi_id = [] consolidated_app_id = [] if self.id is not None: post_payload['teamID'] = self.id if self.name is not None: post_payload['teamName'] = self.name if self.description is not None: post_payload['teamDescription'] = self.description if self.color_code is not None: post_payload['teamColorCode'] = self.color_code if self.osi_ids is not None: consolidated_osi_id = copy.deepcopy(self.osi_ids) if self.osi_2_rm is not None: for osi_2_rm in self.osi_2_rm: if osi_2_rm.id is None: osi_2_rm.sync() consolidated_osi_id.remove(osi_2_rm.id) if self.osi_2_add is not None: for osi_id_2_add in self.osi_2_add: if osi_id_2_add.id is None: osi_id_2_add.save() consolidated_osi_id.append(osi_id_2_add.id) post_payload['teamOSInstancesID'] = consolidated_osi_id if self.app_ids is not None: consolidated_app_id = copy.deepcopy(self.app_ids) if self.app_2_rm is not None: for app_2_rm in self.app_2_rm: if app_2_rm.id is None: app_2_rm.sync() consolidated_app_id.remove(app_2_rm.id) if self.app_2_add is not None: for app_id_2_add in self.app_2_add: if app_id_2_add.id is None: app_id_2_add.save() consolidated_app_id.append(app_id_2_add.id) post_payload['teamApplicationsID'] = consolidated_app_id args = {'http_operation': 'POST', 'operation_path': '', 'parameters': {'payload': json.dumps(post_payload)}} response = TeamService.requester.call(args) if response.rc != 0: LOGGER.warning( 'Team.save - Problem while saving team ' + self.name + '. Reason: ' + str(response.response_content) + '-' + str(response.error_message) + " (" + str(response.rc) + ")" ) else: self.id = response.response_content['teamID'] if self.osi_2_add is not None: for osi_2_add in self.osi_2_add: osi_2_add.sync() if self.osi_2_rm is not None: for osi_2_rm in self.osi_2_rm: osi_2_rm.sync() if self.app_2_add is not None: for app_2_add in self.app_2_add: app_2_add.sync() if self.app_2_rm is not None: for app_2_rm in self.app_2_rm: app_2_rm.sync() self.osi_2_add.clear() self.osi_2_rm.clear() self.app_2_add.clear() self.app_2_rm.clear() self.sync() return self
[ "def", "save", "(", "self", ")", ":", "LOGGER", ".", "debug", "(", "\"Team.save\"", ")", "post_payload", "=", "{", "}", "consolidated_osi_id", "=", "[", "]", "consolidated_app_id", "=", "[", "]", "if", "self", ".", "id", "is", "not", "None", ":", "post_payload", "[", "'teamID'", "]", "=", "self", ".", "id", "if", "self", ".", "name", "is", "not", "None", ":", "post_payload", "[", "'teamName'", "]", "=", "self", ".", "name", "if", "self", ".", "description", "is", "not", "None", ":", "post_payload", "[", "'teamDescription'", "]", "=", "self", ".", "description", "if", "self", ".", "color_code", "is", "not", "None", ":", "post_payload", "[", "'teamColorCode'", "]", "=", "self", ".", "color_code", "if", "self", ".", "osi_ids", "is", "not", "None", ":", "consolidated_osi_id", "=", "copy", ".", "deepcopy", "(", "self", ".", "osi_ids", ")", "if", "self", ".", "osi_2_rm", "is", "not", "None", ":", "for", "osi_2_rm", "in", "self", ".", "osi_2_rm", ":", "if", "osi_2_rm", ".", "id", "is", "None", ":", "osi_2_rm", ".", "sync", "(", ")", "consolidated_osi_id", ".", "remove", "(", "osi_2_rm", ".", "id", ")", "if", "self", ".", "osi_2_add", "is", "not", "None", ":", "for", "osi_id_2_add", "in", "self", ".", "osi_2_add", ":", "if", "osi_id_2_add", ".", "id", "is", "None", ":", "osi_id_2_add", ".", "save", "(", ")", "consolidated_osi_id", ".", "append", "(", "osi_id_2_add", ".", "id", ")", "post_payload", "[", "'teamOSInstancesID'", "]", "=", "consolidated_osi_id", "if", "self", ".", "app_ids", "is", "not", "None", ":", "consolidated_app_id", "=", "copy", ".", "deepcopy", "(", "self", ".", "app_ids", ")", "if", "self", ".", "app_2_rm", "is", "not", "None", ":", "for", "app_2_rm", "in", "self", ".", "app_2_rm", ":", "if", "app_2_rm", ".", "id", "is", "None", ":", "app_2_rm", ".", "sync", "(", ")", "consolidated_app_id", ".", "remove", "(", "app_2_rm", ".", "id", ")", "if", "self", ".", "app_2_add", "is", "not", "None", ":", "for", "app_id_2_add", "in", "self", ".", "app_2_add", ":", "if", "app_id_2_add", ".", "id", "is", "None", ":", "app_id_2_add", ".", "save", "(", ")", "consolidated_app_id", ".", "append", "(", "app_id_2_add", ".", "id", ")", "post_payload", "[", "'teamApplicationsID'", "]", "=", "consolidated_app_id", "args", "=", "{", "'http_operation'", ":", "'POST'", ",", "'operation_path'", ":", "''", ",", "'parameters'", ":", "{", "'payload'", ":", "json", ".", "dumps", "(", "post_payload", ")", "}", "}", "response", "=", "TeamService", ".", "requester", ".", "call", "(", "args", ")", "if", "response", ".", "rc", "!=", "0", ":", "LOGGER", ".", "warning", "(", "'Team.save - Problem while saving team '", "+", "self", ".", "name", "+", "'. 
Reason: '", "+", "str", "(", "response", ".", "response_content", ")", "+", "'-'", "+", "str", "(", "response", ".", "error_message", ")", "+", "\" (\"", "+", "str", "(", "response", ".", "rc", ")", "+", "\")\"", ")", "else", ":", "self", ".", "id", "=", "response", ".", "response_content", "[", "'teamID'", "]", "if", "self", ".", "osi_2_add", "is", "not", "None", ":", "for", "osi_2_add", "in", "self", ".", "osi_2_add", ":", "osi_2_add", ".", "sync", "(", ")", "if", "self", ".", "osi_2_rm", "is", "not", "None", ":", "for", "osi_2_rm", "in", "self", ".", "osi_2_rm", ":", "osi_2_rm", ".", "sync", "(", ")", "if", "self", ".", "app_2_add", "is", "not", "None", ":", "for", "app_2_add", "in", "self", ".", "app_2_add", ":", "app_2_add", ".", "sync", "(", ")", "if", "self", ".", "app_2_rm", "is", "not", "None", ":", "for", "app_2_rm", "in", "self", ".", "app_2_rm", ":", "app_2_rm", ".", "sync", "(", ")", "self", ".", "osi_2_add", ".", "clear", "(", ")", "self", ".", "osi_2_rm", ".", "clear", "(", ")", "self", ".", "app_2_add", ".", "clear", "(", ")", "self", ".", "app_2_rm", ".", "clear", "(", ")", "self", ".", "sync", "(", ")", "return", "self" ]
38.207792
0.001325
def generate_all_deps(self, target: Target): """Generate all dependencies of `target` (the target nodes).""" yield from (self.targets[dep_name] for dep_name in self.generate_dep_names(target))
[ "def", "generate_all_deps", "(", "self", ",", "target", ":", "Target", ")", ":", "yield", "from", "(", "self", ".", "targets", "[", "dep_name", "]", "for", "dep_name", "in", "self", ".", "generate_dep_names", "(", "target", ")", ")" ]
56.25
0.008772
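`generate_all_deps` delegates to a generator expression that maps dependency names onto target nodes. The same `yield from` pattern in a self-contained toy, with a dict standing in for `self.targets`:

targets = {"lib": "LIB-NODE", "app": "APP-NODE"}   # invented name -> node mapping
dep_names = ["lib", "app"]

def generate_all_deps():
    # delegate to a generator expression, exactly as the method above does
    yield from (targets[name] for name in dep_names)

print(list(generate_all_deps()))  # ['LIB-NODE', 'APP-NODE']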
def load_data(self): """Load data in bulk for each embed block.""" for embed_type in self.ids.keys(): self.load_instances(embed_type, self.ids[embed_type])
[ "def", "load_data", "(", "self", ")", ":", "for", "embed_type", "in", "self", ".", "ids", ".", "keys", "(", ")", ":", "self", ".", "load_instances", "(", "embed_type", ",", "self", ".", "ids", "[", "embed_type", "]", ")" ]
36
0.01087
def resample(sig, old=1, new=1, order=3, zero=0.):
  """
  Generic resampler based on Waring-Lagrange interpolators.

  Parameters
  ----------
  sig :
    Input signal (any iterable).
  old :
    Time duration reference (defaults to 1, allowing percentages to the
    ``new`` keyword argument). This can be a float number, or perhaps a
    Stream instance.
  new :
    Time duration that the reference will have after resampling.
    For example, if ``old = 1, new = 2``, then there will be 2 samples
    yielded for each sample from input. This can be a float number, or
    perhaps a Stream instance.
  order :
    Lagrange interpolator order. The amount of neighboring samples to be used by
    the interpolator is ``order + 1``.
  zero :
    The input should be thought of as zero-padded from the left with this value.

  Returns
  -------
  The first value will be the first sample from ``sig``, and then the
  interpolator will find the next samples towards the end of the ``sig``.
  The actual sampling interval (or time step) for this interpolator obeys the
  ``old / new`` relationship.

  Hint
  ----
  The time step can also be time-varying, although that's certainly difficult
  to synchronize (one sample is needed for each output sample). Perhaps the
  best approach for this case would be a ControlStream keeping the desired
  value at any time.

  Note
  ----
  The input isn't zero-padded at right. It means that the last output will be
  one interpolated with known data. For endless inputs that's ok, this
  makes no difference, but for finite inputs that may be undesirable.
  """
  sig = Stream(sig)
  threshold = .5 * (order + 1)
  step = old / new
  data = deque([zero] * (order + 1), maxlen=order + 1)
  data.extend(sig.take(rint(threshold)))
  idx = int(threshold)
  isig = iter(sig)
  if isinstance(step, Iterable):
    step = iter(step)
    while True:
      yield lagrange(enumerate(data))(idx)
      idx += next(step)
      while idx > threshold:
        data.append(next(isig))
        idx -= 1
  else:
    while True:
      yield lagrange(enumerate(data))(idx)
      idx += step
      while idx > threshold:
        data.append(next(isig))
        idx -= 1
[ "def", "resample", "(", "sig", ",", "old", "=", "1", ",", "new", "=", "1", ",", "order", "=", "3", ",", "zero", "=", "0.", ")", ":", "sig", "=", "Stream", "(", "sig", ")", "threshold", "=", ".5", "*", "(", "order", "+", "1", ")", "step", "=", "old", "/", "new", "data", "=", "deque", "(", "[", "zero", "]", "*", "(", "order", "+", "1", ")", ",", "maxlen", "=", "order", "+", "1", ")", "data", ".", "extend", "(", "sig", ".", "take", "(", "rint", "(", "threshold", ")", ")", ")", "idx", "=", "int", "(", "threshold", ")", "isig", "=", "iter", "(", "sig", ")", "if", "isinstance", "(", "step", ",", "Iterable", ")", ":", "step", "=", "iter", "(", "step", ")", "while", "True", ":", "yield", "lagrange", "(", "enumerate", "(", "data", ")", ")", "(", "idx", ")", "idx", "+=", "next", "(", "step", ")", "while", "idx", ">", "threshold", ":", "data", ".", "append", "(", "next", "(", "isig", ")", ")", "idx", "-=", "1", "else", ":", "while", "True", ":", "yield", "lagrange", "(", "enumerate", "(", "data", ")", ")", "(", "idx", ")", "idx", "+=", "step", "while", "idx", ">", "threshold", ":", "data", ".", "append", "(", "next", "(", "isig", ")", ")", "idx", "-=", "1" ]
32.661538
0.00823
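The `old`/`new` semantics of `resample` (e.g. `old=1, new=2` yields roughly two output samples per input sample) can be seen with a much simpler finite-sequence analogue that uses linear rather than Lagrange interpolation:

import numpy as np

sig = np.array([0.0, 1.0, 2.0, 3.0])
old, new = 1, 2
# sample the signal every old/new input-sample intervals
positions = np.arange(0, len(sig) - 1 + 1e-9, old / new)
resampled = np.interp(positions, np.arange(len(sig)), sig)
print(resampled)  # [0.  0.5 1.  1.5 2.  2.5 3. ]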
def _generate_pyephem_snapshot( mjd, log, magLimit): """* generate pyephem snapshot* **Key Arguments:** - ``mjd`` -- the mjd to generate the pyephem snapshot database for - ``xephemOE`` -- a list of xephem database format strings for use with pyephem **Return:** - ``pyephemDB`` -- the pyephem solar-system snapshot database """ log.info('starting the ``_generate_pyephem_snapshot`` method') print "generating pyephem database for MJD %(mjd)s" % locals() global xephemOE global DEG_TO_RAD_FACTOR global RAD_TO_DEG_FACTOR global nside # THE PYEPHEM OBSERVER obs = ephem.Observer() # PYEPHEM WORKS IN DUBLIN JD, TO CONVERT FROM MJD SUBTRACT 15019.5 obs.date = float(mjd) - 15019.5 pyephemDB = { "ra_deg": [], "dec_deg": [], "mpc_number": [], "object_name": [], "healpix": [], "mag": [] } for d in xephemOE: # GENERATE EPHEMERIS FOR THIS OBJECT minorPlanet = ephem.readdb(d["pyephem_string"]) minorPlanet.compute(obs) if minorPlanet.mag > magLimit: continue if d["mpc_number"]: d["mpc_number"] = int(d["mpc_number"]) thisRa = minorPlanet.a_ra * RAD_TO_DEG_FACTOR thisDec = minorPlanet.a_dec * RAD_TO_DEG_FACTOR pyephemDB["mag"].append(minorPlanet.mag) pyephemDB["ra_deg"].append(thisRa) pyephemDB["dec_deg"].append(thisDec) pyephemDB["mpc_number"].append(d["mpc_number"]) pyephemDB["object_name"].append(d["name"]) pyephemDB["healpix"].append(hp.ang2pix( nside, theta=thisRa, phi=thisDec, lonlat=True)) log.info('completed the ``_generate_pyephem_snapshot`` method') return pyephemDB
[ "def", "_generate_pyephem_snapshot", "(", "mjd", ",", "log", ",", "magLimit", ")", ":", "log", ".", "info", "(", "'starting the ``_generate_pyephem_snapshot`` method'", ")", "print", "\"generating pyephem database for MJD %(mjd)s\"", "%", "locals", "(", ")", "global", "xephemOE", "global", "DEG_TO_RAD_FACTOR", "global", "RAD_TO_DEG_FACTOR", "global", "nside", "# THE PYEPHEM OBSERVER", "obs", "=", "ephem", ".", "Observer", "(", ")", "# PYEPHEM WORKS IN DUBLIN JD, TO CONVERT FROM MJD SUBTRACT 15019.5", "obs", ".", "date", "=", "float", "(", "mjd", ")", "-", "15019.5", "pyephemDB", "=", "{", "\"ra_deg\"", ":", "[", "]", ",", "\"dec_deg\"", ":", "[", "]", ",", "\"mpc_number\"", ":", "[", "]", ",", "\"object_name\"", ":", "[", "]", ",", "\"healpix\"", ":", "[", "]", ",", "\"mag\"", ":", "[", "]", "}", "for", "d", "in", "xephemOE", ":", "# GENERATE EPHEMERIS FOR THIS OBJECT", "minorPlanet", "=", "ephem", ".", "readdb", "(", "d", "[", "\"pyephem_string\"", "]", ")", "minorPlanet", ".", "compute", "(", "obs", ")", "if", "minorPlanet", ".", "mag", ">", "magLimit", ":", "continue", "if", "d", "[", "\"mpc_number\"", "]", ":", "d", "[", "\"mpc_number\"", "]", "=", "int", "(", "d", "[", "\"mpc_number\"", "]", ")", "thisRa", "=", "minorPlanet", ".", "a_ra", "*", "RAD_TO_DEG_FACTOR", "thisDec", "=", "minorPlanet", ".", "a_dec", "*", "RAD_TO_DEG_FACTOR", "pyephemDB", "[", "\"mag\"", "]", ".", "append", "(", "minorPlanet", ".", "mag", ")", "pyephemDB", "[", "\"ra_deg\"", "]", ".", "append", "(", "thisRa", ")", "pyephemDB", "[", "\"dec_deg\"", "]", ".", "append", "(", "thisDec", ")", "pyephemDB", "[", "\"mpc_number\"", "]", ".", "append", "(", "d", "[", "\"mpc_number\"", "]", ")", "pyephemDB", "[", "\"object_name\"", "]", ".", "append", "(", "d", "[", "\"name\"", "]", ")", "pyephemDB", "[", "\"healpix\"", "]", ".", "append", "(", "hp", ".", "ang2pix", "(", "nside", ",", "theta", "=", "thisRa", ",", "phi", "=", "thisDec", ",", "lonlat", "=", "True", ")", ")", "log", ".", "info", "(", "'completed the ``_generate_pyephem_snapshot`` method'", ")", "return", "pyephemDB" ]
29.576271
0.001109
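A minimal sketch of the date handling used above, assuming the ``ephem`` (PyEphem) package is installed. It uses a built-in body instead of an XEphem database line, since the actual ``xephemOE`` entries are not shown here; the point it demonstrates is the Dublin JD convention (PyEphem date = MJD - 15019.5) and the radian-to-degree conversion.

import math
import ephem  # PyEphem; assumed to be installed

mjd = 57916.0                     # some Modified Julian Date
obs = ephem.Observer()
obs.date = mjd - 15019.5          # PyEphem dates are Dublin JD = MJD - 15019.5

body = ephem.Mars()               # built-in body standing in for an XEphem entry
body.compute(obs)

ra_deg = math.degrees(body.a_ra)  # astrometric RA/Dec come back in radians
dec_deg = math.degrees(body.a_dec)
print(ra_deg, dec_deg, body.mag)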
def rgb_2_hex(self, r, g, b): """ convert a rgb color to hex """ return "#{:02X}{:02X}{:02X}".format(int(r * 255), int(g * 255), int(b * 255))
[ "def", "rgb_2_hex", "(", "self", ",", "r", ",", "g", ",", "b", ")", ":", "return", "\"#{:02X}{:02X}{:02X}\"", ".", "format", "(", "int", "(", "r", "*", "255", ")", ",", "int", "(", "g", "*", "255", ")", ",", "int", "(", "b", "*", "255", ")", ")" ]
34
0.017241
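A quick standalone check of the formatting above, with the channels given as floats in the 0-1 range as the method expects:

def rgb_2_hex(r, g, b):
    # same conversion as the method: scale 0-1 floats to 0-255 and hex-encode
    return "#{:02X}{:02X}{:02X}".format(int(r * 255), int(g * 255), int(b * 255))

print(rgb_2_hex(1.0, 0.5, 0.0))  # '#FF7F00'
print(rgb_2_hex(0.0, 0.0, 0.0))  # '#000000'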
def version_dict(version): """Turn a version string into a dict with major/minor/... info.""" match = version_re.match(str(version) or '') letters = 'alpha pre'.split() numbers = 'major minor1 minor2 minor3 alpha_ver pre_ver'.split() if match: d = match.groupdict() for letter in letters: d[letter] = d[letter] if d[letter] else None for num in numbers: if d[num] == '*': d[num] = 99 else: d[num] = int(d[num]) if d[num] else None else: d = dict((k, None) for k in numbers) d.update((k, None) for k in letters) return d
[ "def", "version_dict", "(", "version", ")", ":", "match", "=", "version_re", ".", "match", "(", "str", "(", "version", ")", "or", "''", ")", "letters", "=", "'alpha pre'", ".", "split", "(", ")", "numbers", "=", "'major minor1 minor2 minor3 alpha_ver pre_ver'", ".", "split", "(", ")", "if", "match", ":", "d", "=", "match", ".", "groupdict", "(", ")", "for", "letter", "in", "letters", ":", "d", "[", "letter", "]", "=", "d", "[", "letter", "]", "if", "d", "[", "letter", "]", "else", "None", "for", "num", "in", "numbers", ":", "if", "d", "[", "num", "]", "==", "'*'", ":", "d", "[", "num", "]", "=", "99", "else", ":", "d", "[", "num", "]", "=", "int", "(", "d", "[", "num", "]", ")", "if", "d", "[", "num", "]", "else", "None", "else", ":", "d", "=", "dict", "(", "(", "k", ",", "None", ")", "for", "k", "in", "numbers", ")", "d", ".", "update", "(", "(", "k", ",", "None", ")", "for", "k", "in", "letters", ")", "return", "d" ]
35.444444
0.001527
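``version_re`` is defined elsewhere in the module, so the sketch below is self-contained: it uses a simplified, hypothetical pattern with the same named groups purely to show how the dict is assembled (including the ``'*'`` -> 99 convention). The real pattern also handles alpha/pre markers and is more permissive.

import re

# Hypothetical, simplified stand-in for the module-level version_re.
simple_version_re = re.compile(
    r"^(?P<major>\d+|\*)"
    r"(?:\.(?P<minor1>\d+|\*))?"
    r"(?:\.(?P<minor2>\d+|\*))?"
    r"(?:\.(?P<minor3>\d+|\*))?$"
)

def simple_version_dict(version):
    numbers = 'major minor1 minor2 minor3'.split()
    match = simple_version_re.match(str(version) or '')
    if not match:
        return dict((k, None) for k in numbers)
    d = match.groupdict()
    for num in numbers:
        d[num] = 99 if d[num] == '*' else (int(d[num]) if d[num] else None)
    return d

print(simple_version_dict('3.6.*'))  # {'major': 3, 'minor1': 6, 'minor2': 99, 'minor3': None}
print(simple_version_dict('bogus'))  # every key None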
def collapse (listoflists,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None): """ Averages data in collapsecol, keeping all unique items in keepcols (using unique, which keeps unique LISTS of column numbers), retaining the unique sets of values in keepcols, the mean for each. Setting fcn1 and/or fcn2 to point to a function rather than None (e.g., stats.sterr, len) will append those results (e.g., the sterr, N) after each calculated mean. cfcn is the collapse function to apply (defaults to mean, defined here in the pstat module to avoid circular imports with stats.py, but harmonicmean or others could be passed). Usage: collapse (listoflists,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None) Returns: a list of lists with all unique permutations of entries appearing in columns ("conditions") specified by keepcols, abutted with the result of cfcn (if cfcn=None, defaults to the mean) of each column specified by collapsecols. """ def collmean (inlist): s = 0 for item in inlist: s = s + item return s/float(len(inlist)) if type(keepcols) not in [ListType,TupleType]: keepcols = [keepcols] if type(collapsecols) not in [ListType,TupleType]: collapsecols = [collapsecols] if cfcn == None: cfcn = collmean if keepcols == []: means = [0]*len(collapsecols) for i in range(len(collapsecols)): avgcol = colex(listoflists,collapsecols[i]) means[i] = cfcn(avgcol) if fcn1: try: test = fcn1(avgcol) except: test = 'N/A' means[i] = [means[i], test] if fcn2: try: test = fcn2(avgcol) except: test = 'N/A' try: means[i] = means[i] + [len(avgcol)] except TypeError: means[i] = [means[i],len(avgcol)] return means else: values = colex(listoflists,keepcols) uniques = unique(values) uniques.sort() newlist = [] if type(keepcols) not in [ListType,TupleType]: keepcols = [keepcols] for item in uniques: if type(item) not in [ListType,TupleType]: item =[item] tmprows = linexand(listoflists,keepcols,item) for col in collapsecols: avgcol = colex(tmprows,col) item.append(cfcn(avgcol)) if fcn1 != None: try: test = fcn1(avgcol) except: test = 'N/A' item.append(test) if fcn2 != None: try: test = fcn2(avgcol) except: test = 'N/A' item.append(test) newlist.append(item) return newlist
[ "def", "collapse", "(", "listoflists", ",", "keepcols", ",", "collapsecols", ",", "fcn1", "=", "None", ",", "fcn2", "=", "None", ",", "cfcn", "=", "None", ")", ":", "def", "collmean", "(", "inlist", ")", ":", "s", "=", "0", "for", "item", "in", "inlist", ":", "s", "=", "s", "+", "item", "return", "s", "/", "float", "(", "len", "(", "inlist", ")", ")", "if", "type", "(", "keepcols", ")", "not", "in", "[", "ListType", ",", "TupleType", "]", ":", "keepcols", "=", "[", "keepcols", "]", "if", "type", "(", "collapsecols", ")", "not", "in", "[", "ListType", ",", "TupleType", "]", ":", "collapsecols", "=", "[", "collapsecols", "]", "if", "cfcn", "==", "None", ":", "cfcn", "=", "collmean", "if", "keepcols", "==", "[", "]", ":", "means", "=", "[", "0", "]", "*", "len", "(", "collapsecols", ")", "for", "i", "in", "range", "(", "len", "(", "collapsecols", ")", ")", ":", "avgcol", "=", "colex", "(", "listoflists", ",", "collapsecols", "[", "i", "]", ")", "means", "[", "i", "]", "=", "cfcn", "(", "avgcol", ")", "if", "fcn1", ":", "try", ":", "test", "=", "fcn1", "(", "avgcol", ")", "except", ":", "test", "=", "'N/A'", "means", "[", "i", "]", "=", "[", "means", "[", "i", "]", ",", "test", "]", "if", "fcn2", ":", "try", ":", "test", "=", "fcn2", "(", "avgcol", ")", "except", ":", "test", "=", "'N/A'", "try", ":", "means", "[", "i", "]", "=", "means", "[", "i", "]", "+", "[", "len", "(", "avgcol", ")", "]", "except", "TypeError", ":", "means", "[", "i", "]", "=", "[", "means", "[", "i", "]", ",", "len", "(", "avgcol", ")", "]", "return", "means", "else", ":", "values", "=", "colex", "(", "listoflists", ",", "keepcols", ")", "uniques", "=", "unique", "(", "values", ")", "uniques", ".", "sort", "(", ")", "newlist", "=", "[", "]", "if", "type", "(", "keepcols", ")", "not", "in", "[", "ListType", ",", "TupleType", "]", ":", "keepcols", "=", "[", "keepcols", "]", "for", "item", "in", "uniques", ":", "if", "type", "(", "item", ")", "not", "in", "[", "ListType", ",", "TupleType", "]", ":", "item", "=", "[", "item", "]", "tmprows", "=", "linexand", "(", "listoflists", ",", "keepcols", ",", "item", ")", "for", "col", "in", "collapsecols", ":", "avgcol", "=", "colex", "(", "tmprows", ",", "col", ")", "item", ".", "append", "(", "cfcn", "(", "avgcol", ")", ")", "if", "fcn1", "!=", "None", ":", "try", ":", "test", "=", "fcn1", "(", "avgcol", ")", "except", ":", "test", "=", "'N/A'", "item", ".", "append", "(", "test", ")", "if", "fcn2", "!=", "None", ":", "try", ":", "test", "=", "fcn2", "(", "avgcol", ")", "except", ":", "test", "=", "'N/A'", "item", ".", "append", "(", "test", ")", "newlist", ".", "append", "(", "item", ")", "return", "newlist" ]
39.026316
0.029267
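``collapse`` leans on the Python 2 helpers ``colex``, ``unique`` and ``linexand`` from the same module, so here is an independent, minimal sketch of the same idea in plain Python 3 (group rows on the kept column and average the collapsed column); it is not the function above, just an illustration of what it computes in the simplest case.

from collections import defaultdict

def group_mean(rows, keepcol, collapsecol):
    # Group rows on the value in keepcol and average the values in collapsecol.
    groups = defaultdict(list)
    for row in rows:
        groups[row[keepcol]].append(row[collapsecol])
    return [[key, sum(vals) / float(len(vals))] for key, vals in sorted(groups.items())]

rows = [['a', 1], ['a', 3], ['b', 10], ['b', 20], ['b', 30]]
print(group_mean(rows, keepcol=0, collapsecol=1))  # [['a', 2.0], ['b', 20.0]]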
def DiffAnys(obj1, obj2, looseMatch=False, ignoreArrayOrder=True): """Diff any two objects. Objects can be either primitive types or DataObjects""" differ = Differ(looseMatch = looseMatch, ignoreArrayOrder = ignoreArrayOrder) return differ.DiffAnyObjects(obj1, obj2)
[ "def", "DiffAnys", "(", "obj1", ",", "obj2", ",", "looseMatch", "=", "False", ",", "ignoreArrayOrder", "=", "True", ")", ":", "differ", "=", "Differ", "(", "looseMatch", "=", "looseMatch", ",", "ignoreArrayOrder", "=", "ignoreArrayOrder", ")", "return", "differ", ".", "DiffAnyObjects", "(", "obj1", ",", "obj2", ")" ]
55.2
0.032143
def analytical(src, rec, res, freqtime, solution='fs', signal=None, ab=11, aniso=None, epermH=None, epermV=None, mpermH=None, mpermV=None, verb=2): r"""Return the analytical full- or half-space solution. Calculate the electromagnetic frequency- or time-domain field due to infinitesimally small electric or magnetic dipole source(s), measured by infinitesimally small electric or magnetic dipole receiver(s); sources and receivers are directed along the principal directions x, y, or z, and all sources are at the same depth, as are all receivers. In the case of a halfspace the air-interface is located at z = 0 m. You can call the functions ``fullspace`` and ``halfspace`` in ``kernel.py`` directly. This interface is just to provide a consistent interface with the same input parameters as, for instance, ``dipole``. With ``solution='fs'`` this function yields the same result as ``dipole`` if the model is a fullspace. Included are: - Full fullspace solution (``solution='fs'``) for ee-, me-, em-, mm-fields, only frequency domain, [HuTS15]_. - Diffusive fullspace solution (``solution='dfs'``) for ee-fields, [SlHM10]_. - Diffusive halfspace solution (``solution='dhs'``) for ee-fields, [SlHM10]_. - Diffusive direct- and reflected field and airwave (``solution='dsplit'``) for ee-fields, [SlHM10]_. - Diffusive direct- and reflected field and airwave (``solution='dtetm'``) for ee-fields, split into TE and TM mode [SlHM10]_. Parameters ---------- src, rec : list of floats or arrays Source and receiver coordinates (m): [x, y, z]. The x- and y-coordinates can be arrays, z is a single value. The x- and y-coordinates must have the same dimension. res : float Horizontal resistivity rho_h (Ohm.m). Alternatively, res can be a dictionary. See the main manual of empymod to see how to exploit this hook to re-calculate etaH, etaV, zetaH, and zetaV, which can be used to, for instance, use the Cole-Cole model for IP. freqtime : array_like Frequencies f (Hz) if ``signal`` == None, else times t (s); (f, t > 0). solution : str, optional Defines which solution is returned: - 'fs' : Full fullspace solution (ee-, me-, em-, mm-fields); f-domain. - 'dfs' : Diffusive fullspace solution (ee-fields only). - 'dhs' : Diffusive halfspace solution (ee-fields only). - 'dsplit' : Diffusive direct- and reflected field and airwave (ee-fields only). - 'dtetm' : as dsplit, but direct field TE, TM; reflected field TE, TM, and airwave (ee-fields only). signal : {None, 0, 1, -1}, optional Source signal, default is None: - None: Frequency-domain response - -1 : Switch-off time-domain response - 0 : Impulse time-domain response - +1 : Switch-on time-domain response ab : int, optional Source-receiver configuration, defaults to 11.
+---------------+-------+------+------+------+------+------+------+ | | electric source | magnetic source | +===============+=======+======+======+======+======+======+======+ | | **x**| **y**| **z**| **x**| **y**| **z**| +---------------+-------+------+------+------+------+------+------+ | | **x** | 11 | 12 | 13 | 14 | 15 | 16 | + **electric** +-------+------+------+------+------+------+------+ | | **y** | 21 | 22 | 23 | 24 | 25 | 26 | + **receiver** +-------+------+------+------+------+------+------+ | | **z** | 31 | 32 | 33 | 34 | 35 | 36 | +---------------+-------+------+------+------+------+------+------+ | | **x** | 41 | 42 | 43 | 44 | 45 | 46 | + **magnetic** +-------+------+------+------+------+------+------+ | | **y** | 51 | 52 | 53 | 54 | 55 | 56 | + **receiver** +-------+------+------+------+------+------+------+ | | **z** | 61 | 62 | 63 | 64 | 65 | 66 | +---------------+-------+------+------+------+------+------+------+ aniso : float, optional Anisotropy lambda = sqrt(rho_v/rho_h) (-); defaults to one. epermH, epermV : float, optional Relative horizontal/vertical electric permittivity epsilon_h/epsilon_v (-); default is one. Ignored for the diffusive solution. mpermH, mpermV : float, optional Relative horizontal/vertical magnetic permeability mu_h/mu_v (-); default is one. Ignored for the diffusive solution. verb : {0, 1, 2, 3, 4}, optional Level of verbosity, default is 2: - 0: Print nothing. - 1: Print warnings. - 2: Print additional runtime - 3: Print additional start/stop, condensed parameter information. - 4: Print additional full parameter information Returns ------- EM : ndarray, (nfreq, nrec, nsrc) Frequency- or time-domain EM field (depending on ``signal``): - If rec is electric, returns E [V/m]. - If rec is magnetic, returns B [T] (not H [A/m]!). However, source and receiver are normalised. So for instance in the electric case the source strength is 1 A and its length is 1 m. So the electric field could also be written as [V/(A.m2)]. The shape of EM is (nfreq, nrec, nsrc). However, single dimensions are removed. If ``solution='dsplit'``, three ndarrays are returned: direct, reflect, air. If ``solution='dtetm'``, five ndarrays are returned: direct_TE, direct_TM, reflect_TE, reflect_TM, air. Examples -------- >>> import numpy as np >>> from empymod import analytical >>> src = [0, 0, 0] >>> rec = [np.arange(1, 11)*500, np.zeros(10), 200] >>> res = 50 >>> EMfield = analytical(src, rec, res, freqtime=1, verb=0) >>> print(EMfield) [ 4.03091405e-08 -9.69163818e-10j 6.97630362e-09 -4.88342150e-10j 2.15205979e-09 -2.97489809e-10j 8.90394459e-10 -1.99313433e-10j 4.32915802e-10 -1.40741644e-10j 2.31674165e-10 -1.02579391e-10j 1.31469130e-10 -7.62770461e-11j 7.72342470e-11 -5.74534125e-11j 4.61480481e-11 -4.36275540e-11j 2.76174038e-11 -3.32860932e-11j] """ # === 1. LET'S START ============ t0 = printstartfinish(verb) # === 2. 
CHECK INPUT ============ # Check times or frequencies if signal is not None: freqtime = check_time_only(freqtime, signal, verb) # Check layer parameters model = check_model([], res, aniso, epermH, epermV, mpermH, mpermV, True, verb) depth, res, aniso, epermH, epermV, mpermH, mpermV, _ = model # Check frequency => get etaH, etaV, zetaH, and zetaV frequency = check_frequency(freqtime, res, aniso, epermH, epermV, mpermH, mpermV, verb) freqtime, etaH, etaV, zetaH, zetaV = frequency # Update etaH/etaV and zetaH/zetaV according to user-provided model if isinstance(res, dict) and 'func_eta' in res: etaH, etaV = res['func_eta'](res, locals()) if isinstance(res, dict) and 'func_zeta' in res: zetaH, zetaV = res['func_zeta'](res, locals()) # Check src-rec configuration # => Get flags if src or rec or both are magnetic (msrc, mrec) ab_calc, msrc, mrec = check_ab(ab, verb) # Check src and rec src, nsrc = check_dipole(src, 'src', verb) rec, nrec = check_dipole(rec, 'rec', verb) # Get offsets and angles (off, angle) off, angle = get_off_ang(src, rec, nsrc, nrec, verb) # Get layer number in which src and rec reside (lsrc/lrec) _, zsrc = get_layer_nr(src, depth) _, zrec = get_layer_nr(rec, depth) # Check possibilities check_solution(solution, signal, ab, msrc, mrec) # === 3. EM-FIELD CALCULATION ============ if solution[0] == 'd': EM = kernel.halfspace(off, angle, zsrc, zrec, etaH, etaV, freqtime[:, None], ab_calc, signal, solution) else: if ab_calc not in [36, ]: EM = kernel.fullspace(off, angle, zsrc, zrec, etaH, etaV, zetaH, zetaV, ab_calc, msrc, mrec) else: # If <ab> = 36 (or 63), field is zero # In `bipole` and in `dipole`, this is taken care of in `fem`. Here # we have to take care of it separately EM = np.zeros((freqtime.size*nrec*nsrc), dtype=complex) # Squeeze if solution[1:] == 'split': EM = (np.squeeze(EM[0].reshape((-1, nrec, nsrc), order='F')), np.squeeze(EM[1].reshape((-1, nrec, nsrc), order='F')), np.squeeze(EM[2].reshape((-1, nrec, nsrc), order='F'))) elif solution[1:] == 'tetm': EM = (np.squeeze(EM[0].reshape((-1, nrec, nsrc), order='F')), np.squeeze(EM[1].reshape((-1, nrec, nsrc), order='F')), np.squeeze(EM[2].reshape((-1, nrec, nsrc), order='F')), np.squeeze(EM[3].reshape((-1, nrec, nsrc), order='F')), np.squeeze(EM[4].reshape((-1, nrec, nsrc), order='F'))) else: EM = np.squeeze(EM.reshape((-1, nrec, nsrc), order='F')) # === 4. FINISHED ============ printstartfinish(verb, t0) return EM
[ "def", "analytical", "(", "src", ",", "rec", ",", "res", ",", "freqtime", ",", "solution", "=", "'fs'", ",", "signal", "=", "None", ",", "ab", "=", "11", ",", "aniso", "=", "None", ",", "epermH", "=", "None", ",", "epermV", "=", "None", ",", "mpermH", "=", "None", ",", "mpermV", "=", "None", ",", "verb", "=", "2", ")", ":", "# === 1. LET'S START ============", "t0", "=", "printstartfinish", "(", "verb", ")", "# === 2. CHECK INPUT ============", "# Check times or frequencies", "if", "signal", "is", "not", "None", ":", "freqtime", "=", "check_time_only", "(", "freqtime", ",", "signal", ",", "verb", ")", "# Check layer parameters", "model", "=", "check_model", "(", "[", "]", ",", "res", ",", "aniso", ",", "epermH", ",", "epermV", ",", "mpermH", ",", "mpermV", ",", "True", ",", "verb", ")", "depth", ",", "res", ",", "aniso", ",", "epermH", ",", "epermV", ",", "mpermH", ",", "mpermV", ",", "_", "=", "model", "# Check frequency => get etaH, etaV, zetaH, and zetaV", "frequency", "=", "check_frequency", "(", "freqtime", ",", "res", ",", "aniso", ",", "epermH", ",", "epermV", ",", "mpermH", ",", "mpermV", ",", "verb", ")", "freqtime", ",", "etaH", ",", "etaV", ",", "zetaH", ",", "zetaV", "=", "frequency", "# Update etaH/etaV and zetaH/zetaV according to user-provided model", "if", "isinstance", "(", "res", ",", "dict", ")", "and", "'func_eta'", "in", "res", ":", "etaH", ",", "etaV", "=", "res", "[", "'func_eta'", "]", "(", "res", ",", "locals", "(", ")", ")", "if", "isinstance", "(", "res", ",", "dict", ")", "and", "'func_zeta'", "in", "res", ":", "zetaH", ",", "zetaV", "=", "res", "[", "'func_zeta'", "]", "(", "res", ",", "locals", "(", ")", ")", "# Check src-rec configuration", "# => Get flags if src or rec or both are magnetic (msrc, mrec)", "ab_calc", ",", "msrc", ",", "mrec", "=", "check_ab", "(", "ab", ",", "verb", ")", "# Check src and rec", "src", ",", "nsrc", "=", "check_dipole", "(", "src", ",", "'src'", ",", "verb", ")", "rec", ",", "nrec", "=", "check_dipole", "(", "rec", ",", "'rec'", ",", "verb", ")", "# Get offsets and angles (off, angle)", "off", ",", "angle", "=", "get_off_ang", "(", "src", ",", "rec", ",", "nsrc", ",", "nrec", ",", "verb", ")", "# Get layer number in which src and rec reside (lsrc/lrec)", "_", ",", "zsrc", "=", "get_layer_nr", "(", "src", ",", "depth", ")", "_", ",", "zrec", "=", "get_layer_nr", "(", "rec", ",", "depth", ")", "# Check possibilities", "check_solution", "(", "solution", ",", "signal", ",", "ab", ",", "msrc", ",", "mrec", ")", "# === 3. EM-FIELD CALCULATION ============", "if", "solution", "[", "0", "]", "==", "'d'", ":", "EM", "=", "kernel", ".", "halfspace", "(", "off", ",", "angle", ",", "zsrc", ",", "zrec", ",", "etaH", ",", "etaV", ",", "freqtime", "[", ":", ",", "None", "]", ",", "ab_calc", ",", "signal", ",", "solution", ")", "else", ":", "if", "ab_calc", "not", "in", "[", "36", ",", "]", ":", "EM", "=", "kernel", ".", "fullspace", "(", "off", ",", "angle", ",", "zsrc", ",", "zrec", ",", "etaH", ",", "etaV", ",", "zetaH", ",", "zetaV", ",", "ab_calc", ",", "msrc", ",", "mrec", ")", "else", ":", "# If <ab> = 36 (or 63), field is zero", "# In `bipole` and in `dipole`, this is taken care of in `fem`. 
Here", "# we have to take care of it separately", "EM", "=", "np", ".", "zeros", "(", "(", "freqtime", ".", "size", "*", "nrec", "*", "nsrc", ")", ",", "dtype", "=", "complex", ")", "# Squeeze", "if", "solution", "[", "1", ":", "]", "==", "'split'", ":", "EM", "=", "(", "np", ".", "squeeze", "(", "EM", "[", "0", "]", ".", "reshape", "(", "(", "-", "1", ",", "nrec", ",", "nsrc", ")", ",", "order", "=", "'F'", ")", ")", ",", "np", ".", "squeeze", "(", "EM", "[", "1", "]", ".", "reshape", "(", "(", "-", "1", ",", "nrec", ",", "nsrc", ")", ",", "order", "=", "'F'", ")", ")", ",", "np", ".", "squeeze", "(", "EM", "[", "2", "]", ".", "reshape", "(", "(", "-", "1", ",", "nrec", ",", "nsrc", ")", ",", "order", "=", "'F'", ")", ")", ")", "elif", "solution", "[", "1", ":", "]", "==", "'tetm'", ":", "EM", "=", "(", "np", ".", "squeeze", "(", "EM", "[", "0", "]", ".", "reshape", "(", "(", "-", "1", ",", "nrec", ",", "nsrc", ")", ",", "order", "=", "'F'", ")", ")", ",", "np", ".", "squeeze", "(", "EM", "[", "1", "]", ".", "reshape", "(", "(", "-", "1", ",", "nrec", ",", "nsrc", ")", ",", "order", "=", "'F'", ")", ")", ",", "np", ".", "squeeze", "(", "EM", "[", "2", "]", ".", "reshape", "(", "(", "-", "1", ",", "nrec", ",", "nsrc", ")", ",", "order", "=", "'F'", ")", ")", ",", "np", ".", "squeeze", "(", "EM", "[", "3", "]", ".", "reshape", "(", "(", "-", "1", ",", "nrec", ",", "nsrc", ")", ",", "order", "=", "'F'", ")", ")", ",", "np", ".", "squeeze", "(", "EM", "[", "4", "]", ".", "reshape", "(", "(", "-", "1", ",", "nrec", ",", "nsrc", ")", ",", "order", "=", "'F'", ")", ")", ")", "else", ":", "EM", "=", "np", ".", "squeeze", "(", "EM", ".", "reshape", "(", "(", "-", "1", ",", "nrec", ",", "nsrc", ")", ",", "order", "=", "'F'", ")", ")", "# === 4. FINISHED ============", "printstartfinish", "(", "verb", ",", "t0", ")", "return", "EM" ]
42.617117
0.000103
def arcovar(x, order): r"""Simple and fast implementation of the covariance AR estimate This code is 10 times faster than :func:`arcovar_marple` and, more importantly, only 10 lines of code, compared to roughly 200 lines of code for :func:`arcovar_marple` :param array x: Array of complex data samples :param int order: Order of linear prediction model :return: * a - Array of complex forward linear prediction coefficients * e - error The covariance method fits a Pth order autoregressive (AR) model to the input signal, which is assumed to be the output of an AR system driven by white noise. This method minimizes the forward prediction error in the least-squares sense. The output vector contains the normalized estimate of the AR system parameters. The white noise input variance estimate is also returned. If :math:`e` denotes that variance estimate, the power spectral density of y(n) is modelled as: .. math:: \frac{e}{\left| A(e^{jw}) \right|^2} = \frac{e}{\left| 1+\sum_{k=1}^P a(k)e^{-jwk}\right|^2} Because the method characterizes the input data using an all-pole model, the correct choice of the model order p is important. .. plot:: :width: 80% :include-source: from spectrum import arcovar, marple_data, arma2psd from pylab import plot, log10, linspace, axis ar_values, error = arcovar(marple_data, 15) psd = arma2psd(ar_values, sides='centerdc') plot(linspace(-0.5, 0.5, len(psd)), 10*log10(psd/max(psd))) axis([-0.5, 0.5, -60, 0]) .. seealso:: :class:`pcovar` :validation: the AR parameters are the same as those returned by a completely different function :func:`arcovar_marple`. :References: [Mathworks]_ """ from spectrum import corrmtx import scipy.linalg X = corrmtx(x, order, 'covariance') Xc = np.matrix(X[:, 1:]) X1 = np.array(X[:, 0]) # Coefficients estimated via the covariance method # Here we use lstsq rathre than solve function because Xc is not square # matrix a, _residues, _rank, _singular_values = scipy.linalg.lstsq(-Xc, X1) # Estimate the input white noise variance Cz = np.dot(X1.conj().transpose(), Xc) e = np.dot(X1.conj().transpose(), X1) + np.dot(Cz, a) assert e.imag < 1e-4, 'wierd behaviour' e = float(e.real) # ignore imag part that should be small return a, e
[ "def", "arcovar", "(", "x", ",", "order", ")", ":", "from", "spectrum", "import", "corrmtx", "import", "scipy", ".", "linalg", "X", "=", "corrmtx", "(", "x", ",", "order", ",", "'covariance'", ")", "Xc", "=", "np", ".", "matrix", "(", "X", "[", ":", ",", "1", ":", "]", ")", "X1", "=", "np", ".", "array", "(", "X", "[", ":", ",", "0", "]", ")", "# Coefficients estimated via the covariance method", "# Here we use lstsq rathre than solve function because Xc is not square", "# matrix", "a", ",", "_residues", ",", "_rank", ",", "_singular_values", "=", "scipy", ".", "linalg", ".", "lstsq", "(", "-", "Xc", ",", "X1", ")", "# Estimate the input white noise variance", "Cz", "=", "np", ".", "dot", "(", "X1", ".", "conj", "(", ")", ".", "transpose", "(", ")", ",", "Xc", ")", "e", "=", "np", ".", "dot", "(", "X1", ".", "conj", "(", ")", ".", "transpose", "(", ")", ",", "X1", ")", "+", "np", ".", "dot", "(", "Cz", ",", "a", ")", "assert", "e", ".", "imag", "<", "1e-4", ",", "'wierd behaviour'", "e", "=", "float", "(", "e", ".", "real", ")", "# ignore imag part that should be small", "return", "a", ",", "e" ]
33.681159
0.001672
def cogroup(self, other, numPartitions=None): """ Return a new DStream by applying 'cogroup' between RDDs of this DStream and `other` DStream. Hash partitioning is used to generate the RDDs with `numPartitions` partitions. """ if numPartitions is None: numPartitions = self._sc.defaultParallelism return self.transformWith(lambda a, b: a.cogroup(b, numPartitions), other)
[ "def", "cogroup", "(", "self", ",", "other", ",", "numPartitions", "=", "None", ")", ":", "if", "numPartitions", "is", "None", ":", "numPartitions", "=", "self", ".", "_sc", ".", "defaultParallelism", "return", "self", ".", "transformWith", "(", "lambda", "a", ",", "b", ":", "a", ".", "cogroup", "(", "b", ",", "numPartitions", ")", ",", "other", ")" ]
43.1
0.009091
def mean_size(self, p, q): ''' >>> psd = PSDLognormal(s=0.5, d_characteristic=5E-6) >>> psd.mean_size(3, 2) 4.412484512922977e-06 Note that for the case where p == q, a different set of formulas are required - which do not have analytical results for many distributions. Therefore, a close numerical approximation is used instead, to perturb the values of p and q so they are 1E-9 away from each other. This leads only to slight errors, as in the example below where the correct answer is 5E-6. >>> psd.mean_size(3, 3) 4.9999999304923345e-06 ''' if p == q: p -= 1e-9 q += 1e-9 pow1 = q - self.order denominator = self._pdf_basis_integral_definite(d_min=self.d_minimum, d_max=self.d_excessive, n=pow1) root_power = p - q pow3 = p - self.order numerator = self._pdf_basis_integral_definite(d_min=self.d_minimum, d_max=self.d_excessive, n=pow3) return (numerator/denominator)**(1.0/(root_power))
[ "def", "mean_size", "(", "self", ",", "p", ",", "q", ")", ":", "if", "p", "==", "q", ":", "p", "-=", "1e-9", "q", "+=", "1e-9", "pow1", "=", "q", "-", "self", ".", "order", "denominator", "=", "self", ".", "_pdf_basis_integral_definite", "(", "d_min", "=", "self", ".", "d_minimum", ",", "d_max", "=", "self", ".", "d_excessive", ",", "n", "=", "pow1", ")", "root_power", "=", "p", "-", "q", "pow3", "=", "p", "-", "self", ".", "order", "numerator", "=", "self", ".", "_pdf_basis_integral_definite", "(", "d_min", "=", "self", ".", "d_minimum", ",", "d_max", "=", "self", ".", "d_excessive", ",", "n", "=", "pow3", ")", "return", "(", "numerator", "/", "denominator", ")", "**", "(", "1.0", "/", "(", "root_power", ")", ")" ]
41.807692
0.008094
def _get_gcloud_records(self, gcloud_zone, page_token=None): """ Generator function which yields ResourceRecordSet for the managed gcloud zone, until there are no more records to pull. :param gcloud_zone: zone to pull records from :type gcloud_zone: google.cloud.dns.ManagedZone :param page_token: page token for the page to get :return: a resource record set :type return: google.cloud.dns.ResourceRecordSet """ gcloud_iterator = gcloud_zone.list_resource_record_sets( page_token=page_token) for gcloud_record in gcloud_iterator: yield gcloud_record # This is to get results which may be on a "paged" page. # (if more than max_results) entries. if gcloud_iterator.next_page_token: for gcloud_record in self._get_gcloud_records( gcloud_zone, gcloud_iterator.next_page_token): # yield from is in python 3 only. yield gcloud_record
[ "def", "_get_gcloud_records", "(", "self", ",", "gcloud_zone", ",", "page_token", "=", "None", ")", ":", "gcloud_iterator", "=", "gcloud_zone", ".", "list_resource_record_sets", "(", "page_token", "=", "page_token", ")", "for", "gcloud_record", "in", "gcloud_iterator", ":", "yield", "gcloud_record", "# This is to get results which may be on a \"paged\" page.", "# (if more than max_results) entries.", "if", "gcloud_iterator", ".", "next_page_token", ":", "for", "gcloud_record", "in", "self", ".", "_get_gcloud_records", "(", "gcloud_zone", ",", "gcloud_iterator", ".", "next_page_token", ")", ":", "# yield from is in python 3 only.", "yield", "gcloud_record" ]
46.636364
0.00191
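The recursion over ``next_page_token`` above is easy to exercise in isolation. The sketch below is an independent, self-contained stand-in with a fake paged iterator (not the google.cloud.dns client); it only shows how the recursive call drains every page in order.

class FakePagedIterator:
    # Stands in for the paged iterator a cloud client would return.
    def __init__(self, pages, page_index=0):
        self._pages = pages
        self._page_index = page_index
        self.next_page_token = page_index + 1 if page_index + 1 < len(pages) else None

    def __iter__(self):
        return iter(self._pages[self._page_index])

def list_records(pages, page_token=0):
    it = FakePagedIterator(pages, page_token)
    for record in it:
        yield record
    if it.next_page_token is not None:
        for record in list_records(pages, it.next_page_token):
            yield record

pages = [['a', 'b'], ['c'], ['d', 'e']]
print(list(list_records(pages)))  # ['a', 'b', 'c', 'd', 'e']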
def _sort_by_sortedset_before(self): """ Return True if we have to sort by set and do the stuff *before* asking redis for the sort """ return self._sort_by_sortedset and self._sort_limits and (not self._lazy_collection['pks'] or self._want_score_value)
[ "def", "_sort_by_sortedset_before", "(", "self", ")", ":", "return", "self", ".", "_sort_by_sortedset", "and", "self", ".", "_sort_limits", "and", "(", "not", "self", ".", "_lazy_collection", "[", "'pks'", "]", "or", "self", ".", "_want_score_value", ")" ]
49.428571
0.014205
def get_zeta_i_j_given_separate_counts(self, y_i, y_j): ''' Parameters ---------- y_i, np.array(int) Arrays of word counts of words occurring in positive class y_j, np.array(int) Returns ------- np.array of z-scores ''' n_i, n_j = y_i.sum(), y_j.sum() prior_scale_j = prior_scale_i = 1 if self._scale_type == 'class-size': prior_scale_i = ((n_i) * self._scale * 1. / np.sum(self._priors)) prior_scale_j = ((n_j) * self._scale * 1. / np.sum(self._priors)) elif self._scale_type == 'corpus-size': prior_scale_j = prior_scale_i = ((n_i + n_j) * self._scale * 1. / np.sum(self._priors)) elif self._scale_type == 'word': prior_scale_j = prior_scale_i = self._scale / np.sum(self._priors) elif self._scale_type == 'background-corpus-size': prior_scale_j = prior_scale_i = self._scale a_wj = (self._priors * prior_scale_j) ** self._prior_power a_0j = np.sum(a_wj) a_wi = (self._priors * prior_scale_i) ** self._prior_power a_0i = np.sum(a_wi) delta_i_j = (np.log((y_i + a_wi) / (n_i + a_0i - y_i - a_wi)) - np.log((y_j + a_wj) / (n_j + a_0j - y_j - a_wj))) var_delta_i_j = (1. / (y_i + a_wi) + 1. / (n_i + a_0i - y_i - a_wi) + 1. / (y_j + a_wj) + 1. / (n_j + a_0j - y_j - a_wj)) zeta_i_j = delta_i_j / np.sqrt(var_delta_i_j) return zeta_i_j
[ "def", "get_zeta_i_j_given_separate_counts", "(", "self", ",", "y_i", ",", "y_j", ")", ":", "n_i", ",", "n_j", "=", "y_i", ".", "sum", "(", ")", ",", "y_j", ".", "sum", "(", ")", "prior_scale_j", "=", "prior_scale_i", "=", "1", "if", "self", ".", "_scale_type", "==", "'class-size'", ":", "prior_scale_i", "=", "(", "(", "n_i", ")", "*", "self", ".", "_scale", "*", "1.", "/", "np", ".", "sum", "(", "self", ".", "_priors", ")", ")", "prior_scale_j", "=", "(", "(", "n_j", ")", "*", "self", ".", "_scale", "*", "1.", "/", "np", ".", "sum", "(", "self", ".", "_priors", ")", ")", "elif", "self", ".", "_scale_type", "==", "'corpus-size'", ":", "prior_scale_j", "=", "prior_scale_i", "=", "(", "(", "n_i", "+", "n_j", ")", "*", "self", ".", "_scale", "*", "1.", "/", "np", ".", "sum", "(", "self", ".", "_priors", ")", ")", "elif", "self", ".", "_scale_type", "==", "'word'", ":", "prior_scale_j", "=", "prior_scale_i", "=", "self", ".", "_scale", "/", "np", ".", "sum", "(", "self", ".", "_priors", ")", "elif", "self", ".", "_scale_type", "==", "'background-corpus-size'", ":", "prior_scale_j", "=", "prior_scale_i", "=", "self", ".", "_scale", "a_wj", "=", "(", "self", ".", "_priors", "*", "prior_scale_j", ")", "**", "self", ".", "_prior_power", "a_0j", "=", "np", ".", "sum", "(", "a_wj", ")", "a_wi", "=", "(", "self", ".", "_priors", "*", "prior_scale_i", ")", "**", "self", ".", "_prior_power", "a_0i", "=", "np", ".", "sum", "(", "a_wi", ")", "delta_i_j", "=", "(", "np", ".", "log", "(", "(", "y_i", "+", "a_wi", ")", "/", "(", "n_i", "+", "a_0i", "-", "y_i", "-", "a_wi", ")", ")", "-", "np", ".", "log", "(", "(", "y_j", "+", "a_wj", ")", "/", "(", "n_j", "+", "a_0j", "-", "y_j", "-", "a_wj", ")", ")", ")", "var_delta_i_j", "=", "(", "1.", "/", "(", "y_i", "+", "a_wi", ")", "+", "1.", "/", "(", "n_i", "+", "a_0i", "-", "y_i", "-", "a_wi", ")", "+", "1.", "/", "(", "y_j", "+", "a_wj", ")", "+", "1.", "/", "(", "n_j", "+", "a_0j", "-", "y_j", "-", "a_wj", ")", ")", "zeta_i_j", "=", "delta_i_j", "/", "np", ".", "sqrt", "(", "var_delta_i_j", ")", "return", "zeta_i_j" ]
37.111111
0.029176
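The statistic above is the z-score of a log-odds-ratio with an informative Dirichlet prior (the "Fightin' Words" approach of Monroe et al.). A small numpy sketch of the core formula for a single word, with made-up counts and a tiny flat prior, makes the algebra concrete; it is a toy restatement, not the class above.

import numpy as np

# Toy counts for one word in two corpora, plus corpus totals.
y_i, n_i = 30.0, 1000.0   # occurrences of the word / total words in corpus i
y_j, n_j = 10.0, 1200.0   # same for corpus j
a_w, a_0 = 0.01, 10.0     # prior count for this word / prior summed over the vocabulary

delta = (np.log((y_i + a_w) / (n_i + a_0 - y_i - a_w))
         - np.log((y_j + a_w) / (n_j + a_0 - y_j - a_w)))
var_delta = (1.0 / (y_i + a_w) + 1.0 / (n_i + a_0 - y_i - a_w)
             + 1.0 / (y_j + a_w) + 1.0 / (n_j + a_0 - y_j - a_w))
zeta = delta / np.sqrt(var_delta)
print(zeta)  # positive: the word leans towards corpus i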
def regex_for_range(min_, max_): """ > regex_for_range(12, 345) '1[2-9]|[2-9]\d|[1-2]\d{2}|3[0-3]\d|34[0-5]' """ positive_subpatterns = [] negative_subpatterns = [] max_ -= 1 if min_ < 0: min__ = 1 if max_ < 0: min__ = abs(max_) max__ = abs(min_) negative_subpatterns = split_to_patterns(min__, max__) min_ = 0 if max_ >= 0: positive_subpatterns = split_to_patterns(min_, max_) negative_only_subpatterns = ['-' + val for val in negative_subpatterns if val not in positive_subpatterns] positive_only_subpatterns = [val for val in positive_subpatterns if val not in negative_subpatterns] intersected_subpatterns = ['-?' + val for val in negative_subpatterns if val in positive_subpatterns] subpatterns = negative_only_subpatterns + intersected_subpatterns + positive_only_subpatterns return '|'.join(subpatterns)
[ "def", "regex_for_range", "(", "min_", ",", "max_", ")", ":", "positive_subpatterns", "=", "[", "]", "negative_subpatterns", "=", "[", "]", "max_", "-=", "1", "if", "min_", "<", "0", ":", "min__", "=", "1", "if", "max_", "<", "0", ":", "min__", "=", "abs", "(", "max_", ")", "max__", "=", "abs", "(", "min_", ")", "negative_subpatterns", "=", "split_to_patterns", "(", "min__", ",", "max__", ")", "min_", "=", "0", "if", "max_", ">=", "0", ":", "positive_subpatterns", "=", "split_to_patterns", "(", "min_", ",", "max_", ")", "negative_only_subpatterns", "=", "[", "'-'", "+", "val", "for", "val", "in", "negative_subpatterns", "if", "val", "not", "in", "positive_subpatterns", "]", "positive_only_subpatterns", "=", "[", "val", "for", "val", "in", "positive_subpatterns", "if", "val", "not", "in", "negative_subpatterns", "]", "intersected_subpatterns", "=", "[", "'-?'", "+", "val", "for", "val", "in", "negative_subpatterns", "if", "val", "in", "positive_subpatterns", "]", "subpatterns", "=", "negative_only_subpatterns", "+", "intersected_subpatterns", "+", "positive_only_subpatterns", "return", "'|'", ".", "join", "(", "subpatterns", ")" ]
34.961538
0.009636
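``split_to_patterns`` is defined elsewhere, so the easiest sanity check is the pattern itself. Taking the regex quoted in the docstring for ``regex_for_range(12, 345)`` and anchoring it with ``fullmatch``:

import re

pattern = re.compile(r'1[2-9]|[2-9]\d|[1-2]\d{2}|3[0-3]\d|34[0-5]')

def in_range(n):
    return pattern.fullmatch(str(n)) is not None

print([in_range(n) for n in (11, 12, 99, 200, 345, 346)])
# [False, True, True, True, True, False]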
def set_type(self, form_type, css_class=None): """ Maybe you have a site where you're not allowed to change the python code, and for some reason you need to change the form_type in a template, not because you want to (because it seems like a bit of a hack) but maybe you don't really have a choice. Then this function was made for you. Sorry. :param form_type: The new form_type :param css_class: If None (default), derive this from the form_type. If a value is passed in, it will be the new css_class for the form """ self.form_type = form_type if css_class is None: self.css_class = self.get_default_css_class(form_type) else: self.css_class = css_class return ''
[ "def", "set_type", "(", "self", ",", "form_type", ",", "css_class", "=", "None", ")", ":", "self", ".", "form_type", "=", "form_type", "if", "css_class", "is", "None", ":", "self", ".", "css_class", "=", "self", ".", "get_default_css_class", "(", "form_type", ")", "else", ":", "self", ".", "css_class", "=", "css_class", "return", "''" ]
38.409091
0.008083
def limits(self,variable): """Return minimum and maximum of variable across all rows of data.""" (vmin,vmax), = self.SELECT('min(%(variable)s), max(%(variable)s)' % vars()) return vmin,vmax
[ "def", "limits", "(", "self", ",", "variable", ")", ":", "(", "vmin", ",", "vmax", ")", ",", "=", "self", ".", "SELECT", "(", "'min(%(variable)s), max(%(variable)s)'", "%", "vars", "(", ")", ")", "return", "vmin", ",", "vmax" ]
52.5
0.028169
def parse(resp) -> DataFrameType: """Makes a dictionary of DataFrames from a response object""" statements = [] for statement in resp['results']: series = {} for s in statement.get('series', []): series[_get_name(s)] = _drop_zero_index(_serializer(s)) statements.append(series) if len(statements) == 1: series: dict = statements[0] if len(series) == 1: return list(series.values())[0] # DataFrame else: return series # dict return statements
[ "def", "parse", "(", "resp", ")", "->", "DataFrameType", ":", "statements", "=", "[", "]", "for", "statement", "in", "resp", "[", "'results'", "]", ":", "series", "=", "{", "}", "for", "s", "in", "statement", ".", "get", "(", "'series'", ",", "[", "]", ")", ":", "series", "[", "_get_name", "(", "s", ")", "]", "=", "_drop_zero_index", "(", "_serializer", "(", "s", ")", ")", "statements", ".", "append", "(", "series", ")", "if", "len", "(", "statements", ")", "==", "1", ":", "series", ":", "dict", "=", "statements", "[", "0", "]", "if", "len", "(", "series", ")", "==", "1", ":", "return", "list", "(", "series", ".", "values", "(", ")", ")", "[", "0", "]", "# DataFrame", "else", ":", "return", "series", "# dict", "return", "statements" ]
33.3125
0.001825
def bestModelIdAndErrScore(self, swarmId=None, genIdx=None): """Return the model ID of the model with the best result so far and its score on the optimize metric. If swarmId is None, then it returns the global best, otherwise it returns the best for the given swarm for all generations up to and including genIdx. Parameters: --------------------------------------------------------------------- swarmId: A string representation of the sorted list of encoders in this swarm. For example '__address_encoder.__gym_encoder' genIdx: consider the best in all generations up to and including this generation if not None. retval: (modelID, result) """ if swarmId is None: return (self._bestModelID, self._bestResult) else: if swarmId not in self._swarmBestOverall: return (None, numpy.inf) # Get the best score, considering the appropriate generations genScores = self._swarmBestOverall[swarmId] bestModelId = None bestScore = numpy.inf for (i, (modelId, errScore)) in enumerate(genScores): if genIdx is not None and i > genIdx: break if errScore < bestScore: bestScore = errScore bestModelId = modelId return (bestModelId, bestScore)
[ "def", "bestModelIdAndErrScore", "(", "self", ",", "swarmId", "=", "None", ",", "genIdx", "=", "None", ")", ":", "if", "swarmId", "is", "None", ":", "return", "(", "self", ".", "_bestModelID", ",", "self", ".", "_bestResult", ")", "else", ":", "if", "swarmId", "not", "in", "self", ".", "_swarmBestOverall", ":", "return", "(", "None", ",", "numpy", ".", "inf", ")", "# Get the best score, considering the appropriate generations", "genScores", "=", "self", ".", "_swarmBestOverall", "[", "swarmId", "]", "bestModelId", "=", "None", "bestScore", "=", "numpy", ".", "inf", "for", "(", "i", ",", "(", "modelId", ",", "errScore", ")", ")", "in", "enumerate", "(", "genScores", ")", ":", "if", "genIdx", "is", "not", "None", "and", "i", ">", "genIdx", ":", "break", "if", "errScore", "<", "bestScore", ":", "bestScore", "=", "errScore", "bestModelId", "=", "modelId", "return", "(", "bestModelId", ",", "bestScore", ")" ]
36.685714
0.009863
def get_assessment_part_form_for_create_for_assessment(self, assessment_id, assessment_part_record_types): """Gets the assessment part form for creating new assessment parts for an assessment. A new form should be requested for each create transaction. arg: assessment_id (osid.id.Id): an assessment ``Id`` arg: assessment_part_record_types (osid.type.Type[]): array of assessment part record types to be included in the create operation or an empty list if none return: (osid.assessment.authoring.AssessmentPartForm) - the assessment part form raise: NotFound - ``assessment_id`` is not found raise: NullArgument - ``assessment_id`` or ``assessment_part_record_types`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred raise: Unsupported - unable to get form for requested record types *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.learning.ActivityAdminSession.get_activity_form_for_create_template if not isinstance(assessment_id, ABCId): raise errors.InvalidArgument('argument is not a valid OSID Id') for arg in assessment_part_record_types: if not isinstance(arg, ABCType): raise errors.InvalidArgument('one or more argument array elements is not a valid OSID Type') if assessment_part_record_types == []: # WHY are we passing bank_id = self._catalog_id below, seems redundant: obj_form = objects.AssessmentPartForm( bank_id=self._catalog_id, assessment_id=assessment_id, catalog_id=self._catalog_id, runtime=self._runtime, proxy=self._proxy) else: obj_form = objects.AssessmentPartForm( bank_id=self._catalog_id, record_types=assessment_part_record_types, assessment_id=assessment_id, catalog_id=self._catalog_id, runtime=self._runtime, proxy=self._proxy) obj_form._for_update = False self._forms[obj_form.get_id().get_identifier()] = not CREATED return obj_form
[ "def", "get_assessment_part_form_for_create_for_assessment", "(", "self", ",", "assessment_id", ",", "assessment_part_record_types", ")", ":", "# Implemented from template for", "# osid.learning.ActivityAdminSession.get_activity_form_for_create_template", "if", "not", "isinstance", "(", "assessment_id", ",", "ABCId", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", "'argument is not a valid OSID Id'", ")", "for", "arg", "in", "assessment_part_record_types", ":", "if", "not", "isinstance", "(", "arg", ",", "ABCType", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", "'one or more argument array elements is not a valid OSID Type'", ")", "if", "assessment_part_record_types", "==", "[", "]", ":", "# WHY are we passing bank_id = self._catalog_id below, seems redundant:", "obj_form", "=", "objects", ".", "AssessmentPartForm", "(", "bank_id", "=", "self", ".", "_catalog_id", ",", "assessment_id", "=", "assessment_id", ",", "catalog_id", "=", "self", ".", "_catalog_id", ",", "runtime", "=", "self", ".", "_runtime", ",", "proxy", "=", "self", ".", "_proxy", ")", "else", ":", "obj_form", "=", "objects", ".", "AssessmentPartForm", "(", "bank_id", "=", "self", ".", "_catalog_id", ",", "record_types", "=", "assessment_part_record_types", ",", "assessment_id", "=", "assessment_id", ",", "catalog_id", "=", "self", ".", "_catalog_id", ",", "runtime", "=", "self", ".", "_runtime", ",", "proxy", "=", "self", ".", "_proxy", ")", "obj_form", ".", "_for_update", "=", "False", "self", ".", "_forms", "[", "obj_form", ".", "get_id", "(", ")", ".", "get_identifier", "(", ")", "]", "=", "not", "CREATED", "return", "obj_form" ]
49.270833
0.002488
def search_record(datas, keyword): """Search records in the target JSON dictionary. Arguments: datas: dictionary of record data keyword: search keyword. A plain keyword is matched as a substring of each record's "name" or "content", or as an exact "type". When the keyword contains commas (","), it is split into name, type and (optionally) content parts, and a record must match every part given. """ key_name, key_type, key_content = False, False, False if keyword.find(',') > -1: if len(keyword.split(',')) == 3: key_content = keyword.split(',')[2] key_name = keyword.split(',')[0] key_type = keyword.split(',')[1] result = [] for record in datas['records']: if key_name and key_type: if key_content: if ((record['name'].find(key_name) > -1 and record['type'] == key_type and record['content'].find(key_content) > -1)): result.append(record) else: if ((record['name'].find(key_name) > -1 and record['type'] == key_type)): result.append(record) elif ((record['name'].find(keyword) >= 0 or record['content'].find(keyword) >= 0 or record['type'] == keyword)): result.append(record) return result
[ "def", "search_record", "(", "datas", ",", "keyword", ")", ":", "key_name", ",", "key_type", ",", "key_content", "=", "False", ",", "False", ",", "False", "if", "keyword", ".", "find", "(", "','", ")", ">", "-", "1", ":", "if", "len", "(", "keyword", ".", "split", "(", "','", ")", ")", "==", "3", ":", "key_content", "=", "keyword", ".", "split", "(", "','", ")", "[", "2", "]", "key_name", "=", "keyword", ".", "split", "(", "','", ")", "[", "0", "]", "key_type", "=", "keyword", ".", "split", "(", "','", ")", "[", "1", "]", "result", "=", "[", "]", "for", "record", "in", "datas", "[", "'records'", "]", ":", "if", "key_name", "and", "key_type", ":", "if", "key_content", ":", "if", "(", "(", "record", "[", "'name'", "]", ".", "find", "(", "key_name", ")", ">", "-", "1", "and", "record", "[", "'type'", "]", "==", "key_type", "and", "record", "[", "'content'", "]", ".", "find", "(", "key_content", ")", ">", "-", "1", ")", ")", ":", "result", ".", "append", "(", "record", ")", "else", ":", "if", "(", "(", "record", "[", "'name'", "]", ".", "find", "(", "key_name", ")", ">", "-", "1", "and", "record", "[", "'type'", "]", "==", "key_type", ")", ")", ":", "result", ".", "append", "(", "record", ")", "elif", "(", "(", "record", "[", "'name'", "]", ".", "find", "(", "keyword", ")", ">=", "0", "or", "record", "[", "'content'", "]", ".", "find", "(", "keyword", ")", ">=", "0", "or", "record", "[", "'type'", "]", "==", "keyword", ")", ")", ":", "result", ".", "append", "(", "record", ")", "return", "result" ]
29.977273
0.000734
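A usage sketch for ``search_record``, assuming the function is importable; the record layout (a ``'records'`` list of dicts with ``name``/``type``/``content`` keys) is taken from the code itself.

datas = {
    'records': [
        {'name': 'www.example.org', 'type': 'A', 'content': '192.0.2.10'},
        {'name': 'mail.example.org', 'type': 'A', 'content': '192.0.2.20'},
        {'name': 'example.org', 'type': 'MX', 'content': 'mail.example.org'},
    ]
}

# Plain keyword: substring of name/content, or exact record type.
print(len(search_record(datas, 'MX')))             # 1
print(len(search_record(datas, 'example.org')))    # 3

# Comma-separated keyword: name substring, exact type, content substring.
print(len(search_record(datas, 'www,A,192.0.2')))  # 1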
def get_domain(context, prefix): """ Return the domain used for the tracking code. Each service may be configured with its own domain (called `<name>_domain`), or a django-analytical-wide domain may be set (using `analytical_domain`. If no explicit domain is found in either the context or the settings, try to get the domain from the contrib sites framework. """ domain = context.get('%s_domain' % prefix) if domain is None: domain = context.get('analytical_domain') if domain is None: domain = getattr(settings, '%s_DOMAIN' % prefix.upper(), None) if domain is None: domain = getattr(settings, 'ANALYTICAL_DOMAIN', None) if domain is None: if 'django.contrib.sites' in settings.INSTALLED_APPS: from django.contrib.sites.models import Site try: domain = Site.objects.get_current().domain except (ImproperlyConfigured, Site.DoesNotExist): pass return domain
[ "def", "get_domain", "(", "context", ",", "prefix", ")", ":", "domain", "=", "context", ".", "get", "(", "'%s_domain'", "%", "prefix", ")", "if", "domain", "is", "None", ":", "domain", "=", "context", ".", "get", "(", "'analytical_domain'", ")", "if", "domain", "is", "None", ":", "domain", "=", "getattr", "(", "settings", ",", "'%s_DOMAIN'", "%", "prefix", ".", "upper", "(", ")", ",", "None", ")", "if", "domain", "is", "None", ":", "domain", "=", "getattr", "(", "settings", ",", "'ANALYTICAL_DOMAIN'", ",", "None", ")", "if", "domain", "is", "None", ":", "if", "'django.contrib.sites'", "in", "settings", ".", "INSTALLED_APPS", ":", "from", "django", ".", "contrib", ".", "sites", ".", "models", "import", "Site", "try", ":", "domain", "=", "Site", ".", "objects", ".", "get_current", "(", ")", ".", "domain", "except", "(", "ImproperlyConfigured", ",", "Site", ".", "DoesNotExist", ")", ":", "pass", "return", "domain" ]
41.166667
0.000989
def SLIT_GAUSSIAN(x,g): """ Instrumental (slit) function. B(x) = sqrt(ln(2)/pi)/γ*exp(-ln(2)*(x/γ)**2), where γ/2 is a gaussian half-width at half-maximum. """ g /= 2 return sqrt(log(2))/(sqrt(pi)*g)*exp(-log(2)*(x/g)**2)
[ "def", "SLIT_GAUSSIAN", "(", "x", ",", "g", ")", ":", "g", "/=", "2", "return", "sqrt", "(", "log", "(", "2", ")", ")", "/", "(", "sqrt", "(", "pi", ")", "*", "g", ")", "*", "exp", "(", "-", "log", "(", "2", ")", "*", "(", "x", "/", "g", ")", "**", "2", ")" ]
30.25
0.008032
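Two properties of this slit function are easy to verify numerically: it integrates to one, and its half-width at half-maximum is half of the ``g`` that is passed in. The sketch below restates the one-liner with numpy so it runs standalone.

import numpy as np

def slit_gaussian(x, g):
    g = g / 2.0  # same halving as above
    return np.sqrt(np.log(2)) / (np.sqrt(np.pi) * g) * np.exp(-np.log(2) * (x / g) ** 2)

g = 0.1                                  # slit width, same units as x
x = np.linspace(-5 * g, 5 * g, 20001)
print(np.trapz(slit_gaussian(x, g), x))  # ~1.0: area-normalised
print(slit_gaussian(g / 2.0, g) / slit_gaussian(0.0, g))  # 0.5: HWHM is g/2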
def _unweave(target, advices, pointcut, ctx, depth, depth_predicate): """Unweave deeply advices in target.""" # if weaving has to be done if pointcut is None or pointcut(target): # do something only if target is intercepted if is_intercepted(target): _remove_advices(target=target, advices=advices, ctx=ctx) # search inside the target if depth > 0: # for an object or a class, weave on methods # get base ctx _base_ctx = None if ctx is not None: _base_ctx = base_ctx(ctx) for _, member in getmembers(target, depth_predicate): _unweave( target=member, advices=advices, pointcut=pointcut, depth=depth - 1, depth_predicate=depth_predicate, ctx=_base_ctx )
[ "def", "_unweave", "(", "target", ",", "advices", ",", "pointcut", ",", "ctx", ",", "depth", ",", "depth_predicate", ")", ":", "# if weaving has to be done", "if", "pointcut", "is", "None", "or", "pointcut", "(", "target", ")", ":", "# do something only if target is intercepted", "if", "is_intercepted", "(", "target", ")", ":", "_remove_advices", "(", "target", "=", "target", ",", "advices", "=", "advices", ",", "ctx", "=", "ctx", ")", "# search inside the target", "if", "depth", ">", "0", ":", "# for an object or a class, weave on methods", "# get base ctx", "_base_ctx", "=", "None", "if", "ctx", "is", "not", "None", ":", "_base_ctx", "=", "base_ctx", "(", "ctx", ")", "for", "_", ",", "member", "in", "getmembers", "(", "target", ",", "depth_predicate", ")", ":", "_unweave", "(", "target", "=", "member", ",", "advices", "=", "advices", ",", "pointcut", "=", "pointcut", ",", "depth", "=", "depth", "-", "1", ",", "depth_predicate", "=", "depth_predicate", ",", "ctx", "=", "_base_ctx", ")" ]
39.2
0.001245
def parse_table(tag): """ returns tuple of type ("class"/"func") and list of param strings. :param tag: :return: """ first = True table_header = None table_type = 'unknown' param_strings = [] thead = tag.find('thead', recursive=False) theads = None # list (items in <tr> row) of <th>/<tr> elements. if thead: theads = thead.find_all(["th", "td"]) # end if tbody = tag.find('tbody', recursive=False) if tbody: tbody_rows = tbody.find_all("tr") else: tbody_rows = tag.find_all("tr") # end if tbodys = [ # list (rows) of list (items in <tr> row) of <tr> elements. row.find_all(["td" ,"th"]) for row in tbody_rows ] if not thead: # so first row = header theads = tbody_rows[0] tbodys = tbody_rows[1:] # end if # TABLE HEADER found_columns = [] for column in theads: # Either (a) `<td><strong> ... </strong></td>` # or new (b) `<th> ... </th>` col = column.find("strong") if col: # (a) `<td><strong> ... </strong></td>` col_text = col.text else: # (b) `<th> ... </th>` col_text = column.text # end if found_columns.append(col_text) # end def # if TABLE is func for test_columns in func_fields: if found_columns == test_columns: table_header = test_columns table_type = 'func' break # end if # end for # if TABLE is class if not table_header: # only check if we don't have a result yet # search class now for test_columns in class_fields: if found_columns == test_columns: if table_header is not None: raise AssertionError("Table detected as func and class: {!r}".format(found_columns)) table_header = test_columns table_type = 'class' break # end if # end for # end if # TABLE is none of the above if not table_header: # we don't have a result yet raise AssertionError("Unknown table, {!r}".format(found_columns)) # end if # TABLE BODY for tds in tbodys: string = "\t".join([col.text for col in tds]) logger.debug("t: " + string) param_strings.append(string) pass # end for row return table_type, param_strings
[ "def", "parse_table", "(", "tag", ")", ":", "first", "=", "True", "table_header", "=", "None", "table_type", "=", "'unknown'", "param_strings", "=", "[", "]", "thead", "=", "tag", ".", "find", "(", "'thead'", ",", "recursive", "=", "False", ")", "theads", "=", "None", "# list (items in <tr> row) of <th>/<tr> elements.", "if", "thead", ":", "theads", "=", "thead", ".", "find_all", "(", "[", "\"th\"", ",", "\"td\"", "]", ")", "# end if", "tbody", "=", "tag", ".", "find", "(", "'tbody'", ",", "recursive", "=", "False", ")", "if", "tbody", ":", "tbody_rows", "=", "tbody", ".", "find_all", "(", "\"tr\"", ")", "else", ":", "tbody_rows", "=", "tag", ".", "find_all", "(", "\"tr\"", ")", "# end if", "tbodys", "=", "[", "# list (rows) of list (items in <tr> row) of <tr> elements.", "row", ".", "find_all", "(", "[", "\"td\"", ",", "\"th\"", "]", ")", "for", "row", "in", "tbody_rows", "]", "if", "not", "thead", ":", "# so first row = header", "theads", "=", "tbody_rows", "[", "0", "]", "tbodys", "=", "tbody_rows", "[", "1", ":", "]", "# end if", "# TABLE HEADER", "found_columns", "=", "[", "]", "for", "column", "in", "theads", ":", "# Either (a) `<td><strong> ... </strong></td>`", "# or new (b) `<th> ... </th>`", "col", "=", "column", ".", "find", "(", "\"strong\"", ")", "if", "col", ":", "# (a) `<td><strong> ... </strong></td>`", "col_text", "=", "col", ".", "text", "else", ":", "# (b) `<th> ... </th>`", "col_text", "=", "column", ".", "text", "# end if", "found_columns", ".", "append", "(", "col_text", ")", "# end def", "# if TABLE is func", "for", "test_columns", "in", "func_fields", ":", "if", "found_columns", "==", "test_columns", ":", "table_header", "=", "test_columns", "table_type", "=", "'func'", "break", "# end if", "# end for", "# if TABLE is class", "if", "not", "table_header", ":", "# only check if we don't have a result yet", "# search class now", "for", "test_columns", "in", "class_fields", ":", "if", "found_columns", "==", "test_columns", ":", "if", "table_header", "is", "not", "None", ":", "raise", "AssertionError", "(", "\"Table detected as func and class: {!r}\"", ".", "format", "(", "found_columns", ")", ")", "table_header", "=", "test_columns", "table_type", "=", "'class'", "break", "# end if", "# end for", "# end if", "# TABLE is none of the above", "if", "not", "table_header", ":", "# we don't have a result yet", "raise", "AssertionError", "(", "\"Unknown table, {!r}\"", ".", "format", "(", "found_columns", ")", ")", "# end if", "# TABLE BODY", "for", "tds", "in", "tbodys", ":", "string", "=", "\"\\t\"", ".", "join", "(", "[", "col", ".", "text", "for", "col", "in", "tds", "]", ")", "logger", ".", "debug", "(", "\"t: \"", "+", "string", ")", "param_strings", ".", "append", "(", "string", ")", "pass", "# end for row", "return", "table_type", ",", "param_strings" ]
28.130952
0.001635
def tasks(self): """Get the list of tasks""" self._rwlock.reader_acquire() tl = [v for v in self._tasks.values()] tl.sort(key=lambda x: x.task_id) self._rwlock.reader_release() return tl
[ "def", "tasks", "(", "self", ")", ":", "self", ".", "_rwlock", ".", "reader_acquire", "(", ")", "tl", "=", "[", "v", "for", "v", "in", "self", ".", "_tasks", ".", "values", "(", ")", "]", "tl", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", ".", "task_id", ")", "self", ".", "_rwlock", ".", "reader_release", "(", ")", "return", "tl" ]
25.333333
0.008475
def get_flash(self, format_ = "nl"): """ return a string representation of the flash """ flash = [self.flash.read(i) for i in range(self.flash.size)] return self._format_mem(flash, format_)
[ "def", "get_flash", "(", "self", ",", "format_", "=", "\"nl\"", ")", ":", "flash", "=", "[", "self", ".", "flash", ".", "read", "(", "i", ")", "for", "i", "in", "range", "(", "self", ".", "flash", ".", "size", ")", "]", "return", "self", ".", "_format_mem", "(", "flash", ",", "format_", ")" ]
32.666667
0.044776
async def set_key_metadata(wallet_handle: int, verkey: str, metadata: str) -> None: """ Saves/replaces the meta information for the given key in the wallet. :param wallet_handle: Wallet handle (created by open_wallet). :param verkey: the key (verkey, key id) to store metadata for. :param metadata: the meta information that will be stored with the key. :return: Error code """ logger = logging.getLogger(__name__) logger.debug("set_key_metadata: >>> wallet_handle: %r, verkey: %r, metadata: %r", wallet_handle, verkey, metadata) if not hasattr(set_key_metadata, "cb"): logger.debug("set_key_metadata: Creating callback") set_key_metadata.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32)) c_wallet_handle = c_int32(wallet_handle) c_verkey = c_char_p(verkey.encode('utf-8')) c_metadata = c_char_p(metadata.encode('utf-8')) await do_call('indy_set_key_metadata', c_wallet_handle, c_verkey, c_metadata, set_key_metadata.cb) logger.debug("create_key: <<<")
[ "async", "def", "set_key_metadata", "(", "wallet_handle", ":", "int", ",", "verkey", ":", "str", ",", "metadata", ":", "str", ")", "->", "None", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "logger", ".", "debug", "(", "\"set_key_metadata: >>> wallet_handle: %r, verkey: %r, metadata: %r\"", ",", "wallet_handle", ",", "verkey", ",", "metadata", ")", "if", "not", "hasattr", "(", "set_key_metadata", ",", "\"cb\"", ")", ":", "logger", ".", "debug", "(", "\"set_key_metadata: Creating callback\"", ")", "set_key_metadata", ".", "cb", "=", "create_cb", "(", "CFUNCTYPE", "(", "None", ",", "c_int32", ",", "c_int32", ")", ")", "c_wallet_handle", "=", "c_int32", "(", "wallet_handle", ")", "c_verkey", "=", "c_char_p", "(", "verkey", ".", "encode", "(", "'utf-8'", ")", ")", "c_metadata", "=", "c_char_p", "(", "metadata", ".", "encode", "(", "'utf-8'", ")", ")", "await", "do_call", "(", "'indy_set_key_metadata'", ",", "c_wallet_handle", ",", "c_verkey", ",", "c_metadata", ",", "set_key_metadata", ".", "cb", ")", "logger", ".", "debug", "(", "\"create_key: <<<\"", ")" ]
34.818182
0.001693
def dump_public_key(public_key, encoding='pem'): """ Serializes a public key object into a byte string :param public_key: An oscrypto.asymmetric.PublicKey or asn1crypto.keys.PublicKeyInfo object :param encoding: A unicode string of "pem" or "der" :return: A byte string of the encoded public key """ if encoding not in set(['pem', 'der']): raise ValueError(pretty_message( ''' encoding must be one of "pem", "der", not %s ''', repr(encoding) )) is_oscrypto = isinstance(public_key, PublicKey) if not isinstance(public_key, keys.PublicKeyInfo) and not is_oscrypto: raise TypeError(pretty_message( ''' public_key must be an instance of oscrypto.asymmetric.PublicKey or asn1crypto.keys.PublicKeyInfo, not %s ''', type_name(public_key) )) if is_oscrypto: public_key = public_key.asn1 output = public_key.dump() if encoding == 'pem': output = pem.armor('PUBLIC KEY', output) return output
[ "def", "dump_public_key", "(", "public_key", ",", "encoding", "=", "'pem'", ")", ":", "if", "encoding", "not", "in", "set", "(", "[", "'pem'", ",", "'der'", "]", ")", ":", "raise", "ValueError", "(", "pretty_message", "(", "'''\n encoding must be one of \"pem\", \"der\", not %s\n '''", ",", "repr", "(", "encoding", ")", ")", ")", "is_oscrypto", "=", "isinstance", "(", "public_key", ",", "PublicKey", ")", "if", "not", "isinstance", "(", "public_key", ",", "keys", ".", "PublicKeyInfo", ")", "and", "not", "is_oscrypto", ":", "raise", "TypeError", "(", "pretty_message", "(", "'''\n public_key must be an instance of oscrypto.asymmetric.PublicKey or\n asn1crypto.keys.PublicKeyInfo, not %s\n '''", ",", "type_name", "(", "public_key", ")", ")", ")", "if", "is_oscrypto", ":", "public_key", "=", "public_key", ".", "asn1", "output", "=", "public_key", ".", "dump", "(", ")", "if", "encoding", "==", "'pem'", ":", "output", "=", "pem", ".", "armor", "(", "'PUBLIC KEY'", ",", "output", ")", "return", "output" ]
27.794872
0.001783
def ignore_stops_before_now(self): """Ignore any stops received before this point""" self._sentinel_stop = object() self._q.put(self._sentinel_stop)
[ "def", "ignore_stops_before_now", "(", "self", ")", ":", "self", ".", "_sentinel_stop", "=", "object", "(", ")", "self", ".", "_q", ".", "put", "(", "self", ".", "_sentinel_stop", ")" ]
42.25
0.011628
def is_method(method, flags=METHOD_ALL): """ Determines whether the passed value is a method satisfying certain conditions: * Being instance method. * Being class method. * Being bound method. * Being unbound method. Flag check is considered or-wise. The default is to consider every option. :param method: :param flags: :return: """ if isinstance(method, types.UnboundMethodType): if flags & METHOD_CLASS and issubclass(method.im_class, type): return True if flags & METHOD_INSTANCE and not issubclass(method.im_class, type): return True if flags & METHOD_BOUND and method.im_self is not None: return True if flags & METHOD_UNBOUND and method.im_self is None: return True return False
[ "def", "is_method", "(", "method", ",", "flags", "=", "METHOD_ALL", ")", ":", "if", "isinstance", "(", "method", ",", "types", ".", "UnboundMethodType", ")", ":", "if", "flags", "&", "METHOD_CLASS", "and", "issubclass", "(", "method", ".", "im_class", ",", "type", ")", ":", "return", "True", "if", "flags", "&", "METHOD_INSTANCE", "and", "not", "issubclass", "(", "method", ".", "im_class", ",", "type", ")", ":", "return", "True", "if", "flags", "&", "METHOD_BOUND", "and", "method", ".", "im_self", "is", "not", "None", ":", "return", "True", "if", "flags", "&", "METHOD_UNBOUND", "and", "method", ".", "im_self", "is", "None", ":", "return", "True", "return", "False" ]
36.545455
0.002424
def save(self, request, resource=None, **kwargs): """Create a resource.""" resources = resource if isinstance(resource, list) else [resource] for obj in resources: obj.save() return resource
[ "def", "save", "(", "self", ",", "request", ",", "resource", "=", "None", ",", "*", "*", "kwargs", ")", ":", "resources", "=", "resource", "if", "isinstance", "(", "resource", ",", "list", ")", "else", "[", "resource", "]", "for", "obj", "in", "resources", ":", "obj", ".", "save", "(", ")", "return", "resource" ]
38.166667
0.008547
def delete_api_method(restApiId, resourcePath, httpMethod, region=None, key=None, keyid=None, profile=None): ''' Delete API method for a resource in the given API CLI Example: .. code-block:: bash salt myminion boto_apigateway.delete_api_method restApiId resourcePath httpMethod ''' try: resource = describe_api_resource(restApiId, resourcePath, region=region, key=key, keyid=keyid, profile=profile).get('resource') if resource: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn.delete_method(restApiId=restApiId, resourceId=resource['id'], httpMethod=httpMethod) return {'deleted': True} return {'deleted': False, 'error': 'get API method failed: no such resource'} except ClientError as e: return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
[ "def", "delete_api_method", "(", "restApiId", ",", "resourcePath", ",", "httpMethod", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "try", ":", "resource", "=", "describe_api_resource", "(", "restApiId", ",", "resourcePath", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", ".", "get", "(", "'resource'", ")", "if", "resource", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "conn", ".", "delete_method", "(", "restApiId", "=", "restApiId", ",", "resourceId", "=", "resource", "[", "'id'", "]", ",", "httpMethod", "=", "httpMethod", ")", "return", "{", "'deleted'", ":", "True", "}", "return", "{", "'deleted'", ":", "False", ",", "'error'", ":", "'get API method failed: no such resource'", "}", "except", "ClientError", "as", "e", ":", "return", "{", "'deleted'", ":", "False", ",", "'error'", ":", "__utils__", "[", "'boto3.get_error'", "]", "(", "e", ")", "}" ]
43.52381
0.008565
def rotateInDeclination(v1, theta_deg): """Rotation is chosen so a rotation of 90 degrees from zenith ends up at ra=0, dec=0""" axis = np.array([0,-1,0]) return rotateAroundVector(v1, axis, theta_deg)
[ "def", "rotateInDeclination", "(", "v1", ",", "theta_deg", ")", ":", "axis", "=", "np", ".", "array", "(", "[", "0", ",", "-", "1", ",", "0", "]", ")", "return", "rotateAroundVector", "(", "v1", ",", "axis", ",", "theta_deg", ")" ]
42.4
0.013889
def update_lincs_proteins(): """Load the csv of LINCS protein metadata into a dict. Produces a dict keyed by HMS LINCS protein ids, with the metadata contained in a dict of row values keyed by the column headers extracted from the csv. """ url = 'http://lincs.hms.harvard.edu/db/proteins/' prot_data = load_lincs_csv(url) prot_dict = {d['HMS LINCS ID']: d.copy() for d in prot_data} assert len(prot_dict) == len(prot_data), "We lost data." fname = os.path.join(path, 'lincs_proteins.json') with open(fname, 'w') as fh: json.dump(prot_dict, fh, indent=1)
[ "def", "update_lincs_proteins", "(", ")", ":", "url", "=", "'http://lincs.hms.harvard.edu/db/proteins/'", "prot_data", "=", "load_lincs_csv", "(", "url", ")", "prot_dict", "=", "{", "d", "[", "'HMS LINCS ID'", "]", ":", "d", ".", "copy", "(", ")", "for", "d", "in", "prot_data", "}", "assert", "len", "(", "prot_dict", ")", "==", "len", "(", "prot_data", ")", ",", "\"We lost data.\"", "fname", "=", "os", ".", "path", ".", "join", "(", "path", ",", "'lincs_proteins.json'", ")", "with", "open", "(", "fname", ",", "'w'", ")", "as", "fh", ":", "json", ".", "dump", "(", "prot_dict", ",", "fh", ",", "indent", "=", "1", ")" ]
42.357143
0.00165
def pair_SAM_alignments_with_buffer(
        alignments,
        max_buffer_size=30000000,
        primary_only=False):
    '''Iterate over SAM alignments with buffer, position-sorted paired-end

    Args:
        alignments (iterator of SAM/BAM alignments): the alignments to wrap
        max_buffer_size (int): maximal number of alignments to keep in memory.
        primary_only (bool): for each read, consider only the primary line
            (SAM flag 0x900 = 0). The SAM specification requires one and only
            one of those for each read.

    Yields:
        2-tuples with each pair of alignments.
    '''

    almnt_buffer = {}
    ambiguous_pairing_counter = 0
    for almnt in alignments:
        if not almnt.paired_end:
            raise ValueError(
                "Sequence of paired-end alignments expected, but got single-end alignment.")
        if almnt.pe_which == "unknown":
            raise ValueError(
                "Cannot process paired-end alignment found with 'unknown' 'pe_which' status.")
        # FIXME: almnt.not_primary_alignment currently means secondary
        if primary_only and (almnt.not_primary_alignment or almnt.supplementary):
            continue

        matekey = (
            almnt.read.name,
            "second" if almnt.pe_which == "first" else "first",
            almnt.mate_start.chrom if almnt.mate_aligned else None,
            almnt.mate_start.pos if almnt.mate_aligned else None,
            almnt.iv.chrom if almnt.aligned else None,
            almnt.iv.start if almnt.aligned else None,
            -almnt.inferred_insert_size if almnt.aligned and almnt.mate_aligned else None)

        if matekey in almnt_buffer:
            if len(almnt_buffer[matekey]) == 1:
                mate = almnt_buffer[matekey][0]
                del almnt_buffer[matekey]
            else:
                mate = almnt_buffer[matekey].pop(0)
                if ambiguous_pairing_counter == 0:
                    ambiguous_pairing_first_occurance = matekey
                ambiguous_pairing_counter += 1
            if almnt.pe_which == "first":
                yield (almnt, mate)
            else:
                yield (mate, almnt)
        else:
            almntkey = (
                almnt.read.name,
                almnt.pe_which,
                almnt.iv.chrom if almnt.aligned else None,
                almnt.iv.start if almnt.aligned else None,
                almnt.mate_start.chrom if almnt.mate_aligned else None,
                almnt.mate_start.pos if almnt.mate_aligned else None,
                almnt.inferred_insert_size if almnt.aligned and almnt.mate_aligned else None)
            if almntkey not in almnt_buffer:
                almnt_buffer[almntkey] = [almnt]
            else:
                almnt_buffer[almntkey].append(almnt)
            if len(almnt_buffer) > max_buffer_size:
                raise ValueError(
                    "Maximum alignment buffer size exceeded while pairing SAM alignments.")

    if len(almnt_buffer) > 0:
        warnings.warn(
            "Mate records missing for %d records; first such record: %s." %
            (len(almnt_buffer), str(list(almnt_buffer.values())[0][0])))
        for almnt_list in list(almnt_buffer.values()):
            for almnt in almnt_list:
                if almnt.pe_which == "first":
                    yield (almnt, None)
                else:
                    yield (None, almnt)

    if ambiguous_pairing_counter > 0:
        warnings.warn(
            "Mate pairing was ambiguous for %d records; mate key for first such record: %s." %
            (ambiguous_pairing_counter, str(ambiguous_pairing_first_occurance)))
[ "def", "pair_SAM_alignments_with_buffer", "(", "alignments", ",", "max_buffer_size", "=", "30000000", ",", "primary_only", "=", "False", ")", ":", "almnt_buffer", "=", "{", "}", "ambiguous_pairing_counter", "=", "0", "for", "almnt", "in", "alignments", ":", "if", "not", "almnt", ".", "paired_end", ":", "raise", "ValueError", "(", "\"Sequence of paired-end alignments expected, but got single-end alignment.\"", ")", "if", "almnt", ".", "pe_which", "==", "\"unknown\"", ":", "raise", "ValueError", "(", "\"Cannot process paired-end alignment found with 'unknown' 'pe_which' status.\"", ")", "# FIXME: almnt.not_primary_alignment currently means secondary", "if", "primary_only", "and", "(", "almnt", ".", "not_primary_alignment", "or", "almnt", ".", "supplementary", ")", ":", "continue", "matekey", "=", "(", "almnt", ".", "read", ".", "name", ",", "\"second\"", "if", "almnt", ".", "pe_which", "==", "\"first\"", "else", "\"first\"", ",", "almnt", ".", "mate_start", ".", "chrom", "if", "almnt", ".", "mate_aligned", "else", "None", ",", "almnt", ".", "mate_start", ".", "pos", "if", "almnt", ".", "mate_aligned", "else", "None", ",", "almnt", ".", "iv", ".", "chrom", "if", "almnt", ".", "aligned", "else", "None", ",", "almnt", ".", "iv", ".", "start", "if", "almnt", ".", "aligned", "else", "None", ",", "-", "almnt", ".", "inferred_insert_size", "if", "almnt", ".", "aligned", "and", "almnt", ".", "mate_aligned", "else", "None", ")", "if", "matekey", "in", "almnt_buffer", ":", "if", "len", "(", "almnt_buffer", "[", "matekey", "]", ")", "==", "1", ":", "mate", "=", "almnt_buffer", "[", "matekey", "]", "[", "0", "]", "del", "almnt_buffer", "[", "matekey", "]", "else", ":", "mate", "=", "almnt_buffer", "[", "matekey", "]", ".", "pop", "(", "0", ")", "if", "ambiguous_pairing_counter", "==", "0", ":", "ambiguous_pairing_first_occurance", "=", "matekey", "ambiguous_pairing_counter", "+=", "1", "if", "almnt", ".", "pe_which", "==", "\"first\"", ":", "yield", "(", "almnt", ",", "mate", ")", "else", ":", "yield", "(", "mate", ",", "almnt", ")", "else", ":", "almntkey", "=", "(", "almnt", ".", "read", ".", "name", ",", "almnt", ".", "pe_which", ",", "almnt", ".", "iv", ".", "chrom", "if", "almnt", ".", "aligned", "else", "None", ",", "almnt", ".", "iv", ".", "start", "if", "almnt", ".", "aligned", "else", "None", ",", "almnt", ".", "mate_start", ".", "chrom", "if", "almnt", ".", "mate_aligned", "else", "None", ",", "almnt", ".", "mate_start", ".", "pos", "if", "almnt", ".", "mate_aligned", "else", "None", ",", "almnt", ".", "inferred_insert_size", "if", "almnt", ".", "aligned", "and", "almnt", ".", "mate_aligned", "else", "None", ")", "if", "almntkey", "not", "in", "almnt_buffer", ":", "almnt_buffer", "[", "almntkey", "]", "=", "[", "almnt", "]", "else", ":", "almnt_buffer", "[", "almntkey", "]", ".", "append", "(", "almnt", ")", "if", "len", "(", "almnt_buffer", ")", ">", "max_buffer_size", ":", "raise", "ValueError", "(", "\"Maximum alignment buffer size exceeded while pairing SAM alignments.\"", ")", "if", "len", "(", "almnt_buffer", ")", ">", "0", ":", "warnings", ".", "warn", "(", "\"Mate records missing for %d records; first such record: %s.\"", "%", "(", "len", "(", "almnt_buffer", ")", ",", "str", "(", "list", "(", "almnt_buffer", ".", "values", "(", ")", ")", "[", "0", "]", "[", "0", "]", ")", ")", ")", "for", "almnt_list", "in", "list", "(", "almnt_buffer", ".", "values", "(", ")", ")", ":", "for", "almnt", "in", "almnt_list", ":", "if", "almnt", ".", "pe_which", "==", "\"first\"", ":", "yield", "(", 
"almnt", ",", "None", ")", "else", ":", "yield", "(", "None", ",", "almnt", ")", "if", "ambiguous_pairing_counter", ">", "0", ":", "warnings", ".", "warn", "(", "\"Mate pairing was ambiguous for %d records; mate key for first such record: %s.\"", "%", "(", "ambiguous_pairing_counter", ",", "str", "(", "ambiguous_pairing_first_occurance", ")", ")", ")" ]
43.024096
0.002464
def angle_factor(angle, ab, msrc, mrec):
    r"""Return the angle-dependent factor.

    The whole calculation in the wavenumber domain is only a function of the
    distance between the source and the receiver; it is independent of the
    angle. The angle-dependency is this factor, which can be applied to the
    corresponding parts in the wavenumber or in the frequency domain.

    The ``angle_factor`` corresponds to the sine and cosine-functions in Eqs
    105-107, 111-116, 119-121, 123-128.

    This function is called from one of the Hankel functions in
    :mod:`transform`. Consult the modelling routines in :mod:`model` for a
    description of the input and output parameters.

    """
    # 33/66 are completely symmetric and hence independent of angle
    if ab in [33, ]:
        return np.ones(angle.size)

    # Evaluation angle
    eval_angle = angle.copy()

    # Add pi if receiver is magnetic (reciprocity), but not if source is
    # electric, because then source and receiver are swapped, ME => EM:
    # G^me_ab(s, r, e, z) = -G^em_ba(r, s, e, z).
    if mrec and not msrc:
        eval_angle += np.pi

    # Define fct (cos/sin) and angles to be tested
    if ab in [11, 22, 15, 24, 13, 31, 26, 35]:
        fct = np.cos
        test_ang_1 = np.pi/2
        test_ang_2 = 3*np.pi/2
    else:
        fct = np.sin
        test_ang_1 = np.pi
        test_ang_2 = 2*np.pi

    if ab in [11, 22, 15, 24, 12, 21, 14, 25]:
        eval_angle *= 2

    # Get factor
    factAng = fct(eval_angle)

    # Ensure cos([pi/2, 3pi/2]) and sin([pi, 2pi]) are zero (floating pt issue)
    factAng[np.isclose(np.abs(eval_angle), test_ang_1, 1e-10, 1e-14)] = 0
    factAng[np.isclose(np.abs(eval_angle), test_ang_2, 1e-10, 1e-14)] = 0

    return factAng
[ "def", "angle_factor", "(", "angle", ",", "ab", ",", "msrc", ",", "mrec", ")", ":", "# 33/66 are completely symmetric and hence independent of angle", "if", "ab", "in", "[", "33", ",", "]", ":", "return", "np", ".", "ones", "(", "angle", ".", "size", ")", "# Evaluation angle", "eval_angle", "=", "angle", ".", "copy", "(", ")", "# Add pi if receiver is magnetic (reciprocity), but not if source is", "# electric, because then source and receiver are swapped, ME => EM:", "# G^me_ab(s, r, e, z) = -G^em_ba(r, s, e, z).", "if", "mrec", "and", "not", "msrc", ":", "eval_angle", "+=", "np", ".", "pi", "# Define fct (cos/sin) and angles to be tested", "if", "ab", "in", "[", "11", ",", "22", ",", "15", ",", "24", ",", "13", ",", "31", ",", "26", ",", "35", "]", ":", "fct", "=", "np", ".", "cos", "test_ang_1", "=", "np", ".", "pi", "/", "2", "test_ang_2", "=", "3", "*", "np", ".", "pi", "/", "2", "else", ":", "fct", "=", "np", ".", "sin", "test_ang_1", "=", "np", ".", "pi", "test_ang_2", "=", "2", "*", "np", ".", "pi", "if", "ab", "in", "[", "11", ",", "22", ",", "15", ",", "24", ",", "12", ",", "21", ",", "14", ",", "25", "]", ":", "eval_angle", "*=", "2", "# Get factor", "factAng", "=", "fct", "(", "eval_angle", ")", "# Ensure cos([pi/2, 3pi/2]) and sin([pi, 2pi]) are zero (floating pt issue)", "factAng", "[", "np", ".", "isclose", "(", "np", ".", "abs", "(", "eval_angle", ")", ",", "test_ang_1", ",", "1e-10", ",", "1e-14", ")", "]", "=", "0", "factAng", "[", "np", ".", "isclose", "(", "np", ".", "abs", "(", "eval_angle", ")", ",", "test_ang_2", ",", "1e-10", ",", "1e-14", ")", "]", "=", "0", "return", "factAng" ]
33.666667
0.000566
def main(argv=None): """ben-doc entry point""" arguments = cli_common(__doc__, argv=argv) campaign_path = arguments['CAMPAIGN-DIR'] driver = CampaignDriver(campaign_path, expandcampvars=False) with pushd(campaign_path): render( template=arguments['--template'], ostr=arguments['--output'], campaign=driver, ) if argv is not None: return driver
[ "def", "main", "(", "argv", "=", "None", ")", ":", "arguments", "=", "cli_common", "(", "__doc__", ",", "argv", "=", "argv", ")", "campaign_path", "=", "arguments", "[", "'CAMPAIGN-DIR'", "]", "driver", "=", "CampaignDriver", "(", "campaign_path", ",", "expandcampvars", "=", "False", ")", "with", "pushd", "(", "campaign_path", ")", ":", "render", "(", "template", "=", "arguments", "[", "'--template'", "]", ",", "ostr", "=", "arguments", "[", "'--output'", "]", ",", "campaign", "=", "driver", ",", ")", "if", "argv", "is", "not", "None", ":", "return", "driver" ]
31.923077
0.002342
def create_snapshot(self, systemId, snapshotSpecificationObject): """ Create snapshot for list of volumes :param systemID: Cluster ID :param snapshotSpecificationObject: Of class SnapshotSpecification :rtype: SnapshotGroupId """ self.conn.connection._check_login() #try: response = self.conn.connection._do_post("{}/{}{}/{}".format(self.conn.connection._api_url, "instances/System::", systemId, 'action/snapshotVolumes'), json=snapshotSpecificationObject.__to_dict__()) #except: # raise RuntimeError("create_snapshot_by_system_id() - Error communicating with ScaleIO gateway") return response
[ "def", "create_snapshot", "(", "self", ",", "systemId", ",", "snapshotSpecificationObject", ")", ":", "self", ".", "conn", ".", "connection", ".", "_check_login", "(", ")", "#try:", "response", "=", "self", ".", "conn", ".", "connection", ".", "_do_post", "(", "\"{}/{}{}/{}\"", ".", "format", "(", "self", ".", "conn", ".", "connection", ".", "_api_url", ",", "\"instances/System::\"", ",", "systemId", ",", "'action/snapshotVolumes'", ")", ",", "json", "=", "snapshotSpecificationObject", ".", "__to_dict__", "(", ")", ")", "#except:", "# raise RuntimeError(\"create_snapshot_by_system_id() - Error communicating with ScaleIO gateway\")", "return", "response" ]
52.307692
0.010116
def get_digests(self): """ Returns a map of images to their digests """ try: pulp = get_manifests_in_pulp_repository(self.workflow) except KeyError: pulp = None digests = {} # repository -> digests for registry in self.workflow.push_conf.docker_registries: for image in self.workflow.tag_conf.images: image_str = image.to_str() if image_str in registry.digests: image_digests = registry.digests[image_str] if pulp is None: digest_list = [image_digests.default] else: # If Pulp is enabled, only report digests that # were synced into Pulp. This may not be all # of them, depending on whether Pulp has # schema 2 support. digest_list = [digest for digest in (image_digests.v1, image_digests.v2) if digest in pulp] digests[image.to_str(registry=False)] = digest_list return digests
[ "def", "get_digests", "(", "self", ")", ":", "try", ":", "pulp", "=", "get_manifests_in_pulp_repository", "(", "self", ".", "workflow", ")", "except", "KeyError", ":", "pulp", "=", "None", "digests", "=", "{", "}", "# repository -> digests", "for", "registry", "in", "self", ".", "workflow", ".", "push_conf", ".", "docker_registries", ":", "for", "image", "in", "self", ".", "workflow", ".", "tag_conf", ".", "images", ":", "image_str", "=", "image", ".", "to_str", "(", ")", "if", "image_str", "in", "registry", ".", "digests", ":", "image_digests", "=", "registry", ".", "digests", "[", "image_str", "]", "if", "pulp", "is", "None", ":", "digest_list", "=", "[", "image_digests", ".", "default", "]", "else", ":", "# If Pulp is enabled, only report digests that", "# were synced into Pulp. This may not be all", "# of them, depending on whether Pulp has", "# schema 2 support.", "digest_list", "=", "[", "digest", "for", "digest", "in", "(", "image_digests", ".", "v1", ",", "image_digests", ".", "v2", ")", "if", "digest", "in", "pulp", "]", "digests", "[", "image", ".", "to_str", "(", "registry", "=", "False", ")", "]", "=", "digest_list", "return", "digests" ]
40.333333
0.001614
def flatten_dict(dct, separator='-->', allowed_types=[int, float, bool]):
    """Returns a list of string identifiers for each element in dct.

    Recursively scans through dct and finds every element whose type is in
    allowed_types and adds a string identifier for it.

    eg:

    dct = {
        'a': 'a string',
        'b': {
            'c': 1.0,
            'd': True
        }
    }

    flatten_dict(dct) would return

    ['a', 'b-->c', 'b-->d']
    """

    flat_list = []
    for key in sorted(dct):
        if key[:2] == '__':
            continue
        key_type = type(dct[key])
        if key_type in allowed_types:
            flat_list.append(str(key))
        elif key_type is dict:
            sub_list = flatten_dict(dct[key])
            sub_list = [str(key) + separator + sl for sl in sub_list]
            flat_list += sub_list
    return flat_list
[ "def", "flatten_dict", "(", "dct", ",", "separator", "=", "'-->'", ",", "allowed_types", "=", "[", "int", ",", "float", ",", "bool", "]", ")", ":", "flat_list", "=", "[", "]", "for", "key", "in", "sorted", "(", "dct", ")", ":", "if", "key", "[", ":", "2", "]", "==", "'__'", ":", "continue", "key_type", "=", "type", "(", "dct", "[", "key", "]", ")", "if", "key_type", "in", "allowed_types", ":", "flat_list", ".", "append", "(", "str", "(", "key", ")", ")", "elif", "key_type", "is", "dict", ":", "sub_list", "=", "flatten_dict", "(", "dct", "[", "key", "]", ")", "sub_list", "=", "[", "str", "(", "key", ")", "+", "separator", "+", "sl", "for", "sl", "in", "sub_list", "]", "flat_list", "+=", "sub_list", "return", "flat_list" ]
29.1
0.001109
def connect_input(self, spec_name, node, node_input, format=None, **kwargs): # @ReservedAssignment @IgnorePep8 """ Connects a study fileset_spec as an input to the provided node Parameters ---------- spec_name : str Name of the study data spec (or one of the IDs from the iterator nodes, 'subject_id' or 'visit_id') to connect to the node node : arcana.Node The node to connect the input to node_input : str Name of the input on the node to connect the fileset spec to format : FileFormat | None The file format the input is expected in. If it differs from the format in data spec or of study input then an implicit conversion is performed. If None the file format in the data spec is assumed """ if spec_name in self.study.ITERFIELDS: self._iterator_conns[spec_name].append((node, node_input, format)) else: name = self._map_name(spec_name, self._input_map) if name not in self.study.data_spec_names(): raise ArcanaDesignError( "Proposed input '{}' to {} is not a valid spec name ('{}')" .format(name, self._error_msg_loc, "', '".join(self.study.data_spec_names()))) self._input_conns[name].append((node, node_input, format, kwargs))
[ "def", "connect_input", "(", "self", ",", "spec_name", ",", "node", ",", "node_input", ",", "format", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# @ReservedAssignment @IgnorePep8", "if", "spec_name", "in", "self", ".", "study", ".", "ITERFIELDS", ":", "self", ".", "_iterator_conns", "[", "spec_name", "]", ".", "append", "(", "(", "node", ",", "node_input", ",", "format", ")", ")", "else", ":", "name", "=", "self", ".", "_map_name", "(", "spec_name", ",", "self", ".", "_input_map", ")", "if", "name", "not", "in", "self", ".", "study", ".", "data_spec_names", "(", ")", ":", "raise", "ArcanaDesignError", "(", "\"Proposed input '{}' to {} is not a valid spec name ('{}')\"", ".", "format", "(", "name", ",", "self", ".", "_error_msg_loc", ",", "\"', '\"", ".", "join", "(", "self", ".", "study", ".", "data_spec_names", "(", ")", ")", ")", ")", "self", ".", "_input_conns", "[", "name", "]", ".", "append", "(", "(", "node", ",", "node_input", ",", "format", ",", "kwargs", ")", ")" ]
49.103448
0.002066
def push(self, url, title=''): """ Pushes the url into the history stack at the current index. :param url | <str> :return <bool> | changed """ # ignore refreshes of the top level if self.currentUrl() == url or self._blockStack: return False self._blockStack = True self._stack = self._stack[:self._index+1] self._stack.append((nativestring(url), nativestring(title))) over = len(self._stack) - self.maximum() if over > 0: self._stack = self._stack[over:] self._index = len(self._stack) - 1 self.canGoBackChanged.emit(self.canGoBack()) self.canGoForwardChanged.emit(self.canGoForward()) self._blockStack = False return True
[ "def", "push", "(", "self", ",", "url", ",", "title", "=", "''", ")", ":", "# ignore refreshes of the top level", "if", "self", ".", "currentUrl", "(", ")", "==", "url", "or", "self", ".", "_blockStack", ":", "return", "False", "self", ".", "_blockStack", "=", "True", "self", ".", "_stack", "=", "self", ".", "_stack", "[", ":", "self", ".", "_index", "+", "1", "]", "self", ".", "_stack", ".", "append", "(", "(", "nativestring", "(", "url", ")", ",", "nativestring", "(", "title", ")", ")", ")", "over", "=", "len", "(", "self", ".", "_stack", ")", "-", "self", ".", "maximum", "(", ")", "if", "over", ">", "0", ":", "self", ".", "_stack", "=", "self", ".", "_stack", "[", "over", ":", "]", "self", ".", "_index", "=", "len", "(", "self", ".", "_stack", ")", "-", "1", "self", ".", "canGoBackChanged", ".", "emit", "(", "self", ".", "canGoBack", "(", ")", ")", "self", ".", "canGoForwardChanged", ".", "emit", "(", "self", ".", "canGoForward", "(", ")", ")", "self", ".", "_blockStack", "=", "False", "return", "True" ]
30.357143
0.011403
def get_option(file_name, section, option, separator='='): ''' Get value of a key from a section in an ini file. Returns ``None`` if no matching key was found. API Example: .. code-block:: python import salt sc = salt.client.get_local_client() sc.cmd('target', 'ini.get_option', [path_to_ini_file, section_name, option]) CLI Example: .. code-block:: bash salt '*' ini.get_option /path/to/ini section_name option_name ''' inifile = _Ini.get_ini_file(file_name, separator=separator) if section: try: return inifile.get(section, {}).get(option, None) except AttributeError: return None else: return inifile.get(option, None)
[ "def", "get_option", "(", "file_name", ",", "section", ",", "option", ",", "separator", "=", "'='", ")", ":", "inifile", "=", "_Ini", ".", "get_ini_file", "(", "file_name", ",", "separator", "=", "separator", ")", "if", "section", ":", "try", ":", "return", "inifile", ".", "get", "(", "section", ",", "{", "}", ")", ".", "get", "(", "option", ",", "None", ")", "except", "AttributeError", ":", "return", "None", "else", ":", "return", "inifile", ".", "get", "(", "option", ",", "None", ")" ]
26.392857
0.001305
def simxLoadUI(clientID, uiPathAndName, options, operationMode): ''' Please have a look at the function description/documentation in the V-REP user manual ''' count = ct.c_int() uiHandles = ct.POINTER(ct.c_int)() if (sys.version_info[0] == 3) and (type(uiPathAndName) is str): uiPathAndName=uiPathAndName.encode('utf-8') ret = c_LoadUI(clientID, uiPathAndName, options, ct.byref(count), ct.byref(uiHandles), operationMode) handles = [] if ret == 0: for i in range(count.value): handles.append(uiHandles[i]) #free C buffers c_ReleaseBuffer(uiHandles) return ret, handles
[ "def", "simxLoadUI", "(", "clientID", ",", "uiPathAndName", ",", "options", ",", "operationMode", ")", ":", "count", "=", "ct", ".", "c_int", "(", ")", "uiHandles", "=", "ct", ".", "POINTER", "(", "ct", ".", "c_int", ")", "(", ")", "if", "(", "sys", ".", "version_info", "[", "0", "]", "==", "3", ")", "and", "(", "type", "(", "uiPathAndName", ")", "is", "str", ")", ":", "uiPathAndName", "=", "uiPathAndName", ".", "encode", "(", "'utf-8'", ")", "ret", "=", "c_LoadUI", "(", "clientID", ",", "uiPathAndName", ",", "options", ",", "ct", ".", "byref", "(", "count", ")", ",", "ct", ".", "byref", "(", "uiHandles", ")", ",", "operationMode", ")", "handles", "=", "[", "]", "if", "ret", "==", "0", ":", "for", "i", "in", "range", "(", "count", ".", "value", ")", ":", "handles", ".", "append", "(", "uiHandles", "[", "i", "]", ")", "#free C buffers", "c_ReleaseBuffer", "(", "uiHandles", ")", "return", "ret", ",", "handles" ]
33.789474
0.009091
def resolvePublic(self, pubID): """Try to lookup the catalog local reference associated to a public ID in that catalog """ ret = libxml2mod.xmlACatalogResolvePublic(self._o, pubID) return ret
[ "def", "resolvePublic", "(", "self", ",", "pubID", ")", ":", "ret", "=", "libxml2mod", ".", "xmlACatalogResolvePublic", "(", "self", ".", "_o", ",", "pubID", ")", "return", "ret" ]
44.4
0.00885
def _command_list(self): """ build the command list """ cmd = [self.params.binary, "-f", str(self.params.f), "-T", str(self.params.T), "-m", str(self.params.m), "-N", str(self.params.N), "-x", str(self.params.x), "-p", str(self.params.p), "-n", str(self.params.n), "-w", str(self.params.w), "-s", str(self.params.s), ] ## add ougroups if self.params.o: cmd += ["-o"] cmd += [",".join(self.params.o)] return cmd
[ "def", "_command_list", "(", "self", ")", ":", "cmd", "=", "[", "self", ".", "params", ".", "binary", ",", "\"-f\"", ",", "str", "(", "self", ".", "params", ".", "f", ")", ",", "\"-T\"", ",", "str", "(", "self", ".", "params", ".", "T", ")", ",", "\"-m\"", ",", "str", "(", "self", ".", "params", ".", "m", ")", ",", "\"-N\"", ",", "str", "(", "self", ".", "params", ".", "N", ")", ",", "\"-x\"", ",", "str", "(", "self", ".", "params", ".", "x", ")", ",", "\"-p\"", ",", "str", "(", "self", ".", "params", ".", "p", ")", ",", "\"-n\"", ",", "str", "(", "self", ".", "params", ".", "n", ")", ",", "\"-w\"", ",", "str", "(", "self", ".", "params", ".", "w", ")", ",", "\"-s\"", ",", "str", "(", "self", ".", "params", ".", "s", ")", ",", "]", "## add ougroups", "if", "self", ".", "params", ".", "o", ":", "cmd", "+=", "[", "\"-o\"", "]", "cmd", "+=", "[", "\",\"", ".", "join", "(", "self", ".", "params", ".", "o", ")", "]", "return", "cmd" ]
34.388889
0.011006
def somethingFound(self,data,mode="phonefy"): ''' Verifying if something was found. Note that this method needed to be rewritten as in Spoj we need to look for a text which APPEARS instead of looking for a text that does NOT appear. :param data: Data where the self.notFoundText will be searched. :param mode: Mode to be executed. :return: Returns True if exists. ''' #try: for text in self.notFoundText[mode]: if text in data: # This is the change with regards to the standard behaviour! return True return False
[ "def", "somethingFound", "(", "self", ",", "data", ",", "mode", "=", "\"phonefy\"", ")", ":", "#try:", "for", "text", "in", "self", ".", "notFoundText", "[", "mode", "]", ":", "if", "text", "in", "data", ":", "# This is the change with regards to the standard behaviour!", "return", "True", "return", "False" ]
45.066667
0.014493
def _GetTripIndex(self, schedule=None): """Return a list of (trip, index). trip: a Trip object index: an offset in trip.GetStopTimes() """ trip_index = [] for trip, sequence in self._GetTripSequence(schedule): for index, st in enumerate(trip.GetStopTimes()): if st.stop_sequence == sequence: trip_index.append((trip, index)) break else: raise RuntimeError("stop_sequence %d not found in trip_id %s" % sequence, trip.trip_id) return trip_index
[ "def", "_GetTripIndex", "(", "self", ",", "schedule", "=", "None", ")", ":", "trip_index", "=", "[", "]", "for", "trip", ",", "sequence", "in", "self", ".", "_GetTripSequence", "(", "schedule", ")", ":", "for", "index", ",", "st", "in", "enumerate", "(", "trip", ".", "GetStopTimes", "(", ")", ")", ":", "if", "st", ".", "stop_sequence", "==", "sequence", ":", "trip_index", ".", "append", "(", "(", "trip", ",", "index", ")", ")", "break", "else", ":", "raise", "RuntimeError", "(", "\"stop_sequence %d not found in trip_id %s\"", "%", "sequence", ",", "trip", ".", "trip_id", ")", "return", "trip_index" ]
33.1875
0.009158
def anim(self, start=0, stop=None, fps=30): """ Method to return a matplotlib animation. The start and stop frames may be specified as well as the fps. """ figure = self.state or self.initialize_plot() anim = animation.FuncAnimation(figure, self.update_frame, frames=self.keys, interval = 1000.0/fps) # Close the figure handle if self._close_figures: plt.close(figure) return anim
[ "def", "anim", "(", "self", ",", "start", "=", "0", ",", "stop", "=", "None", ",", "fps", "=", "30", ")", ":", "figure", "=", "self", ".", "state", "or", "self", ".", "initialize_plot", "(", ")", "anim", "=", "animation", ".", "FuncAnimation", "(", "figure", ",", "self", ".", "update_frame", ",", "frames", "=", "self", ".", "keys", ",", "interval", "=", "1000.0", "/", "fps", ")", "# Close the figure handle", "if", "self", ".", "_close_figures", ":", "plt", ".", "close", "(", "figure", ")", "return", "anim" ]
43.25
0.009434
def report(self, value): """ Setter for **self.__report** attribute. :param value: Attribute value. :type value: bool """ if value is not None: assert type(value) is bool, "'{0}' attribute: '{1}' type is not 'bool'!".format("report", value) self.__report = value
[ "def", "report", "(", "self", ",", "value", ")", ":", "if", "value", "is", "not", "None", ":", "assert", "type", "(", "value", ")", "is", "bool", ",", "\"'{0}' attribute: '{1}' type is not 'bool'!\"", ".", "format", "(", "\"report\"", ",", "value", ")", "self", ".", "__report", "=", "value" ]
29.272727
0.009036
def _add_input_state(self, node, input_state): """ Add the input state to all successors of the given node. :param node: The node whose successors' input states will be touched. :param input_state: The state that will be added to successors of the node. :return: None """ successors = self._graph_visitor.successors(node) for succ in successors: if succ in self._state_map: self._state_map[succ] = self._merge_states(succ, *([ self._state_map[succ], input_state ])) else: self._state_map[succ] = input_state
[ "def", "_add_input_state", "(", "self", ",", "node", ",", "input_state", ")", ":", "successors", "=", "self", ".", "_graph_visitor", ".", "successors", "(", "node", ")", "for", "succ", "in", "successors", ":", "if", "succ", "in", "self", ".", "_state_map", ":", "self", ".", "_state_map", "[", "succ", "]", "=", "self", ".", "_merge_states", "(", "succ", ",", "*", "(", "[", "self", ".", "_state_map", "[", "succ", "]", ",", "input_state", "]", ")", ")", "else", ":", "self", ".", "_state_map", "[", "succ", "]", "=", "input_state" ]
39.5625
0.010802
async def delete(self, _id=None): """Delete entry from database table. Accepts id. delete(id) => 1 (if exists) delete(id) => {"error":404, "reason":"Not found"} (if does not exist) delete() => {"error":400, "reason":"Missed required fields"} """ if not _id: return {"error":400, "reason":"Missed required fields"} document = await self.collection.find_one({"id": _id}) if not document: return {"error":404, "reason":"Not found"} deleted_count = await self.collection.delete_one( {"id": _id}).deleted_count return deleted_count
[ "async", "def", "delete", "(", "self", ",", "_id", "=", "None", ")", ":", "if", "not", "_id", ":", "return", "{", "\"error\"", ":", "400", ",", "\"reason\"", ":", "\"Missed required fields\"", "}", "document", "=", "await", "self", ".", "collection", ".", "find_one", "(", "{", "\"id\"", ":", "_id", "}", ")", "if", "not", "document", ":", "return", "{", "\"error\"", ":", "404", ",", "\"reason\"", ":", "\"Not found\"", "}", "deleted_count", "=", "await", "self", ".", "collection", ".", "delete_one", "(", "{", "\"id\"", ":", "_id", "}", ")", ".", "deleted_count", "return", "deleted_count" ]
26.52381
0.045061
def _replace_labels(doc): """Really hacky find-and-replace method that modifies one of the sklearn docstrings to change the semantics of labels_ for the subclasses""" lines = doc.splitlines() labelstart, labelend = None, None foundattributes = False for i, line in enumerate(lines): stripped = line.strip() if stripped == 'Attributes': foundattributes = True if foundattributes and not labelstart and stripped.startswith('labels_'): labelstart = len('\n'.join(lines[:i])) + 1 if labelstart and not labelend and stripped == '': labelend = len('\n'.join(lines[:i + 1])) if labelstart is None or labelend is None: return doc replace = '\n'.join([ ' labels_ : list of arrays, each of shape [sequence_length, ]', ' The label of each point is an integer in [0, n_clusters).', '', ]) return doc[:labelstart] + replace + doc[labelend:]
[ "def", "_replace_labels", "(", "doc", ")", ":", "lines", "=", "doc", ".", "splitlines", "(", ")", "labelstart", ",", "labelend", "=", "None", ",", "None", "foundattributes", "=", "False", "for", "i", ",", "line", "in", "enumerate", "(", "lines", ")", ":", "stripped", "=", "line", ".", "strip", "(", ")", "if", "stripped", "==", "'Attributes'", ":", "foundattributes", "=", "True", "if", "foundattributes", "and", "not", "labelstart", "and", "stripped", ".", "startswith", "(", "'labels_'", ")", ":", "labelstart", "=", "len", "(", "'\\n'", ".", "join", "(", "lines", "[", ":", "i", "]", ")", ")", "+", "1", "if", "labelstart", "and", "not", "labelend", "and", "stripped", "==", "''", ":", "labelend", "=", "len", "(", "'\\n'", ".", "join", "(", "lines", "[", ":", "i", "+", "1", "]", ")", ")", "if", "labelstart", "is", "None", "or", "labelend", "is", "None", ":", "return", "doc", "replace", "=", "'\\n'", ".", "join", "(", "[", "' labels_ : list of arrays, each of shape [sequence_length, ]'", ",", "' The label of each point is an integer in [0, n_clusters).'", ",", "''", ",", "]", ")", "return", "doc", "[", ":", "labelstart", "]", "+", "replace", "+", "doc", "[", "labelend", ":", "]" ]
39.833333
0.002043
def makeBaudRatePacket(ID, rate): """ Set baud rate of servo. in: rate - 0: 9600, 1:57600, 2:115200, 3:1Mbps out: write packet """ if rate not in [0, 1, 2, 3]: raise Exception('Packet.makeBaudRatePacket: wrong rate {}'.format(rate)) pkt = makeWritePacket(ID, xl320.XL320_BAUD_RATE, [rate]) return pkt
[ "def", "makeBaudRatePacket", "(", "ID", ",", "rate", ")", ":", "if", "rate", "not", "in", "[", "0", ",", "1", ",", "2", ",", "3", "]", ":", "raise", "Exception", "(", "'Packet.makeBaudRatePacket: wrong rate {}'", ".", "format", "(", "rate", ")", ")", "pkt", "=", "makeWritePacket", "(", "ID", ",", "xl320", ".", "XL320_BAUD_RATE", ",", "[", "rate", "]", ")", "return", "pkt" ]
27.363636
0.032154
def goto_line(self, line, column=0, end_column=0, move=True, word=''): """ Moves the text cursor to the specified position. :param line: Number of the line to go to (0 based) :param column: Optional column number. Default is 0 (start of line). :param move: True to move the cursor. False will return the cursor without setting it on the editor. :param word: Highlight the word, when moving to the line. :return: The new text cursor :rtype: QtGui.QTextCursor """ line = min(line, self.line_count()) text_cursor = self._move_cursor_to(line) if column: text_cursor.movePosition(text_cursor.Right, text_cursor.MoveAnchor, column) if end_column: text_cursor.movePosition(text_cursor.Right, text_cursor.KeepAnchor, end_column) if move: block = text_cursor.block() self.unfold_if_colapsed(block) self._editor.setTextCursor(text_cursor) if self._editor.isVisible(): self._editor.centerCursor() else: self._editor.focus_in.connect( self._editor.center_cursor_on_next_focus) if word and to_text_string(word) in to_text_string(block.text()): self._editor.find(word, QTextDocument.FindCaseSensitively) return text_cursor
[ "def", "goto_line", "(", "self", ",", "line", ",", "column", "=", "0", ",", "end_column", "=", "0", ",", "move", "=", "True", ",", "word", "=", "''", ")", ":", "line", "=", "min", "(", "line", ",", "self", ".", "line_count", "(", ")", ")", "text_cursor", "=", "self", ".", "_move_cursor_to", "(", "line", ")", "if", "column", ":", "text_cursor", ".", "movePosition", "(", "text_cursor", ".", "Right", ",", "text_cursor", ".", "MoveAnchor", ",", "column", ")", "if", "end_column", ":", "text_cursor", ".", "movePosition", "(", "text_cursor", ".", "Right", ",", "text_cursor", ".", "KeepAnchor", ",", "end_column", ")", "if", "move", ":", "block", "=", "text_cursor", ".", "block", "(", ")", "self", ".", "unfold_if_colapsed", "(", "block", ")", "self", ".", "_editor", ".", "setTextCursor", "(", "text_cursor", ")", "if", "self", ".", "_editor", ".", "isVisible", "(", ")", ":", "self", ".", "_editor", ".", "centerCursor", "(", ")", "else", ":", "self", ".", "_editor", ".", "focus_in", ".", "connect", "(", "self", ".", "_editor", ".", "center_cursor_on_next_focus", ")", "if", "word", "and", "to_text_string", "(", "word", ")", "in", "to_text_string", "(", "block", ".", "text", "(", ")", ")", ":", "self", ".", "_editor", ".", "find", "(", "word", ",", "QTextDocument", ".", "FindCaseSensitively", ")", "return", "text_cursor" ]
44.151515
0.001343
def yticks(self): """Compute the yticks labels of this grid_stack, used for plotting the y-axis ticks when visualizing an image \ """ return np.linspace(np.amin(self.grid_stack.regular[:, 0]), np.amax(self.grid_stack.regular[:, 0]), 4)
[ "def", "yticks", "(", "self", ")", ":", "return", "np", ".", "linspace", "(", "np", ".", "amin", "(", "self", ".", "grid_stack", ".", "regular", "[", ":", ",", "0", "]", ")", ",", "np", ".", "amax", "(", "self", ".", "grid_stack", ".", "regular", "[", ":", ",", "0", "]", ")", ",", "4", ")" ]
64
0.015444
def load(): """ Loads the built-in operators into the global test engine. """ for operator in operators: module, symbols = operator[0], operator[1:] path = 'grappa.operators.{}'.format(module) # Dynamically import modules operator = __import__(path, None, None, symbols) # Register operators in the test engine for symbol in symbols: Engine.register(getattr(operator, symbol))
[ "def", "load", "(", ")", ":", "for", "operator", "in", "operators", ":", "module", ",", "symbols", "=", "operator", "[", "0", "]", ",", "operator", "[", "1", ":", "]", "path", "=", "'grappa.operators.{}'", ".", "format", "(", "module", ")", "# Dynamically import modules", "operator", "=", "__import__", "(", "path", ",", "None", ",", "None", ",", "symbols", ")", "# Register operators in the test engine", "for", "symbol", "in", "symbols", ":", "Engine", ".", "register", "(", "getattr", "(", "operator", ",", "symbol", ")", ")" ]
31.5
0.002203
def __deactivate_recipes(self, plugin, *args, **kwargs): """ Deactivates/unregisters all recipes of the current plugin, if this plugin gets deactivated. """ recipes = self.get() for recipe in recipes.keys(): self.unregister(recipe)
[ "def", "__deactivate_recipes", "(", "self", ",", "plugin", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "recipes", "=", "self", ".", "get", "(", ")", "for", "recipe", "in", "recipes", ".", "keys", "(", ")", ":", "self", ".", "unregister", "(", "recipe", ")" ]
39.571429
0.010601