text: string (lengths 75 to 104k)
code_tokens: sequence
avg_line_len: float64 (7.91 to 980)
score: float64 (0 to 0.18)
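Below is a minimal Python sketch of one row under this schema, before the preview rows themselves. The field semantics are assumptions read off the preview (in particular, that avg_line_len is the mean character count per line of the original, un-flattened source); names such as CodeRow and mean_line_length are illustrative only, not part of the dataset.

from dataclasses import dataclass
from typing import List, Optional

@dataclass
class CodeRow:
    # One preview row: source text, its token sequence, and two per-row scalars.
    text: str               # function source (75 to ~104k characters in this split)
    code_tokens: List[str]  # tokenized form of the same source
    avg_line_len: float     # 7.91 to 980 in this split
    score: float            # 0 to 0.18 in this split

def mean_line_length(source: str) -> Optional[float]:
    # Candidate reconstruction of avg_line_len: mean characters per line of the
    # original multi-line source. This is an assumption about how the column was
    # produced, and it can only be checked approximately against the preview,
    # where each sample's text is shown flattened onto a single line.
    lines = source.splitlines()
    if not lines:
        return None
    return sum(len(line) for line in lines) / len(lines)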
def skip_cycles(self) -> int: """The number of cycles dedicated to skips.""" return sum((int(re.sub(r'\D', '', op)) for op in self.skip_tokens))
[ "def", "skip_cycles", "(", "self", ")", "->", "int", ":", "return", "sum", "(", "(", "int", "(", "re", ".", "sub", "(", "r'\\D'", ",", "''", ",", "op", ")", ")", "for", "op", "in", "self", ".", "skip_tokens", ")", ")" ]
52.666667
0.0125
def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None): """Builds the 17x17 resnet block.""" with tf.variable_scope(scope, 'Block17', [net], reuse=reuse): with tf.variable_scope('Branch_0'): tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1') with tf.variable_scope('Branch_1'): tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1') tower_conv1_1 = slim.conv2d(tower_conv1_0, 160, [1, 7], scope='Conv2d_0b_1x7') tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [7, 1], scope='Conv2d_0c_7x1') mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2]) up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None, activation_fn=None, scope='Conv2d_1x1') net += scale * up if activation_fn: net = activation_fn(net) return net
[ "def", "block17", "(", "net", ",", "scale", "=", "1.0", ",", "activation_fn", "=", "tf", ".", "nn", ".", "relu", ",", "scope", "=", "None", ",", "reuse", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "scope", ",", "'Block17'", ",", "[", "net", "]", ",", "reuse", "=", "reuse", ")", ":", "with", "tf", ".", "variable_scope", "(", "'Branch_0'", ")", ":", "tower_conv", "=", "slim", ".", "conv2d", "(", "net", ",", "192", ",", "1", ",", "scope", "=", "'Conv2d_1x1'", ")", "with", "tf", ".", "variable_scope", "(", "'Branch_1'", ")", ":", "tower_conv1_0", "=", "slim", ".", "conv2d", "(", "net", ",", "128", ",", "1", ",", "scope", "=", "'Conv2d_0a_1x1'", ")", "tower_conv1_1", "=", "slim", ".", "conv2d", "(", "tower_conv1_0", ",", "160", ",", "[", "1", ",", "7", "]", ",", "scope", "=", "'Conv2d_0b_1x7'", ")", "tower_conv1_2", "=", "slim", ".", "conv2d", "(", "tower_conv1_1", ",", "192", ",", "[", "7", ",", "1", "]", ",", "scope", "=", "'Conv2d_0c_7x1'", ")", "mixed", "=", "tf", ".", "concat", "(", "axis", "=", "3", ",", "values", "=", "[", "tower_conv", ",", "tower_conv1_2", "]", ")", "up", "=", "slim", ".", "conv2d", "(", "mixed", ",", "net", ".", "get_shape", "(", ")", "[", "3", "]", ",", "1", ",", "normalizer_fn", "=", "None", ",", "activation_fn", "=", "None", ",", "scope", "=", "'Conv2d_1x1'", ")", "net", "+=", "scale", "*", "up", "if", "activation_fn", ":", "net", "=", "activation_fn", "(", "net", ")", "return", "net" ]
50.111111
0.009793
def lognormcdf(x, mu, tau): """Log-normal cumulative density function""" x = np.atleast_1d(x) return np.array( [0.5 * (1 - flib.derf(-(np.sqrt(tau / 2)) * (np.log(y) - mu))) for y in x])
[ "def", "lognormcdf", "(", "x", ",", "mu", ",", "tau", ")", ":", "x", "=", "np", ".", "atleast_1d", "(", "x", ")", "return", "np", ".", "array", "(", "[", "0.5", "*", "(", "1", "-", "flib", ".", "derf", "(", "-", "(", "np", ".", "sqrt", "(", "tau", "/", "2", ")", ")", "*", "(", "np", ".", "log", "(", "y", ")", "-", "mu", ")", ")", ")", "for", "y", "in", "x", "]", ")" ]
40.4
0.009709
def magfit(logfile): '''find best magnetometer offset fit to a log file''' print("Processing log %s" % filename) mlog = mavutil.mavlink_connection(filename, notimestamps=args.notimestamps) flying = False gps_heading = 0.0 data = [] # get the current mag offsets m = mlog.recv_match(type='SENSOR_OFFSETS',condition=args.condition) offsets = vec3(m.mag_ofs_x, m.mag_ofs_y, m.mag_ofs_z) attitude = mlog.recv_match(type='ATTITUDE',condition=args.condition) # now gather all the data while True: m = mlog.recv_match(condition=args.condition) if m is None: break if m.get_type() == "GPS_RAW": # flying if groundspeed more than 5 m/s flying = (m.v > args.minspeed and m.fix_type == 2) gps_heading = m.hdg if m.get_type() == "GPS_RAW_INT": # flying if groundspeed more than 5 m/s flying = (m.vel/100 > args.minspeed and m.fix_type == 3) gps_heading = m.cog/100 if m.get_type() == "ATTITUDE": attitude = m if m.get_type() == "SENSOR_OFFSETS": # update current offsets offsets = vec3(m.mag_ofs_x, m.mag_ofs_y, m.mag_ofs_z) if not flying: continue if m.get_type() == "RAW_IMU": data.append((m.xmag - offsets.x, m.ymag - offsets.y, m.zmag - offsets.z, attitude.roll, attitude.pitch, gps_heading)) print("Extracted %u data points" % len(data)) print("Current offsets: %s" % offsets) ofs2 = fit_data(data) print("Declination estimate: %.1f" % ofs2[-1]) new_offsets = vec3(ofs2[0], ofs2[1], ofs2[2]) a = [[ofs2[3], ofs2[4], ofs2[5]], [ofs2[6], ofs2[7], ofs2[8]], [ofs2[9], ofs2[10], ofs2[11]]] print(a) print("New offsets : %s" % new_offsets)
[ "def", "magfit", "(", "logfile", ")", ":", "print", "(", "\"Processing log %s\"", "%", "filename", ")", "mlog", "=", "mavutil", ".", "mavlink_connection", "(", "filename", ",", "notimestamps", "=", "args", ".", "notimestamps", ")", "flying", "=", "False", "gps_heading", "=", "0.0", "data", "=", "[", "]", "# get the current mag offsets", "m", "=", "mlog", ".", "recv_match", "(", "type", "=", "'SENSOR_OFFSETS'", ",", "condition", "=", "args", ".", "condition", ")", "offsets", "=", "vec3", "(", "m", ".", "mag_ofs_x", ",", "m", ".", "mag_ofs_y", ",", "m", ".", "mag_ofs_z", ")", "attitude", "=", "mlog", ".", "recv_match", "(", "type", "=", "'ATTITUDE'", ",", "condition", "=", "args", ".", "condition", ")", "# now gather all the data", "while", "True", ":", "m", "=", "mlog", ".", "recv_match", "(", "condition", "=", "args", ".", "condition", ")", "if", "m", "is", "None", ":", "break", "if", "m", ".", "get_type", "(", ")", "==", "\"GPS_RAW\"", ":", "# flying if groundspeed more than 5 m/s", "flying", "=", "(", "m", ".", "v", ">", "args", ".", "minspeed", "and", "m", ".", "fix_type", "==", "2", ")", "gps_heading", "=", "m", ".", "hdg", "if", "m", ".", "get_type", "(", ")", "==", "\"GPS_RAW_INT\"", ":", "# flying if groundspeed more than 5 m/s", "flying", "=", "(", "m", ".", "vel", "/", "100", ">", "args", ".", "minspeed", "and", "m", ".", "fix_type", "==", "3", ")", "gps_heading", "=", "m", ".", "cog", "/", "100", "if", "m", ".", "get_type", "(", ")", "==", "\"ATTITUDE\"", ":", "attitude", "=", "m", "if", "m", ".", "get_type", "(", ")", "==", "\"SENSOR_OFFSETS\"", ":", "# update current offsets", "offsets", "=", "vec3", "(", "m", ".", "mag_ofs_x", ",", "m", ".", "mag_ofs_y", ",", "m", ".", "mag_ofs_z", ")", "if", "not", "flying", ":", "continue", "if", "m", ".", "get_type", "(", ")", "==", "\"RAW_IMU\"", ":", "data", ".", "append", "(", "(", "m", ".", "xmag", "-", "offsets", ".", "x", ",", "m", ".", "ymag", "-", "offsets", ".", "y", ",", "m", ".", "zmag", "-", "offsets", ".", "z", ",", "attitude", ".", "roll", ",", "attitude", ".", "pitch", ",", "gps_heading", ")", ")", "print", "(", "\"Extracted %u data points\"", "%", "len", "(", "data", ")", ")", "print", "(", "\"Current offsets: %s\"", "%", "offsets", ")", "ofs2", "=", "fit_data", "(", "data", ")", "print", "(", "\"Declination estimate: %.1f\"", "%", "ofs2", "[", "-", "1", "]", ")", "new_offsets", "=", "vec3", "(", "ofs2", "[", "0", "]", ",", "ofs2", "[", "1", "]", ",", "ofs2", "[", "2", "]", ")", "a", "=", "[", "[", "ofs2", "[", "3", "]", ",", "ofs2", "[", "4", "]", ",", "ofs2", "[", "5", "]", "]", ",", "[", "ofs2", "[", "6", "]", ",", "ofs2", "[", "7", "]", ",", "ofs2", "[", "8", "]", "]", ",", "[", "ofs2", "[", "9", "]", ",", "ofs2", "[", "10", "]", ",", "ofs2", "[", "11", "]", "]", "]", "print", "(", "a", ")", "print", "(", "\"New offsets : %s\"", "%", "new_offsets", ")" ]
37.375
0.002173
def get_metric(self, timestamp): """Get a metric including all current time series. Get a :class:`opencensus.metrics.export.metric.Metric` with one :class:`opencensus.metrics.export.time_series.TimeSeries` for each set of label values with a recorded measurement. Each `TimeSeries` has a single point that represents the last recorded value. :type timestamp: :class:`datetime.datetime` :param timestamp: Recording time to report, usually the current time. :rtype: :class:`opencensus.metrics.export.metric.Metric` or None :return: A converted metric for all current measurements. """ if not self.points: return None with self._points_lock: ts_list = get_timeseries_list(self.points, timestamp) return metric.Metric(self.descriptor, ts_list)
[ "def", "get_metric", "(", "self", ",", "timestamp", ")", ":", "if", "not", "self", ".", "points", ":", "return", "None", "with", "self", ".", "_points_lock", ":", "ts_list", "=", "get_timeseries_list", "(", "self", ".", "points", ",", "timestamp", ")", "return", "metric", ".", "Metric", "(", "self", ".", "descriptor", ",", "ts_list", ")" ]
42.6
0.002296
def list_job(jid, ext_source=None, display_progress=False): ''' List a specific job given by its jid ext_source If provided, specifies which external job cache to use. display_progress : False If ``True``, fire progress events. .. versionadded:: 2015.8.8 CLI Example: .. code-block:: bash salt-run jobs.list_job 20130916125524463507 salt-run jobs.list_job 20130916125524463507 --out=pprint ''' ret = {'jid': jid} mminion = salt.minion.MasterMinion(__opts__) returner = _get_returner(( __opts__['ext_job_cache'], ext_source, __opts__['master_job_cache'] )) if display_progress: __jid_event__.fire_event( {'message': 'Querying returner: {0}'.format(returner)}, 'progress' ) job = mminion.returners['{0}.get_load'.format(returner)](jid) ret.update(_format_jid_instance(jid, job)) ret['Result'] = mminion.returners['{0}.get_jid'.format(returner)](jid) fstr = '{0}.get_endtime'.format(__opts__['master_job_cache']) if (__opts__.get('job_cache_store_endtime') and fstr in mminion.returners): endtime = mminion.returners[fstr](jid) if endtime: ret['EndTime'] = endtime return ret
[ "def", "list_job", "(", "jid", ",", "ext_source", "=", "None", ",", "display_progress", "=", "False", ")", ":", "ret", "=", "{", "'jid'", ":", "jid", "}", "mminion", "=", "salt", ".", "minion", ".", "MasterMinion", "(", "__opts__", ")", "returner", "=", "_get_returner", "(", "(", "__opts__", "[", "'ext_job_cache'", "]", ",", "ext_source", ",", "__opts__", "[", "'master_job_cache'", "]", ")", ")", "if", "display_progress", ":", "__jid_event__", ".", "fire_event", "(", "{", "'message'", ":", "'Querying returner: {0}'", ".", "format", "(", "returner", ")", "}", ",", "'progress'", ")", "job", "=", "mminion", ".", "returners", "[", "'{0}.get_load'", ".", "format", "(", "returner", ")", "]", "(", "jid", ")", "ret", ".", "update", "(", "_format_jid_instance", "(", "jid", ",", "job", ")", ")", "ret", "[", "'Result'", "]", "=", "mminion", ".", "returners", "[", "'{0}.get_jid'", ".", "format", "(", "returner", ")", "]", "(", "jid", ")", "fstr", "=", "'{0}.get_endtime'", ".", "format", "(", "__opts__", "[", "'master_job_cache'", "]", ")", "if", "(", "__opts__", ".", "get", "(", "'job_cache_store_endtime'", ")", "and", "fstr", "in", "mminion", ".", "returners", ")", ":", "endtime", "=", "mminion", ".", "returners", "[", "fstr", "]", "(", "jid", ")", "if", "endtime", ":", "ret", "[", "'EndTime'", "]", "=", "endtime", "return", "ret" ]
28.5
0.000771
def unravel(txt, binding, msgtype="response"): """ Will unpack the received text. Depending on the context the original response may have been transformed before transmission. :param txt: :param binding: :param msgtype: :return: """ # logger.debug("unravel '%s'", txt) if binding not in [BINDING_HTTP_REDIRECT, BINDING_HTTP_POST, BINDING_SOAP, BINDING_URI, BINDING_HTTP_ARTIFACT, None]: raise UnknownBinding("Don't know how to handle '%s'" % binding) else: try: if binding == BINDING_HTTP_REDIRECT: xmlstr = decode_base64_and_inflate(txt) elif binding == BINDING_HTTP_POST: xmlstr = base64.b64decode(txt) elif binding == BINDING_SOAP: func = getattr(soap, "parse_soap_enveloped_saml_%s" % msgtype) xmlstr = func(txt) elif binding == BINDING_HTTP_ARTIFACT: xmlstr = base64.b64decode(txt) else: xmlstr = txt except Exception: raise UnravelError("Unravelling binding '%s' failed" % binding) return xmlstr
[ "def", "unravel", "(", "txt", ",", "binding", ",", "msgtype", "=", "\"response\"", ")", ":", "# logger.debug(\"unravel '%s'\", txt)", "if", "binding", "not", "in", "[", "BINDING_HTTP_REDIRECT", ",", "BINDING_HTTP_POST", ",", "BINDING_SOAP", ",", "BINDING_URI", ",", "BINDING_HTTP_ARTIFACT", ",", "None", "]", ":", "raise", "UnknownBinding", "(", "\"Don't know how to handle '%s'\"", "%", "binding", ")", "else", ":", "try", ":", "if", "binding", "==", "BINDING_HTTP_REDIRECT", ":", "xmlstr", "=", "decode_base64_and_inflate", "(", "txt", ")", "elif", "binding", "==", "BINDING_HTTP_POST", ":", "xmlstr", "=", "base64", ".", "b64decode", "(", "txt", ")", "elif", "binding", "==", "BINDING_SOAP", ":", "func", "=", "getattr", "(", "soap", ",", "\"parse_soap_enveloped_saml_%s\"", "%", "msgtype", ")", "xmlstr", "=", "func", "(", "txt", ")", "elif", "binding", "==", "BINDING_HTTP_ARTIFACT", ":", "xmlstr", "=", "base64", ".", "b64decode", "(", "txt", ")", "else", ":", "xmlstr", "=", "txt", "except", "Exception", ":", "raise", "UnravelError", "(", "\"Unravelling binding '%s' failed\"", "%", "binding", ")", "return", "xmlstr" ]
40.90625
0.001493
def get_upper_triangle(correlation_matrix): ''' Extract upper triangle from a square matrix. Negative values are set to 0. Args: correlation_matrix (pandas df): Correlations between all replicates Returns: upper_tri_df (pandas df): Upper triangle extracted from correlation_matrix; rid is the row index, cid is the column index, corr is the extracted correlation value ''' upper_triangle = correlation_matrix.where(np.triu(np.ones(correlation_matrix.shape), k=1).astype(np.bool)) # convert matrix into long form description upper_tri_df = upper_triangle.stack().reset_index(level=1) upper_tri_df.columns = ['rid', 'corr'] # Index at this point is cid, it now becomes a column upper_tri_df.reset_index(level=0, inplace=True) # Get rid of negative values upper_tri_df['corr'] = upper_tri_df['corr'].clip(lower=0) return upper_tri_df.round(rounding_precision)
[ "def", "get_upper_triangle", "(", "correlation_matrix", ")", ":", "upper_triangle", "=", "correlation_matrix", ".", "where", "(", "np", ".", "triu", "(", "np", ".", "ones", "(", "correlation_matrix", ".", "shape", ")", ",", "k", "=", "1", ")", ".", "astype", "(", "np", ".", "bool", ")", ")", "# convert matrix into long form description", "upper_tri_df", "=", "upper_triangle", ".", "stack", "(", ")", ".", "reset_index", "(", "level", "=", "1", ")", "upper_tri_df", ".", "columns", "=", "[", "'rid'", ",", "'corr'", "]", "# Index at this point is cid, it now becomes a column", "upper_tri_df", ".", "reset_index", "(", "level", "=", "0", ",", "inplace", "=", "True", ")", "# Get rid of negative values", "upper_tri_df", "[", "'corr'", "]", "=", "upper_tri_df", "[", "'corr'", "]", ".", "clip", "(", "lower", "=", "0", ")", "return", "upper_tri_df", ".", "round", "(", "rounding_precision", ")" ]
36.72
0.002123
def popen_uci(cls, command: Union[str, List[str]], *, timeout: Optional[float] = 10.0, debug: bool = False, setpgrp: bool = False, **popen_args: Any) -> "SimpleEngine": """ Spawns and initializes an UCI engine. Returns a :class:`~chess.engine.SimpleEngine` instance. """ return cls.popen(UciProtocol, command, timeout=timeout, debug=debug, setpgrp=setpgrp, **popen_args)
[ "def", "popen_uci", "(", "cls", ",", "command", ":", "Union", "[", "str", ",", "List", "[", "str", "]", "]", ",", "*", ",", "timeout", ":", "Optional", "[", "float", "]", "=", "10.0", ",", "debug", ":", "bool", "=", "False", ",", "setpgrp", ":", "bool", "=", "False", ",", "*", "*", "popen_args", ":", "Any", ")", "->", "\"SimpleEngine\"", ":", "return", "cls", ".", "popen", "(", "UciProtocol", ",", "command", ",", "timeout", "=", "timeout", ",", "debug", "=", "debug", ",", "setpgrp", "=", "setpgrp", ",", "*", "*", "popen_args", ")" ]
67.5
0.009756
def get_ip_address_info(ip_address, cache=None, nameservers=None, timeout=2.0, parallel=False): """ Returns reverse DNS and country information for the given IP address Args: ip_address (str): The IP address to check cache (ExpiringDict): Cache storage nameservers (list): A list of one or more nameservers to use (Cloudflare's public DNS resolvers by default) timeout (float): Sets the DNS timeout in seconds parallel (bool): parallel processing Returns: OrderedDict: ``ip_address``, ``reverse_dns`` """ ip_address = ip_address.lower() if cache: info = cache.get(ip_address, None) if info: return info info = OrderedDict() info["ip_address"] = ip_address reverse_dns = get_reverse_dns(ip_address, nameservers=nameservers, timeout=timeout) country = get_ip_address_country(ip_address, parallel=parallel) info["country"] = country info["reverse_dns"] = reverse_dns info["base_domain"] = None if reverse_dns is not None: base_domain = get_base_domain(reverse_dns) info["base_domain"] = base_domain return info
[ "def", "get_ip_address_info", "(", "ip_address", ",", "cache", "=", "None", ",", "nameservers", "=", "None", ",", "timeout", "=", "2.0", ",", "parallel", "=", "False", ")", ":", "ip_address", "=", "ip_address", ".", "lower", "(", ")", "if", "cache", ":", "info", "=", "cache", ".", "get", "(", "ip_address", ",", "None", ")", "if", "info", ":", "return", "info", "info", "=", "OrderedDict", "(", ")", "info", "[", "\"ip_address\"", "]", "=", "ip_address", "reverse_dns", "=", "get_reverse_dns", "(", "ip_address", ",", "nameservers", "=", "nameservers", ",", "timeout", "=", "timeout", ")", "country", "=", "get_ip_address_country", "(", "ip_address", ",", "parallel", "=", "parallel", ")", "info", "[", "\"country\"", "]", "=", "country", "info", "[", "\"reverse_dns\"", "]", "=", "reverse_dns", "info", "[", "\"base_domain\"", "]", "=", "None", "if", "reverse_dns", "is", "not", "None", ":", "base_domain", "=", "get_base_domain", "(", "reverse_dns", ")", "info", "[", "\"base_domain\"", "]", "=", "base_domain", "return", "info" ]
34.222222
0.000789
def zoomTo(self, bbox): 'set visible area to bbox, maintaining aspectRatio if applicable' self.fixPoint(self.plotviewBox.xymin, bbox.xymin) self.zoomlevel=max(bbox.w/self.canvasBox.w, bbox.h/self.canvasBox.h)
[ "def", "zoomTo", "(", "self", ",", "bbox", ")", ":", "self", ".", "fixPoint", "(", "self", ".", "plotviewBox", ".", "xymin", ",", "bbox", ".", "xymin", ")", "self", ".", "zoomlevel", "=", "max", "(", "bbox", ".", "w", "/", "self", ".", "canvasBox", ".", "w", ",", "bbox", ".", "h", "/", "self", ".", "canvasBox", ".", "h", ")" ]
57.25
0.012931
def transcribe(records, transcribe): """ Perform transcription or back-transcription. transcribe must be one of the following: dna2rna rna2dna """ logging.info('Applying _transcribe generator: ' 'operation to perform is ' + transcribe + '.') for record in records: sequence = str(record.seq) description = record.description name = record.id if transcribe == 'dna2rna': dna = Seq(sequence, IUPAC.ambiguous_dna) rna = dna.transcribe() yield SeqRecord(rna, id=name, description=description) elif transcribe == 'rna2dna': rna = Seq(sequence, IUPAC.ambiguous_rna) dna = rna.back_transcribe() yield SeqRecord(dna, id=name, description=description)
[ "def", "transcribe", "(", "records", ",", "transcribe", ")", ":", "logging", ".", "info", "(", "'Applying _transcribe generator: '", "'operation to perform is '", "+", "transcribe", "+", "'.'", ")", "for", "record", "in", "records", ":", "sequence", "=", "str", "(", "record", ".", "seq", ")", "description", "=", "record", ".", "description", "name", "=", "record", ".", "id", "if", "transcribe", "==", "'dna2rna'", ":", "dna", "=", "Seq", "(", "sequence", ",", "IUPAC", ".", "ambiguous_dna", ")", "rna", "=", "dna", ".", "transcribe", "(", ")", "yield", "SeqRecord", "(", "rna", ",", "id", "=", "name", ",", "description", "=", "description", ")", "elif", "transcribe", "==", "'rna2dna'", ":", "rna", "=", "Seq", "(", "sequence", ",", "IUPAC", ".", "ambiguous_rna", ")", "dna", "=", "rna", ".", "back_transcribe", "(", ")", "yield", "SeqRecord", "(", "dna", ",", "id", "=", "name", ",", "description", "=", "description", ")" ]
37.666667
0.001233
def deleteSNPs(setName) : """deletes a set of polymorphisms""" con = conf.db try : SMaster = SNPMaster(setName = setName) con.beginTransaction() SNPType = SMaster.SNPType con.delete(SNPType, 'setName = ?', (setName,)) SMaster.delete() con.endTransaction() except KeyError : raise KeyError("Can't delete the setName %s because i can't find it in SNPMaster, maybe there's not set by that name" % setName) #~ printf("can't delete the setName %s because i can't find it in SNPMaster, maybe there's no set by that name" % setName) return False return True
[ "def", "deleteSNPs", "(", "setName", ")", ":", "con", "=", "conf", ".", "db", "try", ":", "SMaster", "=", "SNPMaster", "(", "setName", "=", "setName", ")", "con", ".", "beginTransaction", "(", ")", "SNPType", "=", "SMaster", ".", "SNPType", "con", ".", "delete", "(", "SNPType", ",", "'setName = ?'", ",", "(", "setName", ",", ")", ")", "SMaster", ".", "delete", "(", ")", "con", ".", "endTransaction", "(", ")", "except", "KeyError", ":", "raise", "KeyError", "(", "\"Can't delete the setName %s because i can't find it in SNPMaster, maybe there's not set by that name\"", "%", "setName", ")", "#~ printf(\"can't delete the setName %s because i can't find it in SNPMaster, maybe there's no set by that name\" % setName)", "return", "False", "return", "True" ]
37.266667
0.04014
def parse_slab_stats(slab_stats): """Convert output from memcached's `stats slabs` into a Python dict. Newlines are returned by memcached along with carriage returns (i.e. '\r\n'). >>> parse_slab_stats( "STAT 1:chunk_size 96\r\nSTAT 1:chunks_per_page 10922\r\nSTAT " "active_slabs 1\r\nSTAT total_malloced 1048512\r\nEND\r\n") { 'slabs': { 1: { 'chunk_size': 96, 'chunks_per_page': 10922, # ... }, }, 'active_slabs': 1, 'total_malloced': 1048512, } """ stats_dict = {'slabs': defaultdict(lambda: {})} for line in slab_stats.splitlines(): if line == 'END': break # e.g.: "STAT 1:chunks_per_page 10922" cmd, key, value = line.split(' ') if cmd != 'STAT': continue # e.g.: "STAT active_slabs 1" if ":" not in key: stats_dict[key] = int(value) continue slab, key = key.split(':') stats_dict['slabs'][int(slab)][key] = int(value) return stats_dict
[ "def", "parse_slab_stats", "(", "slab_stats", ")", ":", "stats_dict", "=", "{", "'slabs'", ":", "defaultdict", "(", "lambda", ":", "{", "}", ")", "}", "for", "line", "in", "slab_stats", ".", "splitlines", "(", ")", ":", "if", "line", "==", "'END'", ":", "break", "# e.g.: \"STAT 1:chunks_per_page 10922\"", "cmd", ",", "key", ",", "value", "=", "line", ".", "split", "(", "' '", ")", "if", "cmd", "!=", "'STAT'", ":", "continue", "# e.g.: \"STAT active_slabs 1\"", "if", "\":\"", "not", "in", "key", ":", "stats_dict", "[", "key", "]", "=", "int", "(", "value", ")", "continue", "slab", ",", "key", "=", "key", ".", "split", "(", "':'", ")", "stats_dict", "[", "'slabs'", "]", "[", "int", "(", "slab", ")", "]", "[", "key", "]", "=", "int", "(", "value", ")", "return", "stats_dict" ]
28.657895
0.000888
def items(self): "Returns a list of (key, value) pairs as 2-tuples." return (list(self._pb.IntMap.items()) + list(self._pb.StringMap.items()) + list(self._pb.FloatMap.items()) + list(self._pb.BoolMap.items()))
[ "def", "items", "(", "self", ")", ":", "return", "(", "list", "(", "self", ".", "_pb", ".", "IntMap", ".", "items", "(", ")", ")", "+", "list", "(", "self", ".", "_pb", ".", "StringMap", ".", "items", "(", ")", ")", "+", "list", "(", "self", ".", "_pb", ".", "FloatMap", ".", "items", "(", ")", ")", "+", "list", "(", "self", ".", "_pb", ".", "BoolMap", ".", "items", "(", ")", ")", ")" ]
59.5
0.016598
def array_to_schema(arr, **options): """ Generate a JSON schema object with type annotation added for given object. :param arr: Array of mapping objects like dicts :param options: Other keyword options such as: - ac_schema_strict: True if more strict (precise) schema is needed - ac_schema_typemap: Type to JSON schema type mappings :return: Another mapping objects represents JSON schema of items """ (typemap, strict) = _process_options(**options) arr = list(arr) scm = dict(type=typemap[list], items=gen_schema(arr[0] if arr else "str", **options)) if strict: nitems = len(arr) scm["minItems"] = nitems scm["uniqueItems"] = len(set(arr)) == nitems return scm
[ "def", "array_to_schema", "(", "arr", ",", "*", "*", "options", ")", ":", "(", "typemap", ",", "strict", ")", "=", "_process_options", "(", "*", "*", "options", ")", "arr", "=", "list", "(", "arr", ")", "scm", "=", "dict", "(", "type", "=", "typemap", "[", "list", "]", ",", "items", "=", "gen_schema", "(", "arr", "[", "0", "]", "if", "arr", "else", "\"str\"", ",", "*", "*", "options", ")", ")", "if", "strict", ":", "nitems", "=", "len", "(", "arr", ")", "scm", "[", "\"minItems\"", "]", "=", "nitems", "scm", "[", "\"uniqueItems\"", "]", "=", "len", "(", "set", "(", "arr", ")", ")", "==", "nitems", "return", "scm" ]
32.304348
0.001307
def _spin(coordinates, theta, around): """Rotate a set of coordinates in place around an arbitrary vector. Parameters ---------- coordinates : np.ndarray, shape=(n,3), dtype=float The coordinates being spun. theta : float The angle by which to spin the coordinates, in radians. around : np.ndarray, shape=(3,), dtype=float The axis about which to spin the coordinates. """ around = np.asarray(around).reshape(3) if np.array_equal(around, np.zeros(3)): raise ValueError('Cannot spin around a zero vector') center_pos = np.mean(coordinates, axis=0) coordinates -= center_pos coordinates = _rotate(coordinates, theta, around) coordinates += center_pos return coordinates
[ "def", "_spin", "(", "coordinates", ",", "theta", ",", "around", ")", ":", "around", "=", "np", ".", "asarray", "(", "around", ")", ".", "reshape", "(", "3", ")", "if", "np", ".", "array_equal", "(", "around", ",", "np", ".", "zeros", "(", "3", ")", ")", ":", "raise", "ValueError", "(", "'Cannot spin around a zero vector'", ")", "center_pos", "=", "np", ".", "mean", "(", "coordinates", ",", "axis", "=", "0", ")", "coordinates", "-=", "center_pos", "coordinates", "=", "_rotate", "(", "coordinates", ",", "theta", ",", "around", ")", "coordinates", "+=", "center_pos", "return", "coordinates" ]
35.095238
0.001321
def write_data(self, data, dstart=None, swap_axes=True): """Write ``data`` to `file`. Parameters ---------- data : `array-like` Data that should be written to `file`. dstart : non-negative int, optional Offset in bytes of the start position of the written data. If provided, reshaping and axis swapping of ``data`` is skipped. For ``None``, `header_size` is used. swap_axes : bool, optional If ``True``, use the ``'mapc', 'mapr', 'maps'`` header entries to swap the axes in the ``data`` before writing. Use ``False`` only if the data is already consistent with the final axis order. """ if dstart is None: shape = self.data_shape dstart = int(self.header_size) elif dstart < 0: raise ValueError('`dstart` must be non-negative, got {}' ''.format(dstart)) else: shape = -1 dstart = int(dstart) if dstart < self.header_size: raise ValueError('invalid `dstart`, resulting in absolute ' '`dstart` < `header_size` ({} < {})' ''.format(dstart, self.header_size)) data = np.asarray(data, dtype=self.data_dtype).reshape(shape) if swap_axes: # Need to argsort here since `data_axis_order` tells # "which axis comes from where", which is the inverse of what the # `transpose` function needs. data = np.transpose(data, axes=np.argsort(self.data_axis_order)) assert data.shape == self.data_storage_shape data = data.reshape(-1, order='F') self.file.seek(dstart) data.tofile(self.file)
[ "def", "write_data", "(", "self", ",", "data", ",", "dstart", "=", "None", ",", "swap_axes", "=", "True", ")", ":", "if", "dstart", "is", "None", ":", "shape", "=", "self", ".", "data_shape", "dstart", "=", "int", "(", "self", ".", "header_size", ")", "elif", "dstart", "<", "0", ":", "raise", "ValueError", "(", "'`dstart` must be non-negative, got {}'", "''", ".", "format", "(", "dstart", ")", ")", "else", ":", "shape", "=", "-", "1", "dstart", "=", "int", "(", "dstart", ")", "if", "dstart", "<", "self", ".", "header_size", ":", "raise", "ValueError", "(", "'invalid `dstart`, resulting in absolute '", "'`dstart` < `header_size` ({} < {})'", "''", ".", "format", "(", "dstart", ",", "self", ".", "header_size", ")", ")", "data", "=", "np", ".", "asarray", "(", "data", ",", "dtype", "=", "self", ".", "data_dtype", ")", ".", "reshape", "(", "shape", ")", "if", "swap_axes", ":", "# Need to argsort here since `data_axis_order` tells", "# \"which axis comes from where\", which is the inverse of what the", "# `transpose` function needs.", "data", "=", "np", ".", "transpose", "(", "data", ",", "axes", "=", "np", ".", "argsort", "(", "self", ".", "data_axis_order", ")", ")", "assert", "data", ".", "shape", "==", "self", ".", "data_storage_shape", "data", "=", "data", ".", "reshape", "(", "-", "1", ",", "order", "=", "'F'", ")", "self", ".", "file", ".", "seek", "(", "dstart", ")", "data", ".", "tofile", "(", "self", ".", "file", ")" ]
40.613636
0.001093
def num_available_breakpoints(self, arm=False, thumb=False, ram=False, flash=False, hw=False): """Returns the number of available breakpoints of the specified type. If ``arm`` is set, gets the number of available ARM breakpoint units. If ``thumb`` is set, gets the number of available THUMB breakpoint units. If ``ram`` is set, gets the number of available software RAM breakpoint units. If ``flash`` is set, gets the number of available software flash breakpoint units. If ``hw`` is set, gets the number of available hardware breakpoint units. If a combination of the flags is given, then ``num_available_breakpoints()`` returns the number of breakpoints specified by the given flags. If no flags are specified, then the count of available breakpoint units is returned. Args: self (JLink): the ``JLink`` instance arm (bool): Boolean indicating to get number of ARM breakpoints. thumb (bool): Boolean indicating to get number of THUMB breakpoints. ram (bool): Boolean indicating to get number of SW RAM breakpoints. flash (bool): Boolean indicating to get number of Flash breakpoints. hw (bool): Boolean indicating to get number of Hardware breakpoints. Returns: The number of available breakpoint units of the specified type. """ flags = [ enums.JLinkBreakpoint.ARM, enums.JLinkBreakpoint.THUMB, enums.JLinkBreakpoint.SW_RAM, enums.JLinkBreakpoint.SW_FLASH, enums.JLinkBreakpoint.HW ] set_flags = [ arm, thumb, ram, flash, hw ] if not any(set_flags): flags = enums.JLinkBreakpoint.ANY else: flags = list(f for i, f in enumerate(flags) if set_flags[i]) flags = functools.reduce(operator.__or__, flags, 0) return self._dll.JLINKARM_GetNumBPUnits(flags)
[ "def", "num_available_breakpoints", "(", "self", ",", "arm", "=", "False", ",", "thumb", "=", "False", ",", "ram", "=", "False", ",", "flash", "=", "False", ",", "hw", "=", "False", ")", ":", "flags", "=", "[", "enums", ".", "JLinkBreakpoint", ".", "ARM", ",", "enums", ".", "JLinkBreakpoint", ".", "THUMB", ",", "enums", ".", "JLinkBreakpoint", ".", "SW_RAM", ",", "enums", ".", "JLinkBreakpoint", ".", "SW_FLASH", ",", "enums", ".", "JLinkBreakpoint", ".", "HW", "]", "set_flags", "=", "[", "arm", ",", "thumb", ",", "ram", ",", "flash", ",", "hw", "]", "if", "not", "any", "(", "set_flags", ")", ":", "flags", "=", "enums", ".", "JLinkBreakpoint", ".", "ANY", "else", ":", "flags", "=", "list", "(", "f", "for", "i", ",", "f", "in", "enumerate", "(", "flags", ")", "if", "set_flags", "[", "i", "]", ")", "flags", "=", "functools", ".", "reduce", "(", "operator", ".", "__or__", ",", "flags", ",", "0", ")", "return", "self", ".", "_dll", ".", "JLINKARM_GetNumBPUnits", "(", "flags", ")" ]
40.979592
0.001459
def convert(in_file, out_file, in_fmt="", out_fmt=""): """ Converts in_file to out_file, guessing datatype in the absence of in_fmt and out_fmt. Arguments: in_file: The name of the (existing) datafile to read out_file: The name of the file to create with converted data in_fmt: Optional. The format of incoming data, if not guessable out_fmt: Optional. The format of outgoing data, if not guessable Returns: String. Output filename """ # First verify that in_file exists and out_file doesn't. in_file = os.path.expanduser(in_file) out_file = os.path.expanduser(out_file) if not os.path.exists(in_file): raise IOError("Input file {0} does not exist, stopping..." .format(in_file)) # Get formats, either by explicitly naming them or by guessing. # TODO: It'd be neat to check here if an explicit fmt matches the guess. in_fmt = in_fmt.lower() or _guess_format_from_extension( in_file.split('.')[-1].lower()) out_fmt = out_fmt.lower() or _guess_format_from_extension( out_file.split('.')[-1].lower()) if not in_fmt or not out_fmt: raise ValueError("Cannot determine conversion formats.") return False if in_fmt is out_fmt: # This is the case when this module (intended for LONI) is used # indescriminately to 'funnel' data into one format. shutil.copyfileobj(in_file, out_file) return out_file # Import if in_fmt == 'hdf5': from . import hdf5 data = hdf5.load(in_file) elif in_fmt == 'tiff': from . import tiff data = tiff.load(in_file) elif in_fmt == 'png': from . import png data = png.load(in_file) else: return _fail_pair_conversion(in_fmt, out_fmt) # Export if out_fmt == 'hdf5': from . import hdf5 return hdf5.save(out_file, data) elif out_fmt == 'tiff': from . import tiff return tiff.save(out_file, data) elif out_fmt == 'png': from . import png return png.export_png(out_file, data) else: return _fail_pair_conversion(in_fmt, out_fmt) return _fail_pair_conversion(in_fmt, out_fmt)
[ "def", "convert", "(", "in_file", ",", "out_file", ",", "in_fmt", "=", "\"\"", ",", "out_fmt", "=", "\"\"", ")", ":", "# First verify that in_file exists and out_file doesn't.", "in_file", "=", "os", ".", "path", ".", "expanduser", "(", "in_file", ")", "out_file", "=", "os", ".", "path", ".", "expanduser", "(", "out_file", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "in_file", ")", ":", "raise", "IOError", "(", "\"Input file {0} does not exist, stopping...\"", ".", "format", "(", "in_file", ")", ")", "# Get formats, either by explicitly naming them or by guessing.", "# TODO: It'd be neat to check here if an explicit fmt matches the guess.", "in_fmt", "=", "in_fmt", ".", "lower", "(", ")", "or", "_guess_format_from_extension", "(", "in_file", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", ".", "lower", "(", ")", ")", "out_fmt", "=", "out_fmt", ".", "lower", "(", ")", "or", "_guess_format_from_extension", "(", "out_file", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", ".", "lower", "(", ")", ")", "if", "not", "in_fmt", "or", "not", "out_fmt", ":", "raise", "ValueError", "(", "\"Cannot determine conversion formats.\"", ")", "return", "False", "if", "in_fmt", "is", "out_fmt", ":", "# This is the case when this module (intended for LONI) is used", "# indescriminately to 'funnel' data into one format.", "shutil", ".", "copyfileobj", "(", "in_file", ",", "out_file", ")", "return", "out_file", "# Import", "if", "in_fmt", "==", "'hdf5'", ":", "from", ".", "import", "hdf5", "data", "=", "hdf5", ".", "load", "(", "in_file", ")", "elif", "in_fmt", "==", "'tiff'", ":", "from", ".", "import", "tiff", "data", "=", "tiff", ".", "load", "(", "in_file", ")", "elif", "in_fmt", "==", "'png'", ":", "from", ".", "import", "png", "data", "=", "png", ".", "load", "(", "in_file", ")", "else", ":", "return", "_fail_pair_conversion", "(", "in_fmt", ",", "out_fmt", ")", "# Export", "if", "out_fmt", "==", "'hdf5'", ":", "from", ".", "import", "hdf5", "return", "hdf5", ".", "save", "(", "out_file", ",", "data", ")", "elif", "out_fmt", "==", "'tiff'", ":", "from", ".", "import", "tiff", "return", "tiff", ".", "save", "(", "out_file", ",", "data", ")", "elif", "out_fmt", "==", "'png'", ":", "from", ".", "import", "png", "return", "png", ".", "export_png", "(", "out_file", ",", "data", ")", "else", ":", "return", "_fail_pair_conversion", "(", "in_fmt", ",", "out_fmt", ")", "return", "_fail_pair_conversion", "(", "in_fmt", ",", "out_fmt", ")" ]
33.272727
0.000442
def is_first(self, value): """The is_first property. Args: value (string). the property value. """ if value == self._defaults['ai.session.isFirst'] and 'ai.session.isFirst' in self._values: del self._values['ai.session.isFirst'] else: self._values['ai.session.isFirst'] = value
[ "def", "is_first", "(", "self", ",", "value", ")", ":", "if", "value", "==", "self", ".", "_defaults", "[", "'ai.session.isFirst'", "]", "and", "'ai.session.isFirst'", "in", "self", ".", "_values", ":", "del", "self", ".", "_values", "[", "'ai.session.isFirst'", "]", "else", ":", "self", ".", "_values", "[", "'ai.session.isFirst'", "]", "=", "value" ]
35.3
0.01105
def value_nth_person(self, n, array, default = 0): """ Get the value of array for the person whose position in the entity is n. Note that this position is arbitrary, and that members are not sorted. If the nth person does not exist, return ``default`` instead. The result is a vector which dimension is the number of entities. """ self.members.check_array_compatible_with_entity(array) positions = self.members_position nb_persons_per_entity = self.nb_persons() members_map = self.ordered_members_map result = self.filled_array(default, dtype = array.dtype) # For households that have at least n persons, set the result as the value of criteria for the person for which the position is n. # The map is needed b/c the order of the nth persons of each household in the persons vector is not necessarily the same than the household order. result[nb_persons_per_entity > n] = array[members_map][positions[members_map] == n] return result
[ "def", "value_nth_person", "(", "self", ",", "n", ",", "array", ",", "default", "=", "0", ")", ":", "self", ".", "members", ".", "check_array_compatible_with_entity", "(", "array", ")", "positions", "=", "self", ".", "members_position", "nb_persons_per_entity", "=", "self", ".", "nb_persons", "(", ")", "members_map", "=", "self", ".", "ordered_members_map", "result", "=", "self", ".", "filled_array", "(", "default", ",", "dtype", "=", "array", ".", "dtype", ")", "# For households that have at least n persons, set the result as the value of criteria for the person for which the position is n.", "# The map is needed b/c the order of the nth persons of each household in the persons vector is not necessarily the same than the household order.", "result", "[", "nb_persons_per_entity", ">", "n", "]", "=", "array", "[", "members_map", "]", "[", "positions", "[", "members_map", "]", "==", "n", "]", "return", "result" ]
52.75
0.010242
def login_required(function=None, message=None, login_url=None): """ Decorator for views that checks that the user is logged in, redirecting to the log-in page if necessary. """ actual_decorator = user_passes_test( lambda u: u.is_authenticated(), message=message, login_url=login_url ) if function: return actual_decorator(function) return actual_decorator
[ "def", "login_required", "(", "function", "=", "None", ",", "message", "=", "None", ",", "login_url", "=", "None", ")", ":", "actual_decorator", "=", "user_passes_test", "(", "lambda", "u", ":", "u", ".", "is_authenticated", "(", ")", ",", "message", "=", "message", ",", "login_url", "=", "login_url", ")", "if", "function", ":", "return", "actual_decorator", "(", "function", ")", "return", "actual_decorator" ]
29.142857
0.002375
def path_regex(self): """Return the regex for the path to the build folder.""" if self.locale_build: return self.build_list_regex return '%s/' % urljoin(self.build_list_regex, self.builds[self.build_index])
[ "def", "path_regex", "(", "self", ")", ":", "if", "self", ".", "locale_build", ":", "return", "self", ".", "build_list_regex", "return", "'%s/'", "%", "urljoin", "(", "self", ".", "build_list_regex", ",", "self", ".", "builds", "[", "self", ".", "build_index", "]", ")" ]
39.666667
0.012346
def dropzoneAt(self, point): """ Returns the dropzone at the inputed point. :param point | <QPoint> """ for dropzone in self._dropzones: rect = dropzone.rect() if ( rect.contains(point) ): return dropzone return None
[ "def", "dropzoneAt", "(", "self", ",", "point", ")", ":", "for", "dropzone", "in", "self", ".", "_dropzones", ":", "rect", "=", "dropzone", ".", "rect", "(", ")", "if", "(", "rect", ".", "contains", "(", "point", ")", ")", ":", "return", "dropzone", "return", "None" ]
28
0.015723
def on_click(self, button, **kwargs): """ Maps a click event with its associated callback. Currently implemented events are: ============ ================ ========= Event Callback setting Button ID ============ ================ ========= Left click on_leftclick 1 Middle click on_middleclick 2 Right click on_rightclick 3 Scroll up on_upscroll 4 Scroll down on_downscroll 5 Others on_otherclick > 5 ============ ================ ========= The action is determined by the nature (type and value) of the callback setting in the following order: 1. If null callback (``None``), no action is taken. 2. If it's a `python function`, call it and pass any additional arguments. 3. If it's name of a `member method` of current module (string), call it and pass any additional arguments. 4. If the name does not match with `member method` name execute program with such name. .. seealso:: :ref:`callbacks` for more information about callback settings and examples. :param button: The ID of button event received from i3bar. :param kwargs: Further information received from i3bar like the positions of the mouse where the click occured. :return: Returns ``True`` if a valid callback action was executed. ``False`` otherwise. """ actions = ['leftclick', 'middleclick', 'rightclick', 'upscroll', 'downscroll'] try: action = actions[button - 1] except (TypeError, IndexError): self.__log_button_event(button, None, None, "Other button") action = "otherclick" m_click = self.__multi_click with m_click.lock: double = m_click.check_double(button) double_action = 'double%s' % action if double: action = double_action # Get callback function cb = getattr(self, 'on_%s' % action, None) double_handler = getattr(self, 'on_%s' % double_action, None) delay_execution = (not double and double_handler) if delay_execution: m_click.set_timer(button, cb, **kwargs) else: self.__button_callback_handler(button, cb, **kwargs)
[ "def", "on_click", "(", "self", ",", "button", ",", "*", "*", "kwargs", ")", ":", "actions", "=", "[", "'leftclick'", ",", "'middleclick'", ",", "'rightclick'", ",", "'upscroll'", ",", "'downscroll'", "]", "try", ":", "action", "=", "actions", "[", "button", "-", "1", "]", "except", "(", "TypeError", ",", "IndexError", ")", ":", "self", ".", "__log_button_event", "(", "button", ",", "None", ",", "None", ",", "\"Other button\"", ")", "action", "=", "\"otherclick\"", "m_click", "=", "self", ".", "__multi_click", "with", "m_click", ".", "lock", ":", "double", "=", "m_click", ".", "check_double", "(", "button", ")", "double_action", "=", "'double%s'", "%", "action", "if", "double", ":", "action", "=", "double_action", "# Get callback function", "cb", "=", "getattr", "(", "self", ",", "'on_%s'", "%", "action", ",", "None", ")", "double_handler", "=", "getattr", "(", "self", ",", "'on_%s'", "%", "double_action", ",", "None", ")", "delay_execution", "=", "(", "not", "double", "and", "double_handler", ")", "if", "delay_execution", ":", "m_click", ".", "set_timer", "(", "button", ",", "cb", ",", "*", "*", "kwargs", ")", "else", ":", "self", ".", "__button_callback_handler", "(", "button", ",", "cb", ",", "*", "*", "kwargs", ")" ]
37.076923
0.000808
def variations(word): """Create variations of the word based on letter combinations like oo, sh, etc.""" if len(word) == 1: return [[word[0]]] elif word == 'aa': return [['A']] elif word == 'ee': return [['i']] elif word == 'ei': return [['ei']] elif word in ['oo', 'ou']: return [['u']] elif word == 'kha': return [['kha'], ['kh', 'a']] elif word in ['kh', 'gh', 'ch', 'sh', 'zh', 'ck']: return [[word]] elif word in ["'ee", "'ei"]: return [["'i"]] elif word in ["'oo", "'ou"]: return [["'u"]] elif word in ["a'", "e'", "o'", "i'", "u'", "A'"]: return [[word[0] + "'"]] elif word in ["'a", "'e", "'o", "'i", "'u", "'A"]: return [["'" + word[1]]] elif len(word) == 2 and word[0] == word[1]: return [[word[0]]] if word[:2] == 'aa': return [['A'] + i for i in variations(word[2:])] elif word[:2] == 'ee': return [['i'] + i for i in variations(word[2:])] elif word[:2] in ['oo', 'ou']: return [['u'] + i for i in variations(word[2:])] elif word[:3] == 'kha': return \ [['kha'] + i for i in variations(word[3:])] + \ [['kh', 'a'] + i for i in variations(word[3:])] + \ [['k', 'h', 'a'] + i for i in variations(word[3:])] elif word[:2] in ['kh', 'gh', 'ch', 'sh', 'zh', 'ck']: return \ [[word[:2]] + i for i in variations(word[2:])] + \ [[word[0]] + i for i in variations(word[1:])] elif word[:2] in ["a'", "e'", "o'", "i'", "u'", "A'"]: return [[word[:2]] + i for i in variations(word[2:])] elif word[:3] in ["'ee", "'ei"]: return [["'i"] + i for i in variations(word[3:])] elif word[:3] in ["'oo", "'ou"]: return [["'u"] + i for i in variations(word[3:])] elif word[:2] in ["'a", "'e", "'o", "'i", "'u", "'A"]: return [[word[:2]] + i for i in variations(word[2:])] elif len(word) >= 2 and word[0] == word[1]: return [[word[0]] + i for i in variations(word[2:])] else: return [[word[0]] + i for i in variations(word[1:])]
[ "def", "variations", "(", "word", ")", ":", "if", "len", "(", "word", ")", "==", "1", ":", "return", "[", "[", "word", "[", "0", "]", "]", "]", "elif", "word", "==", "'aa'", ":", "return", "[", "[", "'A'", "]", "]", "elif", "word", "==", "'ee'", ":", "return", "[", "[", "'i'", "]", "]", "elif", "word", "==", "'ei'", ":", "return", "[", "[", "'ei'", "]", "]", "elif", "word", "in", "[", "'oo'", ",", "'ou'", "]", ":", "return", "[", "[", "'u'", "]", "]", "elif", "word", "==", "'kha'", ":", "return", "[", "[", "'kha'", "]", ",", "[", "'kh'", ",", "'a'", "]", "]", "elif", "word", "in", "[", "'kh'", ",", "'gh'", ",", "'ch'", ",", "'sh'", ",", "'zh'", ",", "'ck'", "]", ":", "return", "[", "[", "word", "]", "]", "elif", "word", "in", "[", "\"'ee\"", ",", "\"'ei\"", "]", ":", "return", "[", "[", "\"'i\"", "]", "]", "elif", "word", "in", "[", "\"'oo\"", ",", "\"'ou\"", "]", ":", "return", "[", "[", "\"'u\"", "]", "]", "elif", "word", "in", "[", "\"a'\"", ",", "\"e'\"", ",", "\"o'\"", ",", "\"i'\"", ",", "\"u'\"", ",", "\"A'\"", "]", ":", "return", "[", "[", "word", "[", "0", "]", "+", "\"'\"", "]", "]", "elif", "word", "in", "[", "\"'a\"", ",", "\"'e\"", ",", "\"'o\"", ",", "\"'i\"", ",", "\"'u\"", ",", "\"'A\"", "]", ":", "return", "[", "[", "\"'\"", "+", "word", "[", "1", "]", "]", "]", "elif", "len", "(", "word", ")", "==", "2", "and", "word", "[", "0", "]", "==", "word", "[", "1", "]", ":", "return", "[", "[", "word", "[", "0", "]", "]", "]", "if", "word", "[", ":", "2", "]", "==", "'aa'", ":", "return", "[", "[", "'A'", "]", "+", "i", "for", "i", "in", "variations", "(", "word", "[", "2", ":", "]", ")", "]", "elif", "word", "[", ":", "2", "]", "==", "'ee'", ":", "return", "[", "[", "'i'", "]", "+", "i", "for", "i", "in", "variations", "(", "word", "[", "2", ":", "]", ")", "]", "elif", "word", "[", ":", "2", "]", "in", "[", "'oo'", ",", "'ou'", "]", ":", "return", "[", "[", "'u'", "]", "+", "i", "for", "i", "in", "variations", "(", "word", "[", "2", ":", "]", ")", "]", "elif", "word", "[", ":", "3", "]", "==", "'kha'", ":", "return", "[", "[", "'kha'", "]", "+", "i", "for", "i", "in", "variations", "(", "word", "[", "3", ":", "]", ")", "]", "+", "[", "[", "'kh'", ",", "'a'", "]", "+", "i", "for", "i", "in", "variations", "(", "word", "[", "3", ":", "]", ")", "]", "+", "[", "[", "'k'", ",", "'h'", ",", "'a'", "]", "+", "i", "for", "i", "in", "variations", "(", "word", "[", "3", ":", "]", ")", "]", "elif", "word", "[", ":", "2", "]", "in", "[", "'kh'", ",", "'gh'", ",", "'ch'", ",", "'sh'", ",", "'zh'", ",", "'ck'", "]", ":", "return", "[", "[", "word", "[", ":", "2", "]", "]", "+", "i", "for", "i", "in", "variations", "(", "word", "[", "2", ":", "]", ")", "]", "+", "[", "[", "word", "[", "0", "]", "]", "+", "i", "for", "i", "in", "variations", "(", "word", "[", "1", ":", "]", ")", "]", "elif", "word", "[", ":", "2", "]", "in", "[", "\"a'\"", ",", "\"e'\"", ",", "\"o'\"", ",", "\"i'\"", ",", "\"u'\"", ",", "\"A'\"", "]", ":", "return", "[", "[", "word", "[", ":", "2", "]", "]", "+", "i", "for", "i", "in", "variations", "(", "word", "[", "2", ":", "]", ")", "]", "elif", "word", "[", ":", "3", "]", "in", "[", "\"'ee\"", ",", "\"'ei\"", "]", ":", "return", "[", "[", "\"'i\"", "]", "+", "i", "for", "i", "in", "variations", "(", "word", "[", "3", ":", "]", ")", "]", "elif", "word", "[", ":", "3", "]", "in", "[", "\"'oo\"", ",", "\"'ou\"", "]", ":", "return", "[", "[", "\"'u\"", "]", "+", "i", "for", "i", "in", "variations", "(", "word", "[", "3", ":", "]", ")", "]", "elif", "word", "[", ":", "2", 
"]", "in", "[", "\"'a\"", ",", "\"'e\"", ",", "\"'o\"", ",", "\"'i\"", ",", "\"'u\"", ",", "\"'A\"", "]", ":", "return", "[", "[", "word", "[", ":", "2", "]", "]", "+", "i", "for", "i", "in", "variations", "(", "word", "[", "2", ":", "]", ")", "]", "elif", "len", "(", "word", ")", ">=", "2", "and", "word", "[", "0", "]", "==", "word", "[", "1", "]", ":", "return", "[", "[", "word", "[", "0", "]", "]", "+", "i", "for", "i", "in", "variations", "(", "word", "[", "2", ":", "]", ")", "]", "else", ":", "return", "[", "[", "word", "[", "0", "]", "]", "+", "i", "for", "i", "in", "variations", "(", "word", "[", "1", ":", "]", ")", "]" ]
37.571429
0.000463
def tokenizer(text): """A lexical analyzer for the `mwtab` formatted files. :param str text: `mwtab` formatted text. :return: Tuples of data. :rtype: py:class:`~collections.namedtuple` """ stream = deque(text.split("\n")) while len(stream) > 0: line = stream.popleft() if line.startswith("#METABOLOMICS WORKBENCH"): yield KeyValue("#METABOLOMICS WORKBENCH", "\n") yield KeyValue("HEADER", line) for identifier in line.split(" "): if ":" in identifier: key, value = identifier.split(":") yield KeyValue(key, value) elif line.startswith("#ANALYSIS TYPE"): yield KeyValue("HEADER", line) elif line.startswith("#SUBJECT_SAMPLE_FACTORS:"): yield KeyValue("#ENDSECTION", "\n") yield KeyValue("#SUBJECT_SAMPLE_FACTORS", "\n") elif line.startswith("#"): yield KeyValue("#ENDSECTION", "\n") yield KeyValue(line.strip(), "\n") elif line.startswith("SUBJECT_SAMPLE_FACTORS"): key, subject_type, local_sample_id, factors, additional_sample_data = line.split("\t") # factors = [dict([[i.strip() for i in f.split(":")]]) for f in factors.split("|")] yield SubjectSampleFactors(key.strip(), subject_type, local_sample_id, factors, additional_sample_data) elif line.endswith("_START"): yield KeyValue(line, "\n") while not line.endswith("_END"): line = stream.popleft() if line.endswith("_END"): yield KeyValue(line.strip(), "\n") else: data = line.split("\t") yield KeyValue(data[0], tuple(data)) else: if line: if line.startswith("MS:MS_RESULTS_FILE") or line.startswith("NM:NMR_RESULTS_FILE"): try: key, value, extra = line.split("\t") extra_key, extra_value = extra.strip().split(":") yield KeyValueExtra(key.strip()[3:], value, extra_key, extra_value) except ValueError: key, value = line.split("\t") yield KeyValue(key.strip()[3:], value) else: try: key, value = line.split("\t") if ":" in key: if key.startswith("MS_METABOLITE_DATA:UNITS"): yield KeyValue(key.strip(), value) else: yield KeyValue(key.strip()[3:], value) else: yield KeyValue(key.strip(), value) except ValueError: print("LINE WITH ERROR:\n\t", repr(line)) raise yield KeyValue("#ENDSECTION", "\n") yield KeyValue("!#ENDFILE", "\n")
[ "def", "tokenizer", "(", "text", ")", ":", "stream", "=", "deque", "(", "text", ".", "split", "(", "\"\\n\"", ")", ")", "while", "len", "(", "stream", ")", ">", "0", ":", "line", "=", "stream", ".", "popleft", "(", ")", "if", "line", ".", "startswith", "(", "\"#METABOLOMICS WORKBENCH\"", ")", ":", "yield", "KeyValue", "(", "\"#METABOLOMICS WORKBENCH\"", ",", "\"\\n\"", ")", "yield", "KeyValue", "(", "\"HEADER\"", ",", "line", ")", "for", "identifier", "in", "line", ".", "split", "(", "\" \"", ")", ":", "if", "\":\"", "in", "identifier", ":", "key", ",", "value", "=", "identifier", ".", "split", "(", "\":\"", ")", "yield", "KeyValue", "(", "key", ",", "value", ")", "elif", "line", ".", "startswith", "(", "\"#ANALYSIS TYPE\"", ")", ":", "yield", "KeyValue", "(", "\"HEADER\"", ",", "line", ")", "elif", "line", ".", "startswith", "(", "\"#SUBJECT_SAMPLE_FACTORS:\"", ")", ":", "yield", "KeyValue", "(", "\"#ENDSECTION\"", ",", "\"\\n\"", ")", "yield", "KeyValue", "(", "\"#SUBJECT_SAMPLE_FACTORS\"", ",", "\"\\n\"", ")", "elif", "line", ".", "startswith", "(", "\"#\"", ")", ":", "yield", "KeyValue", "(", "\"#ENDSECTION\"", ",", "\"\\n\"", ")", "yield", "KeyValue", "(", "line", ".", "strip", "(", ")", ",", "\"\\n\"", ")", "elif", "line", ".", "startswith", "(", "\"SUBJECT_SAMPLE_FACTORS\"", ")", ":", "key", ",", "subject_type", ",", "local_sample_id", ",", "factors", ",", "additional_sample_data", "=", "line", ".", "split", "(", "\"\\t\"", ")", "# factors = [dict([[i.strip() for i in f.split(\":\")]]) for f in factors.split(\"|\")]", "yield", "SubjectSampleFactors", "(", "key", ".", "strip", "(", ")", ",", "subject_type", ",", "local_sample_id", ",", "factors", ",", "additional_sample_data", ")", "elif", "line", ".", "endswith", "(", "\"_START\"", ")", ":", "yield", "KeyValue", "(", "line", ",", "\"\\n\"", ")", "while", "not", "line", ".", "endswith", "(", "\"_END\"", ")", ":", "line", "=", "stream", ".", "popleft", "(", ")", "if", "line", ".", "endswith", "(", "\"_END\"", ")", ":", "yield", "KeyValue", "(", "line", ".", "strip", "(", ")", ",", "\"\\n\"", ")", "else", ":", "data", "=", "line", ".", "split", "(", "\"\\t\"", ")", "yield", "KeyValue", "(", "data", "[", "0", "]", ",", "tuple", "(", "data", ")", ")", "else", ":", "if", "line", ":", "if", "line", ".", "startswith", "(", "\"MS:MS_RESULTS_FILE\"", ")", "or", "line", ".", "startswith", "(", "\"NM:NMR_RESULTS_FILE\"", ")", ":", "try", ":", "key", ",", "value", ",", "extra", "=", "line", ".", "split", "(", "\"\\t\"", ")", "extra_key", ",", "extra_value", "=", "extra", ".", "strip", "(", ")", ".", "split", "(", "\":\"", ")", "yield", "KeyValueExtra", "(", "key", ".", "strip", "(", ")", "[", "3", ":", "]", ",", "value", ",", "extra_key", ",", "extra_value", ")", "except", "ValueError", ":", "key", ",", "value", "=", "line", ".", "split", "(", "\"\\t\"", ")", "yield", "KeyValue", "(", "key", ".", "strip", "(", ")", "[", "3", ":", "]", ",", "value", ")", "else", ":", "try", ":", "key", ",", "value", "=", "line", ".", "split", "(", "\"\\t\"", ")", "if", "\":\"", "in", "key", ":", "if", "key", ".", "startswith", "(", "\"MS_METABOLITE_DATA:UNITS\"", ")", ":", "yield", "KeyValue", "(", "key", ".", "strip", "(", ")", ",", "value", ")", "else", ":", "yield", "KeyValue", "(", "key", ".", "strip", "(", ")", "[", "3", ":", "]", ",", "value", ")", "else", ":", "yield", "KeyValue", "(", "key", ".", "strip", "(", ")", ",", "value", ")", "except", "ValueError", ":", "print", "(", "\"LINE WITH ERROR:\\n\\t\"", ",", "repr", "(", "line", ")", ")", "raise", 
"yield", "KeyValue", "(", "\"#ENDSECTION\"", ",", "\"\\n\"", ")", "yield", "KeyValue", "(", "\"!#ENDFILE\"", ",", "\"\\n\"", ")" ]
39.466667
0.001978
def getAttributeName(self, name): '''represents the aname ''' if self.func_aname is None: return name assert callable(self.func_aname), \ 'expecting callable method for attribute func_aname, not %s' %type(self.func_aname) f = self.func_aname return f(name)
[ "def", "getAttributeName", "(", "self", ",", "name", ")", ":", "if", "self", ".", "func_aname", "is", "None", ":", "return", "name", "assert", "callable", "(", "self", ".", "func_aname", ")", ",", "'expecting callable method for attribute func_aname, not %s'", "%", "type", "(", "self", ".", "func_aname", ")", "f", "=", "self", ".", "func_aname", "return", "f", "(", "name", ")" ]
35.555556
0.012195
def create_shell(console, manage_dict=None, extra_vars=None, exit_hooks=None): """Creates the shell""" manage_dict = manage_dict or MANAGE_DICT _vars = globals() _vars.update(locals()) auto_imported = import_objects(manage_dict) if extra_vars: auto_imported.update(extra_vars) _vars.update(auto_imported) msgs = [] if manage_dict['shell']['banner']['enabled']: msgs.append( manage_dict['shell']['banner']['message'].format(**manage_dict) ) if auto_imported and manage_dict['shell']['auto_import']['display']: auto_imported_names = [ key for key in auto_imported.keys() if key not in ['__builtins__', 'builtins'] ] msgs.append('\tAuto imported: {0}\n'.format(auto_imported_names)) banner_msg = u'\n'.join(msgs) exec_init(manage_dict, _vars) exec_init_script(manage_dict, _vars) atexit_functions = [ import_string(func_name) for func_name in manage_dict['shell'].get('exit_hooks', []) ] atexit_functions += exit_hooks or [] for atexit_function in atexit_functions: atexit.register(atexit_function) if console == 'ptpython': try: from ptpython.repl import embed embed({}, _vars) except ImportError: click.echo("ptpython is not installed!") return if console == 'bpython': try: from bpython import embed embed(locals_=_vars, banner=banner_msg) except ImportError: click.echo("bpython is not installed!") return try: if console == 'ipython': from IPython import start_ipython from traitlets.config import Config c = Config() c.TerminalInteractiveShell.banner2 = banner_msg c.InteractiveShellApp.extensions = [ extension for extension in manage_dict['shell'].get('ipython_extensions', []) ] c.InteractiveShellApp.exec_lines = [ exec_line for exec_line in manage_dict['shell'].get('ipython_exec_lines', []) ] if manage_dict['shell'].get('ipython_auto_reload', True) is True: c.InteractiveShellApp.extensions.append('autoreload') c.InteractiveShellApp.exec_lines.append('%autoreload 2') start_ipython(argv=[], user_ns=_vars, config=c) else: raise ImportError except ImportError: if manage_dict['shell']['readline_enabled']: import readline import rlcompleter readline.set_completer(rlcompleter.Completer(_vars).complete) readline.parse_and_bind('tab: complete') shell = code.InteractiveConsole(_vars) shell.interact(banner=banner_msg)
[ "def", "create_shell", "(", "console", ",", "manage_dict", "=", "None", ",", "extra_vars", "=", "None", ",", "exit_hooks", "=", "None", ")", ":", "manage_dict", "=", "manage_dict", "or", "MANAGE_DICT", "_vars", "=", "globals", "(", ")", "_vars", ".", "update", "(", "locals", "(", ")", ")", "auto_imported", "=", "import_objects", "(", "manage_dict", ")", "if", "extra_vars", ":", "auto_imported", ".", "update", "(", "extra_vars", ")", "_vars", ".", "update", "(", "auto_imported", ")", "msgs", "=", "[", "]", "if", "manage_dict", "[", "'shell'", "]", "[", "'banner'", "]", "[", "'enabled'", "]", ":", "msgs", ".", "append", "(", "manage_dict", "[", "'shell'", "]", "[", "'banner'", "]", "[", "'message'", "]", ".", "format", "(", "*", "*", "manage_dict", ")", ")", "if", "auto_imported", "and", "manage_dict", "[", "'shell'", "]", "[", "'auto_import'", "]", "[", "'display'", "]", ":", "auto_imported_names", "=", "[", "key", "for", "key", "in", "auto_imported", ".", "keys", "(", ")", "if", "key", "not", "in", "[", "'__builtins__'", ",", "'builtins'", "]", "]", "msgs", ".", "append", "(", "'\\tAuto imported: {0}\\n'", ".", "format", "(", "auto_imported_names", ")", ")", "banner_msg", "=", "u'\\n'", ".", "join", "(", "msgs", ")", "exec_init", "(", "manage_dict", ",", "_vars", ")", "exec_init_script", "(", "manage_dict", ",", "_vars", ")", "atexit_functions", "=", "[", "import_string", "(", "func_name", ")", "for", "func_name", "in", "manage_dict", "[", "'shell'", "]", ".", "get", "(", "'exit_hooks'", ",", "[", "]", ")", "]", "atexit_functions", "+=", "exit_hooks", "or", "[", "]", "for", "atexit_function", "in", "atexit_functions", ":", "atexit", ".", "register", "(", "atexit_function", ")", "if", "console", "==", "'ptpython'", ":", "try", ":", "from", "ptpython", ".", "repl", "import", "embed", "embed", "(", "{", "}", ",", "_vars", ")", "except", "ImportError", ":", "click", ".", "echo", "(", "\"ptpython is not installed!\"", ")", "return", "if", "console", "==", "'bpython'", ":", "try", ":", "from", "bpython", "import", "embed", "embed", "(", "locals_", "=", "_vars", ",", "banner", "=", "banner_msg", ")", "except", "ImportError", ":", "click", ".", "echo", "(", "\"bpython is not installed!\"", ")", "return", "try", ":", "if", "console", "==", "'ipython'", ":", "from", "IPython", "import", "start_ipython", "from", "traitlets", ".", "config", "import", "Config", "c", "=", "Config", "(", ")", "c", ".", "TerminalInteractiveShell", ".", "banner2", "=", "banner_msg", "c", ".", "InteractiveShellApp", ".", "extensions", "=", "[", "extension", "for", "extension", "in", "manage_dict", "[", "'shell'", "]", ".", "get", "(", "'ipython_extensions'", ",", "[", "]", ")", "]", "c", ".", "InteractiveShellApp", ".", "exec_lines", "=", "[", "exec_line", "for", "exec_line", "in", "manage_dict", "[", "'shell'", "]", ".", "get", "(", "'ipython_exec_lines'", ",", "[", "]", ")", "]", "if", "manage_dict", "[", "'shell'", "]", ".", "get", "(", "'ipython_auto_reload'", ",", "True", ")", "is", "True", ":", "c", ".", "InteractiveShellApp", ".", "extensions", ".", "append", "(", "'autoreload'", ")", "c", ".", "InteractiveShellApp", ".", "exec_lines", ".", "append", "(", "'%autoreload 2'", ")", "start_ipython", "(", "argv", "=", "[", "]", ",", "user_ns", "=", "_vars", ",", "config", "=", "c", ")", "else", ":", "raise", "ImportError", "except", "ImportError", ":", "if", "manage_dict", "[", "'shell'", "]", "[", "'readline_enabled'", "]", ":", "import", "readline", "import", "rlcompleter", "readline", ".", "set_completer", "(", 
"rlcompleter", ".", "Completer", "(", "_vars", ")", ".", "complete", ")", "readline", ".", "parse_and_bind", "(", "'tab: complete'", ")", "shell", "=", "code", ".", "InteractiveConsole", "(", "_vars", ")", "shell", ".", "interact", "(", "banner", "=", "banner_msg", ")" ]
35.679487
0.00035
def pt2leaf(self, x):
    """
    Get the leaf whose domain contains x.
    """
    if self.leafnode:
        return self
    else:
        if x[self.split_dim] < self.split_value:
            return self.lower.pt2leaf(x)
        else:
            return self.greater.pt2leaf(x)
[ "def", "pt2leaf", "(", "self", ",", "x", ")", ":", "if", "self", ".", "leafnode", ":", "return", "self", "else", ":", "if", "x", "[", "self", ".", "split_dim", "]", "<", "self", ".", "split_value", ":", "return", "self", ".", "lower", ".", "pt2leaf", "(", "x", ")", "else", ":", "return", "self", ".", "greater", ".", "pt2leaf", "(", "x", ")" ]
26.333333
0.009174
def cmd_init_pull_from_cloud(args): """Initiate the local catalog by downloading the cloud catalog""" (lcat, ccat) = (args.local_catalog, args.cloud_catalog) logging.info("[init-pull-from-cloud]: %s => %s"%(ccat, lcat)) if isfile(lcat): args.error("[init-pull-from-cloud] The local catalog already exist: %s"%lcat) if not isfile(ccat): args.error("[init-pull-from-cloud] The cloud catalog does not exist: %s"%ccat) (lmeta, cmeta) = ("%s.lrcloud"%lcat, "%s.lrcloud"%ccat) if isfile(lmeta): args.error("[init-pull-from-cloud] The local meta-data already exist: %s"%lmeta) if not isfile(cmeta): args.error("[init-pull-from-cloud] The cloud meta-data does not exist: %s"%cmeta) #Let's "lock" the local catalog logging.info("Locking local catalog: %s"%(lcat)) if not lock_file(lcat): raise RuntimeError("The catalog %s is locked!"%lcat) #Copy base from cloud to local util.copy(ccat, lcat) #Apply changesets cloudDAG = ChangesetDAG(ccat) path = cloudDAG.path(cloudDAG.root.hash, cloudDAG.leafs[0].hash) util.apply_changesets(args, path, lcat) # Write meta-data both to local and cloud mfile = MetaFile(lmeta) utcnow = datetime.utcnow().strftime(DATETIME_FORMAT)[:-4] mfile['catalog']['hash'] = hashsum(lcat) mfile['catalog']['modification_utc'] = utcnow mfile['catalog']['filename'] = lcat mfile['last_push']['filename'] = cloudDAG.leafs[0].mfile['changeset']['filename'] mfile['last_push']['hash'] = cloudDAG.leafs[0].mfile['changeset']['hash'] mfile['last_push']['modification_utc'] = cloudDAG.leafs[0].mfile['changeset']['modification_utc'] mfile.flush() #Let's copy Smart Previews if not args.no_smart_previews: copy_smart_previews(lcat, ccat, local2cloud=False) #Finally, let's unlock the catalog files logging.info("Unlocking local catalog: %s"%(lcat)) unlock_file(lcat) logging.info("[init-pull-from-cloud]: Success!")
[ "def", "cmd_init_pull_from_cloud", "(", "args", ")", ":", "(", "lcat", ",", "ccat", ")", "=", "(", "args", ".", "local_catalog", ",", "args", ".", "cloud_catalog", ")", "logging", ".", "info", "(", "\"[init-pull-from-cloud]: %s => %s\"", "%", "(", "ccat", ",", "lcat", ")", ")", "if", "isfile", "(", "lcat", ")", ":", "args", ".", "error", "(", "\"[init-pull-from-cloud] The local catalog already exist: %s\"", "%", "lcat", ")", "if", "not", "isfile", "(", "ccat", ")", ":", "args", ".", "error", "(", "\"[init-pull-from-cloud] The cloud catalog does not exist: %s\"", "%", "ccat", ")", "(", "lmeta", ",", "cmeta", ")", "=", "(", "\"%s.lrcloud\"", "%", "lcat", ",", "\"%s.lrcloud\"", "%", "ccat", ")", "if", "isfile", "(", "lmeta", ")", ":", "args", ".", "error", "(", "\"[init-pull-from-cloud] The local meta-data already exist: %s\"", "%", "lmeta", ")", "if", "not", "isfile", "(", "cmeta", ")", ":", "args", ".", "error", "(", "\"[init-pull-from-cloud] The cloud meta-data does not exist: %s\"", "%", "cmeta", ")", "#Let's \"lock\" the local catalog", "logging", ".", "info", "(", "\"Locking local catalog: %s\"", "%", "(", "lcat", ")", ")", "if", "not", "lock_file", "(", "lcat", ")", ":", "raise", "RuntimeError", "(", "\"The catalog %s is locked!\"", "%", "lcat", ")", "#Copy base from cloud to local", "util", ".", "copy", "(", "ccat", ",", "lcat", ")", "#Apply changesets", "cloudDAG", "=", "ChangesetDAG", "(", "ccat", ")", "path", "=", "cloudDAG", ".", "path", "(", "cloudDAG", ".", "root", ".", "hash", ",", "cloudDAG", ".", "leafs", "[", "0", "]", ".", "hash", ")", "util", ".", "apply_changesets", "(", "args", ",", "path", ",", "lcat", ")", "# Write meta-data both to local and cloud", "mfile", "=", "MetaFile", "(", "lmeta", ")", "utcnow", "=", "datetime", ".", "utcnow", "(", ")", ".", "strftime", "(", "DATETIME_FORMAT", ")", "[", ":", "-", "4", "]", "mfile", "[", "'catalog'", "]", "[", "'hash'", "]", "=", "hashsum", "(", "lcat", ")", "mfile", "[", "'catalog'", "]", "[", "'modification_utc'", "]", "=", "utcnow", "mfile", "[", "'catalog'", "]", "[", "'filename'", "]", "=", "lcat", "mfile", "[", "'last_push'", "]", "[", "'filename'", "]", "=", "cloudDAG", ".", "leafs", "[", "0", "]", ".", "mfile", "[", "'changeset'", "]", "[", "'filename'", "]", "mfile", "[", "'last_push'", "]", "[", "'hash'", "]", "=", "cloudDAG", ".", "leafs", "[", "0", "]", ".", "mfile", "[", "'changeset'", "]", "[", "'hash'", "]", "mfile", "[", "'last_push'", "]", "[", "'modification_utc'", "]", "=", "cloudDAG", ".", "leafs", "[", "0", "]", ".", "mfile", "[", "'changeset'", "]", "[", "'modification_utc'", "]", "mfile", ".", "flush", "(", ")", "#Let's copy Smart Previews", "if", "not", "args", ".", "no_smart_previews", ":", "copy_smart_previews", "(", "lcat", ",", "ccat", ",", "local2cloud", "=", "False", ")", "#Finally, let's unlock the catalog files", "logging", ".", "info", "(", "\"Unlocking local catalog: %s\"", "%", "(", "lcat", ")", ")", "unlock_file", "(", "lcat", ")", "logging", ".", "info", "(", "\"[init-pull-from-cloud]: Success!\"", ")" ]
39.22
0.010945
def avail_locations(call=None):
    '''
    List all available locations
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_locations function must be called with '
            '-f or --function, or with the --list-locations option'
        )

    ret = {}
    conn = get_conn(service='SoftLayer_Product_Package')

    locations = conn.getLocations(id=50)
    for location in locations:
        ret[location['id']] = {
            'id': location['id'],
            'name': location['name'],
            'location': location['longName'],
        }

    available = conn.getAvailableLocations(id=50)
    for location in available:
        if location.get('isAvailable', 0) == 0:
            continue
        ret[location['locationId']]['available'] = True

    return ret
[ "def", "avail_locations", "(", "call", "=", "None", ")", ":", "if", "call", "==", "'action'", ":", "raise", "SaltCloudSystemExit", "(", "'The avail_locations function must be called with '", "'-f or --function, or with the --list-locations option'", ")", "ret", "=", "{", "}", "conn", "=", "get_conn", "(", "service", "=", "'SoftLayer_Product_Package'", ")", "locations", "=", "conn", ".", "getLocations", "(", "id", "=", "50", ")", "for", "location", "in", "locations", ":", "ret", "[", "location", "[", "'id'", "]", "]", "=", "{", "'id'", ":", "location", "[", "'id'", "]", ",", "'name'", ":", "location", "[", "'name'", "]", ",", "'location'", ":", "location", "[", "'longName'", "]", ",", "}", "available", "=", "conn", ".", "getAvailableLocations", "(", "id", "=", "50", ")", "for", "location", "in", "available", ":", "if", "location", ".", "get", "(", "'isAvailable'", ",", "0", ")", "is", "0", ":", "continue", "ret", "[", "location", "[", "'locationId'", "]", "]", "[", "'available'", "]", "=", "True", "return", "ret" ]
27.928571
0.001236
def _CamelCaseToSnakeCase(path_name): """Converts a field name from camelCase to snake_case.""" result = [] for c in path_name: if c == '_': raise ParseError('Fail to parse FieldMask: Path name ' '{0} must not contain "_"s.'.format(path_name)) if c.isupper(): result += '_' result += c.lower() else: result += c return ''.join(result)
[ "def", "_CamelCaseToSnakeCase", "(", "path_name", ")", ":", "result", "=", "[", "]", "for", "c", "in", "path_name", ":", "if", "c", "==", "'_'", ":", "raise", "ParseError", "(", "'Fail to parse FieldMask: Path name '", "'{0} must not contain \"_\"s.'", ".", "format", "(", "path_name", ")", ")", "if", "c", ".", "isupper", "(", ")", ":", "result", "+=", "'_'", "result", "+=", "c", ".", "lower", "(", ")", "else", ":", "result", "+=", "c", "return", "''", ".", "join", "(", "result", ")" ]
29.923077
0.022444
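A quick self-contained check of the camelCase-to-snake_case loop above. The helper below mirrors the same logic but raises ValueError instead of the protobuf ParseError, an assumption made only so the sketch runs without protobuf installed.

def camel_to_snake(path_name):
    # Dependency-free mirror of the conversion loop above.
    result = []
    for c in path_name:
        if c == '_':
            raise ValueError('Path name {0} must not contain "_"s.'.format(path_name))
        if c.isupper():
            result.append('_')
            result.append(c.lower())
        else:
            result.append(c)
    return ''.join(result)

assert camel_to_snake('fooBarBaz') == 'foo_bar_baz'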
def _is_valid_url(url):
    """ Helper function to validate that a URL is well formed, i.e. that it contains a valid
    protocol and a valid domain. It does not actually check whether the URL exists
    """
    try:
        parsed = urlparse(url)
        mandatory_parts = [parsed.scheme, parsed.netloc]
        return all(mandatory_parts)
    except:
        return False
[ "def", "_is_valid_url", "(", "url", ")", ":", "try", ":", "parsed", "=", "urlparse", "(", "url", ")", "mandatory_parts", "=", "[", "parsed", ".", "scheme", ",", "parsed", ".", "netloc", "]", "return", "all", "(", "mandatory_parts", ")", "except", ":", "return", "False" ]
39.8
0.012285
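The validator above hinges on urlparse returning a non-empty scheme and netloc; a minimal standard-library check of that behaviour (nothing beyond urllib is assumed):

from urllib.parse import urlparse

# Well formed: both scheme and netloc are present, so all([...]) is truthy.
assert all([urlparse('https://example.com/page').scheme,
            urlparse('https://example.com/page').netloc])
# Missing scheme: netloc stays empty, so the same check fails.
assert not all([urlparse('example.com/page').scheme,
                urlparse('example.com/page').netloc])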
async def fetch(self, method, url, params=None, headers=None, data=None): """Make an HTTP request. Automatically uses configured HTTP proxy, and adds Google authorization header and cookies. Failures will be retried MAX_RETRIES times before raising NetworkError. Args: method (str): Request method. url (str): Request URL. params (dict): (optional) Request query string parameters. headers (dict): (optional) Request headers. data: (str): (optional) Request body data. Returns: FetchResponse: Response data. Raises: NetworkError: If the request fails. """ logger.debug('Sending request %s %s:\n%r', method, url, data) for retry_num in range(MAX_RETRIES): try: async with self.fetch_raw(method, url, params=params, headers=headers, data=data) as res: async with async_timeout.timeout(REQUEST_TIMEOUT): body = await res.read() logger.debug('Received response %d %s:\n%r', res.status, res.reason, body) except asyncio.TimeoutError: error_msg = 'Request timed out' except aiohttp.ServerDisconnectedError as err: error_msg = 'Server disconnected error: {}'.format(err) except (aiohttp.ClientError, ValueError) as err: error_msg = 'Request connection error: {}'.format(err) else: break logger.info('Request attempt %d failed: %s', retry_num, error_msg) else: logger.info('Request failed after %d attempts', MAX_RETRIES) raise exceptions.NetworkError(error_msg) if res.status != 200: logger.info('Request returned unexpected status: %d %s', res.status, res.reason) raise exceptions.NetworkError( 'Request return unexpected status: {}: {}' .format(res.status, res.reason) ) return FetchResponse(res.status, body)
[ "async", "def", "fetch", "(", "self", ",", "method", ",", "url", ",", "params", "=", "None", ",", "headers", "=", "None", ",", "data", "=", "None", ")", ":", "logger", ".", "debug", "(", "'Sending request %s %s:\\n%r'", ",", "method", ",", "url", ",", "data", ")", "for", "retry_num", "in", "range", "(", "MAX_RETRIES", ")", ":", "try", ":", "async", "with", "self", ".", "fetch_raw", "(", "method", ",", "url", ",", "params", "=", "params", ",", "headers", "=", "headers", ",", "data", "=", "data", ")", "as", "res", ":", "async", "with", "async_timeout", ".", "timeout", "(", "REQUEST_TIMEOUT", ")", ":", "body", "=", "await", "res", ".", "read", "(", ")", "logger", ".", "debug", "(", "'Received response %d %s:\\n%r'", ",", "res", ".", "status", ",", "res", ".", "reason", ",", "body", ")", "except", "asyncio", ".", "TimeoutError", ":", "error_msg", "=", "'Request timed out'", "except", "aiohttp", ".", "ServerDisconnectedError", "as", "err", ":", "error_msg", "=", "'Server disconnected error: {}'", ".", "format", "(", "err", ")", "except", "(", "aiohttp", ".", "ClientError", ",", "ValueError", ")", "as", "err", ":", "error_msg", "=", "'Request connection error: {}'", ".", "format", "(", "err", ")", "else", ":", "break", "logger", ".", "info", "(", "'Request attempt %d failed: %s'", ",", "retry_num", ",", "error_msg", ")", "else", ":", "logger", ".", "info", "(", "'Request failed after %d attempts'", ",", "MAX_RETRIES", ")", "raise", "exceptions", ".", "NetworkError", "(", "error_msg", ")", "if", "res", ".", "status", "!=", "200", ":", "logger", ".", "info", "(", "'Request returned unexpected status: %d %s'", ",", "res", ".", "status", ",", "res", ".", "reason", ")", "raise", "exceptions", ".", "NetworkError", "(", "'Request return unexpected status: {}: {}'", ".", "format", "(", "res", ".", "status", ",", "res", ".", "reason", ")", ")", "return", "FetchResponse", "(", "res", ".", "status", ",", "body", ")" ]
41.288462
0.00091
def _get_config(**kwargs): ''' Return configuration ''' config = { 'filter_id_regex': ['.*!doc_skip'], 'filter_function_regex': [], 'replace_text_regex': {}, 'proccesser': 'highstate_doc.proccesser_markdown', 'max_render_file_size': 10000, 'note': None } if '__salt__' in globals(): config_key = '{0}.config'.format(__virtualname__) config.update(__salt__['config.get'](config_key, {})) # pylint: disable=C0201 for k in set(config.keys()) & set(kwargs.keys()): config[k] = kwargs[k] return config
[ "def", "_get_config", "(", "*", "*", "kwargs", ")", ":", "config", "=", "{", "'filter_id_regex'", ":", "[", "'.*!doc_skip'", "]", ",", "'filter_function_regex'", ":", "[", "]", ",", "'replace_text_regex'", ":", "{", "}", ",", "'proccesser'", ":", "'highstate_doc.proccesser_markdown'", ",", "'max_render_file_size'", ":", "10000", ",", "'note'", ":", "None", "}", "if", "'__salt__'", "in", "globals", "(", ")", ":", "config_key", "=", "'{0}.config'", ".", "format", "(", "__virtualname__", ")", "config", ".", "update", "(", "__salt__", "[", "'config.get'", "]", "(", "config_key", ",", "{", "}", ")", ")", "# pylint: disable=C0201", "for", "k", "in", "set", "(", "config", ".", "keys", "(", ")", ")", "&", "set", "(", "kwargs", ".", "keys", "(", ")", ")", ":", "config", "[", "k", "]", "=", "kwargs", "[", "k", "]", "return", "config" ]
30.842105
0.001656
def load_json(json_file, **kwargs): """ Open and load data from a JSON file .. code:: python reusables.load_json("example.json") # {u'key_1': u'val_1', u'key_for_dict': {u'sub_dict_key': 8}} :param json_file: Path to JSON file as string :param kwargs: Additional arguments for the json.load command :return: Dictionary """ with open(json_file) as f: return json.load(f, **kwargs)
[ "def", "load_json", "(", "json_file", ",", "*", "*", "kwargs", ")", ":", "with", "open", "(", "json_file", ")", "as", "f", ":", "return", "json", ".", "load", "(", "f", ",", "*", "*", "kwargs", ")" ]
28.266667
0.002283
def generate_words(files):
    """
    Transform a list of files into a list of words,
    removing newline characters
    and replacing the name-entity '<NE>...</NE>' and abbreviation '<AB>...</AB>' markup
    """

    repls = {'<NE>' : '','</NE>' : '','<AB>': '','</AB>': ''}

    words_all = []
    for i, file in enumerate(files):
        lines = open(file, 'r')
        for line in lines:
            line = reduce(lambda a, kv: a.replace(*kv), repls.items(), line)
            words = [word for word in line.split("|") if word != '\n']
            words_all.extend(words)
    return words_all
[ "def", "generate_words", "(", "files", ")", ":", "repls", "=", "{", "'<NE>'", ":", "''", ",", "'</NE>'", ":", "''", ",", "'<AB>'", ":", "''", ",", "'</AB>'", ":", "''", "}", "words_all", "=", "[", "]", "for", "i", ",", "file", "in", "enumerate", "(", "files", ")", ":", "lines", "=", "open", "(", "file", ",", "'r'", ")", "for", "line", "in", "lines", ":", "line", "=", "reduce", "(", "lambda", "a", ",", "kv", ":", "a", ".", "replace", "(", "*", "kv", ")", ",", "repls", ".", "items", "(", ")", ",", "line", ")", "words", "=", "[", "word", "for", "word", "in", "line", ".", "split", "(", "\"|\"", ")", "if", "word", "is", "not", "'\\n'", "]", "words_all", ".", "extend", "(", "words", ")", "return", "words_all" ]
33.764706
0.011864
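The reduce/replace idiom in generate_words strips the markup tags in a single pass; a small standalone check (functools.reduce supplies reduce on Python 3, an import the record does not show, and the sample line is made up):

from functools import reduce

repls = {'<NE>': '', '</NE>': '', '<AB>': '', '</AB>': ''}
line = '<NE>Bangkok</NE>|is|<AB>BKK</AB>'
cleaned = reduce(lambda a, kv: a.replace(*kv), repls.items(), line)
assert cleaned == 'Bangkok|is|BKK'
assert cleaned.split('|') == ['Bangkok', 'is', 'BKK']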
def message(self, message=None):
    """ Set the response message when one is given, then return the current message """
    if message is not None:
        self.response_model.message = message
    return self.response_model.message
[ "def", "message", "(", "self", ",", "message", "=", "None", ")", ":", "if", "message", "is", "not", "None", ":", "self", ".", "response_model", ".", "message", "=", "message", "return", "self", ".", "response_model", ".", "message" ]
38.8
0.010101
def getArguments(parser): "Provides additional validation of the arguments collected by argparse." args = parser.parse_args() if not '{}' in args.output: raise argparse.ArgumentError(args.output, 'The output argument string must contain the sequence "{}".') return args
[ "def", "getArguments", "(", "parser", ")", ":", "args", "=", "parser", ".", "parse_args", "(", ")", "if", "not", "'{}'", "in", "args", ".", "output", ":", "raise", "argparse", ".", "ArgumentError", "(", "args", ".", "output", ",", "'The output argument string must contain the sequence \"{}\".'", ")", "return", "args" ]
48
0.010239
def srfrec(body, longitude, latitude): """ Convert planetocentric latitude and longitude of a surface point on a specified body to rectangular coordinates. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/srfrec_c.html :param body: NAIF integer code of an extended body. :type body: int :param longitude: Longitude of point in radians. :type longitude: float :param latitude: Latitude of point in radians. :type latitude: float :return: Rectangular coordinates of the point. :rtype: 3-Element Array of floats """ body = ctypes.c_int(body) longitude = ctypes.c_double(longitude) latitude = ctypes.c_double(latitude) rectan = stypes.emptyDoubleVector(3) libspice.srfrec_c(body, longitude, latitude, rectan) return stypes.cVectorToPython(rectan)
[ "def", "srfrec", "(", "body", ",", "longitude", ",", "latitude", ")", ":", "body", "=", "ctypes", ".", "c_int", "(", "body", ")", "longitude", "=", "ctypes", ".", "c_double", "(", "longitude", ")", "latitude", "=", "ctypes", ".", "c_double", "(", "latitude", ")", "rectan", "=", "stypes", ".", "emptyDoubleVector", "(", "3", ")", "libspice", ".", "srfrec_c", "(", "body", ",", "longitude", ",", "latitude", ",", "rectan", ")", "return", "stypes", ".", "cVectorToPython", "(", "rectan", ")" ]
36.636364
0.001209
def connected_components(G):
    """
    Return the connected components of G as a list of sets. Every
    set contains all vertices in one connected component.
    """
    result = []
    vertices = set(G.vertices)
    while vertices:
        n = vertices.pop()
        group = {n}
        queue = Queue()
        queue.put(n)
        while not queue.empty():
            n = queue.get()
            neighbors = set(G.vertices[n])
            neighbors.difference_update(group)
            vertices.difference_update(neighbors)
            group.update(neighbors)
            for element in neighbors:
                queue.put(element)
        result.append(group)
    return result
[ "def", "connected_components", "(", "G", ")", ":", "result", "=", "[", "]", "vertices", "=", "set", "(", "G", ".", "vertices", ")", "while", "vertices", ":", "n", "=", "vertices", ".", "pop", "(", ")", "group", "=", "{", "n", "}", "queue", "=", "Queue", "(", ")", "queue", ".", "put", "(", "n", ")", "while", "not", "queue", ".", "empty", "(", ")", ":", "n", "=", "queue", ".", "get", "(", ")", "neighbors", "=", "set", "(", "G", ".", "vertices", "[", "n", "]", ")", "neighbors", ".", "difference_update", "(", "group", ")", "vertices", ".", "difference_update", "(", "neighbors", ")", "group", ".", "update", "(", "neighbors", ")", "for", "element", "in", "neighbors", ":", "queue", ".", "put", "(", "element", ")", "result", ".", "append", "(", "group", ")", "return", "result" ]
30.772727
0.001433
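A hedged usage sketch for the component search above: it assumes G exposes a vertices mapping from each vertex to its neighbours and that Queue is already in scope, neither of which is shown in the record.

from queue import Queue  # the function expects Queue to be importable

class Graph:
    # Minimal stand-in: `vertices` maps each vertex to its neighbour list.
    def __init__(self, adjacency):
        self.vertices = adjacency

g = Graph({1: [2], 2: [1], 3: []})
# connected_components(g) would return [{1, 2}, {3}] (the order of the sets may vary).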
def fuse_list( mafs ): """ Try to fuse a list of blocks by progressively fusing each adjacent pair. """ last = None for m in mafs: if last is None: last = m else: fused = fuse( last, m ) if fused: last = fused else: yield last last = m if last: yield last
[ "def", "fuse_list", "(", "mafs", ")", ":", "last", "=", "None", "for", "m", "in", "mafs", ":", "if", "last", "is", "None", ":", "last", "=", "m", "else", ":", "fused", "=", "fuse", "(", "last", ",", "m", ")", "if", "fused", ":", "last", "=", "fused", "else", ":", "yield", "last", "last", "=", "m", "if", "last", ":", "yield", "last" ]
22.529412
0.012531
def replace(input, **params): """ Replaces field value :param input: :param params: :return: """ PARAM_REPLACE_LIST = 'replace' REPLACE_FIELD = 'field' REPLACE_FIND_VALUE = 'value.to_find' REPLACE_WITH_VALUE = 'value.replace_with' replace_list = params.get(PARAM_REPLACE_LIST) for row in input: for replace in replace_list: if row[replace[REPLACE_FIELD]] == replace[REPLACE_FIND_VALUE]: row[replace[REPLACE_FIELD]] = replace[REPLACE_WITH_VALUE] return input
[ "def", "replace", "(", "input", ",", "*", "*", "params", ")", ":", "PARAM_REPLACE_LIST", "=", "'replace'", "REPLACE_FIELD", "=", "'field'", "REPLACE_FIND_VALUE", "=", "'value.to_find'", "REPLACE_WITH_VALUE", "=", "'value.replace_with'", "replace_list", "=", "params", ".", "get", "(", "PARAM_REPLACE_LIST", ")", "for", "row", "in", "input", ":", "for", "replace", "in", "replace_list", ":", "if", "row", "[", "replace", "[", "REPLACE_FIELD", "]", "]", "==", "replace", "[", "REPLACE_FIND_VALUE", "]", ":", "row", "[", "replace", "[", "REPLACE_FIELD", "]", "]", "=", "replace", "[", "REPLACE_WITH_VALUE", "]", "return", "input" ]
29.388889
0.001832
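The shape of the params argument expected by the function above is easiest to see with a small example; the field and values here are purely illustrative.

rows = [{'status': 'N'}, {'status': 'Y'}]
params = {
    'replace': [
        {'field': 'status',
         'value.to_find': 'N',
         'value.replace_with': 'no'},
    ],
}
# replace(rows, **params) would return [{'status': 'no'}, {'status': 'Y'}],
# mutating the rows in place before returning them.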
def print_map(self): """Open impact report dialog used to tune report when printing.""" # Check if selected layer is valid impact_layer = self.iface.activeLayer() if impact_layer is None: # noinspection PyCallByClass,PyTypeChecker QMessageBox.warning( self, self.tr('InaSAFE'), self.tr('Please select a valid impact layer before ' 'trying to print.')) return # Get output path from datastore # Fetch report for pdfs report report_path = os.path.dirname(impact_layer.source()) # Get the hazard and exposure definition used in current IF hazard = definition( QgsExpressionContextUtils.projectScope( QgsProject.instance()).variable( 'hazard_keywords__hazard') ) exposure = definition( QgsExpressionContextUtils.projectScope( QgsProject.instance()).variable( 'exposure_keywords__exposure') ) # TODO: temporary hack until Impact Function becomes serializable # need to have impact report standard_impact_report_metadata = ReportMetadata( metadata_dict=standard_impact_report_metadata_pdf) standard_map_report_metadata = ReportMetadata( metadata_dict=update_template_component( component=map_report, hazard=hazard, exposure=exposure )) standard_infographic_report_metadata = ReportMetadata( metadata_dict=update_template_component(infographic_report)) standard_report_metadata = [ standard_impact_report_metadata, standard_map_report_metadata, standard_infographic_report_metadata ] def retrieve_components(tags): products = [] for report_metadata in standard_report_metadata: products += (report_metadata.component_by_tags(tags)) return products def retrieve_paths(products, suffix=None): paths = [] for c in products: path = ImpactReport.absolute_output_path( os.path.join(report_path, 'output'), products, c.key) if isinstance(path, list): for p in path: paths.append(p) elif isinstance(path, dict): for p in list(path.values()): paths.append(p) else: paths.append(path) if suffix: paths = [p for p in paths if p.endswith(suffix)] paths = [p for p in paths if os.path.exists(p)] return paths def wrap_output_paths(paths): """Make sure the file paths can wrap nicely.""" return [p.replace(os.sep, '<wbr>' + os.sep) for p in paths] pdf_products = retrieve_components( [final_product_tag, pdf_product_tag]) pdf_output_paths = retrieve_paths(pdf_products, '.pdf') html_products = retrieve_components( [final_product_tag, html_product_tag]) html_output_paths = retrieve_paths(html_products, '.html') qpt_products = retrieve_components( [final_product_tag, qpt_product_tag]) qpt_output_paths = retrieve_paths(qpt_products, '.qpt') # create message to user status = m.Message( m.Heading(self.tr('Map Creator'), **INFO_STYLE), m.Paragraph(self.tr( 'Your PDF was created....opening using the default PDF ' 'viewer on your system.')), m.ImportantText(self.tr( 'The generated pdfs were saved ' 'as:'))) for path in wrap_output_paths(pdf_output_paths): status.add(m.Paragraph(path)) status.add(m.Paragraph( m.ImportantText(self.tr('The generated htmls were saved as:')))) for path in wrap_output_paths(html_output_paths): status.add(m.Paragraph(path)) status.add(m.Paragraph( m.ImportantText(self.tr('The generated qpts were saved as:')))) for path in wrap_output_paths(qpt_output_paths): status.add(m.Paragraph(path)) send_static_message(self, status) for path in pdf_output_paths: # noinspection PyCallByClass,PyTypeChecker,PyTypeChecker QDesktopServices.openUrl(QUrl.fromLocalFile(path))
[ "def", "print_map", "(", "self", ")", ":", "# Check if selected layer is valid", "impact_layer", "=", "self", ".", "iface", ".", "activeLayer", "(", ")", "if", "impact_layer", "is", "None", ":", "# noinspection PyCallByClass,PyTypeChecker", "QMessageBox", ".", "warning", "(", "self", ",", "self", ".", "tr", "(", "'InaSAFE'", ")", ",", "self", ".", "tr", "(", "'Please select a valid impact layer before '", "'trying to print.'", ")", ")", "return", "# Get output path from datastore", "# Fetch report for pdfs report", "report_path", "=", "os", ".", "path", ".", "dirname", "(", "impact_layer", ".", "source", "(", ")", ")", "# Get the hazard and exposure definition used in current IF", "hazard", "=", "definition", "(", "QgsExpressionContextUtils", ".", "projectScope", "(", "QgsProject", ".", "instance", "(", ")", ")", ".", "variable", "(", "'hazard_keywords__hazard'", ")", ")", "exposure", "=", "definition", "(", "QgsExpressionContextUtils", ".", "projectScope", "(", "QgsProject", ".", "instance", "(", ")", ")", ".", "variable", "(", "'exposure_keywords__exposure'", ")", ")", "# TODO: temporary hack until Impact Function becomes serializable", "# need to have impact report", "standard_impact_report_metadata", "=", "ReportMetadata", "(", "metadata_dict", "=", "standard_impact_report_metadata_pdf", ")", "standard_map_report_metadata", "=", "ReportMetadata", "(", "metadata_dict", "=", "update_template_component", "(", "component", "=", "map_report", ",", "hazard", "=", "hazard", ",", "exposure", "=", "exposure", ")", ")", "standard_infographic_report_metadata", "=", "ReportMetadata", "(", "metadata_dict", "=", "update_template_component", "(", "infographic_report", ")", ")", "standard_report_metadata", "=", "[", "standard_impact_report_metadata", ",", "standard_map_report_metadata", ",", "standard_infographic_report_metadata", "]", "def", "retrieve_components", "(", "tags", ")", ":", "products", "=", "[", "]", "for", "report_metadata", "in", "standard_report_metadata", ":", "products", "+=", "(", "report_metadata", ".", "component_by_tags", "(", "tags", ")", ")", "return", "products", "def", "retrieve_paths", "(", "products", ",", "suffix", "=", "None", ")", ":", "paths", "=", "[", "]", "for", "c", "in", "products", ":", "path", "=", "ImpactReport", ".", "absolute_output_path", "(", "os", ".", "path", ".", "join", "(", "report_path", ",", "'output'", ")", ",", "products", ",", "c", ".", "key", ")", "if", "isinstance", "(", "path", ",", "list", ")", ":", "for", "p", "in", "path", ":", "paths", ".", "append", "(", "p", ")", "elif", "isinstance", "(", "path", ",", "dict", ")", ":", "for", "p", "in", "list", "(", "path", ".", "values", "(", ")", ")", ":", "paths", ".", "append", "(", "p", ")", "else", ":", "paths", ".", "append", "(", "path", ")", "if", "suffix", ":", "paths", "=", "[", "p", "for", "p", "in", "paths", "if", "p", ".", "endswith", "(", "suffix", ")", "]", "paths", "=", "[", "p", "for", "p", "in", "paths", "if", "os", ".", "path", ".", "exists", "(", "p", ")", "]", "return", "paths", "def", "wrap_output_paths", "(", "paths", ")", ":", "\"\"\"Make sure the file paths can wrap nicely.\"\"\"", "return", "[", "p", ".", "replace", "(", "os", ".", "sep", ",", "'<wbr>'", "+", "os", ".", "sep", ")", "for", "p", "in", "paths", "]", "pdf_products", "=", "retrieve_components", "(", "[", "final_product_tag", ",", "pdf_product_tag", "]", ")", "pdf_output_paths", "=", "retrieve_paths", "(", "pdf_products", ",", "'.pdf'", ")", "html_products", "=", "retrieve_components", "(", "[", 
"final_product_tag", ",", "html_product_tag", "]", ")", "html_output_paths", "=", "retrieve_paths", "(", "html_products", ",", "'.html'", ")", "qpt_products", "=", "retrieve_components", "(", "[", "final_product_tag", ",", "qpt_product_tag", "]", ")", "qpt_output_paths", "=", "retrieve_paths", "(", "qpt_products", ",", "'.qpt'", ")", "# create message to user", "status", "=", "m", ".", "Message", "(", "m", ".", "Heading", "(", "self", ".", "tr", "(", "'Map Creator'", ")", ",", "*", "*", "INFO_STYLE", ")", ",", "m", ".", "Paragraph", "(", "self", ".", "tr", "(", "'Your PDF was created....opening using the default PDF '", "'viewer on your system.'", ")", ")", ",", "m", ".", "ImportantText", "(", "self", ".", "tr", "(", "'The generated pdfs were saved '", "'as:'", ")", ")", ")", "for", "path", "in", "wrap_output_paths", "(", "pdf_output_paths", ")", ":", "status", ".", "add", "(", "m", ".", "Paragraph", "(", "path", ")", ")", "status", ".", "add", "(", "m", ".", "Paragraph", "(", "m", ".", "ImportantText", "(", "self", ".", "tr", "(", "'The generated htmls were saved as:'", ")", ")", ")", ")", "for", "path", "in", "wrap_output_paths", "(", "html_output_paths", ")", ":", "status", ".", "add", "(", "m", ".", "Paragraph", "(", "path", ")", ")", "status", ".", "add", "(", "m", ".", "Paragraph", "(", "m", ".", "ImportantText", "(", "self", ".", "tr", "(", "'The generated qpts were saved as:'", ")", ")", ")", ")", "for", "path", "in", "wrap_output_paths", "(", "qpt_output_paths", ")", ":", "status", ".", "add", "(", "m", ".", "Paragraph", "(", "path", ")", ")", "send_static_message", "(", "self", ",", "status", ")", "for", "path", "in", "pdf_output_paths", ":", "# noinspection PyCallByClass,PyTypeChecker,PyTypeChecker", "QDesktopServices", ".", "openUrl", "(", "QUrl", ".", "fromLocalFile", "(", "path", ")", ")" ]
37.239669
0.000432
def deploy( config, name, bucket, timeout, memory, description, subnet_ids, security_group_ids ): """ Deploy/Update a function from a project directory """ # options should override config if it is there myname = name or config.name mybucket = bucket or config.bucket mytimeout = timeout or config.timeout mymemory = memory or config.memory mydescription = description or config.description mysubnet_ids = subnet_ids or config.subnet_ids mysecurity_group_ids = security_group_ids or config.security_group_ids vpc_config = {} if mysubnet_ids and mysecurity_group_ids: vpc_config = { 'SubnetIds': mysubnet_ids.split(','), 'SecurityGroupIds': mysecurity_group_ids.split(',') } click.echo('Deploying {} to {}'.format(myname, mybucket)) lambder.deploy_function( myname, mybucket, mytimeout, mymemory, mydescription, vpc_config )
[ "def", "deploy", "(", "config", ",", "name", ",", "bucket", ",", "timeout", ",", "memory", ",", "description", ",", "subnet_ids", ",", "security_group_ids", ")", ":", "# options should override config if it is there", "myname", "=", "name", "or", "config", ".", "name", "mybucket", "=", "bucket", "or", "config", ".", "bucket", "mytimeout", "=", "timeout", "or", "config", ".", "timeout", "mymemory", "=", "memory", "or", "config", ".", "memory", "mydescription", "=", "description", "or", "config", ".", "description", "mysubnet_ids", "=", "subnet_ids", "or", "config", ".", "subnet_ids", "mysecurity_group_ids", "=", "security_group_ids", "or", "config", ".", "security_group_ids", "vpc_config", "=", "{", "}", "if", "mysubnet_ids", "and", "mysecurity_group_ids", ":", "vpc_config", "=", "{", "'SubnetIds'", ":", "mysubnet_ids", ".", "split", "(", "','", ")", ",", "'SecurityGroupIds'", ":", "mysecurity_group_ids", ".", "split", "(", "','", ")", "}", "click", ".", "echo", "(", "'Deploying {} to {}'", ".", "format", "(", "myname", ",", "mybucket", ")", ")", "lambder", ".", "deploy_function", "(", "myname", ",", "mybucket", ",", "mytimeout", ",", "mymemory", ",", "mydescription", ",", "vpc_config", ")" ]
26.833333
0.000999
def cleanup_dataset(dataset, data_home=None, ext=".zip"): """ Removes the dataset directory and archive file from the data home directory. Parameters ---------- dataset : str The name of the dataset; should either be a folder in data home or specified in the yellowbrick.datasets.DATASETS variable. data_home : str, optional The path on disk where data is stored. If not passed in, it is looked up from YELLOWBRICK_DATA or the default returned by ``get_data_home``. ext : str, default: ".zip" The extension of the archive file. Returns ------- removed : int The number of objects removed from data_home. """ removed = 0 data_home = get_data_home(data_home) # Paths to remove datadir = os.path.join(data_home, dataset) archive = os.path.join(data_home, dataset+ext) # Remove directory and contents if os.path.exists(datadir): shutil.rmtree(datadir) removed += 1 # Remove the archive file if os.path.exists(archive): os.remove(archive) removed += 1 return removed
[ "def", "cleanup_dataset", "(", "dataset", ",", "data_home", "=", "None", ",", "ext", "=", "\".zip\"", ")", ":", "removed", "=", "0", "data_home", "=", "get_data_home", "(", "data_home", ")", "# Paths to remove", "datadir", "=", "os", ".", "path", ".", "join", "(", "data_home", ",", "dataset", ")", "archive", "=", "os", ".", "path", ".", "join", "(", "data_home", ",", "dataset", "+", "ext", ")", "# Remove directory and contents", "if", "os", ".", "path", ".", "exists", "(", "datadir", ")", ":", "shutil", ".", "rmtree", "(", "datadir", ")", "removed", "+=", "1", "# Remove the archive file", "if", "os", ".", "path", ".", "exists", "(", "archive", ")", ":", "os", ".", "remove", "(", "archive", ")", "removed", "+=", "1", "return", "removed" ]
27.35
0.001765
def submit(self, subreddit, title, text=None, url=None, captcha=None, save=None, send_replies=None, resubmit=None, **kwargs): """Submit a new link to the given subreddit. Accepts either a Subreddit object or a str containing the subreddit's display name. :param resubmit: If True, submit the link even if it has already been submitted. :param save: If True the new Submission will be saved after creation. :param send_replies: If True, inbox replies will be received when people comment on the submission. If set to None, the default of True for text posts and False for link posts will be used. :returns: The newly created Submission object if the reddit instance can access it. Otherwise, return the url to the submission. This function may result in a captcha challenge. PRAW will automatically prompt you for a response. See :ref:`handling-captchas` if you want to manually handle captchas. """ if isinstance(text, six.string_types) == bool(url): raise TypeError('One (and only one) of text or url is required!') data = {'sr': six.text_type(subreddit), 'title': title} if text or text == '': data['kind'] = 'self' data['text'] = text else: data['kind'] = 'link' data['url'] = url if captcha: data.update(captcha) if resubmit is not None: data['resubmit'] = resubmit if save is not None: data['save'] = save if send_replies is not None: data['sendreplies'] = send_replies result = self.request_json(self.config['submit'], data=data, retry_on_error=False) url = result['data']['url'] # Clear the OAuth setting when attempting to fetch the submission if self._use_oauth: self._use_oauth = False if url.startswith(self.config.oauth_url): url = self.config.api_url + url[len(self.config.oauth_url):] try: return self.get_submission(url) except errors.Forbidden: # While the user may be able to submit to a subreddit, # that does not guarantee they have read access. return url
[ "def", "submit", "(", "self", ",", "subreddit", ",", "title", ",", "text", "=", "None", ",", "url", "=", "None", ",", "captcha", "=", "None", ",", "save", "=", "None", ",", "send_replies", "=", "None", ",", "resubmit", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "text", ",", "six", ".", "string_types", ")", "==", "bool", "(", "url", ")", ":", "raise", "TypeError", "(", "'One (and only one) of text or url is required!'", ")", "data", "=", "{", "'sr'", ":", "six", ".", "text_type", "(", "subreddit", ")", ",", "'title'", ":", "title", "}", "if", "text", "or", "text", "==", "''", ":", "data", "[", "'kind'", "]", "=", "'self'", "data", "[", "'text'", "]", "=", "text", "else", ":", "data", "[", "'kind'", "]", "=", "'link'", "data", "[", "'url'", "]", "=", "url", "if", "captcha", ":", "data", ".", "update", "(", "captcha", ")", "if", "resubmit", "is", "not", "None", ":", "data", "[", "'resubmit'", "]", "=", "resubmit", "if", "save", "is", "not", "None", ":", "data", "[", "'save'", "]", "=", "save", "if", "send_replies", "is", "not", "None", ":", "data", "[", "'sendreplies'", "]", "=", "send_replies", "result", "=", "self", ".", "request_json", "(", "self", ".", "config", "[", "'submit'", "]", ",", "data", "=", "data", ",", "retry_on_error", "=", "False", ")", "url", "=", "result", "[", "'data'", "]", "[", "'url'", "]", "# Clear the OAuth setting when attempting to fetch the submission", "if", "self", ".", "_use_oauth", ":", "self", ".", "_use_oauth", "=", "False", "if", "url", ".", "startswith", "(", "self", ".", "config", ".", "oauth_url", ")", ":", "url", "=", "self", ".", "config", ".", "api_url", "+", "url", "[", "len", "(", "self", ".", "config", ".", "oauth_url", ")", ":", "]", "try", ":", "return", "self", ".", "get_submission", "(", "url", ")", "except", "errors", ".", "Forbidden", ":", "# While the user may be able to submit to a subreddit,", "# that does not guarantee they have read access.", "return", "url" ]
43.277778
0.001255
def start(self): """ Find the first data entry and prepare to parse. """ while not self.is_start(self.current_tag): self.next() self.new_entry()
[ "def", "start", "(", "self", ")", ":", "while", "not", "self", ".", "is_start", "(", "self", ".", "current_tag", ")", ":", "self", ".", "next", "(", ")", "self", ".", "new_entry", "(", ")" ]
23.75
0.010152
def all_options(self): """Returns the set of all options used in all export entries""" items = chain.from_iterable(hosts.values() for hosts in self.data.values()) return set(chain.from_iterable(items))
[ "def", "all_options", "(", "self", ")", ":", "items", "=", "chain", ".", "from_iterable", "(", "hosts", ".", "values", "(", ")", "for", "hosts", "in", "self", ".", "data", ".", "values", "(", ")", ")", "return", "set", "(", "chain", ".", "from_iterable", "(", "items", ")", ")" ]
55.5
0.013333
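From the two chained iterations above, self.data is evidently a nested mapping of export path -> host -> option set; an illustrative (made-up) shape and the result it would produce:

data = {
    '/srv/share': {'hostA': {'rw', 'sync'}, 'hostB': {'ro'}},
}
# With self.data bound to this dict, all_options would return {'rw', 'sync', 'ro'}.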
def find_visible_elements(driver, selector, by=By.CSS_SELECTOR): """ Finds all WebElements that match a selector and are visible. Similar to webdriver.find_elements. @Params driver - the webdriver object (required) selector - the locator that is used to search the DOM (required) by - the method to search for the locator (Default: By.CSS_SELECTOR) """ elements = driver.find_elements(by=by, value=selector) return [element for element in elements if element.is_displayed()]
[ "def", "find_visible_elements", "(", "driver", ",", "selector", ",", "by", "=", "By", ".", "CSS_SELECTOR", ")", ":", "elements", "=", "driver", ".", "find_elements", "(", "by", "=", "by", ",", "value", "=", "selector", ")", "return", "[", "element", "for", "element", "in", "elements", "if", "element", ".", "is_displayed", "(", ")", "]" ]
45.818182
0.001946
def global_items(self): """Iterate over (key, value) pairs in the ``globals`` table.""" for (k, v) in self.sql('global_dump'): yield (self.unpack(k), self.unpack(v))
[ "def", "global_items", "(", "self", ")", ":", "for", "(", "k", ",", "v", ")", "in", "self", ".", "sql", "(", "'global_dump'", ")", ":", "yield", "(", "self", ".", "unpack", "(", "k", ")", ",", "self", ".", "unpack", "(", "v", ")", ")" ]
47.5
0.010363
def parse_java_version(cls, version): """Parses the java version (given a string or Revision object). Handles java version-isms, converting things like '7' -> '1.7' appropriately. Truncates input versions down to just the major and minor numbers (eg, 1.6), ignoring extra versioning information after the second number. :param version: the input version, given as a string or Revision object. :return: the parsed and cleaned version, suitable as a javac -source or -target argument. :rtype: Revision """ conversion = {str(i): '1.{}'.format(i) for i in cls.SUPPORTED_CONVERSION_VERSIONS} if str(version) in conversion: return Revision.lenient(conversion[str(version)]) if not hasattr(version, 'components'): version = Revision.lenient(version) if len(version.components) <= 2: return version return Revision(*version.components[:2])
[ "def", "parse_java_version", "(", "cls", ",", "version", ")", ":", "conversion", "=", "{", "str", "(", "i", ")", ":", "'1.{}'", ".", "format", "(", "i", ")", "for", "i", "in", "cls", ".", "SUPPORTED_CONVERSION_VERSIONS", "}", "if", "str", "(", "version", ")", "in", "conversion", ":", "return", "Revision", ".", "lenient", "(", "conversion", "[", "str", "(", "version", ")", "]", ")", "if", "not", "hasattr", "(", "version", ",", "'components'", ")", ":", "version", "=", "Revision", ".", "lenient", "(", "version", ")", "if", "len", "(", "version", ".", "components", ")", "<=", "2", ":", "return", "version", "return", "Revision", "(", "*", "version", ".", "components", "[", ":", "2", "]", ")" ]
42.142857
0.00884
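Hedged expectations for the version parsing above; Revision and SUPPORTED_CONVERSION_VERSIONS live elsewhere in the codebase, so the exact shorthand set is an assumption here.

# parse_java_version('7')      -> a Revision equivalent to 1.7 (shorthand expanded via the map)
# parse_java_version('1.8.0')  -> a Revision equivalent to 1.8 (extra component dropped)
# parse_java_version('1.6')    -> returned as-is (already just major.minor)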
def drop_table(self, table): """ Drop a table from the MyDB context. ## Arguments * `table` (str): The name of the table to drop. """ job_id = self.submit("DROP TABLE %s"%table, context="MYDB") status = self.monitor(job_id) if status[0] != 5: raise Exception("Couldn't drop table %s"%table)
[ "def", "drop_table", "(", "self", ",", "table", ")", ":", "job_id", "=", "self", ".", "submit", "(", "\"DROP TABLE %s\"", "%", "table", ",", "context", "=", "\"MYDB\"", ")", "status", "=", "self", ".", "monitor", "(", "job_id", ")", "if", "status", "[", "0", "]", "!=", "5", ":", "raise", "Exception", "(", "\"Couldn't drop table %s\"", "%", "table", ")" ]
27.461538
0.01084
def plfit_lsq(x,y): """ Returns A and B in y=Ax^B http://mathworld.wolfram.com/LeastSquaresFittingPowerLaw.html """ n = len(x) btop = n * (log(x)*log(y)).sum() - (log(x)).sum()*(log(y)).sum() bbottom = n*(log(x)**2).sum() - (log(x).sum())**2 b = btop / bbottom a = ( log(y).sum() - b * log(x).sum() ) / n A = exp(a) return A,b
[ "def", "plfit_lsq", "(", "x", ",", "y", ")", ":", "n", "=", "len", "(", "x", ")", "btop", "=", "n", "*", "(", "log", "(", "x", ")", "*", "log", "(", "y", ")", ")", ".", "sum", "(", ")", "-", "(", "log", "(", "x", ")", ")", ".", "sum", "(", ")", "*", "(", "log", "(", "y", ")", ")", ".", "sum", "(", ")", "bbottom", "=", "n", "*", "(", "log", "(", "x", ")", "**", "2", ")", ".", "sum", "(", ")", "-", "(", "log", "(", "x", ")", ".", "sum", "(", ")", ")", "**", "2", "b", "=", "btop", "/", "bbottom", "a", "=", "(", "log", "(", "y", ")", ".", "sum", "(", ")", "-", "b", "*", "log", "(", "x", ")", ".", "sum", "(", ")", ")", "/", "n", "A", "=", "exp", "(", "a", ")", "return", "A", ",", "b" ]
27.615385
0.013477
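A self-contained numerical check of the closed-form power-law fit above; numpy supplies the vectorised log/exp the function expects to find in scope (the original import is not shown in the record).

import numpy as np
from numpy import log, exp

def plfit_lsq(x, y):
    # Same least-squares fit of y = A*x**B in log-log space as above.
    n = len(x)
    btop = n * (log(x) * log(y)).sum() - log(x).sum() * log(y).sum()
    bbottom = n * (log(x) ** 2).sum() - (log(x).sum()) ** 2
    b = btop / bbottom
    a = (log(y).sum() - b * log(x).sum()) / n
    return exp(a), b

x = np.array([1.0, 2.0, 4.0, 8.0])
A, b = plfit_lsq(x, 3.0 * x ** 2.5)   # data generated exactly from y = 3 * x**2.5
assert np.allclose([A, b], [3.0, 2.5])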
async def validate(state, holdout_glob): """Validate the trained model against holdout games. Args: state: the RL loop State instance. holdout_glob: a glob that matches holdout games. """ if not glob.glob(holdout_glob): print('Glob "{}" didn\'t match any files, skipping validation'.format( holdout_glob)) else: await run( 'python3', 'validate.py', holdout_glob, '--flagfile={}'.format(os.path.join(FLAGS.flags_dir, 'validate.flags')), '--work_dir={}'.format(fsdb.working_dir()))
[ "async", "def", "validate", "(", "state", ",", "holdout_glob", ")", ":", "if", "not", "glob", ".", "glob", "(", "holdout_glob", ")", ":", "print", "(", "'Glob \"{}\" didn\\'t match any files, skipping validation'", ".", "format", "(", "holdout_glob", ")", ")", "else", ":", "await", "run", "(", "'python3'", ",", "'validate.py'", ",", "holdout_glob", ",", "'--flagfile={}'", ".", "format", "(", "os", ".", "path", ".", "join", "(", "FLAGS", ".", "flags_dir", ",", "'validate.flags'", ")", ")", ",", "'--work_dir={}'", ".", "format", "(", "fsdb", ".", "working_dir", "(", ")", ")", ")" ]
32.875
0.009242
def create_pywbem_ssl_context():
    """ Create an SSL context based on what is commonly accepted as the
        required limitations. This code attempts to create the same context for
        Python 2 and Python 3 except for the ciphers.
        This list is based on what is currently defined in the Python SSL
        module create_default_context function.
        This includes:

            * Disallow SSLV2 and SSLV3
            * Allow TLSV1, TLSV1.1, TLSV1.2
            * No compression
            * Single DH Use and Single ECDH use
        cacerts info is set independently so is not part of our context setter.
    """

    if six.PY2:
        context = SSL.Context('sslv23')
        # Many of the flags are not in the M2Crypto source so they were taken
        # from OpenSSL SSL.h module as flags.
        context.set_options(SSL.SSL_OP_NO_SSLv2 |
                            0x02000000 |            # OP_NO_SSLV3
                            0x00020000 |            # OP_NO_COMPRESSION
                            0x00100000 |            # OP_SINGLE_DH_USE
                            0x00400000 |            # OP_CIPHER_SERVER_PREFERENCE
                            0x00080000)             # OP_SINGLE_ECDH_USE
    else:
        # The choice for the Python SSL module is whether to use the
        # create_default directly and possibly have different limits depending
        # on which version of Python you use or to set the attributes
        # directly based on a currently used SSL
        context = SSL.create_default_context(purpose=SSL.Purpose.CLIENT_AUTH)

        # Variable settings per SSL create_default_context. These are what
        # the function above sets for Python 3.4
        # context = SSLContext(PROTOCOL_SSLv23)
        # context.options |= OP_NO_SSLv2
        # context.options |= OP_NO_SSLv3
        # context.options |= getattr(SSL, "OP_NO_COMPRESSION", 0)
        # context.options |= getattr(SSL, "OP_CIPHER_SERVER_PREFERENCE", 0)
        # context.options |= getattr(SSL, "OP_SINGLE_DH_USE", 0)
        # context.options |= getattr(SSL, "OP_SINGLE_ECDH_USE", 0)
        # context.set_ciphers(_RESTRICTED_SERVER_CIPHERS)

    return context
[ "def", "create_pywbem_ssl_context", "(", ")", ":", "if", "six", ".", "PY2", ":", "context", "=", "SSL", ".", "Context", "(", "'sslv23'", ")", "# Many of the flags are not in the M2Crypto source so they were taken", "# from OpenSSL SSL.h module as flags.", "SSL", ".", "context", ".", "set_options", "(", "SSL", ".", "SSL_OP_NO_SSLv2", "|", "0x02000000", "|", "# OP_NO_SSLV3", "0x00020000", "|", "# OP_NO_COMPRESSION", "0x00100000", "|", "# OP_SINGLE_DH_USE", "0x00400000", "|", "# OP_CIPHER_SERVER_PREFERENCE", "0x00080000", ")", "# OP_SINGLE_ECDH_USE", "else", ":", "# The choice for the Python SSL module is whether to use the", "# create_default directly and possibly have different limits depending", "# on which version of Python you use or to set the attributes", "# directly based on a currently used SSL", "context", "=", "SSL", ".", "create_default_context", "(", "purpose", "=", "SSL", ".", "Purpose", ".", "CLIENT_AUTH", ")", "# Variable settings per SSL create_default_context. These are what", "# the function above sets for Python 3.4", "# context = SSLContext(PROTOCOL_SSLv23)", "# context.options |= OP_NO_SSLv2", "# context.options |= OP_NO_SSLv3", "# context.options |= getattr(SSL, \"OP_NO_COMPRESSION\", 0)", "# context.options |= getattr(SSL, \"OP_CIPHER_SERVER_PREFERENCE\", 0)", "# context.options |= getattr(SSL, \"OP_SINGLE_DH_USE\", 0)", "# context.options |= getattr(SSL, \"OP_SINGLE_ECDH_USE\", 0)", "# context.set_ciphers(_RESTRICTED_SERVER_CIPHERS)", "return", "context" ]
48.113636
0.000463
def indexer_receiver(sender, json=None, record=None, index=None, **dummy_kwargs): """Connect to before_record_index signal to transform record for ES.""" if index and index.startswith('grants-'): # Generate suggest field suggestions = [ json.get('code'), json.get('acronym'), json.get('title') ] json['suggest'] = { 'input': [s for s in suggestions if s], 'output': json['title'], 'context': { 'funder': [json['funder']['doi']] }, 'payload': { 'id': json['internal_id'], 'legacy_id': (json['code'] if json.get('program') == 'FP7' else json['internal_id']), 'code': json['code'], 'title': json['title'], 'acronym': json.get('acronym'), 'program': json.get('program'), }, } elif index and index.startswith('funders-'): # Generate suggest field suggestions = json.get('acronyms', []) + [json.get('name')] json['suggest'] = { 'input': [s for s in suggestions if s], 'output': json['name'], 'payload': { 'id': json['doi'] }, }
[ "def", "indexer_receiver", "(", "sender", ",", "json", "=", "None", ",", "record", "=", "None", ",", "index", "=", "None", ",", "*", "*", "dummy_kwargs", ")", ":", "if", "index", "and", "index", ".", "startswith", "(", "'grants-'", ")", ":", "# Generate suggest field", "suggestions", "=", "[", "json", ".", "get", "(", "'code'", ")", ",", "json", ".", "get", "(", "'acronym'", ")", ",", "json", ".", "get", "(", "'title'", ")", "]", "json", "[", "'suggest'", "]", "=", "{", "'input'", ":", "[", "s", "for", "s", "in", "suggestions", "if", "s", "]", ",", "'output'", ":", "json", "[", "'title'", "]", ",", "'context'", ":", "{", "'funder'", ":", "[", "json", "[", "'funder'", "]", "[", "'doi'", "]", "]", "}", ",", "'payload'", ":", "{", "'id'", ":", "json", "[", "'internal_id'", "]", ",", "'legacy_id'", ":", "(", "json", "[", "'code'", "]", "if", "json", ".", "get", "(", "'program'", ")", "==", "'FP7'", "else", "json", "[", "'internal_id'", "]", ")", ",", "'code'", ":", "json", "[", "'code'", "]", ",", "'title'", ":", "json", "[", "'title'", "]", ",", "'acronym'", ":", "json", ".", "get", "(", "'acronym'", ")", ",", "'program'", ":", "json", ".", "get", "(", "'program'", ")", ",", "}", ",", "}", "elif", "index", "and", "index", ".", "startswith", "(", "'funders-'", ")", ":", "# Generate suggest field", "suggestions", "=", "json", ".", "get", "(", "'acronyms'", ",", "[", "]", ")", "+", "[", "json", ".", "get", "(", "'name'", ")", "]", "json", "[", "'suggest'", "]", "=", "{", "'input'", ":", "[", "s", "for", "s", "in", "suggestions", "if", "s", "]", ",", "'output'", ":", "json", "[", "'name'", "]", ",", "'payload'", ":", "{", "'id'", ":", "json", "[", "'doi'", "]", "}", ",", "}" ]
36.25
0.000746
def guess_filename(filename): """Guess filename""" if osp.isfile(filename): return filename if not filename.endswith('.py'): filename += '.py' for path in [getcwd_or_home()] + sys.path: fname = osp.join(path, filename) if osp.isfile(fname): return fname elif osp.isfile(fname+'.py'): return fname+'.py' elif osp.isfile(fname+'.pyw'): return fname+'.pyw' return filename
[ "def", "guess_filename", "(", "filename", ")", ":", "if", "osp", ".", "isfile", "(", "filename", ")", ":", "return", "filename", "if", "not", "filename", ".", "endswith", "(", "'.py'", ")", ":", "filename", "+=", "'.py'", "for", "path", "in", "[", "getcwd_or_home", "(", ")", "]", "+", "sys", ".", "path", ":", "fname", "=", "osp", ".", "join", "(", "path", ",", "filename", ")", "if", "osp", ".", "isfile", "(", "fname", ")", ":", "return", "fname", "elif", "osp", ".", "isfile", "(", "fname", "+", "'.py'", ")", ":", "return", "fname", "+", "'.py'", "elif", "osp", ".", "isfile", "(", "fname", "+", "'.pyw'", ")", ":", "return", "fname", "+", "'.pyw'", "return", "filename" ]
31.533333
0.002053
def segmenttable_get_by_name(xmldoc, name): """ Retrieve the segmentlists whose name equals name. The result is a segmentlistdict indexed by instrument. The output of this function is not coalesced, each segmentlist contains the segments as found in the segment table. NOTE: this is a light-weight version of the .get_by_name() method of the LigolwSegments class intended for use when the full machinery of that class is not required. Considerably less document validation and error checking is performed by this version. Consider using that method instead if your application will be interfacing with the document via that class anyway. """ # # find required tables # def_table = lsctables.SegmentDefTable.get_table(xmldoc) seg_table = lsctables.SegmentTable.get_table(xmldoc) # # segment_def_id --> instrument names mapping but only for # segment_definer entries bearing the requested name # instrument_index = dict((row.segment_def_id, row.instruments) for row in def_table if row.name == name) # # populate result segmentlistdict object from segment_def_map table # and index # instruments = set(instrument for instruments in instrument_index.values() for instrument in instruments) result = segments.segmentlistdict((instrument, segments.segmentlist()) for instrument in instruments) for row in seg_table: if row.segment_def_id in instrument_index: seg = row.segment for instrument in instrument_index[row.segment_def_id]: result[instrument].append(seg) # # done # return result
[ "def", "segmenttable_get_by_name", "(", "xmldoc", ",", "name", ")", ":", "#", "# find required tables", "#", "def_table", "=", "lsctables", ".", "SegmentDefTable", ".", "get_table", "(", "xmldoc", ")", "seg_table", "=", "lsctables", ".", "SegmentTable", ".", "get_table", "(", "xmldoc", ")", "#", "# segment_def_id --> instrument names mapping but only for", "# segment_definer entries bearing the requested name", "#", "instrument_index", "=", "dict", "(", "(", "row", ".", "segment_def_id", ",", "row", ".", "instruments", ")", "for", "row", "in", "def_table", "if", "row", ".", "name", "==", "name", ")", "#", "# populate result segmentlistdict object from segment_def_map table", "# and index", "#", "instruments", "=", "set", "(", "instrument", "for", "instruments", "in", "instrument_index", ".", "values", "(", ")", "for", "instrument", "in", "instruments", ")", "result", "=", "segments", ".", "segmentlistdict", "(", "(", "instrument", ",", "segments", ".", "segmentlist", "(", ")", ")", "for", "instrument", "in", "instruments", ")", "for", "row", "in", "seg_table", ":", "if", "row", ".", "segment_def_id", "in", "instrument_index", ":", "seg", "=", "row", ".", "segment", "for", "instrument", "in", "instrument_index", "[", "row", ".", "segment_def_id", "]", ":", "result", "[", "instrument", "]", ".", "append", "(", "seg", ")", "#", "# done", "#", "return", "result" ]
31.145833
0.026589
def get_aggregate_by_id(self, account_id: str) -> AccountAggregate: """ Returns the aggregate for the given id """ account = self.get_by_id(account_id) return self.get_account_aggregate(account)
[ "def", "get_aggregate_by_id", "(", "self", ",", "account_id", ":", "str", ")", "->", "AccountAggregate", ":", "account", "=", "self", ".", "get_by_id", "(", "account_id", ")", "return", "self", ".", "get_account_aggregate", "(", "account", ")" ]
53.75
0.009174
def get_genetic_profiles(study_id, profile_filter=None): """Return all the genetic profiles (data sets) for a given study. Genetic profiles are different types of data for a given study. For instance the study 'cellline_ccle_broad' has profiles such as 'cellline_ccle_broad_mutations' for mutations, 'cellline_ccle_broad_CNA' for copy number alterations, etc. Parameters ---------- study_id : str The ID of the cBio study. Example: 'paad_icgc' profile_filter : Optional[str] A string used to filter the profiles to return. Will be one of: - MUTATION - MUTATION_EXTENDED - COPY_NUMBER_ALTERATION - MRNA_EXPRESSION - METHYLATION The genetic profiles can include "mutation", "CNA", "rppa", "methylation", etc. Returns ------- genetic_profiles : list[str] A list of genetic profiles available for the given study. """ data = {'cmd': 'getGeneticProfiles', 'cancer_study_id': study_id} df = send_request(**data) res = _filter_data_frame(df, ['genetic_profile_id'], 'genetic_alteration_type', profile_filter) genetic_profiles = list(res['genetic_profile_id'].values()) return genetic_profiles
[ "def", "get_genetic_profiles", "(", "study_id", ",", "profile_filter", "=", "None", ")", ":", "data", "=", "{", "'cmd'", ":", "'getGeneticProfiles'", ",", "'cancer_study_id'", ":", "study_id", "}", "df", "=", "send_request", "(", "*", "*", "data", ")", "res", "=", "_filter_data_frame", "(", "df", ",", "[", "'genetic_profile_id'", "]", ",", "'genetic_alteration_type'", ",", "profile_filter", ")", "genetic_profiles", "=", "list", "(", "res", "[", "'genetic_profile_id'", "]", ".", "values", "(", ")", ")", "return", "genetic_profiles" ]
35.083333
0.00077
def retry_request(method, url, headers=None, payload=None, auth=None, tries=10, initial_interval=5, callback=None): """Retry an HTTP request with linear backoff. Returns the response if the status code is < 400 or waits (try * initial_interval) seconds and retries (up to tries times) if it is not. Parameters ---------- method: `str` Method: `GET`, `PUT`, or `POST` url: `str` URL of HTTP request headers: `dict` HTTP headers to supply. payload: `dict` Payload for request; passed as parameters to `GET`, JSON message body for `PUT`/`POST`. auth: `tuple` Authentication tuple for Basic/Digest/Custom HTTP Auth. tries: `int` Number of attempts to make. Defaults to `10`. initial_interval: `int` Interval between first and second try, and amount of time added before each successive attempt is made. Defaults to `5`. callback : callable A callable (function) object that is called each time a retry is needed. The callable has a keyword argument signature: - ``n``: number of tries completed (integer). - ``remaining``: number of tries remaining (integer). - ``status``: HTTP status of the previous call. - ``content``: body content of the previous call. Returns ------- :class:`requests.Response` The final HTTP Response received. Raises ------ :class:`apikit.BackendError` The `status_code` will be `500`, and the reason `Internal Server Error`. Its `content` will be diagnostic of the last response received. """ method = method.lower() attempt = 1 while True: if method == "get": resp = requests.get(url, headers=headers, params=payload, auth=auth) elif method == "put" or method == "post": resp = requests.put(url, headers=headers, json=payload, auth=auth) else: raise_ise("Bad method %s: must be 'get', 'put', or 'post" % method) if resp.status_code < 400: break delay = initial_interval * attempt if attempt >= tries: raise_ise("Failed to '%s' %s after %d attempts." % (method, url, tries) + " Last response was '%d %s' [%s]" % (resp.status_code, resp.reason, resp.text.strip())) if callback is not None: callback(n=attempt, remaining=tries - attempt, status=resp.status_code, content=resp.text.strip()) time.sleep(delay) attempt += 1 return resp
[ "def", "retry_request", "(", "method", ",", "url", ",", "headers", "=", "None", ",", "payload", "=", "None", ",", "auth", "=", "None", ",", "tries", "=", "10", ",", "initial_interval", "=", "5", ",", "callback", "=", "None", ")", ":", "method", "=", "method", ".", "lower", "(", ")", "attempt", "=", "1", "while", "True", ":", "if", "method", "==", "\"get\"", ":", "resp", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "headers", ",", "params", "=", "payload", ",", "auth", "=", "auth", ")", "elif", "method", "==", "\"put\"", "or", "method", "==", "\"post\"", ":", "resp", "=", "requests", ".", "put", "(", "url", ",", "headers", "=", "headers", ",", "json", "=", "payload", ",", "auth", "=", "auth", ")", "else", ":", "raise_ise", "(", "\"Bad method %s: must be 'get', 'put', or 'post\"", "%", "method", ")", "if", "resp", ".", "status_code", "<", "400", ":", "break", "delay", "=", "initial_interval", "*", "attempt", "if", "attempt", ">=", "tries", ":", "raise_ise", "(", "\"Failed to '%s' %s after %d attempts.\"", "%", "(", "method", ",", "url", ",", "tries", ")", "+", "\" Last response was '%d %s' [%s]\"", "%", "(", "resp", ".", "status_code", ",", "resp", ".", "reason", ",", "resp", ".", "text", ".", "strip", "(", ")", ")", ")", "if", "callback", "is", "not", "None", ":", "callback", "(", "n", "=", "attempt", ",", "remaining", "=", "tries", "-", "attempt", ",", "status", "=", "resp", ".", "status_code", ",", "content", "=", "resp", ".", "text", ".", "strip", "(", ")", ")", "time", ".", "sleep", "(", "delay", ")", "attempt", "+=", "1", "return", "resp" ]
37.267606
0.000368
def find_offsets(data, ofs): '''find mag offsets by applying Bills "offsets revisited" algorithm on the data This is an implementation of the algorithm from: http://gentlenav.googlecode.com/files/MagnetometerOffsetNullingRevisited.pdf ''' # a limit on the maximum change in each step max_change = args.max_change # the gain factor for the algorithm gain = args.gain data2 = [] for d in data: d = d.copy() + noise() d.x = float(int(d.x + 0.5)) d.y = float(int(d.y + 0.5)) d.z = float(int(d.z + 0.5)) data2.append(d) data = data2 history_idx = 0 mag_history = data[0:args.history] for i in range(args.history, len(data)): B1 = mag_history[history_idx] + ofs B2 = data[i] + ofs diff = B2 - B1 diff_length = diff.length() if diff_length <= args.min_diff: # the mag vector hasn't changed enough - we don't get any # information from this history_idx = (history_idx+1) % args.history continue mag_history[history_idx] = data[i] history_idx = (history_idx+1) % args.history # equation 6 of Bills paper delta = diff * (gain * (B2.length() - B1.length()) / diff_length) # limit the change from any one reading. This is to prevent # single crazy readings from throwing off the offsets for a long # time delta_length = delta.length() if max_change != 0 and delta_length > max_change: delta *= max_change / delta_length # set the new offsets ofs = ofs - delta if args.verbose: print(ofs) return ofs
[ "def", "find_offsets", "(", "data", ",", "ofs", ")", ":", "# a limit on the maximum change in each step", "max_change", "=", "args", ".", "max_change", "# the gain factor for the algorithm", "gain", "=", "args", ".", "gain", "data2", "=", "[", "]", "for", "d", "in", "data", ":", "d", "=", "d", ".", "copy", "(", ")", "+", "noise", "(", ")", "d", ".", "x", "=", "float", "(", "int", "(", "d", ".", "x", "+", "0.5", ")", ")", "d", ".", "y", "=", "float", "(", "int", "(", "d", ".", "y", "+", "0.5", ")", ")", "d", ".", "z", "=", "float", "(", "int", "(", "d", ".", "z", "+", "0.5", ")", ")", "data2", ".", "append", "(", "d", ")", "data", "=", "data2", "history_idx", "=", "0", "mag_history", "=", "data", "[", "0", ":", "args", ".", "history", "]", "for", "i", "in", "range", "(", "args", ".", "history", ",", "len", "(", "data", ")", ")", ":", "B1", "=", "mag_history", "[", "history_idx", "]", "+", "ofs", "B2", "=", "data", "[", "i", "]", "+", "ofs", "diff", "=", "B2", "-", "B1", "diff_length", "=", "diff", ".", "length", "(", ")", "if", "diff_length", "<=", "args", ".", "min_diff", ":", "# the mag vector hasn't changed enough - we don't get any", "# information from this", "history_idx", "=", "(", "history_idx", "+", "1", ")", "%", "args", ".", "history", "continue", "mag_history", "[", "history_idx", "]", "=", "data", "[", "i", "]", "history_idx", "=", "(", "history_idx", "+", "1", ")", "%", "args", ".", "history", "# equation 6 of Bills paper", "delta", "=", "diff", "*", "(", "gain", "*", "(", "B2", ".", "length", "(", ")", "-", "B1", ".", "length", "(", ")", ")", "/", "diff_length", ")", "# limit the change from any one reading. This is to prevent", "# single crazy readings from throwing off the offsets for a long", "# time", "delta_length", "=", "delta", ".", "length", "(", ")", "if", "max_change", "!=", "0", "and", "delta_length", ">", "max_change", ":", "delta", "*=", "max_change", "/", "delta_length", "# set the new offsets", "ofs", "=", "ofs", "-", "delta", "if", "args", ".", "verbose", ":", "print", "(", "ofs", ")", "return", "ofs" ]
29.280702
0.00058
def _ReraiseTypeErrorWithFieldName(message_name, field_name): """Re-raise the currently-handled TypeError with the field name added.""" exc = sys.exc_info()[1] if len(exc.args) == 1 and type(exc) is TypeError: # simple TypeError; add field name to exception message exc = TypeError('%s for field %s.%s' % (str(exc), message_name, field_name)) # re-raise possibly-amended exception with original traceback: six.reraise(type(exc), exc, sys.exc_info()[2])
[ "def", "_ReraiseTypeErrorWithFieldName", "(", "message_name", ",", "field_name", ")", ":", "exc", "=", "sys", ".", "exc_info", "(", ")", "[", "1", "]", "if", "len", "(", "exc", ".", "args", ")", "==", "1", "and", "type", "(", "exc", ")", "is", "TypeError", ":", "# simple TypeError; add field name to exception message", "exc", "=", "TypeError", "(", "'%s for field %s.%s'", "%", "(", "str", "(", "exc", ")", ",", "message_name", ",", "field_name", ")", ")", "# re-raise possibly-amended exception with original traceback:", "six", ".", "reraise", "(", "type", "(", "exc", ")", ",", "exc", ",", "sys", ".", "exc_info", "(", ")", "[", "2", "]", ")" ]
51.444444
0.014862
def ensure_running(self): '''Make sure that semaphore tracker process is running. This can be run from any process. Usually a child process will use the semaphore created by its parent.''' with self._lock: if self._fd is not None: # semaphore tracker was launched before, is it still running? if self._check_alive(): # => still alive return # => dead, launch it again os.close(self._fd) try: # Clean-up to avoid dangling processes. os.waitpid(self._pid, 0) except OSError: # The process was terminated or is a child from an ancestor # of the current process. pass self._fd = None self._pid = None warnings.warn('semaphore_tracker: process died unexpectedly, ' 'relaunching. Some semaphores might leak.') fds_to_pass = [] try: fds_to_pass.append(sys.stderr.fileno()) except Exception: pass r, w = os.pipe() cmd = 'from {} import main; main({}, {})'.format( main.__module__, r, VERBOSE) try: fds_to_pass.append(r) # process will out live us, so no need to wait on pid exe = spawn.get_executable() args = [exe] + util._args_from_interpreter_flags() # In python 3.3, there is a bug which put `-RRRRR..` instead of # `-R` in args. Replace it to get the correct flags. # See https://github.com/python/cpython/blob/3.3/Lib/subprocess.py#L488 if sys.version_info[:2] <= (3, 3): import re for i in range(1, len(args)): args[i] = re.sub("-R+", "-R", args[i]) args += ['-c', cmd] util.debug("launching Semaphore tracker: {}".format(args)) # bpo-33613: Register a signal mask that will block the # signals. This signal mask will be inherited by the child # that is going to be spawned and will protect the child from a # race condition that can make the child die before it # registers signal handlers for SIGINT and SIGTERM. The mask is # unregistered after spawning the child. try: if _HAVE_SIGMASK: signal.pthread_sigmask(signal.SIG_BLOCK, _IGNORED_SIGNALS) pid = spawnv_passfds(exe, args, fds_to_pass) finally: if _HAVE_SIGMASK: signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) except BaseException: os.close(w) raise else: self._fd = w self._pid = pid finally: os.close(r)
[ "def", "ensure_running", "(", "self", ")", ":", "with", "self", ".", "_lock", ":", "if", "self", ".", "_fd", "is", "not", "None", ":", "# semaphore tracker was launched before, is it still running?", "if", "self", ".", "_check_alive", "(", ")", ":", "# => still alive", "return", "# => dead, launch it again", "os", ".", "close", "(", "self", ".", "_fd", ")", "try", ":", "# Clean-up to avoid dangling processes.", "os", ".", "waitpid", "(", "self", ".", "_pid", ",", "0", ")", "except", "OSError", ":", "# The process was terminated or is a child from an ancestor", "# of the current process.", "pass", "self", ".", "_fd", "=", "None", "self", ".", "_pid", "=", "None", "warnings", ".", "warn", "(", "'semaphore_tracker: process died unexpectedly, '", "'relaunching. Some semaphores might leak.'", ")", "fds_to_pass", "=", "[", "]", "try", ":", "fds_to_pass", ".", "append", "(", "sys", ".", "stderr", ".", "fileno", "(", ")", ")", "except", "Exception", ":", "pass", "r", ",", "w", "=", "os", ".", "pipe", "(", ")", "cmd", "=", "'from {} import main; main({}, {})'", ".", "format", "(", "main", ".", "__module__", ",", "r", ",", "VERBOSE", ")", "try", ":", "fds_to_pass", ".", "append", "(", "r", ")", "# process will out live us, so no need to wait on pid", "exe", "=", "spawn", ".", "get_executable", "(", ")", "args", "=", "[", "exe", "]", "+", "util", ".", "_args_from_interpreter_flags", "(", ")", "# In python 3.3, there is a bug which put `-RRRRR..` instead of", "# `-R` in args. Replace it to get the correct flags.", "# See https://github.com/python/cpython/blob/3.3/Lib/subprocess.py#L488", "if", "sys", ".", "version_info", "[", ":", "2", "]", "<=", "(", "3", ",", "3", ")", ":", "import", "re", "for", "i", "in", "range", "(", "1", ",", "len", "(", "args", ")", ")", ":", "args", "[", "i", "]", "=", "re", ".", "sub", "(", "\"-R+\"", ",", "\"-R\"", ",", "args", "[", "i", "]", ")", "args", "+=", "[", "'-c'", ",", "cmd", "]", "util", ".", "debug", "(", "\"launching Semaphore tracker: {}\"", ".", "format", "(", "args", ")", ")", "# bpo-33613: Register a signal mask that will block the", "# signals. This signal mask will be inherited by the child", "# that is going to be spawned and will protect the child from a", "# race condition that can make the child die before it", "# registers signal handlers for SIGINT and SIGTERM. The mask is", "# unregistered after spawning the child.", "try", ":", "if", "_HAVE_SIGMASK", ":", "signal", ".", "pthread_sigmask", "(", "signal", ".", "SIG_BLOCK", ",", "_IGNORED_SIGNALS", ")", "pid", "=", "spawnv_passfds", "(", "exe", ",", "args", ",", "fds_to_pass", ")", "finally", ":", "if", "_HAVE_SIGMASK", ":", "signal", ".", "pthread_sigmask", "(", "signal", ".", "SIG_UNBLOCK", ",", "_IGNORED_SIGNALS", ")", "except", "BaseException", ":", "os", ".", "close", "(", "w", ")", "raise", "else", ":", "self", ".", "_fd", "=", "w", "self", ".", "_pid", "=", "pid", "finally", ":", "os", ".", "close", "(", "r", ")" ]
43.902778
0.000928
def client_detect(self, client, starttime, endtime, threshold, threshold_type, trig_int, plotvar, min_gap=None, daylong=False, parallel_process=True, xcorr_func=None, concurrency=None, cores=None, ignore_length=False, group_size=None, debug=0, return_stream=False, full_peaks=False, save_progress=False, process_cores=None, retries=3, **kwargs): """ Detect using a Tribe of templates within a continuous stream. :type client: `obspy.clients.*.Client` :param client: Any obspy client with a dataselect service. :type starttime: :class:`obspy.core.UTCDateTime` :param starttime: Start-time for detections. :type endtime: :class:`obspy.core.UTCDateTime` :param endtime: End-time for detections :type threshold: float :param threshold: Threshold level, if using `threshold_type='MAD'` then this will be the multiple of the median absolute deviation. :type threshold_type: str :param threshold_type: The type of threshold to be used, can be MAD, absolute or av_chan_corr. See Note on thresholding below. :type trig_int: float :param trig_int: Minimum gap between detections in seconds. If multiple detections occur within trig_int of one-another, the one with the highest cross-correlation sum will be selected. :type plotvar: bool :param plotvar: Turn plotting on or off, see warning about plotting below :type min_gap: float :param min_gap: Minimum gap allowed in data - use to remove traces with known issues :type daylong: bool :param daylong: Set to True to use the :func:`eqcorrscan.utils.pre_processing.dayproc` routine, which preforms additional checks and is more efficient for day-long data over other methods. :type parallel_process: bool :param parallel_process: :type xcorr_func: str or callable :param xcorr_func: A str of a registered xcorr function or a callable for implementing a custom xcorr function. For more information see: :func:`eqcorrscan.utils.correlate.register_array_xcorr` :type concurrency: str :param concurrency: The type of concurrency to apply to the xcorr function. Options are 'multithread', 'multiprocess', 'concurrent'. For more details see :func:`eqcorrscan.utils.correlate.get_stream_xcorr` :type cores: int :param cores: Number of workers for processing and detection. :type ignore_length: bool :param ignore_length: If using daylong=True, then dayproc will try check that the data are there for at least 80% of the day, if you don't want this check (which will raise an error if too much data are missing) then set ignore_length=True. This is not recommended! :type group_size: int :param group_size: Maximum number of templates to run at once, use to reduce memory consumption, if unset will use all templates. :type full_peaks: bool :param full_peaks: See `eqcorrscan.utils.findpeaks.find_peaks2_short` :type save_progress: bool :param save_progress: Whether to save the resulting party at every data step or not. Useful for long-running processes. :type process_cores: int :param process_cores: Number of processes to use for pre-processing (if different to `cores`). :type debug: int :param debug: Debug level from 0-5 where five is more output, for debug levels 4 and 5, detections will not be computed in parallel. :type return_stream: bool :param return_stream: Whether to also output the stream downloaded, useful if you plan to use the stream for something else, e.g. lag_calc. :type retries: int :param retries: Number of attempts allowed for downloading - allows for transient server issues. :return: :class:`eqcorrscan.core.match_filter.Party` of Families of detections. .. 
Note:: Detections are not corrected for `pre-pick`, the detection.detect_time corresponds to the beginning of the earliest template channel at detection. .. warning:: Picks included in the output Party.get_catalog() will not be corrected for pre-picks in the template. .. Note:: Ensures that data overlap between loops, which will lead to no missed detections at data start-stop points (see note for :meth:`eqcorrscan.core.match_filter.Tribe.detect` method). This will result in end-time not being strictly honoured, so detections may occur after the end-time set. This is because data must be run in the correct process-length. .. warning:: Plotting within the match-filter routine uses the Agg backend with interactive plotting turned off. This is because the function is designed to work in bulk. If you wish to turn interactive plotting on you must import matplotlib in your script first, when you then import match_filter you will get the warning that this call to matplotlib has no effect, which will mean that match_filter has not changed the plotting behaviour. .. note:: **Thresholding:** **MAD** threshold is calculated as the: .. math:: threshold {\\times} (median(abs(cccsum))) where :math:`cccsum` is the cross-correlation sum for a given template. **absolute** threshold is a true absolute threshold based on the cccsum value. **av_chan_corr** is based on the mean values of single-channel cross-correlations assuming all data are present as required for the template, e.g: .. math:: av\_chan\_corr\_thresh=threshold \\times (cccsum / len(template)) where :math:`template` is a single template from the input and the length is the number of channels within this template. """ party = Party() buff = 300 # Apply a buffer, often data downloaded is not the correct length data_length = max([t.process_length for t in self.templates]) pad = 0 for template in self.templates: max_delay = (template.st.sort(['starttime'])[-1].stats.starttime - template.st.sort(['starttime'])[0].stats.starttime) if max_delay > pad: pad = max_delay download_groups = int(endtime - starttime) / data_length template_channel_ids = [] for template in self.templates: for tr in template.st: if tr.stats.network not in [None, '']: chan_id = (tr.stats.network,) else: chan_id = ('*',) if tr.stats.station not in [None, '']: chan_id += (tr.stats.station,) else: chan_id += ('*',) if tr.stats.location not in [None, '']: chan_id += (tr.stats.location,) else: chan_id += ('*',) if tr.stats.channel not in [None, '']: if len(tr.stats.channel) == 2: chan_id += (tr.stats.channel[0] + '?' 
+ tr.stats.channel[-1],) else: chan_id += (tr.stats.channel,) else: chan_id += ('*',) template_channel_ids.append(chan_id) template_channel_ids = list(set(template_channel_ids)) if return_stream: stream = Stream() if int(download_groups) < download_groups: download_groups = int(download_groups) + 1 else: download_groups = int(download_groups) for i in range(download_groups): bulk_info = [] for chan_id in template_channel_ids: bulk_info.append(( chan_id[0], chan_id[1], chan_id[2], chan_id[3], starttime + (i * data_length) - (pad + buff), starttime + ((i + 1) * data_length) + (pad + buff))) for retry_attempt in range(retries): try: st = client.get_waveforms_bulk(bulk_info) break except Exception as e: print(e) continue else: raise MatchFilterError( "Could not download data after {0} attempts".format( retries)) # Get gaps and remove traces as necessary if min_gap: gaps = st.get_gaps(min_gap=min_gap) if len(gaps) > 0: print("Large gaps in downloaded data") st.merge() gappy_channels = list( set([(gap[0], gap[1], gap[2], gap[3]) for gap in gaps])) _st = Stream() for tr in st: tr_stats = (tr.stats.network, tr.stats.station, tr.stats.location, tr.stats.channel) if tr_stats in gappy_channels: print("Removing gappy channel: %s" % str(tr)) else: _st += tr st = _st st.split() st.merge() st.trim(starttime=starttime + (i * data_length) - pad, endtime=starttime + ((i + 1) * data_length) + pad) for tr in st: if not _check_daylong(tr): st.remove(tr) print("{0} contains more zeros than non-zero, " "removed".format(tr.id)) for tr in st: if tr.stats.endtime - tr.stats.starttime < \ 0.8 * data_length: st.remove(tr) print("{0} is less than 80% of the required length" ", removed".format(tr.id)) if return_stream: stream += st try: party += self.detect( stream=st, threshold=threshold, threshold_type=threshold_type, trig_int=trig_int, plotvar=plotvar, daylong=daylong, parallel_process=parallel_process, xcorr_func=xcorr_func, concurrency=concurrency, cores=cores, ignore_length=ignore_length, group_size=group_size, overlap=None, debug=debug, full_peaks=full_peaks, process_cores=process_cores, **kwargs) if save_progress: party.write("eqcorrscan_temporary_party") except Exception as e: print('Error, routine incomplete, returning incomplete Party') print('Error: %s' % str(e)) if return_stream: return party, stream else: return party for family in party: if family is not None: family.detections = family._uniq().detections if return_stream: return party, stream else: return party
[ "def", "client_detect", "(", "self", ",", "client", ",", "starttime", ",", "endtime", ",", "threshold", ",", "threshold_type", ",", "trig_int", ",", "plotvar", ",", "min_gap", "=", "None", ",", "daylong", "=", "False", ",", "parallel_process", "=", "True", ",", "xcorr_func", "=", "None", ",", "concurrency", "=", "None", ",", "cores", "=", "None", ",", "ignore_length", "=", "False", ",", "group_size", "=", "None", ",", "debug", "=", "0", ",", "return_stream", "=", "False", ",", "full_peaks", "=", "False", ",", "save_progress", "=", "False", ",", "process_cores", "=", "None", ",", "retries", "=", "3", ",", "*", "*", "kwargs", ")", ":", "party", "=", "Party", "(", ")", "buff", "=", "300", "# Apply a buffer, often data downloaded is not the correct length", "data_length", "=", "max", "(", "[", "t", ".", "process_length", "for", "t", "in", "self", ".", "templates", "]", ")", "pad", "=", "0", "for", "template", "in", "self", ".", "templates", ":", "max_delay", "=", "(", "template", ".", "st", ".", "sort", "(", "[", "'starttime'", "]", ")", "[", "-", "1", "]", ".", "stats", ".", "starttime", "-", "template", ".", "st", ".", "sort", "(", "[", "'starttime'", "]", ")", "[", "0", "]", ".", "stats", ".", "starttime", ")", "if", "max_delay", ">", "pad", ":", "pad", "=", "max_delay", "download_groups", "=", "int", "(", "endtime", "-", "starttime", ")", "/", "data_length", "template_channel_ids", "=", "[", "]", "for", "template", "in", "self", ".", "templates", ":", "for", "tr", "in", "template", ".", "st", ":", "if", "tr", ".", "stats", ".", "network", "not", "in", "[", "None", ",", "''", "]", ":", "chan_id", "=", "(", "tr", ".", "stats", ".", "network", ",", ")", "else", ":", "chan_id", "=", "(", "'*'", ",", ")", "if", "tr", ".", "stats", ".", "station", "not", "in", "[", "None", ",", "''", "]", ":", "chan_id", "+=", "(", "tr", ".", "stats", ".", "station", ",", ")", "else", ":", "chan_id", "+=", "(", "'*'", ",", ")", "if", "tr", ".", "stats", ".", "location", "not", "in", "[", "None", ",", "''", "]", ":", "chan_id", "+=", "(", "tr", ".", "stats", ".", "location", ",", ")", "else", ":", "chan_id", "+=", "(", "'*'", ",", ")", "if", "tr", ".", "stats", ".", "channel", "not", "in", "[", "None", ",", "''", "]", ":", "if", "len", "(", "tr", ".", "stats", ".", "channel", ")", "==", "2", ":", "chan_id", "+=", "(", "tr", ".", "stats", ".", "channel", "[", "0", "]", "+", "'?'", "+", "tr", ".", "stats", ".", "channel", "[", "-", "1", "]", ",", ")", "else", ":", "chan_id", "+=", "(", "tr", ".", "stats", ".", "channel", ",", ")", "else", ":", "chan_id", "+=", "(", "'*'", ",", ")", "template_channel_ids", ".", "append", "(", "chan_id", ")", "template_channel_ids", "=", "list", "(", "set", "(", "template_channel_ids", ")", ")", "if", "return_stream", ":", "stream", "=", "Stream", "(", ")", "if", "int", "(", "download_groups", ")", "<", "download_groups", ":", "download_groups", "=", "int", "(", "download_groups", ")", "+", "1", "else", ":", "download_groups", "=", "int", "(", "download_groups", ")", "for", "i", "in", "range", "(", "download_groups", ")", ":", "bulk_info", "=", "[", "]", "for", "chan_id", "in", "template_channel_ids", ":", "bulk_info", ".", "append", "(", "(", "chan_id", "[", "0", "]", ",", "chan_id", "[", "1", "]", ",", "chan_id", "[", "2", "]", ",", "chan_id", "[", "3", "]", ",", "starttime", "+", "(", "i", "*", "data_length", ")", "-", "(", "pad", "+", "buff", ")", ",", "starttime", "+", "(", "(", "i", "+", "1", ")", "*", "data_length", ")", "+", "(", "pad", "+", "buff", ")", ")", ")", "for", 
"retry_attempt", "in", "range", "(", "retries", ")", ":", "try", ":", "st", "=", "client", ".", "get_waveforms_bulk", "(", "bulk_info", ")", "break", "except", "Exception", "as", "e", ":", "print", "(", "e", ")", "continue", "else", ":", "raise", "MatchFilterError", "(", "\"Could not download data after {0} attempts\"", ".", "format", "(", "retries", ")", ")", "# Get gaps and remove traces as necessary", "if", "min_gap", ":", "gaps", "=", "st", ".", "get_gaps", "(", "min_gap", "=", "min_gap", ")", "if", "len", "(", "gaps", ")", ">", "0", ":", "print", "(", "\"Large gaps in downloaded data\"", ")", "st", ".", "merge", "(", ")", "gappy_channels", "=", "list", "(", "set", "(", "[", "(", "gap", "[", "0", "]", ",", "gap", "[", "1", "]", ",", "gap", "[", "2", "]", ",", "gap", "[", "3", "]", ")", "for", "gap", "in", "gaps", "]", ")", ")", "_st", "=", "Stream", "(", ")", "for", "tr", "in", "st", ":", "tr_stats", "=", "(", "tr", ".", "stats", ".", "network", ",", "tr", ".", "stats", ".", "station", ",", "tr", ".", "stats", ".", "location", ",", "tr", ".", "stats", ".", "channel", ")", "if", "tr_stats", "in", "gappy_channels", ":", "print", "(", "\"Removing gappy channel: %s\"", "%", "str", "(", "tr", ")", ")", "else", ":", "_st", "+=", "tr", "st", "=", "_st", "st", ".", "split", "(", ")", "st", ".", "merge", "(", ")", "st", ".", "trim", "(", "starttime", "=", "starttime", "+", "(", "i", "*", "data_length", ")", "-", "pad", ",", "endtime", "=", "starttime", "+", "(", "(", "i", "+", "1", ")", "*", "data_length", ")", "+", "pad", ")", "for", "tr", "in", "st", ":", "if", "not", "_check_daylong", "(", "tr", ")", ":", "st", ".", "remove", "(", "tr", ")", "print", "(", "\"{0} contains more zeros than non-zero, \"", "\"removed\"", ".", "format", "(", "tr", ".", "id", ")", ")", "for", "tr", "in", "st", ":", "if", "tr", ".", "stats", ".", "endtime", "-", "tr", ".", "stats", ".", "starttime", "<", "0.8", "*", "data_length", ":", "st", ".", "remove", "(", "tr", ")", "print", "(", "\"{0} is less than 80% of the required length\"", "\", removed\"", ".", "format", "(", "tr", ".", "id", ")", ")", "if", "return_stream", ":", "stream", "+=", "st", "try", ":", "party", "+=", "self", ".", "detect", "(", "stream", "=", "st", ",", "threshold", "=", "threshold", ",", "threshold_type", "=", "threshold_type", ",", "trig_int", "=", "trig_int", ",", "plotvar", "=", "plotvar", ",", "daylong", "=", "daylong", ",", "parallel_process", "=", "parallel_process", ",", "xcorr_func", "=", "xcorr_func", ",", "concurrency", "=", "concurrency", ",", "cores", "=", "cores", ",", "ignore_length", "=", "ignore_length", ",", "group_size", "=", "group_size", ",", "overlap", "=", "None", ",", "debug", "=", "debug", ",", "full_peaks", "=", "full_peaks", ",", "process_cores", "=", "process_cores", ",", "*", "*", "kwargs", ")", "if", "save_progress", ":", "party", ".", "write", "(", "\"eqcorrscan_temporary_party\"", ")", "except", "Exception", "as", "e", ":", "print", "(", "'Error, routine incomplete, returning incomplete Party'", ")", "print", "(", "'Error: %s'", "%", "str", "(", "e", ")", ")", "if", "return_stream", ":", "return", "party", ",", "stream", "else", ":", "return", "party", "for", "family", "in", "party", ":", "if", "family", "is", "not", "None", ":", "family", ".", "detections", "=", "family", ".", "_uniq", "(", ")", ".", "detections", "if", "return_stream", ":", "return", "party", ",", "stream", "else", ":", "return", "party" ]
44.161049
0.000912
def has_split(self, split_name): """ Checks whether or not the split with the given name exists. Parameters ---------- split_name : str name of the split """ if os.path.exists(os.path.join(self.split_dir, split_name)): return True return False
[ "def", "has_split", "(", "self", ",", "split_name", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "self", ".", "split_dir", ",", "split_name", ")", ")", ":", "return", "True", "return", "False" ]
29.272727
0.009036
def register_view(self, view): """Creates treeview columns, and connects missing signals""" # if stand-alone, connects the window delete event to # kill the loop if self.view.is_stand_alone(): import gtk self.view.get_top_widget().connect('delete-event', lambda w,e: gtk.main_quit()) pass return
[ "def", "register_view", "(", "self", ",", "view", ")", ":", "# if stand-alone, connects the window delete event to", "# kill the loop", "if", "self", ".", "view", ".", "is_stand_alone", "(", ")", ":", "import", "gtk", "self", ".", "view", ".", "get_top_widget", "(", ")", ".", "connect", "(", "'delete-event'", ",", "lambda", "w", ",", "e", ":", "gtk", ".", "main_quit", "(", ")", ")", "pass", "return" ]
32.166667
0.012594
def setCentralWidget(self, widget): """ Sets the central widget for this button. :param widget | <QWidget> """ self.setEnabled(widget is not None) self._popupWidget.setCentralWidget(widget)
[ "def", "setCentralWidget", "(", "self", ",", "widget", ")", ":", "self", ".", "setEnabled", "(", "widget", "is", "not", "None", ")", "self", ".", "_popupWidget", ".", "setCentralWidget", "(", "widget", ")" ]
31.375
0.011628
def generator(self) -> Iterator[str]: """ Create a generator that iterates the whole content of the file or string. :return: An iterator iterating the lines of the text stream, separated by ``'\\n'`` or ``'\\r'``. """ stream = self.stream # In case that ``self.stream`` is changed. stream.seek(0) for line in stream: yield line
[ "def", "generator", "(", "self", ")", "->", "Iterator", "[", "str", "]", ":", "stream", "=", "self", ".", "stream", "# In case that ``self.stream`` is changed.", "stream", ".", "seek", "(", "0", ")", "for", "line", "in", "stream", ":", "yield", "line" ]
38.7
0.010101
def _make_section_node(self, template, tag_type, tag_key, parsed_section, section_start_index, section_end_index): """ Create and return a section node for the parse tree. """ if tag_type == '#': return _SectionNode(tag_key, parsed_section, self._delimiters, template, section_start_index, section_end_index) if tag_type == '^': return _InvertedNode(tag_key, parsed_section) raise Exception("Invalid symbol for section tag: %s" % repr(tag_type))
[ "def", "_make_section_node", "(", "self", ",", "template", ",", "tag_type", ",", "tag_key", ",", "parsed_section", ",", "section_start_index", ",", "section_end_index", ")", ":", "if", "tag_type", "==", "'#'", ":", "return", "_SectionNode", "(", "tag_key", ",", "parsed_section", ",", "self", ".", "_delimiters", ",", "template", ",", "section_start_index", ",", "section_end_index", ")", "if", "tag_type", "==", "'^'", ":", "return", "_InvertedNode", "(", "tag_key", ",", "parsed_section", ")", "raise", "Exception", "(", "\"Invalid symbol for section tag: %s\"", "%", "repr", "(", "tag_type", ")", ")" ]
40.357143
0.008651
def configs_for_writer(writer=None, ppp_config_dir=None): """Generator of writer configuration files for one or more writers Args: writer (Optional[str]): Yield configs only for this writer ppp_config_dir (Optional[str]): Additional configuration directory to search for writer configuration files. Returns: Generator of lists of configuration files """ search_paths = (ppp_config_dir,) if ppp_config_dir else tuple() if writer is not None: if not isinstance(writer, (list, tuple)): writer = [writer] # given a config filename or writer name config_files = [w if w.endswith('.yaml') else w + '.yaml' for w in writer] else: writer_configs = glob_config(os.path.join('writers', '*.yaml'), *search_paths) config_files = set(writer_configs) for config_file in config_files: config_basename = os.path.basename(config_file) writer_configs = config_search_paths( os.path.join("writers", config_basename), *search_paths) if not writer_configs: LOG.warning("No writer configs found for '%s'", writer) continue yield writer_configs
[ "def", "configs_for_writer", "(", "writer", "=", "None", ",", "ppp_config_dir", "=", "None", ")", ":", "search_paths", "=", "(", "ppp_config_dir", ",", ")", "if", "ppp_config_dir", "else", "tuple", "(", ")", "if", "writer", "is", "not", "None", ":", "if", "not", "isinstance", "(", "writer", ",", "(", "list", ",", "tuple", ")", ")", ":", "writer", "=", "[", "writer", "]", "# given a config filename or writer name", "config_files", "=", "[", "w", "if", "w", ".", "endswith", "(", "'.yaml'", ")", "else", "w", "+", "'.yaml'", "for", "w", "in", "writer", "]", "else", ":", "writer_configs", "=", "glob_config", "(", "os", ".", "path", ".", "join", "(", "'writers'", ",", "'*.yaml'", ")", ",", "*", "search_paths", ")", "config_files", "=", "set", "(", "writer_configs", ")", "for", "config_file", "in", "config_files", ":", "config_basename", "=", "os", ".", "path", ".", "basename", "(", "config_file", ")", "writer_configs", "=", "config_search_paths", "(", "os", ".", "path", ".", "join", "(", "\"writers\"", ",", "config_basename", ")", ",", "*", "search_paths", ")", "if", "not", "writer_configs", ":", "LOG", ".", "warning", "(", "\"No writer configs found for '%s'\"", ",", "writer", ")", "continue", "yield", "writer_configs" ]
37.90625
0.001608
def default_links_factory_with_additional(additional_links): """Generate a links generation factory with the specified additional links. :param additional_links: A dict of link names to links to be added to the returned object. :returns: A link generation factory. """ def factory(pid, **kwargs): links = default_links_factory(pid) for link in additional_links: links[link] = additional_links[link].format(pid=pid, scheme=request.scheme, host=request.host) return links return factory
[ "def", "default_links_factory_with_additional", "(", "additional_links", ")", ":", "def", "factory", "(", "pid", ",", "*", "*", "kwargs", ")", ":", "links", "=", "default_links_factory", "(", "pid", ")", "for", "link", "in", "additional_links", ":", "links", "[", "link", "]", "=", "additional_links", "[", "link", "]", ".", "format", "(", "pid", "=", "pid", ",", "scheme", "=", "request", ".", "scheme", ",", "host", "=", "request", ".", "host", ")", "return", "links", "return", "factory" ]
40.875
0.001495
def col (loc,strg): """Returns current column within a string, counting newlines as line separators. The first column is number 1. Note: the default parsing behavior is to expand tabs in the input string before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information on parsing strings containing C{<TAB>}s, and suggested methods to maintain a consistent view of the parsed string, the parse location, and line and column positions within the parsed string. """ s = strg return 1 if loc<len(s) and s[loc] == '\n' else loc - s.rfind("\n", 0, loc)
[ "def", "col", "(", "loc", ",", "strg", ")", ":", "s", "=", "strg", "return", "1", "if", "loc", "<", "len", "(", "s", ")", "and", "s", "[", "loc", "]", "==", "'\\n'", "else", "loc", "-", "s", ".", "rfind", "(", "\"\\n\"", ",", "0", ",", "loc", ")" ]
52.25
0.010972
def find_all(soup, name=None, attrs=None, recursive=True, text=None, limit=None, **kwargs): """The `find` and `find_all` methods of `BeautifulSoup` don't handle the `text` parameter combined with other parameters. This is necessary for e.g. finding links containing a string or pattern. This method first searches by text content, and then by the standard BeautifulSoup arguments. """ if text is None: return soup.find_all( name, attrs or {}, recursive, text, limit, **kwargs ) if isinstance(text, string_types): text = re.compile(re.escape(text), re.I) tags = soup.find_all( name, attrs or {}, recursive, **kwargs ) rv = [] for tag in tags: if match_text(text, tag): rv.append(tag) if limit is not None and len(rv) >= limit: break return rv
[ "def", "find_all", "(", "soup", ",", "name", "=", "None", ",", "attrs", "=", "None", ",", "recursive", "=", "True", ",", "text", "=", "None", ",", "limit", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "text", "is", "None", ":", "return", "soup", ".", "find_all", "(", "name", ",", "attrs", "or", "{", "}", ",", "recursive", ",", "text", ",", "limit", ",", "*", "*", "kwargs", ")", "if", "isinstance", "(", "text", ",", "string_types", ")", ":", "text", "=", "re", ".", "compile", "(", "re", ".", "escape", "(", "text", ")", ",", "re", ".", "I", ")", "tags", "=", "soup", ".", "find_all", "(", "name", ",", "attrs", "or", "{", "}", ",", "recursive", ",", "*", "*", "kwargs", ")", "rv", "=", "[", "]", "for", "tag", "in", "tags", ":", "if", "match_text", "(", "text", ",", "tag", ")", ":", "rv", ".", "append", "(", "tag", ")", "if", "limit", "is", "not", "None", "and", "len", "(", "rv", ")", ">=", "limit", ":", "break", "return", "rv" ]
36.041667
0.002252
def move_selection(reverse=False): """ Goes through the list of gunicorns, setting the selected PID to the one after the currently selected. """ global selected_pid if selected_pid not in gunicorns: selected_pid = None found = False pids = sorted(gunicorns.keys(), reverse=reverse) # Iterate items twice to enable wrapping. for pid in pids + pids: if selected_pid is None or found: selected_pid = pid return found = pid == selected_pid
[ "def", "move_selection", "(", "reverse", "=", "False", ")", ":", "global", "selected_pid", "if", "selected_pid", "not", "in", "gunicorns", ":", "selected_pid", "=", "None", "found", "=", "False", "pids", "=", "sorted", "(", "gunicorns", ".", "keys", "(", ")", ",", "reverse", "=", "reverse", ")", "# Iterate items twice to enable wrapping.", "for", "pid", "in", "pids", "+", "pids", ":", "if", "selected_pid", "is", "None", "or", "found", ":", "selected_pid", "=", "pid", "return", "found", "=", "pid", "==", "selected_pid" ]
31.5
0.001927
def _sign(private_key, data, hash_algorithm, rsa_pss_padding=False): """ Generates an RSA, DSA or ECDSA signature :param private_key: The PrivateKey to generate the signature with :param data: A byte string of the data the signature is for :param hash_algorithm: A unicode string of "md5", "sha1", "sha256", "sha384", "sha512" or "raw" :param rsa_pss_padding: If PSS padding should be used for RSA keys :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: A byte string of the signature """ if not isinstance(private_key, PrivateKey): raise TypeError(pretty_message( ''' private_key must be an instance of PrivateKey, not %s ''', type_name(private_key) )) if not isinstance(data, byte_cls): raise TypeError(pretty_message( ''' data must be a byte string, not %s ''', type_name(data) )) valid_hash_algorithms = set(['md5', 'sha1', 'sha256', 'sha384', 'sha512']) if private_key.algorithm == 'rsa' and not rsa_pss_padding: valid_hash_algorithms |= set(['raw']) if hash_algorithm not in valid_hash_algorithms: valid_hash_algorithms_error = '"md5", "sha1", "sha256", "sha384", "sha512"' if private_key.algorithm == 'rsa' and not rsa_pss_padding: valid_hash_algorithms_error += ', "raw"' raise ValueError(pretty_message( ''' hash_algorithm must be one of %s, not %s ''', valid_hash_algorithms_error, repr(hash_algorithm) )) if private_key.algorithm != 'rsa' and rsa_pss_padding is not False: raise ValueError(pretty_message( ''' PSS padding may only be used with RSA keys - signing via a %s key was requested ''', private_key.algorithm.upper() )) if hash_algorithm == 'raw': if len(data) > private_key.byte_size - 11: raise ValueError(pretty_message( ''' data must be 11 bytes shorter than the key size when hash_algorithm is "raw" - key size is %s bytes, but data is %s bytes long ''', private_key.byte_size, len(data) )) if _backend == 'winlegacy': if private_key.algorithm == 'ec': return _pure_python_ecdsa_sign(private_key, data, hash_algorithm) return _advapi32_sign(private_key, data, hash_algorithm, rsa_pss_padding) return _bcrypt_sign(private_key, data, hash_algorithm, rsa_pss_padding)
[ "def", "_sign", "(", "private_key", ",", "data", ",", "hash_algorithm", ",", "rsa_pss_padding", "=", "False", ")", ":", "if", "not", "isinstance", "(", "private_key", ",", "PrivateKey", ")", ":", "raise", "TypeError", "(", "pretty_message", "(", "'''\n private_key must be an instance of PrivateKey, not %s\n '''", ",", "type_name", "(", "private_key", ")", ")", ")", "if", "not", "isinstance", "(", "data", ",", "byte_cls", ")", ":", "raise", "TypeError", "(", "pretty_message", "(", "'''\n data must be a byte string, not %s\n '''", ",", "type_name", "(", "data", ")", ")", ")", "valid_hash_algorithms", "=", "set", "(", "[", "'md5'", ",", "'sha1'", ",", "'sha256'", ",", "'sha384'", ",", "'sha512'", "]", ")", "if", "private_key", ".", "algorithm", "==", "'rsa'", "and", "not", "rsa_pss_padding", ":", "valid_hash_algorithms", "|=", "set", "(", "[", "'raw'", "]", ")", "if", "hash_algorithm", "not", "in", "valid_hash_algorithms", ":", "valid_hash_algorithms_error", "=", "'\"md5\", \"sha1\", \"sha256\", \"sha384\", \"sha512\"'", "if", "private_key", ".", "algorithm", "==", "'rsa'", "and", "not", "rsa_pss_padding", ":", "valid_hash_algorithms_error", "+=", "', \"raw\"'", "raise", "ValueError", "(", "pretty_message", "(", "'''\n hash_algorithm must be one of %s, not %s\n '''", ",", "valid_hash_algorithms_error", ",", "repr", "(", "hash_algorithm", ")", ")", ")", "if", "private_key", ".", "algorithm", "!=", "'rsa'", "and", "rsa_pss_padding", "is", "not", "False", ":", "raise", "ValueError", "(", "pretty_message", "(", "'''\n PSS padding may only be used with RSA keys - signing via a %s key\n was requested\n '''", ",", "private_key", ".", "algorithm", ".", "upper", "(", ")", ")", ")", "if", "hash_algorithm", "==", "'raw'", ":", "if", "len", "(", "data", ")", ">", "private_key", ".", "byte_size", "-", "11", ":", "raise", "ValueError", "(", "pretty_message", "(", "'''\n data must be 11 bytes shorter than the key size when\n hash_algorithm is \"raw\" - key size is %s bytes, but data\n is %s bytes long\n '''", ",", "private_key", ".", "byte_size", ",", "len", "(", "data", ")", ")", ")", "if", "_backend", "==", "'winlegacy'", ":", "if", "private_key", ".", "algorithm", "==", "'ec'", ":", "return", "_pure_python_ecdsa_sign", "(", "private_key", ",", "data", ",", "hash_algorithm", ")", "return", "_advapi32_sign", "(", "private_key", ",", "data", ",", "hash_algorithm", ",", "rsa_pss_padding", ")", "return", "_bcrypt_sign", "(", "private_key", ",", "data", ",", "hash_algorithm", ",", "rsa_pss_padding", ")" ]
33.698795
0.001389
def unregister(self, measurement_class, callback): """Stop notifying ``callback`` of new values of ``measurement_class``. If the callback wasn't previously registered, this method will have no effect. """ self.callbacks[Measurement.name_from_class(measurement_class) ].remove(callback)
[ "def", "unregister", "(", "self", ",", "measurement_class", ",", "callback", ")", ":", "self", ".", "callbacks", "[", "Measurement", ".", "name_from_class", "(", "measurement_class", ")", "]", ".", "remove", "(", "callback", ")" ]
41.875
0.008772
def owned_ecs(self): '''A list of the execution contexts owned by this component.''' with self._mutex: if not self._owned_ecs: self._owned_ecs = [ExecutionContext(ec, self._obj.get_context_handle(ec)) \ for ec in self._obj.get_owned_contexts()] return self._owned_ecs
[ "def", "owned_ecs", "(", "self", ")", ":", "with", "self", ".", "_mutex", ":", "if", "not", "self", ".", "_owned_ecs", ":", "self", ".", "_owned_ecs", "=", "[", "ExecutionContext", "(", "ec", ",", "self", ".", "_obj", ".", "get_context_handle", "(", "ec", ")", ")", "for", "ec", "in", "self", ".", "_obj", ".", "get_owned_contexts", "(", ")", "]", "return", "self", ".", "_owned_ecs" ]
44
0.011142
def listDatasets(self, dataset="", parent_dataset="", is_dataset_valid=1, release_version="", pset_hash="", app_name="", output_module_label="", global_tag="", processing_version=0, acquisition_era_name="", run_num=-1, physics_group_name="", logical_file_name="", primary_ds_name="", primary_ds_type="", processed_ds_name='', data_tier_name="", dataset_access_type="VALID", prep_id='', create_by="", last_modified_by="", min_cdate='0', max_cdate='0', min_ldate='0', max_ldate='0', cdate='0', ldate='0', detail=False, dataset_id=-1): """ API to list dataset(s) in DBS * You can use ANY combination of these parameters in this API * In absence of parameters, all valid datasets known to the DBS instance will be returned :param dataset: Full dataset (path) of the dataset. :type dataset: str :param parent_dataset: Full dataset (path) of the dataset :type parent_dataset: str :param release_version: cmssw version :type release_version: str :param pset_hash: pset hash :type pset_hash: str :param app_name: Application name (generally it is cmsRun) :type app_name: str :param output_module_label: output_module_label :type output_module_label: str :param global_tag: global_tag :type global_tag: str :param processing_version: Processing Version :type processing_version: str :param acquisition_era_name: Acquisition Era :type acquisition_era_name: str :param run_num: Specify a specific run number or range. Possible format are: run_num, 'run_min-run_max' or ['run_min-run_max', run1, run2, ...]. run_num=1 is not allowed. :type run_num: int,list,str :param physics_group_name: List only dataset having physics_group_name attribute :type physics_group_name: str :param logical_file_name: List dataset containing the logical_file_name :type logical_file_name: str :param primary_ds_name: Primary Dataset Name :type primary_ds_name: str :param primary_ds_type: Primary Dataset Type (Type of data, MC/DATA) :type primary_ds_type: str :param processed_ds_name: List datasets having this processed dataset name :type processed_ds_name: str :param data_tier_name: Data Tier :type data_tier_name: str :param dataset_access_type: Dataset Access Type ( PRODUCTION, DEPRECATED etc.) :type dataset_access_type: str :param prep_id: prep_id :type prep_id: str :param create_by: Creator of the dataset :type create_by: str :param last_modified_by: Last modifier of the dataset :type last_modified_by: str :param min_cdate: Lower limit for the creation date (unixtime) (Optional) :type min_cdate: int, str :param max_cdate: Upper limit for the creation date (unixtime) (Optional) :type max_cdate: int, str :param min_ldate: Lower limit for the last modification date (unixtime) (Optional) :type min_ldate: int, str :param max_ldate: Upper limit for the last modification date (unixtime) (Optional) :type max_ldate: int, str :param cdate: creation date (unixtime) (Optional) :type cdate: int, str :param ldate: last modification date (unixtime) (Optional) :type ldate: int, str :param detail: List all details of a dataset :type detail: bool :param dataset_id: dataset table primary key used by CMS Computing Analytics. :type dataset_id: int, long, str :returns: List of dictionaries containing the following keys (dataset). If the detail option is used. 
The dictionary contain the following keys (primary_ds_name, physics_group_name, acquisition_era_name, create_by, dataset_access_type, data_tier_name, last_modified_by, creation_date, processing_version, processed_ds_name, xtcrosssection, last_modification_date, dataset_id, dataset, prep_id, primary_ds_type) :rtype: list of dicts """ dataset = dataset.replace("*", "%") parent_dataset = parent_dataset.replace("*", "%") release_version = release_version.replace("*", "%") pset_hash = pset_hash.replace("*", "%") app_name = app_name.replace("*", "%") output_module_label = output_module_label.replace("*", "%") global_tag = global_tag.replace("*", "%") logical_file_name = logical_file_name.replace("*", "%") physics_group_name = physics_group_name.replace("*", "%") primary_ds_name = primary_ds_name.replace("*", "%") primary_ds_type = primary_ds_type.replace("*", "%") data_tier_name = data_tier_name.replace("*", "%") dataset_access_type = dataset_access_type.replace("*", "%") processed_ds_name = processed_ds_name.replace("*", "%") acquisition_era_name = acquisition_era_name.replace("*", "%") #processing_version = processing_version.replace("*", "%") #create_by and last_modified_by have be full spelled, no wildcard will allowed. #We got them from request head so they can be either HN account name or DN. #This is depended on how an user's account is set up. # # In the next release we will require dataset has no wildcard in it. # DBS will reject wildcard search with dataset name with listDatasets call. # One should seperate the dataset into primary , process and datatier if any wildcard. # YG Oct 26, 2016 # Some of users were overwhiled by the API change. So we split the wildcarded dataset in the server instead of by the client. # YG Dec. 9 2016 # # run_num=1 caused full table scan and CERN DBS reported some of the queries ran more than 50 hours # We will disbale all the run_num=1 calls in DBS. Run_num=1 will be OK when logical_file_name is given. # YG Jan. 
15 2019 # if (run_num != -1 and logical_file_name ==''): for r in parseRunRange(run_num): if isinstance(r, basestring) or isinstance(r, int) or isinstance(r, long): if r == 1 or r == '1': dbsExceptionHandler("dbsException-invalid-input", "Run_num=1 is not a valid input.", self.logger.exception) elif isinstance(r, run_tuple): if r[0] == r[1]: dbsExceptionHandler('dbsException-invalid-input', "DBS run range must be apart at least by 1.", self.logger.exception) elif r[0] <= 1 <= r[1]: dbsExceptionHandler("dbsException-invalid-input", "Run_num=1 is not a valid input.", self.logger.exception) if( dataset and ( dataset == "/%/%/%" or dataset== "/%" or dataset == "/%/%" ) ): dataset='' elif( dataset and ( dataset.find('%') != -1 ) ) : junk, primary_ds_name, processed_ds_name, data_tier_name = dataset.split('/') dataset = '' if ( primary_ds_name == '%' ): primary_ds_name = '' if( processed_ds_name == '%' ): processed_ds_name = '' if ( data_tier_name == '%' ): data_tier_name = '' try: dataset_id = int(dataset_id) except: dbsExceptionHandler("dbsException-invalid-input2", "Invalid Input for dataset_id that has to be an int.", self.logger.exception, 'dataset_id has to be an int.') if create_by.find('*')!=-1 or create_by.find('%')!=-1 or last_modified_by.find('*')!=-1\ or last_modified_by.find('%')!=-1: dbsExceptionHandler("dbsException-invalid-input2", "Invalid Input for create_by or last_modified_by.\ No wildcard allowed.", self.logger.exception, 'No wildcards allowed for create_by or last_modified_by') try: if isinstance(min_cdate, basestring) and ('*' in min_cdate or '%' in min_cdate): min_cdate = 0 else: try: min_cdate = int(min_cdate) except: dbsExceptionHandler("dbsException-invalid-input", "invalid input for min_cdate") if isinstance(max_cdate, basestring) and ('*' in max_cdate or '%' in max_cdate): max_cdate = 0 else: try: max_cdate = int(max_cdate) except: dbsExceptionHandler("dbsException-invalid-input", "invalid input for max_cdate") if isinstance(min_ldate, basestring) and ('*' in min_ldate or '%' in min_ldate): min_ldate = 0 else: try: min_ldate = int(min_ldate) except: dbsExceptionHandler("dbsException-invalid-input", "invalid input for min_ldate") if isinstance(max_ldate, basestring) and ('*' in max_ldate or '%' in max_ldate): max_ldate = 0 else: try: max_ldate = int(max_ldate) except: dbsExceptionHandler("dbsException-invalid-input", "invalid input for max_ldate") if isinstance(cdate, basestring) and ('*' in cdate or '%' in cdate): cdate = 0 else: try: cdate = int(cdate) except: dbsExceptionHandler("dbsException-invalid-input", "invalid input for cdate") if isinstance(ldate, basestring) and ('*' in ldate or '%' in ldate): ldate = 0 else: try: ldate = int(ldate) except: dbsExceptionHandler("dbsException-invalid-input", "invalid input for ldate") except dbsException as de: dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError) except Exception as ex: sError = "DBSReaderModel/listDatasets. %s \n. 
Exception trace: \n %s" \ % (ex, traceback.format_exc()) dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError) detail = detail in (True, 1, "True", "1", 'true') try: return self.dbsDataset.listDatasets(dataset, parent_dataset, is_dataset_valid, release_version, pset_hash, app_name, output_module_label, global_tag, processing_version, acquisition_era_name, run_num, physics_group_name, logical_file_name, primary_ds_name, primary_ds_type, processed_ds_name, data_tier_name, dataset_access_type, prep_id, create_by, last_modified_by, min_cdate, max_cdate, min_ldate, max_ldate, cdate, ldate, detail, dataset_id) except dbsException as de: dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError) except Exception as ex: sError = "DBSReaderModel/listdatasets. %s.\n Exception trace: \n %s" % (ex, traceback.format_exc()) dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
[ "def", "listDatasets", "(", "self", ",", "dataset", "=", "\"\"", ",", "parent_dataset", "=", "\"\"", ",", "is_dataset_valid", "=", "1", ",", "release_version", "=", "\"\"", ",", "pset_hash", "=", "\"\"", ",", "app_name", "=", "\"\"", ",", "output_module_label", "=", "\"\"", ",", "global_tag", "=", "\"\"", ",", "processing_version", "=", "0", ",", "acquisition_era_name", "=", "\"\"", ",", "run_num", "=", "-", "1", ",", "physics_group_name", "=", "\"\"", ",", "logical_file_name", "=", "\"\"", ",", "primary_ds_name", "=", "\"\"", ",", "primary_ds_type", "=", "\"\"", ",", "processed_ds_name", "=", "''", ",", "data_tier_name", "=", "\"\"", ",", "dataset_access_type", "=", "\"VALID\"", ",", "prep_id", "=", "''", ",", "create_by", "=", "\"\"", ",", "last_modified_by", "=", "\"\"", ",", "min_cdate", "=", "'0'", ",", "max_cdate", "=", "'0'", ",", "min_ldate", "=", "'0'", ",", "max_ldate", "=", "'0'", ",", "cdate", "=", "'0'", ",", "ldate", "=", "'0'", ",", "detail", "=", "False", ",", "dataset_id", "=", "-", "1", ")", ":", "dataset", "=", "dataset", ".", "replace", "(", "\"*\"", ",", "\"%\"", ")", "parent_dataset", "=", "parent_dataset", ".", "replace", "(", "\"*\"", ",", "\"%\"", ")", "release_version", "=", "release_version", ".", "replace", "(", "\"*\"", ",", "\"%\"", ")", "pset_hash", "=", "pset_hash", ".", "replace", "(", "\"*\"", ",", "\"%\"", ")", "app_name", "=", "app_name", ".", "replace", "(", "\"*\"", ",", "\"%\"", ")", "output_module_label", "=", "output_module_label", ".", "replace", "(", "\"*\"", ",", "\"%\"", ")", "global_tag", "=", "global_tag", ".", "replace", "(", "\"*\"", ",", "\"%\"", ")", "logical_file_name", "=", "logical_file_name", ".", "replace", "(", "\"*\"", ",", "\"%\"", ")", "physics_group_name", "=", "physics_group_name", ".", "replace", "(", "\"*\"", ",", "\"%\"", ")", "primary_ds_name", "=", "primary_ds_name", ".", "replace", "(", "\"*\"", ",", "\"%\"", ")", "primary_ds_type", "=", "primary_ds_type", ".", "replace", "(", "\"*\"", ",", "\"%\"", ")", "data_tier_name", "=", "data_tier_name", ".", "replace", "(", "\"*\"", ",", "\"%\"", ")", "dataset_access_type", "=", "dataset_access_type", ".", "replace", "(", "\"*\"", ",", "\"%\"", ")", "processed_ds_name", "=", "processed_ds_name", ".", "replace", "(", "\"*\"", ",", "\"%\"", ")", "acquisition_era_name", "=", "acquisition_era_name", ".", "replace", "(", "\"*\"", ",", "\"%\"", ")", "#processing_version = processing_version.replace(\"*\", \"%\")", "#create_by and last_modified_by have be full spelled, no wildcard will allowed.", "#We got them from request head so they can be either HN account name or DN.", "#This is depended on how an user's account is set up.", "#", "# In the next release we will require dataset has no wildcard in it. ", "# DBS will reject wildcard search with dataset name with listDatasets call. ", "# One should seperate the dataset into primary , process and datatier if any wildcard.", "# YG Oct 26, 2016", "# Some of users were overwhiled by the API change. So we split the wildcarded dataset in the server instead of by the client.", "# YG Dec. 9 2016", "#", "# run_num=1 caused full table scan and CERN DBS reported some of the queries ran more than 50 hours", "# We will disbale all the run_num=1 calls in DBS. Run_num=1 will be OK when logical_file_name is given.", "# YG Jan. 
15 2019", "#", "if", "(", "run_num", "!=", "-", "1", "and", "logical_file_name", "==", "''", ")", ":", "for", "r", "in", "parseRunRange", "(", "run_num", ")", ":", "if", "isinstance", "(", "r", ",", "basestring", ")", "or", "isinstance", "(", "r", ",", "int", ")", "or", "isinstance", "(", "r", ",", "long", ")", ":", "if", "r", "==", "1", "or", "r", "==", "'1'", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input\"", ",", "\"Run_num=1 is not a valid input.\"", ",", "self", ".", "logger", ".", "exception", ")", "elif", "isinstance", "(", "r", ",", "run_tuple", ")", ":", "if", "r", "[", "0", "]", "==", "r", "[", "1", "]", ":", "dbsExceptionHandler", "(", "'dbsException-invalid-input'", ",", "\"DBS run range must be apart at least by 1.\"", ",", "self", ".", "logger", ".", "exception", ")", "elif", "r", "[", "0", "]", "<=", "1", "<=", "r", "[", "1", "]", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input\"", ",", "\"Run_num=1 is not a valid input.\"", ",", "self", ".", "logger", ".", "exception", ")", "if", "(", "dataset", "and", "(", "dataset", "==", "\"/%/%/%\"", "or", "dataset", "==", "\"/%\"", "or", "dataset", "==", "\"/%/%\"", ")", ")", ":", "dataset", "=", "''", "elif", "(", "dataset", "and", "(", "dataset", ".", "find", "(", "'%'", ")", "!=", "-", "1", ")", ")", ":", "junk", ",", "primary_ds_name", ",", "processed_ds_name", ",", "data_tier_name", "=", "dataset", ".", "split", "(", "'/'", ")", "dataset", "=", "''", "if", "(", "primary_ds_name", "==", "'%'", ")", ":", "primary_ds_name", "=", "''", "if", "(", "processed_ds_name", "==", "'%'", ")", ":", "processed_ds_name", "=", "''", "if", "(", "data_tier_name", "==", "'%'", ")", ":", "data_tier_name", "=", "''", "try", ":", "dataset_id", "=", "int", "(", "dataset_id", ")", "except", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input2\"", ",", "\"Invalid Input for dataset_id that has to be an int.\"", ",", "self", ".", "logger", ".", "exception", ",", "'dataset_id has to be an int.'", ")", "if", "create_by", ".", "find", "(", "'*'", ")", "!=", "-", "1", "or", "create_by", ".", "find", "(", "'%'", ")", "!=", "-", "1", "or", "last_modified_by", ".", "find", "(", "'*'", ")", "!=", "-", "1", "or", "last_modified_by", ".", "find", "(", "'%'", ")", "!=", "-", "1", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input2\"", ",", "\"Invalid Input for create_by or last_modified_by.\\\n No wildcard allowed.\"", ",", "self", ".", "logger", ".", "exception", ",", "'No wildcards allowed for create_by or last_modified_by'", ")", "try", ":", "if", "isinstance", "(", "min_cdate", ",", "basestring", ")", "and", "(", "'*'", "in", "min_cdate", "or", "'%'", "in", "min_cdate", ")", ":", "min_cdate", "=", "0", "else", ":", "try", ":", "min_cdate", "=", "int", "(", "min_cdate", ")", "except", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input\"", ",", "\"invalid input for min_cdate\"", ")", "if", "isinstance", "(", "max_cdate", ",", "basestring", ")", "and", "(", "'*'", "in", "max_cdate", "or", "'%'", "in", "max_cdate", ")", ":", "max_cdate", "=", "0", "else", ":", "try", ":", "max_cdate", "=", "int", "(", "max_cdate", ")", "except", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input\"", ",", "\"invalid input for max_cdate\"", ")", "if", "isinstance", "(", "min_ldate", ",", "basestring", ")", "and", "(", "'*'", "in", "min_ldate", "or", "'%'", "in", "min_ldate", ")", ":", "min_ldate", "=", "0", "else", ":", "try", ":", "min_ldate", "=", "int", "(", "min_ldate", ")", "except", ":", 
"dbsExceptionHandler", "(", "\"dbsException-invalid-input\"", ",", "\"invalid input for min_ldate\"", ")", "if", "isinstance", "(", "max_ldate", ",", "basestring", ")", "and", "(", "'*'", "in", "max_ldate", "or", "'%'", "in", "max_ldate", ")", ":", "max_ldate", "=", "0", "else", ":", "try", ":", "max_ldate", "=", "int", "(", "max_ldate", ")", "except", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input\"", ",", "\"invalid input for max_ldate\"", ")", "if", "isinstance", "(", "cdate", ",", "basestring", ")", "and", "(", "'*'", "in", "cdate", "or", "'%'", "in", "cdate", ")", ":", "cdate", "=", "0", "else", ":", "try", ":", "cdate", "=", "int", "(", "cdate", ")", "except", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input\"", ",", "\"invalid input for cdate\"", ")", "if", "isinstance", "(", "ldate", ",", "basestring", ")", "and", "(", "'*'", "in", "ldate", "or", "'%'", "in", "ldate", ")", ":", "ldate", "=", "0", "else", ":", "try", ":", "ldate", "=", "int", "(", "ldate", ")", "except", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input\"", ",", "\"invalid input for ldate\"", ")", "except", "dbsException", "as", "de", ":", "dbsExceptionHandler", "(", "de", ".", "eCode", ",", "de", ".", "message", ",", "self", ".", "logger", ".", "exception", ",", "de", ".", "serverError", ")", "except", "Exception", "as", "ex", ":", "sError", "=", "\"DBSReaderModel/listDatasets. %s \\n. Exception trace: \\n %s\"", "%", "(", "ex", ",", "traceback", ".", "format_exc", "(", ")", ")", "dbsExceptionHandler", "(", "'dbsException-server-error'", ",", "dbsExceptionCode", "[", "'dbsException-server-error'", "]", ",", "self", ".", "logger", ".", "exception", ",", "sError", ")", "detail", "=", "detail", "in", "(", "True", ",", "1", ",", "\"True\"", ",", "\"1\"", ",", "'true'", ")", "try", ":", "return", "self", ".", "dbsDataset", ".", "listDatasets", "(", "dataset", ",", "parent_dataset", ",", "is_dataset_valid", ",", "release_version", ",", "pset_hash", ",", "app_name", ",", "output_module_label", ",", "global_tag", ",", "processing_version", ",", "acquisition_era_name", ",", "run_num", ",", "physics_group_name", ",", "logical_file_name", ",", "primary_ds_name", ",", "primary_ds_type", ",", "processed_ds_name", ",", "data_tier_name", ",", "dataset_access_type", ",", "prep_id", ",", "create_by", ",", "last_modified_by", ",", "min_cdate", ",", "max_cdate", ",", "min_ldate", ",", "max_ldate", ",", "cdate", ",", "ldate", ",", "detail", ",", "dataset_id", ")", "except", "dbsException", "as", "de", ":", "dbsExceptionHandler", "(", "de", ".", "eCode", ",", "de", ".", "message", ",", "self", ".", "logger", ".", "exception", ",", "de", ".", "serverError", ")", "except", "Exception", "as", "ex", ":", "sError", "=", "\"DBSReaderModel/listdatasets. %s.\\n Exception trace: \\n %s\"", "%", "(", "ex", ",", "traceback", ".", "format_exc", "(", ")", ")", "dbsExceptionHandler", "(", "'dbsException-server-error'", ",", "dbsExceptionCode", "[", "'dbsException-server-error'", "]", ",", "self", ".", "logger", ".", "exception", ",", "sError", ")" ]
55.219512
0.009979
def hist_calls_with_dims(**dims): """Decorator to check the distribution of return values of a function with dimensions. """ def hist_wrapper(fn): @functools.wraps(fn) def fn_wrapper(*args, **kwargs): _histogram = histogram( "%s_calls" % pyformance.registry.get_qualname(fn), **dims) rtn = fn(*args, **kwargs) if type(rtn) in (int, float): _histogram.add(rtn) return rtn return fn_wrapper return hist_wrapper
[ "def", "hist_calls_with_dims", "(", "*", "*", "dims", ")", ":", "def", "hist_wrapper", "(", "fn", ")", ":", "@", "functools", ".", "wraps", "(", "fn", ")", "def", "fn_wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "_histogram", "=", "histogram", "(", "\"%s_calls\"", "%", "pyformance", ".", "registry", ".", "get_qualname", "(", "fn", ")", ",", "*", "*", "dims", ")", "rtn", "=", "fn", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "type", "(", "rtn", ")", "in", "(", "int", ",", "float", ")", ":", "_histogram", ".", "add", "(", "rtn", ")", "return", "rtn", "return", "fn_wrapper", "return", "hist_wrapper" ]
34.533333
0.00188
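A brief usage sketch of the decorator above; the module path metrics_helpers and the dimension names are illustrative assumptions, not part of the original source.

# Hypothetical usage: numeric return values of the decorated function are
# recorded in a histogram tagged with the given dimensions.
from metrics_helpers import hist_calls_with_dims  # assumed local module

@hist_calls_with_dims(region="us-east-1", service="billing")
def fetch_invoice_total(invoice_id):
    return 42.0  # numeric, so it is added to the histogram

fetch_invoice_total("INV-1")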
def get_referer(req, replace_ampersands=False): """ Return the referring page of a request. Referer (wikipedia): Referer is a common misspelling of the word "referrer"; so common, in fact, that it made it into the official specification of HTTP. When visiting a webpage, the referer or referring page is the URL of the previous webpage from which a link was followed. @param req: request @param replace_ampersands: if 1, replace & by &amp; in url (correct HTML cannot contain & characters alone) """ try: referer = req.headers_in['Referer'] if replace_ampersands == 1: return referer.replace('&', '&amp;') return referer except KeyError: return ''
[ "def", "get_referer", "(", "req", ",", "replace_ampersands", "=", "False", ")", ":", "try", ":", "referer", "=", "req", ".", "headers_in", "[", "'Referer'", "]", "if", "replace_ampersands", "==", "1", ":", "return", "referer", ".", "replace", "(", "'&'", ",", "'&amp;'", ")", "return", "referer", "except", "KeyError", ":", "return", "''" ]
41.611111
0.001305
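A rough handler sketch for the helper above, assuming a mod_python-style request object; the fallback URL is a placeholder.

# Hypothetical handler: fall back to a default page when no Referer header
# is present (get_referer returns '' in that case).
def handler(req):
    back_url = get_referer(req, replace_ampersands=True) or '/index.html'
    req.write('<a href="%s">Back</a>' % back_url)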
def extractall(filename, directory, backend='auto', auto_create_dir=False): ''' :param backend: auto, patool or zipfile :param filename: path to archive file :param directory: directory to extract to :param auto_create_dir: auto create directory ''' Archive(filename, backend).extractall(directory, auto_create_dir=auto_create_dir)
[ "def", "extractall", "(", "filename", ",", "directory", ",", "backend", "=", "'auto'", ",", "auto_create_dir", "=", "False", ")", ":", "Archive", "(", "filename", ",", "backend", ")", ".", "extractall", "(", "directory", ",", "auto_create_dir", "=", "auto_create_dir", ")" ]
43.666667
0.002494
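A minimal call sketch for the wrapper above (pyunpack-style); the archive path and destination are placeholders.

# Hypothetical usage: let the backend be picked automatically and create the
# output directory if it does not exist yet.
extractall('backup.tar.gz', '/tmp/restore', backend='auto', auto_create_dir=True)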
def dispatch(self, *args, **kwargs): """This decorator sets this view to have restricted permissions.""" return super(AnimalYearArchive, self).dispatch(*args, **kwargs)
[ "def", "dispatch", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "super", "(", "AnimalYearArchive", ",", "self", ")", ".", "dispatch", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
60.666667
0.01087
def setChatPhoto(self, chat_id, photo): """ See: https://core.telegram.org/bots/api#setchatphoto """ p = _strip(locals(), more=['photo']) return self._api_request_with_file('setChatPhoto', _rectify(p), 'photo', photo)
[ "def", "setChatPhoto", "(", "self", ",", "chat_id", ",", "photo", ")", ":", "p", "=", "_strip", "(", "locals", "(", ")", ",", "more", "=", "[", "'photo'", "]", ")", "return", "self", ".", "_api_request_with_file", "(", "'setChatPhoto'", ",", "_rectify", "(", "p", ")", ",", "'photo'", ",", "photo", ")" ]
59.5
0.012448
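A short sketch of calling the method above on an already-constructed telepot-style bot client; the chat id and file path are assumptions.

# Hypothetical usage: upload a new chat photo from a local file; the open
# file object is passed through to the multipart file upload helper.
with open('group_logo.jpg', 'rb') as photo:
    bot.setChatPhoto(chat_id=-1001234567890, photo=photo)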
def before_add_field(self, field):
    """
    If extract_fields is set to True, then '*' fields will be removed and each
    individual field will be read from the model meta data and added.
    """
    if self.extract_fields and field.name == '*':
        field.ignore = True
        fields = [model_field.column for model_field in self.model._meta.fields]
        self.add_fields(fields)
[ "def", "before_add_field", "(", "self", ",", "field", ")", ":", "if", "self", ".", "extract_fields", "and", "field", ".", "name", "==", "'*'", ":", "field", ".", "ignore", "=", "True", "fields", "=", "[", "model_field", ".", "column", "for", "model_field", "in", "self", ".", "model", ".", "_meta", ".", "fields", "]", "self", ".", "add_fields", "(", "fields", ")" ]
45.666667
0.009547
def decode(self, litmap): """Convert the DNF to an expression.""" return Or(*[And(*[litmap[idx] for idx in clause]) for clause in self.clauses])
[ "def", "decode", "(", "self", ",", "litmap", ")", ":", "return", "Or", "(", "*", "[", "And", "(", "*", "[", "litmap", "[", "idx", "]", "for", "idx", "in", "clause", "]", ")", "for", "clause", "in", "self", ".", "clauses", "]", ")" ]
44.25
0.011111
def date_to_long_form_string(dt, locale_ = 'en_US.utf8'): '''dt should be a datetime.date object.''' if locale_: old_locale = locale.getlocale() locale.setlocale(locale.LC_ALL, locale_) v = dt.strftime("%A %B %d %Y") if locale_: locale.setlocale(locale.LC_ALL, old_locale) return v
[ "def", "date_to_long_form_string", "(", "dt", ",", "locale_", "=", "'en_US.utf8'", ")", ":", "if", "locale_", ":", "old_locale", "=", "locale", ".", "getlocale", "(", ")", "locale", ".", "setlocale", "(", "locale", ".", "LC_ALL", ",", "locale_", ")", "v", "=", "dt", ".", "strftime", "(", "\"%A %B %d %Y\"", ")", "if", "locale_", ":", "locale", ".", "setlocale", "(", "locale", ".", "LC_ALL", ",", "old_locale", ")", "return", "v" ]
35.222222
0.009231
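A quick worked call of the formatter above; passing None for locale_ skips the setlocale calls, so no particular locale needs to be installed.

import datetime

# Prints the long-form date using the current locale's names; in an English
# locale the output is: Saturday March 14 2020
print(date_to_long_form_string(datetime.date(2020, 3, 14), locale_=None))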
def first_up(ofile, Rec, file_type): """ writes the header for a MagIC template file """ keylist = [] pmag_out = open(ofile, 'a') outstring = "tab \t" + file_type + "\n" pmag_out.write(outstring) keystring = "" for key in list(Rec.keys()): keystring = keystring + '\t' + key keylist.append(key) keystring = keystring + '\n' pmag_out.write(keystring[1:]) pmag_out.close() return keylist
[ "def", "first_up", "(", "ofile", ",", "Rec", ",", "file_type", ")", ":", "keylist", "=", "[", "]", "pmag_out", "=", "open", "(", "ofile", ",", "'a'", ")", "outstring", "=", "\"tab \\t\"", "+", "file_type", "+", "\"\\n\"", "pmag_out", ".", "write", "(", "outstring", ")", "keystring", "=", "\"\"", "for", "key", "in", "list", "(", "Rec", ".", "keys", "(", ")", ")", ":", "keystring", "=", "keystring", "+", "'\\t'", "+", "key", "keylist", ".", "append", "(", "key", ")", "keystring", "=", "keystring", "+", "'\\n'", "pmag_out", ".", "write", "(", "keystring", "[", "1", ":", "]", ")", "pmag_out", ".", "close", "(", ")", "return", "keylist" ]
27.375
0.002208
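A small call sketch for the header writer above; the record keys, file name, and file type are placeholders.

# Hypothetical usage: append the two header lines (file type, then the
# tab-separated column names) and keep the returned key order for data rows.
rec = {'specimen': 'sp01', 'dir_dec': '12.5', 'dir_inc': '45.0'}
keys = first_up('specimens.txt', rec, 'specimens')
# keys == ['specimen', 'dir_dec', 'dir_inc'] on Python 3.7+, where dicts
# preserve insertion order.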
def color(requestContext, seriesList, theColor): """ Assigns the given color to the seriesList Example:: &target=color(collectd.hostname.cpu.0.user, 'green') &target=color(collectd.hostname.cpu.0.system, 'ff0000') &target=color(collectd.hostname.cpu.0.idle, 'gray') &target=color(collectd.hostname.cpu.0.idle, '6464ffaa') """ for series in seriesList: series.color = theColor return seriesList
[ "def", "color", "(", "requestContext", ",", "seriesList", ",", "theColor", ")", ":", "for", "series", "in", "seriesList", ":", "series", ".", "color", "=", "theColor", "return", "seriesList" ]
29.733333
0.002174
def dumps(o, preserve=False): """Stringifies input dict as toml Args: o: Object to dump into toml preserve: Boolean parameter. If true, preserve inline tables. Returns: String containing the toml corresponding to dict """ retval = "" addtoretval, sections = _dump_sections(o, "") retval += addtoretval while sections != {}: newsections = {} for section in sections: addtoretval, addtosections = _dump_sections(sections[section], section, preserve) if addtoretval or (not addtoretval and not addtosections): if retval and retval[-2:] != "\n\n": retval += "\n" retval += "[" + section + "]\n" if addtoretval: retval += addtoretval for s in addtosections: newsections[section + "." + s] = addtosections[s] sections = newsections return retval
[ "def", "dumps", "(", "o", ",", "preserve", "=", "False", ")", ":", "retval", "=", "\"\"", "addtoretval", ",", "sections", "=", "_dump_sections", "(", "o", ",", "\"\"", ")", "retval", "+=", "addtoretval", "while", "sections", "!=", "{", "}", ":", "newsections", "=", "{", "}", "for", "section", "in", "sections", ":", "addtoretval", ",", "addtosections", "=", "_dump_sections", "(", "sections", "[", "section", "]", ",", "section", ",", "preserve", ")", "if", "addtoretval", "or", "(", "not", "addtoretval", "and", "not", "addtosections", ")", ":", "if", "retval", "and", "retval", "[", "-", "2", ":", "]", "!=", "\"\\n\\n\"", ":", "retval", "+=", "\"\\n\"", "retval", "+=", "\"[\"", "+", "section", "+", "\"]\\n\"", "if", "addtoretval", ":", "retval", "+=", "addtoretval", "for", "s", "in", "addtosections", ":", "newsections", "[", "section", "+", "\".\"", "+", "s", "]", "=", "addtosections", "[", "s", "]", "sections", "=", "newsections", "return", "retval" ]
33.133333
0.000978
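A usage sketch assuming the function above behaves like the toml package's dumps; only the general shape of the output is described, since exact spacing is version-dependent.

# Hypothetical usage: nested dicts become [section] tables; the deeper
# 'server.ports' table is collected into newsections and emitted on the
# second pass of the while loop above.
config = {"owner": {"name": "Ada"}, "server": {"ports": {"http": 80}}}
text = dumps(config)
# text contains an [owner] table and a [server.ports] table; a bare [server]
# header is skipped because that section has no direct key/value pairs.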
def abs_energy(self, x): """ As in tsfresh `abs_energy <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/feature_extraction/\ feature_calculators.py#L390>`_ \ Returns the absolute energy of the time series which is the sum over the squared values\ .. math:: E=\\sum_{i=1,\ldots, n}x_i^2 :param x: the time series to calculate the feature of :type x: pandas.Series :return: the value of this feature :rtype: float """ _energy = feature_calculators.abs_energy(x) logging.debug("abs energy by tsfresh calculated") return _energy
[ "def", "abs_energy", "(", "self", ",", "x", ")", ":", "_energy", "=", "feature_calculators", ".", "abs_energy", "(", "x", ")", "logging", ".", "debug", "(", "\"abs energy by tsfresh calculated\"", ")", "return", "_energy" ]
33.5
0.014514
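A tiny worked check of the sum-of-squares definition in the docstring above, using the raw tsfresh calculator that the wrapper delegates to.

import pandas as pd
from tsfresh.feature_extraction import feature_calculators

x = pd.Series([1.0, 2.0, 3.0])
# E = 1^2 + 2^2 + 3^2 = 14
assert feature_calculators.abs_energy(x) == 14.0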
def create_token_generator(input_list): """SQL Generator to select from list of values in Oracle""" ###Generator trick from http://betteratoracle.com/posts/20-how-do-i-bind-a-variable-in-list ###The maximum length of the comma separated list is 4000 characters, therefore we need to split the list ###ORA-01460: unimplemented or unreasonable conversion requested will thrown if list is larger oracle_limit = 4000 grp_list = [] if type(input_list[0]) == int : input_str = ','.join(map(str, input_list)) else: input_str = ','.join(input_list) if len(input_str) >= oracle_limit: index = 0 while True: begin, end = index, index+oracle_limit if end > len(input_str): end = len(input_str) grp_list.append(input_str[begin:end]) break else: index = input_str.rfind(',', begin, end) if index == -1: break grp_list.append(input_str[begin:index]) index += 1 #to remove the leading comma else: grp_list.append(input_str) token_generator = """ WITH TOKEN_GENERATOR AS ( """ binds = {} for index, chunk in enumerate(grp_list): if index: token_generator += """ UNION ALL """ bind = "token_%s" % index token_generator += """SELECT REGEXP_SUBSTR(:{bind}, '[^,]+', 1, LEVEL) token FROM DUAL CONNECT BY LEVEL <= LENGTH(:{bind}) - LENGTH(REPLACE(:{bind}, ',', '')) + 1 """.format(bind=bind) binds.update({bind: chunk}) token_generator += ")" return token_generator, binds
[ "def", "create_token_generator", "(", "input_list", ")", ":", "###Generator trick from http://betteratoracle.com/posts/20-how-do-i-bind-a-variable-in-list", "###The maximum length of the comma separated list is 4000 characters, therefore we need to split the list", "###ORA-01460: unimplemented or unreasonable conversion requested will thrown if list is larger", "oracle_limit", "=", "4000", "grp_list", "=", "[", "]", "if", "type", "(", "input_list", "[", "0", "]", ")", "==", "int", ":", "input_str", "=", "','", ".", "join", "(", "map", "(", "str", ",", "input_list", ")", ")", "else", ":", "input_str", "=", "','", ".", "join", "(", "input_list", ")", "if", "len", "(", "input_str", ")", ">=", "oracle_limit", ":", "index", "=", "0", "while", "True", ":", "begin", ",", "end", "=", "index", ",", "index", "+", "oracle_limit", "if", "end", ">", "len", "(", "input_str", ")", ":", "end", "=", "len", "(", "input_str", ")", "grp_list", ".", "append", "(", "input_str", "[", "begin", ":", "end", "]", ")", "break", "else", ":", "index", "=", "input_str", ".", "rfind", "(", "','", ",", "begin", ",", "end", ")", "if", "index", "==", "-", "1", ":", "break", "grp_list", ".", "append", "(", "input_str", "[", "begin", ":", "index", "]", ")", "index", "+=", "1", "#to remove the leading comma", "else", ":", "grp_list", ".", "append", "(", "input_str", ")", "token_generator", "=", "\"\"\"\n WITH TOKEN_GENERATOR AS (\n \"\"\"", "binds", "=", "{", "}", "for", "index", ",", "chunk", "in", "enumerate", "(", "grp_list", ")", ":", "if", "index", ":", "token_generator", "+=", "\"\"\"\n UNION ALL\n \"\"\"", "bind", "=", "\"token_%s\"", "%", "index", "token_generator", "+=", "\"\"\"SELECT REGEXP_SUBSTR(:{bind}, '[^,]+', 1, LEVEL) token\n FROM DUAL\n CONNECT BY LEVEL <= LENGTH(:{bind}) - LENGTH(REPLACE(:{bind}, ',', '')) + 1\n \"\"\"", ".", "format", "(", "bind", "=", "bind", ")", "binds", ".", "update", "(", "{", "bind", ":", "chunk", "}", ")", "token_generator", "+=", "\")\"", "return", "token_generator", ",", "binds" ]
33.87234
0.015263
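A short sketch of what the helper above returns for a small list; the joined string is far under the 4000-character Oracle limit, so everything lands in a single bind variable.

# Hypothetical usage: build the WITH clause and bind dict, then prepend the
# clause to a query that joins against the TOKEN_GENERATOR rows.
sql, binds = create_token_generator(['/A/B/C', '/D/E/F'])
# binds == {'token_0': '/A/B/C,/D/E/F'}
# sql holds a "WITH TOKEN_GENERATOR AS ( ... )" block that splits the bound
# string back into one row per value via REGEXP_SUBSTR.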
def init_file(self, filename, lines, expected, line_offset): """Prepare storage for errors.""" super(_PycodestyleReport, self).init_file( filename, lines, expected, line_offset) self.errors = []
[ "def", "init_file", "(", "self", ",", "filename", ",", "lines", ",", "expected", ",", "line_offset", ")", ":", "super", "(", "_PycodestyleReport", ",", "self", ")", ".", "init_file", "(", "filename", ",", "lines", ",", "expected", ",", "line_offset", ")", "self", ".", "errors", "=", "[", "]" ]
45.2
0.008696
def detectSonyMylo(self): """Return detection of a Sony Mylo device Detects if the current browser is a Sony Mylo device. """ return UAgentInfo.manuSony in self.__userAgent \ and (UAgentInfo.qtembedded in self.__userAgent or UAgentInfo.mylocom2 in self.__userAgent)
[ "def", "detectSonyMylo", "(", "self", ")", ":", "return", "UAgentInfo", ".", "manuSony", "in", "self", ".", "__userAgent", "and", "(", "UAgentInfo", ".", "qtembedded", "in", "self", ".", "__userAgent", "or", "UAgentInfo", ".", "mylocom2", "in", "self", ".", "__userAgent", ")" ]
39.875
0.009202
def close(self): '''Clean up.''' for path in self._temp_filenames: if os.path.exists(path): os.remove(path)
[ "def", "close", "(", "self", ")", ":", "for", "path", "in", "self", ".", "_temp_filenames", ":", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "os", ".", "remove", "(", "path", ")" ]
29.4
0.013245
def index_queryset(self, using=None):
    """Used when the entire index for the model is updated."""
    return self.get_model().objects.filter(
        modified__lte=datetime.datetime.now(),
        status=STATUS.published
    )
[ "def", "index_queryset", "(", "self", ",", "using", "=", "None", ")", ":", "return", "self", ".", "get_model", "(", ")", ".", "objects", ".", "filter", "(", "modified__lte", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ",", "status", "=", "STATUS", ".", "published", ")" ]
40
0.008163
def decode_timeseries(self, resp_ttb, tsobj, convert_timestamp=False):
    """
    Fills a TsObject with the appropriate data and
    metadata from a TTB-encoded TsGetResp / TsQueryResp.

    :param resp_ttb: the decoded TTB data
    :type resp_ttb: TTB-encoded tsqueryrsp or tsgetresp
    :param tsobj: a TsObject
    :type tsobj: TsObject
    :param convert_timestamp: Convert timestamps to datetime objects
    :type convert_timestamp: boolean
    """
    if resp_ttb is None:
        return tsobj

    self.maybe_err_ttb(resp_ttb)

    # NB: some queries return a BARE 'tsqueryresp' atom
    # catch that here:
    if resp_ttb == tsqueryresp_a:
        return tsobj

    # The response atom is the first element in the response tuple
    resp_a = resp_ttb[0]
    if resp_a == tsputresp_a:
        return
    elif resp_a == tsgetresp_a or resp_a == tsqueryresp_a:
        resp_data = resp_ttb[1]
        if len(resp_data) == 0:
            return
        elif len(resp_data) == 3:
            resp_colnames = resp_data[0]
            resp_coltypes = resp_data[1]
            tsobj.columns = self.decode_timeseries_cols(
                resp_colnames, resp_coltypes)
            resp_rows = resp_data[2]
            tsobj.rows = []
            for resp_row in resp_rows:
                tsobj.rows.append(
                    self.decode_timeseries_row(resp_row, resp_coltypes,
                                               convert_timestamp))
        else:
            raise RiakError(
                "Expected 3-tuple in response, got: {}".format(resp_data))
    else:
        raise RiakError("Unknown TTB response type: {}".format(resp_a))
[ "def", "decode_timeseries", "(", "self", ",", "resp_ttb", ",", "tsobj", ",", "convert_timestamp", "=", "False", ")", ":", "if", "resp_ttb", "is", "None", ":", "return", "tsobj", "self", ".", "maybe_err_ttb", "(", "resp_ttb", ")", "# NB: some queries return a BARE 'tsqueryresp' atom", "# catch that here:", "if", "resp_ttb", "==", "tsqueryresp_a", ":", "return", "tsobj", "# The response atom is the first element in the response tuple", "resp_a", "=", "resp_ttb", "[", "0", "]", "if", "resp_a", "==", "tsputresp_a", ":", "return", "elif", "resp_a", "==", "tsgetresp_a", "or", "resp_a", "==", "tsqueryresp_a", ":", "resp_data", "=", "resp_ttb", "[", "1", "]", "if", "len", "(", "resp_data", ")", "==", "0", ":", "return", "elif", "len", "(", "resp_data", ")", "==", "3", ":", "resp_colnames", "=", "resp_data", "[", "0", "]", "resp_coltypes", "=", "resp_data", "[", "1", "]", "tsobj", ".", "columns", "=", "self", ".", "decode_timeseries_cols", "(", "resp_colnames", ",", "resp_coltypes", ")", "resp_rows", "=", "resp_data", "[", "2", "]", "tsobj", ".", "rows", "=", "[", "]", "for", "resp_row", "in", "resp_rows", ":", "tsobj", ".", "rows", ".", "append", "(", "self", ".", "decode_timeseries_row", "(", "resp_row", ",", "resp_coltypes", ",", "convert_timestamp", ")", ")", "else", ":", "raise", "RiakError", "(", "\"Expected 3-tuple in response, got: {}\"", ".", "format", "(", "resp_data", ")", ")", "else", ":", "raise", "RiakError", "(", "\"Unknown TTB response type: {}\"", ".", "format", "(", "resp_a", ")", ")" ]
37.93617
0.00164