Columns: text (string, lengths 75 to 104k); code_tokens (sequence); avg_line_len (float64, 7.91 to 980); score (float64, 0 to 0.18)
def remove_binaries(package_dir=False):
    """Remove all binaries for the current platform

    Parameters
    ----------
    package_dir: bool
        If True, remove all binaries from the `resources`
        directory of the qpsphere package. If False,
        remove all binaries from the user's cache directory.
    """
    paths = []

    if package_dir:
        pdir = RESCR_PATH
    else:
        pdir = CACHE_PATH

    for pp in pdir.iterdir():
        if pp.name != "shipped_resources_go_here":
            paths.append(pp)

    for pp in paths:
        pp.unlink()
[ "def", "remove_binaries", "(", "package_dir", "=", "False", ")", ":", "paths", "=", "[", "]", "if", "package_dir", ":", "pdir", "=", "RESCR_PATH", "else", ":", "pdir", "=", "CACHE_PATH", "for", "pp", "in", "pdir", ".", "iterdir", "(", ")", ":", "if", "pp", ".", "name", "!=", "\"shipped_resources_go_here\"", ":", "paths", ".", "append", "(", "pp", ")", "for", "pp", "in", "paths", ":", "pp", ".", "unlink", "(", ")" ]
24.043478
0.001739
def _gather_pillar(pillarenv, pillar_override):
    '''
    Whenever a state run starts, gather the pillar data fresh
    '''
    pillar = salt.pillar.get_pillar(
        __opts__,
        __grains__,
        __opts__['id'],
        __opts__['saltenv'],
        pillar_override=pillar_override,
        pillarenv=pillarenv
    )
    ret = pillar.compile_pillar()
    if pillar_override and isinstance(pillar_override, dict):
        ret.update(pillar_override)
    return ret
[ "def", "_gather_pillar", "(", "pillarenv", ",", "pillar_override", ")", ":", "pillar", "=", "salt", ".", "pillar", ".", "get_pillar", "(", "__opts__", ",", "__grains__", ",", "__opts__", "[", "'id'", "]", ",", "__opts__", "[", "'saltenv'", "]", ",", "pillar_override", "=", "pillar_override", ",", "pillarenv", "=", "pillarenv", ")", "ret", "=", "pillar", ".", "compile_pillar", "(", ")", "if", "pillar_override", "and", "isinstance", "(", "pillar_override", ",", "dict", ")", ":", "ret", ".", "update", "(", "pillar_override", ")", "return", "ret" ]
28.75
0.002105
def set_result(self, rval: bool) -> None:
    """
    Set the result of the evaluation. If the result is true, prune all of the
    children that didn't cut it

    :param rval: Result of evaluation
    """
    self.result = rval
    if self.result:
        self.nodes = [pn for pn in self.nodes if pn.result]
[ "def", "set_result", "(", "self", ",", "rval", ":", "bool", ")", "->", "None", ":", "self", ".", "result", "=", "rval", "if", "self", ".", "result", ":", "self", ".", "nodes", "=", "[", "pn", "for", "pn", "in", "self", ".", "nodes", "if", "pn", ".", "result", "]" ]
39.75
0.009231
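The pruning behaviour is easiest to see in a tiny stand-alone sketch; `ParseNode` below is a hypothetical stand-in for whatever node class owns `set_result` and is not part of the record above.

class ParseNode:
    """Hypothetical minimal node: a result flag plus child nodes."""
    def __init__(self):
        self.result = False
        self.nodes = []

    def set_result(self, rval: bool) -> None:
        self.result = rval
        if self.result:
            self.nodes = [pn for pn in self.nodes if pn.result]

root, good, bad = ParseNode(), ParseNode(), ParseNode()
good.result = True
root.nodes = [good, bad]
root.set_result(True)
assert root.nodes == [good]   # the child whose result was False got pruned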
def on_timer(self, event): """Timer event handler Parameters ---------- event : instance of Event The event. """ # Set relative speed and acceleration rel_speed = event.dt rel_acc = 0.1 # Get what's forward pf, pr, pl, pu = self._get_directions() # Increase speed through acceleration # Note that self._speed is relative. We can balance rel_acc and # rel_speed to get a nice smooth or direct control self._speed += self._acc * rel_acc # Reduce speed. Simulate resistance. Using brakes slows down faster. # Note that the way that we reduce speed, allows for higher # speeds if keys ar bound to higher acc values (i.e. turbo) reduce = np.array([0.05, 0.05, 0.05, 0.1, 0.1, 0.1]) reduce[self._brake > 0] = 0.2 self._speed -= self._speed * reduce if np.abs(self._speed).max() < 0.05: self._speed *= 0.0 # --- Determine new position from translation speed if self._speed[:3].any(): # Create speed vectors, use scale_factor as a reference dv = np.array([1.0/d for d in self._flip_factors]) # vf = pf * dv * rel_speed * self._scale_factor vr = pr * dv * rel_speed * self._scale_factor vu = pu * dv * rel_speed * self._scale_factor direction = vf, vr, vu # Set position center_loc = np.array(self._center, dtype='float32') center_loc += (self._speed[0] * direction[0] + self._speed[1] * direction[1] + self._speed[2] * direction[2]) self._center = tuple(center_loc) # --- Determine new orientation from rotation speed roll_angle = 0 # Calculate manual roll (from speed) if self._speed[3:].any(): angleGain = np.array([1.0, 1.5, 1.0]) * 3 * math.pi / 180 angles = self._speed[3:] * angleGain q1 = Quaternion.create_from_axis_angle(angles[0], -1, 0, 0) q2 = Quaternion.create_from_axis_angle(angles[1], 0, 1, 0) q3 = Quaternion.create_from_axis_angle(angles[2], 0, 0, -1) q = q1 * q2 * q3 self._rotation1 = (q * self._rotation1).normalize() # Calculate auto-roll if self.auto_roll: up = {'x': (1, 0, 0), 'y': (0, 1, 0), 'z': (0, 0, 1)}[self.up[1]] up = np.array(up) * {'+': +1, '-': -1}[self.up[0]] def angle(p1, p2): return np.arccos(p1.dot(p2)) #au = angle(pu, (0, 0, 1)) ar = angle(pr, up) al = angle(pl, up) af = angle(pf, up) # Roll angle that's off from being leveled (in unit strength) roll_angle = math.sin(0.5*(al - ar)) # Correct for pitch roll_angle *= abs(math.sin(af)) # abs(math.sin(au)) if abs(roll_angle) < 0.05: roll_angle = 0 if roll_angle: # Correct to soften the force at 90 degree angle roll_angle = np.sign(roll_angle) * np.abs(roll_angle)**0.5 # Get correction for this iteration and apply angle_correction = 1.0 * roll_angle * math.pi / 180 q = Quaternion.create_from_axis_angle(angle_correction, 0, 0, 1) self._rotation1 = (q * self._rotation1).normalize() # Update if self._speed.any() or roll_angle or self._update_from_mouse: self._update_from_mouse = False self.view_changed()
[ "def", "on_timer", "(", "self", ",", "event", ")", ":", "# Set relative speed and acceleration", "rel_speed", "=", "event", ".", "dt", "rel_acc", "=", "0.1", "# Get what's forward", "pf", ",", "pr", ",", "pl", ",", "pu", "=", "self", ".", "_get_directions", "(", ")", "# Increase speed through acceleration", "# Note that self._speed is relative. We can balance rel_acc and", "# rel_speed to get a nice smooth or direct control", "self", ".", "_speed", "+=", "self", ".", "_acc", "*", "rel_acc", "# Reduce speed. Simulate resistance. Using brakes slows down faster.", "# Note that the way that we reduce speed, allows for higher", "# speeds if keys ar bound to higher acc values (i.e. turbo)", "reduce", "=", "np", ".", "array", "(", "[", "0.05", ",", "0.05", ",", "0.05", ",", "0.1", ",", "0.1", ",", "0.1", "]", ")", "reduce", "[", "self", ".", "_brake", ">", "0", "]", "=", "0.2", "self", ".", "_speed", "-=", "self", ".", "_speed", "*", "reduce", "if", "np", ".", "abs", "(", "self", ".", "_speed", ")", ".", "max", "(", ")", "<", "0.05", ":", "self", ".", "_speed", "*=", "0.0", "# --- Determine new position from translation speed", "if", "self", ".", "_speed", "[", ":", "3", "]", ".", "any", "(", ")", ":", "# Create speed vectors, use scale_factor as a reference", "dv", "=", "np", ".", "array", "(", "[", "1.0", "/", "d", "for", "d", "in", "self", ".", "_flip_factors", "]", ")", "#", "vf", "=", "pf", "*", "dv", "*", "rel_speed", "*", "self", ".", "_scale_factor", "vr", "=", "pr", "*", "dv", "*", "rel_speed", "*", "self", ".", "_scale_factor", "vu", "=", "pu", "*", "dv", "*", "rel_speed", "*", "self", ".", "_scale_factor", "direction", "=", "vf", ",", "vr", ",", "vu", "# Set position", "center_loc", "=", "np", ".", "array", "(", "self", ".", "_center", ",", "dtype", "=", "'float32'", ")", "center_loc", "+=", "(", "self", ".", "_speed", "[", "0", "]", "*", "direction", "[", "0", "]", "+", "self", ".", "_speed", "[", "1", "]", "*", "direction", "[", "1", "]", "+", "self", ".", "_speed", "[", "2", "]", "*", "direction", "[", "2", "]", ")", "self", ".", "_center", "=", "tuple", "(", "center_loc", ")", "# --- Determine new orientation from rotation speed", "roll_angle", "=", "0", "# Calculate manual roll (from speed)", "if", "self", ".", "_speed", "[", "3", ":", "]", ".", "any", "(", ")", ":", "angleGain", "=", "np", ".", "array", "(", "[", "1.0", ",", "1.5", ",", "1.0", "]", ")", "*", "3", "*", "math", ".", "pi", "/", "180", "angles", "=", "self", ".", "_speed", "[", "3", ":", "]", "*", "angleGain", "q1", "=", "Quaternion", ".", "create_from_axis_angle", "(", "angles", "[", "0", "]", ",", "-", "1", ",", "0", ",", "0", ")", "q2", "=", "Quaternion", ".", "create_from_axis_angle", "(", "angles", "[", "1", "]", ",", "0", ",", "1", ",", "0", ")", "q3", "=", "Quaternion", ".", "create_from_axis_angle", "(", "angles", "[", "2", "]", ",", "0", ",", "0", ",", "-", "1", ")", "q", "=", "q1", "*", "q2", "*", "q3", "self", ".", "_rotation1", "=", "(", "q", "*", "self", ".", "_rotation1", ")", ".", "normalize", "(", ")", "# Calculate auto-roll", "if", "self", ".", "auto_roll", ":", "up", "=", "{", "'x'", ":", "(", "1", ",", "0", ",", "0", ")", ",", "'y'", ":", "(", "0", ",", "1", ",", "0", ")", ",", "'z'", ":", "(", "0", ",", "0", ",", "1", ")", "}", "[", "self", ".", "up", "[", "1", "]", "]", "up", "=", "np", ".", "array", "(", "up", ")", "*", "{", "'+'", ":", "+", "1", ",", "'-'", ":", "-", "1", "}", "[", "self", ".", "up", "[", "0", "]", "]", "def", "angle", "(", "p1", ",", "p2", ")", ":", "return", "np", ".", 
"arccos", "(", "p1", ".", "dot", "(", "p2", ")", ")", "#au = angle(pu, (0, 0, 1))", "ar", "=", "angle", "(", "pr", ",", "up", ")", "al", "=", "angle", "(", "pl", ",", "up", ")", "af", "=", "angle", "(", "pf", ",", "up", ")", "# Roll angle that's off from being leveled (in unit strength)", "roll_angle", "=", "math", ".", "sin", "(", "0.5", "*", "(", "al", "-", "ar", ")", ")", "# Correct for pitch", "roll_angle", "*=", "abs", "(", "math", ".", "sin", "(", "af", ")", ")", "# abs(math.sin(au))", "if", "abs", "(", "roll_angle", ")", "<", "0.05", ":", "roll_angle", "=", "0", "if", "roll_angle", ":", "# Correct to soften the force at 90 degree angle", "roll_angle", "=", "np", ".", "sign", "(", "roll_angle", ")", "*", "np", ".", "abs", "(", "roll_angle", ")", "**", "0.5", "# Get correction for this iteration and apply", "angle_correction", "=", "1.0", "*", "roll_angle", "*", "math", ".", "pi", "/", "180", "q", "=", "Quaternion", ".", "create_from_axis_angle", "(", "angle_correction", ",", "0", ",", "0", ",", "1", ")", "self", ".", "_rotation1", "=", "(", "q", "*", "self", ".", "_rotation1", ")", ".", "normalize", "(", ")", "# Update", "if", "self", ".", "_speed", ".", "any", "(", ")", "or", "roll_angle", "or", "self", ".", "_update_from_mouse", ":", "self", ".", "_update_from_mouse", "=", "False", "self", ".", "view_changed", "(", ")" ]
38.542553
0.000807
def record_source(self, src, prg=''):
    """ function to collect raw data from the web and hard drive
    Examples - new source file for ontologies, email contacts list, folder for xmas photos
    """
    self._log(self.logFileSource, force_to_string(src), prg)
[ "def", "record_source", "(", "self", ",", "src", ",", "prg", "=", "''", ")", ":", "self", ".", "_log", "(", "self", ".", "logFileSource", ",", "force_to_string", "(", "src", ")", ",", "prg", ")" ]
47
0.013937
def _dtype_to_default_stata_fmt(dtype, column, dta_version=114,
                                force_strl=False):
    """
    Map numpy dtype to stata's default format for this type. Not terribly
    important since users can change this in Stata. Semantics are

    object  -> "%DDs" where DD is the length of the string. If not a string,
               raise ValueError
    float64 -> "%10.0g"
    float32 -> "%9.0g"
    int64   -> "%9.0g"
    int32   -> "%12.0g"
    int16   -> "%8.0g"
    int8    -> "%8.0g"
    strl    -> "%9s"
    """
    # TODO: Refactor to combine type with format
    # TODO: expand this to handle a default datetime format?
    if dta_version < 117:
        max_str_len = 244
    else:
        max_str_len = 2045
    if force_strl:
        return '%9s'
    if dtype.type == np.object_:
        inferred_dtype = infer_dtype(column, skipna=True)
        if not (inferred_dtype in ('string', 'unicode') or
                len(column) == 0):
            raise ValueError('Column `{col}` cannot be exported.\n\nOnly '
                             'string-like object arrays containing all '
                             'strings or a mix of strings and None can be '
                             'exported. Object arrays containing only null '
                             'values are prohibited. Other object types'
                             'cannot be exported and must first be converted '
                             'to one of the supported '
                             'types.'.format(col=column.name))
        itemsize = max_len_string_array(ensure_object(column.values))
        if itemsize > max_str_len:
            if dta_version >= 117:
                return '%9s'
            else:
                raise ValueError(excessive_string_length_error % column.name)
        return "%" + str(max(itemsize, 1)) + "s"
    elif dtype == np.float64:
        return "%10.0g"
    elif dtype == np.float32:
        return "%9.0g"
    elif dtype == np.int32:
        return "%12.0g"
    elif dtype == np.int8 or dtype == np.int16:
        return "%8.0g"
    else:  # pragma : no cover
        raise NotImplementedError(
            "Data type {dtype} not supported.".format(dtype=dtype))
[ "def", "_dtype_to_default_stata_fmt", "(", "dtype", ",", "column", ",", "dta_version", "=", "114", ",", "force_strl", "=", "False", ")", ":", "# TODO: Refactor to combine type with format", "# TODO: expand this to handle a default datetime format?", "if", "dta_version", "<", "117", ":", "max_str_len", "=", "244", "else", ":", "max_str_len", "=", "2045", "if", "force_strl", ":", "return", "'%9s'", "if", "dtype", ".", "type", "==", "np", ".", "object_", ":", "inferred_dtype", "=", "infer_dtype", "(", "column", ",", "skipna", "=", "True", ")", "if", "not", "(", "inferred_dtype", "in", "(", "'string'", ",", "'unicode'", ")", "or", "len", "(", "column", ")", "==", "0", ")", ":", "raise", "ValueError", "(", "'Column `{col}` cannot be exported.\\n\\nOnly '", "'string-like object arrays containing all '", "'strings or a mix of strings and None can be '", "'exported. Object arrays containing only null '", "'values are prohibited. Other object types'", "'cannot be exported and must first be converted '", "'to one of the supported '", "'types.'", ".", "format", "(", "col", "=", "column", ".", "name", ")", ")", "itemsize", "=", "max_len_string_array", "(", "ensure_object", "(", "column", ".", "values", ")", ")", "if", "itemsize", ">", "max_str_len", ":", "if", "dta_version", ">=", "117", ":", "return", "'%9s'", "else", ":", "raise", "ValueError", "(", "excessive_string_length_error", "%", "column", ".", "name", ")", "return", "\"%\"", "+", "str", "(", "max", "(", "itemsize", ",", "1", ")", ")", "+", "\"s\"", "elif", "dtype", "==", "np", ".", "float64", ":", "return", "\"%10.0g\"", "elif", "dtype", "==", "np", ".", "float32", ":", "return", "\"%9.0g\"", "elif", "dtype", "==", "np", ".", "int32", ":", "return", "\"%12.0g\"", "elif", "dtype", "==", "np", ".", "int8", "or", "dtype", "==", "np", ".", "int16", ":", "return", "\"%8.0g\"", "else", ":", "# pragma : no cover", "raise", "NotImplementedError", "(", "\"Data type {dtype} not supported.\"", ".", "format", "(", "dtype", "=", "dtype", ")", ")" ]
40.222222
0.000449
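A few illustrative calls, assuming the function is used inside its home module where np, infer_dtype, max_len_string_array and ensure_object are in scope; numeric dtypes map straight to a fixed format, while object columns get a width equal to their longest string.

import numpy as np
import pandas as pd

num_col = pd.Series([1.0, 2.0])
_dtype_to_default_stata_fmt(np.dtype("float64"), num_col)                   # '%10.0g'
_dtype_to_default_stata_fmt(np.dtype("int32"), num_col.astype("int32"))     # '%12.0g'

str_col = pd.Series(["a", "bcd"])
_dtype_to_default_stata_fmt(np.dtype("object"), str_col)                    # '%3s'
_dtype_to_default_stata_fmt(np.dtype("object"), str_col, force_strl=True)   # '%9s'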
def validate_proxies_config(cls, proxies):
    """
    Specific config validation method for the "proxies" portion of a config.

    Checks that each proxy defines a port and a list of `upstreams`, and
    that each upstream entry has a host and port defined.
    """
    for name, proxy in six.iteritems(proxies):
        if "port" not in proxy:
            raise ValueError("No port defined for proxy %s" % name)
        if "upstreams" not in proxy:
            raise ValueError(
                "No upstreams defined for proxy %s" % name
            )
        for upstream in proxy["upstreams"]:
            if "host" not in upstream:
                raise ValueError(
                    "No host defined for upstream in proxy %s" % name
                )
            if "port" not in upstream:
                raise ValueError(
                    "No port defined for upstream in proxy %s" % name
                )
[ "def", "validate_proxies_config", "(", "cls", ",", "proxies", ")", ":", "for", "name", ",", "proxy", "in", "six", ".", "iteritems", "(", "proxies", ")", ":", "if", "\"port\"", "not", "in", "proxy", ":", "raise", "ValueError", "(", "\"No port defined for proxy %s\"", "%", "name", ")", "if", "\"upstreams\"", "not", "in", "proxy", ":", "raise", "ValueError", "(", "\"No upstreams defined for proxy %s\"", "%", "name", ")", "for", "upstream", "in", "proxy", "[", "\"upstreams\"", "]", ":", "if", "\"host\"", "not", "in", "upstream", ":", "raise", "ValueError", "(", "\"No host defined for upstream in proxy %s\"", "%", "name", ")", "if", "\"port\"", "not", "in", "upstream", ":", "raise", "ValueError", "(", "\"No port defined for upstream in proxy %s\"", "%", "name", ")" ]
41.208333
0.001976
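For reference, a config shape that passes the check above (keys and nesting as the validator expects; the host and port values are made up):

proxies = {
    "web": {
        "port": 8080,
        "upstreams": [
            {"host": "10.0.0.5", "port": 9000},
            {"host": "10.0.0.6", "port": 9000},
        ],
    },
}
# Dropping "upstreams", or omitting "host"/"port" from an upstream entry,
# raises ValueError naming the offending proxy ("web" in this case).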
def _gen_memd_wrappers(cls, factory):
    """Generates wrappers for all the memcached operations.

    :param factory: A function to be called to return the wrapped method.
        It will be called with two arguments; the first is the unbound
        method being wrapped, and the second is the name of such a method.

        The factory shall return a new unbound method

    :return: A dictionary of names mapping the API calls to the wrapped
        functions
    """
    d = {}
    for n in cls._MEMCACHED_OPERATIONS:
        for variant in (n, n + "_multi"):
            try:
                d[variant] = factory(getattr(cls, variant), variant)
            except AttributeError:
                if n in cls._MEMCACHED_NOMULTI:
                    continue
                raise
    return d
[ "def", "_gen_memd_wrappers", "(", "cls", ",", "factory", ")", ":", "d", "=", "{", "}", "for", "n", "in", "cls", ".", "_MEMCACHED_OPERATIONS", ":", "for", "variant", "in", "(", "n", ",", "n", "+", "\"_multi\"", ")", ":", "try", ":", "d", "[", "variant", "]", "=", "factory", "(", "getattr", "(", "cls", ",", "variant", ")", ",", "variant", ")", "except", "AttributeError", ":", "if", "n", "in", "cls", ".", "_MEMCACHED_NOMULTI", ":", "continue", "raise", "return", "d" ]
39
0.002275
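A sketch of what a `factory` argument might look like, assuming a hypothetical client class `Client` that defines `_MEMCACHED_OPERATIONS`, `_MEMCACHED_NOMULTI` and the underlying methods; the factory receives the unbound method and its name and returns the wrapper.

import functools
import time

def timing_factory(meth, name):
    """Wrap a memcached operation so each call reports its duration."""
    @functools.wraps(meth)
    def wrapper(self, *args, **kwargs):
        start = time.time()
        try:
            return meth(self, *args, **kwargs)
        finally:
            print("%s took %.6f s" % (name, time.time() - start))
    return wrapper

# wrappers = Client._gen_memd_wrappers(timing_factory)
# e.g. {'get': <wrapper>, 'get_multi': <wrapper>, ...}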
def destripe_plus(inputfile, suffix='strp', stat='pmode1', maxiter=15, sigrej=2.0, lower=None, upper=None, binwidth=0.3, scimask1=None, scimask2=None, dqbits=None, rpt_clean=0, atol=0.01, cte_correct=True, clobber=False, verbose=True): r"""Calibrate post-SM4 ACS/WFC exposure(s) and use standalone :ref:`acsdestripe`. This takes a RAW image and generates a FLT file containing its calibrated and destriped counterpart. If CTE correction is performed, FLC will also be present. Parameters ---------- inputfile : str or list of str Input filenames in one of these formats: * a Python list of filenames * a partial filename with wildcards ('\*raw.fits') * filename of an ASN table ('j12345670_asn.fits') * an at-file (``@input``) suffix : str The string to use to add to each input file name to indicate an output product of ``acs_destripe``. This only affects the intermediate output file that will be automatically renamed to ``*blv_tmp.fits`` during the processing. stat : { 'pmode1', 'pmode2', 'mean', 'mode', 'median', 'midpt' } (Default = 'pmode1') Specifies the statistics to be used for computation of the background in image rows: * 'pmode1' - SEXTRACTOR-like mode estimate based on a modified `Pearson's rule <http://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_: ``2.5*median-1.5*mean``; * 'pmode2' - mode estimate based on `Pearson's rule <http://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_: ``3*median-2*mean``; * 'mean' - the mean of the distribution of the "good" pixels (after clipping, masking, etc.); * 'mode' - the mode of the distribution of the "good" pixels; * 'median' - the median of the distribution of the "good" pixels; * 'midpt' - estimate of the median of the distribution of the "good" pixels based on an algorithm similar to IRAF's `imagestats` task (``CDF(midpt)=1/2``). .. note:: The midpoint and mode are computed in two passes through the image. In the first pass the standard deviation of the pixels is calculated and used with the *binwidth* parameter to compute the resolution of the data histogram. The midpoint is estimated by integrating the histogram and computing by interpolation the data value at which exactly half the pixels are below that data value and half are above it. The mode is computed by locating the maximum of the data histogram and fitting the peak by parabolic interpolation. maxiter : int This parameter controls the maximum number of iterations to perform when computing the statistics used to compute the row-by-row corrections. sigrej : float This parameters sets the sigma level for the rejection applied during each iteration of statistics computations for the row-by-row corrections. lower : float, None (Default = None) Lower limit of usable pixel values for computing the background. This value should be specified in the units of the input image(s). upper : float, None (Default = None) Upper limit of usable pixel values for computing the background. This value should be specified in the units of the input image(s). binwidth : float (Default = 0.1) Histogram's bin width, in sigma units, used to sample the distribution of pixel brightness values in order to compute the background statistics. This parameter is aplicable *only* to *stat* parameter values of `'mode'` or `'midpt'`. clobber : bool Specify whether or not to 'clobber' (delete then replace) previously generated products with the same names. scimask1 : str or list of str Mask images for *calibrated* ``SCI,1``, one for each input file. 
Pixels with zero values will be masked out, in addition to clipping. scimask2 : str or list of str Mask images for *calibrated* ``SCI,2``, one for each input file. Pixels with zero values will be masked out, in addition to clipping. This is not used for subarrays. dqbits : int, str, None (Default = None) Integer sum of all the DQ bit values from the input image's DQ array that should be considered "good" when building masks for de-striping computations. For example, if pixels in the DQ array can be combinations of 1, 2, 4, and 8 flags and one wants to consider DQ "defects" having flags 2 and 4 as being acceptable for de-striping computations, then `dqbits` should be set to 2+4=6. Then a DQ pixel having values 2,4, or 6 will be considered a good pixel, while a DQ pixel with a value, e.g., 1+2=3, 4+8=12, etc. will be flagged as a "bad" pixel. Alternatively, one can enter a comma- or '+'-separated list of integer bit flags that should be added to obtain the final "good" bits. For example, both ``4,8`` and ``4+8`` are equivalent to setting `dqbits` to 12. | Set `dqbits` to 0 to make *all* non-zero pixels in the DQ mask to be considered "bad" pixels, and the corresponding image pixels not to be used for de-striping computations. | Default value (`None`) will turn off the use of image's DQ array for de-striping computations. | In order to reverse the meaning of the `dqbits` parameter from indicating values of the "good" DQ flags to indicating the "bad" DQ flags, prepend '~' to the string value. For example, in order not to use pixels with DQ flags 4 and 8 for sky computations and to consider as "good" all other pixels (regardless of their DQ flag), set `dqbits` to ``~4+8``, or ``~4,8``. To obtain the same effect with an `int` input value (except for 0), enter -(4+8+1)=-9. Following this convention, a `dqbits` string value of ``'~0'`` would be equivalent to setting ``dqbits=None``. .. note:: DQ masks (if used), *will be* combined with user masks specified in the `scimask1` and `scimask2` parameters (if any). rpt_clean : int An integer indicating how many *additional* times stripe cleaning should be performed on the input image. Default = 0. atol : float, None The threshold for maximum absolute value of bias stripe correction below which repeated cleanings can stop. When `atol` is `None` cleaning will be repeated `rpt_clean` number of times. Default = 0.01 [e]. cte_correct : bool Perform CTE correction. verbose : bool Print informational messages. Default = True. Raises ------ ImportError ``stsci.tools`` not found. IOError Input file does not exist. ValueError Invalid header values or CALACS version. 
""" # Optional package dependencies from stsci.tools import parseinput try: from stsci.tools.bitmask import interpret_bit_flags except ImportError: from stsci.tools.bitmask import ( interpret_bits_value as interpret_bit_flags ) # process input file(s) and if we have multiple input files - recursively # call acs_destripe_plus for each input image: flist = parseinput.parseinput(inputfile)[0] if isinstance(scimask1, str): mlist1 = parseinput.parseinput(scimask1)[0] elif isinstance(scimask1, np.ndarray): mlist1 = [scimask1.copy()] elif scimask1 is None: mlist1 = [] elif isinstance(scimask1, list): mlist1 = [] for m in scimask1: if isinstance(m, np.ndarray): mlist1.append(m.copy()) elif isinstance(m, str): mlist1 += parseinput.parseinput(m)[0] else: raise TypeError("'scimask1' must be a list of str or " "numpy.ndarray values.") else: raise TypeError("'scimask1' must be either a str, or a " "numpy.ndarray, or a list of the two type of " "values.") if isinstance(scimask2, str): mlist2 = parseinput.parseinput(scimask2)[0] elif isinstance(scimask2, np.ndarray): mlist2 = [scimask2.copy()] elif scimask2 is None: mlist2 = [] elif isinstance(scimask2, list): mlist2 = [] for m in scimask2: if isinstance(m, np.ndarray): mlist2.append(m.copy()) elif isinstance(m, str): mlist2 += parseinput.parseinput(m)[0] else: raise TypeError("'scimask2' must be a list of str or " "numpy.ndarray values.") else: raise TypeError("'scimask2' must be either a str, or a " "numpy.ndarray, or a list of the two type of " "values.") n_input = len(flist) n_mask1 = len(mlist1) n_mask2 = len(mlist2) if n_input == 0: raise ValueError( 'No input file(s) provided or the file(s) do not exist') if n_mask1 == 0: mlist1 = [None] * n_input elif n_mask1 != n_input: raise ValueError('Insufficient masks for [SCI,1]') if n_mask2 == 0: mlist2 = [None] * n_input elif n_mask2 != n_input: raise ValueError('Insufficient masks for [SCI,2]') if n_input > 1: for img, mf1, mf2 in zip(flist, mlist1, mlist2): destripe_plus( inputfile=img, suffix=suffix, stat=stat, lower=lower, upper=upper, binwidth=binwidth, maxiter=maxiter, sigrej=sigrej, scimask1=scimask1, scimask2=scimask2, dqbits=dqbits, cte_correct=cte_correct, clobber=clobber, verbose=verbose ) return inputfile = flist[0] scimask1 = mlist1[0] scimask2 = mlist2[0] # verify that the RAW image exists in cwd cwddir = os.getcwd() if not os.path.exists(os.path.join(cwddir, inputfile)): raise IOError("{0} does not exist.".format(inputfile)) # get image's primary header: header = fits.getheader(inputfile) # verify masks defined (or not) simultaneously: if (header['CCDAMP'] == 'ABCD' and ((scimask1 is not None and scimask2 is None) or (scimask1 is None and scimask2 is not None))): raise ValueError("Both 'scimask1' and 'scimask2' must be specified " "or not specified together.") calacs_str = subprocess.check_output(['calacs.e', '--version']).split()[0] calacs_ver = [int(x) for x in calacs_str.decode().split('.')] if calacs_ver < [8, 3, 1]: raise ValueError('CALACS {0} is incomptible. 
' 'Must be 8.3.1 or later.'.format(calacs_str)) # check date for post-SM4 and if supported subarray or full frame is_subarray = False ctecorr = header['PCTECORR'] aperture = header['APERTURE'] detector = header['DETECTOR'] date_obs = Time(header['DATE-OBS']) # intermediate filenames blvtmp_name = inputfile.replace('raw', 'blv_tmp') blctmp_name = inputfile.replace('raw', 'blc_tmp') # output filenames tra_name = inputfile.replace('_raw.fits', '.tra') flt_name = inputfile.replace('raw', 'flt') flc_name = inputfile.replace('raw', 'flc') if detector != 'WFC': raise ValueError("{0} is not a WFC image, please check the 'DETECTOR'" " keyword.".format(inputfile)) if date_obs < SM4_DATE: raise ValueError( "{0} is a pre-SM4 image.".format(inputfile)) if header['SUBARRAY'] and cte_correct: if aperture in SUBARRAY_LIST: is_subarray = True else: LOG.warning('Using non-supported subarray, ' 'turning CTE correction off') cte_correct = False # delete files from previous CALACS runs if clobber: for tmpfilename in [blvtmp_name, blctmp_name, flt_name, flc_name, tra_name]: if os.path.exists(tmpfilename): os.remove(tmpfilename) # run ACSCCD on RAW acsccd.acsccd(inputfile) # modify user mask with DQ masks if requested dqbits = interpret_bit_flags(dqbits) if dqbits is not None: # save 'tra' file in memory to trick the log file # not to save first acs2d log as this is done only # for the purpose of obtaining DQ masks. # WISH: it would have been nice is there was an easy way of obtaining # just the DQ masks as if data were calibrated but without # having to recalibrate them with acs2d. if os.path.isfile(tra_name): with open(tra_name) as fh: tra_lines = fh.readlines() else: tra_lines = None # apply flats, etc. acs2d.acs2d(blvtmp_name, verbose=False, quiet=True) # extract DQ arrays from the FLT image: dq1, dq2 = _read_DQ_arrays(flt_name) mask1 = _get_mask(scimask1, 1) scimask1 = acs_destripe._mergeUserMaskAndDQ(dq1, mask1, dqbits) mask2 = _get_mask(scimask2, 2) if dq2 is not None: scimask2 = acs_destripe._mergeUserMaskAndDQ(dq2, mask2, dqbits) elif mask2 is None: scimask2 = None # reconstruct trailer file: if tra_lines is not None: with open(tra_name, mode='w') as fh: fh.writelines(tra_lines) # delete temporary FLT image: if os.path.isfile(flt_name): os.remove(flt_name) # execute destriping (post-SM4 data only) acs_destripe.clean( blvtmp_name, suffix, stat=stat, maxiter=maxiter, sigrej=sigrej, lower=lower, upper=upper, binwidth=binwidth, mask1=scimask1, mask2=scimask2, dqbits=dqbits, rpt_clean=rpt_clean, atol=atol, clobber=clobber, verbose=verbose) blvtmpsfx = 'blv_tmp_{0}'.format(suffix) os.rename(inputfile.replace('raw', blvtmpsfx), blvtmp_name) # update subarray header if is_subarray and cte_correct: fits.setval(blvtmp_name, 'PCTECORR', value='PERFORM') ctecorr = 'PERFORM' # perform CTE correction on destriped image if cte_correct: if ctecorr == 'PERFORM': acscte.acscte(blvtmp_name) else: LOG.warning( "PCTECORR={0}, cannot run CTE correction".format(ctecorr)) cte_correct = False # run ACS2D to get FLT and FLC images acs2d.acs2d(blvtmp_name) if cte_correct: acs2d.acs2d(blctmp_name) # delete intermediate files os.remove(blvtmp_name) if cte_correct and os.path.isfile(blctmp_name): os.remove(blctmp_name) info_str = 'Done.\nFLT: {0}\n'.format(flt_name) if cte_correct: info_str += 'FLC: {0}\n'.format(flc_name) LOG.info(info_str)
[ "def", "destripe_plus", "(", "inputfile", ",", "suffix", "=", "'strp'", ",", "stat", "=", "'pmode1'", ",", "maxiter", "=", "15", ",", "sigrej", "=", "2.0", ",", "lower", "=", "None", ",", "upper", "=", "None", ",", "binwidth", "=", "0.3", ",", "scimask1", "=", "None", ",", "scimask2", "=", "None", ",", "dqbits", "=", "None", ",", "rpt_clean", "=", "0", ",", "atol", "=", "0.01", ",", "cte_correct", "=", "True", ",", "clobber", "=", "False", ",", "verbose", "=", "True", ")", ":", "# Optional package dependencies", "from", "stsci", ".", "tools", "import", "parseinput", "try", ":", "from", "stsci", ".", "tools", ".", "bitmask", "import", "interpret_bit_flags", "except", "ImportError", ":", "from", "stsci", ".", "tools", ".", "bitmask", "import", "(", "interpret_bits_value", "as", "interpret_bit_flags", ")", "# process input file(s) and if we have multiple input files - recursively", "# call acs_destripe_plus for each input image:", "flist", "=", "parseinput", ".", "parseinput", "(", "inputfile", ")", "[", "0", "]", "if", "isinstance", "(", "scimask1", ",", "str", ")", ":", "mlist1", "=", "parseinput", ".", "parseinput", "(", "scimask1", ")", "[", "0", "]", "elif", "isinstance", "(", "scimask1", ",", "np", ".", "ndarray", ")", ":", "mlist1", "=", "[", "scimask1", ".", "copy", "(", ")", "]", "elif", "scimask1", "is", "None", ":", "mlist1", "=", "[", "]", "elif", "isinstance", "(", "scimask1", ",", "list", ")", ":", "mlist1", "=", "[", "]", "for", "m", "in", "scimask1", ":", "if", "isinstance", "(", "m", ",", "np", ".", "ndarray", ")", ":", "mlist1", ".", "append", "(", "m", ".", "copy", "(", ")", ")", "elif", "isinstance", "(", "m", ",", "str", ")", ":", "mlist1", "+=", "parseinput", ".", "parseinput", "(", "m", ")", "[", "0", "]", "else", ":", "raise", "TypeError", "(", "\"'scimask1' must be a list of str or \"", "\"numpy.ndarray values.\"", ")", "else", ":", "raise", "TypeError", "(", "\"'scimask1' must be either a str, or a \"", "\"numpy.ndarray, or a list of the two type of \"", "\"values.\"", ")", "if", "isinstance", "(", "scimask2", ",", "str", ")", ":", "mlist2", "=", "parseinput", ".", "parseinput", "(", "scimask2", ")", "[", "0", "]", "elif", "isinstance", "(", "scimask2", ",", "np", ".", "ndarray", ")", ":", "mlist2", "=", "[", "scimask2", ".", "copy", "(", ")", "]", "elif", "scimask2", "is", "None", ":", "mlist2", "=", "[", "]", "elif", "isinstance", "(", "scimask2", ",", "list", ")", ":", "mlist2", "=", "[", "]", "for", "m", "in", "scimask2", ":", "if", "isinstance", "(", "m", ",", "np", ".", "ndarray", ")", ":", "mlist2", ".", "append", "(", "m", ".", "copy", "(", ")", ")", "elif", "isinstance", "(", "m", ",", "str", ")", ":", "mlist2", "+=", "parseinput", ".", "parseinput", "(", "m", ")", "[", "0", "]", "else", ":", "raise", "TypeError", "(", "\"'scimask2' must be a list of str or \"", "\"numpy.ndarray values.\"", ")", "else", ":", "raise", "TypeError", "(", "\"'scimask2' must be either a str, or a \"", "\"numpy.ndarray, or a list of the two type of \"", "\"values.\"", ")", "n_input", "=", "len", "(", "flist", ")", "n_mask1", "=", "len", "(", "mlist1", ")", "n_mask2", "=", "len", "(", "mlist2", ")", "if", "n_input", "==", "0", ":", "raise", "ValueError", "(", "'No input file(s) provided or the file(s) do not exist'", ")", "if", "n_mask1", "==", "0", ":", "mlist1", "=", "[", "None", "]", "*", "n_input", "elif", "n_mask1", "!=", "n_input", ":", "raise", "ValueError", "(", "'Insufficient masks for [SCI,1]'", ")", "if", "n_mask2", "==", "0", ":", "mlist2", "=", "[", "None", "]", "*", 
"n_input", "elif", "n_mask2", "!=", "n_input", ":", "raise", "ValueError", "(", "'Insufficient masks for [SCI,2]'", ")", "if", "n_input", ">", "1", ":", "for", "img", ",", "mf1", ",", "mf2", "in", "zip", "(", "flist", ",", "mlist1", ",", "mlist2", ")", ":", "destripe_plus", "(", "inputfile", "=", "img", ",", "suffix", "=", "suffix", ",", "stat", "=", "stat", ",", "lower", "=", "lower", ",", "upper", "=", "upper", ",", "binwidth", "=", "binwidth", ",", "maxiter", "=", "maxiter", ",", "sigrej", "=", "sigrej", ",", "scimask1", "=", "scimask1", ",", "scimask2", "=", "scimask2", ",", "dqbits", "=", "dqbits", ",", "cte_correct", "=", "cte_correct", ",", "clobber", "=", "clobber", ",", "verbose", "=", "verbose", ")", "return", "inputfile", "=", "flist", "[", "0", "]", "scimask1", "=", "mlist1", "[", "0", "]", "scimask2", "=", "mlist2", "[", "0", "]", "# verify that the RAW image exists in cwd", "cwddir", "=", "os", ".", "getcwd", "(", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "cwddir", ",", "inputfile", ")", ")", ":", "raise", "IOError", "(", "\"{0} does not exist.\"", ".", "format", "(", "inputfile", ")", ")", "# get image's primary header:", "header", "=", "fits", ".", "getheader", "(", "inputfile", ")", "# verify masks defined (or not) simultaneously:", "if", "(", "header", "[", "'CCDAMP'", "]", "==", "'ABCD'", "and", "(", "(", "scimask1", "is", "not", "None", "and", "scimask2", "is", "None", ")", "or", "(", "scimask1", "is", "None", "and", "scimask2", "is", "not", "None", ")", ")", ")", ":", "raise", "ValueError", "(", "\"Both 'scimask1' and 'scimask2' must be specified \"", "\"or not specified together.\"", ")", "calacs_str", "=", "subprocess", ".", "check_output", "(", "[", "'calacs.e'", ",", "'--version'", "]", ")", ".", "split", "(", ")", "[", "0", "]", "calacs_ver", "=", "[", "int", "(", "x", ")", "for", "x", "in", "calacs_str", ".", "decode", "(", ")", ".", "split", "(", "'.'", ")", "]", "if", "calacs_ver", "<", "[", "8", ",", "3", ",", "1", "]", ":", "raise", "ValueError", "(", "'CALACS {0} is incomptible. 
'", "'Must be 8.3.1 or later.'", ".", "format", "(", "calacs_str", ")", ")", "# check date for post-SM4 and if supported subarray or full frame", "is_subarray", "=", "False", "ctecorr", "=", "header", "[", "'PCTECORR'", "]", "aperture", "=", "header", "[", "'APERTURE'", "]", "detector", "=", "header", "[", "'DETECTOR'", "]", "date_obs", "=", "Time", "(", "header", "[", "'DATE-OBS'", "]", ")", "# intermediate filenames", "blvtmp_name", "=", "inputfile", ".", "replace", "(", "'raw'", ",", "'blv_tmp'", ")", "blctmp_name", "=", "inputfile", ".", "replace", "(", "'raw'", ",", "'blc_tmp'", ")", "# output filenames", "tra_name", "=", "inputfile", ".", "replace", "(", "'_raw.fits'", ",", "'.tra'", ")", "flt_name", "=", "inputfile", ".", "replace", "(", "'raw'", ",", "'flt'", ")", "flc_name", "=", "inputfile", ".", "replace", "(", "'raw'", ",", "'flc'", ")", "if", "detector", "!=", "'WFC'", ":", "raise", "ValueError", "(", "\"{0} is not a WFC image, please check the 'DETECTOR'\"", "\" keyword.\"", ".", "format", "(", "inputfile", ")", ")", "if", "date_obs", "<", "SM4_DATE", ":", "raise", "ValueError", "(", "\"{0} is a pre-SM4 image.\"", ".", "format", "(", "inputfile", ")", ")", "if", "header", "[", "'SUBARRAY'", "]", "and", "cte_correct", ":", "if", "aperture", "in", "SUBARRAY_LIST", ":", "is_subarray", "=", "True", "else", ":", "LOG", ".", "warning", "(", "'Using non-supported subarray, '", "'turning CTE correction off'", ")", "cte_correct", "=", "False", "# delete files from previous CALACS runs", "if", "clobber", ":", "for", "tmpfilename", "in", "[", "blvtmp_name", ",", "blctmp_name", ",", "flt_name", ",", "flc_name", ",", "tra_name", "]", ":", "if", "os", ".", "path", ".", "exists", "(", "tmpfilename", ")", ":", "os", ".", "remove", "(", "tmpfilename", ")", "# run ACSCCD on RAW", "acsccd", ".", "acsccd", "(", "inputfile", ")", "# modify user mask with DQ masks if requested", "dqbits", "=", "interpret_bit_flags", "(", "dqbits", ")", "if", "dqbits", "is", "not", "None", ":", "# save 'tra' file in memory to trick the log file", "# not to save first acs2d log as this is done only", "# for the purpose of obtaining DQ masks.", "# WISH: it would have been nice is there was an easy way of obtaining", "# just the DQ masks as if data were calibrated but without", "# having to recalibrate them with acs2d.", "if", "os", ".", "path", ".", "isfile", "(", "tra_name", ")", ":", "with", "open", "(", "tra_name", ")", "as", "fh", ":", "tra_lines", "=", "fh", ".", "readlines", "(", ")", "else", ":", "tra_lines", "=", "None", "# apply flats, etc.", "acs2d", ".", "acs2d", "(", "blvtmp_name", ",", "verbose", "=", "False", ",", "quiet", "=", "True", ")", "# extract DQ arrays from the FLT image:", "dq1", ",", "dq2", "=", "_read_DQ_arrays", "(", "flt_name", ")", "mask1", "=", "_get_mask", "(", "scimask1", ",", "1", ")", "scimask1", "=", "acs_destripe", ".", "_mergeUserMaskAndDQ", "(", "dq1", ",", "mask1", ",", "dqbits", ")", "mask2", "=", "_get_mask", "(", "scimask2", ",", "2", ")", "if", "dq2", "is", "not", "None", ":", "scimask2", "=", "acs_destripe", ".", "_mergeUserMaskAndDQ", "(", "dq2", ",", "mask2", ",", "dqbits", ")", "elif", "mask2", "is", "None", ":", "scimask2", "=", "None", "# reconstruct trailer file:", "if", "tra_lines", "is", "not", "None", ":", "with", "open", "(", "tra_name", ",", "mode", "=", "'w'", ")", "as", "fh", ":", "fh", ".", "writelines", "(", "tra_lines", ")", "# delete temporary FLT image:", "if", "os", ".", "path", ".", "isfile", "(", "flt_name", ")", ":", "os", ".", "remove", "(", 
"flt_name", ")", "# execute destriping (post-SM4 data only)", "acs_destripe", ".", "clean", "(", "blvtmp_name", ",", "suffix", ",", "stat", "=", "stat", ",", "maxiter", "=", "maxiter", ",", "sigrej", "=", "sigrej", ",", "lower", "=", "lower", ",", "upper", "=", "upper", ",", "binwidth", "=", "binwidth", ",", "mask1", "=", "scimask1", ",", "mask2", "=", "scimask2", ",", "dqbits", "=", "dqbits", ",", "rpt_clean", "=", "rpt_clean", ",", "atol", "=", "atol", ",", "clobber", "=", "clobber", ",", "verbose", "=", "verbose", ")", "blvtmpsfx", "=", "'blv_tmp_{0}'", ".", "format", "(", "suffix", ")", "os", ".", "rename", "(", "inputfile", ".", "replace", "(", "'raw'", ",", "blvtmpsfx", ")", ",", "blvtmp_name", ")", "# update subarray header", "if", "is_subarray", "and", "cte_correct", ":", "fits", ".", "setval", "(", "blvtmp_name", ",", "'PCTECORR'", ",", "value", "=", "'PERFORM'", ")", "ctecorr", "=", "'PERFORM'", "# perform CTE correction on destriped image", "if", "cte_correct", ":", "if", "ctecorr", "==", "'PERFORM'", ":", "acscte", ".", "acscte", "(", "blvtmp_name", ")", "else", ":", "LOG", ".", "warning", "(", "\"PCTECORR={0}, cannot run CTE correction\"", ".", "format", "(", "ctecorr", ")", ")", "cte_correct", "=", "False", "# run ACS2D to get FLT and FLC images", "acs2d", ".", "acs2d", "(", "blvtmp_name", ")", "if", "cte_correct", ":", "acs2d", ".", "acs2d", "(", "blctmp_name", ")", "# delete intermediate files", "os", ".", "remove", "(", "blvtmp_name", ")", "if", "cte_correct", "and", "os", ".", "path", ".", "isfile", "(", "blctmp_name", ")", ":", "os", ".", "remove", "(", "blctmp_name", ")", "info_str", "=", "'Done.\\nFLT: {0}\\n'", ".", "format", "(", "flt_name", ")", "if", "cte_correct", ":", "info_str", "+=", "'FLC: {0}\\n'", ".", "format", "(", "flc_name", ")", "LOG", ".", "info", "(", "info_str", ")" ]
38.492268
0.000261
def dummyListOfDicts(size=100):
    """
    returns a list (of the given size) of dicts with fake data.
    some dictionary keys are missing for some of the items.
    """
    titles = "ahp,halfwidth,peak,expT,expI,sweep".split(",")
    ld = []  # list of dicts
    for i in range(size):
        d = {}
        for t in titles:
            if int(np.random.random(1) * 100) > 5:  # 5% of values are missing
                d[t] = float(np.random.random(1) * 100)  # random number 0-100
            if t == "sweep" and "sweep" in d.keys():
                d[t] = int(d[t])
        ld.append(d)
    return ld
[ "def", "dummyListOfDicts", "(", "size", "=", "100", ")", ":", "titles", "=", "\"ahp,halfwidth,peak,expT,expI,sweep\"", ".", "split", "(", "\",\"", ")", "ld", "=", "[", "]", "#list of dicts", "for", "i", "in", "range", "(", "size", ")", ":", "d", "=", "{", "}", "for", "t", "in", "titles", ":", "if", "int", "(", "np", ".", "random", ".", "random", "(", "1", ")", "*", "100", ")", ">", "5", ":", "#5% of values are missing", "d", "[", "t", "]", "=", "float", "(", "np", ".", "random", ".", "random", "(", "1", ")", "*", "100", ")", "#random number 0-100", "if", "t", "==", "\"sweep\"", "and", "\"sweep\"", "in", "d", ".", "keys", "(", ")", ":", "d", "[", "t", "]", "=", "int", "(", "d", "[", "t", "]", ")", "ld", ".", "append", "(", "d", ")", "return", "ld" ]
35.4375
0.024055
def dictlist_to_tsv(dictlist: List[Dict[str, Any]]) -> str:
    """
    From a consistent list of dictionaries mapping fieldnames to values,
    make a TSV file.
    """
    if not dictlist:
        return ""
    fieldnames = dictlist[0].keys()
    tsv = "\t".join([tsv_escape(f) for f in fieldnames]) + "\n"
    for d in dictlist:
        tsv += "\t".join([tsv_escape(v) for v in d.values()]) + "\n"
    return tsv
[ "def", "dictlist_to_tsv", "(", "dictlist", ":", "List", "[", "Dict", "[", "str", ",", "Any", "]", "]", ")", "->", "str", ":", "if", "not", "dictlist", ":", "return", "\"\"", "fieldnames", "=", "dictlist", "[", "0", "]", ".", "keys", "(", ")", "tsv", "=", "\"\\t\"", ".", "join", "(", "[", "tsv_escape", "(", "f", ")", "for", "f", "in", "fieldnames", "]", ")", "+", "\"\\n\"", "for", "d", "in", "dictlist", ":", "tsv", "+=", "\"\\t\"", ".", "join", "(", "[", "tsv_escape", "(", "v", ")", "for", "v", "in", "d", ".", "values", "(", ")", "]", ")", "+", "\"\\n\"", "return", "tsv" ]
33.666667
0.00241
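A small usage sketch; `tsv_escape` is not shown in the record above, so a trivial stand-in that stringifies values and escapes tabs and newlines is assumed here.

from typing import Any

def tsv_escape(x: Any) -> str:
    """Assumed helper: stringify and escape characters that would break TSV."""
    return str(x).replace("\t", "\\t").replace("\n", "\\n")

rows = [{"name": "alice", "score": 3},
        {"name": "bob", "score": 5}]
print(dictlist_to_tsv(rows))
# name    score
# alice   3
# bob     5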
def get_stories(label_type):
    """ Returns a list of the stories in the Na corpus. """
    prefixes = get_story_prefixes(label_type)
    texts = list(set([prefix.split(".")[0].split("/")[1] for prefix in prefixes]))
    return texts
[ "def", "get_stories", "(", "label_type", ")", ":", "prefixes", "=", "get_story_prefixes", "(", "label_type", ")", "texts", "=", "list", "(", "set", "(", "[", "prefix", ".", "split", "(", "\".\"", ")", "[", "0", "]", ".", "split", "(", "\"/\"", ")", "[", "1", "]", "for", "prefix", "in", "prefixes", "]", ")", ")", "return", "texts" ]
38.333333
0.008511
def stringer(x):
    """
    Takes an object and makes it stringy

    >>> print(stringer({'a': 1, 2: 3, 'b': [1, 'c', 2.5]}))
    {'b': ['1', 'c', '2.5'], 'a': '1', '2': '3'}
    """
    if isinstance(x, string_types):
        return x
    if isinstance(x, (list, tuple)):
        return [stringer(y) for y in x]
    if isinstance(x, dict):
        return dict((stringer(a), stringer(b)) for a, b in x.items())
    return text_type(x)
[ "def", "stringer", "(", "x", ")", ":", "if", "isinstance", "(", "x", ",", "string_types", ")", ":", "return", "x", "if", "isinstance", "(", "x", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "[", "stringer", "(", "y", ")", "for", "y", "in", "x", "]", "if", "isinstance", "(", "x", ",", "dict", ")", ":", "return", "dict", "(", "(", "stringer", "(", "a", ")", ",", "stringer", "(", "b", ")", ")", "for", "a", ",", "b", "in", "x", ".", "items", "(", ")", ")", "return", "text_type", "(", "x", ")" ]
32.461538
0.002304
def id_nameDAVID(df, GTF=None, name_id=None):
    """
    Given a DAVIDenrich output it converts ensembl gene ids to genes names and adds this column to the output

    :param df: a dataframe output from DAVIDenrich
    :param GTF: a GTF dataframe from readGTF()
    :param name_id: instead of a gtf dataframe a dataframe with the columns 'gene_name' and 'gene_id' can be given as input

    :returns: a pandas dataframe with a gene name column added to it.
    """
    if name_id is None:
        gene_name = retrieve_GTF_field('gene_name', GTF)
        gene_id = retrieve_GTF_field('gene_id', GTF)
        GTF = pd.concat([gene_name, gene_id], axis=1)
    else:
        GTF = name_id.copy()

    df['Gene_names'] = "genes"
    terms = df['termName'].tolist()
    enrichN = pd.DataFrame()
    for term in terms:
        tmp = df[df['termName'] == term]
        tmp = tmp.reset_index(drop=True)
        ids = tmp.xs(0)['geneIds']
        ids = pd.DataFrame(data=ids.split(", "))
        ids.columns = ['geneIds']
        ids['geneIds'] = ids['geneIds'].map(str.lower)
        GTF['gene_id'] = GTF['gene_id'].astype(str)
        GTF['gene_id'] = GTF['gene_id'].map(str.lower)
        ids = pd.merge(ids, GTF, how='left', left_on='geneIds', right_on='gene_id')
        names = ids['gene_name'].tolist()
        names = ', '.join(names)
        tmp["Gene_names"] = names
        # tmp=tmp.replace(to_replace=tmp.xs(0)['Gene_names'], value=names)
        enrichN = pd.concat([enrichN, tmp])
    enrichN = enrichN.reset_index(drop=True)

    gene_names = enrichN[['Gene_names']]
    gpos = enrichN.columns.get_loc("geneIds")
    enrichN = enrichN.drop(['Gene_names'], axis=1)
    cols = enrichN.columns.tolist()
    enrichN = pd.concat([enrichN[cols[:gpos + 1]], gene_names, enrichN[cols[gpos + 1:]]], axis=1)

    return enrichN
[ "def", "id_nameDAVID", "(", "df", ",", "GTF", "=", "None", ",", "name_id", "=", "None", ")", ":", "if", "name_id", "is", "None", ":", "gene_name", "=", "retrieve_GTF_field", "(", "'gene_name'", ",", "GTF", ")", "gene_id", "=", "retrieve_GTF_field", "(", "'gene_id'", ",", "GTF", ")", "GTF", "=", "pd", ".", "concat", "(", "[", "gene_name", ",", "gene_id", "]", ",", "axis", "=", "1", ")", "else", ":", "GTF", "=", "name_id", ".", "copy", "(", ")", "df", "[", "'Gene_names'", "]", "=", "\"genes\"", "terms", "=", "df", "[", "'termName'", "]", ".", "tolist", "(", ")", "enrichN", "=", "pd", ".", "DataFrame", "(", ")", "for", "term", "in", "terms", ":", "tmp", "=", "df", "[", "df", "[", "'termName'", "]", "==", "term", "]", "tmp", "=", "tmp", ".", "reset_index", "(", "drop", "=", "True", ")", "ids", "=", "tmp", ".", "xs", "(", "0", ")", "[", "'geneIds'", "]", "ids", "=", "pd", ".", "DataFrame", "(", "data", "=", "ids", ".", "split", "(", "\", \"", ")", ")", "ids", ".", "columns", "=", "[", "'geneIds'", "]", "ids", "[", "'geneIds'", "]", "=", "ids", "[", "'geneIds'", "]", ".", "map", "(", "str", ".", "lower", ")", "GTF", "[", "'gene_id'", "]", "=", "GTF", "[", "'gene_id'", "]", ".", "astype", "(", "str", ")", "GTF", "[", "'gene_id'", "]", "=", "GTF", "[", "'gene_id'", "]", ".", "map", "(", "str", ".", "lower", ")", "ids", "=", "pd", ".", "merge", "(", "ids", ",", "GTF", ",", "how", "=", "'left'", ",", "left_on", "=", "'geneIds'", ",", "right_on", "=", "'gene_id'", ")", "names", "=", "ids", "[", "'gene_name'", "]", ".", "tolist", "(", ")", "names", "=", "', '", ".", "join", "(", "names", ")", "tmp", "[", "\"Gene_names\"", "]", "=", "names", "#tmp=tmp.replace(to_replace=tmp.xs(0)['Gene_names'], value=names)", "enrichN", "=", "pd", ".", "concat", "(", "[", "enrichN", ",", "tmp", "]", ")", "enrichN", "=", "enrichN", ".", "reset_index", "(", "drop", "=", "True", ")", "gene_names", "=", "enrichN", "[", "[", "'Gene_names'", "]", "]", "gpos", "=", "enrichN", ".", "columns", ".", "get_loc", "(", "\"geneIds\"", ")", "enrichN", "=", "enrichN", ".", "drop", "(", "[", "'Gene_names'", "]", ",", "axis", "=", "1", ")", "cols", "=", "enrichN", ".", "columns", ".", "tolist", "(", ")", "enrichN", "=", "pd", ".", "concat", "(", "[", "enrichN", "[", "cols", "[", ":", "gpos", "+", "1", "]", "]", ",", "gene_names", ",", "enrichN", "[", "cols", "[", "gpos", "+", "1", ":", "]", "]", "]", ",", "axis", "=", "1", ")", "return", "enrichN" ]
39.860465
0.023918
def execution_time(self, value):
    """
    Force the execution_time to always be a datetime

    :param value:
    :return:
    """
    if value:
        self._execution_time = parse(value) if isinstance(value, type_check) else value
[ "def", "execution_time", "(", "self", ",", "value", ")", ":", "if", "value", ":", "self", ".", "_execution_time", "=", "parse", "(", "value", ")", "if", "isinstance", "(", "value", ",", "type_check", ")", "else", "value" ]
31.875
0.01145
def load_precision(filename):
    """
    Load a CLASS precision file into a dictionary.

    Parameters
    ----------
    filename : str
        the name of an existing file to load, or one in the files included
        as part of the CLASS source

    Returns
    -------
    dict :
        the precision parameters loaded from file
    """
    # also look in data dir
    path = _find_file(filename)

    r = dict()
    with open(path, 'r') as f:
        exec(f.read(), {}, r)

    return r
[ "def", "load_precision", "(", "filename", ")", ":", "# also look in data dir", "path", "=", "_find_file", "(", "filename", ")", "r", "=", "dict", "(", ")", "with", "open", "(", "path", ",", "'r'", ")", "as", "f", ":", "exec", "(", "f", ".", "read", "(", ")", ",", "{", "}", ",", "r", ")", "return", "r" ]
20.521739
0.002024
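The precision file is simply executed as Python assignments collected into a dict; a sketch of what that looks like, with a hypothetical file name and parameter names used purely for illustration.

# contents of a hypothetical "my_precision.pre":
#     tol_ncdm = 1.e-5
#     k_per_decade_for_pk = 10
params = load_precision("my_precision.pre")
# params == {'tol_ncdm': 1e-05, 'k_per_decade_for_pk': 10}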
def sed(self, name, **kwargs): """Generate a spectral energy distribution (SED) for a source. This function will fit the normalization of the source in each energy bin. By default the SED will be generated with the analysis energy bins but a custom binning can be defined with the ``loge_bins`` parameter. Parameters ---------- name : str Source name. prefix : str Optional string that will be prepended to all output files (FITS and rendered images). loge_bins : `~numpy.ndarray` Sequence of energies in log10(E/MeV) defining the edges of the energy bins. If this argument is None then the analysis energy bins will be used. The energies in this sequence must align with the bin edges of the underyling analysis instance. {options} optimizer : dict Dictionary that overrides the default optimizer settings. Returns ------- sed : dict Dictionary containing output of the SED analysis. """ timer = Timer.create(start=True) name = self.roi.get_source_by_name(name).name # Create schema for method configuration schema = ConfigSchema(self.defaults['sed'], optimizer=self.defaults['optimizer']) schema.add_option('prefix', '') schema.add_option('outfile', None, '', str) schema.add_option('loge_bins', None, '', list) config = utils.create_dict(self.config['sed'], optimizer=self.config['optimizer']) config = schema.create_config(config, **kwargs) self.logger.info('Computing SED for %s' % name) o = self._make_sed(name, **config) self.logger.info('Finished SED') outfile = config.get('outfile', None) if outfile is None: outfile = utils.format_filename(self.workdir, 'sed', prefix=[config['prefix'], name.lower().replace(' ', '_')]) else: outfile = os.path.join(self.workdir, os.path.splitext(outfile)[0]) o['file'] = None if config['write_fits']: o['file'] = os.path.basename(outfile) + '.fits' self._make_sed_fits(o, outfile + '.fits', **config) if config['write_npy']: np.save(outfile + '.npy', o) if config['make_plots']: self._plotter.make_sed_plots(o, **config) self.logger.info('Execution time: %.2f s', timer.elapsed_time) return o
[ "def", "sed", "(", "self", ",", "name", ",", "*", "*", "kwargs", ")", ":", "timer", "=", "Timer", ".", "create", "(", "start", "=", "True", ")", "name", "=", "self", ".", "roi", ".", "get_source_by_name", "(", "name", ")", ".", "name", "# Create schema for method configuration", "schema", "=", "ConfigSchema", "(", "self", ".", "defaults", "[", "'sed'", "]", ",", "optimizer", "=", "self", ".", "defaults", "[", "'optimizer'", "]", ")", "schema", ".", "add_option", "(", "'prefix'", ",", "''", ")", "schema", ".", "add_option", "(", "'outfile'", ",", "None", ",", "''", ",", "str", ")", "schema", ".", "add_option", "(", "'loge_bins'", ",", "None", ",", "''", ",", "list", ")", "config", "=", "utils", ".", "create_dict", "(", "self", ".", "config", "[", "'sed'", "]", ",", "optimizer", "=", "self", ".", "config", "[", "'optimizer'", "]", ")", "config", "=", "schema", ".", "create_config", "(", "config", ",", "*", "*", "kwargs", ")", "self", ".", "logger", ".", "info", "(", "'Computing SED for %s'", "%", "name", ")", "o", "=", "self", ".", "_make_sed", "(", "name", ",", "*", "*", "config", ")", "self", ".", "logger", ".", "info", "(", "'Finished SED'", ")", "outfile", "=", "config", ".", "get", "(", "'outfile'", ",", "None", ")", "if", "outfile", "is", "None", ":", "outfile", "=", "utils", ".", "format_filename", "(", "self", ".", "workdir", ",", "'sed'", ",", "prefix", "=", "[", "config", "[", "'prefix'", "]", ",", "name", ".", "lower", "(", ")", ".", "replace", "(", "' '", ",", "'_'", ")", "]", ")", "else", ":", "outfile", "=", "os", ".", "path", ".", "join", "(", "self", ".", "workdir", ",", "os", ".", "path", ".", "splitext", "(", "outfile", ")", "[", "0", "]", ")", "o", "[", "'file'", "]", "=", "None", "if", "config", "[", "'write_fits'", "]", ":", "o", "[", "'file'", "]", "=", "os", ".", "path", ".", "basename", "(", "outfile", ")", "+", "'.fits'", "self", ".", "_make_sed_fits", "(", "o", ",", "outfile", "+", "'.fits'", ",", "*", "*", "config", ")", "if", "config", "[", "'write_npy'", "]", ":", "np", ".", "save", "(", "outfile", "+", "'.npy'", ",", "o", ")", "if", "config", "[", "'make_plots'", "]", ":", "self", ".", "_plotter", ".", "make_sed_plots", "(", "o", ",", "*", "*", "config", ")", "self", ".", "logger", ".", "info", "(", "'Execution time: %.2f s'", ",", "timer", ".", "elapsed_time", ")", "return", "o" ]
35.56
0.001094
def http_request(self, verb, uri, data=None, headers=None, files=None, response_format=None, is_rdf = True, stream = False ): ''' Primary route for all HTTP requests to repository. Ability to set most parameters for requests library, with some additional convenience parameters as well. Args: verb (str): HTTP verb to use for request, e.g. PUT, POST, GET, HEAD, PATCH, etc. uri (rdflib.term.URIRef,str): input URI data (str,file): payload of data to send for request, may be overridden in preperation of request headers (dict): optional dictionary of headers passed directly to requests.request files (dict): optional dictionary of files passed directly to requests.request response_format (str): desired response format for resource's payload, e.g. 'application/rdf+xml', 'text/turtle', etc. is_rdf (bool): if True, set Accept header based on combination of response_format and headers stream (bool): passed directly to requests.request for stream parameter Returns: requests.models.Response ''' # set content negotiated response format for RDFSources if is_rdf: ''' Acceptable content negotiated response formats include: application/ld+json (discouraged, if not prohibited, as it drops prefixes used in repository) application/n-triples application/rdf+xml text/n3 (or text/rdf+n3) text/plain text/turtle (or application/x-turtle) ''' # set for GET requests only if verb == 'GET': # if no response_format has been requested to this point, use repository instance default if not response_format: response_format = self.repo.default_serialization # if headers present, append if headers and 'Accept' not in headers.keys(): headers['Accept'] = response_format # if headers are blank, init dictionary else: headers = {'Accept':response_format} # prepare uri for HTTP request if type(uri) == rdflib.term.URIRef: uri = uri.toPython() logger.debug("%s request for %s, format %s, headers %s" % (verb, uri, response_format, headers)) # manually prepare request session = requests.Session() request = requests.Request(verb, uri, auth=(self.repo.username, self.repo.password), data=data, headers=headers, files=files) prepped_request = session.prepare_request(request) response = session.send(prepped_request, stream=stream, ) return response
[ "def", "http_request", "(", "self", ",", "verb", ",", "uri", ",", "data", "=", "None", ",", "headers", "=", "None", ",", "files", "=", "None", ",", "response_format", "=", "None", ",", "is_rdf", "=", "True", ",", "stream", "=", "False", ")", ":", "# set content negotiated response format for RDFSources", "if", "is_rdf", ":", "'''\n\t\t\tAcceptable content negotiated response formats include:\n\t\t\t\tapplication/ld+json (discouraged, if not prohibited, as it drops prefixes used in repository)\n\t\t\t\tapplication/n-triples\n\t\t\t\tapplication/rdf+xml\n\t\t\t\ttext/n3 (or text/rdf+n3)\n\t\t\t\ttext/plain\n\t\t\t\ttext/turtle (or application/x-turtle)\n\t\t\t'''", "# set for GET requests only", "if", "verb", "==", "'GET'", ":", "# if no response_format has been requested to this point, use repository instance default", "if", "not", "response_format", ":", "response_format", "=", "self", ".", "repo", ".", "default_serialization", "# if headers present, append", "if", "headers", "and", "'Accept'", "not", "in", "headers", ".", "keys", "(", ")", ":", "headers", "[", "'Accept'", "]", "=", "response_format", "# if headers are blank, init dictionary", "else", ":", "headers", "=", "{", "'Accept'", ":", "response_format", "}", "# prepare uri for HTTP request", "if", "type", "(", "uri", ")", "==", "rdflib", ".", "term", ".", "URIRef", ":", "uri", "=", "uri", ".", "toPython", "(", ")", "logger", ".", "debug", "(", "\"%s request for %s, format %s, headers %s\"", "%", "(", "verb", ",", "uri", ",", "response_format", ",", "headers", ")", ")", "# manually prepare request", "session", "=", "requests", ".", "Session", "(", ")", "request", "=", "requests", ".", "Request", "(", "verb", ",", "uri", ",", "auth", "=", "(", "self", ".", "repo", ".", "username", ",", "self", ".", "repo", ".", "password", ")", ",", "data", "=", "data", ",", "headers", "=", "headers", ",", "files", "=", "files", ")", "prepped_request", "=", "session", ".", "prepare_request", "(", "request", ")", "response", "=", "session", ".", "send", "(", "prepped_request", ",", "stream", "=", "stream", ",", ")", "return", "response" ]
35.104478
0.036394
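A minimal usage sketch for the `http_request` method above. `repo_api` and the URI are hypothetical; the only assumptions are the method signature shown in this row and a `repo` attribute carrying credentials and a default serialization.

```python
# Illustrative only: `repo_api` is a hypothetical client instance exposing
# the http_request() method above, with repo.username/password configured.
import rdflib

uri = rdflib.URIRef('http://localhost:8080/rest/my-resource')

# GET an RDF resource; the Accept header is negotiated from response_format
response = repo_api.http_request('GET', uri, response_format='text/turtle')
print(response.status_code, response.headers.get('Content-Type'))

# HEAD request for a non-RDF resource, skipping content negotiation
head_response = repo_api.http_request('HEAD', uri, is_rdf=False)
```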
def is_unit_or_unitstring(value): """must be an astropy.unit""" if is_unit(value)[0]: return True, value try: unit = units.Unit(value) except: return False, value else: return True, unit
[ "def", "is_unit_or_unitstring", "(", "value", ")", ":", "if", "is_unit", "(", "value", ")", "[", "0", "]", ":", "return", "True", ",", "value", "try", ":", "unit", "=", "units", ".", "Unit", "(", "value", ")", "except", ":", "return", "False", ",", "value", "else", ":", "return", "True", ",", "unit" ]
22.9
0.008403
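A quick sketch of the two accepted inputs for `is_unit_or_unitstring` above, assuming astropy's `units` module is the backend and that the companion `is_unit` helper simply reports whether the value is already a unit object.

```python
# Assumes is_unit_or_unitstring and its is_unit helper are importable from the module.
from astropy import units

print(is_unit_or_unitstring(units.m))     # (True, Unit("m"))       already a unit
print(is_unit_or_unitstring('km / s'))    # (True, Unit("km / s"))  parsed from a string
print(is_unit_or_unitstring('spam'))      # (False, 'spam')         not parseable
```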
def get(src_hdfs_path, dest_path, **kwargs): """\ Copy the contents of ``src_hdfs_path`` to ``dest_path``. ``dest_path`` is forced to be interpreted as an ordinary local path (see :func:`~path.abspath`). The source file is opened for reading and the copy is opened for writing. Additional keyword arguments, if any, are handled like in :func:`open`. """ cp(src_hdfs_path, path.abspath(dest_path, local=True), **kwargs)
[ "def", "get", "(", "src_hdfs_path", ",", "dest_path", ",", "*", "*", "kwargs", ")", ":", "cp", "(", "src_hdfs_path", ",", "path", ".", "abspath", "(", "dest_path", ",", "local", "=", "True", ")", ",", "*", "*", "kwargs", ")" ]
44.2
0.002217
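A minimal usage sketch for the `get` helper above; the HDFS and local paths are placeholders.

```python
# Copy an HDFS file to the local filesystem; paths are placeholders.
import pydoop.hdfs as hdfs

hdfs.get('/user/someone/results.txt', '/tmp/results.txt')
# Any extra keyword arguments would be forwarded as described in the docstring.
```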
def remove(self): """ Remove the directory. """ lib.gp_camera_folder_remove_dir( self._cam._cam, self.parent.path.encode(), self.name.encode(), self._cam._ctx)
[ "def", "remove", "(", "self", ")", ":", "lib", ".", "gp_camera_folder_remove_dir", "(", "self", ".", "_cam", ".", "_cam", ",", "self", ".", "parent", ".", "path", ".", "encode", "(", ")", ",", "self", ".", "name", ".", "encode", "(", ")", ",", "self", ".", "_cam", ".", "_ctx", ")" ]
39
0.01005
def __bindings(self): """Binds events to handlers""" self.textctrl.Bind(wx.EVT_TEXT, self.OnText) self.fontbutton.Bind(wx.EVT_BUTTON, self.OnFont) self.Bind(csel.EVT_COLOURSELECT, self.OnColor)
[ "def", "__bindings", "(", "self", ")", ":", "self", ".", "textctrl", ".", "Bind", "(", "wx", ".", "EVT_TEXT", ",", "self", ".", "OnText", ")", "self", ".", "fontbutton", ".", "Bind", "(", "wx", ".", "EVT_BUTTON", ",", "self", ".", "OnFont", ")", "self", ".", "Bind", "(", "csel", ".", "EVT_COLOURSELECT", ",", "self", ".", "OnColor", ")" ]
36.833333
0.00885
def StartAndWait(self): """Starts the task and waits until it is done.""" self.StartTask() self.WaitUntilTaskDone(pydaq.DAQmx_Val_WaitInfinitely) self.ClearTask()
[ "def", "StartAndWait", "(", "self", ")", ":", "self", ".", "StartTask", "(", ")", "self", ".", "WaitUntilTaskDone", "(", "pydaq", ".", "DAQmx_Val_WaitInfinitely", ")", "self", ".", "ClearTask", "(", ")" ]
38
0.010309
def fit_sparse(model_matrix, response, model, model_coefficients_start, tolerance, l1_regularizer, l2_regularizer=None, maximum_iterations=None, maximum_full_sweeps_per_iteration=1, learning_rate=None, name=None): r"""Fits a GLM using coordinate-wise FIM-informed proximal gradient descent. This function uses a L1- and L2-regularized, second-order quasi-Newton method to find maximum-likelihood parameters for the given model and observed data. The second-order approximations use negative Fisher information in place of the Hessian, that is, ```none FisherInfo = E_Y[Hessian with respect to model_coefficients of -LogLikelihood( Y | model_matrix, current value of model_coefficients)] ``` For large, sparse data sets, `model_matrix` should be supplied as a `SparseTensor`. Args: model_matrix: (Batch of) matrix-shaped, `float` `Tensor` or `SparseTensor` where each row represents a sample's features. Has shape `[N, n]` where `N` is the number of data samples and `n` is the number of features per sample. response: (Batch of) vector-shaped `Tensor` with the same dtype as `model_matrix` where each element represents a sample's observed response (to the corresponding row of features). model: `tfp.glm.ExponentialFamily`-like instance, which specifies the link function and distribution of the GLM, and thus characterizes the negative log-likelihood which will be minimized. Must have sufficient statistic equal to the response, that is, `T(y) = y`. model_coefficients_start: (Batch of) vector-shaped, `float` `Tensor` with the same dtype as `model_matrix`, representing the initial values of the coefficients for the GLM regression. Has shape `[n]` where `model_matrix` has shape `[N, n]`. tolerance: scalar, `float` `Tensor` representing the tolerance for each optiization step; see the `tolerance` argument of `fit_sparse_one_step`. l1_regularizer: scalar, `float` `Tensor` representing the weight of the L1 regularization term. l2_regularizer: scalar, `float` `Tensor` representing the weight of the L2 regularization term. Default value: `None` (i.e., no L2 regularization). maximum_iterations: Python integer specifying maximum number of iterations of the outer loop of the optimizer (i.e., maximum number of calls to `fit_sparse_one_step`). After this many iterations of the outer loop, the algorithm will terminate even if the return value `model_coefficients` has not converged. Default value: `1`. maximum_full_sweeps_per_iteration: Python integer specifying the maximum number of coordinate descent sweeps allowed in each iteration. Default value: `1`. learning_rate: scalar, `float` `Tensor` representing a multiplicative factor used to dampen the proximal gradient descent steps. Default value: `None` (i.e., factor is conceptually `1`). name: Python string representing the name of the TensorFlow operation. The default name is `"fit_sparse"`. Returns: model_coefficients: (Batch of) `Tensor` of the same shape and dtype as `model_coefficients_start`, representing the computed model coefficients which minimize the regularized negative log-likelihood. is_converged: scalar, `bool` `Tensor` indicating whether the minimization procedure converged across all batches within the specified number of iterations. Here convergence means that an iteration of the inner loop (`fit_sparse_one_step`) returns `True` for its `is_converged` output value. 
iter: scalar, `int` `Tensor` indicating the actual number of iterations of the outer loop of the optimizer completed (i.e., number of calls to `fit_sparse_one_step` before achieving convergence). #### Example ```python from __future__ import print_function import numpy as np import tensorflow as tf import tensorflow_probability as tfp tfd = tfp.distributions def make_dataset(n, d, link, scale=1., dtype=np.float32): model_coefficients = tfd.Uniform( low=np.array(-1, dtype), high=np.array(1, dtype)).sample( d, seed=42) radius = np.sqrt(2.) model_coefficients *= radius / tf.linalg.norm(model_coefficients) mask = tf.random_shuffle(tf.range(d)) < tf.to_int32(0.5 * tf.to_float(d)) model_coefficients = tf.where(mask, model_coefficients, tf.zeros_like(model_coefficients)) model_matrix = tfd.Normal( loc=np.array(0, dtype), scale=np.array(1, dtype)).sample( [n, d], seed=43) scale = tf.convert_to_tensor(scale, dtype) linear_response = tf.matmul(model_matrix, model_coefficients[..., tf.newaxis])[..., 0] if link == 'linear': response = tfd.Normal(loc=linear_response, scale=scale).sample(seed=44) elif link == 'probit': response = tf.cast( tfd.Normal(loc=linear_response, scale=scale).sample(seed=44) > 0, dtype) elif link == 'logit': response = tfd.Bernoulli(logits=linear_response).sample(seed=44) else: raise ValueError('unrecognized true link: {}'.format(link)) return model_matrix, response, model_coefficients, mask with tf.Session() as sess: x_, y_, model_coefficients_true_, _ = sess.run(make_dataset( n=int(1e5), d=100, link='probit')) model = tfp.glm.Bernoulli() model_coefficients_start = tf.zeros(x_.shape[-1], np.float32) model_coefficients, is_converged, num_iter = tfp.glm.fit_sparse( model_matrix=tf.convert_to_tensor(x_), response=tf.convert_to_tensor(y_), model=model, model_coefficients_start=model_coefficients_start, l1_regularizer=800., l2_regularizer=None, maximum_iterations=10, maximum_full_sweeps_per_iteration=10, tolerance=1e-6, learning_rate=None) model_coefficients_, is_converged_, num_iter_ = sess.run([ model_coefficients, is_converged, num_iter]) print("is_converged:", is_converged_) print(" num_iter:", num_iter_) print("\nLearned / True") print(np.concatenate( [[model_coefficients_], [model_coefficients_true_]], axis=0).T) # ==> # is_converged: True # num_iter: 1 # # Learned / True # [[ 0. 0. ] # [ 0. 0. ] # [ 0. 0. ] # [ 0.11195257 0.12484948] # [ 0. 0. ] # [ 0.05191106 0.06394956] # [-0.15090358 -0.15325639] # [-0.18187316 -0.18825999] # [-0.06140942 -0.07994166] # [ 0. 0. ] # [ 0. 0. ] # [ 0. 0. ] # [ 0.14474444 0.15810856] # [ 0. 0. ] # [-0.25249591 -0.24260855] # [ 0. 0. ] # [ 0. 0. ] # [-0.03888761 -0.06755984] # [ 0. 0. ] # [ 0. 0. ] # [ 0. 0. ] # [-0.0192222 -0.04169233] # [ 0. 0. ] # [ 0. 0. ] # [ 0.01434913 0.03568212] # [-0.11336883 -0.12873614] # [ 0. 0. ] # [-0.24496339 -0.24048163] # [ 0. 0. ] # [ 0. 0. ] # [ 0.04088281 0.06565224] # [-0.12784363 -0.13359821] # [ 0.05618424 0.07396613] # [ 0. 0. ] # [ 0. 0. ] # [ 0. 0. ] # [ 0. -0.01719233] # [ 0. 0. ] # [ 0. 0. ] # [-0.00076072 -0.03607186] # [ 0.21801499 0.21146794] # [-0.02161094 -0.04031265] # [ 0.0918689 0.10487888] # [ 0.0106154 0.03233612] # [-0.07817317 -0.09725142] # [ 0. 0. ] # [ 0. 0. ] # [-0.23725343 -0.24194022] # [ 0. 0. ] # [-0.08725718 -0.1048776 ] # [ 0. 0. ] # [ 0. 0. ] # [-0.02114314 -0.04145789] # [ 0. 0. ] # [ 0. 0. ] # [-0.02710908 -0.04590397] # [ 0.15293184 0.15415154] # [ 0.2114463 0.2088728 ] # [-0.10969634 -0.12368613] # [ 0. 
-0.01505797] # [-0.01140458 -0.03234904] # [ 0.16051085 0.1680062 ] # [ 0.09816848 0.11094204] ``` #### References [1]: Jerome Friedman, Trevor Hastie and Rob Tibshirani. Regularization Paths for Generalized Linear Models via Coordinate Descent. _Journal of Statistical Software_, 33(1), 2010. https://www.jstatsoft.org/article/view/v033i01/v33i01.pdf [2]: Guo-Xun Yuan, Chia-Hua Ho and Chih-Jen Lin. An Improved GLMNET for L1-regularized Logistic Regression. _Journal of Machine Learning Research_, 13, 2012. http://www.jmlr.org/papers/volume13/yuan12a/yuan12a.pdf """ graph_deps = [ model_matrix, response, model_coefficients_start, l1_regularizer, l2_regularizer, maximum_iterations, maximum_full_sweeps_per_iteration, # TODO(b/111925792): Replace `tolerance` arg with something like # `convergence_criteria_fn`. tolerance, learning_rate, ] with tf.compat.v1.name_scope(name, 'fit_sparse', graph_deps): # TODO(b/111922388): Include dispersion and offset parameters. def _grad_neg_log_likelihood_and_fim_fn(x): predicted_linear_response = sparse_or_dense_matvecmul(model_matrix, x) g, h_middle = _grad_neg_log_likelihood_and_fim( model_matrix, predicted_linear_response, response, model) return g, model_matrix, h_middle return tfp.optimizer.proximal_hessian_sparse_minimize( _grad_neg_log_likelihood_and_fim_fn, x_start=model_coefficients_start, l1_regularizer=l1_regularizer, l2_regularizer=l2_regularizer, maximum_iterations=maximum_iterations, maximum_full_sweeps_per_iteration=maximum_full_sweeps_per_iteration, learning_rate=learning_rate, tolerance=tolerance, name=name)
[ "def", "fit_sparse", "(", "model_matrix", ",", "response", ",", "model", ",", "model_coefficients_start", ",", "tolerance", ",", "l1_regularizer", ",", "l2_regularizer", "=", "None", ",", "maximum_iterations", "=", "None", ",", "maximum_full_sweeps_per_iteration", "=", "1", ",", "learning_rate", "=", "None", ",", "name", "=", "None", ")", ":", "graph_deps", "=", "[", "model_matrix", ",", "response", ",", "model_coefficients_start", ",", "l1_regularizer", ",", "l2_regularizer", ",", "maximum_iterations", ",", "maximum_full_sweeps_per_iteration", ",", "# TODO(b/111925792): Replace `tolerance` arg with something like", "# `convergence_criteria_fn`.", "tolerance", ",", "learning_rate", ",", "]", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "'fit_sparse'", ",", "graph_deps", ")", ":", "# TODO(b/111922388): Include dispersion and offset parameters.", "def", "_grad_neg_log_likelihood_and_fim_fn", "(", "x", ")", ":", "predicted_linear_response", "=", "sparse_or_dense_matvecmul", "(", "model_matrix", ",", "x", ")", "g", ",", "h_middle", "=", "_grad_neg_log_likelihood_and_fim", "(", "model_matrix", ",", "predicted_linear_response", ",", "response", ",", "model", ")", "return", "g", ",", "model_matrix", ",", "h_middle", "return", "tfp", ".", "optimizer", ".", "proximal_hessian_sparse_minimize", "(", "_grad_neg_log_likelihood_and_fim_fn", ",", "x_start", "=", "model_coefficients_start", ",", "l1_regularizer", "=", "l1_regularizer", ",", "l2_regularizer", "=", "l2_regularizer", ",", "maximum_iterations", "=", "maximum_iterations", ",", "maximum_full_sweeps_per_iteration", "=", "maximum_full_sweeps_per_iteration", ",", "learning_rate", "=", "learning_rate", ",", "tolerance", "=", "tolerance", ",", "name", "=", "name", ")" ]
39.173228
0.001176
def load(self): """Return the current load. The load is represented as a float, where 1.0 represents having hit one of the flow control limits, and values between 0.0 and 1.0 represent how close we are to them. (0.5 means we have exactly half of what the flow control setting allows, for example.) There are (currently) two flow control settings; this property computes how close the manager is to each of them, and returns whichever value is higher. (It does not matter that we have lots of running room on setting A if setting B is over.) Returns: float: The load value. """ if self._leaser is None: return 0 return max( [ self._leaser.message_count / self._flow_control.max_messages, self._leaser.bytes / self._flow_control.max_bytes, ] )
[ "def", "load", "(", "self", ")", ":", "if", "self", ".", "_leaser", "is", "None", ":", "return", "0", "return", "max", "(", "[", "self", ".", "_leaser", ".", "message_count", "/", "self", ".", "_flow_control", ".", "max_messages", ",", "self", ".", "_leaser", ".", "bytes", "/", "self", ".", "_flow_control", ".", "max_bytes", ",", "]", ")" ]
36.52
0.002134
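A worked example of the `load` computation above with made-up numbers, showing that the larger of the two ratios wins.

```python
# Made-up lease statistics versus flow-control limits.
leased_messages, max_messages = 60, 100      # 0.6 of the message limit
leased_bytes, max_bytes = 35_000, 100_000    # 0.35 of the byte limit

load = max(leased_messages / max_messages, leased_bytes / max_bytes)
print(load)  # 0.6 -> the message count is the limiting factor
```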
def decision_function(self, X=None): """Output the decision value of the prediction. If X is not equal to self.test_raw_data\\_, i.e. predict has not been called, first generate the test data; after getting the test data, get the decision value via self.clf. If X is None, test_data\\_ is ready to be used Parameters ---------- X: Optional[list of tuple (data1, data2)] data1 and data2 are numpy arrays in shape [num_TRs, num_voxels] to be computed for correlation. default None, meaning that the data to be predicted have been processed in the fit method. Otherwise, X contains the activity data filtered by ROIs and prepared for correlation computation. len(X) is the number of test samples. if len(X) > 1: normalization is done on all test samples. Within list, all data1s must have the same num_voxels value, all data2s must have the same num_voxels value. Returns ------- confidence: the prediction confidence values of X, in shape [len(X),] """ if X is not None and not self._is_equal_to_test_raw_data(X): for x in X: assert len(x) == 2, \ 'there must be two parts for each correlation computation' X1, X2 = zip(*X) num_voxels1 = X1[0].shape[1] num_voxels2 = X2[0].shape[1] assert len(X1) == len(X2), \ 'the list lengths do not match' # make sure X1 always has more voxels if num_voxels1 < num_voxels2: X1, X2 = X2, X1 num_voxels1, num_voxels2 = num_voxels2, num_voxels1 assert self.num_features_ == num_voxels1 * num_voxels2, \ 'the number of features does not match the model' num_test_samples = len(X1) self.test_raw_data_ = X # generate the test_data first # correlation computation corr_data = self._prepare_corerelation_data(X1, X2) # normalization normalized_corr_data = \ self._normalize_correlation_data(corr_data, num_test_samples) # test data generation self.test_data_ = self._prepare_test_data(normalized_corr_data) confidence = self.clf.decision_function(self.test_data_) return confidence
[ "def", "decision_function", "(", "self", ",", "X", "=", "None", ")", ":", "if", "X", "is", "not", "None", "and", "not", "self", ".", "_is_equal_to_test_raw_data", "(", "X", ")", ":", "for", "x", "in", "X", ":", "assert", "len", "(", "x", ")", "==", "2", ",", "'there must be two parts for each correlation computation'", "X1", ",", "X2", "=", "zip", "(", "*", "X", ")", "num_voxels1", "=", "X1", "[", "0", "]", ".", "shape", "[", "1", "]", "num_voxels2", "=", "X2", "[", "0", "]", ".", "shape", "[", "1", "]", "assert", "len", "(", "X1", ")", "==", "len", "(", "X2", ")", ",", "'the list lengths do not match'", "# make sure X1 always has more voxels", "if", "num_voxels1", "<", "num_voxels2", ":", "X1", ",", "X2", "=", "X2", ",", "X1", "num_voxels1", ",", "num_voxels2", "=", "num_voxels2", ",", "num_voxels1", "assert", "self", ".", "num_features_", "==", "num_voxels1", "*", "num_voxels2", ",", "'the number of features does not match the model'", "num_test_samples", "=", "len", "(", "X1", ")", "self", ".", "test_raw_data_", "=", "X", "# generate the test_data first", "# correlation computation", "corr_data", "=", "self", ".", "_prepare_corerelation_data", "(", "X1", ",", "X2", ")", "# normalization", "normalized_corr_data", "=", "self", ".", "_normalize_correlation_data", "(", "corr_data", ",", "num_test_samples", ")", "# test data generation", "self", ".", "test_data_", "=", "self", ".", "_prepare_test_data", "(", "normalized_corr_data", ")", "confidence", "=", "self", ".", "clf", ".", "decision_function", "(", "self", ".", "test_data_", ")", "return", "confidence" ]
45.574074
0.000796
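A sketch of the input layout expected by `decision_function` above. The arrays are random stand-ins and `clf` stands for an already-fitted instance of the surrounding estimator, so the final call is left commented out.

```python
import numpy as np

num_trs = 200
data1 = np.random.randn(num_trs, 80).astype(np.float32)   # ROI 1: 80 voxels
data2 = np.random.randn(num_trs, 50).astype(np.float32)   # ROI 2: 50 voxels

X = [(data1, data2), (data1, data2)]        # two test samples
# confidence = clf.decision_function(X)     # expected shape: [len(X),]
```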
def namedb_get_all_importing_namespace_hashes( self, current_block ): """ Get the list of all non-expired preordered and revealed namespace hashes. """ query = "SELECT preorder_hash FROM namespaces WHERE (op = ? AND reveal_block < ?) OR (op = ? AND block_number < ?);" args = (NAMESPACE_REVEAL, current_block + NAMESPACE_REVEAL_EXPIRE, NAMESPACE_PREORDER, current_block + NAMESPACE_PREORDER_EXPIRE ) namespace_rows = namedb_query_execute( cur, query, args ) ret = [] for namespace_row in namespace_rows: ret.append( namespace_row['preorder_hash'] ) return ret
[ "def", "namedb_get_all_importing_namespace_hashes", "(", "self", ",", "current_block", ")", ":", "query", "=", "\"SELECT preorder_hash FROM namespaces WHERE (op = ? AND reveal_block < ?) OR (op = ? AND block_number < ?);\"", "args", "=", "(", "NAMESPACE_REVEAL", ",", "current_block", "+", "NAMESPACE_REVEAL_EXPIRE", ",", "NAMESPACE_PREORDER", ",", "current_block", "+", "NAMESPACE_PREORDER_EXPIRE", ")", "namespace_rows", "=", "namedb_query_execute", "(", "cur", ",", "query", ",", "args", ")", "ret", "=", "[", "]", "for", "namespace_row", "in", "namespace_rows", ":", "ret", ".", "append", "(", "namespace_row", "[", "'preorder_hash'", "]", ")", "return", "ret" ]
42.357143
0.016502
def send_approve_mail(request, user): """ Sends an email to staff listed in the setting ``ACCOUNTS_APPROVAL_EMAILS``, when a new user signs up and the ``ACCOUNTS_APPROVAL_REQUIRED`` setting is ``True``. """ approval_emails = split_addresses(settings.ACCOUNTS_APPROVAL_EMAILS) if not approval_emails: return context = { "request": request, "user": user, "change_url": admin_url(user.__class__, "change", user.id), } subject = subject_template("email/account_approve_subject.txt", context) send_mail_template(subject, "email/account_approve", settings.DEFAULT_FROM_EMAIL, approval_emails, context=context)
[ "def", "send_approve_mail", "(", "request", ",", "user", ")", ":", "approval_emails", "=", "split_addresses", "(", "settings", ".", "ACCOUNTS_APPROVAL_EMAILS", ")", "if", "not", "approval_emails", ":", "return", "context", "=", "{", "\"request\"", ":", "request", ",", "\"user\"", ":", "user", ",", "\"change_url\"", ":", "admin_url", "(", "user", ".", "__class__", ",", "\"change\"", ",", "user", ".", "id", ")", ",", "}", "subject", "=", "subject_template", "(", "\"email/account_approve_subject.txt\"", ",", "context", ")", "send_mail_template", "(", "subject", ",", "\"email/account_approve\"", ",", "settings", ".", "DEFAULT_FROM_EMAIL", ",", "approval_emails", ",", "context", "=", "context", ")" ]
39.5
0.001374
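A hypothetical settings excerpt for the approval flow that `send_approve_mail` above relies on; only the two setting names come from the docstring, the addresses are placeholders.

```python
# settings.py (illustrative values only)
ACCOUNTS_APPROVAL_REQUIRED = True
ACCOUNTS_APPROVAL_EMAILS = "admin@example.com, staff@example.com"
```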
def get_current_cmus(): """ Get the current song from cmus. """ result = subprocess.run('cmus-remote -Q'.split(' '), check=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) info = {} for line in result.stdout.decode().split('\n'): line = line.split(' ') if line[0] != 'tag': continue key = line[1] if key in ['album', 'title', 'artist', 'albumartist'] and\ key not in info: info[key] = ' '.join(line[2:]) if 'albumartist' in info: info['artist'] = info['albumartist'] del info['albumartist'] return Song(**info)
[ "def", "get_current_cmus", "(", ")", ":", "result", "=", "subprocess", ".", "run", "(", "'cmus-remote -Q'", ".", "split", "(", "' '", ")", ",", "check", "=", "True", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "DEVNULL", ")", "info", "=", "{", "}", "for", "line", "in", "result", ".", "stdout", ".", "decode", "(", ")", ".", "split", "(", "'\\n'", ")", ":", "line", "=", "line", ".", "split", "(", "' '", ")", "if", "line", "[", "0", "]", "!=", "'tag'", ":", "continue", "key", "=", "line", "[", "1", "]", "if", "key", "in", "[", "'album'", ",", "'title'", ",", "'artist'", ",", "'albumartist'", "]", "and", "key", "not", "in", "info", ":", "info", "[", "key", "]", "=", "' '", ".", "join", "(", "line", "[", "2", ":", "]", ")", "if", "'albumartist'", "in", "info", ":", "info", "[", "'artist'", "]", "=", "info", "[", "'albumartist'", "]", "del", "info", "[", "'albumartist'", "]", "return", "Song", "(", "*", "*", "info", ")" ]
30.619048
0.001508
def build_import_pattern(mapping1, mapping2): u""" mapping1: A dict mapping py3k modules to all possible py2k replacements mapping2: A dict mapping py2k modules to the things they do This builds a HUGE pattern to match all ways that things can be imported """ # py3k: urllib.request, py2k: ('urllib2', 'urllib') yield from_import % (all_modules_subpattern()) for py3k, py2k in mapping1.items(): name, attr = py3k.split(u'.') s_name = simple_name % (name) s_attr = simple_attr % (attr) d_name = dotted_name % (s_name, s_attr) yield name_import % (d_name) yield power_twoname % (s_name, s_attr) if attr == u'__init__': yield name_import % (s_name) yield power_onename % (s_name) yield name_import_rename % (d_name) yield from_import_rename % (s_name, s_attr, s_attr, s_attr, s_attr)
[ "def", "build_import_pattern", "(", "mapping1", ",", "mapping2", ")", ":", "# py3k: urllib.request, py2k: ('urllib2', 'urllib')", "yield", "from_import", "%", "(", "all_modules_subpattern", "(", ")", ")", "for", "py3k", ",", "py2k", "in", "mapping1", ".", "items", "(", ")", ":", "name", ",", "attr", "=", "py3k", ".", "split", "(", "u'.'", ")", "s_name", "=", "simple_name", "%", "(", "name", ")", "s_attr", "=", "simple_attr", "%", "(", "attr", ")", "d_name", "=", "dotted_name", "%", "(", "s_name", ",", "s_attr", ")", "yield", "name_import", "%", "(", "d_name", ")", "yield", "power_twoname", "%", "(", "s_name", ",", "s_attr", ")", "if", "attr", "==", "u'__init__'", ":", "yield", "name_import", "%", "(", "s_name", ")", "yield", "power_onename", "%", "(", "s_name", ")", "yield", "name_import_rename", "%", "(", "d_name", ")", "yield", "from_import_rename", "%", "(", "s_name", ",", "s_attr", ",", "s_attr", ",", "s_attr", ",", "s_attr", ")" ]
44.45
0.001101
def get_qtls_from_mapqtl_data(matrix, threshold, inputfile): """Extract the QTLs found by MapQTL reading its file. This assumes that there is only one QTL per linkage group. :arg matrix, the MapQTL file read in memory :arg threshold, threshold used to determine if a given LOD value is reflective of the presence of a QTL. :arg inputfile, name of the inputfile in which the QTLs have been found """ trait_name = inputfile.split(')_', 1)[1].split('.mqo')[0] qtls = [] qtl = None for entry in matrix[1:]: if qtl is None: qtl = entry if qtl[1] != entry[1]: if float(qtl[4]) > float(threshold): qtl[0] = trait_name qtls.append(qtl) qtl = entry if entry[4] == '': # pragma: no cover entry[4] = 0 if qtl[4] == '': # pragma: no cover qtl[4] = 0 if float(entry[4]) > float(qtl[4]): qtl = entry if float(qtl[4]) > float(threshold): qtl[0] = trait_name if qtl not in qtls: qtls.append(qtl) return qtls
[ "def", "get_qtls_from_mapqtl_data", "(", "matrix", ",", "threshold", ",", "inputfile", ")", ":", "trait_name", "=", "inputfile", ".", "split", "(", "')_'", ",", "1", ")", "[", "1", "]", ".", "split", "(", "'.mqo'", ")", "[", "0", "]", "qtls", "=", "[", "]", "qtl", "=", "None", "for", "entry", "in", "matrix", "[", "1", ":", "]", ":", "if", "qtl", "is", "None", ":", "qtl", "=", "entry", "if", "qtl", "[", "1", "]", "!=", "entry", "[", "1", "]", ":", "if", "float", "(", "qtl", "[", "4", "]", ")", ">", "float", "(", "threshold", ")", ":", "qtl", "[", "0", "]", "=", "trait_name", "qtls", ".", "append", "(", "qtl", ")", "qtl", "=", "entry", "if", "entry", "[", "4", "]", "==", "''", ":", "# pragma: no cover", "entry", "[", "4", "]", "=", "0", "if", "qtl", "[", "4", "]", "==", "''", ":", "# pragma: no cover", "qtl", "[", "4", "]", "=", "0", "if", "float", "(", "entry", "[", "4", "]", ")", ">", "float", "(", "qtl", "[", "4", "]", ")", ":", "qtl", "=", "entry", "if", "float", "(", "qtl", "[", "4", "]", ")", ">", "float", "(", "threshold", ")", ":", "qtl", "[", "0", "]", "=", "trait_name", "if", "qtl", "not", "in", "qtls", ":", "qtls", ".", "append", "(", "qtl", ")", "return", "qtls" ]
31.285714
0.000886
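A tiny made-up input for `get_qtls_from_mapqtl_data` above. The code only inspects columns 1 (linkage group) and 4 (LOD), so the remaining columns are placeholder guesses; the header row is skipped by the function.

```python
matrix = [
    ['trait', 'group', 'position', 'locus', 'LOD'],   # header row, skipped
    ['', '1', '10.0', 'm1', '1.2'],
    ['', '1', '20.0', 'm2', '4.5'],                   # best LOD on group 1
    ['', '2', '15.0', 'm3', '2.9'],
]

qtls = get_qtls_from_mapqtl_data(matrix, threshold=3.0,
                                 inputfile='(run)_trait1.mqo')
# -> one QTL kept (group 1, LOD 4.5), labelled with the trait name 'trait1'
```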
def listen(cls, event, func): """Add a callback for a signal against the class""" signal(event).connect(func, sender=cls)
[ "def", "listen", "(", "cls", ",", "event", ",", "func", ")", ":", "signal", "(", "event", ")", ".", "connect", "(", "func", ",", "sender", "=", "cls", ")" ]
45
0.014599
def get_hostname(): ''' Determines the current hostname by probing ``uname -n``. Falls back to ``hostname`` in case of problems. |appteardown| if both fail (usually they don't, but consider this if you are debugging weird problems) :returns: The hostname as a string. Domain parts will be split off ''' h = shell_run('uname -n', critical=False, verbose=False) if not h: h = shell_run('hostname', critical=False, verbose=False) if not h: shell_notify('could not retrieve hostname', state=True) return str(h.get('out')).split('.')[0]
[ "def", "get_hostname", "(", ")", ":", "h", "=", "shell_run", "(", "'uname -n'", ",", "critical", "=", "False", ",", "verbose", "=", "False", ")", "if", "not", "h", ":", "h", "=", "shell_run", "(", "'hostname'", ",", "critical", "=", "False", ",", "verbose", "=", "False", ")", "if", "not", "h", ":", "shell_notify", "(", "'could not retrieve hostname'", ",", "state", "=", "True", ")", "return", "str", "(", "h", ".", "get", "(", "'out'", ")", ")", ".", "split", "(", "'.'", ")", "[", "0", "]" ]
32.611111
0.001656
def modifie(self, key: str, value: Any) -> None: """Store the modification. `value` should be dumped in DB compatible format.""" if key in self.FIELDS_OPTIONS: self.modifie_options(key, value) else: self.modifications[key] = value
[ "def", "modifie", "(", "self", ",", "key", ":", "str", ",", "value", ":", "Any", ")", "->", "None", ":", "if", "key", "in", "self", ".", "FIELDS_OPTIONS", ":", "self", ".", "modifie_options", "(", "key", ",", "value", ")", "else", ":", "self", ".", "modifications", "[", "key", "]", "=", "value" ]
45.5
0.010791
def main(): """Main function.""" time_start = time.time() logging.info('loading vocab file from dataset: %s', args.vocab) vocab_obj = nlp.data.utils._load_pretrained_vocab(args.vocab) tokenizer = BERTTokenizer( vocab=vocab_obj, lower='uncased' in args.vocab) input_files = [] for input_pattern in args.input_file.split(','): input_files.extend(glob.glob(os.path.expanduser(input_pattern))) logging.info('*** Reading from %d input files ***', len(input_files)) for input_file in input_files: logging.info(' %s', input_file) num_outputs = min(args.num_outputs, len(input_files)) output_dir = os.path.expanduser(args.output_dir) if not os.path.exists(output_dir): os.makedirs(output_dir) rng = random.Random(args.random_seed) nworker = args.num_workers # calculate the number of splits file_splits = [] split_size = (len(input_files) + num_outputs - 1) // num_outputs for i in range(num_outputs - 1): file_splits.append(input_files[i*split_size:(i+1)*split_size]) file_splits.append(input_files[(num_outputs-1)*split_size:]) # prepare workload suffix = 'npz' if args.format == 'numpy' else 'rec' count = 0 map_args = [] pool_args = (tokenizer, args.max_seq_length, args.dupe_factor,\ args.short_seq_prob, args.masked_lm_prob, args.max_predictions_per_seq, rng) for i, file_split in enumerate(file_splits): out = os.path.join(output_dir, 'part-{}.{}'.format(str(i).zfill(3), suffix)) count += len(file_split) map_args.append((file_split, out) + pool_args) # sanity check assert count == len(input_files) # dispatch to workers if nworker > 1: pool = Pool(nworker) pool.map(create_training_instances, map_args) else: for map_arg in map_args: create_training_instances(map_arg) time_end = time.time() logging.info('Time cost=%.1f', time_end - time_start)
[ "def", "main", "(", ")", ":", "time_start", "=", "time", ".", "time", "(", ")", "logging", ".", "info", "(", "'loading vocab file from dataset: %s'", ",", "args", ".", "vocab", ")", "vocab_obj", "=", "nlp", ".", "data", ".", "utils", ".", "_load_pretrained_vocab", "(", "args", ".", "vocab", ")", "tokenizer", "=", "BERTTokenizer", "(", "vocab", "=", "vocab_obj", ",", "lower", "=", "'uncased'", "in", "args", ".", "vocab", ")", "input_files", "=", "[", "]", "for", "input_pattern", "in", "args", ".", "input_file", ".", "split", "(", "','", ")", ":", "input_files", ".", "extend", "(", "glob", ".", "glob", "(", "os", ".", "path", ".", "expanduser", "(", "input_pattern", ")", ")", ")", "logging", ".", "info", "(", "'*** Reading from %d input files ***'", ",", "len", "(", "input_files", ")", ")", "for", "input_file", "in", "input_files", ":", "logging", ".", "info", "(", "' %s'", ",", "input_file", ")", "num_outputs", "=", "min", "(", "args", ".", "num_outputs", ",", "len", "(", "input_files", ")", ")", "output_dir", "=", "os", ".", "path", ".", "expanduser", "(", "args", ".", "output_dir", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "output_dir", ")", ":", "os", ".", "makedirs", "(", "output_dir", ")", "rng", "=", "random", ".", "Random", "(", "args", ".", "random_seed", ")", "nworker", "=", "args", ".", "num_workers", "# calculate the number of splits", "file_splits", "=", "[", "]", "split_size", "=", "(", "len", "(", "input_files", ")", "+", "num_outputs", "-", "1", ")", "//", "num_outputs", "for", "i", "in", "range", "(", "num_outputs", "-", "1", ")", ":", "file_splits", ".", "append", "(", "input_files", "[", "i", "*", "split_size", ":", "(", "i", "+", "1", ")", "*", "split_size", "]", ")", "file_splits", ".", "append", "(", "input_files", "[", "(", "num_outputs", "-", "1", ")", "*", "split_size", ":", "]", ")", "# prepare workload", "suffix", "=", "'npz'", "if", "args", ".", "format", "==", "'numpy'", "else", "'rec'", "count", "=", "0", "map_args", "=", "[", "]", "pool_args", "=", "(", "tokenizer", ",", "args", ".", "max_seq_length", ",", "args", ".", "dupe_factor", ",", "args", ".", "short_seq_prob", ",", "args", ".", "masked_lm_prob", ",", "args", ".", "max_predictions_per_seq", ",", "rng", ")", "for", "i", ",", "file_split", "in", "enumerate", "(", "file_splits", ")", ":", "out", "=", "os", ".", "path", ".", "join", "(", "output_dir", ",", "'part-{}.{}'", ".", "format", "(", "str", "(", "i", ")", ".", "zfill", "(", "3", ")", ",", "suffix", ")", ")", "count", "+=", "len", "(", "file_split", ")", "map_args", ".", "append", "(", "(", "file_split", ",", "out", ")", "+", "pool_args", ")", "# sanity check", "assert", "count", "==", "len", "(", "input_files", ")", "# dispatch to workers", "if", "nworker", ">", "1", ":", "pool", "=", "Pool", "(", "nworker", ")", "pool", ".", "map", "(", "create_training_instances", ",", "map_args", ")", "else", ":", "for", "map_arg", "in", "map_args", ":", "create_training_instances", "(", "map_arg", ")", "time_end", "=", "time", ".", "time", "(", ")", "logging", ".", "info", "(", "'Time cost=%.1f'", ",", "time_end", "-", "time_start", ")" ]
34.45614
0.001485
def write_namespaces(self, namespaces): """write the module-level namespace-generating callable.""" self.printer.writelines( "def _mako_get_namespace(context, name):", "try:", "return context.namespaces[(__name__, name)]", "except KeyError:", "_mako_generate_namespaces(context)", "return context.namespaces[(__name__, name)]", None, None ) self.printer.writeline("def _mako_generate_namespaces(context):") for node in namespaces.values(): if 'import' in node.attributes: self.compiler.has_ns_imports = True self.printer.start_source(node.lineno) if len(node.nodes): self.printer.writeline("def make_namespace():") export = [] identifiers = self.compiler.identifiers.branch(node) self.in_def = True class NSDefVisitor(object): def visitDefTag(s, node): s.visitDefOrBase(node) def visitBlockTag(s, node): s.visitDefOrBase(node) def visitDefOrBase(s, node): if node.is_anonymous: raise exceptions.CompileException( "Can't put anonymous blocks inside " "<%namespace>", **node.exception_kwargs ) self.write_inline_def(node, identifiers, nested=False) export.append(node.funcname) vis = NSDefVisitor() for n in node.nodes: n.accept_visitor(vis) self.printer.writeline("return [%s]" % (','.join(export))) self.printer.writeline(None) self.in_def = False callable_name = "make_namespace()" else: callable_name = "None" if 'file' in node.parsed_attributes: self.printer.writeline( "ns = runtime.TemplateNamespace(%r," " context._clean_inheritance_tokens()," " templateuri=%s, callables=%s, " " calling_uri=_template_uri)" % ( node.name, node.parsed_attributes.get('file', 'None'), callable_name, ) ) elif 'module' in node.parsed_attributes: self.printer.writeline( "ns = runtime.ModuleNamespace(%r," " context._clean_inheritance_tokens()," " callables=%s, calling_uri=_template_uri," " module=%s)" % ( node.name, callable_name, node.parsed_attributes.get( 'module', 'None') ) ) else: self.printer.writeline( "ns = runtime.Namespace(%r," " context._clean_inheritance_tokens()," " callables=%s, calling_uri=_template_uri)" % ( node.name, callable_name, ) ) if eval(node.attributes.get('inheritable', "False")): self.printer.writeline("context['self'].%s = ns" % (node.name)) self.printer.writeline( "context.namespaces[(__name__, %s)] = ns" % repr(node.name)) self.printer.write_blanks(1) if not len(namespaces): self.printer.writeline("pass") self.printer.writeline(None)
[ "def", "write_namespaces", "(", "self", ",", "namespaces", ")", ":", "self", ".", "printer", ".", "writelines", "(", "\"def _mako_get_namespace(context, name):\"", ",", "\"try:\"", ",", "\"return context.namespaces[(__name__, name)]\"", ",", "\"except KeyError:\"", ",", "\"_mako_generate_namespaces(context)\"", ",", "\"return context.namespaces[(__name__, name)]\"", ",", "None", ",", "None", ")", "self", ".", "printer", ".", "writeline", "(", "\"def _mako_generate_namespaces(context):\"", ")", "for", "node", "in", "namespaces", ".", "values", "(", ")", ":", "if", "'import'", "in", "node", ".", "attributes", ":", "self", ".", "compiler", ".", "has_ns_imports", "=", "True", "self", ".", "printer", ".", "start_source", "(", "node", ".", "lineno", ")", "if", "len", "(", "node", ".", "nodes", ")", ":", "self", ".", "printer", ".", "writeline", "(", "\"def make_namespace():\"", ")", "export", "=", "[", "]", "identifiers", "=", "self", ".", "compiler", ".", "identifiers", ".", "branch", "(", "node", ")", "self", ".", "in_def", "=", "True", "class", "NSDefVisitor", "(", "object", ")", ":", "def", "visitDefTag", "(", "s", ",", "node", ")", ":", "s", ".", "visitDefOrBase", "(", "node", ")", "def", "visitBlockTag", "(", "s", ",", "node", ")", ":", "s", ".", "visitDefOrBase", "(", "node", ")", "def", "visitDefOrBase", "(", "s", ",", "node", ")", ":", "if", "node", ".", "is_anonymous", ":", "raise", "exceptions", ".", "CompileException", "(", "\"Can't put anonymous blocks inside \"", "\"<%namespace>\"", ",", "*", "*", "node", ".", "exception_kwargs", ")", "self", ".", "write_inline_def", "(", "node", ",", "identifiers", ",", "nested", "=", "False", ")", "export", ".", "append", "(", "node", ".", "funcname", ")", "vis", "=", "NSDefVisitor", "(", ")", "for", "n", "in", "node", ".", "nodes", ":", "n", ".", "accept_visitor", "(", "vis", ")", "self", ".", "printer", ".", "writeline", "(", "\"return [%s]\"", "%", "(", "','", ".", "join", "(", "export", ")", ")", ")", "self", ".", "printer", ".", "writeline", "(", "None", ")", "self", ".", "in_def", "=", "False", "callable_name", "=", "\"make_namespace()\"", "else", ":", "callable_name", "=", "\"None\"", "if", "'file'", "in", "node", ".", "parsed_attributes", ":", "self", ".", "printer", ".", "writeline", "(", "\"ns = runtime.TemplateNamespace(%r,\"", "\" context._clean_inheritance_tokens(),\"", "\" templateuri=%s, callables=%s, \"", "\" calling_uri=_template_uri)\"", "%", "(", "node", ".", "name", ",", "node", ".", "parsed_attributes", ".", "get", "(", "'file'", ",", "'None'", ")", ",", "callable_name", ",", ")", ")", "elif", "'module'", "in", "node", ".", "parsed_attributes", ":", "self", ".", "printer", ".", "writeline", "(", "\"ns = runtime.ModuleNamespace(%r,\"", "\" context._clean_inheritance_tokens(),\"", "\" callables=%s, calling_uri=_template_uri,\"", "\" module=%s)\"", "%", "(", "node", ".", "name", ",", "callable_name", ",", "node", ".", "parsed_attributes", ".", "get", "(", "'module'", ",", "'None'", ")", ")", ")", "else", ":", "self", ".", "printer", ".", "writeline", "(", "\"ns = runtime.Namespace(%r,\"", "\" context._clean_inheritance_tokens(),\"", "\" callables=%s, calling_uri=_template_uri)\"", "%", "(", "node", ".", "name", ",", "callable_name", ",", ")", ")", "if", "eval", "(", "node", ".", "attributes", ".", "get", "(", "'inheritable'", ",", "\"False\"", ")", ")", ":", "self", ".", "printer", ".", "writeline", "(", "\"context['self'].%s = ns\"", "%", "(", "node", ".", "name", ")", ")", "self", ".", "printer", ".", "writeline", "(", 
"\"context.namespaces[(__name__, %s)] = ns\"", "%", "repr", "(", "node", ".", "name", ")", ")", "self", ".", "printer", ".", "write_blanks", "(", "1", ")", "if", "not", "len", "(", "namespaces", ")", ":", "self", ".", "printer", ".", "writeline", "(", "\"pass\"", ")", "self", ".", "printer", ".", "writeline", "(", "None", ")" ]
44.946237
0.001404
def stem(self, word): """Return the stem of a word according to the Schinke stemmer. Parameters ---------- word : str The word to stem Returns ------- str Word stem Examples -------- >>> stmr = Schinke() >>> stmr.stem('atque') {'n': 'atque', 'v': 'atque'} >>> stmr.stem('census') {'n': 'cens', 'v': 'censu'} >>> stmr.stem('virum') {'n': 'uir', 'v': 'uiru'} >>> stmr.stem('populusque') {'n': 'popul', 'v': 'populu'} >>> stmr.stem('senatus') {'n': 'senat', 'v': 'senatu'} """ word = normalize('NFKD', text_type(word.lower())) word = ''.join( c for c in word if c in { 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', } ) # Rule 2 word = word.replace('j', 'i').replace('v', 'u') # Rule 3 if word[-3:] == 'que': # This diverges from the paper by also returning 'que' itself # unstemmed if word[:-3] in self._keep_que or word == 'que': return {'n': word, 'v': word} else: word = word[:-3] # Base case will mean returning the words as is noun = word verb = word # Rule 4 for endlen in range(4, 0, -1): if word[-endlen:] in self._n_endings[endlen]: if len(word) - 2 >= endlen: noun = word[:-endlen] else: noun = word break for endlen in range(6, 0, -1): if word[-endlen:] in self._v_endings_strip[endlen]: if len(word) - 2 >= endlen: verb = word[:-endlen] else: verb = word break if word[-endlen:] in self._v_endings_alter[endlen]: if word[-endlen:] in { 'iuntur', 'erunt', 'untur', 'iunt', 'unt', }: new_word = word[:-endlen] + 'i' addlen = 1 elif word[-endlen:] in {'beris', 'bor', 'bo'}: new_word = word[:-endlen] + 'bi' addlen = 2 else: new_word = word[:-endlen] + 'eri' addlen = 3 # Technically this diverges from the paper by considering the # length of the stem without the new suffix if len(new_word) >= 2 + addlen: verb = new_word else: verb = word break return {'n': noun, 'v': verb}
[ "def", "stem", "(", "self", ",", "word", ")", ":", "word", "=", "normalize", "(", "'NFKD'", ",", "text_type", "(", "word", ".", "lower", "(", ")", ")", ")", "word", "=", "''", ".", "join", "(", "c", "for", "c", "in", "word", "if", "c", "in", "{", "'a'", ",", "'b'", ",", "'c'", ",", "'d'", ",", "'e'", ",", "'f'", ",", "'g'", ",", "'h'", ",", "'i'", ",", "'j'", ",", "'k'", ",", "'l'", ",", "'m'", ",", "'n'", ",", "'o'", ",", "'p'", ",", "'q'", ",", "'r'", ",", "'s'", ",", "'t'", ",", "'u'", ",", "'v'", ",", "'w'", ",", "'x'", ",", "'y'", ",", "'z'", ",", "}", ")", "# Rule 2", "word", "=", "word", ".", "replace", "(", "'j'", ",", "'i'", ")", ".", "replace", "(", "'v'", ",", "'u'", ")", "# Rule 3", "if", "word", "[", "-", "3", ":", "]", "==", "'que'", ":", "# This diverges from the paper by also returning 'que' itself", "# unstemmed", "if", "word", "[", ":", "-", "3", "]", "in", "self", ".", "_keep_que", "or", "word", "==", "'que'", ":", "return", "{", "'n'", ":", "word", ",", "'v'", ":", "word", "}", "else", ":", "word", "=", "word", "[", ":", "-", "3", "]", "# Base case will mean returning the words as is", "noun", "=", "word", "verb", "=", "word", "# Rule 4", "for", "endlen", "in", "range", "(", "4", ",", "0", ",", "-", "1", ")", ":", "if", "word", "[", "-", "endlen", ":", "]", "in", "self", ".", "_n_endings", "[", "endlen", "]", ":", "if", "len", "(", "word", ")", "-", "2", ">=", "endlen", ":", "noun", "=", "word", "[", ":", "-", "endlen", "]", "else", ":", "noun", "=", "word", "break", "for", "endlen", "in", "range", "(", "6", ",", "0", ",", "-", "1", ")", ":", "if", "word", "[", "-", "endlen", ":", "]", "in", "self", ".", "_v_endings_strip", "[", "endlen", "]", ":", "if", "len", "(", "word", ")", "-", "2", ">=", "endlen", ":", "verb", "=", "word", "[", ":", "-", "endlen", "]", "else", ":", "verb", "=", "word", "break", "if", "word", "[", "-", "endlen", ":", "]", "in", "self", ".", "_v_endings_alter", "[", "endlen", "]", ":", "if", "word", "[", "-", "endlen", ":", "]", "in", "{", "'iuntur'", ",", "'erunt'", ",", "'untur'", ",", "'iunt'", ",", "'unt'", ",", "}", ":", "new_word", "=", "word", "[", ":", "-", "endlen", "]", "+", "'i'", "addlen", "=", "1", "elif", "word", "[", "-", "endlen", ":", "]", "in", "{", "'beris'", ",", "'bor'", ",", "'bo'", "}", ":", "new_word", "=", "word", "[", ":", "-", "endlen", "]", "+", "'bi'", "addlen", "=", "2", "else", ":", "new_word", "=", "word", "[", ":", "-", "endlen", "]", "+", "'eri'", "addlen", "=", "3", "# Technically this diverges from the paper by considering the", "# length of the stem without the new suffix", "if", "len", "(", "new_word", ")", ">=", "2", "+", "addlen", ":", "verb", "=", "new_word", "else", ":", "verb", "=", "word", "break", "return", "{", "'n'", ":", "noun", ",", "'v'", ":", "verb", "}" ]
26.735537
0.000596
def adjacent(predicate, iterable, distance=1): """Return an iterable over `(bool, item)` tuples where the `item` is drawn from *iterable* and the `bool` indicates whether that item satisfies the *predicate* or is adjacent to an item that does. For example, to find whether items are adjacent to a ``3``:: >>> list(adjacent(lambda x: x == 3, range(6))) [(False, 0), (False, 1), (True, 2), (True, 3), (True, 4), (False, 5)] Set *distance* to change what counts as adjacent. For example, to find whether items are two places away from a ``3``: >>> list(adjacent(lambda x: x == 3, range(6), distance=2)) [(False, 0), (True, 1), (True, 2), (True, 3), (True, 4), (True, 5)] This is useful for contextualizing the results of a search function. For example, a code comparison tool might want to identify lines that have changed, but also surrounding lines to give the viewer of the diff context. The predicate function will only be called once for each item in the iterable. See also :func:`groupby_transform`, which can be used with this function to group ranges of items with the same `bool` value. """ # Allow distance=0 mainly for testing that it reproduces results with map() if distance < 0: raise ValueError('distance must be at least 0') i1, i2 = tee(iterable) padding = [False] * distance selected = chain(padding, map(predicate, i1), padding) adjacent_to_selected = map(any, windowed(selected, 2 * distance + 1)) return zip(adjacent_to_selected, i2)
[ "def", "adjacent", "(", "predicate", ",", "iterable", ",", "distance", "=", "1", ")", ":", "# Allow distance=0 mainly for testing that it reproduces results with map()", "if", "distance", "<", "0", ":", "raise", "ValueError", "(", "'distance must be at least 0'", ")", "i1", ",", "i2", "=", "tee", "(", "iterable", ")", "padding", "=", "[", "False", "]", "*", "distance", "selected", "=", "chain", "(", "padding", ",", "map", "(", "predicate", ",", "i1", ")", ",", "padding", ")", "adjacent_to_selected", "=", "map", "(", "any", ",", "windowed", "(", "selected", ",", "2", "*", "distance", "+", "1", ")", ")", "return", "zip", "(", "adjacent_to_selected", ",", "i2", ")" ]
41.945946
0.00063
def handle(cls, value, **kwargs): """Use a value from the environment or fall back to a default if the environment doesn't contain the variable. Format of value: <env_var>::<default value> For example: Groups: ${default app_security_groups::sg-12345,sg-67890} If `app_security_groups` is defined in the environment, its defined value will be returned. Otherwise, `sg-12345,sg-67890` will be the returned value. This allows defaults to be set at the config file level. """ try: env_var_name, default_val = value.split("::", 1) except ValueError: raise ValueError("Invalid value for default: %s. Must be in " "<env_var>::<default value> format." % value) if env_var_name in kwargs['context'].environment: return kwargs['context'].environment[env_var_name] else: return default_val
[ "def", "handle", "(", "cls", ",", "value", ",", "*", "*", "kwargs", ")", ":", "try", ":", "env_var_name", ",", "default_val", "=", "value", ".", "split", "(", "\"::\"", ",", "1", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"Invalid value for default: %s. Must be in \"", "\"<env_var>::<default value> format.\"", "%", "value", ")", "if", "env_var_name", "in", "kwargs", "[", "'context'", "]", ".", "environment", ":", "return", "kwargs", "[", "'context'", "]", ".", "environment", "[", "env_var_name", "]", "else", ":", "return", "default_val" ]
33.310345
0.002012
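A sketch of how the `handle` lookup above resolves a value, emulated with a plain dict in place of the real context object; all names and values are made up.

```python
value = "app_security_groups::sg-12345,sg-67890"

env_var_name, default_val = value.split("::", 1)
environment = {"app_security_groups": "sg-aaa111,sg-bbb222"}   # stack environment

resolved = environment.get(env_var_name, default_val)
print(resolved)   # 'sg-aaa111,sg-bbb222'; remove the entry to fall back to the default
```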
def preScale(self, sx, sy): """Calculate pre scaling and replace current matrix.""" self.a *= sx self.b *= sx self.c *= sy self.d *= sy return self
[ "def", "preScale", "(", "self", ",", "sx", ",", "sy", ")", ":", "self", ".", "a", "*=", "sx", "self", ".", "b", "*=", "sx", "self", ".", "c", "*=", "sy", "self", ".", "d", "*=", "sy", "return", "self" ]
27
0.010256
def find_amp_phase(angle, data, npeaks=3, min_amp=None, min_phase=None): """Estimate amplitude and phase of an approximately sinusoidal quantity using `scipy.optimize.curve_fit`. Phase is defined as the angle at which the cosine curve fit reaches its first peak. It is assumed that phase is positive. For example: data_fit = amp*np.cos(npeaks*(angle - phase)) + mean_data Parameters ---------- angle : numpy array Time series of angle values in radians data : numpy array Time series of data to be fit npeaks : int Number of peaks per revolution, or normalized frequency min_phase : float Minimum phase to allow for guess to least squares fit Returns ------- amp : float Amplitude of regressed cosine phase : float Angle of the first peak in radians """ # First subtract the mean of the data data = data - data.mean() # Make some guesses for parameters from a subset of data starting at an # integer multiple of periods if angle[0] != 0.0: angle1 = angle[0] + (2*np.pi/npeaks - (2*np.pi/npeaks) % angle[0]) else: angle1 = angle[0] angle1 += min_phase angle2 = angle1 + 2*np.pi/npeaks ind = np.logical_and(angle >= angle1, angle <= angle2) angle_sub = angle[ind] data_sub = data[ind] amp_guess = (data_sub.max() - data_sub.min())/2 phase_guess = angle[np.where(data_sub == data_sub.max())[0][0]] \ % (np.pi*2/npeaks) # Define the function we will try to fit to def func(angle, amp, phase, mean): return amp*np.cos(npeaks*(angle - phase)) + mean # Calculate fit p0 = amp_guess, phase_guess, 0.0 popt, pcov = curve_fit(func, angle, data, p0=p0) amp, phase, mean = popt return amp, phase
[ "def", "find_amp_phase", "(", "angle", ",", "data", ",", "npeaks", "=", "3", ",", "min_amp", "=", "None", ",", "min_phase", "=", "None", ")", ":", "# First subtract the mean of the data\r", "data", "=", "data", "-", "data", ".", "mean", "(", ")", "# Make some guesses for parameters from a subset of data starting at an\r", "# integer multiple of periods\r", "if", "angle", "[", "0", "]", "!=", "0.0", ":", "angle1", "=", "angle", "[", "0", "]", "+", "(", "2", "*", "np", ".", "pi", "/", "npeaks", "-", "(", "2", "*", "np", ".", "pi", "/", "npeaks", ")", "%", "angle", "[", "0", "]", ")", "else", ":", "angle1", "=", "angle", "[", "0", "]", "angle1", "+=", "min_phase", "angle2", "=", "angle1", "+", "2", "*", "np", ".", "pi", "/", "npeaks", "ind", "=", "np", ".", "logical_and", "(", "angle", ">=", "angle1", ",", "angle", "<=", "angle2", ")", "angle_sub", "=", "angle", "[", "ind", "]", "data_sub", "=", "data", "[", "ind", "]", "amp_guess", "=", "(", "data_sub", ".", "max", "(", ")", "-", "data_sub", ".", "min", "(", ")", ")", "/", "2", "phase_guess", "=", "angle", "[", "np", ".", "where", "(", "data_sub", "==", "data_sub", ".", "max", "(", ")", ")", "[", "0", "]", "[", "0", "]", "]", "%", "(", "np", ".", "pi", "*", "2", "/", "npeaks", ")", "# Define the function we will try to fit to\r", "def", "func", "(", "angle", ",", "amp", ",", "phase", ",", "mean", ")", ":", "return", "amp", "*", "np", ".", "cos", "(", "npeaks", "*", "(", "angle", "-", "phase", ")", ")", "+", "mean", "# Calculate fit\r", "p0", "=", "amp_guess", ",", "phase_guess", ",", "0.0", "popt", ",", "pcov", "=", "curve_fit", "(", "func", ",", "angle", ",", "data", ",", "p0", "=", "p0", ")", "amp", ",", "phase", ",", "mean", "=", "popt", "return", "amp", ",", "phase" ]
35.666667
0.001605
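A usage sketch for `find_amp_phase` above on a synthetic three-peaks-per-revolution signal with a known amplitude and phase. `min_phase` is passed explicitly because the guessing step adds it to the start of the fitting window.

```python
import numpy as np

angle = np.linspace(0.0, 4 * np.pi, 2000)                   # two revolutions, radians
true_amp, true_phase = 0.8, 0.3
data = true_amp * np.cos(3 * (angle - true_phase)) + 5.0    # the offset mean is removed inside

amp, phase = find_amp_phase(angle, data, npeaks=3, min_phase=0.0)
print(amp, phase)   # expected to be close to 0.8 and 0.3
```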
def get_ngroups(self, field=None): ''' Returns the ngroups count if it was specified in the query, otherwise raises a ValueError. If grouping on more than one field, provide the field argument to specify which count you are looking for. ''' field = field if field else self._determine_group_field(field) if 'ngroups' in self.data['grouped'][field]: return self.data['grouped'][field]['ngroups'] raise ValueError("ngroups not found in response. specify group.ngroups in the query.")
[ "def", "get_ngroups", "(", "self", ",", "field", "=", "None", ")", ":", "field", "=", "field", "if", "field", "else", "self", ".", "_determine_group_field", "(", "field", ")", "if", "'ngroups'", "in", "self", ".", "data", "[", "'grouped'", "]", "[", "field", "]", ":", "return", "self", ".", "data", "[", "'grouped'", "]", "[", "field", "]", "[", "'ngroups'", "]", "raise", "ValueError", "(", "\"ngroups not found in response. specify group.ngroups in the query.\"", ")" ]
52.8
0.009311
def get_next_iteration(self, iteration, iteration_kwargs={}): """ Returns a SH iteration with only evaluations on the biggest budget Parameters ---------- iteration: int the index of the iteration to be instantiated Returns ------- SuccessiveHalving: the SuccessiveHalving iteration with the corresponding number of configurations """ budgets = [self.max_budget] ns = [self.budget_per_iteration//self.max_budget] return(SuccessiveHalving(HPB_iter=iteration, num_configs=ns, budgets=budgets, config_sampler=self.config_generator.get_config, **iteration_kwargs))
[ "def", "get_next_iteration", "(", "self", ",", "iteration", ",", "iteration_kwargs", "=", "{", "}", ")", ":", "budgets", "=", "[", "self", ".", "max_budget", "]", "ns", "=", "[", "self", ".", "budget_per_iteration", "//", "self", ".", "max_budget", "]", "return", "(", "SuccessiveHalving", "(", "HPB_iter", "=", "iteration", ",", "num_configs", "=", "ns", ",", "budgets", "=", "budgets", ",", "config_sampler", "=", "self", ".", "config_generator", ".", "get_config", ",", "*", "*", "iteration_kwargs", ")", ")" ]
29.4
0.042834
def __EncodedAttribute_generic_encode_rgb24(self, rgb24, width=0, height=0, quality=0, format=_ImageFormat.RawImage): """Internal usage only""" if not is_seq(rgb24): raise TypeError("Expected sequence (str, numpy.ndarray, list, tuple " "or bytearray) as first argument") is_str = is_pure_str(rgb24) if is_str: if not width or not height: raise ValueError("When giving a string as data, you must also " "supply width and height") if np and isinstance(rgb24, np.ndarray): if rgb24.ndim != 3: if not width or not height: raise ValueError("When giving a non 2D numpy array, width and " "height must be supplied") if rgb24.nbytes / 3 != width * height: raise ValueError("numpy array size mismatch") else: if rgb24.itemsize != 1: raise TypeError("Expected numpy array with itemsize == 1") if not rgb24.flags.c_contiguous: raise TypeError("Currently, only contiguous, aligned numpy arrays " "are supported") if not rgb24.flags.aligned: raise TypeError("Currently, only contiguous, aligned numpy arrays " "are supported") if not is_str and (not width or not height): height = len(rgb24) if height < 1: raise IndexError("Expected sequence with at least one row") row0 = rgb24[0] if not is_seq(row0): raise IndexError("Expected sequence (str, numpy.ndarray, list, tuple or " "bytearray) inside a sequence") width = len(row0) if is_pure_str(row0) or type(row0) == bytearray: width /= 3 if format == _ImageFormat.RawImage: self._encode_rgb24(rgb24, width, height) elif format == _ImageFormat.JpegImage: self._encode_jpeg_rgb24(rgb24, width, height, quality)
[ "def", "__EncodedAttribute_generic_encode_rgb24", "(", "self", ",", "rgb24", ",", "width", "=", "0", ",", "height", "=", "0", ",", "quality", "=", "0", ",", "format", "=", "_ImageFormat", ".", "RawImage", ")", ":", "if", "not", "is_seq", "(", "rgb24", ")", ":", "raise", "TypeError", "(", "\"Expected sequence (str, numpy.ndarray, list, tuple \"", "\"or bytearray) as first argument\"", ")", "is_str", "=", "is_pure_str", "(", "rgb24", ")", "if", "is_str", ":", "if", "not", "width", "or", "not", "height", ":", "raise", "ValueError", "(", "\"When giving a string as data, you must also \"", "\"supply width and height\"", ")", "if", "np", "and", "isinstance", "(", "rgb24", ",", "np", ".", "ndarray", ")", ":", "if", "rgb24", ".", "ndim", "!=", "3", ":", "if", "not", "width", "or", "not", "height", ":", "raise", "ValueError", "(", "\"When giving a non 2D numpy array, width and \"", "\"height must be supplied\"", ")", "if", "rgb24", ".", "nbytes", "/", "3", "!=", "width", "*", "height", ":", "raise", "ValueError", "(", "\"numpy array size mismatch\"", ")", "else", ":", "if", "rgb24", ".", "itemsize", "!=", "1", ":", "raise", "TypeError", "(", "\"Expected numpy array with itemsize == 1\"", ")", "if", "not", "rgb24", ".", "flags", ".", "c_contiguous", ":", "raise", "TypeError", "(", "\"Currently, only contiguous, aligned numpy arrays \"", "\"are supported\"", ")", "if", "not", "rgb24", ".", "flags", ".", "aligned", ":", "raise", "TypeError", "(", "\"Currently, only contiguous, aligned numpy arrays \"", "\"are supported\"", ")", "if", "not", "is_str", "and", "(", "not", "width", "or", "not", "height", ")", ":", "height", "=", "len", "(", "rgb24", ")", "if", "height", "<", "1", ":", "raise", "IndexError", "(", "\"Expected sequence with at least one row\"", ")", "row0", "=", "rgb24", "[", "0", "]", "if", "not", "is_seq", "(", "row0", ")", ":", "raise", "IndexError", "(", "\"Expected sequence (str, numpy.ndarray, list, tuple or \"", "\"bytearray) inside a sequence\"", ")", "width", "=", "len", "(", "row0", ")", "if", "is_pure_str", "(", "row0", ")", "or", "type", "(", "row0", ")", "==", "bytearray", ":", "width", "/=", "3", "if", "format", "==", "_ImageFormat", ".", "RawImage", ":", "self", ".", "_encode_rgb24", "(", "rgb24", ",", "width", ",", "height", ")", "elif", "format", "==", "_ImageFormat", ".", "JpegImage", ":", "self", ".", "_encode_jpeg_rgb24", "(", "rgb24", ",", "width", ",", "height", ",", "quality", ")" ]
43.911111
0.001485
def disconnect_handler(remote, *args, **kwargs): """Handle unlinking of remote account. :param remote: The remote application. :returns: The HTML response. """ if not current_user.is_authenticated: return current_app.login_manager.unauthorized() remote_account = RemoteAccount.get(user_id=current_user.get_id(), client_id=remote.consumer_key) external_ids = [i.id for i in current_user.external_identifiers if i.method == GLOBUS_EXTERNAL_METHOD] if external_ids: oauth_unlink_external_id(dict(id=external_ids[0], method=GLOBUS_EXTERNAL_METHOD)) if remote_account: with db.session.begin_nested(): remote_account.delete() return redirect(url_for('invenio_oauthclient_settings.index'))
[ "def", "disconnect_handler", "(", "remote", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "current_user", ".", "is_authenticated", ":", "return", "current_app", ".", "login_manager", ".", "unauthorized", "(", ")", "remote_account", "=", "RemoteAccount", ".", "get", "(", "user_id", "=", "current_user", ".", "get_id", "(", ")", ",", "client_id", "=", "remote", ".", "consumer_key", ")", "external_ids", "=", "[", "i", ".", "id", "for", "i", "in", "current_user", ".", "external_identifiers", "if", "i", ".", "method", "==", "GLOBUS_EXTERNAL_METHOD", "]", "if", "external_ids", ":", "oauth_unlink_external_id", "(", "dict", "(", "id", "=", "external_ids", "[", "0", "]", ",", "method", "=", "GLOBUS_EXTERNAL_METHOD", ")", ")", "if", "remote_account", ":", "with", "db", ".", "session", ".", "begin_nested", "(", ")", ":", "remote_account", ".", "delete", "(", ")", "return", "redirect", "(", "url_for", "(", "'invenio_oauthclient_settings.index'", ")", ")" ]
36.434783
0.001163
def updateMesh(self, polydata): """ Overwrite the polygonal mesh of the actor with a new one. """ self.poly = polydata self.mapper.SetInputData(polydata) self.mapper.Modified() return self
[ "def", "updateMesh", "(", "self", ",", "polydata", ")", ":", "self", ".", "poly", "=", "polydata", "self", ".", "mapper", ".", "SetInputData", "(", "polydata", ")", "self", ".", "mapper", ".", "Modified", "(", ")", "return", "self" ]
29.625
0.008197
def Y_ampl(self, new_y_scale): """Make scaling on Y axis using predefined values""" self.parent.value('y_scale', new_y_scale) self.parent.traces.display()
[ "def", "Y_ampl", "(", "self", ",", "new_y_scale", ")", ":", "self", ".", "parent", ".", "value", "(", "'y_scale'", ",", "new_y_scale", ")", "self", ".", "parent", ".", "traces", ".", "display", "(", ")" ]
43.75
0.011236
def Print(self): """Prints the hypotheses and their probabilities.""" for hypo, prob in sorted(self.Items()): print(hypo, prob)
[ "def", "Print", "(", "self", ")", ":", "for", "hypo", ",", "prob", "in", "sorted", "(", "self", ".", "Items", "(", ")", ")", ":", "print", "(", "hypo", ",", "prob", ")" ]
38
0.012903
def processFiles(args): """ Generates and error checks each file's information before the compilation actually starts """ to_process = [] for filename in args['filenames']: file = dict() if args['include']: file['include'] = INCLUDE_STRING + ''.join( ['-I' + item for item in args['include']]) else: file['include'] = INCLUDE_STRING file['file_path'] = getPath(filename) file['file_base_name'] = \ os.path.splitext(os.path.basename(file['file_path']))[0] file['no_extension'], file['extension'] = os.path.splitext( file['file_path']) if file['extension'] not in CYTHONIZABLE_FILE_EXTS: raise CytherError( "The file '{}' is not a designated cython file".format( file['file_path'])) base_path = os.path.dirname(file['file_path']) local_build = args['local'] if not local_build: cache_name = os.path.join(base_path, '__cythercache__') os.makedirs(cache_name, exist_ok=True) file['c_name'] = os.path.join(cache_name, file['file_base_name']) + '.c' else: file['c_name'] = file['no_extension'] + '.c' file['object_file_name'] = os.path.splitext(file['c_name'])[0] + '.o' output_name = args['output_name'] if args['watch']: file['output_name'] = file['no_extension']+DEFAULT_OUTPUT_EXTENSION elif output_name: if os.path.exists(output_name) and os.path.isfile(output_name): file['output_name'] = output_name else: dirname = os.path.dirname(output_name) if not dirname: dirname = os.getcwd() if os.path.exists(dirname): file['output_name'] = output_name else: raise CytherError('The directory specified to write' 'the output file in does not exist') else: file['output_name'] = file['no_extension']+DEFAULT_OUTPUT_EXTENSION file['stamp_if_error'] = 0 to_process.append(file) return to_process
[ "def", "processFiles", "(", "args", ")", ":", "to_process", "=", "[", "]", "for", "filename", "in", "args", "[", "'filenames'", "]", ":", "file", "=", "dict", "(", ")", "if", "args", "[", "'include'", "]", ":", "file", "[", "'include'", "]", "=", "INCLUDE_STRING", "+", "''", ".", "join", "(", "[", "'-I'", "+", "item", "for", "item", "in", "args", "[", "'include'", "]", "]", ")", "else", ":", "file", "[", "'include'", "]", "=", "INCLUDE_STRING", "file", "[", "'file_path'", "]", "=", "getPath", "(", "filename", ")", "file", "[", "'file_base_name'", "]", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "basename", "(", "file", "[", "'file_path'", "]", ")", ")", "[", "0", "]", "file", "[", "'no_extension'", "]", ",", "file", "[", "'extension'", "]", "=", "os", ".", "path", ".", "splitext", "(", "file", "[", "'file_path'", "]", ")", "if", "file", "[", "'extension'", "]", "not", "in", "CYTHONIZABLE_FILE_EXTS", ":", "raise", "CytherError", "(", "\"The file '{}' is not a designated cython file\"", ".", "format", "(", "file", "[", "'file_path'", "]", ")", ")", "base_path", "=", "os", ".", "path", ".", "dirname", "(", "file", "[", "'file_path'", "]", ")", "local_build", "=", "args", "[", "'local'", "]", "if", "not", "local_build", ":", "cache_name", "=", "os", ".", "path", ".", "join", "(", "base_path", ",", "'__cythercache__'", ")", "os", ".", "makedirs", "(", "cache_name", ",", "exist_ok", "=", "True", ")", "file", "[", "'c_name'", "]", "=", "os", ".", "path", ".", "join", "(", "cache_name", ",", "file", "[", "'file_base_name'", "]", ")", "+", "'.c'", "else", ":", "file", "[", "'c_name'", "]", "=", "file", "[", "'no_extension'", "]", "+", "'.c'", "file", "[", "'object_file_name'", "]", "=", "os", ".", "path", ".", "splitext", "(", "file", "[", "'c_name'", "]", ")", "[", "0", "]", "+", "'.o'", "output_name", "=", "args", "[", "'output_name'", "]", "if", "args", "[", "'watch'", "]", ":", "file", "[", "'output_name'", "]", "=", "file", "[", "'no_extension'", "]", "+", "DEFAULT_OUTPUT_EXTENSION", "elif", "output_name", ":", "if", "os", ".", "path", ".", "exists", "(", "output_name", ")", "and", "os", ".", "path", ".", "isfile", "(", "output_name", ")", ":", "file", "[", "'output_name'", "]", "=", "output_name", "else", ":", "dirname", "=", "os", ".", "path", ".", "dirname", "(", "output_name", ")", "if", "not", "dirname", ":", "dirname", "=", "os", ".", "getcwd", "(", ")", "if", "os", ".", "path", ".", "exists", "(", "dirname", ")", ":", "file", "[", "'output_name'", "]", "=", "output_name", "else", ":", "raise", "CytherError", "(", "'The directory specified to write'", "'the output file in does not exist'", ")", "else", ":", "file", "[", "'output_name'", "]", "=", "file", "[", "'no_extension'", "]", "+", "DEFAULT_OUTPUT_EXTENSION", "file", "[", "'stamp_if_error'", "]", "=", "0", "to_process", ".", "append", "(", "file", ")", "return", "to_process" ]
40.472727
0.001316
def rollback(self): """Implementation of NAPALM method rollback.""" commands = [] commands.append('configure replace flash:rollback-0') commands.append('write memory') self.device.run_commands(commands)
[ "def", "rollback", "(", "self", ")", ":", "commands", "=", "[", "]", "commands", ".", "append", "(", "'configure replace flash:rollback-0'", ")", "commands", ".", "append", "(", "'write memory'", ")", "self", ".", "device", ".", "run_commands", "(", "commands", ")" ]
39.5
0.008264
def start(): r"""Starts ec. """ processPendingModules() if not state.main_module_name in ModuleMembers: # don't start the core when main is not Ec-ed return MainModule = sys.modules[state.main_module_name] if not MainModule.__ec_member__.Members: # there was some error while loading script(s) return global BaseGroup BaseGroup = MainModule.__ec_member__ Argv = sys.argv[1:] global mode mode = 'd' if Argv else 's' # dispatch / shell mode if mode == 's': import shell shell.init() else: import dispatch dispatch.init(Argv) processExitHooks()
[ "def", "start", "(", ")", ":", "processPendingModules", "(", ")", "if", "not", "state", ".", "main_module_name", "in", "ModuleMembers", ":", "# don't start the core when main is not Ec-ed\r", "return", "MainModule", "=", "sys", ".", "modules", "[", "state", ".", "main_module_name", "]", "if", "not", "MainModule", ".", "__ec_member__", ".", "Members", ":", "# there was some error while loading script(s)\r", "return", "global", "BaseGroup", "BaseGroup", "=", "MainModule", ".", "__ec_member__", "Argv", "=", "sys", ".", "argv", "[", "1", ":", "]", "global", "mode", "mode", "=", "'d'", "if", "Argv", "else", "'s'", "# dispatch / shell mode\r", "if", "mode", "==", "'s'", ":", "import", "shell", "shell", ".", "init", "(", ")", "else", ":", "import", "dispatch", "dispatch", ".", "init", "(", "Argv", ")", "processExitHooks", "(", ")" ]
20.724138
0.031797
def generate_oauth2_headers(self): """Generates header for oauth2 """ encoded_credentials = base64.b64encode(('{0}:{1}'.format(self.consumer_key,self.consumer_secret)).encode('utf-8')) headers={ 'Authorization':'Basic {0}'.format(encoded_credentials.decode('utf-8')), 'Content-Type': 'application/x-www-form-urlencoded' } return headers
[ "def", "generate_oauth2_headers", "(", "self", ")", ":", "encoded_credentials", "=", "base64", ".", "b64encode", "(", "(", "'{0}:{1}'", ".", "format", "(", "self", ".", "consumer_key", ",", "self", ".", "consumer_secret", ")", ")", ".", "encode", "(", "'utf-8'", ")", ")", "headers", "=", "{", "'Authorization'", ":", "'Basic {0}'", ".", "format", "(", "encoded_credentials", ".", "decode", "(", "'utf-8'", ")", ")", ",", "'Content-Type'", ":", "'application/x-www-form-urlencoded'", "}", "return", "headers" ]
40
0.017115
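A standalone sketch of the header construction in the record above; the key and secret values are made up and the helper name is hypothetical:

import base64

def make_basic_auth_headers(consumer_key, consumer_secret):
    # Join key and secret with a colon, then base64-encode the UTF-8 bytes,
    # as used for an OAuth2 client-credentials token request.
    credentials = "{0}:{1}".format(consumer_key, consumer_secret).encode("utf-8")
    encoded = base64.b64encode(credentials).decode("utf-8")
    return {
        "Authorization": "Basic {0}".format(encoded),
        "Content-Type": "application/x-www-form-urlencoded",
    }

print(make_basic_auth_headers("demo-key", "demo-secret"))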
def _add_snps( self, snps, discrepant_snp_positions_threshold, discrepant_genotypes_threshold, save_output, ): """ Add SNPs to this Individual. Parameters ---------- snps : SNPs SNPs to add discrepant_snp_positions_threshold : int see above discrepant_genotypes_threshold : int see above save_output see above Returns ------- discrepant_positions : pandas.DataFrame discrepant_genotypes : pandas.DataFrame """ discrepant_positions = pd.DataFrame() discrepant_genotypes = pd.DataFrame() if snps.snps is None: return discrepant_positions, discrepant_genotypes build = snps.build source = [s.strip() for s in snps.source.split(",")] if not snps.build_detected: print("build not detected, assuming build {}".format(snps.build)) if self._build is None: self._build = build elif self._build != build: print( "build / assembly mismatch between current build of SNPs and SNPs being loaded" ) # ensure there area always two X alleles snps = self._double_single_alleles(snps.snps, "X") if self._snps is None: self._source.extend(source) self._snps = snps else: common_snps = self._snps.join(snps, how="inner", rsuffix="_added") discrepant_positions = common_snps.loc[ (common_snps["chrom"] != common_snps["chrom_added"]) | (common_snps["pos"] != common_snps["pos_added"]) ] if 0 < len(discrepant_positions) < discrepant_snp_positions_threshold: print( str(len(discrepant_positions)) + " SNP positions were discrepant; " "keeping original positions" ) if save_output: self._discrepant_positions_file_count += 1 lineage.save_df_as_csv( discrepant_positions, self._output_dir, self.get_var_name() + "_discrepant_positions_" + str(self._discrepant_positions_file_count) + ".csv", ) elif len(discrepant_positions) >= discrepant_snp_positions_threshold: print( "too many SNPs differ in position; ensure same genome build is being used" ) return discrepant_positions, discrepant_genotypes # remove null genotypes common_snps = common_snps.loc[ ~common_snps["genotype"].isnull() & ~common_snps["genotype_added"].isnull() ] # discrepant genotypes are where alleles are not equivalent (i.e., alleles are not the # same and not swapped) discrepant_genotypes = common_snps.loc[ ( (common_snps["genotype"].str.len() == 1) & (common_snps["genotype_added"].str.len() == 1) & ~( common_snps["genotype"].str[0] == common_snps["genotype_added"].str[0] ) ) | ( (common_snps["genotype"].str.len() == 2) & (common_snps["genotype_added"].str.len() == 2) & ~( ( common_snps["genotype"].str[0] == common_snps["genotype_added"].str[0] ) & ( common_snps["genotype"].str[1] == common_snps["genotype_added"].str[1] ) ) & ~( ( common_snps["genotype"].str[0] == common_snps["genotype_added"].str[1] ) & ( common_snps["genotype"].str[1] == common_snps["genotype_added"].str[0] ) ) ) ] if 0 < len(discrepant_genotypes) < discrepant_genotypes_threshold: print( str(len(discrepant_genotypes)) + " SNP genotypes were discrepant; " "marking those as null" ) if save_output: self._discrepant_genotypes_file_count += 1 lineage.save_df_as_csv( discrepant_genotypes, self._output_dir, self.get_var_name() + "_discrepant_genotypes_" + str(self._discrepant_genotypes_file_count) + ".csv", ) elif len(discrepant_genotypes) >= discrepant_genotypes_threshold: print( "too many SNPs differ in their genotype; ensure file is for same " "individual" ) return discrepant_positions, discrepant_genotypes # add new SNPs self._source.extend(source) self._snps = self._snps.combine_first(snps) self._snps.loc[discrepant_genotypes.index, "genotype"] = np.nan # combine_first converts position to float64, so convert it 
back to int64 self._snps["pos"] = self._snps["pos"].astype(np.int64) self._snps = sort_snps(self._snps) return discrepant_positions, discrepant_genotypes
[ "def", "_add_snps", "(", "self", ",", "snps", ",", "discrepant_snp_positions_threshold", ",", "discrepant_genotypes_threshold", ",", "save_output", ",", ")", ":", "discrepant_positions", "=", "pd", ".", "DataFrame", "(", ")", "discrepant_genotypes", "=", "pd", ".", "DataFrame", "(", ")", "if", "snps", ".", "snps", "is", "None", ":", "return", "discrepant_positions", ",", "discrepant_genotypes", "build", "=", "snps", ".", "build", "source", "=", "[", "s", ".", "strip", "(", ")", "for", "s", "in", "snps", ".", "source", ".", "split", "(", "\",\"", ")", "]", "if", "not", "snps", ".", "build_detected", ":", "print", "(", "\"build not detected, assuming build {}\"", ".", "format", "(", "snps", ".", "build", ")", ")", "if", "self", ".", "_build", "is", "None", ":", "self", ".", "_build", "=", "build", "elif", "self", ".", "_build", "!=", "build", ":", "print", "(", "\"build / assembly mismatch between current build of SNPs and SNPs being loaded\"", ")", "# ensure there area always two X alleles", "snps", "=", "self", ".", "_double_single_alleles", "(", "snps", ".", "snps", ",", "\"X\"", ")", "if", "self", ".", "_snps", "is", "None", ":", "self", ".", "_source", ".", "extend", "(", "source", ")", "self", ".", "_snps", "=", "snps", "else", ":", "common_snps", "=", "self", ".", "_snps", ".", "join", "(", "snps", ",", "how", "=", "\"inner\"", ",", "rsuffix", "=", "\"_added\"", ")", "discrepant_positions", "=", "common_snps", ".", "loc", "[", "(", "common_snps", "[", "\"chrom\"", "]", "!=", "common_snps", "[", "\"chrom_added\"", "]", ")", "|", "(", "common_snps", "[", "\"pos\"", "]", "!=", "common_snps", "[", "\"pos_added\"", "]", ")", "]", "if", "0", "<", "len", "(", "discrepant_positions", ")", "<", "discrepant_snp_positions_threshold", ":", "print", "(", "str", "(", "len", "(", "discrepant_positions", ")", ")", "+", "\" SNP positions were discrepant; \"", "\"keeping original positions\"", ")", "if", "save_output", ":", "self", ".", "_discrepant_positions_file_count", "+=", "1", "lineage", ".", "save_df_as_csv", "(", "discrepant_positions", ",", "self", ".", "_output_dir", ",", "self", ".", "get_var_name", "(", ")", "+", "\"_discrepant_positions_\"", "+", "str", "(", "self", ".", "_discrepant_positions_file_count", ")", "+", "\".csv\"", ",", ")", "elif", "len", "(", "discrepant_positions", ")", ">=", "discrepant_snp_positions_threshold", ":", "print", "(", "\"too many SNPs differ in position; ensure same genome build is being used\"", ")", "return", "discrepant_positions", ",", "discrepant_genotypes", "# remove null genotypes", "common_snps", "=", "common_snps", ".", "loc", "[", "~", "common_snps", "[", "\"genotype\"", "]", ".", "isnull", "(", ")", "&", "~", "common_snps", "[", "\"genotype_added\"", "]", ".", "isnull", "(", ")", "]", "# discrepant genotypes are where alleles are not equivalent (i.e., alleles are not the", "# same and not swapped)", "discrepant_genotypes", "=", "common_snps", ".", "loc", "[", "(", "(", "common_snps", "[", "\"genotype\"", "]", ".", "str", ".", "len", "(", ")", "==", "1", ")", "&", "(", "common_snps", "[", "\"genotype_added\"", "]", ".", "str", ".", "len", "(", ")", "==", "1", ")", "&", "~", "(", "common_snps", "[", "\"genotype\"", "]", ".", "str", "[", "0", "]", "==", "common_snps", "[", "\"genotype_added\"", "]", ".", "str", "[", "0", "]", ")", ")", "|", "(", "(", "common_snps", "[", "\"genotype\"", "]", ".", "str", ".", "len", "(", ")", "==", "2", ")", "&", "(", "common_snps", "[", "\"genotype_added\"", "]", ".", "str", ".", "len", "(", ")", "==", "2", ")", "&", 
"~", "(", "(", "common_snps", "[", "\"genotype\"", "]", ".", "str", "[", "0", "]", "==", "common_snps", "[", "\"genotype_added\"", "]", ".", "str", "[", "0", "]", ")", "&", "(", "common_snps", "[", "\"genotype\"", "]", ".", "str", "[", "1", "]", "==", "common_snps", "[", "\"genotype_added\"", "]", ".", "str", "[", "1", "]", ")", ")", "&", "~", "(", "(", "common_snps", "[", "\"genotype\"", "]", ".", "str", "[", "0", "]", "==", "common_snps", "[", "\"genotype_added\"", "]", ".", "str", "[", "1", "]", ")", "&", "(", "common_snps", "[", "\"genotype\"", "]", ".", "str", "[", "1", "]", "==", "common_snps", "[", "\"genotype_added\"", "]", ".", "str", "[", "0", "]", ")", ")", ")", "]", "if", "0", "<", "len", "(", "discrepant_genotypes", ")", "<", "discrepant_genotypes_threshold", ":", "print", "(", "str", "(", "len", "(", "discrepant_genotypes", ")", ")", "+", "\" SNP genotypes were discrepant; \"", "\"marking those as null\"", ")", "if", "save_output", ":", "self", ".", "_discrepant_genotypes_file_count", "+=", "1", "lineage", ".", "save_df_as_csv", "(", "discrepant_genotypes", ",", "self", ".", "_output_dir", ",", "self", ".", "get_var_name", "(", ")", "+", "\"_discrepant_genotypes_\"", "+", "str", "(", "self", ".", "_discrepant_genotypes_file_count", ")", "+", "\".csv\"", ",", ")", "elif", "len", "(", "discrepant_genotypes", ")", ">=", "discrepant_genotypes_threshold", ":", "print", "(", "\"too many SNPs differ in their genotype; ensure file is for same \"", "\"individual\"", ")", "return", "discrepant_positions", ",", "discrepant_genotypes", "# add new SNPs", "self", ".", "_source", ".", "extend", "(", "source", ")", "self", ".", "_snps", "=", "self", ".", "_snps", ".", "combine_first", "(", "snps", ")", "self", ".", "_snps", ".", "loc", "[", "discrepant_genotypes", ".", "index", ",", "\"genotype\"", "]", "=", "np", ".", "nan", "# combine_first converts position to float64, so convert it back to int64", "self", ".", "_snps", "[", "\"pos\"", "]", "=", "self", ".", "_snps", "[", "\"pos\"", "]", ".", "astype", "(", "np", ".", "int64", ")", "self", ".", "_snps", "=", "sort_snps", "(", "self", ".", "_snps", ")", "return", "discrepant_positions", ",", "discrepant_genotypes" ]
36.579618
0.002034
def get_points(orig, dest, taillen): """Return a pair of lists of points for use making an arrow. The first list is the beginning and end point of the trunk of the arrow. The second list is the arrowhead. """ # Adjust the start and end points so they're on the first non-transparent pixel. # y = slope(x-ox) + oy # x = (y - oy) / slope + ox ox, oy = orig.center ow, oh = orig.size dx, dy = dest.center dw, dh = dest.size if ox < dx: leftx = ox rightx = dx xco = 1 elif ox > dx: leftx = ox * -1 rightx = dx * -1 xco = -1 else: # straight up and down arrow return up_and_down(orig, dest, taillen) if oy < dy: boty = oy topy = dy yco = 1 elif oy > dy: boty = oy * -1 topy = dy * -1 yco = -1 else: # straight left and right arrow return left_and_right(orig, dest, taillen) slope = (topy - boty) / (rightx - leftx) # start from the earliest point that intersects the bounding box. # work toward the center to find a non-transparent pixel # y - boty = ((topy-boty)/(rightx-leftx))*(x - leftx) if slope <= 1: for rightx in range( int(rightx - dw / 2), int(rightx)+1 ): topy = slope * (rightx - leftx) + boty if dest.collide_point(rightx * xco, topy * yco): rightx = float(rightx - 1) for pip in range(10): rightx += 0.1 * pip topy = slope * (rightx - leftx) + boty if dest.collide_point(rightx * xco, topy * yco): break break for leftx in range( int(leftx + ow / 2), int(leftx)-1, -1 ): boty = slope * (leftx - rightx) + topy if orig.collide_point(leftx * xco, boty * yco): leftx = float(leftx + 1) for pip in range(10): leftx -= 0.1 * pip boty = slope * (leftx - rightx) + topy if orig.collide_point(leftx * xco, boty * yco): break break else: # x = leftx + ((rightx-leftx)(y - boty))/(topy-boty) for topy in range( int(topy - dh / 2), int(topy) + 1 ): rightx = leftx + (topy - boty) / slope if dest.collide_point(rightx * xco, topy * yco): topy = float(topy - 1) for pip in range(10): topy += 0.1 * pip rightx = leftx + (topy - boty) / slope if dest.collide_point(rightx * xco, topy * yco): break break for boty in range( int(boty + oh / 2), int(boty) - 1, -1 ): leftx = (boty - topy) / slope + rightx if orig.collide_point(leftx * xco, boty * yco): boty = float(boty + 1) for pip in range(10): boty -= 0.1 * pip leftx = (boty - topy) / slope + rightx if orig.collide_point(leftx * xco, boty * yco): break break rise = topy - boty run = rightx - leftx try: start_theta = atan(rise/run) except ZeroDivisionError: return up_and_down(orig, dest, taillen) try: end_theta = atan(run/rise) except ZeroDivisionError: return left_and_right(orig, dest, taillen) # make the little wedge at the end so you can tell which way the # arrow's pointing, and flip it all back around to the way it was top_theta = start_theta - fortyfive bot_theta = pi - fortyfive - end_theta xoff1 = cos(top_theta) * taillen yoff1 = sin(top_theta) * taillen xoff2 = cos(bot_theta) * taillen yoff2 = sin(bot_theta) * taillen x1 = (rightx - xoff1) * xco x2 = (rightx - xoff2) * xco y1 = (topy - yoff1) * yco y2 = (topy - yoff2) * yco startx = leftx * xco starty = boty * yco endx = rightx * xco endy = topy * yco return ( [startx, starty, endx, endy], [x1, y1, endx, endy, x2, y2] )
[ "def", "get_points", "(", "orig", ",", "dest", ",", "taillen", ")", ":", "# Adjust the start and end points so they're on the first non-transparent pixel.", "# y = slope(x-ox) + oy", "# x = (y - oy) / slope + ox", "ox", ",", "oy", "=", "orig", ".", "center", "ow", ",", "oh", "=", "orig", ".", "size", "dx", ",", "dy", "=", "dest", ".", "center", "dw", ",", "dh", "=", "dest", ".", "size", "if", "ox", "<", "dx", ":", "leftx", "=", "ox", "rightx", "=", "dx", "xco", "=", "1", "elif", "ox", ">", "dx", ":", "leftx", "=", "ox", "*", "-", "1", "rightx", "=", "dx", "*", "-", "1", "xco", "=", "-", "1", "else", ":", "# straight up and down arrow", "return", "up_and_down", "(", "orig", ",", "dest", ",", "taillen", ")", "if", "oy", "<", "dy", ":", "boty", "=", "oy", "topy", "=", "dy", "yco", "=", "1", "elif", "oy", ">", "dy", ":", "boty", "=", "oy", "*", "-", "1", "topy", "=", "dy", "*", "-", "1", "yco", "=", "-", "1", "else", ":", "# straight left and right arrow", "return", "left_and_right", "(", "orig", ",", "dest", ",", "taillen", ")", "slope", "=", "(", "topy", "-", "boty", ")", "/", "(", "rightx", "-", "leftx", ")", "# start from the earliest point that intersects the bounding box.", "# work toward the center to find a non-transparent pixel", "# y - boty = ((topy-boty)/(rightx-leftx))*(x - leftx)", "if", "slope", "<=", "1", ":", "for", "rightx", "in", "range", "(", "int", "(", "rightx", "-", "dw", "/", "2", ")", ",", "int", "(", "rightx", ")", "+", "1", ")", ":", "topy", "=", "slope", "*", "(", "rightx", "-", "leftx", ")", "+", "boty", "if", "dest", ".", "collide_point", "(", "rightx", "*", "xco", ",", "topy", "*", "yco", ")", ":", "rightx", "=", "float", "(", "rightx", "-", "1", ")", "for", "pip", "in", "range", "(", "10", ")", ":", "rightx", "+=", "0.1", "*", "pip", "topy", "=", "slope", "*", "(", "rightx", "-", "leftx", ")", "+", "boty", "if", "dest", ".", "collide_point", "(", "rightx", "*", "xco", ",", "topy", "*", "yco", ")", ":", "break", "break", "for", "leftx", "in", "range", "(", "int", "(", "leftx", "+", "ow", "/", "2", ")", ",", "int", "(", "leftx", ")", "-", "1", ",", "-", "1", ")", ":", "boty", "=", "slope", "*", "(", "leftx", "-", "rightx", ")", "+", "topy", "if", "orig", ".", "collide_point", "(", "leftx", "*", "xco", ",", "boty", "*", "yco", ")", ":", "leftx", "=", "float", "(", "leftx", "+", "1", ")", "for", "pip", "in", "range", "(", "10", ")", ":", "leftx", "-=", "0.1", "*", "pip", "boty", "=", "slope", "*", "(", "leftx", "-", "rightx", ")", "+", "topy", "if", "orig", ".", "collide_point", "(", "leftx", "*", "xco", ",", "boty", "*", "yco", ")", ":", "break", "break", "else", ":", "# x = leftx + ((rightx-leftx)(y - boty))/(topy-boty)", "for", "topy", "in", "range", "(", "int", "(", "topy", "-", "dh", "/", "2", ")", ",", "int", "(", "topy", ")", "+", "1", ")", ":", "rightx", "=", "leftx", "+", "(", "topy", "-", "boty", ")", "/", "slope", "if", "dest", ".", "collide_point", "(", "rightx", "*", "xco", ",", "topy", "*", "yco", ")", ":", "topy", "=", "float", "(", "topy", "-", "1", ")", "for", "pip", "in", "range", "(", "10", ")", ":", "topy", "+=", "0.1", "*", "pip", "rightx", "=", "leftx", "+", "(", "topy", "-", "boty", ")", "/", "slope", "if", "dest", ".", "collide_point", "(", "rightx", "*", "xco", ",", "topy", "*", "yco", ")", ":", "break", "break", "for", "boty", "in", "range", "(", "int", "(", "boty", "+", "oh", "/", "2", ")", ",", "int", "(", "boty", ")", "-", "1", ",", "-", "1", ")", ":", "leftx", "=", "(", "boty", "-", "topy", ")", "/", "slope", "+", "rightx", "if", 
"orig", ".", "collide_point", "(", "leftx", "*", "xco", ",", "boty", "*", "yco", ")", ":", "boty", "=", "float", "(", "boty", "+", "1", ")", "for", "pip", "in", "range", "(", "10", ")", ":", "boty", "-=", "0.1", "*", "pip", "leftx", "=", "(", "boty", "-", "topy", ")", "/", "slope", "+", "rightx", "if", "orig", ".", "collide_point", "(", "leftx", "*", "xco", ",", "boty", "*", "yco", ")", ":", "break", "break", "rise", "=", "topy", "-", "boty", "run", "=", "rightx", "-", "leftx", "try", ":", "start_theta", "=", "atan", "(", "rise", "/", "run", ")", "except", "ZeroDivisionError", ":", "return", "up_and_down", "(", "orig", ",", "dest", ",", "taillen", ")", "try", ":", "end_theta", "=", "atan", "(", "run", "/", "rise", ")", "except", "ZeroDivisionError", ":", "return", "left_and_right", "(", "orig", ",", "dest", ",", "taillen", ")", "# make the little wedge at the end so you can tell which way the", "# arrow's pointing, and flip it all back around to the way it was", "top_theta", "=", "start_theta", "-", "fortyfive", "bot_theta", "=", "pi", "-", "fortyfive", "-", "end_theta", "xoff1", "=", "cos", "(", "top_theta", ")", "*", "taillen", "yoff1", "=", "sin", "(", "top_theta", ")", "*", "taillen", "xoff2", "=", "cos", "(", "bot_theta", ")", "*", "taillen", "yoff2", "=", "sin", "(", "bot_theta", ")", "*", "taillen", "x1", "=", "(", "rightx", "-", "xoff1", ")", "*", "xco", "x2", "=", "(", "rightx", "-", "xoff2", ")", "*", "xco", "y1", "=", "(", "topy", "-", "yoff1", ")", "*", "yco", "y2", "=", "(", "topy", "-", "yoff2", ")", "*", "yco", "startx", "=", "leftx", "*", "xco", "starty", "=", "boty", "*", "yco", "endx", "=", "rightx", "*", "xco", "endy", "=", "topy", "*", "yco", "return", "(", "[", "startx", ",", "starty", ",", "endx", ",", "endy", "]", ",", "[", "x1", ",", "y1", ",", "endx", ",", "endy", ",", "x2", ",", "y2", "]", ")" ]
32.198473
0.00046
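A numeric sketch of just the arrowhead wedge computed at the end of get_points, for the simple first-quadrant case (xco = yco = 1); the endpoints and tail length are made up:

import math

fortyfive = math.pi / 4
taillen = 10.0
startx, starty = 0.0, 0.0      # tail of the arrow
endx, endy = 30.0, 40.0        # tip of the arrow
rise, run = endy - starty, endx - startx

start_theta = math.atan(rise / run)
end_theta = math.atan(run / rise)
top_theta = start_theta - fortyfive
bot_theta = math.pi - fortyfive - end_theta

# The two wedge points sit taillen away from the tip, 45 degrees either side
# of the shaft, mirroring the last block of get_points when xco = yco = 1.
x1, y1 = endx - math.cos(top_theta) * taillen, endy - math.sin(top_theta) * taillen
x2, y2 = endx - math.cos(bot_theta) * taillen, endy - math.sin(bot_theta) * taillen

print([startx, starty, endx, endy])        # trunk
print([x1, y1, endx, endy, x2, y2])        # arrowhead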
def depth_atleast(list_, depth): r""" Returns if depth of list is at least ``depth`` Args: list_ (list): depth (int): Returns: bool: True CommandLine: python -m utool.util_dict --exec-depth_atleast --show Example: >>> # DISABLE_DOCTEST >>> from utool.util_dict import * # NOQA >>> import utool as ut >>> list_ = [[[[0]]], [[0]]] >>> depth = 0 >>> result = [depth_atleast(list_, depth) for depth in range(0, 7)] >>> print(result) """ if depth == 0: return True else: try: return all([depth_atleast(item, depth - 1) for item in list_]) except TypeError: return False
[ "def", "depth_atleast", "(", "list_", ",", "depth", ")", ":", "if", "depth", "==", "0", ":", "return", "True", "else", ":", "try", ":", "return", "all", "(", "[", "depth_atleast", "(", "item", ",", "depth", "-", "1", ")", "for", "item", "in", "list_", "]", ")", "except", "TypeError", ":", "return", "False" ]
23.733333
0.00135
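For context, a minimal self-contained re-implementation of the recursive depth check above, runnable without utool:

def depth_atleast(list_, depth):
    # Depth >= 0 holds for anything; otherwise every element must itself
    # have depth >= depth - 1. Iterating a non-sequence raises TypeError,
    # which means the required depth was not reached.
    if depth == 0:
        return True
    try:
        return all(depth_atleast(item, depth - 1) for item in list_)
    except TypeError:
        return False

list_ = [[[[0]]], [[0]]]
print([depth_atleast(list_, d) for d in range(0, 7)])
# [True, True, True, True, False, False, False]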
def nsum0(lx): """ Accepts log-values as input, exponentiates them, sums down the rows (first dimension), normalizes and returns the result. Handles underflow by rescaling so that the largest values is exactly 1.0. """ lx = numpy.asarray(lx) base = lx.max() x = numpy.exp(lx - base) ssum = x.sum(0) result = ssum / ssum.sum() conventional = (numpy.exp(lx).sum(0) / numpy.exp(lx).sum()) assert similar(result, conventional) return result
[ "def", "nsum0", "(", "lx", ")", ":", "lx", "=", "numpy", ".", "asarray", "(", "lx", ")", "base", "=", "lx", ".", "max", "(", ")", "x", "=", "numpy", ".", "exp", "(", "lx", "-", "base", ")", "ssum", "=", "x", ".", "sum", "(", "0", ")", "result", "=", "ssum", "/", "ssum", ".", "sum", "(", ")", "conventional", "=", "(", "numpy", ".", "exp", "(", "lx", ")", ".", "sum", "(", "0", ")", "/", "numpy", ".", "exp", "(", "lx", ")", ".", "sum", "(", ")", ")", "assert", "similar", "(", "result", ",", "conventional", ")", "return", "result" ]
28.0625
0.021552
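A self-contained numpy illustration of the underflow-avoiding rescaling that nsum0 relies on; the log-values are made-up examples:

import numpy

lx = numpy.array([[-1000.0, -1001.0],
                  [-1000.5, -1002.0]])   # log-values small enough to underflow exp()

base = lx.max()                  # rescale so the largest value maps to exp(0) == 1.0
x = numpy.exp(lx - base)         # safe: no underflow to 0.0
ssum = x.sum(0)                  # sum down the rows
result = ssum / ssum.sum()       # normalize
print(result)

# The naive version underflows: exp(-1000) == 0.0 in float64, so the line
# below divides 0 by 0 and prints nan values (with a RuntimeWarning).
naive = numpy.exp(lx).sum(0) / numpy.exp(lx).sum()
print(naive)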
def unblock_all(self): """ Unblock all emitters in this group. """ self.unblock() for em in self._emitters.values(): em.unblock()
[ "def", "unblock_all", "(", "self", ")", ":", "self", ".", "unblock", "(", ")", "for", "em", "in", "self", ".", "_emitters", ".", "values", "(", ")", ":", "em", ".", "unblock", "(", ")" ]
28
0.011561
def version(self) -> Optional[str]: """ Get the HTTP version. """ if self._version is None: self._version = self._parser.get_http_version() return self._version
[ "def", "version", "(", "self", ")", "->", "Optional", "[", "str", "]", ":", "if", "self", ".", "_version", "is", "None", ":", "self", ".", "_version", "=", "self", ".", "_parser", ".", "get_http_version", "(", ")", "return", "self", ".", "_version" ]
27.857143
0.00995
def _2ndDerivInt(x,y,z,dens,densDeriv,b2,c2,i,j,glx=None,glw=None): """Integral that gives the 2nd derivative of the potential in x,y,z""" def integrand(s): t= 1/s**2.-1. m= numpy.sqrt(x**2./(1.+t)+y**2./(b2+t)+z**2./(c2+t)) return (densDeriv(m) *(x/(1.+t)*(i==0)+y/(b2+t)*(i==1)+z/(c2+t)*(i==2)) *(x/(1.+t)*(j==0)+y/(b2+t)*(j==1)+z/(c2+t)*(j==2))/m\ +dens(m)*(i==j)*((1./(1.+t)*(i==0)+1./(b2+t)*(i==1)+1./(c2+t)*(i==2))))\ /numpy.sqrt((1.+(b2-1.)*s**2.)*(1.+(c2-1.)*s**2.)) if glx is None: return integrate.quad(integrand,0.,1.)[0] else: return numpy.sum(glw*integrand(glx))
[ "def", "_2ndDerivInt", "(", "x", ",", "y", ",", "z", ",", "dens", ",", "densDeriv", ",", "b2", ",", "c2", ",", "i", ",", "j", ",", "glx", "=", "None", ",", "glw", "=", "None", ")", ":", "def", "integrand", "(", "s", ")", ":", "t", "=", "1", "/", "s", "**", "2.", "-", "1.", "m", "=", "numpy", ".", "sqrt", "(", "x", "**", "2.", "/", "(", "1.", "+", "t", ")", "+", "y", "**", "2.", "/", "(", "b2", "+", "t", ")", "+", "z", "**", "2.", "/", "(", "c2", "+", "t", ")", ")", "return", "(", "densDeriv", "(", "m", ")", "*", "(", "x", "/", "(", "1.", "+", "t", ")", "*", "(", "i", "==", "0", ")", "+", "y", "/", "(", "b2", "+", "t", ")", "*", "(", "i", "==", "1", ")", "+", "z", "/", "(", "c2", "+", "t", ")", "*", "(", "i", "==", "2", ")", ")", "*", "(", "x", "/", "(", "1.", "+", "t", ")", "*", "(", "j", "==", "0", ")", "+", "y", "/", "(", "b2", "+", "t", ")", "*", "(", "j", "==", "1", ")", "+", "z", "/", "(", "c2", "+", "t", ")", "*", "(", "j", "==", "2", ")", ")", "/", "m", "+", "dens", "(", "m", ")", "*", "(", "i", "==", "j", ")", "*", "(", "(", "1.", "/", "(", "1.", "+", "t", ")", "*", "(", "i", "==", "0", ")", "+", "1.", "/", "(", "b2", "+", "t", ")", "*", "(", "i", "==", "1", ")", "+", "1.", "/", "(", "c2", "+", "t", ")", "*", "(", "i", "==", "2", ")", ")", ")", ")", "/", "numpy", ".", "sqrt", "(", "(", "1.", "+", "(", "b2", "-", "1.", ")", "*", "s", "**", "2.", ")", "*", "(", "1.", "+", "(", "c2", "-", "1.", ")", "*", "s", "**", "2.", ")", ")", "if", "glx", "is", "None", ":", "return", "integrate", ".", "quad", "(", "integrand", ",", "0.", ",", "1.", ")", "[", "0", "]", "else", ":", "return", "numpy", ".", "sum", "(", "glw", "*", "integrand", "(", "glx", ")", ")" ]
49.285714
0.046942
def set_marginal_histogram_title(ax, fmt, color, label=None, rotated=False): """ Sets the title of the marginal histograms. Parameters ---------- ax : Axes The `Axes` instance for the plot. fmt : str The string to add to the title. color : str The color of the text to add to the title. label : str If title does not exist, then include label at beginning of the string. rotated : bool If `True` then rotate the text 270 degrees for sideways title. """ # get rotation angle of the title rotation = 270 if rotated else 0 # get how much to displace title on axes xscale = 1.05 if rotated else 0.0 if rotated: yscale = 1.0 elif len(ax.get_figure().axes) > 1: yscale = 1.15 else: yscale = 1.05 # get class that packs text boxes vertical or horizonitally packer_class = offsetbox.VPacker if rotated else offsetbox.HPacker # if no title exists if not hasattr(ax, "title_boxes"): # create a text box title = "{} = {}".format(label, fmt) tbox1 = offsetbox.TextArea( title, textprops=dict(color=color, size=15, rotation=rotation, ha='left', va='bottom')) # save a list of text boxes as attribute for later ax.title_boxes = [tbox1] # pack text boxes ybox = packer_class(children=ax.title_boxes, align="bottom", pad=0, sep=5) # else append existing title else: # delete old title ax.title_anchor.remove() # add new text box to list tbox1 = offsetbox.TextArea( " {}".format(fmt), textprops=dict(color=color, size=15, rotation=rotation, ha='left', va='bottom')) ax.title_boxes = ax.title_boxes + [tbox1] # pack text boxes ybox = packer_class(children=ax.title_boxes, align="bottom", pad=0, sep=5) # add new title and keep reference to instance as an attribute anchored_ybox = offsetbox.AnchoredOffsetbox( loc=2, child=ybox, pad=0., frameon=False, bbox_to_anchor=(xscale, yscale), bbox_transform=ax.transAxes, borderpad=0.) ax.title_anchor = ax.add_artist(anchored_ybox)
[ "def", "set_marginal_histogram_title", "(", "ax", ",", "fmt", ",", "color", ",", "label", "=", "None", ",", "rotated", "=", "False", ")", ":", "# get rotation angle of the title", "rotation", "=", "270", "if", "rotated", "else", "0", "# get how much to displace title on axes", "xscale", "=", "1.05", "if", "rotated", "else", "0.0", "if", "rotated", ":", "yscale", "=", "1.0", "elif", "len", "(", "ax", ".", "get_figure", "(", ")", ".", "axes", ")", ">", "1", ":", "yscale", "=", "1.15", "else", ":", "yscale", "=", "1.05", "# get class that packs text boxes vertical or horizonitally", "packer_class", "=", "offsetbox", ".", "VPacker", "if", "rotated", "else", "offsetbox", ".", "HPacker", "# if no title exists", "if", "not", "hasattr", "(", "ax", ",", "\"title_boxes\"", ")", ":", "# create a text box", "title", "=", "\"{} = {}\"", ".", "format", "(", "label", ",", "fmt", ")", "tbox1", "=", "offsetbox", ".", "TextArea", "(", "title", ",", "textprops", "=", "dict", "(", "color", "=", "color", ",", "size", "=", "15", ",", "rotation", "=", "rotation", ",", "ha", "=", "'left'", ",", "va", "=", "'bottom'", ")", ")", "# save a list of text boxes as attribute for later", "ax", ".", "title_boxes", "=", "[", "tbox1", "]", "# pack text boxes", "ybox", "=", "packer_class", "(", "children", "=", "ax", ".", "title_boxes", ",", "align", "=", "\"bottom\"", ",", "pad", "=", "0", ",", "sep", "=", "5", ")", "# else append existing title", "else", ":", "# delete old title", "ax", ".", "title_anchor", ".", "remove", "(", ")", "# add new text box to list", "tbox1", "=", "offsetbox", ".", "TextArea", "(", "\" {}\"", ".", "format", "(", "fmt", ")", ",", "textprops", "=", "dict", "(", "color", "=", "color", ",", "size", "=", "15", ",", "rotation", "=", "rotation", ",", "ha", "=", "'left'", ",", "va", "=", "'bottom'", ")", ")", "ax", ".", "title_boxes", "=", "ax", ".", "title_boxes", "+", "[", "tbox1", "]", "# pack text boxes", "ybox", "=", "packer_class", "(", "children", "=", "ax", ".", "title_boxes", ",", "align", "=", "\"bottom\"", ",", "pad", "=", "0", ",", "sep", "=", "5", ")", "# add new title and keep reference to instance as an attribute", "anchored_ybox", "=", "offsetbox", ".", "AnchoredOffsetbox", "(", "loc", "=", "2", ",", "child", "=", "ybox", ",", "pad", "=", "0.", ",", "frameon", "=", "False", ",", "bbox_to_anchor", "=", "(", "xscale", ",", "yscale", ")", ",", "bbox_transform", "=", "ax", ".", "transAxes", ",", "borderpad", "=", "0.", ")", "ax", ".", "title_anchor", "=", "ax", ".", "add_artist", "(", "anchored_ybox", ")" ]
32.5
0.000415
def to_str(string): """ Return the given string (either byte string or Unicode string) converted to native-str, that is, a byte string on Python 2, or a Unicode string on Python 3. Return ``None`` if ``string`` is ``None``. :param str string: the string to convert to native-str :rtype: native-str """ if string is None: return None if isinstance(string, str): return string if PY2: return string.encode("utf-8") return string.decode("utf-8")
[ "def", "to_str", "(", "string", ")", ":", "if", "string", "is", "None", ":", "return", "None", "if", "isinstance", "(", "string", ",", "str", ")", ":", "return", "string", "if", "PY2", ":", "return", "string", ".", "encode", "(", "\"utf-8\"", ")", "return", "string", ".", "decode", "(", "\"utf-8\"", ")" ]
27.722222
0.001938
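A Python-3-only sketch of the same conversion (the PY2 encode branch is dropped); the byte string below is just an example:

def to_native_str(string):
    # On Python 3 the native string type is unicode str, so bytes get decoded.
    if string is None:
        return None
    if isinstance(string, str):
        return string
    return string.decode("utf-8")

print(to_native_str(b"caf\xc3\xa9"))    # café
print(to_native_str("already native"))
print(to_native_str(None))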
def attach_team(context, id, team_id): """attach_team(context, id, team_id) Attach a team to a topic. >>> dcictl topic-attach-team [OPTIONS] :param string id: ID of the topic to attach to [required] :param string team_id: ID of the team to attach to this topic [required] """ team_id = team_id or identity.my_team_id(context) result = topic.attach_team(context, id=id, team_id=team_id) utils.format_output(result, context.format)
[ "def", "attach_team", "(", "context", ",", "id", ",", "team_id", ")", ":", "team_id", "=", "team_id", "or", "identity", ".", "my_team_id", "(", "context", ")", "result", "=", "topic", ".", "attach_team", "(", "context", ",", "id", "=", "id", ",", "team_id", "=", "team_id", ")", "utils", ".", "format_output", "(", "result", ",", "context", ".", "format", ")" ]
35.076923
0.002137
def visit_Div(self, node: AST, dfltChaining: bool = True) -> str: """Return division sign.""" return '/' if self.compact else ' / '
[ "def", "visit_Div", "(", "self", ",", "node", ":", "AST", ",", "dfltChaining", ":", "bool", "=", "True", ")", "->", "str", ":", "return", "'/'", "if", "self", ".", "compact", "else", "' / '" ]
48.333333
0.013605
def flush(self): """Forces a flush from the internal queue to the server""" queue = self.queue size = queue.qsize() queue.join() self.log.debug('successfully flushed %s items.', size)
[ "def", "flush", "(", "self", ")", ":", "queue", "=", "self", ".", "queue", "size", "=", "queue", ".", "qsize", "(", ")", "queue", ".", "join", "(", ")", "self", ".", "log", ".", "debug", "(", "'successfully flushed %s items.'", ",", "size", ")" ]
36.333333
0.008969
def hugoniot_t_single(rho, rho0, c0, s, gamma0, q, theta0, n, mass, three_r=3. * constants.R, t_ref=300., c_v=0.): """ internal function to calculate temperature along Hugoniot :param rho: density in g/cm^3 :param rho0: density at 1 bar in g/cm^3 :param c0: velocity at 1 bar in km/s :param s: slope of the velocity change :param gamma0: Gruneisen parameter at 1 bar :param q: logarithmic derivative of Gruneisen parameter :param theta0: Debye temperature in K :param n: number of elements in a chemical formula :param mass: molar mass in gram :param three_r: 3 times gas constant. Jamieson modified this value to compensate for mismatches :param t_ref: reference temperature, 300 K :param c_v: heat capacity, see Jamieson 1983 for detail :return: temperature along hugoniot """ eta = 1. - rho0 / rho if eta == 0.0: return 300. threenk = three_r / mass * n # [J/mol/K] / [g/mol] = [J/g/K] k = [rho0, c0, s, gamma0, q, theta0 / 1.e3] t_h = odeint(_dT_h_delta, t_ref / 1.e3, [0., eta], args=(k, threenk, c_v), full_output=1) temp_h = np.squeeze(t_h[0][1]) return temp_h * 1.e3
[ "def", "hugoniot_t_single", "(", "rho", ",", "rho0", ",", "c0", ",", "s", ",", "gamma0", ",", "q", ",", "theta0", ",", "n", ",", "mass", ",", "three_r", "=", "3.", "*", "constants", ".", "R", ",", "t_ref", "=", "300.", ",", "c_v", "=", "0.", ")", ":", "eta", "=", "1.", "-", "rho0", "/", "rho", "if", "eta", "==", "0.0", ":", "return", "300.", "threenk", "=", "three_r", "/", "mass", "*", "n", "# [J/mol/K] / [g/mol] = [J/g/K]", "k", "=", "[", "rho0", ",", "c0", ",", "s", ",", "gamma0", ",", "q", ",", "theta0", "/", "1.e3", "]", "t_h", "=", "odeint", "(", "_dT_h_delta", ",", "t_ref", "/", "1.e3", ",", "[", "0.", ",", "eta", "]", ",", "args", "=", "(", "k", ",", "threenk", ",", "c_v", ")", ",", "full_output", "=", "1", ")", "temp_h", "=", "np", ".", "squeeze", "(", "t_h", "[", "0", "]", "[", "1", "]", ")", "return", "temp_h", "*", "1.e3" ]
41.103448
0.00082
def print_plugin_list(plugins: Dict[str, pkg_resources.EntryPoint]): """ Prints all registered plugins and checks if they can be loaded or not. :param plugins: plugins :type plugins: Dict[str, ~pkg_resources.EntryPoint] """ for trigger, entry_point in plugins.items(): try: plugin_class = entry_point.load() version = str(plugin_class._info.version) print( f"{trigger} (ok)\n" f" {version}" ) except Exception: print( f"{trigger} (failed)" )
[ "def", "print_plugin_list", "(", "plugins", ":", "Dict", "[", "str", ",", "pkg_resources", ".", "EntryPoint", "]", ")", ":", "for", "trigger", ",", "entry_point", "in", "plugins", ".", "items", "(", ")", ":", "try", ":", "plugin_class", "=", "entry_point", ".", "load", "(", ")", "version", "=", "str", "(", "plugin_class", ".", "_info", ".", "version", ")", "print", "(", "f\"{trigger} (ok)\\n\"", "f\" {version}\"", ")", "except", "Exception", ":", "print", "(", "f\"{trigger} (failed)\"", ")" ]
30.894737
0.001653
def scan(self, data, part): """Scan a string. Parameters ---------- data : `str` String to scan. part : `bool` True if data is partial. Returns ------- `generator` of (`str` or `markovchain.scanner.Scanner.END`) Token generator. """ if not self.end_chars: yield from data self.start = self.start or bool(data) self.end = False else: for char in data: if char in self.end_chars: if not self.start: continue self.end = True else: if self.end: yield self.END self.end = False self.start = True yield char if not part and self.start: if not self.end and self.default_end is not None: yield self.default_end yield self.END self.reset()
[ "def", "scan", "(", "self", ",", "data", ",", "part", ")", ":", "if", "not", "self", ".", "end_chars", ":", "yield", "from", "data", "self", ".", "start", "=", "self", ".", "start", "or", "bool", "(", "data", ")", "self", ".", "end", "=", "False", "else", ":", "for", "char", "in", "data", ":", "if", "char", "in", "self", ".", "end_chars", ":", "if", "not", "self", ".", "start", ":", "continue", "self", ".", "end", "=", "True", "else", ":", "if", "self", ".", "end", ":", "yield", "self", ".", "END", "self", ".", "end", "=", "False", "self", ".", "start", "=", "True", "yield", "char", "if", "not", "part", "and", "self", ".", "start", ":", "if", "not", "self", ".", "end", "and", "self", ".", "default_end", "is", "not", "None", ":", "yield", "self", ".", "default_end", "yield", "self", ".", "END", "self", ".", "reset", "(", ")" ]
27.810811
0.001878
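A condensed, standalone sketch of the scanning idea above; it is a simplified stand-in with hypothetical names that drops the partial-data and default_end handling of the real scanner:

END = object()   # sentinel, standing in for Scanner.END

def scan_chars(data, end_chars=".!?"):
    # Yield characters one by one, plus an END marker after each sentence.
    started = False
    ended = False
    for char in data:
        if char in end_chars:
            if not started:
                continue          # skip terminators before any sentence began
            ended = True
        else:
            if ended:
                yield END         # boundary between two sentences
                ended = False
            started = True
        yield char
    if started:
        yield END                 # close the final sentence

print("".join("<END>" if t is END else t for t in scan_chars("Hi. Bye!")))
# Hi.<END> Bye!<END>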
def is_app_folder(self, folder): """ checks if a folder is an app module, i.e. its build.gradle applies the configured gradle plugin """ with open('%s/%s/build.gradle' % (self.path, folder)) as f: for line in f.readlines(): if config.gradle_plugin in line: return True return False
[ "def", "is_app_folder", "(", "self", ",", "folder", ")", ":", "with", "open", "(", "'%s/%s/build.gradle'", "%", "(", "self", ".", "path", ",", "folder", ")", ")", "as", "f", ":", "for", "line", "in", "f", ".", "readlines", "(", ")", ":", "if", "config", ".", "gradle_plugin", "in", "line", ":", "return", "True", "return", "False" ]
26.777778
0.016064
def cache_get(key): """ Wrapper for ``cache.get``. The expiry time for the cache entry is stored with the entry. If the expiry time has passed, put the stale entry back into cache, and don't return it to trigger a fake cache miss. """ packed = cache.get(_hashed_key(key)) if packed is None: return None value, refresh_time, refreshed = packed if (time() > refresh_time) and not refreshed: cache_set(key, value, settings.CACHE_SET_DELAY_SECONDS, True) return None return value
[ "def", "cache_get", "(", "key", ")", ":", "packed", "=", "cache", ".", "get", "(", "_hashed_key", "(", "key", ")", ")", "if", "packed", "is", "None", ":", "return", "None", "value", ",", "refresh_time", ",", "refreshed", "=", "packed", "if", "(", "time", "(", ")", ">", "refresh_time", ")", "and", "not", "refreshed", ":", "cache_set", "(", "key", ",", "value", ",", "settings", ".", "CACHE_SET_DELAY_SECONDS", ",", "True", ")", "return", "None", "return", "value" ]
35.133333
0.001848
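A self-contained sketch of the stale-entry behaviour described above, using a plain dict in place of Django's cache; the names and delay value are illustrative:

import time

_store = {}          # key -> (value, refresh_time, refreshed)
SET_DELAY = 30       # seconds one caller gets to recompute the value

def cache_set(key, value, timeout, refreshed=False):
    _store[key] = (value, time.time() + timeout, refreshed)

def cache_get(key):
    packed = _store.get(key)
    if packed is None:
        return None
    value, refresh_time, refreshed = packed
    if time.time() > refresh_time and not refreshed:
        # Entry is stale: put it back briefly (marked refreshed) so other
        # callers keep getting it, and report a miss so this caller rebuilds it.
        cache_set(key, value, SET_DELAY, True)
        return None
    return value

cache_set("greeting", "hello", timeout=0.01)
time.sleep(0.05)
print(cache_get("greeting"))   # None -> this caller should recompute
print(cache_get("greeting"))   # 'hello' -> other callers still see the stale value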
def _load_mnist_dataset(shape, path, name='mnist', url='http://yann.lecun.com/exdb/mnist/'): """A generic function to load mnist-like dataset. Parameters: ---------- shape : tuple The shape of digit images. path : str The path that the data is downloaded to. name : str The dataset name you want to use(the default is 'mnist'). url : str The url of dataset(the default is 'http://yann.lecun.com/exdb/mnist/'). """ path = os.path.join(path, name) # Define functions for loading mnist-like data's images and labels. # For convenience, they also download the requested files if needed. def load_mnist_images(path, filename): filepath = maybe_download_and_extract(filename, path, url) logging.info(filepath) # Read the inputs in Yann LeCun's binary format. with gzip.open(filepath, 'rb') as f: data = np.frombuffer(f.read(), np.uint8, offset=16) # The inputs are vectors now, we reshape them to monochrome 2D images, # following the shape convention: (examples, channels, rows, columns) data = data.reshape(shape) # The inputs come as bytes, we convert them to float32 in range [0,1]. # (Actually to range [0, 255/256], for compatibility to the version # provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.) return data / np.float32(256) def load_mnist_labels(path, filename): filepath = maybe_download_and_extract(filename, path, url) # Read the labels in Yann LeCun's binary format. with gzip.open(filepath, 'rb') as f: data = np.frombuffer(f.read(), np.uint8, offset=8) # The labels are vectors of integers now, that's exactly what we want. return data # Download and read the training and test set images and labels. logging.info("Load or Download {0} > {1}".format(name.upper(), path)) X_train = load_mnist_images(path, 'train-images-idx3-ubyte.gz') y_train = load_mnist_labels(path, 'train-labels-idx1-ubyte.gz') X_test = load_mnist_images(path, 't10k-images-idx3-ubyte.gz') y_test = load_mnist_labels(path, 't10k-labels-idx1-ubyte.gz') # We reserve the last 10000 training examples for validation. X_train, X_val = X_train[:-10000], X_train[-10000:] y_train, y_val = y_train[:-10000], y_train[-10000:] # We just return all the arrays in order, as expected in main(). # (It doesn't matter how we do this as long as we can read them again.) X_train = np.asarray(X_train, dtype=np.float32) y_train = np.asarray(y_train, dtype=np.int32) X_val = np.asarray(X_val, dtype=np.float32) y_val = np.asarray(y_val, dtype=np.int32) X_test = np.asarray(X_test, dtype=np.float32) y_test = np.asarray(y_test, dtype=np.int32) return X_train, y_train, X_val, y_val, X_test, y_test
[ "def", "_load_mnist_dataset", "(", "shape", ",", "path", ",", "name", "=", "'mnist'", ",", "url", "=", "'http://yann.lecun.com/exdb/mnist/'", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "name", ")", "# Define functions for loading mnist-like data's images and labels.", "# For convenience, they also download the requested files if needed.", "def", "load_mnist_images", "(", "path", ",", "filename", ")", ":", "filepath", "=", "maybe_download_and_extract", "(", "filename", ",", "path", ",", "url", ")", "logging", ".", "info", "(", "filepath", ")", "# Read the inputs in Yann LeCun's binary format.", "with", "gzip", ".", "open", "(", "filepath", ",", "'rb'", ")", "as", "f", ":", "data", "=", "np", ".", "frombuffer", "(", "f", ".", "read", "(", ")", ",", "np", ".", "uint8", ",", "offset", "=", "16", ")", "# The inputs are vectors now, we reshape them to monochrome 2D images,", "# following the shape convention: (examples, channels, rows, columns)", "data", "=", "data", ".", "reshape", "(", "shape", ")", "# The inputs come as bytes, we convert them to float32 in range [0,1].", "# (Actually to range [0, 255/256], for compatibility to the version", "# provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.)", "return", "data", "/", "np", ".", "float32", "(", "256", ")", "def", "load_mnist_labels", "(", "path", ",", "filename", ")", ":", "filepath", "=", "maybe_download_and_extract", "(", "filename", ",", "path", ",", "url", ")", "# Read the labels in Yann LeCun's binary format.", "with", "gzip", ".", "open", "(", "filepath", ",", "'rb'", ")", "as", "f", ":", "data", "=", "np", ".", "frombuffer", "(", "f", ".", "read", "(", ")", ",", "np", ".", "uint8", ",", "offset", "=", "8", ")", "# The labels are vectors of integers now, that's exactly what we want.", "return", "data", "# Download and read the training and test set images and labels.", "logging", ".", "info", "(", "\"Load or Download {0} > {1}\"", ".", "format", "(", "name", ".", "upper", "(", ")", ",", "path", ")", ")", "X_train", "=", "load_mnist_images", "(", "path", ",", "'train-images-idx3-ubyte.gz'", ")", "y_train", "=", "load_mnist_labels", "(", "path", ",", "'train-labels-idx1-ubyte.gz'", ")", "X_test", "=", "load_mnist_images", "(", "path", ",", "'t10k-images-idx3-ubyte.gz'", ")", "y_test", "=", "load_mnist_labels", "(", "path", ",", "'t10k-labels-idx1-ubyte.gz'", ")", "# We reserve the last 10000 training examples for validation.", "X_train", ",", "X_val", "=", "X_train", "[", ":", "-", "10000", "]", ",", "X_train", "[", "-", "10000", ":", "]", "y_train", ",", "y_val", "=", "y_train", "[", ":", "-", "10000", "]", ",", "y_train", "[", "-", "10000", ":", "]", "# We just return all the arrays in order, as expected in main().", "# (It doesn't matter how we do this as long as we can read them again.)", "X_train", "=", "np", ".", "asarray", "(", "X_train", ",", "dtype", "=", "np", ".", "float32", ")", "y_train", "=", "np", ".", "asarray", "(", "y_train", ",", "dtype", "=", "np", ".", "int32", ")", "X_val", "=", "np", ".", "asarray", "(", "X_val", ",", "dtype", "=", "np", ".", "float32", ")", "y_val", "=", "np", ".", "asarray", "(", "y_val", ",", "dtype", "=", "np", ".", "int32", ")", "X_test", "=", "np", ".", "asarray", "(", "X_test", ",", "dtype", "=", "np", ".", "float32", ")", "y_test", "=", "np", ".", "asarray", "(", "y_test", ",", "dtype", "=", "np", ".", "int32", ")", "return", "X_train", ",", "y_train", ",", "X_val", ",", "y_val", ",", "X_test", ",", "y_test" ]
46.377049
0.000692
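A runnable sketch of the frombuffer-with-offset idiom used above for IDX files; the bytes are synthesized in memory instead of downloaded, and the 16-byte header is left blank for simplicity:

import gzip
import io

import numpy as np

# Fake a gzipped IDX image file: a 16-byte header followed by two 4x4
# single-channel images of raw uint8 pixels.
header = bytes(16)
pixels = bytes(range(32))
buf = io.BytesIO()
with gzip.open(buf, "wb") as f:
    f.write(header + pixels)

buf.seek(0)
with gzip.open(buf, "rb") as f:
    data = np.frombuffer(f.read(), np.uint8, offset=16)   # skip the header

images = data.reshape((-1, 1, 4, 4))       # (examples, channels, rows, columns)
images = images / np.float32(256)          # bytes -> float32 in [0, 255/256]
print(images.shape, images.max())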
def add_method(function, klass, name=None): '''Add an existing function to a class as a method. Note: Consider using the extend decorator as a more readable alternative to using this function directly. Args: function: The function to be added to the class klass. klass: The class to which the new method will be added. name: An optional name for the new method. If omitted or None the original name of the function is used. Returns: The function argument unmodified. Raises: ValueError: If klass already has an attribute with the same name as the extension method. ''' # Should we be using functools.update_wrapper in here? if name is None: name = function_name(function) if hasattr(klass, name): raise ValueError("Cannot replace existing attribute with method " "'{name}'".format(name=name)) setattr(klass, name, function) return function
[ "def", "add_method", "(", "function", ",", "klass", ",", "name", "=", "None", ")", ":", "# Should we be using functools.update_wrapper in here?\r", "if", "name", "is", "None", ":", "name", "=", "function_name", "(", "function", ")", "if", "hasattr", "(", "klass", ",", "name", ")", ":", "raise", "ValueError", "(", "\"Cannot replace existing attribute with method \"", "\"'{name}'\"", ".", "format", "(", "name", "=", "name", ")", ")", "setattr", "(", "klass", ",", "name", ",", "function", ")", "return", "function" ]
34.586207
0.00097
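A small, self-contained demonstration of the same pattern, attaching a free function to a class as a method; the class and function names are made up:

def describe(self):
    # 'self' will be the instance once the function is attached to the class.
    return "Point({0}, {1})".format(self.x, self.y)

class Point(object):
    def __init__(self, x, y):
        self.x = x
        self.y = y

def add_method(function, klass, name=None):
    name = name or function.__name__
    if hasattr(klass, name):
        raise ValueError("Cannot replace existing attribute with method "
                         "'{name}'".format(name=name))
    setattr(klass, name, function)
    return function

add_method(describe, Point)
print(Point(1, 2).describe())   # Point(1, 2)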
def get_file(self, name, save_to, add_to_cache=True, force_refresh=False, _lock_exclusive=False): """Retrieves file identified by ``name``. The file is saved as ``save_to``. If ``add_to_cache`` is ``True``, the file is added to the local store. If ``force_refresh`` is ``True``, local cache is not examined if a remote store is configured. If a remote store is configured, but ``name`` does not contain a version, the local data store is not used, as we cannot guarantee that the version there is fresh. Local data store implemented in :class:`LocalDataStore` tries to not copy the entire file to ``save_to`` if possible, but instead uses hardlinking. Therefore you should not modify the file if you don't want to totally blow something. This method returns the full versioned name of the retrieved file. """ uname, version = split_name(name) lock = None if self.local_store: lock = self.lock_manager.lock_for(uname) if _lock_exclusive: lock.lock_exclusive() else: lock.lock_shared() else: add_to_cache = False t = time.time() logger.debug(' downloading %s', name) try: if not self.remote_store or (version is not None and not force_refresh): try: if self.local_store and self.local_store.exists(name): return self.local_store.get_file(name, save_to) except Exception: if self.remote_store: logger.warning("Error getting '%s' from local store", name, exc_info=True) else: raise if self.remote_store: if not _lock_exclusive and add_to_cache: if lock: lock.unlock() return self.get_file(name, save_to, add_to_cache, _lock_exclusive=True) vname = self.remote_store.get_file(name, save_to) if add_to_cache: self._add_to_cache(vname, save_to) return vname raise FiletrackerError("File not available: %s" % name) finally: if lock: lock.close() logger.debug(' processed %s in %.2fs', name, time.time() - t)
[ "def", "get_file", "(", "self", ",", "name", ",", "save_to", ",", "add_to_cache", "=", "True", ",", "force_refresh", "=", "False", ",", "_lock_exclusive", "=", "False", ")", ":", "uname", ",", "version", "=", "split_name", "(", "name", ")", "lock", "=", "None", "if", "self", ".", "local_store", ":", "lock", "=", "self", ".", "lock_manager", ".", "lock_for", "(", "uname", ")", "if", "_lock_exclusive", ":", "lock", ".", "lock_exclusive", "(", ")", "else", ":", "lock", ".", "lock_shared", "(", ")", "else", ":", "add_to_cache", "=", "False", "t", "=", "time", ".", "time", "(", ")", "logger", ".", "debug", "(", "' downloading %s'", ",", "name", ")", "try", ":", "if", "not", "self", ".", "remote_store", "or", "(", "version", "is", "not", "None", "and", "not", "force_refresh", ")", ":", "try", ":", "if", "self", ".", "local_store", "and", "self", ".", "local_store", ".", "exists", "(", "name", ")", ":", "return", "self", ".", "local_store", ".", "get_file", "(", "name", ",", "save_to", ")", "except", "Exception", ":", "if", "self", ".", "remote_store", ":", "logger", ".", "warning", "(", "\"Error getting '%s' from local store\"", ",", "name", ",", "exc_info", "=", "True", ")", "else", ":", "raise", "if", "self", ".", "remote_store", ":", "if", "not", "_lock_exclusive", "and", "add_to_cache", ":", "if", "lock", ":", "lock", ".", "unlock", "(", ")", "return", "self", ".", "get_file", "(", "name", ",", "save_to", ",", "add_to_cache", ",", "_lock_exclusive", "=", "True", ")", "vname", "=", "self", ".", "remote_store", ".", "get_file", "(", "name", ",", "save_to", ")", "if", "add_to_cache", ":", "self", ".", "_add_to_cache", "(", "vname", ",", "save_to", ")", "return", "vname", "raise", "FiletrackerError", "(", "\"File not available: %s\"", "%", "name", ")", "finally", ":", "if", "lock", ":", "lock", ".", "close", "(", ")", "logger", ".", "debug", "(", "' processed %s in %.2fs'", ",", "name", ",", "time", ".", "time", "(", ")", "-", "t", ")" ]
41.322581
0.001525
def usage(path): ''' Show in which disk the chunks are allocated. CLI Example: .. code-block:: bash salt '*' btrfs.usage /your/mountpoint ''' out = __salt__['cmd.run_all']("btrfs filesystem usage {0}".format(path)) salt.utils.fsutils._verify_run(out) ret = {} for section in out['stdout'].split("\n\n"): if section.startswith("Overall:\n"): ret['overall'] = _usage_overall(section) elif section.startswith("Unallocated:\n"): ret['unallocated'] = _usage_unallocated(section) else: ret.update(_usage_specific(section)) return ret
[ "def", "usage", "(", "path", ")", ":", "out", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "\"btrfs filesystem usage {0}\"", ".", "format", "(", "path", ")", ")", "salt", ".", "utils", ".", "fsutils", ".", "_verify_run", "(", "out", ")", "ret", "=", "{", "}", "for", "section", "in", "out", "[", "'stdout'", "]", ".", "split", "(", "\"\\n\\n\"", ")", ":", "if", "section", ".", "startswith", "(", "\"Overall:\\n\"", ")", ":", "ret", "[", "'overall'", "]", "=", "_usage_overall", "(", "section", ")", "elif", "section", ".", "startswith", "(", "\"Unallocated:\\n\"", ")", ":", "ret", "[", "'unallocated'", "]", "=", "_usage_unallocated", "(", "section", ")", "else", ":", "ret", ".", "update", "(", "_usage_specific", "(", "section", ")", ")", "return", "ret" ]
26.869565
0.001563
def ast2str(expr, level=0, names=None): """convert compiled ast to gene_reaction_rule str Parameters ---------- expr : str string for a gene reaction rule, e.g "a and b" level : int internal use only names : dict Dict where each element is a gene identifier and the value is the gene name. Use this to get a rule str which uses names instead. This should be done for display purposes only. All gene_reaction_rule strings which are computed should use the id. Returns ------- string The gene reaction rule """ if isinstance(expr, Expression): return ast2str(expr.body, 0, names) \ if hasattr(expr, "body") else "" elif isinstance(expr, Name): return names.get(expr.id, expr.id) if names else expr.id elif isinstance(expr, BoolOp): op = expr.op if isinstance(op, Or): str_exp = " or ".join(ast2str(i, level + 1, names) for i in expr.values) elif isinstance(op, And): str_exp = " and ".join(ast2str(i, level + 1, names) for i in expr.values) else: raise TypeError("unsupported operation " + op.__class__.__name__) return "(" + str_exp + ")" if level else str_exp elif expr is None: return "" else: raise TypeError("unsupported operation " + repr(expr))
[ "def", "ast2str", "(", "expr", ",", "level", "=", "0", ",", "names", "=", "None", ")", ":", "if", "isinstance", "(", "expr", ",", "Expression", ")", ":", "return", "ast2str", "(", "expr", ".", "body", ",", "0", ",", "names", ")", "if", "hasattr", "(", "expr", ",", "\"body\"", ")", "else", "\"\"", "elif", "isinstance", "(", "expr", ",", "Name", ")", ":", "return", "names", ".", "get", "(", "expr", ".", "id", ",", "expr", ".", "id", ")", "if", "names", "else", "expr", ".", "id", "elif", "isinstance", "(", "expr", ",", "BoolOp", ")", ":", "op", "=", "expr", ".", "op", "if", "isinstance", "(", "op", ",", "Or", ")", ":", "str_exp", "=", "\" or \"", ".", "join", "(", "ast2str", "(", "i", ",", "level", "+", "1", ",", "names", ")", "for", "i", "in", "expr", ".", "values", ")", "elif", "isinstance", "(", "op", ",", "And", ")", ":", "str_exp", "=", "\" and \"", ".", "join", "(", "ast2str", "(", "i", ",", "level", "+", "1", ",", "names", ")", "for", "i", "in", "expr", ".", "values", ")", "else", ":", "raise", "TypeError", "(", "\"unsupported operation \"", "+", "op", ".", "__class__", ".", "__name", ")", "return", "\"(\"", "+", "str_exp", "+", "\")\"", "if", "level", "else", "str_exp", "elif", "expr", "is", "None", ":", "return", "\"\"", "else", ":", "raise", "TypeError", "(", "\"unsupported operation \"", "+", "repr", "(", "expr", ")", ")" ]
35.5
0.000685
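The `ast2str` record above walks a compiled boolean expression and renders it back to a gene-rule string. A minimal, self-contained sketch of the same pattern using only the standard `ast` module (the cobra `Expression`/`Name`/`BoolOp` classes the record assumes are replaced by their `ast` equivalents):

```python
import ast

def rule_to_str(node, level=0):
    # Render a boolean expression AST ("a and b", "x or y") back to a rule string.
    if isinstance(node, ast.Module):
        return rule_to_str(node.body[0].value, level)
    if isinstance(node, ast.Name):
        return node.id
    if isinstance(node, ast.BoolOp):
        joiner = " or " if isinstance(node.op, ast.Or) else " and "
        inner = joiner.join(rule_to_str(v, level + 1) for v in node.values)
        # Parenthesize only nested sub-expressions, as the record does.
        return "(" + inner + ")" if level else inner
    raise TypeError("unsupported node: " + repr(node))

print(rule_to_str(ast.parse("(gene_a and gene_b) or gene_c")))
# -> (gene_a and gene_b) or gene_c
```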
def update_serial(self, new_serial): """Updates the serial number of a device. The "serial number" used with adb's `-s` arg is not necessarily the actual serial number. For remote devices, it could be a combination of host names and port numbers. This is used for when such identifier of remote devices changes during a test. For example, when a remote device reboots, it may come back with a different serial number. This is NOT meant for switching the object to represent another device. We intentionally did not make it a regular setter of the serial property so people don't accidentally call this without understanding the consequences. Args: new_serial: string, the new serial number for the same device. Raises: DeviceError: tries to update serial when any service is running. """ new_serial = str(new_serial) if self.has_active_service: raise DeviceError( self, 'Cannot change device serial number when there is service running.' ) if self._debug_tag == self.serial: self._debug_tag = new_serial self._serial = new_serial self.adb.serial = new_serial self.fastboot.serial = new_serial
[ "def", "update_serial", "(", "self", ",", "new_serial", ")", ":", "new_serial", "=", "str", "(", "new_serial", ")", "if", "self", ".", "has_active_service", ":", "raise", "DeviceError", "(", "self", ",", "'Cannot change device serial number when there is service running.'", ")", "if", "self", ".", "_debug_tag", "==", "self", ".", "serial", ":", "self", ".", "_debug_tag", "=", "new_serial", "self", ".", "_serial", "=", "new_serial", "self", ".", "adb", ".", "serial", "=", "new_serial", "self", ".", "fastboot", ".", "serial", "=", "new_serial" ]
38.735294
0.002222
def find_store_dirs(cls): """ Returns the primary package directory and any additional ones from QUILT_PACKAGE_DIRS. """ store_dirs = [default_store_location()] extra_dirs_str = os.getenv('QUILT_PACKAGE_DIRS') if extra_dirs_str: store_dirs.extend(extra_dirs_str.split(':')) return store_dirs
[ "def", "find_store_dirs", "(", "cls", ")", ":", "store_dirs", "=", "[", "default_store_location", "(", ")", "]", "extra_dirs_str", "=", "os", ".", "getenv", "(", "'QUILT_PACKAGE_DIRS'", ")", "if", "extra_dirs_str", ":", "store_dirs", ".", "extend", "(", "extra_dirs_str", ".", "split", "(", "':'", ")", ")", "return", "store_dirs" ]
39
0.008357
def requests_admin(request, pk): """Table display of each request for a given product. Allows the given Page pk to refer to a direct parent of the ProductVariant model or be the ProductVariant model itself. This allows for the standard longclaw product modelling philosophy where ProductVariant refers to the actual product (in the case where there is only 1 variant) or to be variants of the product page. """ page = Page.objects.get(pk=pk).specific if hasattr(page, 'variants'): requests = ProductRequest.objects.filter( variant__in=page.variants.all() ) else: requests = ProductRequest.objects.filter(variant=page) return render( request, "productrequests/requests_admin.html", {'page': page, 'requests': requests} )
[ "def", "requests_admin", "(", "request", ",", "pk", ")", ":", "page", "=", "Page", ".", "objects", ".", "get", "(", "pk", "=", "pk", ")", ".", "specific", "if", "hasattr", "(", "page", ",", "'variants'", ")", ":", "requests", "=", "ProductRequest", ".", "objects", ".", "filter", "(", "variant__in", "=", "page", ".", "variants", ".", "all", "(", ")", ")", "else", ":", "requests", "=", "ProductRequest", ".", "objects", ".", "filter", "(", "variant", "=", "page", ")", "return", "render", "(", "request", ",", "\"productrequests/requests_admin.html\"", ",", "{", "'page'", ":", "page", ",", "'requests'", ":", "requests", "}", ")" ]
38.47619
0.001208
def get(self, master_id): """ Get a list of revisions by master ID :param master_id: :return: """ collection_name = self.request.headers.get("collection") self.client = BaseAsyncMotorDocument("%s_revisions" % collection_name) limit = self.get_query_argument("limit", 2) add_current_revision = self.get_arg_value_as_type("addCurrent", "false") show_history = self.get_arg_value_as_type("showHistory", "false") objects_processed = [] if isinstance(limit, unicode): limit = int(limit) objects = yield self.client.find({"master_id": master_id, "processed": False}, orderby="toa", order_by_direction=1, page=0, limit=20) # If this is a document that should have a revision and doesn't, we # orchestrate creation of the first one if len(objects) == 0: new_revision = yield self.__lazy_migration(master_id) if not new_revision: return if show_history: objects_processed = yield self.client.find({"master_id": master_id, "processed": True}, orderby="toa", order_by_direction=-1, page=0, limit=limit) elif add_current_revision: objects_processed = yield self.client.find({"master_id": master_id, "processed": True}, orderby="toa", order_by_direction=-1, page=0, limit=1) if len(objects_processed) > 0: objects_processed = objects_processed[::-1] objects_processed[-1]["current"] = True objects = objects_processed + objects self.write({ "count": len(objects), "results": objects })
[ "def", "get", "(", "self", ",", "master_id", ")", ":", "collection_name", "=", "self", ".", "request", ".", "headers", ".", "get", "(", "\"collection\"", ")", "self", ".", "client", "=", "BaseAsyncMotorDocument", "(", "\"%s_revisions\"", "%", "collection_name", ")", "limit", "=", "self", ".", "get_query_argument", "(", "\"limit\"", ",", "2", ")", "add_current_revision", "=", "self", ".", "get_arg_value_as_type", "(", "\"addCurrent\"", ",", "\"false\"", ")", "show_history", "=", "self", ".", "get_arg_value_as_type", "(", "\"showHistory\"", ",", "\"false\"", ")", "objects_processed", "=", "[", "]", "if", "isinstance", "(", "limit", ",", "unicode", ")", ":", "limit", "=", "int", "(", "limit", ")", "objects", "=", "yield", "self", ".", "client", ".", "find", "(", "{", "\"master_id\"", ":", "master_id", ",", "\"processed\"", ":", "False", "}", ",", "orderby", "=", "\"toa\"", ",", "order_by_direction", "=", "1", ",", "page", "=", "0", ",", "limit", "=", "20", ")", "# If this is a document that should have a revision and doesn't we", "# orchestratioin creation of the first one", "if", "len", "(", "objects", ")", "==", "0", ":", "new_revision", "=", "yield", "self", ".", "__lazy_migration", "(", "master_id", ")", "if", "not", "new_revision", ":", "return", "if", "show_history", ":", "objects_processed", "=", "yield", "self", ".", "client", ".", "find", "(", "{", "\"master_id\"", ":", "master_id", ",", "\"processed\"", ":", "True", "}", ",", "orderby", "=", "\"toa\"", ",", "order_by_direction", "=", "-", "1", ",", "page", "=", "0", ",", "limit", "=", "limit", ")", "elif", "add_current_revision", ":", "objects_processed", "=", "yield", "self", ".", "client", ".", "find", "(", "{", "\"master_id\"", ":", "master_id", ",", "\"processed\"", ":", "True", "}", ",", "orderby", "=", "\"toa\"", ",", "order_by_direction", "=", "-", "1", ",", "page", "=", "0", ",", "limit", "=", "1", ")", "if", "len", "(", "objects_processed", ")", ">", "0", ":", "objects_processed", "=", "objects_processed", "[", ":", ":", "-", "1", "]", "objects_processed", "[", "-", "1", "]", "[", "\"current\"", "]", "=", "True", "objects", "=", "objects_processed", "+", "objects", "self", ".", "write", "(", "{", "\"count\"", ":", "len", "(", "objects", ")", ",", "\"results\"", ":", "objects", "}", ")" ]
40.766667
0.000798
def now(years=0, days=0, hours=0, minutes=0, seconds=0): """ :param years: int delta of years from now :param days: int delta of days from now :param hours: int delta of hours from now :param minutes: int delta of minutes from now :param seconds: float delta of seconds from now :return: str of the now timestamp """ date_time = datetime.utcnow() date_time += timedelta(days=days + years * 365, hours=hours, minutes=minutes, seconds=seconds) return datetime_to_str(date_time)
[ "def", "now", "(", "years", "=", "0", ",", "days", "=", "0", ",", "hours", "=", "0", ",", "minutes", "=", "0", ",", "seconds", "=", "0", ")", ":", "date_time", "=", "datetime", ".", "utcnow", "(", ")", "date_time", "+=", "timedelta", "(", "days", "=", "days", "+", "years", "*", "365", ",", "hours", "=", "hours", ",", "minutes", "=", "minutes", ",", "seconds", "=", "seconds", ")", "return", "datetime_to_str", "(", "date_time", ")" ]
42.153846
0.001786
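The `now` record above simply shifts `datetime.utcnow()` by the requested offsets before serializing. A minimal standard-library sketch of the same arithmetic; the record's `datetime_to_str` helper is not shown in this dump, so `isoformat()` stands in for it here:

```python
from datetime import datetime, timedelta

def now(years=0, days=0, hours=0, minutes=0, seconds=0):
    # Offset the current UTC time; years are approximated as 365 days,
    # exactly as in the record above.
    moment = datetime.utcnow() + timedelta(
        days=days + years * 365, hours=hours, minutes=minutes, seconds=seconds)
    return moment.isoformat()

print(now(minutes=30))  # timestamp 30 minutes ahead of the current UTC time
```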
def set_tile(self, codepoint: int, tile: np.array) -> None: """Upload a tile into this array. The tile can be in 32-bit color (height, width, rgba), or grey-scale (height, width). The tile should have a dtype of ``np.uint8``. This data may need to be sent to graphics card memory, this is a slow operation. """ tile = np.ascontiguousarray(tile, dtype=np.uint8) if tile.shape == self.tile_shape: full_tile = np.empty(self.tile_shape + (4,), dtype=np.uint8) full_tile[:, :, :3] = 255 full_tile[:, :, 3] = tile return self.set_tile(codepoint, full_tile) required = self.tile_shape + (4,) if tile.shape != required: raise ValueError( "Tile shape must be %r or %r, got %r." % (required, self.tile_shape, tile.shape) ) lib.TCOD_tileset_set_tile_( self._tileset_p, codepoint, ffi.cast("struct TCOD_ColorRGBA*", tile.ctypes.data), )
[ "def", "set_tile", "(", "self", ",", "codepoint", ":", "int", ",", "tile", ":", "np", ".", "array", ")", "->", "None", ":", "tile", "=", "np", ".", "ascontiguousarray", "(", "tile", ",", "dtype", "=", "np", ".", "uint8", ")", "if", "tile", ".", "shape", "==", "self", ".", "tile_shape", ":", "full_tile", "=", "np", ".", "empty", "(", "self", ".", "tile_shape", "+", "(", "4", ",", ")", ",", "dtype", "=", "np", ".", "uint8", ")", "full_tile", "[", ":", ",", ":", ",", ":", "3", "]", "=", "255", "full_tile", "[", ":", ",", ":", ",", "3", "]", "=", "tile", "return", "self", ".", "set_tile", "(", "codepoint", ",", "full_tile", ")", "required", "=", "self", ".", "tile_shape", "+", "(", "4", ",", ")", "if", "tile", ".", "shape", "!=", "required", ":", "raise", "ValueError", "(", "\"Tile shape must be %r or %r, got %r.\"", "%", "(", "required", ",", "self", ".", "tile_shape", ",", "tile", ".", "shape", ")", ")", "lib", ".", "TCOD_tileset_set_tile_", "(", "self", ".", "_tileset_p", ",", "codepoint", ",", "ffi", ".", "cast", "(", "\"struct TCOD_ColorRGBA*\"", ",", "tile", ".", "ctypes", ".", "data", ")", ",", ")" ]
39.923077
0.001881
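The `set_tile` record above promotes a grey-scale `(height, width)` tile to `(height, width, 4)` RGBA before upload: RGB is filled with 255 and the grey values become the alpha channel. A NumPy-only sketch of that promotion step (the tcod upload call itself is omitted):

```python
import numpy as np

def to_rgba(tile: np.ndarray) -> np.ndarray:
    # Expand a (H, W) uint8 grey-scale tile into (H, W, 4) RGBA,
    # mirroring the conversion done inside set_tile above.
    tile = np.ascontiguousarray(tile, dtype=np.uint8)
    full_tile = np.empty(tile.shape + (4,), dtype=np.uint8)
    full_tile[:, :, :3] = 255   # white foreground colour
    full_tile[:, :, 3] = tile   # grey value becomes the alpha channel
    return full_tile

glyph = np.zeros((8, 8), dtype=np.uint8)
glyph[2:6, 2:6] = 255           # a small solid square
print(to_rgba(glyph).shape)     # (8, 8, 4)
```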
def _SMOTE(T, N, k, h = 1.0): """ Returns (N/100) * n_minority_samples synthetic minority samples. Parameters ---------- T : array-like, shape = [n_minority_samples, n_features] Holds the minority samples N : percentage of new synthetic samples: n_synthetic_samples = N/100 * n_minority_samples. Can be < 100. k : int. Number of nearest neighbours. Returns ------- S : Synthetic samples. array, shape = [(N/100) * n_minority_samples, n_features]. """ n_minority_samples, n_features = T.shape if N < 100: #create synthetic samples only for a subset of T. #TODO: select random minority samples N = 100 pass if (N % 100) != 0: raise ValueError("N must be < 100 or multiple of 100") N = N/100 n_synthetic_samples = N * n_minority_samples S = np.zeros(shape=(n_synthetic_samples, n_features)) #Learn nearest neighbours neigh = NearestNeighbors(n_neighbors = k) neigh.fit(T) #Calculate synthetic samples for i in range(n_minority_samples): nn = neigh.kneighbors(T[i], return_distance=False) for n in range(int(N)): nn_index = choice(nn[0]) #NOTE: nn includes T[i], we don't want to select it while nn_index == i: nn_index = choice(nn[0]) dif = T[nn_index] - T[i] gap = np.random.uniform(low = 0.0, high = h) S[n + i * N, :] = T[i,:] + gap * dif[:] return S
[ "def", "_SMOTE", "(", "T", ",", "N", ",", "k", ",", "h", "=", "1.0", ")", ":", "n_minority_samples", ",", "n_features", "=", "T", ".", "shape", "if", "N", "<", "100", ":", "#create synthetic samples only for a subset of T.", "#TODO: select random minortiy samples", "N", "=", "100", "pass", "if", "(", "N", "%", "100", ")", "!=", "0", ":", "raise", "ValueError", "(", "\"N must be < 100 or multiple of 100\"", ")", "N", "=", "N", "/", "100", "n_synthetic_samples", "=", "N", "*", "n_minority_samples", "S", "=", "np", ".", "zeros", "(", "shape", "=", "(", "n_synthetic_samples", ",", "n_features", ")", ")", "#Learn nearest neighbours", "neigh", "=", "NearestNeighbors", "(", "n_neighbors", "=", "k", ")", "neigh", ".", "fit", "(", "T", ")", "#Calculate synthetic samples", "for", "i", "in", "range", "(", "n_minority_samples", ")", ":", "nn", "=", "neigh", ".", "kneighbors", "(", "T", "[", "i", "]", ",", "return_distance", "=", "False", ")", "for", "n", "in", "range", "(", "int", "(", "N", ")", ")", ":", "nn_index", "=", "choice", "(", "nn", "[", "0", "]", ")", "#NOTE: nn includes T[i], we don't want to select it", "while", "nn_index", "==", "i", ":", "nn_index", "=", "choice", "(", "nn", "[", "0", "]", ")", "dif", "=", "T", "[", "nn_index", "]", "-", "T", "[", "i", "]", "gap", "=", "np", ".", "random", ".", "uniform", "(", "low", "=", "0.0", ",", "high", "=", "h", ")", "S", "[", "n", "+", "i", "*", "N", ",", ":", "]", "=", "T", "[", "i", ",", ":", "]", "+", "gap", "*", "dif", "[", ":", "]", "return", "S" ]
29.4
0.009875
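The core of the `_SMOTE` record above is linear interpolation between a minority sample and one of its nearest neighbours, `synthetic = x + gap * (neighbour - x)` with `gap ~ U(0, h)`. A NumPy-only sketch of that single step (the scikit-learn nearest-neighbour search is left out):

```python
import numpy as np

rng = np.random.default_rng(0)

x = np.array([1.0, 2.0])           # a minority-class sample
neighbour = np.array([2.0, 4.0])   # one of its k nearest minority neighbours

gap = rng.uniform(0.0, 1.0)        # h = 1.0, as in the record's default
synthetic = x + gap * (neighbour - x)

print(synthetic)  # lies on the segment between x and neighbour
```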
def infographic_people_section_notes_extractor( impact_report, component_metadata): """Extracting notes for people section in the infographic. :param impact_report: the impact report that acts as a proxy to fetch all the data that extractor needed :type impact_report: safe.report.impact_report.ImpactReport :param component_metadata: the component metadata. Used to obtain information about the component we want to render :type component_metadata: safe.report.report_metadata. ReportComponentsMetadata :return: context for rendering phase :rtype: dict .. versionadded:: 4.2 """ extra_args = component_metadata.extra_args provenance = impact_report.impact_function.provenance hazard_keywords = provenance['hazard_keywords'] exposure_keywords = provenance['exposure_keywords'] context = {} context['notes'] = [] note = { 'title': None, 'description': resolve_from_dictionary(extra_args, 'extra_note'), 'citations': None } context['notes'].append(note) concept_keys = ['affected_people', 'displaced_people'] for key in concept_keys: note = { 'title': concepts[key].get('name'), 'description': concepts[key].get('description'), 'citations': concepts[key].get('citations')[0]['text'] } context['notes'].append(note) hazard_classification = definition( active_classification(hazard_keywords, exposure_keywords['exposure'])) # generate rate description displacement_rates_note_format = resolve_from_dictionary( extra_args, 'hazard_displacement_rates_note_format') displacement_rates_note = [] for hazard_class in hazard_classification['classes']: hazard_class['classification_unit'] = ( hazard_classification['classification_unit']) displacement_rates_note.append( displacement_rates_note_format.format(**hazard_class)) rate_description = ', '.join(displacement_rates_note) note = { 'title': concepts['displacement_rate'].get('name'), 'description': rate_description, 'citations': concepts['displacement_rate'].get('citations')[0]['text'] } context['notes'].append(note) return context
[ "def", "infographic_people_section_notes_extractor", "(", "impact_report", ",", "component_metadata", ")", ":", "extra_args", "=", "component_metadata", ".", "extra_args", "provenance", "=", "impact_report", ".", "impact_function", ".", "provenance", "hazard_keywords", "=", "provenance", "[", "'hazard_keywords'", "]", "exposure_keywords", "=", "provenance", "[", "'exposure_keywords'", "]", "context", "=", "{", "}", "context", "[", "'notes'", "]", "=", "[", "]", "note", "=", "{", "'title'", ":", "None", ",", "'description'", ":", "resolve_from_dictionary", "(", "extra_args", ",", "'extra_note'", ")", ",", "'citations'", ":", "None", "}", "context", "[", "'notes'", "]", ".", "append", "(", "note", ")", "concept_keys", "=", "[", "'affected_people'", ",", "'displaced_people'", "]", "for", "key", "in", "concept_keys", ":", "note", "=", "{", "'title'", ":", "concepts", "[", "key", "]", ".", "get", "(", "'name'", ")", ",", "'description'", ":", "concepts", "[", "key", "]", ".", "get", "(", "'description'", ")", ",", "'citations'", ":", "concepts", "[", "key", "]", ".", "get", "(", "'citations'", ")", "[", "0", "]", "[", "'text'", "]", "}", "context", "[", "'notes'", "]", ".", "append", "(", "note", ")", "hazard_classification", "=", "definition", "(", "active_classification", "(", "hazard_keywords", ",", "exposure_keywords", "[", "'exposure'", "]", ")", ")", "# generate rate description", "displacement_rates_note_format", "=", "resolve_from_dictionary", "(", "extra_args", ",", "'hazard_displacement_rates_note_format'", ")", "displacement_rates_note", "=", "[", "]", "for", "hazard_class", "in", "hazard_classification", "[", "'classes'", "]", ":", "hazard_class", "[", "'classification_unit'", "]", "=", "(", "hazard_classification", "[", "'classification_unit'", "]", ")", "displacement_rates_note", ".", "append", "(", "displacement_rates_note_format", ".", "format", "(", "*", "*", "hazard_class", ")", ")", "rate_description", "=", "', '", ".", "join", "(", "displacement_rates_note", ")", "note", "=", "{", "'title'", ":", "concepts", "[", "'displacement_rate'", "]", ".", "get", "(", "'name'", ")", ",", "'description'", ":", "rate_description", ",", "'citations'", ":", "concepts", "[", "'displacement_rate'", "]", ".", "get", "(", "'citations'", ")", "[", "0", "]", "[", "'text'", "]", "}", "context", "[", "'notes'", "]", ".", "append", "(", "note", ")", "return", "context" ]
34
0.000433
def _filter(self, value): """ Predicate used to exclude (False) or include (True) a computed value. """ if self.ignores and value in self.ignores: return False return True
[ "def", "_filter", "(", "self", ",", "value", ")", ":", "if", "self", ".", "ignores", "and", "value", "in", "self", ".", "ignores", ":", "return", "False", "return", "True" ]
31
0.008969
def x_build_action( self, node ): ''' Given a build action log, process into the corresponding test log and specific test log sub-part. ''' action_node = node name = self.get_child(action_node,tag='name') if name: name = self.get_data(name) #~ Based on the action, we decide what sub-section the log #~ should go into. action_type = None if re.match('[^%]+%[^.]+[.](compile)',name): action_type = 'compile' elif re.match('[^%]+%[^.]+[.](link|archive)',name): action_type = 'link' elif re.match('[^%]+%testing[.](capture-output)',name): action_type = 'run' elif re.match('[^%]+%testing[.](expect-failure|expect-success)',name): action_type = 'result' else: # TODO: Enable to see what other actions can be included in the test results. # action_type = None action_type = 'other' #~ print "+ [%s] %s %s :: %s" %(action_type,name,'','') if action_type: #~ Get the corresponding test. (target,test) = self.get_test(action_node,type=action_type) #~ Skip action that have no corresponding test as they are #~ regular build actions and don't need to show up in the #~ regression results. if not test: ##print "??? [%s] %s %s :: %s" %(action_type,name,target,test) return None ##print "+++ [%s] %s %s :: %s" %(action_type,name,target,test) #~ Collect some basic info about the action. action = { 'command' : self.get_action_command(action_node,action_type), 'output' : self.get_action_output(action_node,action_type), 'info' : self.get_action_info(action_node,action_type) } #~ For the test result status we find the appropriate node #~ based on the type of test. Then adjust the result status #~ accordingly. This makes the result status reflect the #~ expectation as the result pages post processing does not #~ account for this inversion. action['type'] = action_type if action_type == 'result': if re.match(r'^compile',test['test-type']): action['type'] = 'compile' elif re.match(r'^link',test['test-type']): action['type'] = 'link' elif re.match(r'^run',test['test-type']): action['type'] = 'run' #~ The result sub-part we will add this result to. if action_node.getAttribute('status') == '0': action['result'] = 'succeed' else: action['result'] = 'fail' # Add the action to the test. test['actions'].append(action) # Set the test result if this is the result action for the test. if action_type == 'result': test['result'] = action['result'] return None
[ "def", "x_build_action", "(", "self", ",", "node", ")", ":", "action_node", "=", "node", "name", "=", "self", ".", "get_child", "(", "action_node", ",", "tag", "=", "'name'", ")", "if", "name", ":", "name", "=", "self", ".", "get_data", "(", "name", ")", "#~ Based on the action, we decide what sub-section the log", "#~ should go into.", "action_type", "=", "None", "if", "re", ".", "match", "(", "'[^%]+%[^.]+[.](compile)'", ",", "name", ")", ":", "action_type", "=", "'compile'", "elif", "re", ".", "match", "(", "'[^%]+%[^.]+[.](link|archive)'", ",", "name", ")", ":", "action_type", "=", "'link'", "elif", "re", ".", "match", "(", "'[^%]+%testing[.](capture-output)'", ",", "name", ")", ":", "action_type", "=", "'run'", "elif", "re", ".", "match", "(", "'[^%]+%testing[.](expect-failure|expect-success)'", ",", "name", ")", ":", "action_type", "=", "'result'", "else", ":", "# TODO: Enable to see what other actions can be included in the test results.", "# action_type = None", "action_type", "=", "'other'", "#~ print \"+ [%s] %s %s :: %s\" %(action_type,name,'','')", "if", "action_type", ":", "#~ Get the corresponding test.", "(", "target", ",", "test", ")", "=", "self", ".", "get_test", "(", "action_node", ",", "type", "=", "action_type", ")", "#~ Skip action that have no corresponding test as they are", "#~ regular build actions and don't need to show up in the", "#~ regression results.", "if", "not", "test", ":", "##print \"??? [%s] %s %s :: %s\" %(action_type,name,target,test)", "return", "None", "##print \"+++ [%s] %s %s :: %s\" %(action_type,name,target,test)", "#~ Collect some basic info about the action.", "action", "=", "{", "'command'", ":", "self", ".", "get_action_command", "(", "action_node", ",", "action_type", ")", ",", "'output'", ":", "self", ".", "get_action_output", "(", "action_node", ",", "action_type", ")", ",", "'info'", ":", "self", ".", "get_action_info", "(", "action_node", ",", "action_type", ")", "}", "#~ For the test result status we find the appropriate node", "#~ based on the type of test. Then adjust the result status", "#~ accordingly. This makes the result status reflect the", "#~ expectation as the result pages post processing does not", "#~ account for this inversion.", "action", "[", "'type'", "]", "=", "action_type", "if", "action_type", "==", "'result'", ":", "if", "re", ".", "match", "(", "r'^compile'", ",", "test", "[", "'test-type'", "]", ")", ":", "action", "[", "'type'", "]", "=", "'compile'", "elif", "re", ".", "match", "(", "r'^link'", ",", "test", "[", "'test-type'", "]", ")", ":", "action", "[", "'type'", "]", "=", "'link'", "elif", "re", ".", "match", "(", "r'^run'", ",", "test", "[", "'test-type'", "]", ")", ":", "action", "[", "'type'", "]", "=", "'run'", "#~ The result sub-part we will add this result to.", "if", "action_node", ".", "getAttribute", "(", "'status'", ")", "==", "'0'", ":", "action", "[", "'result'", "]", "=", "'succeed'", "else", ":", "action", "[", "'result'", "]", "=", "'fail'", "# Add the action to the test.", "test", "[", "'actions'", "]", ".", "append", "(", "action", ")", "# Set the test result if this is the result action for the test.", "if", "action_type", "==", "'result'", ":", "test", "[", "'result'", "]", "=", "action", "[", "'result'", "]", "return", "None" ]
50.4
0.012275
def generateBatches(tasks, givens): """ A function to generate a batch of commands to run in a specific order as to meet all the dependencies for each command. For example, the commands with no dependencies are run first, and the commands with the most deep dependencies are run last """ _removeGivensFromTasks(tasks, givens) batches = [] while tasks: batch = set() for task, dependencies in tasks.items(): if not dependencies: batch.add(task) if not batch: _batchErrorProcessing(tasks) for task in batch: del tasks[task] for task, dependencies in tasks.items(): for item in batch: if item in dependencies: tasks[task].remove(item) batches.append(batch) return batches
[ "def", "generateBatches", "(", "tasks", ",", "givens", ")", ":", "_removeGivensFromTasks", "(", "tasks", ",", "givens", ")", "batches", "=", "[", "]", "while", "tasks", ":", "batch", "=", "set", "(", ")", "for", "task", ",", "dependencies", "in", "tasks", ".", "items", "(", ")", ":", "if", "not", "dependencies", ":", "batch", ".", "add", "(", "task", ")", "if", "not", "batch", ":", "_batchErrorProcessing", "(", "tasks", ")", "for", "task", "in", "batch", ":", "del", "tasks", "[", "task", "]", "for", "task", ",", "dependencies", "in", "tasks", ".", "items", "(", ")", ":", "for", "item", "in", "batch", ":", "if", "item", "in", "dependencies", ":", "tasks", "[", "task", "]", ".", "remove", "(", "item", ")", "batches", ".", "append", "(", "batch", ")", "return", "batches" ]
28.724138
0.001161
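`generateBatches` above is a plain topological layering: repeatedly emit every task whose dependency set is empty, then remove those tasks from the remaining dependency sets. A small self-contained run of that loop on a hypothetical task dict:

```python
# Tasks mapped to the set of tasks they depend on (hypothetical example).
tasks = {"deploy": {"build", "test"}, "test": {"build"}, "build": set()}

batches = []
while tasks:
    # Every task with no unmet dependencies can run in this batch.
    batch = {t for t, deps in tasks.items() if not deps}
    if not batch:
        raise ValueError("circular dependency detected")
    for t in batch:
        del tasks[t]
    for deps in tasks.values():
        deps -= batch
    batches.append(batch)

print(batches)  # [{'build'}, {'test'}, {'deploy'}]
```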
def _extract_object_params(self, name): """ Extract object params, return as dict """ params = self.request.query_params.lists() params_map = {} prefix = name[:-1] offset = len(prefix) for name, value in params: if name.startswith(prefix): if name.endswith('}'): name = name[offset:-1] elif name.endswith('}[]'): # strip off trailing [] # this fixes an Ember queryparams issue name = name[offset:-3] else: # malformed argument like: # filter{foo=bar raise exceptions.ParseError( '"%s" is not a well-formed filter key.' % name ) else: continue params_map[name] = value return params_map
[ "def", "_extract_object_params", "(", "self", ",", "name", ")", ":", "params", "=", "self", ".", "request", ".", "query_params", ".", "lists", "(", ")", "params_map", "=", "{", "}", "prefix", "=", "name", "[", ":", "-", "1", "]", "offset", "=", "len", "(", "prefix", ")", "for", "name", ",", "value", "in", "params", ":", "if", "name", ".", "startswith", "(", "prefix", ")", ":", "if", "name", ".", "endswith", "(", "'}'", ")", ":", "name", "=", "name", "[", "offset", ":", "-", "1", "]", "elif", "name", ".", "endswith", "(", "'}[]'", ")", ":", "# strip off trailing []", "# this fixes an Ember queryparams issue", "name", "=", "name", "[", "offset", ":", "-", "3", "]", "else", ":", "# malformed argument like:", "# filter{foo=bar", "raise", "exceptions", ".", "ParseError", "(", "'\"%s\" is not a well-formed filter key.'", "%", "name", ")", "else", ":", "continue", "params_map", "[", "name", "]", "=", "value", "return", "params_map" ]
32.714286
0.002121
def load_config(self, conf_path): """ Load config from an ``andes.conf`` file. This function creates a ``configparser.ConfigParser`` object to read the specified conf file and calls the ``load_config`` function of the config instances of the system and the routines. Parameters ---------- conf_path : None or str Path to the Andes config file. If ``None``, the function body will not run. Returns ------- None """ if conf_path is None: return conf = configparser.ConfigParser() conf.read(conf_path) self.config.load_config(conf) for r in routines.__all__: self.__dict__[r.lower()].config.load_config(conf) logger.debug('Loaded config file from {}.'.format(conf_path))
[ "def", "load_config", "(", "self", ",", "conf_path", ")", ":", "if", "conf_path", "is", "None", ":", "return", "conf", "=", "configparser", ".", "ConfigParser", "(", ")", "conf", ".", "read", "(", "conf_path", ")", "self", ".", "config", ".", "load_config", "(", "conf", ")", "for", "r", "in", "routines", ".", "__all__", ":", "self", ".", "__dict__", "[", "r", ".", "lower", "(", ")", "]", ".", "config", ".", "load_config", "(", "conf", ")", "logger", ".", "debug", "(", "'Loaded config file from {}.'", ".", "format", "(", "conf_path", ")", ")" ]
28.862069
0.002312
def format_currency(number, currency, format, locale=babel.numbers.LC_NUMERIC, force_frac=None, format_type='standard'): """Same as ``babel.numbers.format_currency``, but has ``force_frac`` argument instead of ``currency_digits``. If the ``force_frac`` argument is given, the argument is passed down to ``pattern.apply``. """ locale = babel.core.Locale.parse(locale) if format: pattern = babel.numbers.parse_pattern(format) else: try: pattern = locale.currency_formats[format_type] except KeyError: raise babel.numbers.UnknownCurrencyFormatError( "%r is not a known currency format type" % format_type) if force_frac is None: fractions = babel.core.get_global('currency_fractions') try: digits = fractions[currency][0] except KeyError: digits = fractions['DEFAULT'][0] frac = (digits, digits) else: frac = force_frac return pattern.apply(number, locale, currency=currency, force_frac=frac)
[ "def", "format_currency", "(", "number", ",", "currency", ",", "format", ",", "locale", "=", "babel", ".", "numbers", ".", "LC_NUMERIC", ",", "force_frac", "=", "None", ",", "format_type", "=", "'standard'", ")", ":", "locale", "=", "babel", ".", "core", ".", "Locale", ".", "parse", "(", "locale", ")", "if", "format", ":", "pattern", "=", "babel", ".", "numbers", ".", "parse_pattern", "(", "format", ")", "else", ":", "try", ":", "pattern", "=", "locale", ".", "currency_formats", "[", "format_type", "]", "except", "KeyError", ":", "raise", "babel", ".", "numbers", ".", "UnknownCurrencyFormatError", "(", "\"%r is not a known currency format type\"", "%", "format_type", ")", "if", "force_frac", "is", "None", ":", "fractions", "=", "babel", ".", "core", ".", "get_global", "(", "'currency_fractions'", ")", "try", ":", "digits", "=", "fractions", "[", "currency", "]", "[", "0", "]", "except", "KeyError", ":", "digits", "=", "fractions", "[", "'DEFAULT'", "]", "[", "0", "]", "frac", "=", "(", "digits", ",", "digits", ")", "else", ":", "frac", "=", "force_frac", "return", "pattern", ".", "apply", "(", "number", ",", "locale", ",", "currency", "=", "currency", ",", "force_frac", "=", "frac", ")" ]
39.111111
0.000924
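The `format_currency` record above is a thin wrapper over Babel's pattern machinery that replaces `currency_digits` with a `force_frac` override. A hedged usage sketch of the underlying public Babel API (requires the `babel` package; the printed values are illustrative for the en_US locale):

```python
from babel.numbers import format_currency

# Standard formatting with the locale's default fraction digits.
print(format_currency(1099.5, "USD", locale="en_US"))   # e.g. $1,099.50

# The wrapper in the record exists to force a fixed number of fraction
# digits even when the currency normally uses a different precision.
print(format_currency(1099.5, "JPY", locale="en_US"))   # e.g. ¥1,100 (JPY uses 0 digits)
```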
def build_a(self): """Calculates the total absorption from water, phytoplankton and CDOM a = awater + acdom + aphi """ lg.info('Building total absorption') self.a = self.a_water + self.a_cdom + self.a_phi
[ "def", "build_a", "(", "self", ")", ":", "lg", ".", "info", "(", "'Building total absorption'", ")", "self", ".", "a", "=", "self", ".", "a_water", "+", "self", ".", "a_cdom", "+", "self", ".", "a_phi" ]
34.142857
0.008163
def load_signal(signal_handler, get_header=False): """ ----- Brief ----- Function that returns a dictionary with the data contained inside 'signal_name' file (stored in the biosignalsnotebooks signal samples directory). ----------- Description ----------- Biosignalsnotebooks library provides data samples in order to the users that are new to biosignals data handling to have a place to start without the need to acquire new data. This sample files are stored in the folder _signal_samples inside the library. This function returns the data from the selected sample. ---------- Parameters ---------- signal_name : file name or url Name that identifies the signal sample to be loaded or a url. Possible values: [ecg_4000_Hz] ================= ============== Signal Type ECG Acquisition Time 00:12.4 Sample Rate 4000 Hz Number of Channels 1 Conditions At Rest ================= ============== [ecg_5_min] ================= ============== Signal Type ECG Acquisition Time 05:00.0 Sample Rate 1000 Hz Number of Channels 1 Conditions At Rest ================= ============== [ecg_sample] ================= ============== Signal Type ECG Acquisition Time 00:11.9 Sample Rate 200 Hz Number of Channels 1 Conditions At Rest ================= ============== [ecg_20_sec_10_Hz] ================= ============== Signal Type ECG Acquisition Time 00:20.0 Sample Rate 10 Hz Number of Channels 1 Conditions At Rest using Lead II ================= ============== [ecg_20_sec_100_Hz] ================= ============== Signal Type ECG Acquisition Time 00:19.7 Sample Rate 100 Hz Number of Channels 1 Conditions At Rest using Lead II ================= ============== [ecg_20_sec_1000_Hz] ================= ============== Signal Type ECG Acquisition Time 00:20.4 Sample Rate 1000 Hz Number of Channels 1 Conditions At Rest using Lead II ================= ============== [emg_bursts] ================= ============== Signal Type EMG Muscle Biceps Brachii Acquisition Time 00:28.5 Sample Rate 1000 Hz Number of Channels 1 Conditions Cyclic Contraction ================= ============== [emg_fatigue] ================= ============== Signal Type EMG Muscle Biceps Brachii Acquisition Time 02:06.9 Sample Rate 1000 Hz Number of Channels 1 Conditions Cyclic Flexion and Extension for fatigue induction ================= ============== [temp_res_8_16] ================= ============== Signal Type Temperature Acquisition Time 03:53.1 Sample Rate 1000 Hz Number of Channels 2 Resolutions 8 and 16 bits Conditions Temperature increase and decrease ================= ============== [bvp_sample] ================= ============== Signal Type BVP Acquisition Time 00:27.3 Sample Rate 1000 Hz Number of Channels 1 Conditions At Rest ================= ============== get_header : boolean If True the file header will be returned as one of the function outputs. Returns ------- out : dict A dictionary with the data stored inside the file specified in the input 'signal_name'. header : dict Metadata of the acquisition file (includes sampling rate, resolution, used device...) """ available_signals = ["ecg_4000_Hz", "ecg_5_min", "ecg_sample", "ecg_20_sec_10_Hz", "ecg_20_sec_100_Hz", "ecg_20_sec_1000_Hz", "emg_bursts", "emg_fatigue", "temp_res_8_16", "bvp_sample"] # Check if signal_handler is a url. # [Statements to be executed if signal_handler is a url] if any(mark in signal_handler for mark in ["http://", "https://", "www.", ".pt", ".com", ".org", ".net"]): # Check if it is a Google Drive sharable link. 
if "drive.google" in signal_handler: signal_handler = _generate_download_google_link(signal_handler) # Load file. out, header = load(signal_handler, remote=True, get_header=True, signal_sample=True) # [Statements to be executed if signal_handler is an identifier of the signal] else: if signal_handler in available_signals: out, header = load(SIGNAL_PATH + signal_handler + FILE_EXTENSION, get_header=True, signal_sample=True) else: raise RuntimeError("The signal name defined as input does not correspond to any of the " "signal samples contained in the package.") if get_header is True: return out, header else: return out
[ "def", "load_signal", "(", "signal_handler", ",", "get_header", "=", "False", ")", ":", "available_signals", "=", "[", "\"ecg_4000_Hz\"", ",", "\"ecg_5_min\"", ",", "\"ecg_sample\"", ",", "\"ecg_20_sec_10_Hz\"", ",", "\"ecg_20_sec_100_Hz\"", ",", "\"ecg_20_sec_1000_Hz\"", ",", "\"emg_bursts\"", ",", "\"emg_fatigue\"", ",", "\"temp_res_8_16\"", ",", "\"bvp_sample\"", "]", "# Check if signal_handler is a url.", "# [Statements to be executed if signal_handler is a url]", "if", "any", "(", "mark", "in", "signal_handler", "for", "mark", "in", "[", "\"http://\"", ",", "\"https://\"", ",", "\"www.\"", ",", "\".pt\"", ",", "\".com\"", ",", "\".org\"", ",", "\".net\"", "]", ")", ":", "# Check if it is a Google Drive sharable link.", "if", "\"drive.google\"", "in", "signal_handler", ":", "signal_handler", "=", "_generate_download_google_link", "(", "signal_handler", ")", "# Load file.", "out", ",", "header", "=", "load", "(", "signal_handler", ",", "remote", "=", "True", ",", "get_header", "=", "True", ",", "signal_sample", "=", "True", ")", "# [Statements to be executed if signal_handler is an identifier of the signal]", "else", ":", "if", "signal_handler", "in", "available_signals", ":", "out", ",", "header", "=", "load", "(", "SIGNAL_PATH", "+", "signal_handler", "+", "FILE_EXTENSION", ",", "get_header", "=", "True", ",", "signal_sample", "=", "True", ")", "else", ":", "raise", "RuntimeError", "(", "\"The signal name defined as input does not correspond to any of the \"", "\"signal samples contained in the package.\"", ")", "if", "get_header", "is", "True", ":", "return", "out", ",", "header", "else", ":", "return", "out" ]
36.757576
0.002408
def get_field_kwargs(field_name, model_field): """ Creates a default instance of a basic non-relational field. """ kwargs = {} validator_kwarg = list(model_field.validators) # The following will only be used by ModelField classes. # Gets removed for everything else. kwargs['model_field'] = model_field if model_field.verbose_name and needs_label(model_field, field_name): kwargs['label'] = capfirst(model_field.verbose_name) if model_field.help_text: kwargs['help_text'] = model_field.help_text max_digits = getattr(model_field, 'max_digits', None) if max_digits is not None: kwargs['max_digits'] = max_digits decimal_places = getattr(model_field, 'decimal_places', None) if decimal_places is not None: kwargs['decimal_places'] = decimal_places if isinstance(model_field, models.TextField): kwargs['style'] = {'base_template': 'textarea.html'} if isinstance(model_field, models.AutoField) or not model_field.editable: # If this field is read-only, then return early. # Further keyword arguments are not valid. kwargs['read_only'] = True return kwargs if model_field.has_default() or model_field.blank or model_field.null: kwargs['required'] = False if model_field.null and not isinstance(model_field, models.NullBooleanField): kwargs['allow_null'] = True if model_field.blank and (isinstance(model_field, models.CharField) or isinstance(model_field, models.TextField)): kwargs['allow_blank'] = True if isinstance(model_field, models.FilePathField): kwargs['path'] = model_field.path if model_field.match is not None: kwargs['match'] = model_field.match if model_field.recursive is not False: kwargs['recursive'] = model_field.recursive if model_field.allow_files is not True: kwargs['allow_files'] = model_field.allow_files if model_field.allow_folders is not False: kwargs['allow_folders'] = model_field.allow_folders if model_field.choices: # If this model field contains choices, then return early. # Further keyword arguments are not valid. kwargs['choices'] = model_field.choices return kwargs # Our decimal validation is handled in the field code, not validator code. # (In Django 1.9+ this differs from previous style) if isinstance(model_field, models.DecimalField): validator_kwarg = [ validator for validator in validator_kwarg if DecimalValidator and not isinstance(validator, DecimalValidator) ] # Ensure that max_length is passed explicitly as a keyword arg, # rather than as a validator. max_length = getattr(model_field, 'max_length', None) if max_length is not None and (isinstance(model_field, models.CharField) or isinstance(model_field, models.TextField)): kwargs['max_length'] = max_length validator_kwarg = [ validator for validator in validator_kwarg if not isinstance(validator, validators.MaxLengthValidator) ] # Ensure that min_length is passed explicitly as a keyword arg, # rather than as a validator. min_length = next(( validator.limit_value for validator in validator_kwarg if isinstance(validator, validators.MinLengthValidator) ), None) if min_length is not None and isinstance(model_field, models.CharField): kwargs['min_length'] = min_length validator_kwarg = [ validator for validator in validator_kwarg if not isinstance(validator, validators.MinLengthValidator) ] # Ensure that max_value is passed explicitly as a keyword arg, # rather than as a validator. 
max_value = next(( validator.limit_value for validator in validator_kwarg if isinstance(validator, validators.MaxValueValidator) ), None) if max_value is not None and isinstance(model_field, NUMERIC_FIELD_TYPES): kwargs['max_value'] = max_value validator_kwarg = [ validator for validator in validator_kwarg if not isinstance(validator, validators.MaxValueValidator) ] # Ensure that max_value is passed explicitly as a keyword arg, # rather than as a validator. min_value = next(( validator.limit_value for validator in validator_kwarg if isinstance(validator, validators.MinValueValidator) ), None) if min_value is not None and isinstance(model_field, NUMERIC_FIELD_TYPES): kwargs['min_value'] = min_value validator_kwarg = [ validator for validator in validator_kwarg if not isinstance(validator, validators.MinValueValidator) ] # URLField does not need to include the URLValidator argument, # as it is explicitly added in. if isinstance(model_field, models.URLField): validator_kwarg = [ validator for validator in validator_kwarg if not isinstance(validator, validators.URLValidator) ] # EmailField does not need to include the validate_email argument, # as it is explicitly added in. if isinstance(model_field, models.EmailField): validator_kwarg = [ validator for validator in validator_kwarg if validator is not validators.validate_email ] # SlugField do not need to include the 'validate_slug' argument, if isinstance(model_field, models.SlugField): validator_kwarg = [ validator for validator in validator_kwarg if validator is not validators.validate_slug ] # IPAddressField do not need to include the 'validate_ipv46_address' argument, if isinstance(model_field, models.GenericIPAddressField): validator_kwarg = [ validator for validator in validator_kwarg if validator is not validators.validate_ipv46_address ] if getattr(model_field, 'unique', False): unique_error_message = model_field.error_messages.get('unique', None) if unique_error_message: unique_error_message = unique_error_message % { 'model_name': model_field.model._meta.object_name, 'field_label': model_field.verbose_name } validator = UniqueValidator( queryset=model_field.model._default_manager, message=unique_error_message) validator_kwarg.append(validator) if validator_kwarg: kwargs['validators'] = validator_kwarg return kwargs
[ "def", "get_field_kwargs", "(", "field_name", ",", "model_field", ")", ":", "kwargs", "=", "{", "}", "validator_kwarg", "=", "list", "(", "model_field", ".", "validators", ")", "# The following will only be used by ModelField classes.", "# Gets removed for everything else.", "kwargs", "[", "'model_field'", "]", "=", "model_field", "if", "model_field", ".", "verbose_name", "and", "needs_label", "(", "model_field", ",", "field_name", ")", ":", "kwargs", "[", "'label'", "]", "=", "capfirst", "(", "model_field", ".", "verbose_name", ")", "if", "model_field", ".", "help_text", ":", "kwargs", "[", "'help_text'", "]", "=", "model_field", ".", "help_text", "max_digits", "=", "getattr", "(", "model_field", ",", "'max_digits'", ",", "None", ")", "if", "max_digits", "is", "not", "None", ":", "kwargs", "[", "'max_digits'", "]", "=", "max_digits", "decimal_places", "=", "getattr", "(", "model_field", ",", "'decimal_places'", ",", "None", ")", "if", "decimal_places", "is", "not", "None", ":", "kwargs", "[", "'decimal_places'", "]", "=", "decimal_places", "if", "isinstance", "(", "model_field", ",", "models", ".", "TextField", ")", ":", "kwargs", "[", "'style'", "]", "=", "{", "'base_template'", ":", "'textarea.html'", "}", "if", "isinstance", "(", "model_field", ",", "models", ".", "AutoField", ")", "or", "not", "model_field", ".", "editable", ":", "# If this field is read-only, then return early.", "# Further keyword arguments are not valid.", "kwargs", "[", "'read_only'", "]", "=", "True", "return", "kwargs", "if", "model_field", ".", "has_default", "(", ")", "or", "model_field", ".", "blank", "or", "model_field", ".", "null", ":", "kwargs", "[", "'required'", "]", "=", "False", "if", "model_field", ".", "null", "and", "not", "isinstance", "(", "model_field", ",", "models", ".", "NullBooleanField", ")", ":", "kwargs", "[", "'allow_null'", "]", "=", "True", "if", "model_field", ".", "blank", "and", "(", "isinstance", "(", "model_field", ",", "models", ".", "CharField", ")", "or", "isinstance", "(", "model_field", ",", "models", ".", "TextField", ")", ")", ":", "kwargs", "[", "'allow_blank'", "]", "=", "True", "if", "isinstance", "(", "model_field", ",", "models", ".", "FilePathField", ")", ":", "kwargs", "[", "'path'", "]", "=", "model_field", ".", "path", "if", "model_field", ".", "match", "is", "not", "None", ":", "kwargs", "[", "'match'", "]", "=", "model_field", ".", "match", "if", "model_field", ".", "recursive", "is", "not", "False", ":", "kwargs", "[", "'recursive'", "]", "=", "model_field", ".", "recursive", "if", "model_field", ".", "allow_files", "is", "not", "True", ":", "kwargs", "[", "'allow_files'", "]", "=", "model_field", ".", "allow_files", "if", "model_field", ".", "allow_folders", "is", "not", "False", ":", "kwargs", "[", "'allow_folders'", "]", "=", "model_field", ".", "allow_folders", "if", "model_field", ".", "choices", ":", "# If this model field contains choices, then return early.", "# Further keyword arguments are not valid.", "kwargs", "[", "'choices'", "]", "=", "model_field", ".", "choices", "return", "kwargs", "# Our decimal validation is handled in the field code, not validator code.", "# (In Django 1.9+ this differs from previous style)", "if", "isinstance", "(", "model_field", ",", "models", ".", "DecimalField", ")", ":", "validator_kwarg", "=", "[", "validator", "for", "validator", "in", "validator_kwarg", "if", "DecimalValidator", "and", "not", "isinstance", "(", "validator", ",", "DecimalValidator", ")", "]", "# Ensure that max_length is passed explicitly as a 
keyword arg,", "# rather than as a validator.", "max_length", "=", "getattr", "(", "model_field", ",", "'max_length'", ",", "None", ")", "if", "max_length", "is", "not", "None", "and", "(", "isinstance", "(", "model_field", ",", "models", ".", "CharField", ")", "or", "isinstance", "(", "model_field", ",", "models", ".", "TextField", ")", ")", ":", "kwargs", "[", "'max_length'", "]", "=", "max_length", "validator_kwarg", "=", "[", "validator", "for", "validator", "in", "validator_kwarg", "if", "not", "isinstance", "(", "validator", ",", "validators", ".", "MaxLengthValidator", ")", "]", "# Ensure that min_length is passed explicitly as a keyword arg,", "# rather than as a validator.", "min_length", "=", "next", "(", "(", "validator", ".", "limit_value", "for", "validator", "in", "validator_kwarg", "if", "isinstance", "(", "validator", ",", "validators", ".", "MinLengthValidator", ")", ")", ",", "None", ")", "if", "min_length", "is", "not", "None", "and", "isinstance", "(", "model_field", ",", "models", ".", "CharField", ")", ":", "kwargs", "[", "'min_length'", "]", "=", "min_length", "validator_kwarg", "=", "[", "validator", "for", "validator", "in", "validator_kwarg", "if", "not", "isinstance", "(", "validator", ",", "validators", ".", "MinLengthValidator", ")", "]", "# Ensure that max_value is passed explicitly as a keyword arg,", "# rather than as a validator.", "max_value", "=", "next", "(", "(", "validator", ".", "limit_value", "for", "validator", "in", "validator_kwarg", "if", "isinstance", "(", "validator", ",", "validators", ".", "MaxValueValidator", ")", ")", ",", "None", ")", "if", "max_value", "is", "not", "None", "and", "isinstance", "(", "model_field", ",", "NUMERIC_FIELD_TYPES", ")", ":", "kwargs", "[", "'max_value'", "]", "=", "max_value", "validator_kwarg", "=", "[", "validator", "for", "validator", "in", "validator_kwarg", "if", "not", "isinstance", "(", "validator", ",", "validators", ".", "MaxValueValidator", ")", "]", "# Ensure that max_value is passed explicitly as a keyword arg,", "# rather than as a validator.", "min_value", "=", "next", "(", "(", "validator", ".", "limit_value", "for", "validator", "in", "validator_kwarg", "if", "isinstance", "(", "validator", ",", "validators", ".", "MinValueValidator", ")", ")", ",", "None", ")", "if", "min_value", "is", "not", "None", "and", "isinstance", "(", "model_field", ",", "NUMERIC_FIELD_TYPES", ")", ":", "kwargs", "[", "'min_value'", "]", "=", "min_value", "validator_kwarg", "=", "[", "validator", "for", "validator", "in", "validator_kwarg", "if", "not", "isinstance", "(", "validator", ",", "validators", ".", "MinValueValidator", ")", "]", "# URLField does not need to include the URLValidator argument,", "# as it is explicitly added in.", "if", "isinstance", "(", "model_field", ",", "models", ".", "URLField", ")", ":", "validator_kwarg", "=", "[", "validator", "for", "validator", "in", "validator_kwarg", "if", "not", "isinstance", "(", "validator", ",", "validators", ".", "URLValidator", ")", "]", "# EmailField does not need to include the validate_email argument,", "# as it is explicitly added in.", "if", "isinstance", "(", "model_field", ",", "models", ".", "EmailField", ")", ":", "validator_kwarg", "=", "[", "validator", "for", "validator", "in", "validator_kwarg", "if", "validator", "is", "not", "validators", ".", "validate_email", "]", "# SlugField do not need to include the 'validate_slug' argument,", "if", "isinstance", "(", "model_field", ",", "models", ".", "SlugField", ")", ":", "validator_kwarg", "=", "[", 
"validator", "for", "validator", "in", "validator_kwarg", "if", "validator", "is", "not", "validators", ".", "validate_slug", "]", "# IPAddressField do not need to include the 'validate_ipv46_address' argument,", "if", "isinstance", "(", "model_field", ",", "models", ".", "GenericIPAddressField", ")", ":", "validator_kwarg", "=", "[", "validator", "for", "validator", "in", "validator_kwarg", "if", "validator", "is", "not", "validators", ".", "validate_ipv46_address", "]", "if", "getattr", "(", "model_field", ",", "'unique'", ",", "False", ")", ":", "unique_error_message", "=", "model_field", ".", "error_messages", ".", "get", "(", "'unique'", ",", "None", ")", "if", "unique_error_message", ":", "unique_error_message", "=", "unique_error_message", "%", "{", "'model_name'", ":", "model_field", ".", "model", ".", "_meta", ".", "object_name", ",", "'field_label'", ":", "model_field", ".", "verbose_name", "}", "validator", "=", "UniqueValidator", "(", "queryset", "=", "model_field", ".", "model", ".", "_default_manager", ",", "message", "=", "unique_error_message", ")", "validator_kwarg", ".", "append", "(", "validator", ")", "if", "validator_kwarg", ":", "kwargs", "[", "'validators'", "]", "=", "validator_kwarg", "return", "kwargs" ]
38.650888
0.000448
def add_weights(self, name, nin, nout, mean=0, std=0, sparsity=0, diagonal=0): '''Helper method to create a new weight matrix. Parameters ---------- name : str Name of the parameter to add. nin : int Size of "input" for this weight matrix. nout : int Size of "output" for this weight matrix. mean : float, optional Mean value for randomly-initialized weights. Defaults to 0. std : float, optional Standard deviation of initial matrix values. Defaults to :math:`1 / sqrt(n_i + n_o)`. sparsity : float, optional Fraction of weights to be set to zero. Defaults to 0. diagonal : float, optional Initialize weights to a matrix of zeros with this value along the diagonal. Defaults to None, which initializes all weights randomly. ''' glorot = 1 / np.sqrt(nin + nout) m = self.kwargs.get( 'mean_{}'.format(name), self.kwargs.get('mean', mean)) s = self.kwargs.get( 'std_{}'.format(name), self.kwargs.get('std', std or glorot)) p = self.kwargs.get( 'sparsity_{}'.format(name), self.kwargs.get('sparsity', sparsity)) d = self.kwargs.get( 'diagonal_{}'.format(name), self.kwargs.get('diagonal', diagonal)) self._params.append(theano.shared( util.random_matrix(nin, nout, mean=m, std=s, sparsity=p, diagonal=d, rng=self.rng), name=self._fmt(name)))
[ "def", "add_weights", "(", "self", ",", "name", ",", "nin", ",", "nout", ",", "mean", "=", "0", ",", "std", "=", "0", ",", "sparsity", "=", "0", ",", "diagonal", "=", "0", ")", ":", "glorot", "=", "1", "/", "np", ".", "sqrt", "(", "nin", "+", "nout", ")", "m", "=", "self", ".", "kwargs", ".", "get", "(", "'mean_{}'", ".", "format", "(", "name", ")", ",", "self", ".", "kwargs", ".", "get", "(", "'mean'", ",", "mean", ")", ")", "s", "=", "self", ".", "kwargs", ".", "get", "(", "'std_{}'", ".", "format", "(", "name", ")", ",", "self", ".", "kwargs", ".", "get", "(", "'std'", ",", "std", "or", "glorot", ")", ")", "p", "=", "self", ".", "kwargs", ".", "get", "(", "'sparsity_{}'", ".", "format", "(", "name", ")", ",", "self", ".", "kwargs", ".", "get", "(", "'sparsity'", ",", "sparsity", ")", ")", "d", "=", "self", ".", "kwargs", ".", "get", "(", "'diagonal_{}'", ".", "format", "(", "name", ")", ",", "self", ".", "kwargs", ".", "get", "(", "'diagonal'", ",", "diagonal", ")", ")", "self", ".", "_params", ".", "append", "(", "theano", ".", "shared", "(", "util", ".", "random_matrix", "(", "nin", ",", "nout", ",", "mean", "=", "m", ",", "std", "=", "s", ",", "sparsity", "=", "p", ",", "diagonal", "=", "d", ",", "rng", "=", "self", ".", "rng", ")", ",", "name", "=", "self", ".", "_fmt", "(", "name", ")", ")", ")" ]
44.342857
0.001261
def _add_defaults_python(self): """getting python files""" if self.distribution.has_pure_modules(): build_py = self.get_finalized_command('build_py') self.filelist.extend(build_py.get_source_files()) # This functionality is incompatible with include_package_data, and # will in fact create an infinite recursion if include_package_data # is True. Use of include_package_data will imply that # distutils-style automatic handling of package_data is disabled if not self.distribution.include_package_data: for _, src_dir, _, filenames in build_py.data_files: self.filelist.extend([os.path.join(src_dir, filename) for filename in filenames])
[ "def", "_add_defaults_python", "(", "self", ")", ":", "if", "self", ".", "distribution", ".", "has_pure_modules", "(", ")", ":", "build_py", "=", "self", ".", "get_finalized_command", "(", "'build_py'", ")", "self", ".", "filelist", ".", "extend", "(", "build_py", ".", "get_source_files", "(", ")", ")", "# This functionality is incompatible with include_package_data, and", "# will in fact create an infinite recursion if include_package_data", "# is True. Use of include_package_data will imply that", "# distutils-style automatic handling of package_data is disabled", "if", "not", "self", ".", "distribution", ".", "include_package_data", ":", "for", "_", ",", "src_dir", ",", "_", ",", "filenames", "in", "build_py", ".", "data_files", ":", "self", ".", "filelist", ".", "extend", "(", "[", "os", ".", "path", ".", "join", "(", "src_dir", ",", "filename", ")", "for", "filename", "in", "filenames", "]", ")" ]
61.846154
0.002451
def grab_literal(template, l_del):
    """Parse a literal from the template"""

    global _CURRENT_LINE

    try:
        # Look for the next tag and move the template to it
        literal, template = template.split(l_del, 1)
        _CURRENT_LINE += literal.count('\n')
        return (literal, template)

    # There are no more tags in the template?
    except ValueError:
        # Then the rest of the template is a literal
        return (template, '')
[ "def", "grab_literal", "(", "template", ",", "l_del", ")", ":", "global", "_CURRENT_LINE", "try", ":", "# Look for the next tag and move the template to it", "literal", ",", "template", "=", "template", ".", "split", "(", "l_del", ",", "1", ")", "_CURRENT_LINE", "+=", "literal", ".", "count", "(", "'\\n'", ")", "return", "(", "literal", ",", "template", ")", "# There are no more tags in the template?", "except", "ValueError", ":", "# Then the rest of the template is a literal", "return", "(", "template", ",", "''", ")" ]
29.733333
0.002174
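Worth noting for the grab_literal record above: str.split never raises by itself when the delimiter is missing; the ValueError comes from unpacking a one-element result into two names, which is exactly what the except branch relies on. A minimal standalone illustration (the template strings and the '{{' delimiter are assumed for the example):

def split_literal(template, l_del='{{'):
    # Everything before the next opening tag is the literal; if no tag is
    # left, unpacking fails and the whole remainder is the literal.
    try:
        literal, rest = template.split(l_del, 1)
        return literal, rest
    except ValueError:
        return template, ''

print(split_literal('Hello {{ name }}!'))   # ('Hello ', ' name }}!')
print(split_literal('no tags here'))        # ('no tags here', '')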
def is_line_layer(layer):
    """Check if a QGIS layer is vector and its geometries are lines.

    :param layer: A vector layer.
    :type layer: QgsVectorLayer, QgsMapLayer

    :returns: True if the layer contains lines, otherwise False.
    :rtype: bool
    """
    try:
        return (layer.type() == QgsMapLayer.VectorLayer) and (
            layer.geometryType() == QgsWkbTypes.LineGeometry)
    except AttributeError:
        return False
[ "def", "is_line_layer", "(", "layer", ")", ":", "try", ":", "return", "(", "layer", ".", "type", "(", ")", "==", "QgsMapLayer", ".", "VectorLayer", ")", "and", "(", "layer", ".", "geometryType", "(", ")", "==", "QgsWkbTypes", ".", "LineGeometry", ")", "except", "AttributeError", ":", "return", "False" ]
28.933333
0.002232
def close_first_file(self):
    """
    Attempts to close the first **Script_Editor_tabWidget** Widget tab Model editor file.

    :return: Method success.
    :rtype: bool
    """
    editor = self.get_current_editor()
    if len(self.__model.list_editors()) == 1 and editor.is_untitled and not editor.is_modified():
        self.close_file(leave_first_editor=False)
        return True
[ "def", "close_first_file", "(", "self", ")", ":", "editor", "=", "self", ".", "get_current_editor", "(", ")", "if", "len", "(", "self", ".", "__model", ".", "list_editors", "(", ")", ")", "==", "1", "and", "editor", ".", "is_untitled", "and", "not", "editor", ".", "is_modified", "(", ")", ":", "self", ".", "close_file", "(", "leave_first_editor", "=", "False", ")", "return", "True" ]
34.416667
0.009434
def input_schema_clean(input_, input_schema):
    """
    Updates schema default values with input data.

    :param input_: Input data
    :type input_: dict
    :param input_schema: Input schema
    :type input_schema: dict

    :returns: Nested dict with data (default values updated with input data)
    :rtype: dict
    """
    if input_schema.get('type') == 'object':
        try:
            defaults = get_object_defaults(input_schema)
        except NoObjectDefaults:
            pass
        else:
            return deep_update(defaults, input_)
    return input_
[ "def", "input_schema_clean", "(", "input_", ",", "input_schema", ")", ":", "if", "input_schema", ".", "get", "(", "'type'", ")", "==", "'object'", ":", "try", ":", "defaults", "=", "get_object_defaults", "(", "input_schema", ")", "except", "NoObjectDefaults", ":", "pass", "else", ":", "return", "deep_update", "(", "defaults", ",", "input_", ")", "return", "input_" ]
29.263158
0.001742
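For the input_schema_clean record above, get_object_defaults and deep_update come from the same project and are not shown here; the sketch below uses simplified stand-ins purely to illustrate the intended behaviour of merging schema defaults with input data, and the schema itself is made up.

def get_defaults(schema):
    # simplified stand-in: collect top-level property defaults only
    return {key: spec['default']
            for key, spec in schema.get('properties', {}).items()
            if 'default' in spec}

def deep_update(base, new):
    # simplified stand-in: the real helper recurses into nested objects
    merged = dict(base)
    merged.update(new)
    return merged

schema = {'type': 'object',
          'properties': {'color': {'type': 'string', 'default': 'red'},
                         'size': {'type': 'integer', 'default': 1}}}
print(deep_update(get_defaults(schema), {'size': 3}))
# {'color': 'red', 'size': 3}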
def sample(self, sample_indices=None, num_samples=1):
    """
    returns samples according to the KDE

    Parameters
    ----------
    sample_indices: list of ints
        Indices into the training data used as centers for the samples
    num_samples: int
        if sample_indices is None, this specifies how many samples
        are drawn.
    """
    if sample_indices is None:
        sample_indices = np.random.choice(self.data.shape[0],
                                          size=num_samples)
    samples = self.data[sample_indices]
    samples = samples.squeeze()

    if self.num_values == 1:
        # handle cases where there is only one value!
        return(samples)

    probs = self.bw * np.ones(self.num_values)/(self.num_values-1)
    probs[0] = 1-self.bw

    delta = np.random.choice(self.num_values, size=num_samples, p=probs)

    samples = np.mod(samples + delta, self.num_values)
    return(samples)
[ "def", "sample", "(", "self", ",", "sample_indices", "=", "None", ",", "num_samples", "=", "1", ")", ":", "if", "sample_indices", "is", "None", ":", "sample_indices", "=", "np", ".", "random", ".", "choice", "(", "self", ".", "data", ".", "shape", "[", "0", "]", ",", "size", "=", "num_samples", ")", "samples", "=", "self", ".", "data", "[", "sample_indices", "]", "samples", "=", "samples", ".", "squeeze", "(", ")", "if", "self", ".", "num_values", "==", "1", ":", "# handle cases where there is only one value!", "return", "(", "samples", ")", "probs", "=", "self", ".", "bw", "*", "np", ".", "ones", "(", "self", ".", "num_values", ")", "/", "(", "self", ".", "num_values", "-", "1", ")", "probs", "[", "0", "]", "=", "1", "-", "self", ".", "bw", "delta", "=", "np", ".", "random", ".", "choice", "(", "self", ".", "num_values", ",", "size", "=", "num_samples", ",", "p", "=", "probs", ")", "samples", "=", "np", ".", "mod", "(", "samples", "+", "delta", ",", "self", ".", "num_values", ")", "return", "(", "samples", ")" ]
27.566667
0.044393
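A standalone numpy sketch of the resampling rule in the sample record above (the bandwidth, category count, and centre values are made up): each centre is kept with probability 1 - bw and otherwise shifted to one of the remaining categories uniformly.

import numpy as np

num_values, bw = 4, 0.3
centres = np.array([0, 2, 2, 3, 1])         # hypothetical values drawn from data
probs = bw * np.ones(num_values) / (num_values - 1)
probs[0] = 1 - bw                           # probability of keeping the centre
delta = np.random.choice(num_values, size=centres.shape[0], p=probs)
samples = np.mod(centres + delta, num_values)
print(samples)                              # e.g. [0 2 3 3 1]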
def obtain_hosting_device_credentials_from_config():
    """Obtains credentials from config file and stores them in memory.

    To be called before hosting device templates defined in the config file
    are created.
    """
    cred_dict = get_specific_config('cisco_hosting_device_credential')
    attr_info = {
        'name': {'allow_post': True, 'allow_put': True,
                 'validate': {'type:string': None}, 'is_visible': True,
                 'default': ''},
        'description': {'allow_post': True, 'allow_put': True,
                        'validate': {'type:string': None},
                        'is_visible': True, 'default': ''},
        'user_name': {'allow_post': True, 'allow_put': True,
                      'validate': {'type:string': None}, 'is_visible': True,
                      'default': ''},
        'password': {'allow_post': True, 'allow_put': True,
                     'validate': {'type:string': None}, 'is_visible': True,
                     'default': ''},
        'type': {'allow_post': True, 'allow_put': True,
                 'validate': {'type:string': None}, 'is_visible': True,
                 'default': ''}}
    credentials = {}
    for cred_uuid, kv_dict in cred_dict.items():
        # ensure cred_uuid is properly formatted
        cred_uuid = uuidify(cred_uuid)
        verify_resource_dict(kv_dict, True, attr_info)
        credentials[cred_uuid] = kv_dict
    return credentials
[ "def", "obtain_hosting_device_credentials_from_config", "(", ")", ":", "cred_dict", "=", "get_specific_config", "(", "'cisco_hosting_device_credential'", ")", "attr_info", "=", "{", "'name'", ":", "{", "'allow_post'", ":", "True", ",", "'allow_put'", ":", "True", ",", "'validate'", ":", "{", "'type:string'", ":", "None", "}", ",", "'is_visible'", ":", "True", ",", "'default'", ":", "''", "}", ",", "'description'", ":", "{", "'allow_post'", ":", "True", ",", "'allow_put'", ":", "True", ",", "'validate'", ":", "{", "'type:string'", ":", "None", "}", ",", "'is_visible'", ":", "True", ",", "'default'", ":", "''", "}", ",", "'user_name'", ":", "{", "'allow_post'", ":", "True", ",", "'allow_put'", ":", "True", ",", "'validate'", ":", "{", "'type:string'", ":", "None", "}", ",", "'is_visible'", ":", "True", ",", "'default'", ":", "''", "}", ",", "'password'", ":", "{", "'allow_post'", ":", "True", ",", "'allow_put'", ":", "True", ",", "'validate'", ":", "{", "'type:string'", ":", "None", "}", ",", "'is_visible'", ":", "True", ",", "'default'", ":", "''", "}", ",", "'type'", ":", "{", "'allow_post'", ":", "True", ",", "'allow_put'", ":", "True", ",", "'validate'", ":", "{", "'type:string'", ":", "None", "}", ",", "'is_visible'", ":", "True", ",", "'default'", ":", "''", "}", "}", "credentials", "=", "{", "}", "for", "cred_uuid", ",", "kv_dict", "in", "cred_dict", ".", "items", "(", ")", ":", "# ensure cred_uuid is properly formatted", "cred_uuid", "=", "uuidify", "(", "cred_uuid", ")", "verify_resource_dict", "(", "kv_dict", ",", "True", ",", "attr_info", ")", "credentials", "[", "cred_uuid", "]", "=", "kv_dict", "return", "credentials" ]
48.793103
0.000693
def picture(
        relationshiplist, picname, picdescription, pixelwidth=None,
        pixelheight=None, nochangeaspect=True, nochangearrowheads=True,
        imagefiledict=None):
    """
    Take a relationshiplist, picture file name, and return a paragraph
    containing the image and an updated relationshiplist
    """
    if imagefiledict is None:
        warn(
            'Using picture() without imagefiledict parameter will be depreca'
            'ted in the future.',
            PendingDeprecationWarning
        )

    # http://openxmldeveloper.org/articles/462.aspx
    # Create an image. Size may be specified, otherwise it will based on the
    # pixel size of image. Return a paragraph containing the picture

    # Set relationship ID to that of the image or the first available one
    picid = '2'
    picpath = abspath(picname)

    if imagefiledict is not None:
        # Keep track of the image files in a separate dictionary so they don't
        # need to be copied into the template directory
        if picpath not in imagefiledict:
            picrelid = 'rId' + str(len(relationshiplist) + 1)
            imagefiledict[picpath] = picrelid

            relationshiplist.append([
                'http://schemas.openxmlformats.org/officeDocument/2006/relat'
                'ionships/image',
                'media/%s_%s' % (picrelid, basename(picpath))
            ])
        else:
            picrelid = imagefiledict[picpath]
    else:
        # Copy files into template directory for backwards compatibility
        # Images still accumulate in the template directory this way
        picrelid = 'rId' + str(len(relationshiplist) + 1)

        relationshiplist.append([
            'http://schemas.openxmlformats.org/officeDocument/2006/relations'
            'hips/image',
            'media/' + picname
        ])

        media_dir = join(template_dir, 'word', 'media')
        if not os.path.isdir(media_dir):
            os.mkdir(media_dir)
        shutil.copyfile(picname, join(media_dir, picname))

    image = Image.open(picpath)

    # Extract EXIF data, if available
    try:
        exif = image._getexif()
        exif = {} if exif is None else exif
    except:
        exif = {}

    imageExif = {}
    for tag, value in exif.items():
        imageExif[TAGS.get(tag, tag)] = value

    imageOrientation = imageExif.get('Orientation', 1)
    imageAngle = {
        1: 0, 2: 0, 3: 180, 4: 0, 5: 90, 6: 90, 7: 270, 8: 270
    }[imageOrientation]
    imageFlipH = 'true' if imageOrientation in (2, 5, 7) else 'false'
    imageFlipV = 'true' if imageOrientation == 4 else 'false'

    # Check if the user has specified a size
    if not pixelwidth or not pixelheight:
        # If not, get info from the picture itself
        pixelwidth, pixelheight = image.size[0:2]

        # Swap width and height if necessary
        if imageOrientation in (5, 6, 7, 8):
            pixelwidth, pixelheight = pixelheight, pixelwidth

    # OpenXML measures on-screen objects in English Metric Units
    # 1cm = 36000 EMUs
    emuperpixel = 12700
    width = str(pixelwidth * emuperpixel)
    height = str(pixelheight * emuperpixel)

    # There are 3 main elements inside a picture
    # 1. The Blipfill - specifies how the image fills the picture area
    #    (stretch, tile, etc.)
    blipfill = makeelement('blipFill', nsprefix='pic')
    blipfill.append(makeelement('blip', nsprefix='a', attrnsprefix='r',
                                attributes={'embed': picrelid}))
    stretch = makeelement('stretch', nsprefix='a')
    stretch.append(makeelement('fillRect', nsprefix='a'))
    blipfill.append(makeelement('srcRect', nsprefix='a'))
    blipfill.append(stretch)

    # 2. The non visual picture properties
    nvpicpr = makeelement('nvPicPr', nsprefix='pic')
    cnvpr = makeelement(
        'cNvPr', nsprefix='pic',
        attributes={'id': '0', 'name': 'Picture 1', 'descr': picdescription}
    )
    nvpicpr.append(cnvpr)
    cnvpicpr = makeelement('cNvPicPr', nsprefix='pic')
    cnvpicpr.append(makeelement(
        'picLocks', nsprefix='a',
        attributes={'noChangeAspect': str(int(nochangeaspect)),
                    'noChangeArrowheads': str(int(nochangearrowheads))}))
    nvpicpr.append(cnvpicpr)

    # 3. The Shape properties
    sppr = makeelement('spPr', nsprefix='pic', attributes={'bwMode': 'auto'})
    xfrm = makeelement(
        'xfrm', nsprefix='a', attributes={
            'rot': str(imageAngle * 60000),
            'flipH': imageFlipH,
            'flipV': imageFlipV
        }
    )
    xfrm.append(
        makeelement('off', nsprefix='a', attributes={'x': '0', 'y': '0'})
    )
    xfrm.append(
        makeelement(
            'ext', nsprefix='a', attributes={'cx': width, 'cy': height}
        )
    )
    prstgeom = makeelement(
        'prstGeom', nsprefix='a', attributes={'prst': 'rect'}
    )
    prstgeom.append(makeelement('avLst', nsprefix='a'))
    sppr.append(xfrm)
    sppr.append(prstgeom)

    # Add our 3 parts to the picture element
    pic = makeelement('pic', nsprefix='pic')
    pic.append(nvpicpr)
    pic.append(blipfill)
    pic.append(sppr)

    # Now make the supporting elements
    # The following sequence is just: make element, then add its children
    graphicdata = makeelement(
        'graphicData', nsprefix='a',
        attributes={'uri': ('http://schemas.openxmlformats.org/drawingml/200'
                            '6/picture')})
    graphicdata.append(pic)
    graphic = makeelement('graphic', nsprefix='a')
    graphic.append(graphicdata)

    framelocks = makeelement('graphicFrameLocks', nsprefix='a',
                             attributes={'noChangeAspect': '1'})
    framepr = makeelement('cNvGraphicFramePr', nsprefix='wp')
    framepr.append(framelocks)
    docpr = makeelement('docPr', nsprefix='wp',
                        attributes={'id': picid, 'name': 'Picture 1',
                                    'descr': picdescription})
    effectextent = makeelement('effectExtent', nsprefix='wp',
                               attributes={'l': '25400', 't': '0', 'r': '0',
                                           'b': '0'})
    extent = makeelement('extent', nsprefix='wp',
                         attributes={'cx': width, 'cy': height})
    inline = makeelement('inline', attributes={'distT': "0", 'distB': "0",
                                               'distL': "0", 'distR': "0"},
                         nsprefix='wp')
    inline.append(extent)
    inline.append(effectextent)
    inline.append(docpr)
    inline.append(framepr)
    inline.append(graphic)
    drawing = makeelement('drawing')
    drawing.append(inline)
    run = makeelement('r')
    run.append(drawing)
    paragraph = makeelement('p')
    paragraph.append(run)

    if imagefiledict is not None:
        return relationshiplist, paragraph, imagefiledict
    else:
        return relationshiplist, paragraph
[ "def", "picture", "(", "relationshiplist", ",", "picname", ",", "picdescription", ",", "pixelwidth", "=", "None", ",", "pixelheight", "=", "None", ",", "nochangeaspect", "=", "True", ",", "nochangearrowheads", "=", "True", ",", "imagefiledict", "=", "None", ")", ":", "if", "imagefiledict", "is", "None", ":", "warn", "(", "'Using picture() without imagefiledict parameter will be depreca'", "'ted in the future.'", ",", "PendingDeprecationWarning", ")", "# http://openxmldeveloper.org/articles/462.aspx", "# Create an image. Size may be specified, otherwise it will based on the", "# pixel size of image. Return a paragraph containing the picture", "# Set relationship ID to that of the image or the first available one", "picid", "=", "'2'", "picpath", "=", "abspath", "(", "picname", ")", "if", "imagefiledict", "is", "not", "None", ":", "# Keep track of the image files in a separate dictionary so they don't", "# need to be copied into the template directory", "if", "picpath", "not", "in", "imagefiledict", ":", "picrelid", "=", "'rId'", "+", "str", "(", "len", "(", "relationshiplist", ")", "+", "1", ")", "imagefiledict", "[", "picpath", "]", "=", "picrelid", "relationshiplist", ".", "append", "(", "[", "'http://schemas.openxmlformats.org/officeDocument/2006/relat'", "'ionships/image'", ",", "'media/%s_%s'", "%", "(", "picrelid", ",", "basename", "(", "picpath", ")", ")", "]", ")", "else", ":", "picrelid", "=", "imagefiledict", "[", "picpath", "]", "else", ":", "# Copy files into template directory for backwards compatibility", "# Images still accumulate in the template directory this way", "picrelid", "=", "'rId'", "+", "str", "(", "len", "(", "relationshiplist", ")", "+", "1", ")", "relationshiplist", ".", "append", "(", "[", "'http://schemas.openxmlformats.org/officeDocument/2006/relations'", "'hips/image'", ",", "'media/'", "+", "picname", "]", ")", "media_dir", "=", "join", "(", "template_dir", ",", "'word'", ",", "'media'", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "media_dir", ")", ":", "os", ".", "mkdir", "(", "media_dir", ")", "shutil", ".", "copyfile", "(", "picname", ",", "join", "(", "media_dir", ",", "picname", ")", ")", "image", "=", "Image", ".", "open", "(", "picpath", ")", "# Extract EXIF data, if available", "try", ":", "exif", "=", "image", ".", "_getexif", "(", ")", "exif", "=", "{", "}", "if", "exif", "is", "None", "else", "exif", "except", ":", "exif", "=", "{", "}", "imageExif", "=", "{", "}", "for", "tag", ",", "value", "in", "exif", ".", "items", "(", ")", ":", "imageExif", "[", "TAGS", ".", "get", "(", "tag", ",", "tag", ")", "]", "=", "value", "imageOrientation", "=", "imageExif", ".", "get", "(", "'Orientation'", ",", "1", ")", "imageAngle", "=", "{", "1", ":", "0", ",", "2", ":", "0", ",", "3", ":", "180", ",", "4", ":", "0", ",", "5", ":", "90", ",", "6", ":", "90", ",", "7", ":", "270", ",", "8", ":", "270", "}", "[", "imageOrientation", "]", "imageFlipH", "=", "'true'", "if", "imageOrientation", "in", "(", "2", ",", "5", ",", "7", ")", "else", "'false'", "imageFlipV", "=", "'true'", "if", "imageOrientation", "==", "4", "else", "'false'", "# Check if the user has specified a size", "if", "not", "pixelwidth", "or", "not", "pixelheight", ":", "# If not, get info from the picture itself", "pixelwidth", ",", "pixelheight", "=", "image", ".", "size", "[", "0", ":", "2", "]", "# Swap width and height if necessary", "if", "imageOrientation", "in", "(", "5", ",", "6", ",", "7", ",", "8", ")", ":", "pixelwidth", ",", "pixelheight", "=", "pixelheight", ",", 
"pixelwidth", "# OpenXML measures on-screen objects in English Metric Units", "# 1cm = 36000 EMUs", "emuperpixel", "=", "12700", "width", "=", "str", "(", "pixelwidth", "*", "emuperpixel", ")", "height", "=", "str", "(", "pixelheight", "*", "emuperpixel", ")", "# There are 3 main elements inside a picture", "# 1. The Blipfill - specifies how the image fills the picture area", "# (stretch, tile, etc.)", "blipfill", "=", "makeelement", "(", "'blipFill'", ",", "nsprefix", "=", "'pic'", ")", "blipfill", ".", "append", "(", "makeelement", "(", "'blip'", ",", "nsprefix", "=", "'a'", ",", "attrnsprefix", "=", "'r'", ",", "attributes", "=", "{", "'embed'", ":", "picrelid", "}", ")", ")", "stretch", "=", "makeelement", "(", "'stretch'", ",", "nsprefix", "=", "'a'", ")", "stretch", ".", "append", "(", "makeelement", "(", "'fillRect'", ",", "nsprefix", "=", "'a'", ")", ")", "blipfill", ".", "append", "(", "makeelement", "(", "'srcRect'", ",", "nsprefix", "=", "'a'", ")", ")", "blipfill", ".", "append", "(", "stretch", ")", "# 2. The non visual picture properties", "nvpicpr", "=", "makeelement", "(", "'nvPicPr'", ",", "nsprefix", "=", "'pic'", ")", "cnvpr", "=", "makeelement", "(", "'cNvPr'", ",", "nsprefix", "=", "'pic'", ",", "attributes", "=", "{", "'id'", ":", "'0'", ",", "'name'", ":", "'Picture 1'", ",", "'descr'", ":", "picdescription", "}", ")", "nvpicpr", ".", "append", "(", "cnvpr", ")", "cnvpicpr", "=", "makeelement", "(", "'cNvPicPr'", ",", "nsprefix", "=", "'pic'", ")", "cnvpicpr", ".", "append", "(", "makeelement", "(", "'picLocks'", ",", "nsprefix", "=", "'a'", ",", "attributes", "=", "{", "'noChangeAspect'", ":", "str", "(", "int", "(", "nochangeaspect", ")", ")", ",", "'noChangeArrowheads'", ":", "str", "(", "int", "(", "nochangearrowheads", ")", ")", "}", ")", ")", "nvpicpr", ".", "append", "(", "cnvpicpr", ")", "# 3. 
The Shape properties", "sppr", "=", "makeelement", "(", "'spPr'", ",", "nsprefix", "=", "'pic'", ",", "attributes", "=", "{", "'bwMode'", ":", "'auto'", "}", ")", "xfrm", "=", "makeelement", "(", "'xfrm'", ",", "nsprefix", "=", "'a'", ",", "attributes", "=", "{", "'rot'", ":", "str", "(", "imageAngle", "*", "60000", ")", ",", "'flipH'", ":", "imageFlipH", ",", "'flipV'", ":", "imageFlipV", "}", ")", "xfrm", ".", "append", "(", "makeelement", "(", "'off'", ",", "nsprefix", "=", "'a'", ",", "attributes", "=", "{", "'x'", ":", "'0'", ",", "'y'", ":", "'0'", "}", ")", ")", "xfrm", ".", "append", "(", "makeelement", "(", "'ext'", ",", "nsprefix", "=", "'a'", ",", "attributes", "=", "{", "'cx'", ":", "width", ",", "'cy'", ":", "height", "}", ")", ")", "prstgeom", "=", "makeelement", "(", "'prstGeom'", ",", "nsprefix", "=", "'a'", ",", "attributes", "=", "{", "'prst'", ":", "'rect'", "}", ")", "prstgeom", ".", "append", "(", "makeelement", "(", "'avLst'", ",", "nsprefix", "=", "'a'", ")", ")", "sppr", ".", "append", "(", "xfrm", ")", "sppr", ".", "append", "(", "prstgeom", ")", "# Add our 3 parts to the picture element", "pic", "=", "makeelement", "(", "'pic'", ",", "nsprefix", "=", "'pic'", ")", "pic", ".", "append", "(", "nvpicpr", ")", "pic", ".", "append", "(", "blipfill", ")", "pic", ".", "append", "(", "sppr", ")", "# Now make the supporting elements", "# The following sequence is just: make element, then add its children", "graphicdata", "=", "makeelement", "(", "'graphicData'", ",", "nsprefix", "=", "'a'", ",", "attributes", "=", "{", "'uri'", ":", "(", "'http://schemas.openxmlformats.org/drawingml/200'", "'6/picture'", ")", "}", ")", "graphicdata", ".", "append", "(", "pic", ")", "graphic", "=", "makeelement", "(", "'graphic'", ",", "nsprefix", "=", "'a'", ")", "graphic", ".", "append", "(", "graphicdata", ")", "framelocks", "=", "makeelement", "(", "'graphicFrameLocks'", ",", "nsprefix", "=", "'a'", ",", "attributes", "=", "{", "'noChangeAspect'", ":", "'1'", "}", ")", "framepr", "=", "makeelement", "(", "'cNvGraphicFramePr'", ",", "nsprefix", "=", "'wp'", ")", "framepr", ".", "append", "(", "framelocks", ")", "docpr", "=", "makeelement", "(", "'docPr'", ",", "nsprefix", "=", "'wp'", ",", "attributes", "=", "{", "'id'", ":", "picid", ",", "'name'", ":", "'Picture 1'", ",", "'descr'", ":", "picdescription", "}", ")", "effectextent", "=", "makeelement", "(", "'effectExtent'", ",", "nsprefix", "=", "'wp'", ",", "attributes", "=", "{", "'l'", ":", "'25400'", ",", "'t'", ":", "'0'", ",", "'r'", ":", "'0'", ",", "'b'", ":", "'0'", "}", ")", "extent", "=", "makeelement", "(", "'extent'", ",", "nsprefix", "=", "'wp'", ",", "attributes", "=", "{", "'cx'", ":", "width", ",", "'cy'", ":", "height", "}", ")", "inline", "=", "makeelement", "(", "'inline'", ",", "attributes", "=", "{", "'distT'", ":", "\"0\"", ",", "'distB'", ":", "\"0\"", ",", "'distL'", ":", "\"0\"", ",", "'distR'", ":", "\"0\"", "}", ",", "nsprefix", "=", "'wp'", ")", "inline", ".", "append", "(", "extent", ")", "inline", ".", "append", "(", "effectextent", ")", "inline", ".", "append", "(", "docpr", ")", "inline", ".", "append", "(", "framepr", ")", "inline", ".", "append", "(", "graphic", ")", "drawing", "=", "makeelement", "(", "'drawing'", ")", "drawing", ".", "append", "(", "inline", ")", "run", "=", "makeelement", "(", "'r'", ")", "run", ".", "append", "(", "drawing", ")", "paragraph", "=", "makeelement", "(", "'p'", ")", "paragraph", ".", "append", "(", "run", ")", "if", "imagefiledict", "is", "not", "None", ":", "return", 
"relationshiplist", ",", "paragraph", ",", "imagefiledict", "else", ":", "return", "relationshiplist", ",", "paragraph" ]
36.861878
0.000292
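Two small facts the picture record above relies on, shown as a standalone sketch with made-up dimensions: OOXML drawing sizes are expressed in English Metric Units (914400 EMU per inch, 12700 EMU per point, so the code above scales each pixel to one point), and the EXIF Orientation tag maps onto a rotation plus optional flips.

EMU_PER_PIXEL = 12700                       # the function treats 1 px as 1 pt
width_px, height_px = 640, 480              # hypothetical image size
print(width_px * EMU_PER_PIXEL, height_px * EMU_PER_PIXEL)  # 8128000 6096000

# EXIF Orientation -> rotation in degrees, mirroring the mapping in the code
angle = {1: 0, 2: 0, 3: 180, 4: 0, 5: 90, 6: 90, 7: 270, 8: 270}
flip_h = {2, 5, 7}                          # orientations mirrored horizontally
for orientation in (1, 3, 6, 8):
    print(orientation, angle[orientation], orientation in flip_h)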