Columns:
  Unnamed: 0: int64, values 0 to 389k
  code: string, lengths 26 to 79.6k
  docstring: string, lengths 1 to 46.9k
400
def get_moderation(request):
    with db_connect() as db_conn:
        with db_conn.cursor() as cursor:
            cursor.execute()
            moderations = [x[0] for x in cursor.fetchall()]
    return moderations
Return the list of publications that need moderation.
401
def _change_precision(self, val, base=0):
    if not isinstance(val, int):
        raise TypeError()
    val = round(abs(val))
    val = (lambda num: base if is_num(num) else num)(val)
    return val
Check and normalise the value of precision (must be positive integer). Args: val (INT): must be positive integer base (INT): Description Returns: VAL (INT): Description
402
def _add_file(self, key, path): filename = os.path.basename(path) base, ext = os.path.splitext(filename) if os.path.exists(self.file_path(filename)): with tempfile.NamedTemporaryFile( dir=self.path, prefix=base, suffix=ext) as tf: filename = os.path.basename(tf.name) shutil.copyfile(path, self.file_path(filename)) self.contents[][key] = filename
Copy a file into the reference package.
403
def Open(self): self.h_process = kernel32.OpenProcess( PROCESS_VM_READ | PROCESS_QUERY_INFORMATION, 0, self.pid) if not self.h_process: raise process_error.ProcessError( "Failed to open process (pid %d)." % self.pid) if self.Is64bit(): si = self.GetNativeSystemInfo() self.max_addr = si.lpMaximumApplicationAddress else: si = self.GetSystemInfo() self.max_addr = 2147418111 self.min_addr = si.lpMinimumApplicationAddress
Opens the process for reading.
404
def _parse_fields_http(self, *args, **kwargs):
    from warnings import warn
    warn( )
    return self.parse_fields_http(*args, **kwargs)
Deprecated. This will be removed in a future release.
405
def enable_pretty_logging(options: Any = None, logger: logging.Logger = None) -> None: if options is None: import tornado.options options = tornado.options.options if options.logging is None or options.logging.lower() == "none": return if logger is None: logger = logging.getLogger() logger.setLevel(getattr(logging, options.logging.upper())) if options.log_file_prefix: rotate_mode = options.log_rotate_mode if rotate_mode == "size": channel = logging.handlers.RotatingFileHandler( filename=options.log_file_prefix, maxBytes=options.log_file_max_size, backupCount=options.log_file_num_backups, encoding="utf-8", ) elif rotate_mode == "time": channel = logging.handlers.TimedRotatingFileHandler( filename=options.log_file_prefix, when=options.log_rotate_when, interval=options.log_rotate_interval, backupCount=options.log_file_num_backups, encoding="utf-8", ) else: error_message = ( "The value of log_rotate_mode option should be " + % rotate_mode ) raise ValueError(error_message) channel.setFormatter(LogFormatter(color=False)) logger.addHandler(channel) if options.log_to_stderr or (options.log_to_stderr is None and not logger.handlers): channel = logging.StreamHandler() channel.setFormatter(LogFormatter()) logger.addHandler(channel)
Turns on formatted logging output as configured. This is called automatically by `tornado.options.parse_command_line` and `tornado.options.parse_config_file`.
406
def interleave(*arrays,**kwargs): abcd@ anum = arrays.__len__() rslt = [] length = arrays[0].__len__() for j in range(0,length): for i in range(0,anum): array = arrays[i] rslt.append(array[j]) return(rslt)
arr1 = [1,2,3,4]
arr2 = ['a','b','c','d']
arr3 = ['@','#','%','*']
interleave(arr1,arr2,arr3)
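As a quick check of the usage above, here is a minimal standalone sketch of the same round-robin interleaving (not the library function itself, and assuming all inputs have equal length):

def interleave_sketch(*arrays):
    # Round-robin merge: take the j-th element of each array in turn.
    return [arr[j] for j in range(len(arrays[0])) for arr in arrays]

arr1 = [1, 2, 3, 4]
arr2 = ['a', 'b', 'c', 'd']
arr3 = ['@', '#', '%', '*']
print(interleave_sketch(arr1, arr2, arr3))
# [1, 'a', '@', 2, 'b', '#', 3, 'c', '%', 4, 'd', '*']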
407
def add_or_update_records(cls, tables: I2B2Tables, records: List["ObservationFact"]) -> Tuple[int, int]:
    return cls._add_or_update_records(tables.crc_connection, tables.observation_fact, records)
Add or update the observation_fact table as needed to reflect the contents of records :param tables: i2b2 sql connection :param records: records to apply :return: number of records added / modified
408
def get(self, remote_file, local_file): sftp = self.get_sftp() try: sftp.get(remote_file, local_file) except Exception as e: logger.error() logger.error( % (remote_file, local_file)) logger.error(e)
Download a file. :param remote_file: :param local_file: :return:
409
def com_google_fonts_check_family_equal_font_versions(ttFonts): all_detected_versions = [] fontfile_versions = {} for ttFont in ttFonts: v = ttFont[].fontRevision fontfile_versions[ttFont] = v if v not in all_detected_versions: all_detected_versions.append(v) if len(all_detected_versions) != 1: versions_list = "" for v in fontfile_versions.keys(): versions_list += "* {}: {}\n".format(v.reader.file.name, fontfile_versions[v]) yield WARN, ("version info differs among font" " files of the same font project.\n" "These were the version values found:\n" "{}").format(versions_list) else: yield PASS, "All font files have the same version."
Make sure all font files have the same version value.
410
def grow(self, *args): if len(args) == 1: return Region.grow(self.x, self.y, args[0], args[0]) elif len(args) == 2: return Region(self.x, self.y, args[0], args[1]) elif len(args) == 4: return Region.create(self, *args) else: raise ValueError("Unrecognized arguments for grow")
Creates a region around the given point Valid arguments: * ``grow(wh)`` - Creates a region centered on this point with a width and height of ``wh``. * ``grow(w, h)`` - Creates a region centered on this point with a width of ``w`` and height of ``h``. * ``grow(Region.CREATE_X_DIRECTION, Region.CREATE_Y_DIRECTION, w, h)`` - Creates a region with this point as one corner, expanding in the specified direction
411
def locate(pattern, root=os.curdir):
    for path, dummy, files in os.walk(os.path.abspath(root)):
        for filename in fnmatch.filter(files, pattern):
            yield os.path.join(path, filename)
Locate all files matching supplied filename pattern recursively.
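A small usage sketch of the recursive pattern search above, using only the standard library:

import fnmatch
import os

def locate(pattern, root=os.curdir):
    # Walk the tree rooted at `root` and yield absolute paths matching `pattern`.
    for path, _dirs, files in os.walk(os.path.abspath(root)):
        for filename in fnmatch.filter(files, pattern):
            yield os.path.join(path, filename)

for py_file in locate("*.py", "."):
    print(py_file)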
412
def _create_checkable_action(self, text, conf_name, editorstack_method): def toogle(checked): self.switch_to_plugin() self._toggle_checkable_action(checked, editorstack_method, conf_name) action = create_action(self, text, toggled=toogle) action.setChecked(CONF.get(, conf_name)) return action
Helper function to create a checkable action. Args: text (str): Text to be displayed in the action. conf_name (str): Configuration setting associated with the action. editorstack_method (str): Name of the EditorStack method that will be used to apply the change in each editorstack.
413
def start(self): cert_path = os.path.join(self.work_dir, ) public_keys_dir = os.path.join(cert_path, ) private_keys_dir = os.path.join(cert_path, ) client_secret_file = os.path.join(private_keys_dir, "client.key") client_public, client_secret = zmq.auth.load_certificate(client_secret_file) server_public_file = os.path.join(public_keys_dir, "server.key") server_public, _ = zmq.auth.load_certificate(server_public_file) self.outgoing_msg_greenlet = gevent.spawn(self.outgoing_server_comms, server_public, client_public, client_secret) self.outgoing_msg_greenlet.link_exception(self.on_exception) self.incoming_msg_greenlet = gevent.spawn(self.incoming_server_comms, server_public, client_public, client_secret) self.incoming_msg_greenlet.link_exception(self.on_exception) logger.info() gevent.joinall([self.outgoing_msg_greenlet])
Starts services.
414
def cast_pars_dict(pars_dict): o = {} for pname, pdict in pars_dict.items(): o[pname] = {} for k, v in pdict.items(): if k == : o[pname][k] = bool(int(v)) elif k == : o[pname][k] = v else: o[pname][k] = float(v) return o
Cast the bool and float elements of a parameters dict to the appropriate python types.
415
def parse_na(txt: str) -> (MetarData, Units): units = Units(**NA_UNITS) clean = core.sanitize_report_string(txt) wxresp = {: txt, : clean} wxdata, wxresp[] = core.get_remarks(clean) wxdata, wxresp[], _ = core.sanitize_report_list(wxdata) wxdata, wxresp[], wxresp[] = core.get_station_and_time(wxdata) wxdata, wxresp[] = core.get_clouds(wxdata) wxdata, wxresp[], wxresp[], \ wxresp[], wxresp[] = core.get_wind(wxdata, units) wxdata, wxresp[] = core.get_altimeter(wxdata, units, ) wxdata, wxresp[] = core.get_visibility(wxdata, units) wxresp[], wxresp[], wxresp[] = core.get_temp_and_dew(wxdata) condition = core.get_flight_rules(wxresp[], core.get_ceiling(wxresp[])) wxresp[] = FLIGHT_RULES[condition] wxresp[] = remarks.parse(wxresp[]) wxresp[] = core.make_timestamp(wxresp[]) return MetarData(**wxresp), units
Parser for the North American METAR variant
416
def multiply_slow(x, y, prim=0x11b): def cl_mult(x,y): z = 0 i = 0 while (y>>i) > 0: if y & (1<<i): z ^= x<<i i += 1 return z def bit_length(n): bits = 0 while n >> bits: bits += 1 return bits def cl_div(dividend, divisor=None): dl1 = bit_length(dividend) dl2 = bit_length(divisor) if dl1 < dl2: return dividend for i in _range(dl1-dl2,-1,-1): if dividend & (1 << i+dl2-1): dividend ^= divisor << i return dividend result = cl_mult(x,y) if prim > 0: result = cl_div(result, prim) return result
Another equivalent (but even slower) way to compute multiplication in Galois Fields without using a precomputed look-up table. This is the form you will most often see in academic literature, by using the standard carry-less multiplication + modular reduction using an irreducible prime polynomial.
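A self-contained sketch of the same carry-less multiplication followed by modular reduction; with the AES polynomial 0x11b, 0x53 and 0xCA are multiplicative inverses, so the product reduces to 1. The helper name is illustrative, not from the original module.

def gf_mult_sketch(x, y, prim=0x11b):
    # Carry-less multiplication: XOR together shifted copies of x for each set bit of y.
    result = 0
    i = 0
    while (y >> i) > 0:
        if y & (1 << i):
            result ^= x << i
        i += 1
    # Modular reduction by the irreducible polynomial `prim` (long division over GF(2)).
    while result.bit_length() >= prim.bit_length():
        result ^= prim << (result.bit_length() - prim.bit_length())
    return result

print(hex(gf_mult_sketch(0x53, 0xCA)))  # 0x1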
417
def start_receive(self, fd, data=None):
    self._rfds[fd] = (data or fd, self._generation)
    self._update(fd)
Cause :meth:`poll` to yield `data` when `fd` is readable.
418
def measure_old_norse_syllable(syllable: list) -> Union[Length, None]: index = 0 while index < len(syllable) and not isinstance(syllable[index], Vowel): index += 1 if index == len(syllable): return None else: long_vowel_number = 0 short_vowel_number = 0 geminated_consonant_number = 0 simple_consonant_number = 0 for c in syllable[index:]: if isinstance(c, Vowel): if c.length == Length.long: long_vowel_number += 1 elif c.length == Length.short: short_vowel_number += 1 elif isinstance(c, Consonant): if c.geminate: geminated_consonant_number += 1 else: simple_consonant_number += 1 if long_vowel_number == 0 and short_vowel_number == 1 and simple_consonant_number <= 1 and\ geminated_consonant_number == 0: return Length.short elif (short_vowel_number == 1 and (simple_consonant_number > 1 or geminated_consonant_number > 0)) or \ long_vowel_number > 0 and simple_consonant_number <= 1 and geminated_consonant_number == 0: return Length.long elif long_vowel_number > 0 and (simple_consonant_number > 1 or geminated_consonant_number > 0): return Length.overlong
Old Norse syllables are considered as: - short if - long if - overlong if >>> measure_old_norse_syllable([m, a.lengthen(), l]).name 'long' >>> measure_old_norse_syllable([a, l]).name 'short' >>> measure_old_norse_syllable([s, t, ee, r, k, r]).name 'long' >>> measure_old_norse_syllable([m, o.lengthen()]).name 'long' :param syllable: list of Vowel and Consonant instances :return: instance of Length (short, long or overlong)
419
def _create_hidden_port(self, context, network_id, device_id, fixed_ips, port_type=DEVICE_OWNER_ROUTER_INTF): port = {: { : , : network_id, : ATTR_NOT_SPECIFIED, : fixed_ips, : device_id, : port_type, : True, : }} if extensions.is_extension_supported(self._core_plugin, "dns-integration"): port[].update(dns_name=) core_plugin = bc.get_plugin() return core_plugin.create_port(context, port)
Creates port used specially for HA purposes.
420
def polygen(*coefficients): if not coefficients: return lambda i: 0 else: c0 = coefficients[0] coefficients = coefficients[1:] def _(i): v = c0 for c in coefficients: v += c*i i *= i return v return _
Polynomial generating function
421
def stopping_function(results, args=None, rstate=None, M=None, return_vals=False): if args is None: args = dict({}) if rstate is None: rstate = np.random if M is None: M = map pfrac = args.get(, 1.0) if not 0. <= pfrac <= 1.: raise ValueError("The provided `pfrac` {0} is not between 0. and 1." .format(pfrac)) evid_thresh = args.get(, 0.1) if pfrac < 1. and evid_thresh < 0.: raise ValueError("The provided `evid_thresh` {0} is not non-negative " "even though `1. - pfrac` is {1}." .format(evid_thresh, 1. - pfrac)) post_thresh = args.get(, 0.02) if pfrac > 0. and post_thresh < 0.: raise ValueError("The provided `post_thresh` {0} is not non-negative " "even though `pfrac` is {1}." .format(post_thresh, pfrac)) n_mc = args.get(, 128) if n_mc <= 1: raise ValueError("The number of realizations {0} must be greater " "than 1.".format(n_mc)) elif n_mc < 20: warnings.warn("Using a small number of realizations might result in " "excessively noisy stopping value estimates.") error = args.get(, ) if error not in {, , }: raise ValueError("The chosen `` option {0} is not valid." .format(error)) if error == : error = boost = 2. else: boost = 1. approx = args.get(, True) rlist = [results for i in range(n_mc)] error_list = [error for i in range(n_mc)] approx_list = [approx for i in range(n_mc)] args = zip(rlist, error_list, approx_list) outputs = list(M(_kld_error, args)) kld_arr, lnz_arr = np.array([(kld[-1], res.logz[-1]) for kld, res in outputs]).T lnz_std = np.std(lnz_arr) stop_evid = np.sqrt(boost) * lnz_std / evid_thresh kld_mean, kld_std = np.mean(kld_arr), np.std(kld_arr) stop_post = boost * (kld_std / kld_mean) / post_thresh stop = pfrac * stop_post + (1. - pfrac) * stop_evid if return_vals: return stop <= 1., (stop_post, stop_evid, stop) else: return stop <= 1.
The default stopping function utilized by :class:`DynamicSampler`. Zipped parameters are passed to the function via :data:`args`. Assigns the run a stopping value based on a weighted average of the stopping values for the posterior and evidence:: stop = pfrac * stop_post + (1.- pfrac) * stop_evid The evidence stopping value is based on the estimated evidence error (i.e. standard deviation) relative to a given threshold:: stop_evid = evid_std / evid_thresh The posterior stopping value is based on the fractional error (i.e. standard deviation / mean) in the Kullback-Leibler (KL) divergence relative to a given threshold:: stop_post = (kld_std / kld_mean) / post_thresh Estimates of the mean and standard deviation are computed using `n_mc` realizations of the input using a provided `'error'` keyword (either `'jitter'` or `'simulate'`, which call related functions :meth:`jitter_run` and :meth:`simulate_run` in :mod:`dynesty.utils`, respectively, or `'sim_approx'`, which boosts `'jitter'` by a factor of two). Returns the boolean `stop <= 1`. If `True`, the :class:`DynamicSampler` will stop adding new samples to our results. Parameters ---------- results : :class:`Results` instance :class:`Results` instance. args : dictionary of keyword arguments, optional Arguments used to set the stopping values. Default values are `pfrac = 1.0`, `evid_thresh = 0.1`, `post_thresh = 0.02`, `n_mc = 128`, `error = 'sim_approx'`, and `approx = True`. rstate : `~numpy.random.RandomState`, optional `~numpy.random.RandomState` instance. M : `map` function, optional An alias to a `map`-like function. This allows users to pass functions from pools (e.g., `pool.map`) to compute realizations in parallel. By default the standard `map` function is used. return_vals : bool, optional Whether to return the stopping value (and its components). Default is `False`. Returns ------- stop_flag : bool Boolean flag indicating whether we have passed the desired stopping criteria. stop_vals : tuple of shape (3,), optional The individual stopping values `(stop_post, stop_evid, stop)` used to determine the stopping criteria.
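A toy numeric illustration of how the weighted stopping value is combined, using made-up realizations rather than dynesty output and ignoring the 'sim_approx' boost factor for brevity:

import numpy as np

# Hypothetical Monte Carlo realizations of log-evidence and KL divergence.
lnz_arr = np.array([1.02, 0.98, 1.01, 0.99])
kld_arr = np.array([2.00, 2.05, 1.95, 2.02])

pfrac, evid_thresh, post_thresh = 1.0, 0.1, 0.02
stop_evid = np.std(lnz_arr) / evid_thresh
stop_post = (np.std(kld_arr) / np.mean(kld_arr)) / post_thresh
stop = pfrac * stop_post + (1.0 - pfrac) * stop_evid
print(stop, stop <= 1.0)  # sampling stops once the weighted value drops to 1 or below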
422
def _rewind(self):
    DFReader._rewind(self)
    self.line = 0
    while self.line < len(self.lines):
        if self.lines[self.line].startswith("FMT, "):
            break
        self.line += 1
rewind to start of log
423
def convertforoutput(self,outputfile): super(CharEncodingConverter,self).convertforoutput(outputfile) return withheaders( flask.make_response( ( line.encode(self.charset) for line in outputfile ) ) , + self.charset)
Convert from one of the source formats into target format. Relevant if converters are used in OutputTemplates. Outputfile is a CLAMOutputFile instance.
424
def set_port_profile_created(self, vlan_id, profile_name, device_id): with self.session.begin(subtransactions=True): port_profile = self.session.query( ucsm_model.PortProfile).filter_by( vlan_id=vlan_id, profile_id=profile_name, device_id=device_id).first() if port_profile: port_profile.created_on_ucs = True self.session.merge(port_profile) else: new_profile = ucsm_model.PortProfile(profile_id=profile_name, vlan_id=vlan_id, device_id=device_id, created_on_ucs=True) self.session.add(new_profile)
Sets created_on_ucs flag to True.
425
def get_learning_objective_ids_metadata(self): metadata = dict(self._learning_objective_ids_metadata) metadata.update({: self.my_osid_object_form._my_map[][0]}) return Metadata(**metadata)
get the metadata for learning objective
426
def remove_col_label(self, event=None, col=None): if event: col = event.GetCol() if not col: return label = self.grid.GetColLabelValue(col) if in label: label = label.strip() elif in label: label = label.strip() if label in self.reqd_headers: pw.simple_warning("That header is required, and cannot be removed") return False else: print(, label) self.grid.remove_col(col) if self.grid_type in self.contribution.tables: if label in self.contribution.tables[self.grid_type].df.columns: del self.contribution.tables[self.grid_type].df[label] self.main_sizer.Fit(self)
Check to see if the column is required; if it is not, delete it from the grid.
427
def preorder(self):
    if not self:
        return
    yield self
    if self.left:
        for x in self.left.preorder():
            yield x
    if self.right:
        for x in self.right.preorder():
            yield x
iterator for nodes: root, left, right
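A minimal sketch of the same root-left-right traversal on a hypothetical node class (not the original tree type):

class Node:
    def __init__(self, value, left=None, right=None):
        self.value, self.left, self.right = value, left, right

    def preorder(self):
        # Yield the node itself, then recurse into the left and right subtrees.
        yield self
        if self.left:
            yield from self.left.preorder()
        if self.right:
            yield from self.right.preorder()

tree = Node(1, Node(2, Node(4)), Node(3))
print([n.value for n in tree.preorder()])  # [1, 2, 4, 3]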
428
def _get_system_volume(vm_): disk_size = get_size(vm_)[] if in vm_: disk_size = vm_[] volume = Volume( name=.format(vm_[]), size=disk_size, disk_type=get_disk_type(vm_) ) if in vm_: image_password = vm_[] volume.image_password = image_password ssh_keys = get_public_keys(vm_) volume.ssh_keys = ssh_keys if in vm_.keys(): volume.image_alias = vm_[] else: volume.image = get_image(vm_)[] if in vm_: volume.availability_zone = vm_[] return volume
Construct VM system volume list from cloud profile config
429
def TRUE(classical_reg):
    warn("`TRUE a` has been deprecated. Use `MOVE a 1` instead.")
    if isinstance(classical_reg, int):
        classical_reg = Addr(classical_reg)
    return MOVE(classical_reg, 1)
Produce a TRUE instruction. :param classical_reg: A classical register to modify. :return: An instruction object representing the equivalent MOVE.
430
def __get_strut_token(self): try: response = self.lc.session.get() soup = BeautifulSoup(response.text, "html5lib") strut_tag = None strut_token_name = soup.find(, {: }) if strut_token_name and strut_token_name[].strip(): form = soup.form for parent in strut_token_name.parents: if parent and parent.name == : form = parent break strut_token_name = strut_token_name[] strut_tag = soup.find(, {: strut_token_name}) if strut_tag and strut_tag[].strip(): return {: strut_token_name, : strut_tag[].strip()} self.__log(.format(response.text)) raise LendingClubError(, response) except Exception as e: self.__log(.format(str(e))) raise LendingClubError(.format(str(e)))
Move the staged loan notes to the order stage and get the struts token from the place order HTML. The order will not be placed until calling _confirm_order() Returns ------- dict A dict with the token name and value
431
def script(self, s):
    try:
        script = self._network.script.compile(s)
        script_info = self._network.contract.info_for_script(script)
        return Contract(script_info, self._network)
    except Exception:
        return None
Parse a script by compiling it. Return a :class:`Contract` or None.
432
def key_wait(): while 1: for event in get(): if event.type == : return event if event.type == : return KeyDown(, , True, False, True, False, False) _time.sleep(.001)
Waits until the user presses a key. Then returns a :any:`KeyDown` event. Key events will repeat if held down. A click to close the window will be converted into an Alt+F4 KeyDown event. Returns: tdl.event.KeyDown: The pressed key.
433
def capture(board): game = Game() v = (0, 0) stub_actor = base.Actor(, v, v, v, v, v, v, v, v, v) root = base.State(board, stub_actor, stub_actor, turn=1, actions_remaining=1) solution_node = None for eot in game.all_ends_of_turn(root): if eot.is_mana_drain: if eot.parent.board.is_empty(): solution_node = eot break solution_sequence = list() if solution_node: node = solution_node while node: if not isinstance(node, base.Swap): node = node.parent continue summary = base.Summary(node.parent.board, node.position_pair, None, None, None) solution_sequence.append(summary) node = node.parent return tuple(reversed(solution_sequence))
Try to solve the given board. Return a sequence of summaries that describe how to get to the solution.
434
def _read_descriptions(self, password): descfiles = [FRITZ_IGD_DESC_FILE] if password: descfiles.append(FRITZ_TR64_DESC_FILE) for descfile in descfiles: parser = FritzDescParser(self.address, self.port, descfile) if not self.modelname: self.modelname = parser.get_modelname() services = parser.get_services() self._read_services(services)
Read and evaluate the igddesc.xml file and the tr64desc.xml file if a password is given.
435
def getlanguage(self, language=None, windowsversion=None): if not language: language = self.language if language in (None, "", "*", "neutral"): return (LANGUAGE_NEUTRAL_NT5, LANGUAGE_NEUTRAL_NT6)[(windowsversion or sys.getwindowsversion()) >= (6, )] return language
Get and return the manifest's language as string. Can be either language-culture e.g. 'en-us' or a string indicating language neutrality, e.g. 'x-ww' on Windows XP or 'none' on Vista and later.
436
def _get_account_number(self, token, uuid): data = {"accessToken": token, "uuid": uuid} try: raw_res = yield from self._session.post(ACCOUNT_URL, data=data, headers=self._headers, timeout=self._timeout) except OSError: raise PyFidoError("Can not get account number") try: json_content = yield from raw_res.json() account_number = json_content\ .get(, {})\ .get(, [{}])[0]\ .get() except (OSError, ValueError): raise PyFidoError("Bad json getting account number") if account_number is None: raise PyFidoError("Can not get account number") return account_number
Get fido account number.
437
def call(method, *args, **kwargs):
    kwargs = clean_kwargs(**kwargs)
    return getattr(pyeapi_device[], method)(*args, **kwargs)
Calls an arbitrary pyeapi method.
438
def run_band_structure(self, paths, with_eigenvectors=False, with_group_velocities=False, is_band_connection=False, path_connections=None, labels=None, is_legacy_plot=False): if self._dynamical_matrix is None: msg = ("Dynamical matrix has not yet built.") raise RuntimeError(msg) if with_group_velocities: if self._group_velocity is None: self._set_group_velocity() group_velocity = self._group_velocity else: group_velocity = None self._band_structure = BandStructure( paths, self._dynamical_matrix, with_eigenvectors=with_eigenvectors, is_band_connection=is_band_connection, group_velocity=group_velocity, path_connections=path_connections, labels=labels, is_legacy_plot=is_legacy_plot, factor=self._factor)
Run phonon band structure calculation. Parameters ---------- paths : List of array_like Sets of qpoints that can be passed to phonopy.set_band_structure(). Numbers of qpoints can be different. shape of each array_like : (qpoints, 3) with_eigenvectors : bool, optional Flag whether eigenvectors are calculated or not. Default is False. with_group_velocities : bool, optional Flag whether group velocities are calculated or not. Default is False. is_band_connection : bool, optional Flag whether each band is connected or not. This is achieved by comparing similarity of eigenvectors of neighboring points. Sometimes this fails. Default is False. path_connections : List of bool, optional This is only used in graphical plot of band structure and gives whether each path is connected to the next path or not, i.e., if False, there is a jump of q-points. Number of elements is the same as that of paths. Default is None. labels : List of str, optional This is only used in graphical plot of band structure and gives labels of end points of each path. The number of labels is equal to (2 - np.array(path_connections)).sum(). is_legacy_plot: bool, optional This makes the old-style band structure plot. Default is False.
439
def save_features(self, train_features, test_features, feature_names, feature_list_id): self.save_feature_names(feature_names, feature_list_id) self.save_feature_list(train_features, , feature_list_id) self.save_feature_list(test_features, , feature_list_id)
Save features for the training and test sets to disk, along with their metadata. Args: train_features: A NumPy array of features for the training set. test_features: A NumPy array of features for the test set. feature_names: A list containing the names of the feature columns. feature_list_id: The name for this feature list.
440
def restart(self, container, instances=None, map_name=None, **kwargs): return self.run_actions(, container, instances=instances, map_name=map_name, **kwargs)
Restarts instances for a container configuration. :param container: Container name. :type container: unicode | str :param instances: Instance names to restart. If not specified, will restart all instances as specified in the configuration (or just one default instance). :type instances: collections.Iterable[unicode | str | NoneType] :param map_name: Container map name. Optional - if not provided the default map is used. :type map_name: unicode | str :param kwargs: Additional kwargs. If multiple actions result from this, they will only be applied to the main container restart. :return: Return values of restarted containers. :rtype: list[dockermap.map.runner.ActionOutput]
441
def _format_conditions_and_actions(self, raw_data): keys = raw_data.keys() formatted_set = {} return formatted_set
This function gets a set of actions and conditionswith the following format: {'action-0': 'repeat', 'action-1': 'repeat', 'analysisservice-0': '30cd952b0bb04a05ac27b70ada7feab2', 'analysisservice-1': '30cd952b0bb04a05ac27b70ada7feab2', 'and_or-0': 'and', 'and_or-1': 'no', 'range0-0': '12', 'range0-1': '31', 'range1-0': '12', 'range1-1': '33', 'worksheettemplate-0': '70d48adfb34c4231a145f76a858e94cf', 'setresulton-0': 'original', 'setresulton-1': 'original', 'trigger': 'submit', 'value': '', 'an_result_id-0':'rep-1', 'an_result_id-1':'rep-2'} and returns a formatted set with the conditions and actions sorted like this one: { 'conditions':[{ 'range1': 'X', 'range0': 'X', 'cond_row_idx':'X' 'and_or': 'and', 'analysisservice': '<as_uid>', }, { 'range1': 'X', 'range0': 'X', 'cond_row_idx':'X' 'and_or': 'and', 'analysisservice': '<as_uid>', }, {...}], 'trigger': 'xxx', 'actions':[ {'action':'duplicate', 'act_row_idx':'0', 'otherWS': to_another, 'analyst': 'sussan1', 'setresultdiscrete': '1', 'setresultvalue': '2', 'worksheettemplate-0': '70d48adfb34c4231a145f76a858e94cf', 'setresulton': 'original','an_result_id-0':'rep-1'}, {'action':'repeat', 'act_row_idx':'1', 'otherWS': current, 'analyst': '', ...}, ] }
442
def parse_genemap2(lines): LOG.info("Parsing the omim genemap2") header = [] for i,line in enumerate(lines): line = line.rstrip() if line.startswith(): if i < 10: if line.startswith(): header = line[2:].split() continue if len(line) < 5: continue parsed_entry = parse_omim_line(line, header) parsed_entry[] = int(parsed_entry[]) parsed_entry[] = line hgnc_symbol = parsed_entry.get("Approved Symbol") gene_symbols = [] if parsed_entry.get(): gene_symbols = [symbol.strip() for symbol in parsed_entry[].split()] parsed_entry[] = gene_symbols if not hgnc_symbol and gene_symbols: hgnc_symbol = gene_symbols[0] parsed_entry[] = hgnc_symbol gene_inheritance = set() parsed_phenotypes = [] for phenotype_info in parsed_entry.get(, ).split(): if not phenotype_info: continue phenotype_info = phenotype_info.lstrip() phenotype_status = OMIM_STATUS_MAP.get(phenotype_info[0], ) if phenotype_status == : continue phenotype_description = "" splitted_info = phenotype_info.split() for i, text in enumerate(splitted_info): match = entry_pattern.search(text) if not match: phenotype_description += text else: mimnr_match = mimnr_pattern.search(phenotype_info) if mimnr_match: phenotype_mim = int(mimnr_match.group()) else: phenotype_mim = parsed_entry[] phenotype_description += text[:-4] break inheritance = set() inheritance_text = .join(splitted_info[i:]) for term in mim_inheritance_terms: if term in inheritance_text: inheritance.add(TERMS_MAPPER[term]) gene_inheritance.add(TERMS_MAPPER[term]) parsed_phenotypes.append( { :phenotype_mim, : inheritance, : phenotype_description.strip(), : phenotype_status, } ) parsed_entry[] = parsed_phenotypes parsed_entry[] = gene_inheritance yield parsed_entry
Parse the omim source file called genemap2.txt Explanation of Phenotype field: Brackets, "[ ]", indicate "nondiseases," mainly genetic variations that lead to apparently abnormal laboratory test values. Braces, "{ }", indicate mutations that contribute to susceptibility to multifactorial disorders (e.g., diabetes, asthma) or to susceptibility to infection (e.g., malaria). A question mark, "?", before the phenotype name indicates that the relationship between the phenotype and gene is provisional. More details about this relationship are provided in the comment field of the map and in the gene and phenotype OMIM entries. The number in parentheses after the name of each disorder indicates the following: (1) the disorder was positioned by mapping of the wildtype gene; (2) the disease phenotype itself was mapped; (3) the molecular basis of the disorder is known; (4) the disorder is a chromosome deletion or duplication syndrome. Args: lines(iterable(str)) Yields: parsed_entry(dict)
443
def unicorn_edit(path, **kwargs): ctx = Context(**kwargs) ctx.timeout = None ctx.execute_action(, **{ : ctx.repo.create_secure_service(), : path, })
Edit Unicorn node interactively.
444
def _EvaluateExpressions(self, frame):
    return [self._FormatExpression(frame, expression)
            for expression in self._definition.get() or []]
Evaluates watched expressions into a string form. If expression evaluation fails, the error message is used as evaluated expression string. Args: frame: Python stack frame of breakpoint hit. Returns: Array of strings where each string corresponds to the breakpoint expression with the same index.
445
def runInactiveDeviceCleanup(self):
    yield self.deleteInactiveDevicesByQuota(
        self.__inactive_per_jid_max,
        self.__inactive_global_max
    )
    yield self.deleteInactiveDevicesByAge(self.__inactive_max_age)
Runs both the deleteInactiveDevicesByAge and the deleteInactiveDevicesByQuota methods with the configuration that was set when calling create.
446
def unlink(self, request, uuid=None):
    service = self.get_object()
    service.unlink_descendants()
    self.perform_destroy(service)
    return Response(status=status.HTTP_204_NO_CONTENT)
Unlink all related resources, service project link and service itself.
447
def product(*arrays):
    arrays = [np.asarray(x) for x in arrays]
    shape = (len(x) for x in arrays)
    dtype = arrays[0].dtype
    ix = np.indices(shape)
    ix = ix.reshape(len(arrays), -1).T
    out = np.empty_like(ix, dtype=dtype)
    for n, _ in enumerate(arrays):
        out[:, n] = arrays[n][ix[:, n]]
    return out
Generate a cartesian product of input arrays. Parameters ---------- arrays : list of array-like 1-D arrays to form the cartesian product of. Returns ------- out : ndarray 2-D array of shape (M, len(arrays)) containing cartesian products formed of input arrays.
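A hedged usage sketch of the cartesian-product idea with np.indices, re-deriving the same index trick rather than calling the exact function above:

import numpy as np

def cartesian_product(*arrays):
    arrays = [np.asarray(a) for a in arrays]
    # One row of index positions per combination of elements.
    ix = np.indices(tuple(len(a) for a in arrays)).reshape(len(arrays), -1).T
    out = np.empty_like(ix, dtype=arrays[0].dtype)
    for n, a in enumerate(arrays):
        out[:, n] = a[ix[:, n]]
    return out

print(cartesian_product([1, 2], [3, 4]))
# [[1 3]
#  [1 4]
#  [2 3]
#  [2 4]]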
448
def _delete_unwanted_caracters(self, chain): try: chain = chain.decode(, ) except UnicodeEncodeError: pass except AttributeError: pass for char in self.illegal_macro_output_chars: chain = chain.replace(char, ) return chain
Remove unwanted characters from the chain; the unwanted characters are listed in the illegal_macro_output_chars attribute. :param chain: chain to remove characters from :type chain: str :return: cleaned chain :rtype: str
449
def transform_txn_for_ledger(txn):
    txn_data = get_payload_data(txn)
    txn_data[AUDIT_TXN_LEDGERS_SIZE] = {int(k): v for k, v in txn_data[AUDIT_TXN_LEDGERS_SIZE].items()}
    txn_data[AUDIT_TXN_LEDGER_ROOT] = {int(k): v for k, v in txn_data[AUDIT_TXN_LEDGER_ROOT].items()}
    txn_data[AUDIT_TXN_STATE_ROOT] = {int(k): v for k, v in txn_data[AUDIT_TXN_STATE_ROOT].items()}
    return txn
Makes sure that we have integer as keys after possible deserialization from json :param txn: txn to be transformed :return: transformed txn
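The underlying issue is that JSON serialization turns integer dict keys into strings; a small standalone illustration of the re-keying pattern used above:

import json

ledger_sizes = {1: 10, 2: 25}
round_tripped = json.loads(json.dumps(ledger_sizes))
print(round_tripped)                                     # {'1': 10, '2': 25}
restored = {int(k): v for k, v in round_tripped.items()}
print(restored)                                          # {1: 10, 2: 25}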
450
def paintEvent(self, event): super(XTextEdit, self).paintEvent(event) if self.document().isEmpty() and self.hint(): text = self.hint() rect = self.rect() rect.setX(4) rect.setY(4) align = int(Qt.AlignLeft | Qt.AlignTop) clr = self.hintColor() with XPainter(self.viewport()) as painter: painter.setPen(clr) painter.drawText(rect, align | Qt.TextWordWrap, text)
Overloads the paint event to support rendering of hints if there are no items in the tree. :param event | <QPaintEvent>
451
def head(draw=True, show=True, max_shape=256): import ipyvolume as ipv from scipy.interpolate import interp1d colors = [[0.91, 0.7, 0.61, 0.0], [0.91, 0.7, 0.61, 80.0], [1.0, 1.0, 0.85, 82.0], [1.0, 1.0, 0.85, 256]] x = np.array([k[-1] for k in colors]) rgb = np.array([k[:3] for k in colors]) N = 256 xnew = np.linspace(0, 256, N) tf_data = np.zeros((N, 4)) kind = for channel in range(3): f = interp1d(x, rgb[:, channel], kind=kind) ynew = f(xnew) tf_data[:, channel] = ynew alphas = [[0, 0], [0, 40], [0.2, 60], [0.05, 63], [0, 80], [0.9, 82], [1.0, 256]] x = np.array([k[1] * 1.0 for k in alphas]) y = np.array([k[0] * 1.0 for k in alphas]) f = interp1d(x, y, kind=kind) ynew = f(xnew) tf_data[:, 3] = ynew tf = ipv.TransferFunction(rgba=tf_data.astype(np.float32)) head_data = ipv.datasets.head.fetch().data if draw: vol = ipv.volshow(head_data, tf=tf, max_shape=max_shape) if show: ipv.show() return vol else: return head_data
Show a volumetric rendering of a human male head.
452
def add_concept(self, concept_obj): if concept_obj is None: raise Exception("Concept object cannot be None") elif concept_obj in self.__concepts: raise Exception("Concept object is already inside") elif concept_obj.cidx in self.__concept_map: raise Exception("Duplicated concept ID ({})".format(concept_obj.cidx)) self.__concepts.append(concept_obj) self.__concept_map[concept_obj.cidx] = concept_obj concept_obj.sent = self return concept_obj
Add a concept to current concept list
453
def delete(cls, resources, background=False, force=False): if not isinstance(resources, (list, tuple)): resources = [resources] ifaces = [] for item in resources: try: ip_ = cls.info(item) except UsageError: cls.error("Caniface_idid']) return Iface.delete(ifaces, background)
Delete an ip by deleting the iface
454
def _weld_unary(array, weld_type, operation): if weld_type not in {WeldFloat(), WeldDouble()}: raise TypeError() obj_id, weld_obj = create_weld_object(array) weld_template = weld_obj.weld_code = weld_template.format(array=obj_id, type=weld_type, op=operation) return weld_obj
Apply operation on each element in the array. As mentioned by Weld, the operations follow the behavior of the equivalent C functions from math.h Parameters ---------- array : numpy.ndarray or WeldObject Data weld_type : WeldType Of the data operation : {'exp', 'log', 'sqrt', 'sin', 'cos', 'tan', 'asin', 'acos', 'atan', 'sinh', 'cosh', 'tanh', 'erf'} Which unary operation to apply. Returns ------- WeldObject Representation of this computation.
455
def add_datepart(df, fldname, drop=True, time=False, errors="raise"): fld = df[fldname] fld_dtype = fld.dtype if isinstance(fld_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype): fld_dtype = np.datetime64 if not np.issubdtype(fld_dtype, np.datetime64): df[fldname] = fld = pd.to_datetime(fld, infer_datetime_format=True, errors=errors) targ_pre = re.sub(, , fldname) attr = [, , , , , , , , , , , ] if time: attr = attr + [, , ] for n in attr: df[targ_pre + n] = getattr(fld.dt, n.lower()) df[targ_pre + ] = fld.astype(np.int64) // 10 ** 9 if drop: df.drop(fldname, axis=1, inplace=True)
add_datepart converts a column of df from a datetime64 to many columns containing the information from the date. This applies changes inplace. Parameters: ----------- df: A pandas data frame. df gain several new columns. fldname: A string that is the name of the date column you wish to expand. If it is not a datetime64 series, it will be converted to one with pd.to_datetime. drop: If true then the original date column will be removed. time: If true time features: Hour, Minute, Second will be added. Examples: --------- >>> df = pd.DataFrame({ 'A' : pd.to_datetime(['3/11/2000', '3/12/2000', '3/13/2000'], infer_datetime_format=False) }) >>> df A 0 2000-03-11 1 2000-03-12 2 2000-03-13 >>> add_datepart(df, 'A') >>> df AYear AMonth AWeek ADay ADayofweek ADayofyear AIs_month_end AIs_month_start AIs_quarter_end AIs_quarter_start AIs_year_end AIs_year_start AElapsed 0 2000 3 10 11 5 71 False False False False False False 952732800 1 2000 3 10 12 6 72 False False False False False False 952819200 2 2000 3 11 13 0 73 False False False False False False 952905600
456
def h_v_t(header, key):
    if key not in header:
        key = key.title()
        if key not in header:
            raise ValueError("Unexpected header in response, missing: " + key + " headers:\n" + str(header))
    return header[key]
Get a header value, tolerating case differences: try the key as given, then its title-cased form, e.g. header['x-log-abc'] or header['X-Log-Abc']. :param header: :param key: :return:
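A small self-contained sketch of the case-tolerant lookup described above (the shortened error message is illustrative):

def h_v_t(header, key):
    # Try the key as given; fall back to its title-cased form (e.g. 'x-log-abc' -> 'X-Log-Abc').
    if key not in header:
        key = key.title()
        if key not in header:
            raise ValueError("Unexpected header in response, missing: " + key)
    return header[key]

print(h_v_t({"X-Log-Abc": "42"}, "x-log-abc"))  # 42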
457
def _proxy(self):
    if self._context is None:
        self._context = DocumentContext(
            self._version,
            service_sid=self._solution[],
            sid=self._solution[],
        )
    return self._context
Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: DocumentContext for this DocumentInstance :rtype: twilio.rest.preview.sync.service.document.DocumentContext
458
def printMe(self, selfKey, selfValue): text = .format(keyName=selfKey) if len(selfValue) == 0: return else: valueText = for element in selfValue: if singleOrPair(element) == : valueText += element.printMe(element.tag, element.value) elif singleOrPair(element) == : valueText += element.printMe(element.key, element.value) text += valueText return text
Parse the key and its value and return the parsed str. Args: selfKey (str): The key. Normally just ``self.key`` selfValue (list): a list of value elements (single, subclasses, str, int). Normally just ``self.value`` Returns: str: A parsed text
459
def p_declare_list(p): if len(p) == 4: p[0] = [ast.Directive(p[1], p[3], lineno=p.lineno(1))] else: p[0] = p[1] + [ast.Directive(p[3], p[5], lineno=p.lineno(2))]
declare_list : STRING EQUALS static_scalar | declare_list COMMA STRING EQUALS static_scalar
460
def get_ssh_keys(sshdir):
    keys = Queue()
    for root, _, files in os.walk(os.path.abspath(sshdir)):
        if not files:
            continue
        for filename in files:
            fullname = os.path.join(root, filename)
            if (os.path.isfile(fullname) and fullname.endswith() or fullname.endswith()):
                keys.put(fullname)
    return keys
Get SSH keys
461
def drop_layer(self, layer):
    if self._frozen:
        raise TypeError()
    for child in self._children.values():
        child.drop_layer(layer)
    self._layers.remove(layer)
Removes the named layer and the value associated with it from the node. Parameters ---------- layer : str Name of the layer to drop. Raises ------ TypeError If the node is frozen KeyError If the named layer does not exist
462
def _check_not_in_finally(self, node, node_name, breaker_classes=()): if not self._tryfinallys: return _parent = node.parent _node = node while _parent and not isinstance(_parent, breaker_classes): if hasattr(_parent, "finalbody") and _node in _parent.finalbody: self.add_message("lost-exception", node=node, args=node_name) return _node = _parent _parent = _node.parent
Check that a node is not inside a finally clause of a try...finally statement. If, before reaching the try...finally block, we find a parent whose type is in breaker_classes, we skip the whole check.
463
def remove_csv_from_json(d):
    logger_jsons.info("enter remove_csv_from_json")
    if "paleoData" in d:
        d = _remove_csv_from_section(d, "paleoData")
    if "chronData" in d:
        d = _remove_csv_from_section(d, "chronData")
    logger_jsons.info("exit remove_csv_from_json")
    return d
Remove all CSV data 'values' entries from paleoData table in the JSON structure. :param dict d: JSON data - old structure :return dict: Metadata dictionary without CSV values
464
def _check_filepath(changes): filename = None for change_ in changes: try: cmd, arg = change_.split(, 1) if cmd not in METHOD_MAP: error = .format(cmd) raise ValueError(error) method = METHOD_MAP[cmd] parts = salt.utils.args.shlex_split(arg) if method in [, , , ]: filename_ = parts[0] else: _, _, filename_ = parts if not filename_.startswith(): error = \ \ .format(change_) raise ValueError(error) filename_ = re.sub(, , filename_) if filename is not None: if filename != filename_: error = \ \ .format(filename, filename_) raise ValueError(error) filename = filename_ except (ValueError, IndexError) as err: log.error(err) if not in locals(): error = \ \ .format(change_) else: error = six.text_type(err) raise ValueError(error) filename = _workout_filename(filename) return filename
Ensure all changes are fully qualified and affect only one file. This ensures that the diff output works and a state change is not incorrectly reported.
465
def tostring(self, inject):
    return inject(self, .join(document.tostring(inject) for document in self.documents))
Get the entire text content as str
466
def define_parser(self): point = Group(integer.setResultsName("x") + integer.setResultsName("y")) n_points = (integer.setResultsName("n") + OneOrMore(point).setResultsName("points")) n_bytes = Suppress(integer) + Suppress(minus) + \ Word(printables).setResultsName("b") justify = ToInteger( Literal("-1") | Literal("0") | Literal("1") ).setResultsName("j") fill = (Literal("C").suppress() + Suppress(integer) + Suppress(minus) + colour.setResultsName("color")).setResultsName("fill") stroke = (Literal("c").suppress() + Suppress(integer) + Suppress(minus) + colour.setResultsName("color") ).setResultsName("stroke") font = (Literal("F").suppress() + real.setResultsName("s") + n_bytes).setResultsName("font") style = (Literal("S").suppress() + n_bytes).setResultsName("style") filled_ellipse = (Literal("E").suppress() + integer.setResultsName("x0") + integer.setResultsName("y0") + integer.setResultsName("w") + integer.setResultsName("h") ).setResultsName("filled_ellipse") ellipse = (Literal("e").suppress() + integer.setResultsName("x0") + integer.setResultsName("y0") + integer.setResultsName("w") + integer.setResultsName("h") ).setResultsName("ellipse") filled_polygon = (Literal("P").suppress() + n_points).setResultsName("filled_polygon") polygon = (Literal("p").suppress() + n_points).setResultsName("polygon") polyline = (Literal("L").suppress() + n_points).setResultsName("polyline") bspline = (Literal("B").suppress() + n_points).setResultsName("bspline") filled_bspline = (Literal("b").suppress() + n_points).setResultsName("filled_bspline") text = (Literal("T").suppress() + integer.setResultsName("x") + integer.setResultsName("y") + justify + integer.setResultsName("w") + n_bytes).setResultsName("text") image = (Literal("I").suppress() + integer.setResultsName("x") + integer.setResultsName("y") + integer.setResultsName("w") + integer.setResultsName("h") + n_bytes).setResultsName("image") value = (Optional(quote).suppress() + OneOrMore(filled_ellipse | ellipse | filled_polygon | polygon | polyline | bspline | filled_bspline | text | fill | stroke | font | style | image) + Optional(quote).suppress()).setResultsName("value") fill.setParseAction(self.proc_fill_color) stroke.setParseAction(self.proc_stroke_color) font.setParseAction(self.proc_font) style.setParseAction(self.proc_style) filled_ellipse.setParseAction(self.proc_filled_ellipse) ellipse.setParseAction(self.proc_unfilled_ellipse) filled_polygon.setParseAction(self.proc_filled_polygon) polygon.setParseAction(self.proc_unfilled_polygon) polyline.setParseAction(self.proc_polyline) bspline.setParseAction(self.proc_unfilled_bspline) filled_bspline.setParseAction(self.proc_filled_bspline) text.setParseAction(self.proc_text) image.setParseAction(self.proc_image) return value
Defines xdot grammar. @see: http://graphviz.org/doc/info/output.html#d:xdot
467
def sort_dict_by_key(obj):
    sort_func = lambda x: x[0]
    return OrderedDict(sorted(obj.items(), key=sort_func))
Sort dict by its keys >>> sort_dict_by_key(dict(c=1, b=2, a=3, d=4)) OrderedDict([('a', 3), ('b', 2), ('c', 1), ('d', 4)])
468
def extract_args(self, data):
    args = []
    data = data.strip()
    if in data:
        lhs, rhs = data.split(, 1)
        if lhs:
            args.extend(lhs.rstrip().split())
        args.append(rhs)
    else:
        args.extend(data.split())
    return tuple(args)
Extract IRC message arguments.
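IRC lines keep everything after a " :" separator as a single trailing argument; a sketch under that assumption (the delimiter literal is elided in the snippet above):

def extract_args(data):
    # Split on whitespace, but keep the IRC "trailing" part (after ' :') as one argument.
    args = []
    data = data.strip()
    if ' :' in data:
        lhs, rhs = data.split(' :', 1)
        if lhs:
            args.extend(lhs.rstrip().split())
        args.append(rhs)
    else:
        args.extend(data.split())
    return tuple(args)

print(extract_args("#channel :hello there"))  # ('#channel', 'hello there')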
469
def set_pin_retries(ctx, pw_attempts, admin_pin, force): controller = ctx.obj[] resets_pins = controller.version < (4, 0, 0) if resets_pins: click.echo( ) force or click.confirm(.format( *pw_attempts), abort=True, err=True) controller.set_pin_retries(*(pw_attempts + (admin_pin.encode(),))) click.echo() if resets_pins: click.echo() echo_default_pins()
Manage pin-retries. Sets the number of attempts available before locking for each PIN. PW_ATTEMPTS should be three integer values corresponding to the number of attempts for the PIN, Reset Code, and Admin PIN, respectively.
470
def dt_weekofyear(x):
    import pandas as pd
    return pd.Series(x).dt.weekofyear.values
Returns the week ordinal of the year. :returns: an expression containing the week ordinal of the year, extracted from a datetime column. Example: >>> import vaex >>> import numpy as np >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64) >>> df = vaex.from_arrays(date=date) >>> df # date 0 2009-10-12 03:31:00 1 2016-02-11 10:17:34 2 2015-11-12 11:34:22 >>> df.date.dt.weekofyear Expression = dt_weekofyear(date) Length: 3 dtype: int64 (expression) ----------------------------------- 0 42 1 6 2 46
471
def close(self):
    self._serial.write(b"@c")
    self._serial.read()
    self._serial.close()
Closes the connection to the serial port and ensures no pending operations are left.
472
def launch_batch_workflow(self, batch_workflow): url = % { : self.base_url } try: r = self.gbdx_connection.post(url, json=batch_workflow) batch_workflow_id = r.json()[] return batch_workflow_id except TypeError as e: self.logger.debug(.format(e))
Launches GBDX batch workflow. Args: batch_workflow (dict): Dictionary specifying batch workflow tasks. Returns: Batch Workflow id (str).
473
def first_consumed_mesh(self): for instruction in self.instructions: if instruction.consumes_meshes(): return instruction.first_consumed_mesh raise IndexError("{} consumes no meshes".format(self))
The first consumed mesh. :return: the first consumed mesh :rtype: knittingpattern.Mesh.Mesh :raises IndexError: if no mesh is consumed .. seealso:: :attr:`number_of_consumed_meshes`
474
def configure(self, options, conf): super(S3Logging, self).configure(options, conf) self.options = options
Get the options.
475
def detect_ts(df, max_anoms=0.10, direction=, alpha=0.05, only_last=None, threshold=None, e_value=False, longterm=False, piecewise_median_period_weeks=2, plot=False, y_log=False, xlabel = , ylabel = , title=None, verbose=False): if not isinstance(df, DataFrame): raise ValueError("data must be a single data frame.") else: if len(df.columns) != 2 or not df.iloc[:,1].map(np.isreal).all(): raise ValueError(("data must be a 2 column data.frame, with the" "first column being a set of timestamps, and " "the second coloumn being numeric values.")) if (not (df.dtypes[0].type is np.datetime64) and not (df.dtypes[0].type is np.int64)): df = format_timestamp(df) if list(df.columns.values) != ["timestamp", "value"]: df.columns = ["timestamp", "value"] if max_anoms > 0.49: length = len(df.value) raise ValueError( ("max_anoms must be less than 50% of " "the data points (max_anoms =%f data_points =%s).") % (round(max_anoms * length, 0), length)) if not direction in [, , ]: raise ValueError("direction options are: pos | neg | both.") if not (0.01 <= alpha or alpha <= 0.1): if verbose: import warnings warnings.warn(("alpha is the statistical signifigance, " "and is usually between 0.01 and 0.1")) if only_last and not only_last in [, ]: raise ValueError("only_last must be either or ") if not threshold in [None,,,]: raise ValueError("threshold options are: None | med_max | p95 | p99") if not isinstance(e_value, bool): raise ValueError("e_value must be a boolean") if not isinstance(longterm, bool): raise ValueError("longterm must be a boolean") if piecewise_median_period_weeks < 2: raise ValueError( "piecewise_median_period_weeks must be at greater than 2 weeks") if not isinstance(plot, bool): raise ValueError("plot must be a boolean") if not isinstance(y_log, bool): raise ValueError("y_log must be a boolean") if not isinstance(xlabel, string_types): raise ValueError("xlabel must be a string") if not isinstance(ylabel, string_types): raise ValueError("ylabel must be a string") if title and not isinstance(title, string_types): raise ValueError("title must be a string") if not title: title = else: title = title + " : " gran = get_gran(df) if gran == "day": num_days_per_line = 7 if isinstance(only_last, string_types) and only_last == : only_last = else: num_days_per_line = 1 if gran == : df.timestamp = date_format(df.timestamp, "%Y-%m-%d %H:%M:00") df = format_timestamp(df.groupby().aggregate(np.sum)) gran_period = { : 1440, : 24, : 7 } period = gran_period.get(gran) if not period: raise ValueError( % gran) num_obs = len(df.value) clamp = (1 / float(num_obs)) if max_anoms < clamp: max_anoms = clamp if longterm: if gran == "day": num_obs_in_period = period * piecewise_median_period_weeks + 1 num_days_in_period = 7 * piecewise_median_period_weeks + 1 else: num_obs_in_period = period * 7 * piecewise_median_period_weeks num_days_in_period = 7 * piecewise_median_period_weeks last_date = df.timestamp.iloc[-1] all_data = [] for j in range(0, len(df.timestamp), num_obs_in_period): start_date = df.timestamp.iloc[j] end_date = min(start_date + datetime.timedelta(days=num_days_in_period), df.timestamp.iloc[-1]) if (end_date - start_date).days == num_days_in_period: sub_df = df[(df.timestamp >= start_date) & (df.timestamp < end_date)] else: sub_df = df[(df.timestamp > (last_date - datetime.timedelta(days=num_days_in_period))) & (df.timestamp <= last_date)] all_data.append(sub_df) else: all_data = [df] all_anoms = DataFrame(columns=[, ]) seasonal_plus_trend = DataFrame(columns=[, ]) for i in range(len(all_data)): directions = 
{ : Direction(True, True), : Direction(True, False), : Direction(False, True) } anomaly_direction = directions[direction] s_h_esd_timestamps = detect_anoms(all_data[i], k=max_anoms, alpha=alpha, num_obs_per_period=period, use_decomp=True, one_tail=anomaly_direction.one_tail, upper_tail=anomaly_direction.upper_tail, verbose=verbose) data_decomp = s_h_esd_timestamps[] s_h_esd_timestamps = s_h_esd_timestamps[] if s_h_esd_timestamps: anoms = all_data[i][all_data[i].timestamp.isin(s_h_esd_timestamps)] else: anoms = DataFrame(columns=[, ]) if threshold: periodic_maxes = df.groupby( df.timestamp.map(Timestamp.date)).aggregate(np.max).value if threshold == : thresh = periodic_maxes.median() elif threshold == : thresh = periodic_maxes.quantile(.95) elif threshold == : thresh = periodic_maxes.quantile(.99) anoms = anoms[anoms.value >= thresh] all_anoms = all_anoms.append(anoms) seasonal_plus_trend = seasonal_plus_trend.append(data_decomp) try: all_anoms.drop_duplicates(subset=[], inplace=True) seasonal_plus_trend.drop_duplicates(subset=[], inplace=True) except TypeError: all_anoms.drop_duplicates(cols=[], inplace=True) seasonal_plus_trend.drop_duplicates(cols=[], inplace=True) if only_last: start_date = df.timestamp.iloc[-1] - datetime.timedelta(days=7) start_anoms = df.timestamp.iloc[-1] - datetime.timedelta(days=1) if gran is "day": breaks = 3 * 12 num_days_per_line = 7 else: if only_last == : breaks = 12 else: start_date = df.timestamp.iloc[-1] - datetime.timedelta(days=2) start_date = datetime.date(start_date.year, start_date.month, start_date.day) start_anoms = (df.timestamp.iloc[-1] - datetime.timedelta(hours=1)) breaks = 3 x_subset_single_day = df[df.timestamp > start_anoms] x_subset_week = df[(df.timestamp <= start_anoms) & (df.timestamp > start_date)] if len(all_anoms) > 0: all_anoms = all_anoms[all_anoms.timestamp >= x_subset_single_day.timestamp.iloc[0]] num_obs = len(x_subset_single_day.value) anom_pct = (len(df.value) / float(num_obs)) * 100 if anom_pct == 0: return { "anoms": None, "plot": None } all_anoms.index = all_anoms.timestamp if e_value: d = { : all_anoms.timestamp, : all_anoms.value, : seasonal_plus_trend[ seasonal_plus_trend.timestamp.isin( all_anoms.timestamp)].value } else: d = { : all_anoms.timestamp, : all_anoms.value } anoms = DataFrame(d, index=d[].index) return { : anoms, : None }
Anomaly Detection Using Seasonal Hybrid ESD Test A technique for detecting anomalies in seasonal univariate time series where the input is a series of <timestamp, value> pairs. Args: x: Time series as a two column data frame where the first column consists of the timestamps and the second column consists of the observations. max_anoms: Maximum number of anomalies that S-H-ESD will detect as a percentage of the data. direction: Directionality of the anomalies to be detected. Options are: ('pos' | 'neg' | 'both'). alpha: The level of statistical significance with which to accept or reject anomalies. only_last: Find and report anomalies only within the last day or hr in the time series. Options: (None | 'day' | 'hr') threshold: Only report positive going anoms above the threshold specified. Options are: (None | 'med_max' | 'p95' | 'p99') e_value: Add an additional column to the anoms output containing the expected value. longterm: Increase anom detection efficacy for time series that are greater than a month. See Details below. piecewise_median_period_weeks: The piecewise median time window as described in Vallis, Hochenbaum, and Kejariwal (2014). Defaults to 2. plot: (Currently unsupported) A flag indicating if a plot with both the time series and the estimated anoms, indicated by circles, should also be returned. y_log: Apply log scaling to the y-axis. This helps with viewing plots that have extremely large positive anomalies relative to the rest of the data. xlabel: X-axis label to be added to the output plot. ylabel: Y-axis label to be added to the output plot. Details 'longterm' This option should be set when the input time series is longer than a month. The option enables the approach described in Vallis, Hochenbaum, and Kejariwal (2014). 'threshold' Filter all negative anomalies and those anomalies whose magnitude is smaller than one of the specified thresholds which include: the median of the daily max values (med_max), the 95th percentile of the daily max values (p95), and the 99th percentile of the daily max values (p99). 'title' Title for the output plot. 'verbose' Enable debug messages The returned value is a dictionary with the following components: anoms: Data frame containing timestamps, values, and optionally expected values. plot: A graphical object if plotting was requested by the user. The plot contains the estimated anomalies annotated on the input time series
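A hypothetical call, assuming the module exposes detect_ts with the signature shown above and that the result keys follow the docstring's description; the column data here are made up:

import numpy as np
import pandas as pd

# Hourly series with one obvious spike injected.
ts = pd.date_range("2024-01-01", periods=24 * 14, freq="H")
values = np.random.normal(100, 5, len(ts))
values[200] = 400
df = pd.DataFrame({"timestamp": ts, "value": values})

result = detect_ts(df, max_anoms=0.02, direction="both", alpha=0.05)
print(result["anoms"])  # DataFrame of anomalous timestamps and values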
476
def total(self):
    if self._result_cache:
        return self._result_cache.total
    return self.all().total
Return the total number of records
477
def masked_local_attention_2d(q, k, v, query_shape=(8, 16), memory_flange=(8, 16), name=None): with tf.variable_scope( name, default_name="local_masked_self_attention_2d", values=[q, k, v]): v_shape = common_layers.shape_list(v) q = pad_to_multiple_2d(q, query_shape) q_indices = gather_indices_2d(q, query_shape, query_shape) q_new = gather_blocks_2d(q, q_indices) k_flange, k_center = get_memory_region(k, query_shape, memory_flange, q_indices) v_flange, v_center = get_memory_region(v, query_shape, memory_flange, q_indices) if k_flange is not None: k_new = tf.concat([k_flange, k_center], axis=3) v_new = tf.concat([v_flange, v_center], axis=3) else: k_new = k_center v_new = v_center query_elements = np.prod(query_shape) padding_mask = None if k_flange is not None: padding_mask = tf.expand_dims( embedding_to_padding(k_flange) * -1e9, axis=-2) padding_mask = tf.tile(padding_mask, [1, 1, 1, query_elements, 1]) center_attention_bias = attention_bias_lower_triangle( np.prod(query_elements)) center_attention_bias = tf.reshape( center_attention_bias, [1, 1, 1, query_elements, query_elements]) v_center_shape = common_layers.shape_list(v_center) center_attention_bias = tf.tile( center_attention_bias, [v_center_shape[0], v_center_shape[1], v_center_shape[2], 1, 1]) if padding_mask is not None: attention_bias = tf.concat([padding_mask, center_attention_bias], axis=4) else: attention_bias = center_attention_bias output = dot_product_attention( q_new, k_new, v_new, attention_bias, dropout_rate=0., name="masked_local_2d", make_image_summary=False) padded_q_shape = common_layers.shape_list(q) output = scatter_blocks_2d(output, q_indices, padded_q_shape) output = tf.slice(output, [0, 0, 0, 0, 0], [-1, -1, v_shape[2], v_shape[3], -1]) return output
Strided block local self-attention. Each position in a query block can attend to all the generated queries in the query block, which are generated in raster scan, and positions that are generated to the left and top. The shapes are specified by query shape and memory flange. Note that if you're using this function, you do not need to right shift. Right shifting happens inside this function separately for each block. Args: q: a Tensor with shape [batch, heads, h, w, depth_k] k: a Tensor with shape [batch, heads, h, w, depth_k] v: a Tensor with shape [batch, heads, h, w, depth_v]. In the current implementation, depth_v must be equal to depth_k. query_shape: a tuple indicating the height and width of each query block. query_shape = block_shape memory_flange: a tuple indicating how much to look in height and width from each query block. memory shape = query_shape + (block_flange[0], 2*block_flange[1]) name: an optional string Returns: a Tensor of shape [batch, heads, h, w, depth_v]
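A shape-level sketch of calling the function above from TF1 graph mode; it assumes the surrounding tensor2tensor helpers (common_layers, pad_to_multiple_2d, etc.) are importable in the same module, and the dimensions are illustrative only.

import tensorflow as tf  # TF1-style graph mode assumed

batch, heads, h, w, depth = 2, 4, 32, 32, 64
q = tf.random_normal([batch, heads, h, w, depth])
k = tf.random_normal([batch, heads, h, w, depth])
v = tf.random_normal([batch, heads, h, w, depth])  # depth_v == depth_k

out = masked_local_attention_2d(q, k, v,
                                query_shape=(8, 16),
                                memory_flange=(8, 16))
# out: [batch, heads, h, w, depth_v]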
478
def dataframe_setup(self):
    # Dictionary to store the sanitised header: value combinations
    genesippr_dict = dict()
    # Load the report files into dictionaries keyed on sample name
    try:
        sippr_matrix = pd.read_csv(os.path.join(self.reportpath, ),
                                   delimiter=, index_col=0).T.to_dict()
    except FileNotFoundError:
        sippr_matrix = dict()
    try:
        conf_matrix = pd.read_csv(os.path.join(self.reportpath, ),
                                  delimiter=, index_col=0).T.to_dict()
    except FileNotFoundError:
        conf_matrix = dict()
    try:
        gdcs_matrix = pd.read_csv(os.path.join(self.reportpath, ),
                                  delimiter=, index_col=0).T.to_dict()
    except FileNotFoundError:
        gdcs_matrix = dict()
    # Populate the dictionary, defaulting to 0 whenever a value is missing
    for sample in self.metadata:
        genesippr_dict[sample.name] = dict()
        try:
            genesippr_dict[sample.name][] = self.data_sanitise(sippr_matrix[sample.name][])
        except KeyError:
            genesippr_dict[sample.name][] = 0
        try:
            genesippr_dict[sample.name][] = self.data_sanitise(sippr_matrix[sample.name][])
        except KeyError:
            genesippr_dict[sample.name][] = 0
        try:
            genesippr_dict[sample.name][] = self.data_sanitise(sippr_matrix[sample.name][])
        except KeyError:
            genesippr_dict[sample.name][] = 0
        try:
            genesippr_dict[sample.name][] = self.data_sanitise(sippr_matrix[sample.name][])
        except KeyError:
            genesippr_dict[sample.name][] = 0
        try:
            genesippr_dict[sample.name][] = self.data_sanitise(sippr_matrix[sample.name][])
        except KeyError:
            genesippr_dict[sample.name][] = 0
        try:
            genesippr_dict[sample.name][] = self.data_sanitise(sippr_matrix[sample.name][])
        except KeyError:
            genesippr_dict[sample.name][] = 0
        try:
            genesippr_dict[sample.name][] = self.data_sanitise(sippr_matrix[sample.name][])
        except KeyError:
            genesippr_dict[sample.name][] = 0
        try:
            genesippr_dict[sample.name][] = self.data_sanitise(sippr_matrix[sample.name][])
        except KeyError:
            genesippr_dict[sample.name][] = 0
        try:
            genesippr_dict[sample.name][] = self.data_sanitise(sippr_matrix[sample.name][])
        except KeyError:
            genesippr_dict[sample.name][] = 0
        try:
            genesippr_dict[sample.name][] = self.data_sanitise(gdcs_matrix[sample.name][], header=)
        except KeyError:
            genesippr_dict[sample.name][] = 0
        try:
            genesippr_dict[sample.name][] = self.data_sanitise(
                conf_matrix[sample.name][], header=)
        except KeyError:
            genesippr_dict[sample.name][] = 0
        try:
            genesippr_dict[sample.name][] = self.data_sanitise(
                gdcs_matrix[sample.name][], header=)
        except KeyError:
            genesippr_dict[sample.name][] = 0
    # Write the sanitised values to the image report
    with open(self.image_report, ) as csv:
        data = .format(.join(self.header_list))
        for strain in sorted(genesippr_dict):
            data += .format(str=strain)
            for header in self.header_list[1:]:
                data += .format(value=genesippr_dict[strain][header])
            data = data.rstrip()
            data +=
        csv.write(data)
Set up a report to store the desired header: sanitized string combinations
479
def build_logits(data_ops, embed_layer, rnn_core, output_linear, name_prefix):
    # Embed the input index sequence.
    embedded_input_seq = snt.BatchApply(
        embed_layer, name="input_embed_seq")(data_ops.sparse_obs)

    # Local variables that hold the RNN state across unrolls.
    initial_rnn_state = nest.map_structure(
        lambda t: tf.get_local_variable(
            "{}/rnn_state/{}".format(name_prefix, t.op.name), initializer=t),
        rnn_core.initial_state(FLAGS.batch_size))
    assign_zero_rnn_state = nest.map_structure(
        lambda x: x.assign(tf.zeros_like(x)), initial_rnn_state)
    assign_zero_rnn_state = tf.group(*nest.flatten(assign_zero_rnn_state))

    # Unroll the RNN core over the sequence.
    rnn_output_seq, rnn_final_state = tf.nn.dynamic_rnn(
        cell=rnn_core,
        inputs=embedded_input_seq,
        initial_state=initial_rnn_state,
        time_major=True)

    # Persist the final state so the next unroll starts where this one ended.
    update_rnn_state = nest.map_structure(
        tf.assign, initial_rnn_state, rnn_final_state)
    with tf.control_dependencies(nest.flatten(update_rnn_state)):
        rnn_output_seq = tf.identity(rnn_output_seq, name="rnn_output_seq")
    output_logits = snt.BatchApply(
        output_linear, name="output_embed_seq")(rnn_output_seq)
    return output_logits, assign_zero_rnn_state
This is the core model logic. Unrolls a Bayesian RNN over the given sequence. Args: data_ops: A `sequence_data.SequenceDataOps` namedtuple. embed_layer: A `snt.Embed` instance. rnn_core: A `snt.RNNCore` instance. output_linear: A `snt.Linear` instance. name_prefix: A string to use to prefix local variable names. Returns: A 3D time-major tensor representing the model's logits for a sequence of predictions. Shape `[time_steps, batch_size, vocab_size]`.
480
def get_locations(self, locations, columns=None, **kwargs):
    indexes = [self._index[x] for x in locations]
    return self.get(indexes, columns, **kwargs)
For list of locations and list of columns return a DataFrame of the values. :param locations: list of index locations :param columns: list of column names :param kwargs: will pass along these parameters to the get() method :return: DataFrame
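A small illustration of the call pattern, assuming the DataFrame class this method belongs to accepts data/index keyword arguments at construction time; the values are invented.

df = DataFrame(data={'a': [1, 2, 3], 'b': [4, 5, 6]}, index=['x', 'y', 'z'])
sub = df.get_locations(locations=[0, 2], columns=['a'])
# sub holds the rows at index locations 0 and 2 ('x' and 'z') for column 'a'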
481
def distinct_letters(string_matrix: List[List[str]]) -> Set[str]:
    return set([letter
                for sentence in string_matrix
                for word in sentence
                for letter in word])
Diagnostic function. :param string_matrix: a data matrix: a list wrapping a list of strings, with each sublist being a sentence. :return: the set of distinct letters found in the matrix >>> dl = distinct_letters([['the', 'quick', 'brown'],['how', 'now', 'cow']]) >>> sorted(dl) ['b', 'c', 'e', 'h', 'i', 'k', 'n', 'o', 'q', 'r', 't', 'u', 'w']
482
def batch_(self, rpc_calls):
    batch_data = []
    for rpc_call in rpc_calls:
        AuthServiceProxy.__id_count += 1
        m = rpc_call.pop(0)
        batch_data.append({"jsonrpc": "2.0", "method": m, "params": rpc_call,
                           "id": AuthServiceProxy.__id_count})

    postdata = json.dumps(batch_data, default=EncodeDecimal)
    log.debug("--> " + postdata)
    self.__conn.request('POST', self.__url.path, postdata,
                        {'Host': self.__url.hostname,
                         'User-Agent': USER_AGENT,
                         'Authorization': self.__auth_header,
                         'Content-type': 'application/json'})
    results = []
    responses = self._get_response()
    for response in responses:
        if response['error'] is not None:
            raise JSONRPCException(response['error'])
        elif 'result' not in response:
            raise JSONRPCException({
                'code': -343, 'message': 'missing JSON-RPC result'})
        else:
            results.append(response['result'])
    return results
Batch RPC call. Pass array of arrays: [ [ "method", params... ], ... ] Returns array of results.
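For illustration, the call shape might look like the following; the endpoint URL and method names are placeholders for whatever JSON-RPC service the proxy points at.

proxy = AuthServiceProxy("http://user:password@127.0.0.1:8332")
results = proxy.batch_([
    ["getblockcount"],        # -> results[0]
    ["getblockhash", 0],      # -> results[1]
])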
483
def new(cls, ns_path, script, campaign_dir, runner_type='Auto',
        overwrite=False, optimized=True, check_repo=True):
    ns_path = os.path.abspath(ns_path)
    campaign_dir = os.path.abspath(campaign_dir)

    # Reuse an existing campaign if it matches the requested script
    if Path(campaign_dir).exists() and not overwrite:
        manager = CampaignManager.load(campaign_dir, ns_path,
                                       runner_type=runner_type,
                                       optimized=optimized,
                                       check_repo=check_repo)
        if manager.db.get_script() == script:
            return manager
        else:
            del manager

    runner = CampaignManager.create_runner(ns_path, script,
                                           runner_type=runner_type,
                                           optimized=optimized)
    params = runner.get_available_parameters()
    commit = ""
    if check_repo:
        from git import Repo, exc
        commit = Repo(ns_path).head.commit.hexsha

    db = DatabaseManager.new(script=script,
                             params=params,
                             commit=commit,
                             campaign_dir=campaign_dir,
                             overwrite=overwrite)
    return cls(db, runner, check_repo)
Create a new campaign from an ns-3 installation and a campaign directory. This method will create a DatabaseManager, which will install a database in the specified campaign_dir. If a database is already available at the ns_path described in the specified campaign_dir and its configuration matches config, this instance is used instead. If the overwrite argument is set to True instead, the specified directory is wiped and a new campaign is created in its place. Furthermore, this method will initialize a SimulationRunner, of type specified by the runner_type parameter, which will be locked on the ns-3 installation at ns_path and set up to run the desired script. Finally, note that creation of a campaign requires a git repository to be initialized at the specified ns_path. This will allow SEM to save the commit at which the simulations are run, enforce reproducibility and avoid mixing results coming from different versions of ns-3 and its libraries. Args: ns_path (str): path to the ns-3 installation to employ in this campaign. script (str): ns-3 script that will be executed to run simulations. campaign_dir (str): path to the directory in which to save the simulation campaign database. runner_type (str): implementation of the SimulationRunner to use. Value can be: SimulationRunner (for running sequential simulations locally), ParallelRunner (for running parallel simulations locally), GridRunner (for running simulations using a DRMAA-compatible parallel task scheduler). Use Auto to automatically pick the best runner. overwrite (bool): whether to overwrite already existing campaign_dir folders. This deletes the directory if and only if it only contains files that were detected to be created by sem. optimized (bool): whether to configure the runner to employ an optimized ns-3 build.
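A hedged sketch of creating a campaign with the constructor above; all paths and the script name are placeholders and assume an ns-3 tree that is under git version control.

campaign = CampaignManager.new(
    ns_path='/path/to/ns-3',            # ns-3 installation (must be a git repo)
    script='wifi-example',              # placeholder ns-3 script name
    campaign_dir='/path/to/campaign',   # where the results database is stored
    runner_type='ParallelRunner',
    overwrite=False,
)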
484
def delete_floatingip(self, floatingip_id):
    ret = self.network_conn.delete_floatingip(floatingip_id)
    return ret if ret else True
Deletes the specified floatingip
485
def _emit_no_set_found(environment_name, product_name):
    sys.stdout.write(colorama.Fore.YELLOW + .format(environment_name, product_name) + colorama.Fore.RESET)
    sys.stdout.write()
    logger.warning(
        .format(environment_name, product_name))
Writes to stdout and logs a warning if no connection string is found for the deployment. :param environment_name: name of the target environment :param product_name: name of the product being deployed :return: None
486
def radius_server_host_retries(self, **kwargs):
    config = ET.Element("config")
    radius_server = ET.SubElement(config, "radius-server",
                                  xmlns="urn:brocade.com:mgmt:brocade-aaa")
    host = ET.SubElement(radius_server, "host")
    hostname_key = ET.SubElement(host, "hostname")
    hostname_key.text = kwargs.pop('hostname')
    retries = ET.SubElement(host, "retries")
    retries.text = kwargs.pop('retries')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
487
def get_wind_url(self):
    wind_direction = self.f_d.get(, None)
    if wind_direction is not None:
        rounded = int(5 * round(float(wind_direction) / 5))
        return WIND_ARROW_URL.format(rounded)
Get wind arrow url.
488
def execute(self):
    config.logger.debug()
    config.logger.debug(self.params)
    if in self.params:
        self.params.pop(, None)
    if in self.params:
        self.params.pop(, None)
    create_result = config.sfdc_client.create_apex_checkpoint(self.params)
    if type(create_result) is list:
        create_result = create_result[0]
    IndexApexOverlaysCommand(params=self.params).execute()
    if type(create_result) is not str and type(create_result) is not unicode:
        return json.dumps(create_result)
    else:
        return create_result
self.params = { "ActionScriptType" : "None", "ExecutableEntityId" : "01pd0000001yXtYAAU", "IsDumpingHeap" : True, "Iteration" : 1, "Line" : 3, "ScopeId" : "005d0000000xxzsAAA" }
489
def _complete_last_byte(self, packet):
    padded_size = self.get_size()
    padding_bytes = padded_size - len(packet)
    if padding_bytes > 0:
        packet += Pad(padding_bytes).pack()
    return packet
Pad until the packet length is a multiple of 8 (bytes).
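To make the padding rule concrete, here is a standalone sketch that pads a byte string to a multiple of 8 with zero bytes; the method above delegates the actual byte generation to its Pad helper and a precomputed padded size, so this is a simplification.

def pad_to_multiple_of_8(packet: bytes) -> bytes:
    # Append zero bytes until the length is a multiple of 8.
    remainder = len(packet) % 8
    if remainder:
        packet += b'\x00' * (8 - remainder)
    return packet

assert len(pad_to_multiple_of_8(b'\x01\x02\x03')) == 8
assert pad_to_multiple_of_8(b'') == b''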
490
def Division(left: vertex_constructor_param_types,
             right: vertex_constructor_param_types,
             label: Optional[str] = None) -> Vertex:
    return Double(context.jvm_view().DivisionVertex, label,
                  cast_to_double_vertex(left), cast_to_double_vertex(right))
Divides one vertex by another :param left: the vertex to be divided :param right: the vertex to divide
491
def _unparse_entry_record(self, entry):
    for attr_type in sorted(entry.keys()):
        for attr_value in entry[attr_type]:
            self._unparse_attr(attr_type, attr_value)
:type entry: Dict[string, List[string]] :param entry: Dictionary holding an entry
492
def example_load_data(self):
    self.x = constant([[0.7, 0.9]])
    self.w1 = Variable(random_normal([2, 3], stddev=1, seed=1))
    self.w2 = Variable(random_normal([3, 1], stddev=1, seed=1))
Load the data.
493
def get_github_hostname_user_repo_from_url(url):
    parsed = parse.urlparse(url)
    if parsed.netloc == '':
        # Likely an SSH-style remote such as git@github.com:user/repo.git
        host, sep, path = parsed.path.partition(":")
        if "@" in host:
            username, sep, host = host.partition("@")
    else:
        path = parsed.path[1:].rstrip('/')
        host = parsed.netloc
    user, repo = path.split("/", 1)
    return host, user, repo[:-4] if repo.endswith('.git') else repo
Return hostname, user and repository to fork from. :param url: The URL to parse :return: hostname, user, repository
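Example invocations covering both HTTPS and SSH-style remotes (repository names chosen for illustration):

print(get_github_hostname_user_repo_from_url(
    'https://github.com/octocat/Hello-World.git'))
# ('github.com', 'octocat', 'Hello-World')

print(get_github_hostname_user_repo_from_url(
    'git@github.com:octocat/Hello-World.git'))
# ('github.com', 'octocat', 'Hello-World')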
494
def add_device_not_active_callback(self, callback):
    _LOGGER.debug(, callback)
    self._cb_device_not_active.append(callback)
Register callback to be invoked when a device is not responding.
495
def get_as_type_with_default(self, index, value_type, default_value):
    value = self[index]
    return TypeConverter.to_type_with_default(value_type, value, default_value)
Converts array element into a value defined by the specified typecode. If conversion is not possible it returns the default value. :param index: an index of element to get. :param value_type: the TypeCode that defines the type of the result :param default_value: the default value :return: element value defined by the typecode or default value if conversion is not supported.
496
def get_ip_prefixes_from_bird(filename):
    prefixes = []
    with open(filename, 'r') as bird_conf:
        lines = bird_conf.read()
    for line in lines.splitlines():
        line = line.strip()
        if valid_ip_prefix(line):
            prefixes.append(line)
    return prefixes
Build a list of IP prefixes found in Bird configuration. Arguments: filename (str): The absolute path of the Bird configuration file. Notes: It can only parse a file with the following format define ACAST_PS_ADVERTISE = [ 10.189.200.155/32, 10.189.200.255/32 ]; Returns: A list of IP prefixes.
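Given the file format shown in the notes, usage reduces to a single call; the path below is a placeholder.

prefixes = get_ip_prefixes_from_bird('/etc/bird/anycast-prefixes.conf')
# e.g. ['10.189.200.155/32', '10.189.200.255/32']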
497
def create_access_token_response(self, uri, http_method='GET', body=None,
                                 headers=None, credentials=None):
    resp_headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    try:
        request = self._create_request(uri, http_method, body, headers)
        valid, processed_request = self.validate_access_token_request(
            request)
        if valid:
            token = self.create_access_token(request, credentials or {})
            self.request_validator.invalidate_request_token(
                request.client_key,
                request.resource_owner_key,
                request)
            return resp_headers, token, 200
        else:
            return {}, None, 401
    except errors.OAuth1Error as e:
        return resp_headers, e.urlencoded, e.status_code
Create an access token response, with a new request token if valid. :param uri: The full URI of the token request. :param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc. :param body: The request body as a string. :param headers: The request headers as a dict. :param credentials: A list of extra credentials to include in the token. :returns: A tuple of 3 elements. 1. A dict of headers to set on the response. 2. The response body as a string. 3. The response status code as an integer. An example of a valid request:: >>> from your_validator import your_validator >>> from oauthlib.oauth1 import AccessTokenEndpoint >>> endpoint = AccessTokenEndpoint(your_validator) >>> h, b, s = endpoint.create_access_token_response( ... 'https://your.provider/access_token?foo=bar', ... headers={ ... 'Authorization': 'OAuth oauth_token=234lsdkf....' ... }, ... credentials={ ... 'my_specific': 'argument', ... }) >>> h {'Content-Type': 'application/x-www-form-urlencoded'} >>> b 'oauth_token=lsdkfol23w54jlksdef&oauth_token_secret=qwe089234lkjsdf&oauth_authorized_realms=movies+pics&my_specific=argument' >>> s 200 An response to invalid request would have a different body and status:: >>> b 'error=invalid_request&description=missing+resource+owner+key' >>> s 400 The same goes for an an unauthorized request: >>> b '' >>> s 401
498
def chmod(self, mode):
    self.sftp._log(DEBUG, 'chmod(%s, %r)' % (hexlify(self.handle), mode))
    attr = SFTPAttributes()
    attr.st_mode = mode
    self.sftp._request(CMD_FSETSTAT, self.handle, attr)
Change the mode (permissions) of this file. The permissions are unix-style and identical to those used by python's C{os.chmod} function. @param mode: new permissions @type mode: int
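Typical use is through an open paramiko SFTP file handle; the path and mode bits below are placeholders.

f = sftp.open('/tmp/example.txt', 'w')   # `sftp` is an active paramiko SFTPClient
f.write('hello\n')
f.chmod(0o644)                           # rw-r--r--
f.close()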
499
def compute_all_sg_permutations(positions, rotations, translations, lattice,
                                symprec):
    out = []
    for (sym, t) in zip(rotations, translations):
        rotated_positions = np.dot(positions, sym.T) + t
        out.append(compute_permutation_for_rotation(positions,
                                                    rotated_positions,
                                                    lattice,
                                                    symprec))
    return np.array(out, dtype='intc', order='C')
Compute a permutation for every space group operation. See 'compute_permutation_for_rotation' for more info. Output has shape (num_rot, num_pos)