Dataset columns:
Unnamed: 0: int64, values 0 to 389k
code: string, lengths 26 to 79.6k
docstring: string, lengths 1 to 46.9k
600
def _bfd_rx(self, **kwargs): int_type = kwargs[] method_name = % int_type bfd_rx = getattr(self._interface, method_name) config = bfd_rx(**kwargs) if kwargs[]: tag = config.find( % tag).set(, ) pass return config
Return the BFD minimum receive interval XML. You should not use this method. You probably want `BGP.bfd`. Args: min_rx (str): BFD receive interval in milliseconds (300, 500, etc) delete (bool): Remove the configuration if ``True``. Returns: XML to be passed to the switch. Raises: None
601
def is_complex(self) -> bool: from .main import BaseModel return ( self.shape != Shape.SINGLETON or lenient_issubclass(self.type_, (BaseModel, list, set, dict)) or hasattr(self.type_, ) )
Whether the field is "complex", e.g. env variables should be parsed as JSON.
602
def _sitelist(self, matrix): _list = [] for item in matrix: sites = [] if isinstance(matrix[item], list): sites = matrix[item] elif isinstance(matrix[item], dict): sites = matrix[item][] for site in sites: if len(site.keys()) > 4: continue domain = self.params.get() if domain: if domain in site[]: _list.append(site[]) else: _list.append(site[]) return _list
Returns a list of sites from a SiteMatrix, optionally filtered by 'domain' param
603
def write_meta(self):
    path = os.path.join(self.get_private_dir(create=True), "meta.yaml")
    units = {key: str(value) for key, value in self.units.items()}
    meta_info = dict(description=self.description,
                     ucds=self.ucds, units=units,
                     descriptions=self.descriptions,
                     )
    vaex.utils.write_json_or_yaml(path, meta_info)
Writes all meta data, ucd,description and units The default implementation is to write this to a file called meta.yaml in the directory defined by :func:`DataFrame.get_private_dir`. Other implementation may store this in the DataFrame file itself. (For instance the vaex hdf5 implementation does this) This method is called after virtual columns or variables are added. Upon opening a file, :func:`DataFrame.update_meta` is called, so that the information is not lost between sessions. Note: opening a DataFrame twice may result in corruption of this file.
604
def _sync_outlineexplorer_file_order(self):
    if self.outlineexplorer is not None:
        self.outlineexplorer.treewidget.set_editor_ids_order(
            [finfo.editor.get_document_id() for finfo in self.data])
Order the root file items of the outline explorer as in the tabbar of the current EditorStack.
605
def _check_axis(self, ds, name): TXYZ allowed_axis = [, , , ] variable = ds.variables[name] axis = variable.axis valid_axis = TestCtx(BaseCheck.HIGH, self.section_titles[]) axis_is_string = isinstance(axis, basestring), valid_axis.assert_true(axis_is_string and len(axis) > 0, "{}t a string we cans axis attribute must be T, X, Y, or Z, ".format(name)+\ "currently {}".format(axis)) return valid_axis.to_result()
Checks that the axis attribute is a string and an allowed value, namely one of 'T', 'X', 'Y', or 'Z'. :param netCDF4.Dataset ds: An open netCDF dataset :param str name: Name of the variable :rtype: compliance_checker.base.Result
606
def grad(self, params, epsilon=0.0001):
    grad = []
    for x in range(len(params)):
        temp = np.copy(params)
        temp[x] += epsilon
        temp2 = np.copy(params)
        temp2[x] -= epsilon
        grad.append((self.__cost_function(temp) - self.__cost_function(temp2)) / (2 * epsilon))
    return np.array(grad)
Used to check gradient estimation through slope approximation.
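A minimal standalone sketch of the same central-difference scheme (a plain cost function stands in for the class's private __cost_function):

import numpy as np

def numerical_grad(cost, params, epsilon=1e-4):
    # Central difference: (f(p + eps) - f(p - eps)) / (2 * eps), one parameter at a time.
    grad = np.zeros_like(params, dtype=float)
    for i in range(len(params)):
        up, down = np.copy(params), np.copy(params)
        up[i] += epsilon
        down[i] -= epsilon
        grad[i] = (cost(up) - cost(down)) / (2 * epsilon)
    return grad

print(numerical_grad(lambda p: np.sum(p ** 2), np.array([1.0, -2.0, 3.0])))  # ~[2., -4., 6.]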
607
def _parse_source_sections(self, diff_str): source_dict = dict() src_path = None if src_path not in source_dict: source_dict[src_path] = [] if found_hunk or line.startswith(): found_hunk = True if src_path is not None: source_dict[src_path].append(line) else: raise GitDiffError(msg) return source_dict
Given the output of `git diff`, return a dictionary with keys that are source file paths. Each value is a list of lines from the `git diff` output related to the source file. Raises a `GitDiffError` if `diff_str` is in an invalid format.
608
def GetScriptHashesForVerifying(self): if not self.References and len(self.Attributes) < 1: return [] hashes = set() for coinref, output in self.References.items(): hashes.add(output.ScriptHash) for attr in self.Attributes: if attr.Usage == TransactionAttributeUsage.Script: if type(attr.Data) is UInt160: hashes.add(attr.Data) else: hashes.add(UInt160(data=attr.Data)) for key, group in groupby(self.outputs, lambda p: p.AssetId): if self.raw_tx: asset = Helper.StaticAssetState(key) else: asset = GetBlockchain().GetAssetState(key.ToBytes()) if asset is None: raise Exception("Invalid operation") if asset.AssetType == AssetType.DutyFlag: for p in group: hashes.add(p.ScriptHash) hashlist = list(hashes) hashlist.sort() return hashlist
Get a list of script hashes for verifying transactions. Raises: Exception: if there are no valid assets in the transaction. Returns: list: of UInt160 type script hashes.
609
def copy(self):
    new_client = self._client.copy()
    return self.__class__(
        self.instance_id,
        new_client,
        self.configuration_name,
        node_count=self.node_count,
        display_name=self.display_name,
    )
Make a copy of this instance. Copies the local data stored as simple types and copies the client attached to this instance. :rtype: :class:`~google.cloud.spanner_v1.instance.Instance` :returns: A copy of the current instance.
610
def update_entity(self, entity, if_match=): request = _update_entity(entity, if_match) self._add_to_batch(entity[], entity[], request)
Adds an update entity operation to the batch. See :func:`~azure.storage.table.tableservice.TableService.update_entity` for more information on updates. The operation will not be executed until the batch is committed. :param entity: The entity to update. Could be a dict or an entity object. Must contain a PartitionKey and a RowKey. :type entity: a dict or :class:`azure.storage.table.models.Entity` :param str if_match: The client may specify the ETag for the entity on the request in order to compare to the ETag maintained by the service for the purpose of optimistic concurrency. The update operation will be performed only if the ETag sent by the client matches the value maintained by the server, indicating that the entity has not been modified since it was retrieved by the client. To force an unconditional update, set If-Match to the wildcard character (*).
611
def fasta(self):
    if not self._fasta:
        self._fasta = '>{}\n{}'.format(self.id, self.sequence)
    return self._fasta
str: Returns the sequence, as a FASTA-formatted string Note: The FASTA string is built using ``Sequence.id`` and ``Sequence.sequence``.
612
def encode(strs):
    res = ''
    for string in strs:
        res += str(len(string)) + ":" + string
    return res
Encodes a list of strings to a single string. :type strs: List[str] :rtype: str
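A quick illustrative run of this length-prefix encoding (the matching decode, not shown here, would walk the string and split on each length marker):

print(encode(["hello", "wo:rld", ""]))  # "5:hello6:wo:rld0:"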
613
def notes(path): df = pd.read_csv(path, delimiter=) text_row = df.iloc[0:-1, 0].str.contains(, ) text_row_index = text_row.index[text_row].tolist() notes = df.loc[text_row_index] return notes
This function extracts any experimental notes from a ProCoDA data file. :param path: The file path of the ProCoDA data file. If the file is in the working directory, then the file name is sufficient. :type path: string :return: The rows of the data file that contain text notes inserted during the experiment. Use this to identify the section of the data file that you want to extract. :rtype: pandas.Dataframe
614
def find_commands(cls):
    cmds = []
    for subclass in cls.__subclasses__():
        cmds.append(subclass)
        cmds.extend(find_commands(subclass))
    return cmds
Finds commands by finding the subclasses of Command
615
def get_bestfit_line(self, x_min=None, x_max=None, resolution=None):
    x = self.args["x"]
    if x_min is None:
        x_min = min(x)
    if x_max is None:
        x_max = max(x)
    if resolution is None:
        resolution = self.args.get("resolution", 1000)
    bestfit_x = np.linspace(x_min, x_max, resolution)
    return [bestfit_x, self.bestfit_func(bestfit_x)]
Method to get bestfit line using the defined self.bestfit_func method args: x_min: scalar, default=min(x) minimum x value of the line x_max: scalar, default=max(x) maximum x value of the line resolution: int, default=1000 how many steps between x_min and x_max returns: [bestfit_x, bestfit_y]
616
def time(self): if self.isMilitaryTime(): format = time_of_day = else: format = time_of_day = self._timeOfDayCombo.currentText().lower() try: hour = int(self._hourCombo.currentText()) if self.showHours() else 1 except ValueError: hour = 1 try: minute = int(self._minuteCombo.currentText()) if self.showMinutes() else 0 except ValueError: minute = 0 try: second = int(self._secondCombo.currentText()) if self.showSeconds() else 0 except ValueError: second = 0 combined = .format(hour, minute, second, time_of_day) return QtCore.QTime.fromString(combined, format)
Returns the current time for this edit. :return <QtCore.QTime>
617
def get_objective_banks(self): catalogs = self._get_provider_session().get_objective_banks() cat_list = [] for cat in catalogs: cat_list.append(ObjectiveBank(self._provider_manager, cat, self._runtime, self._proxy)) return ObjectiveBankList(cat_list)
Pass through to provider ObjectiveBankLookupSession.get_objective_banks
618
def sample_distinct(self, n_to_sample, **kwargs): n_notsampled = np.sum(np.isnan(self.cached_labels_)) if n_notsampled == 0: raise Exception("All distinct items have already been sampled.") if n_to_sample > n_notsampled: warnings.warn("Only {} distinct item(s) have not yet been sampled." " Setting n_to_sample = {}.".format(n_notsampled, \ n_notsampled)) n_to_sample = n_notsampled n_sampled = 0 while n_sampled < n_to_sample: self.sample(1,**kwargs) n_sampled += self._queried_oracle[self.t_ - 1]*1
Sample a sequence of items from the pool until a minimum number of distinct items are queried Parameters ---------- n_to_sample : int number of distinct items to sample. If sampling with replacement, this number is not necessarily the same as the number of iterations.
619
def regex(pattern, prompt=None, empty=False, flags=0):
    s = _prompt_input(prompt)
    if empty and not s:
        return None
    else:
        m = re.match(pattern, s, flags=flags)
        if m:
            return m
        else:
            return regex(pattern, prompt=prompt, empty=empty, flags=flags)
Prompt a string that matches a regular expression. Parameters ---------- pattern : str A regular expression that must be matched. prompt : str, optional Use an alternative prompt. empty : bool, optional Allow an empty response. flags : int, optional Flags that will be passed to ``re.match``. Returns ------- Match or None A match object if the user entered a matching string. None if the user pressed only Enter and ``empty`` was True. See Also -------- re.match
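A hypothetical interactive call (the pattern and prompt text are illustrative only); the function re-prompts until the input matches, or returns None if empty=True and the user just presses Enter:

m = regex(r"\d{4}-\d{2}-\d{2}", prompt="Date (YYYY-MM-DD): ", empty=True)
if m is not None:
    print("You entered:", m.group(0))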
620
def retrieve_by_id(self, id_):
    items_with_id = [item for item in self if item.id == int(id_)]
    if len(items_with_id) == 1:
        return items_with_id[0].retrieve()
Return a JSSObject for the element with ID id_
621
def start_worker(self): if not self.include_rq: return None worker = Worker(queues=self.queues, connection=self.connection) worker_pid_path = current_app.config.get( "{}_WORKER_PID".format(self.config_prefix), ) try: worker_pid_file = open(worker_pid_path, ) worker_pid = int(worker_pid_file.read()) print("Worker already started with PID=%d" % worker_pid) worker_pid_file.close() return worker_pid except (IOError, TypeError): self.worker_process = Process(target=worker_wrapper, kwargs={ : worker, : worker_pid_path }) self.worker_process.start() worker_pid_file = open(worker_pid_path, ) worker_pid_file.write("%d" % self.worker_process.pid) worker_pid_file.close() print("Start a worker process with PID=%d" % self.worker_process.pid) return self.worker_process.pid
Trigger new process as a RQ worker.
622
def server(self): server = [s for s in self._server.resources() if s.clientIdentifier == self.machineIdentifier] if len(server) == 0: raise NotFound( % self.machineIdentifier) return server[0]
Returns :class:`plexapi.myplex.MyPlexResource` with server of current item.
623
def token(self):
    if not self._token:
        self._token = self.fetch_token()
        logger.payment(self)
    return self._token
Token given by Transbank for payment initialization url. Will raise PaymentError when an error ocurred.
624
def save(self, model_filename, optimizer_filename):
    serializers.save_hdf5(model_filename, self.model)
    serializers.save_hdf5(optimizer_filename, self.optimizer)
Save the state of the model & optimizer to disk
625
def database_admin_api(self):
    if self._database_admin_api is None:
        self._database_admin_api = DatabaseAdminClient(
            credentials=self.credentials, client_info=_CLIENT_INFO
        )
    return self._database_admin_api
Helper for session-related API calls.
626
def retrieve_equities(self, sids):
    cache = self._asset_cache
    try:
        return {
            k: cache[k]
            for k in sids
        }
    except KeyError:
        raise EquitiesNotFound(sids=sids)
Retrieve Equity objects for a list of sids. Users generally shouldn't need to use this method (instead, they should prefer the more general/friendly `retrieve_assets`), but it has a documented interface and tests because it's used upstream. Parameters ---------- sids : iterable[string] Returns ------- equities : dict[str -> Equity] Raises ------ EquitiesNotFound When any requested asset isn't found.
627
def Readdir(self, path, fh=None): del fh if not self._IsDir(path): raise fuse.FuseOSError(errno.ENOTDIR) fd = aff4.FACTORY.Open(self.root.Add(path), token=self.token) children = fd.ListChildren() for directory in [u".", u".."]: yield directory for child in children: if child.Path() not in self.ignored_dirs: yield child.Basename()
Reads a directory given by path. Args: path: The path to list children of. fh: A file handler. Not used. Yields: A generator of filenames. Raises: FuseOSError: If we try and list a file.
628
def from_json(cls, json_doc):
    d = json.loads(json_doc)
    token = cls()
    token.__dict__.update(d)
    return token
Create and return a new Session Token based on the contents of a JSON document. :type json_doc: str :param json_doc: A string containing a JSON document with a previously saved Credentials object.
629
def is_exported(bundle): if not ckan: raise EnvironmentError(MISSING_CREDENTIALS_MSG) params = {: .format(bundle.dataset.vid.lower())} resp = ckan.action.package_search(**params) return len(resp[]) > 0
Returns True if dataset is already exported to CKAN. Otherwise returns False.
630
def find_cell_end(self, lines): if self.in_region: self.cell_type = for i, line in enumerate(lines): if self.end_region_re.match(line): return i, i + 1, True elif self.metadata is None: self.cell_type = prev_blank = 0 in_explicit_code_block = False in_indented_code_block = False for i, line in enumerate(lines): if in_explicit_code_block and self.end_code_re.match(line): in_explicit_code_block = False continue if self.non_jupyter_code_re and self.non_jupyter_code_re.match(line): in_explicit_code_block = True prev_blank = 0 continue if prev_blank and line.startswith() and not _BLANK_LINE.match(line): in_indented_code_block = True prev_blank = 0 continue if in_indented_code_block and not _BLANK_LINE.match(line) and not line.startswith(): in_indented_code_block = False if in_indented_code_block or in_explicit_code_block: continue if self.start_code_re.match(line) or self.start_region_re.match(line): if i > 1 and prev_blank: return i - 1, i, False return i, i, False if self.split_at_heading and line.startswith() and prev_blank >= 1: return i - 1, i, False if _BLANK_LINE.match(lines[i]): prev_blank += 1 elif i > 2 and prev_blank >= 2: return i - 2, i, True else: prev_blank = 0 else: self.cell_type = for i, line in enumerate(lines): if i == 0: continue if self.end_code_re.match(line): return i, i + 1, True return len(lines), len(lines), False
Return position of end of cell marker, and position of first line after cell
631
def anneal(self, mode, matches, orig_matches): changed = False def dupes_in_matches(): items_by_path = config.engine.group_by() hashes = set([x.hash for x in matches]) for idx, item in enumerate(matches): same_path_but_not_in_matches = any( x.hash not in hashes for x in items_by_path.get(item.realpath, []) ) if item.realpath and same_path_but_not_in_matches: yield idx if mode == : items_by_path = config.engine.group_by() hashes = set([x.hash for x in matches]) dupes = [] for item in matches: if item.realpath: for dupe in items_by_path.get(item.realpath, []): if dupe.hash not in hashes: changed = True dupes.append(dupe) hashes.add(dupe.hash) matches.extend(dupes) elif mode == : for idx in reversed(list(dupes_in_matches())): changed = True del matches[idx] elif mode == : items_by_path = config.engine.group_by() dupes = list(i for i in matches if i.realpath and len(items_by_path.get(i.realpath, [])) > 1) if len(dupes) != len(matches): changed = True matches[:] = dupes elif mode == : hashes = set([x.hash for x in matches]) changed = True matches[:] = list(i for i in orig_matches if i.hash not in hashes) elif mode == : seen, dupes = set(), [] for i, item in enumerate(matches): if item.name in seen: changed = True dupes.append(i) seen.add(item.name) for i in reversed(dupes): del matches[i] else: raise RuntimeError( + mode) return changed
Perform post-processing. Return True when any changes were applied.
632
def _fix_quantities(tree):
    if isinstance(tree, dict):
        tree = {k: _fix_quantities(v) for k, v in tree.items()}
        if isinstance(tree.get('Items'), list):
            tree['Quantity'] = len(tree['Items'])
            if not tree['Items']:
                tree.pop('Items')
        return tree
    elif isinstance(tree, list):
        return [_fix_quantities(t) for t in tree]
    else:
        return tree
Stupidly simple function to fix any Items/Quantity disparities inside a DistributionConfig block before use. Since AWS only accepts JSON-encodable data types, this implementation is "good enough" for our purposes.
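A small illustrative input/output pair, assuming the Items/Quantity keys named in the docstring:

cfg = {"Aliases": {"Items": ["example.com", "www.example.com"]}, "Origins": {"Items": []}}
print(_fix_quantities(cfg))
# {'Aliases': {'Items': ['example.com', 'www.example.com'], 'Quantity': 2}, 'Origins': {'Quantity': 0}}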
633
def _set_fcoe_fcf_map(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("fcf_map_name",fcoe_fcf_map.fcoe_fcf_map, yang_name="fcoe-fcf-map", rest_name="fcf-group", parent=self, is_container=, user_ordered=False, path_helper=self._path_helper, yang_keys=, extensions={u: {u: u, u: None, u: u, u: u, u: u}}), is_container=, yang_name="fcoe-fcf-map", rest_name="fcf-group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None, u: u, u: u, u: u}}, namespace=, defining_module=, yang_type=, is_config=True) except (TypeError, ValueError): raise ValueError({ : , : "list", : , }) self.__fcoe_fcf_map = t if hasattr(self, ): self._set()
Setter method for fcoe_fcf_map, mapped from YANG variable /fcoe/fcoe_fabric_map/fcoe_fcf_map (list) If this variable is read-only (config: false) in the source YANG file, then _set_fcoe_fcf_map is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_fcoe_fcf_map() directly. YANG Description: The list of FCF Groups. Each row contains the FCF group name, member FCoE map, FCF rbid and FDF rbids
634
def resolve_dst(self, dst_dir, src):
    if os.path.isabs(src):
        return os.path.join(dst_dir, os.path.basename(src))
    return os.path.join(dst_dir, src)
Finds the destination based on the source: if the source is an absolute path (and there is no pattern), the file is copied to the base dst_dir.
635
def get_activity_admin_session_for_objective_bank(self, objective_bank_id=None): if not objective_bank_id: raise NullArgument if not self.supports_activity_admin(): raise Unimplemented() try: from . import sessions except ImportError: raise OperationFailed() try: session = sessions.ActivityAdminSession(objective_bank_id, runtime=self._runtime) except AttributeError: raise OperationFailed() return session
Gets the OsidSession associated with the activity admin service for the given objective bank. arg: objectiveBankId (osid.id.Id): the Id of the objective bank return: (osid.learning.ActivityAdminSession) - an ActivityAdminSession raise: NotFound - objectiveBankId not found raise: NullArgument - objectiveBankId is null raise: OperationFailed - unable to complete request raise: Unimplemented - supports_activity_admin() or supports_visible_federation() is false compliance: optional - This method must be implemented if supports_activity_admin() and supports_visible_federation() are true.
636
def _make_parent(self):
    if self.is_partial:
        parent_args = self.flat_path[:-1]
    else:
        parent_args = self.flat_path[:-2]
    if parent_args:
        return self.__class__(
            *parent_args, project=self.project, namespace=self.namespace
        )
Creates a parent key for the current path. Extracts all but the last element in the key path and creates a new key, while still matching the namespace and the project. :rtype: :class:`google.cloud.datastore.key.Key` or :class:`NoneType` :returns: A new ``Key`` instance, whose path consists of all but the last element of current path. If the current key has only one path element, returns ``None``.
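A rough illustration of the flat-path slicing, assuming the google-cloud-datastore package and a fully specified key:

from google.cloud.datastore import Key

key = Key("Parent", 1, "Child", 2, project="my-project")
print(key.flat_path)         # ('Parent', 1, 'Child', 2)
print(key.parent.flat_path)  # ('Parent', 1) -- the last kind/id pair is dropped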
637
def merge_with(self, another, ubound=None, top_id=None): self.top_id = max(self.top_id, top_id if top_id != None else 0, another.top_id) self.ubound = max(self.ubound, ubound if ubound != None else 0, another.ubound) self.lits.extend(another.lits) def_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_DFL) self.tobj, clauses, self.rhs, self.top_id = pycard.itot_mrg(self.tobj, another.tobj, self.ubound, self.top_id) def_sigint_handler = signal.signal(signal.SIGINT, def_sigint_handler) self.cnf.clauses.extend(another.cnf.clauses) self.cnf.clauses.extend(clauses) self.cnf.nv = self.top_id self.nof_new = len(another.cnf.clauses) + len(clauses) another._merged = True
This method merges a tree of the current :class:`ITotalizer` object, with a tree of another object and (if needed) increases a potential upper bound that can be imposed on the complete list of literals in the sum of an existing :class:`ITotalizer` object to a new value. :param another: another totalizer to merge with. :param ubound: a new upper bound. :param top_id: a new top variable identifier. :type another: :class:`ITotalizer` :type ubound: int :type top_id: integer or None The top identifier ``top_id`` applied only if it is greater than the one used in ``self``. This method creates additional clauses encoding the existing totalizer tree merged with another totalizer tree into *one* sum and updating the upper bound. As a result, it appends the new clauses to the list of clauses of :class:`.CNF` ``self.cnf``. The number of newly created clauses is stored in variable ``self.nof_new``. Also, if the upper bound is updated, a list of bounds ``self.rhs`` gets increased and its length becomes ``ubound+1``. Otherwise, it is updated with new values. The method can be used in the following way: .. code-block:: python >>> from pysat.card import ITotalizer >>> with ITotalizer(lits=[1, 2], ubound=1) as t1: ... print t1.cnf.clauses [[-2, 3], [-1, 3], [-1, -2, 4]] ... print t1.rhs [3, 4] ... ... t2 = ITotalizer(lits=[5, 6], ubound=1) ... print t1.cnf.clauses [[-6, 7], [-5, 7], [-5, -6, 8]] ... print t1.rhs [7, 8] ... ... t1.merge_with(t2) ... print t1.cnf.clauses [[-2, 3], [-1, 3], [-1, -2, 4], [-6, 7], [-5, 7], [-5, -6, 8], [-7, 9], [-8, 10], [-3, 9], [-4, 10], [-3, -7, 10]] ... print t1.cnf.clauses[-t1.nof_new:] [[-6, 7], [-5, 7], [-5, -6, 8], [-7, 9], [-8, 10], [-3, 9], [-4, 10], [-3, -7, 10]] ... print t1.rhs [9, 10] ... ... t2.delete()
638
def _prep_datum(self, datum, dialect, col, needs_conversion): if datum is None or (needs_conversion and not str(datum).strip()): return pytype = self.columns[col][] if needs_conversion: if pytype == datetime.datetime: datum = dateutil.parser.parse(datum) elif pytype == bool: datum = th.coerce_to_specific(datum) if dialect.startswith(): datum = 1 if datum else 0 else: datum = pytype(str(datum)) if isinstance(datum, datetime.datetime) or isinstance(datum, datetime.date): if dialect in self._datetime_format: return datum.strftime(self._datetime_format[dialect]) else: return "" % datum elif hasattr(datum, ): return "" % datum.replace("'") else: return datum
Puts a value in proper format for a SQL string
639
def add_graph(patterns, G):
    if not patterns:
        patterns.append([G])
        return
    for i, graphs in enumerate(patterns):
        if networkx.is_isomorphic(graphs[0], G, node_match=type_match, edge_match=type_match):
            patterns[i].append(G)
            return
    patterns.append([G])
Add a graph to a set of unique patterns.
640
def exists(self):
    self_object = self.query.filter_by(id=self.id).first()
    if self_object is None:
        return False
    return True
Checks if item already exists in database
641
def _get_size(size, size_max, size_min, default_max, default_min): if len(default_max) != len(default_min): raise ValueError( .format(str(default_max), str(default_min)) + ) if size is not None: if (size_max is not None) or (size_min is not None): raise ValueError( .format(size, size_max, size_min)) else: if size_max is None: size_max = default_max if size_min is None: size_min = default_min size = np.array([np.random.uniform(size_min[i], size_max[i]) for i in range(len(default_max))]) return size
Helper method for providing a size, or a range to randomize from
642
def fit(self, X, y=None, **kwargs): self.k_scores_ = [] self.k_timers_ = [] if self.locate_elbow: self.elbow_value_ = None self.elbow_score_ = None for k in self.k_values_: start = time.time() self.estimator.set_params(n_clusters=k) self.estimator.fit(X) self.k_timers_.append(time.time() - start) self.k_scores_.append( self.scoring_metric(X, self.estimator.labels_) ) if self.locate_elbow: locator_kwargs = { : {: , : }, : {: , : }, : {: , : }, }.get(self.metric, {}) elbow_locator = KneeLocator(self.k_values_,self.k_scores_,**locator_kwargs) self.elbow_value_ = elbow_locator.knee if self.elbow_value_ == None: warning_message=\ "No or point detected, " \ "pass `locate_elbow=False` to remove the warning" warnings.warn(warning_message,YellowbrickWarning) else: self.elbow_score_ = self.k_scores_[self.k_values_.index(self.elbow_value_)] self.draw() return self
Fits n KMeans models where n is the length of ``self.k_values_``, storing the silhouette scores in the ``self.k_scores_`` attribute. The "elbow" and silhouette score corresponding to it are stored in ``self.elbow_value`` and ``self.elbow_score`` respectively. This method finishes up by calling draw to create the plot.
643
def to_string(cls, error_code): if error_code == cls.RTT_ERROR_CONTROL_BLOCK_NOT_FOUND: return return super(JLinkRTTErrors, cls).to_string(error_code)
Returns the string message for the given error code. Args: cls (JLinkRTTErrors): the ``JLinkRTTErrors`` class error_code (int): error code to convert Returns: An error string corresponding to the error code. Raises: ValueError: if the error code is invalid.
644
def _prep_cnv_file(cns_file, svcaller, work_dir, data): in_file = cns_file out_file = os.path.join(work_dir, "%s-%s-prep.csv" % (utils.splitext_plus(os.path.basename(in_file))[0], svcaller)) if not utils.file_uptodate(out_file, in_file): with file_transaction(data, out_file) as tx_out_file: with open(in_file) as in_handle: with open(tx_out_file, "w") as out_handle: reader = csv.reader(in_handle, dialect="excel-tab") writer = csv.writer(out_handle) writer.writerow(["chrom", "start", "end", "num.mark", "seg.mean"]) header = next(reader) for line in reader: cur = dict(zip(header, line)) if chromhacks.is_autosomal(cur["chromosome"]): writer.writerow([_to_ucsc_style(cur["chromosome"]), cur["start"], cur["end"], cur["probes"], cur["log2"]]) return out_file
Create a CSV file of CNV calls with log2 and number of marks.
645
def information_coefficient(total1, total2, intersect):
    total = total1 + total2
    return 2.0 * len(intersect) / total
A simple Jaccard (information coefficient) measure to compare two lists of overlaps/diffs.
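A small worked example, where total1 and total2 are the sizes of the two lists and intersect holds their shared items:

a = {"geneA", "geneB", "geneC", "geneD"}
b = {"geneB", "geneC", "geneE"}
print(information_coefficient(len(a), len(b), a & b))  # 2.0 * 2 / (4 + 3) = 0.571...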
646
def _submit(self):
    self.authenticate()
    title = "HelpMe UserVoice Ticket: %s" % (self.run_id)
    body = self.data['user_prompt_issue']
    envars = self.data.get('record_environment')
    if envars not in [None, '', []]:
        body += '\n\nEnvironment:'
        for envar in envars:
            body += '\n%s: %s' % (envar[0], envar[1])
    asciinema = self.data.get('record_asciinema')
    if asciinema not in [None, '']:
        url = upload_asciinema(asciinema)
        if url is not None:
            body += "\n\nAsciinema Recording: %s" % url
    body += "\ngenerated by HelpMe: https://vsoch.github.io/helpme/"
    self.post_ticket(title, body)
submit a uservoice ticket. When we get here we should have: {'user_prompt_issue': 'I want to do the thing.', 'record_asciinema': '/tmp/helpme.93o__nt5.json', 'record_environment': ((1,1),(2,2)...(N,N))} Required Client Variables self.api_key self.api_secret self.subdomain self.email
647
async def stop(self):
    self.stopped = True
    self.loop_event.set()
    await self.stopped_event.wait()
Stop heartbeat.
648
def data_directory(self): return expand_path(self.get(property_name=, environment_variable=, configuration_option=, default= if is_root() else ))
The absolute pathname of the directory where pip-accel's data files are stored (a string). - Environment variable: ``$PIP_ACCEL_CACHE`` - Configuration option: ``data-directory`` - Default: ``/var/cache/pip-accel`` if running as ``root``, ``~/.pip-accel`` otherwise
649
def add_track(self, *args, **kwargs):
    new_track = Track(*args, **kwargs)
    self.tracks = self.tracks + [new_track]
Add a track to a position. Parameters ---------- track_type: string The type of track to add (e.g. "heatmap", "line") position: string One of 'top', 'bottom', 'center', 'left', 'right' tileset: hgflask.tilesets.Tileset The tileset to be plotted in this track server: string The server serving this track height: int The height of the track, if it is a top, bottom or a center track width: int The width of the track, if it is a left, right or a center track
650
def makedoetree(ddict, bdict): dlist = list(ddict.keys()) blist = list(bdict.keys()) dlist.sort() blist.sort() doesnot = lst = [] for num in range(0, len(blist)): if bdict[blist[num]] == doesnot: lst = lst + [blist[num]] doedict = {} for num in range(0, len(lst)): doedict[lst[num]] = {} lv1list = list(doedict.keys()) lv1list.sort() for i in range(0, len(lv1list)): walllist = [] adict = doedict[lv1list[i]] for num in range(0, len(blist)): if bdict[blist[num]] == lv1list[i]: walllist = walllist + [blist[num]] for j in range(0, len(walllist)): adict[walllist[j]] = {} for i in range(0, len(lv1list)): adict1 = doedict[lv1list[i]] walllist = list(adict1.keys()) walllist.sort() for j in range(0, len(walllist)): windlist = [] adict2 = adict1[walllist[j]] for num in range(0, len(blist)): if bdict[blist[num]] == walllist[j]: windlist = windlist + [blist[num]] for k in range(0, len(windlist)): adict2[windlist[k]] = {} return doedict
makedoetree
651
def take_along_axis(large_array, indexes):
    if len(large_array.shape) > len(indexes.shape):
        indexes = indexes.reshape(indexes.shape + tuple([1] * (len(large_array.shape) - len(indexes.shape))))
    return np.take_along_axis(large_array, indexes, axis=0)
Take along axis
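A minimal sketch of what the wrapper does, in plain numpy: the index array is padded with trailing singleton axes so it can broadcast along axis 0.

import numpy as np

large = np.arange(12).reshape(3, 2, 2)
idx = np.array([2, 0])
padded = idx.reshape(idx.shape + (1, 1))  # shape (2, 1, 1), as the helper would produce
print(np.take_along_axis(large, padded, axis=0).shape)  # (2, 2, 2)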
652
def reshape(self, shape: tf.TensorShape) -> 'TensorFluent':
    t = tf.reshape(self.tensor, shape)
    scope = self.scope.as_list()
    batch = self.batch
    return TensorFluent(t, scope, batch=batch)
Returns a TensorFluent for the reshape operation with given `shape`. Args: shape: The output's shape. Returns: A TensorFluent wrapping the reshape operation.
653
def _candidate_merges(self, f): candidates = [self._get_feature(f.id)] c = self.conn.cursor() results = c.execute( constants._SELECT + , (f.id,) ) for i in results: candidates.append( feature.Feature(dialect=self.iterator.dialect, **i)) return list(set(candidates))
Identifies those features that originally had the same ID as `f` (according to the id_spec), but were modified because of duplicate IDs.
654
def get_displays_params(self) -> str: output, error = self._execute( , self.device_sn, , , , ) return output
Show displays parameters.
655
def make_PCEExtension_for_prebuilding_Code( name, Code, prebuild_sources, srcdir, downloads=None, **kwargs): import glob from .dist import PCEExtension build_files = [] dist_files = [(os.path.join(srcdir, x[0]), x[1]) for x in getattr(Code, , [])] for attr in (, ): for cf in getattr(Code, attr, []) or []: if not cf.startswith(): build_files.append(os.path.join(srcdir, cf)) dist_files.append((os.path.join(srcdir, cf), None)) def prebuilder(build_temp, ext_fullpath, ext, src_paths, **prebuilder_kwargs): build_temp = os.path.abspath(build_temp) if not os.path.isdir(build_temp): make_dirs(build_temp) if downloads: websrc, src_md5 = downloads download_dir = os.path.join(build_temp, srcdir) if not os.path.isdir(download_dir): make_dirs(download_dir) download_files(websrc, src_md5.keys(), src_md5, cwd=download_dir, logger=ext.logger) for p in src_paths: if p not in build_files: copy(os.path.join(srcdir, p), os.path.join(build_temp, srcdir), dest_is_dir=True, create_dest_dirs=True, only_update=ext.only_update, logger=ext.logger) dst = os.path.abspath(os.path.join( os.path.dirname(ext_fullpath), )) make_dirs(dst, logger=ext.logger) objs = compile_sources( [os.path.join(srcdir, x) for x in src_paths], destdir=dst, cwd=build_temp, metadir=dst, only_update=True, logger=ext.logger, **prebuilder_kwargs) glb = os.path.join(ext_fullpath, ) dist_files.extend(glob.glob(glb)) for obj in objs: copy(os.path.join(build_temp, obj), dst, dest_is_dir=True, create_dest_dirs=True, only_update=ext.only_update, logger=ext.logger) return objs compile_kwargs = Code.compile_kwargs.copy() logger = kwargs.pop(, True) compile_kwargs.update(kwargs) return PCEExtension( name, [], build_files=build_files, dist_files=dist_files, build_callbacks=[ ( prebuilder, (prebuild_sources,), compile_kwargs ), ], logger=logger, link_ext=False )
If a subclass of codeexport.Generic_Code needs to have some of its sources compiled to objects and cached in a `prebuilt/` directory at invocation of `setup.py build_ext`, this convenience function makes setting up a PCEExtension easier. Use together with cmdclass = {'build_ext': pce_build_ext}. Files called ".metadata*" will be added to dist_files.
656
def node(self, name):
    nodes = self.nodes(path=name)
    return next(node for node in nodes)
Gets a single node from PuppetDB. :param name: The name of the node search. :type name: :obj:`string` :return: An instance of Node :rtype: :class:`pypuppetdb.types.Node`
657
def cli(conf):
    if conf:
        if not os.path.isfile(conf):
            raise click.exceptions.BadParameter("{} is not a file".format(conf))
        try:
            config.conf.load_config(config_path=conf)
        except exceptions.ConfigurationException as e:
            raise click.exceptions.BadParameter(str(e))
    twisted_observer = legacy_twisted_log.PythonLoggingObserver()
    twisted_observer.start()
    config.conf.setup_logging()
The fedora-messaging command line interface.
658
def display_waypoints(self): from MAVProxy.modules.mavproxy_map import mp_slipmap self.mission_list = self.module().wploader.view_list() polygons = self.module().wploader.polygon_list() self.map.add_object(mp_slipmap.SlipClearLayer()) for i in range(len(polygons)): p = polygons[i] if len(p) > 1: items = [MPMenuItem(, returnkey=), MPMenuItem(, returnkey=), MPMenuItem(, returnkey=), MPMenuItem(, returnkey=), ] popup = MPMenuSubMenu(, items) self.map.add_object(mp_slipmap.SlipPolygon( % i, p, layer=, linewidth=2, colour=(255,255,255), arrow = self.map_settings.showdirection, popup_menu=popup)) labeled_wps = {} self.map.add_object(mp_slipmap.SlipClearLayer()) for i in range(len(self.mission_list)): next_list = self.mission_list[i] for j in range(len(next_list)): if (next_list[j] not in labeled_wps): label = self.label_for_waypoint(next_list[j]) colour = self.colour_for_wp(next_list[j]) self.map.add_object(mp_slipmap.SlipLabel( % (i,j), polygons[i][j], label, , colour=colour)) if (self.map_settings.loitercircle and self.module().wploader.wp_is_loiter(next_list[j])): wp = self.module().wploader.wp(next_list[j]) if wp.command != mavutil.mavlink.MAV_CMD_NAV_LOITER_TO_ALT and wp.param3 != 0: loiter_rad = wp.param3 elif wp.command == mavutil.mavlink.MAV_CMD_NAV_LOITER_TO_ALT and wp.param2 != 0: loiter_rad = wp.param2 else: loiter_rad = self.get_mav_param() self.map.add_object(mp_slipmap.SlipCircle( % (next_list[j] + 1), , polygons[i][j], loiter_rad, (255, 255, 255), 2, arrow = self.map_settings.showdirection)) labeled_wps[next_list[j]] = (i,j)
display the waypoints
659
def _canonicalize(self, filename):
    path, ext = os.path.splitext(filename)
    if not ext:
        ext = ".collection"
    return path + ext
Use .collection as extension unless provided
660
def resource(self, uri, methods=frozenset({}), host=None, strict_slashes=None, stream=False, version=None, name=None, **kwargs): if strict_slashes is None: strict_slashes = self.strict_slashes def decorator(handler): self.resources.append(( FutureRoute(handler, uri, methods, host, strict_slashes, stream, version, name), kwargs)) return handler return decorator
Create a blueprint resource route from a decorated function. :param uri: endpoint at which the route will be accessible. :param methods: list of acceptable HTTP methods. :param host: :param strict_slashes: :param version: :param name: user defined route name for url_for :return: function or class instance Accepts any keyword argument that will be passed to the app resource.
661
def add_to_item_list(self, item_urls, item_list_url):
    item_list_url = str(item_list_url)
    name = self.get_item_list(item_list_url).name()
    return self.add_to_item_list_by_name(item_urls, name)
Instruct the server to add the given items to the specified Item List :type item_urls: List or ItemGroup :param item_urls: List of URLs for the items to add, or an ItemGroup object :type item_list_url: String or ItemList :param item_list_url: the URL of the list to which to add the items, or an ItemList object :rtype: String :returns: the server success message, if successful :raises: APIError if the request was not successful
662
def do_handle_log(self, workunit, level, *msg_elements): entry_info = { : self._log_level_str[level], : self._render_messages(*msg_elements), } root_id = str(workunit.root().id) current_stack = self._root_id_to_workunit_stack[root_id] if current_stack: current_stack[-1][].append(entry_info) else: self.results[root_id][].append(entry_info)
Implementation of Reporter callback.
663
def get_allowed_methods(self):
    return ", ".join([method for method in dir(self)
                      if method.upper() == method and callable(getattr(self, method))])
Returns a comma-separated list of method names that are allowed on this instance. Useful to set the ``Allowed`` response header.
664
def sunset_utc(self, date, latitude, longitude, observer_elevation=0): try: return self._calc_time(90 + 0.833, SUN_SETTING, date, latitude, longitude, observer_elevation) except ValueError as exc: if exc.args[0] == "math domain error": raise AstralError( ("Sun never reaches the horizon on this day, " "at this location.") ) else: raise
Calculate sunset time in the UTC timezone. :param date: Date to calculate for. :type date: :class:`datetime.date` :param latitude: Latitude - Northern latitudes should be positive :type latitude: float :param longitude: Longitude - Eastern longitudes should be positive :type longitude: float :param observer_elevation: Elevation in metres to calculate sunset for :type observer_elevation: int :return: The UTC date and time at which sunset occurs. :rtype: :class:`~datetime.datetime`
665
def Call(self, Id=0):
    o = Call(self, Id)
    o.Status
    return o
Queries a call object. :Parameters: Id : int Call identifier. :return: Call object. :rtype: `call.Call`
666
def get_item_type_id_from_identifier(self, identifier, item_types=None): if item_types is None: item_types = ItemType.objects.get_all_types() identifier_type, _ = identifier.split() item_types = [it for it in item_types.values() if it[].endswith(identifier_type)] if len(item_types) > 1: raise Exception(.format(identifier_type)) if len(item_types) == 0: raise Exception(.format(identifier_type)) return item_types[0][]
Get an ID of item type for the given identifier. Identifier is a string of the following form: <model_prefix>/<model_identifier> where <model_prefix> is any suffix of database table of the given model which uniquely specifies the table, and <model_identifier> is identifier of the object. Args: identifier (str): item identifier item_types (dict): ID -> item type JSON Returns: int: ID of the corresponding item type
667
def default(cls) -> : if cls._default_cache is not None: return cls._default_cache if pkg_resources.resource_exists(__name__, ): import lzma with pkg_resources.resource_stream(__name__, ) as f: with lzma.open(f, "rt") as g: cls._default_cache = PrecalculatedTextMeasurer.from_json( cast(TextIO, g)) return cls._default_cache elif pkg_resources.resource_exists(__name__, ): with pkg_resources.resource_stream(__name__, ) as f: cls._default_cache = PrecalculatedTextMeasurer.from_json( io.TextIOWrapper(f, encoding=)) return cls._default_cache else: raise ValueError()
Returns a reasonable default PrecalculatedTextMeasurer.
668
def get_legacy_storage_path(self):
    config_dir = os.path.dirname(
        self.py3_wrapper.config.get("i3status_config_path", "/tmp")
    )
    storage_path = os.path.join(config_dir, "py3status.data")
    if os.path.exists(storage_path):
        return storage_path
    else:
        return None
Detect and return existing legacy storage path.
669
def BSearchCeil(a, x, lo=0, hi=None):
    if len(a) == 0:
        return -1
    hi = hi if hi is not None else len(a)
    pos = bisect_left(a, x, lo, hi)
    return pos if pos < hi else -1
Returns the lowest i such that a[i] >= x, or -1 if x > all elements in a. So, if x is in between two elements in a, this function will return the index of the higher element, hence "Ceil". Arguments: a -- ordered numeric sequence x -- element to search within a lo -- lowest index to consider in search hi -- highest index to consider in search
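A quick check of the ceiling behaviour (assumes `from bisect import bisect_left` at module level):

a = [1, 3, 5, 7]
print(BSearchCeil(a, 4))  # 2, since a[2] == 5 is the first element >= 4
print(BSearchCeil(a, 7))  # 3
print(BSearchCeil(a, 8))  # -1, x is greater than every element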
670
def from_computed_structure_entry(entry, miller_index, label=None, adsorbates=None,
                                  clean_entry=None, **kwargs):
    return SlabEntry(entry.structure, entry.energy, miller_index, label=label,
                     adsorbates=adsorbates, clean_entry=clean_entry, **kwargs)
Returns SlabEntry from a ComputedStructureEntry
671
def write(self, node, filehandle):
    dictexporter = self.dictexporter or DictExporter()
    data = dictexporter.export(node)
    return json.dump(data, filehandle, **self.kwargs)
Write JSON to `filehandle` starting at `node`.
672
def _unpack_oxm_field(self):
    field_int = self.oxm_field_and_mask >> 1
    if self.oxm_class == OxmClass.OFPXMC_OPENFLOW_BASIC:
        return OxmOfbMatchField(field_int)
    return field_int
Unpack oxm_field from oxm_field_and_mask. Returns: :class:`OxmOfbMatchField`, int: oxm_field from oxm_field_and_mask. Raises: ValueError: If oxm_class is OFPXMC_OPENFLOW_BASIC but :class:`OxmOfbMatchField` has no such integer value.
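A tiny numeric illustration of the bit layout this relies on (OXM packs the field in the upper 7 bits and the has-mask flag in the lowest bit):

oxm_field_and_mask = 0b0001101  # field = 6, has_mask = 1
print(oxm_field_and_mask >> 1)  # 6
print(oxm_field_and_mask & 1)   # 1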
673
def _run_play(self, play): self.callbacks.on_play_start(play.name) if not self.inventory.list_hosts(play.hosts): self.callbacks.on_no_hosts_matched() return True self._do_setup_step(play) all_hosts = self._list_available_hosts(play.hosts) play.update_vars_files(all_hosts) serialized_batch = [] if play.serial <= 0: serialized_batch = [all_hosts] else: while len(all_hosts) > 0: play_hosts = [] for x in range(play.serial): if len(all_hosts) > 0: play_hosts.append(all_hosts.pop()) serialized_batch.append(play_hosts) for on_hosts in serialized_batch: self.inventory.also_restrict_to(on_hosts) for task in play.tasks(): should_run = False for x in self.only_tags: for y in task.tags: if (x==y): should_run = True break if should_run: if not self._run_task(play, task, False): return False host_list = self._list_available_hosts(play.hosts) if not host_list: self.callbacks.on_no_hosts_remaining() return False for handler in play.handlers(): if len(handler.notified_by) > 0: self.inventory.restrict_to(handler.notified_by) self._run_task(play, handler, True) self.inventory.lift_restriction() handler.notified_by = [] self.inventory.lift_also_restriction() return True
run a list of tasks for a given pattern, in order
674
def augmentation_transform(self, data, label):
    for aug in self.auglist:
        data, label = aug(data, label)
    return (data, label)
Override: transforms input data with the specified augmentations.
675
def activate(self, target=None, **options): log.debug("initiator options: {0}".format(options)) self.did = options.get(, None) self.nad = options.get(, None) self.gbi = options.get(, )[0:48] self.brs = min(max(0, options.get(, 2)), 2) self.lri = min(max(0, options.get(, 3)), 3) if self._acm is None or in options: self._acm = bool(options.get(, True)) assert self.did is None or 0 <= self.did <= 255 assert self.nad is None or 0 <= self.nad <= 255 ppi = (self.lri << 4) | (bool(self.gbi) << 1) | int(bool(self.nad)) did = 0 if self.did is None else self.did atr_req = ATR_REQ(os.urandom(10), did, 0, 0, ppi, self.gbi) psl_req = PSL_REQ(did, (0, 9, 18)[self.brs], self.lri) atr_res = psl_res = None self.target = target if self.target is None and self.acm is True: log.debug("searching active communication mode target at 106A") tg = nfc.clf.RemoteTarget("106A", atr_req=atr_req.encode()) try: self.target = self.clf.sense(tg, iterations=2, interval=0.1) except nfc.clf.UnsupportedTargetError: self._acm = False except nfc.clf.CommunicationError: pass else: if self.target: atr_res = ATR_RES.decode(self.target.atr_res) else: self._acm = None if self.target is None: log.debug("searching passive communication mode target at 106A") target = nfc.clf.RemoteTarget("106A") target = self.clf.sense(target, iterations=2, interval=0.1) if target and target.sel_res and bool(target.sel_res[0] & 0x40): self.target = target if self.target is None and self.brs > 0: log.debug("searching passive communication mode target at 212F") target = nfc.clf.RemoteTarget("212F", sensf_req=b) target = self.clf.sense(target, iterations=2, interval=0.1) if target and target.sensf_res.startswith(b): atr_req.nfcid3 = target.sensf_res[1:9] + b self.target = target if self.target and self.target.atr_res is None: try: atr_res = self.send_req_recv_res(atr_req, 1.0) except nfc.clf.CommunicationError: pass if atr_res is None: log.debug("NFC-DEP Attribute Request failed") return None if self.target and atr_res: if self.brs > (, , ).index(self.target.brty): try: psl_res = self.send_req_recv_res(psl_req, 0.1) except nfc.clf.CommunicationError: pass if psl_res is None: log.debug("NFC-DEP Parameter Selection failed") return None self.target.brty = (, )[self.brs-1] self.rwt = (4096/13.56E6 * 2**(atr_res.wt if atr_res.wt < 15 else 14)) self.miu = (atr_res.lr-3 - int(self.did is not None) - int(self.nad is not None)) self.gbt = atr_res.gb self.pni = 0 log.info("running as " + str(self)) return self.gbt
Activate DEP communication with a target.
676
def initialize(self, params, repetition): self.name = params["name"] self.dataDir = params.get("datadir", "data") self.seed = params.get("seed", 42) + repetition torch.manual_seed(self.seed) np.random.seed(self.seed) random.seed(self.seed) self.epochs = params.get("epochs", 1) self.batch_size = params.get("batch_size", 16) self.batches_in_epoch = params.get("batches_in_epoch", 60000) self.first_epoch_batch_size = params.get("first_epoch_batch_size", self.batch_size) self.batches_in_first_epoch = params.get("batches_in_first_epoch", self.batches_in_epoch) self.test_batch_size = params.get("test_batch_size", 1000) self.optimizer_class = eval(params.get("optimizer", "torch.optim.SGD")) self.optimizer_params = eval(params.get("optimizer_params", "{}")) self.lr_scheduler_class = eval(params.get("lr_scheduler", None)) self.lr_scheduler_params = eval(params.get("lr_scheduler_params", "{}")) self.loss_function = eval(params.get("loss_function", "torch.nn.functional.nll_loss")) c, h, w = map(int, params.get("input_shape", "1_28_28").split("_")) self.in_channels = c self.out_channels = map(int, params.get("out_channels", "30_30").split("_")) self.kernel_size = map(int, params.get("kernel_size", "5_5").split("_")) self.stride = map(int, params.get("stride", "1_1").split("_")) self.padding = map(int, params.get("padding", "0_0").split("_")) self.maxpool = [] self.maxpool.append( ((w + 2 * self.padding[0] - self.kernel_size[0]) // self.stride[0] + 1) // 2) self.maxpool.append( ((self.maxpool[0] + 2 * self.padding[1] - self.kernel_size[1]) // self.stride[1] + 1) // 2) self.cnn_output_len = [self.maxpool[i] * self.maxpool[i] * self.out_channels[i] for i in range(len(self.maxpool))] self.n = params.get("n", 1000) self.output_size = params.get("output_size", 10) if "c1_k" in params: self.cnn_k = map(int, params["c1_k"].split("_")) else: self.cnn_k = self.cnn_output_len self.k = params.get("k", self.n) self.k_inference_factor = params.get("k_inference_factor", 1.0) self.boost_strength = params.get("boost_strength", 1.0) self.boost_strength_factor = params.get("boost_strength_factor", 1.0) self.weight_sparsity = params.get("weight_sparsity", 1.0) self.weight_sparsity_cnn = params.get("weight_sparsity_cnn", 1.0)
Initialize experiment parameters and default values from configuration file
677
def water_self_diffusion_coefficient(T=None, units=None, warn=True, err_mult=None): if units is None: K = 1 m = 1 s = 1 else: K = units.Kelvin m = units.meter s = units.second if T is None: T = 298.15*K _D0 = D0 * m**2 * s**-1 _TS = TS * K if err_mult is not None: _dD0 = dD0 * m**2 * s**-1 _dTS = dTS * K _D0 += err_mult[0]*_dD0 _TS += err_mult[1]*_dTS if warn and (_any(T < low_t_bound*K) or _any(T > high_t_bound*K)): warnings.warn("Temperature is outside range (0-100 degC)") return _D0*((T/_TS) - 1)**gamma
Temperature-dependent self-diffusion coefficient of water. Parameters ---------- T : float Temperature (default: in Kelvin) units : object (optional) object with attributes: Kelvin, meter, kilogram warn : bool (default: True) Emit UserWarning when outside temperature range. err_mult : length 2 array_like (default: None) Perturb parameters D0 and TS with err_mult[0]*dD0 and err_mult[1]*dTS respectively, where dD0 and dTS are the reported uncertainties in the fitted parameters. Useful for estimating error in diffusion coefficient. References ---------- Temperature-dependent self-diffusion coefficients of water and six selected molecular liquids for calibration in accurate 1H NMR PFG measurements Manfred Holz, Stefan R. Heila, Antonio Saccob; Phys. Chem. Chem. Phys., 2000,2, 4740-4742 http://pubs.rsc.org/en/Content/ArticleLanding/2000/CP/b005319h DOI: 10.1039/B005319H
678
def map_names(lang="en"):
    cache_name = "map_names.%s.json" % lang
    data = get_cached("map_names.json", cache_name, params=dict(lang=lang))
    return dict([(item["id"], item["name"]) for item in data])
This resource returns a dictionary of the localized map names for the specified language. Only maps with events are listed - if you need a list of all maps, use ``maps.json`` instead. :param lang: The language to query the names for. :return: the response is a dictionary where the key is the map id and the value is the name of the map in the specified language.
679
def get_data(self, environment_title_or_num=-1, frequency=None): if isinstance(environment_title_or_num, int): environment_title = tuple(self._raw_environments.keys())[environment_title_or_num] else: environment_title = environment_title_or_num if environment_title not in self._dfs: raise ValueError(f"No environment named {environment_title}. Available environments: {tuple(self._dfs)}.") environment_dfs = self._dfs[environment_title] if frequency is None: for frequency in FREQUENCIES: if environment_dfs[frequency] is not None: break if frequency not in FREQUENCIES: raise ValueError(f"Unknown frequency: {frequency}. Available frequencies: {FREQUENCIES}") return self._dfs[environment_title][frequency]
Parameters ---------- environment_title_or_num frequency: 'str', default None 'timestep', 'hourly', 'daily', 'monthly', 'annual', 'run_period' If None, will look for the smallest frequency of environment.
680
def log_likelihood(self):
    ll = GP.log_likelihood(self)
    jacobian = self.warping_function.fgrad_y(self.Y_untransformed)
    return ll + np.log(jacobian).sum()
Notice we add the jacobian of the warping function here.
681
def step_random_processes(oscillators): if not rand.prob_bool(0.01): return amp_bias_weights = [(0.001, 1), (0.1, 100), (0.15, 40), (1, 0)] num_moves = iching.get_hexagram() % len(oscillators) for i in range(num_moves): pair = [gram % len(oscillators) for gram in iching.get_hexagram()] amplitudes = [(gram / 64) * rand.weighted_rand(amp_bias_weights) for gram in iching.get_hexagram()] oscillators[pair[0]].amplitude.drift_target = amplitudes[0] oscillators[pair[1]].amplitude.drift_target = amplitudes[1]
Args: oscillators (list): A list of oscillator.Oscillator objects to operate on Returns: None
682
def fit(self, x, y=None, batch_size=32, nb_epoch=10, validation_data=None, distributed=True): if distributed: if isinstance(x, np.ndarray) and isinstance(y, np.ndarray): training_data = to_sample_rdd(x, y) if validation_data: validation_data = to_sample_rdd(*validation_data) elif (isinstance(x, RDD) or isinstance(x, DataSet)) and not y: training_data = x else: raise TypeError("Unsupported training data type: %s" % type(x)) callBigDlFunc(self.bigdl_type, "fit", self.value, training_data, batch_size, nb_epoch, validation_data) else: if validation_data: val_x = [JTensor.from_ndarray(x) for x in to_list(validation_data[0])] val_y = JTensor.from_ndarray(validation_data[1]) else: val_x, val_y = None, None callBigDlFunc(self.bigdl_type, "fit", self.value, [JTensor.from_ndarray(x) for x in to_list(x)], JTensor.from_ndarray(y), batch_size, nb_epoch, val_x, val_y, multiprocessing.cpu_count())
Train a model for a fixed number of epochs on a dataset. # Arguments x: Input data. A Numpy array or RDD of Sample or Image DataSet. y: Labels. A Numpy array. Default is None if x is already RDD of Sample or Image DataSet. batch_size: Number of samples per gradient update. nb_epoch: Number of iterations to train. validation_data: Tuple (x_val, y_val) where x_val and y_val are both Numpy arrays. Or RDD of Sample. Default is None if no validation is involved. distributed: Boolean. Whether to train the model in distributed mode or local mode. Default is True. In local mode, x and y must both be Numpy arrays.
683
def get(self, key, default=None, as_int=False, setter=None):
    if as_int:
        val = uwsgi.cache_num(key, self.name)
    else:
        val = decode(uwsgi.cache_get(key, self.name))
    if val is None:
        if setter is None:
            return default
        val = setter(key)
        if val is None:
            return default
        self.set(key, val)
    return val
Gets a value from the cache. :param str|unicode key: The cache key to get value for. :param default: Value to return if none found in cache. :param bool as_int: Return 64bit number instead of str. :param callable setter: Setter callable to automatically set cache value if not already cached. Required to accept a key and return a value that will be cached. :rtype: str|unicode|int
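A sketch of the setter (cache-aside) pattern this enables; `cache` and `load_user` are hypothetical, and the setter only runs on a miss:

def load_user(key):
    # Hypothetical expensive lookup, e.g. a database query.
    return "user-data-for-" + key

value = cache.get("user:42", setter=load_user)  # miss: load_user runs, result is cached
value = cache.get("user:42", setter=load_user)  # hit: served straight from the cache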
684
def delete_state_definition(self, process_id, wit_ref_name, state_id): route_values = {} if process_id is not None: route_values[] = self._serialize.url(, process_id, ) if wit_ref_name is not None: route_values[] = self._serialize.url(, wit_ref_name, ) if state_id is not None: route_values[] = self._serialize.url(, state_id, ) self._send(http_method=, location_id=, version=, route_values=route_values)
DeleteStateDefinition. [Preview API] Removes a state definition in the work item type of the process. :param str process_id: ID of the process :param str wit_ref_name: The reference name of the work item type :param str state_id: ID of the state
685
def single_page_members(self, page_number=1): url = % (self.dismiss_url, page_number) html = self.request(url).text soup = BeautifulSoup(html) members_html = soup.find(id=) if not members_html: return [] def get_tag_string(html, class_, tag=, n=0): return html.find_all(tag, class_=class_)[n].get_text().strip() members = [] for member_html in members_html.find_all(, class_=): _id = member_html.attrs[] try: user_url = member_html.find_all(, class_= )[0].find().attrs[] username = self.get_username( + user_url) except Exception as e: logger.exception(e) username = try: nickname = get_tag_string(member_html, , ) except Exception as e: logger.exception(e) nickname = username try: role = member_html.find_all(, class_= )[0].find_all(, class_= )[0].get_text().strip() except IndexError: role = except Exception as e: logger.exception(e) role = member = { : int(_id), : username, : nickname, : role, : int(get_tag_string(member_html, )), : int(get_tag_string(member_html, )), : float(get_tag_string(member_html, ).split()[0]), : get_tag_string(member_html, ) != , : get_tag_string(member_html, , n=1) != , } members.append(member) return members
Get the group member info on a single page. :param page_number: page number :return: a list containing group member info. Example return value: :: [{ 'id': 123, # member_id 'username': 'jim', # username 'nickname': 'Jim', # nickname 'role': u'小组长', # role (group leader) 'points': 1234, # contribution points 'days': 100, # days in the group 'rate': 99.9, # check-in rate 'checked_yesterday': True, # checked in yesterday 'checked': False, # checked in today }, { # ... }]
686
def show_fabric_trunk_info_input_rbridge_id(self, **kwargs): config = ET.Element("config") show_fabric_trunk_info = ET.Element("show_fabric_trunk_info") config = show_fabric_trunk_info input = ET.SubElement(show_fabric_trunk_info, "input") rbridge_id = ET.SubElement(input, "rbridge-id") rbridge_id.text = kwargs.pop() callback = kwargs.pop(, self._callback) return callback(config)
Auto Generated Code
687
def vertices(self):
    if self._faces is None:
        if self._vertices is None:
            return None
        self.triangulate()
    return self._vertices
Return an array (Nf, 3) of vertices. If only faces exist, the function computes the vertices and returns them. If no vertices or faces are specified, the function returns None.
688
def _get_event_and_context(self, event, arg_type):
    eid = _choose_id(event, arg_type)
    ev = self.concept_dict[eid]
    concept, metadata = self._make_concept(ev)
    # The delta keys were elided in the source; 'adjectives', 'states' and
    # 'polarity' are inferred from the helper calls on the right-hand side.
    ev_delta = {'adjectives': [],
                'states': get_states(ev),
                'polarity': get_polarity(ev)}
    context = self._make_context(ev)
    event_obj = Event(concept, delta=ev_delta, context=context)
    return event_obj
Return an INDRA Event based on an event entry.
689
def _format_params(self, type_, params):
    # The string literals below were elided in the source; 'initial_state',
    # 'num_qubits' and 'ising' are inferred from context and the usual
    # sapi parameter names.
    if 'initial_state' in params:
        initial_state = params['initial_state']
        if isinstance(initial_state, Mapping):
            initial_state_list = [3] * self.properties['num_qubits']
            low = -1 if type_ == 'ising' else 0
            for v, val in initial_state.items():
                if val == 3:
                    continue
                if val <= 0:
                    initial_state_list[v] = low
                else:
                    initial_state_list[v] = 1
            params['initial_state'] = initial_state_list
Reformat some of the parameters for sapi.
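A standalone worked example of the reshaping this method performs, using the literals assumed in the code above ('initial_state', 'num_qubits', 'ising'); 3 marks an unset qubit and non-positive spins map to the Ising low value:

initial_state = {0: 1, 2: -1, 4: 3}   # sparse mapping, qubit -> spin
num_qubits = 5                        # hypothetical solver property
low = -1                              # -1 for an 'ising' problem, 0 for 'qubo'

initial_state_list = [3] * num_qubits
for v, val in initial_state.items():
    if val == 3:
        continue
    initial_state_list[v] = low if val <= 0 else 1

print(initial_state_list)             # [1, 3, -1, 3, 3]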
690
def version(self) -> Optional[str]:
    if self._version is None:
        self._version = self._parser.get_http_version()
    return self._version
Get the HTTP version.
691
def get_biome_color_based_on_elevation(world, elev, x, y, rng):
    v = world.biome_at((x, y)).name()
    biome_color = _biome_satellite_colors[v]

    # Oceans get no noise; land tiles start with random noise per rgb channel
    noise = (0, 0, 0)
    if world.is_land((x, y)):
        noise = rng.randint(-NOISE_RANGE, NOISE_RANGE, size=3)

        # Higher elevations further modify the noise; mountains also blend the
        # biome color toward the mountain color
        if elev > HIGH_MOUNTAIN_ELEV:
            noise = add_colors(noise, HIGH_MOUNTAIN_NOISE_MODIFIER)
            biome_color = average_colors(biome_color, MOUNTAIN_COLOR)
        elif elev > HIGH_HILL_ELEV:
            noise = add_colors(noise, HIGH_HILL_NOISE_MODIFIER)
        elif elev > HILL_ELEV:
            noise = add_colors(noise, HILL_NOISE_MODIFIER)

    modification_amount = int(elev / BASE_ELEVATION_INTENSITY_MODIFIER)
    base_elevation_modifier = (modification_amount, modification_amount, modification_amount)

    this_tile_color = add_colors(biome_color, noise, base_elevation_modifier)
    return this_tile_color
This is the "business logic" for determining the base biome color in satellite view. This includes generating some "noise" at each spot in a pixel's rgb value, potentially modifying the noise based on elevation, and finally incorporating this with the base biome color. The basic rules regarding noise generation are: - Oceans have no noise added - land tiles start with noise somewhere inside (-NOISE_RANGE, NOISE_RANGE) for each rgb value - land tiles with high elevations further modify the noise by set amounts (to drain some of the color and make the map look more like mountains) The biome's base color may be interpolated with a predefined mountain brown color if the elevation is high enough. Finally, the noise plus the biome color are added and returned. rng refers to an instance of a random number generator used to draw the random samples needed by this function.
692
def anonymous_login(self):
    self._LOG.debug("Attempting Anonymous login")
    self._pre_login()
    self.username = None
    self.login_key = None
    message = MsgProto(EMsg.ClientLogon)
    # The SteamID type/universe literals were elided in the source;
    # 'AnonUser' / 'Public' are the values an anonymous logon normally uses.
    message.header.steamid = SteamID(type='AnonUser', universe='Public')
    message.body.protocol_version = 65579
    self.send(message)
    resp = self.wait_msg(EMsg.ClientLogOnResponse, timeout=30)
    return EResult(resp.body.eresult) if resp else EResult.Fail
Login as anonymous user

:return: logon result, see `CMsgClientLogonResponse.eresult <https://github.com/ValvePython/steam/blob/513c68ca081dc9409df932ad86c66100164380a6/protobufs/steammessages_clientserver.proto#L95-L118>`_
:rtype: :class:`.EResult`
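A hedged usage sketch; the import paths follow the ValvePython steam layout but are stated here as assumptions:

from steam.client import SteamClient
from steam.enums import EResult

client = SteamClient()
result = client.anonymous_login()
if result == EResult.OK:
    print('Anonymous logon succeeded as', client.steam_id)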
693
def _draw_circle(self, pos_x, pos_y, radius, depth, stroke_width=1., fill_color=None,
                 border_color=None, from_angle=0., to_angle=2 * pi):
    visible = False
    if not self.point_outside_view((pos_x, pos_y)):
        visible = True
    if not visible:
        for i in range(0, 8):
            angle = 2 * pi / 8. * i
            x = pos_x + cos(angle) * radius
            y = pos_y + sin(angle) * radius
            if not self.point_outside_view((x, y)):
                visible = True
                break
    if not visible:
        return False

    angle_sum = to_angle - from_angle
    if angle_sum < 0:
        angle_sum = float(to_angle + 2 * pi - from_angle)

    segments = self.pixel_to_size_ratio() * radius * 1.5
    segments = max(4, segments)
    segments = int(round(segments * angle_sum / (2. * pi)))

    types = []
    if fill_color is not None:
        types.append(GL_POLYGON)
    if border_color is not None:
        types.append(GL_LINE_LOOP)

    for type in types:
        if type == GL_POLYGON:
            fill_color.set()
        else:
            self._set_closest_stroke_width(stroke_width)
            border_color.set()
        glBegin(type)
        angle = from_angle
        for i in range(0, segments):
            x = pos_x + cos(angle) * radius
            y = pos_y + sin(angle) * radius
            glVertex3f(x, y, depth)
            angle += angle_sum / (segments - 1)
            if angle > 2 * pi:
                angle -= 2 * pi
            if i == segments - 2:
                angle = to_angle
        glEnd()
    return True
Draws a circle

Draws a circle out of line segments at a desired position with a desired size.

:param float pos_x: Center x position
:param float pos_y: Center y position
:param float depth: The Z layer
:param float radius: Radius of the circle
694
def delete_message(self, messageid="", folderid="", stackid=""):
    if self.standard_grant_type != "authorization_code":
        raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
    # The endpoint path and post_data keys were elided in the source;
    # '/messages/delete' and the parameter-name keys are inferred from the
    # DeviantArt API and this method's signature.
    response = self._req('/messages/delete', post_data={
        'folderid': folderid,
        'messageid': messageid,
        'stackid': stackid
    })
    return response
Delete a message or a message stack

:param folderid: The folder to delete the message from, defaults to inbox
:param messageid: The message to delete
:param stackid: The stack to delete
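A usage sketch, assuming da is a client authenticated with the Authorization Code grant; the IDs are hypothetical:

da.delete_message(messageid='1234567890')                    # delete a single message from the inbox
da.delete_message(stackid='abc123', folderid='inbox-folder') # delete an entire stack from a given folder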
695
def _GetStringValue(self, data_dict, name, default_value=None):
    values = data_dict.get(name, None)
    if not values:
        return default_value
    # The literals below were elided in the source; quoting values that
    # contain a comma and joining on ', ' is the assumed behavior.
    for index, value in enumerate(values):
        if ',' in value:
            values[index] = '"{0:s}"'.format(value)
    return ', '.join(values)
Retrieves a specific string value from the data dict.

Args:
  data_dict (dict[str, list[str]]): values per name.
  name (str): name of the value to retrieve.
  default_value (Optional[object]): value to return if the name has no value
      set in data_dict.

Returns:
  str: value represented as a string.
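A standalone sketch of the behavior assumed for the elided literals above (quote values containing a comma, then join with ', '):

values = ['Smith, John', 'Jane']
for index, value in enumerate(values):
    if ',' in value:
        values[index] = '"{0:s}"'.format(value)
print(', '.join(values))   # "Smith, John", Jane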
696
def parse_orgtable(lines):
    # The split delimiter was elided in the source; org tables delimit
    # columns with '|'.
    def parseline(l):
        w = l.split('|')[1:-1]
        return [wi.strip() for wi in w]

    columns = parseline(lines[0])
    data = []
    for line in lines[2:]:
        data.append(list(map(str, parseline(line))))
    dataframe = _pd.DataFrame(data=data, columns=columns)
    # Use RunNo as the index (set_index was called without assigning its
    # result in the source, so the call had no effect).
    dataframe = dataframe.set_index("RunNo")
    return dataframe
Parse an org-table (input as a list of strings split by newline) into a Pandas data frame.

Parameters
----------
lines : list of str
    An org-table input as a list of strings split by newline.

Returns
-------
dataframe : pandas.DataFrame
    A data frame containing the org-table's data.
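A usage sketch, assuming parse_orgtable above is importable and pandas is installed; the table content is made up:

lines = [
    '| RunNo | Value |',
    '|-------+-------|',
    '| 1     | 3.5   |',
    '| 2     | 4.0   |',
]
df = parse_orgtable(lines)
print(df)   # a DataFrame indexed by RunNo with a 'Value' column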
697
def strings_to_integers(strings: Iterable[str]) -> Iterable[int]:
    return strings_to_(strings, lambda x: int(float(x)))
Convert a list of strings to a list of integers.

:param strings: a list of strings
:return: a list of converted integers

.. doctest::

    >>> strings_to_integers(['1', '1.0', '-0.2'])
    [1, 1, 0]
698
def multipoint(self, points):
    shapeType = MULTIPOINT
    points = [points]
    self._shapeparts(parts=points, shapeType=shapeType)
Creates a MULTIPOINT shape. Points is a list of xy values.
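A hedged usage sketch in the pyshp style, which this writer method appears to belong to; the output path and record field are made up:

import shapefile

w = shapefile.Writer('points_out', shapeType=shapefile.MULTIPOINT)  # hypothetical output path
w.field('name', 'C')
w.multipoint([[1.0, 1.0], [2.0, 2.0], [3.0, 1.5]])  # one MULTIPOINT shape with three xy points
w.record('cluster-1')
w.close()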
699
def is_step_visible(self, step):
    return self.idempotent_dict.get(step, True) or \
        step not in self.storage.validated_step_data
Returns whether the given `step` should be included in the wizard; it is included if its form is idempotent or if it has not been filled in before.