Dataset columns: code (string, 64 to 7.01k characters), docstring (string, 2 to 15.8k characters), text (string, 144 to 19.2k characters; the docstring and code joined into an "### Input: ... ### Response: ..." prompt).
#vtb
def get_configuration_dict(self, secret_attrs=False):
    # NOTE: the dictionary keys used below were string literals that were stripped from this dump
    cd = super(TaxonomicAmendmentsShard, self).get_configuration_dict(secret_attrs=secret_attrs)
    cd[] = cd.pop()
    cd[] = cd.pop()
    if self._next_ott_id is not None:
        cd[] = self._next_ott_id
    return cd
Overrides superclass method and renames some properties
#vtb
def disconnect(cls):
    app = AndroidApplication.instance()
    f = app.create_future()

    def on_permission_result(result):
        if not result:
            f.set_result(None)
            return

        def on_ready(mgr):
            mgr.disconnect().then(f.set_result)

        WifiManager.get().then(on_ready)

    WifiManager.request_permission([
        WifiManager.PERMISSION_CHANGE_WIFI_STATE
    ]).then(on_permission_result)
    return f
Disconnect from the current network (if connected). Returns -------- result: future A future that resolves to true if the disconnect was successful. Will be set to None if the change network permission is denied.
#vtb
def _wire_events(self):
    self._device.on_open += self._on_open
    self._device.on_close += self._on_close
    self._device.on_read += self._on_read
    self._device.on_write += self._on_write
    self._zonetracker.on_fault += self._on_zone_fault
    self._zonetracker.on_restore += self._on_zone_restore
Wires up the internal device events.
#vtb
def refetch_fields(self, missing_fields):
    db_fields = self.mongokat_collection.find_one(
        {"_id": self["_id"]}, fields={k: 1 for k in missing_fields})
    self._fetched_fields += tuple(missing_fields)
    if not db_fields:
        return
    for k, v in db_fields.items():
        self[k] = v
Refetches a list of fields from the DB
#vtb
def _force_https(self):
    # NOTE: the string literals below were stripped in this dump and are restored from
    # context (standard Flask/HTTP values); treat them as a reconstruction, not verified source
    if self.session_cookie_secure:
        if not self.app.debug:
            self.app.config['SESSION_COOKIE_SECURE'] = True
    criteria = [
        self.app.debug,
        flask.request.is_secure,
        flask.request.headers.get('X-Forwarded-Proto', 'http') == 'https',
    ]
    local_options = self._get_local_options()
    if local_options['force_https'] and not any(criteria):
        if flask.request.url.startswith('http://'):
            url = flask.request.url.replace('http://', 'https://', 1)
            code = 302
            if self.force_https_permanent:
                code = 301
            r = flask.redirect(url, code=code)
            return r
Redirect any non-https requests to https. Based largely on flask-sslify.
#vtb
def create(self, dataset_id):
    from google.cloud.bigquery import Dataset

    if self.exists(dataset_id):
        raise DatasetCreationError(
            "Dataset {0} already exists".format(dataset_id)
        )

    dataset = Dataset(self.client.dataset(dataset_id))

    if self.location is not None:
        dataset.location = self.location

    try:
        self.client.create_dataset(dataset)
    except self.http_error as ex:
        self.process_http_error(ex)
Create a dataset in Google BigQuery Parameters ---------- dataset : str Name of dataset to be written
#vtb
def _create_session(self):
    logger.debug("Create new phantomjs web driver")
    self.driver = webdriver.PhantomJS(desired_capabilities=self.dcap, **self.driver_args)
    self.set_cookies(self.current_cookies)
    self.driver.set_window_size(1920, 1080)
Creates a fresh session with no/default headers and proxies
#vtb
def compact(self) -> str:
    # NOTE: the has-comment flag literals were stripped in this dump; '1'/'0' below are an assumption
    doc = "TX:{0}:{1}:{2}:{3}:{4}:{5}:{6}\n".format(
        self.version, len(self.issuers), len(self.inputs), len(self.unlocks),
        len(self.outputs), '1' if self.comment != "" else '0', self.locktime)
    if self.version >= 3:
        doc += "{0}\n".format(self.blockstamp)
    for pubkey in self.issuers:
        doc += "{0}\n".format(pubkey)
    for i in self.inputs:
        doc += "{0}\n".format(i.inline(self.version))
    for u in self.unlocks:
        doc += "{0}\n".format(u.inline())
    for o in self.outputs:
        doc += "{0}\n".format(o.inline())
    if self.comment != "":
        doc += "{0}\n".format(self.comment)
    for s in self.signatures:
        doc += "{0}\n".format(s)
    return doc
Return a transaction in its compact format from the instance :return:
#vtb
def query(url, output=True, **kwargs):
    # NOTE: a docstring fragment leaked into this cell and the string literals were stripped;
    # the warning message and the kwarg names/values below are restored as an assumption
    if output is not True:
        log.warning('Output option has been deprecated. Please use --quiet.')
    if 'node' not in kwargs:
        kwargs['node'] = 'master'
    opts = __opts__.copy()
    if 'opts' in kwargs:
        opts.update(kwargs['opts'])
        del kwargs['opts']
    ret = salt.utils.http.query(url=url, opts=opts, **kwargs)
    return ret
Query a resource, and decode the return data Passes through all the parameters described in the :py:func:`utils.http.query function <salt.utils.http.query>`: CLI Example: .. code-block:: bash salt-run http.query http://somelink.com/ salt-run http.query http://somelink.com/ method=POST \ params='key1=val1&key2=val2' salt-run http.query http://somelink.com/ method=POST \ data='<xml>somecontent</xml>'
#vtb
def _parse_tree(self, node):
    # NOTE: the attribute keys were stripped in this dump; 'kind', 'width' and 'height'
    # below are assumed from the instance attributes they populate
    if 'kind' in node.attrib:
        self.kind = node.attrib['kind']
    if 'width' in node.attrib:
        self.width = int(node.attrib['width'])
    if 'height' in node.attrib:
        self.height = int(node.attrib['height'])
    self.url = node.text
Parse an <image> object
#vtb
def set_data(self, adjacency_mat=None, **kwargs):
    if adjacency_mat is not None:
        if adjacency_mat.shape[0] != adjacency_mat.shape[1]:
            raise ValueError("Adjacency matrix should be square.")
        self._adjacency_mat = adjacency_mat
    for k in self._arrow_attributes:
        if k in kwargs:
            translated = (self._arrow_kw_trans[k] if k in self._arrow_kw_trans
                          else k)
            setattr(self._edges, translated, kwargs.pop(k))
    arrow_kwargs = {}
    for k in self._arrow_kwargs:
        if k in kwargs:
            translated = (self._arrow_kw_trans[k] if k in self._arrow_kw_trans
                          else k)
            arrow_kwargs[translated] = kwargs.pop(k)
    node_kwargs = {}
    for k in self._node_kwargs:
        if k in kwargs:
            translated = (self._node_kw_trans[k] if k in self._node_kw_trans
                          else k)
            node_kwargs[translated] = kwargs.pop(k)
    if len(kwargs) > 0:
        # %S in the source was an invalid format specifier; %s is used here
        raise TypeError("%s.set_data() got invalid keyword arguments: %s"
                        % (self.__class__.__name__, list(kwargs.keys())))
    self._arrow_data = arrow_kwargs
    self._node_data = node_kwargs
    if not self._animate:
        self.set_final_layout()
Set the data Parameters ---------- adjacency_mat : ndarray | None The adjacency matrix. **kwargs : dict Keyword arguments to pass to the arrows.
#vtb
def free(self):
    # NOTE: the 'action' and 'path' string arguments were stripped in this dump
    data = WebDavXmlUtils.create_free_space_request_content()
    response = self.execute_request(action=, path=, data=data)
    return WebDavXmlUtils.parse_free_space_response(response.content, self.webdav.hostname)
Returns the amount of free space on the remote WebDAV server. More information can be found at http://webdav.org/specs/rfc4918.html#METHOD_PROPFIND :return: the amount of free space in bytes.
#vtb
def _on_message(channel, method, header, body):
    # the source used Python 2 print statements; rewritten as print() calls
    print("Message:")
    print("\t%r" % method)
    print("\t%r" % header)
    print("\t%r" % body)
    channel.basic_ack(method.delivery_tag)
    channel.stop_consuming()
Invoked by pika when a message is delivered from RabbitMQ. The channel is passed for your convenience. The basic_deliver object that is passed in carries the exchange, routing key, delivery tag and a redelivered flag for the message. The properties passed in is an instance of BasicProperties with the message properties and the body is the message that was sent. :param pika.channel.Channel channel: The channel object :param pika.Spec.Basic.Deliver method: The Deliver method :param pika.Spec.BasicProperties properties: The client properties :param str|unicode body: The message body
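A minimal consumer sketch showing how a callback with this signature is typically wired up with pika's blocking API; the connection parameters and queue name are hypothetical, and pika >= 1.0 is assumed:

import pika

# Hypothetical wiring for the callback above (pika >= 1.0); the 'example' queue is a placeholder.
connection = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
channel = connection.channel()
channel.queue_declare(queue="example")
channel.basic_consume(queue="example", on_message_callback=_on_message)
channel.start_consuming()  # blocks until _on_message calls channel.stop_consuming()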
#vtb
def generate_screenshots(self):
    # NOTE: the header names/values were stripped in this dump (most likely JSON content-type headers)
    headers = {: , : }
    resp = requests.post(self.api_url, data=json.dumps(self.config),
                         headers=headers, auth=self.auth)
    resp = self._process_response(resp)
    return resp.json()
Take a config file as input and generate screenshots
#vtb
def get_endpoint(self, session, **kwargs):
    if self.endpoint is None:
        try:
            self._refresh_tokens(session)
            self._fetch_credentials(session)
        except:
            raise AuthorizationFailure()
    return self.endpoint
Get the HubiC storage endpoint uri. If the current session has not been authenticated, this will trigger a new authentication to the HubiC OAuth service. :param keystoneclient.Session session: The session object to use for queries. :raises keystoneclient.exceptions.AuthorizationFailure: if something goes wrong. :returns: The uri to use for object-storage v1 requests. :rtype: string
#vtb
def _create_variables_no_pretrain(self, n_features):
    # NOTE: the name-format strings for the weight/bias variables were stripped in this dump
    self.encoding_w_ = []
    self.encoding_b_ = []
    for l, layer in enumerate(self.layers):
        w_name = .format(l)
        b_name = .format(l)
        if l == 0:
            w_shape = [n_features, self.layers[l]]
        else:
            w_shape = [self.layers[l - 1], self.layers[l]]
        w_init = tf.truncated_normal(shape=w_shape, stddev=0.1)
        W = tf.Variable(w_init, name=w_name)
        tf.summary.histogram(w_name, W)
        self.encoding_w_.append(W)
        b_init = tf.constant(0.1, shape=[self.layers[l]])
        b = tf.Variable(b_init, name=b_name)
        tf.summary.histogram(b_name, b)
        self.encoding_b_.append(b)
Create model variables (no previous unsupervised pretraining). :param n_features: number of features :return: self
#vtb
def individual(self, ind_id=None):
    for ind_obj in self.individuals:
        if ind_obj.ind_id == ind_id:
            return ind_obj
    return None
Return an individual object Args: ind_id (str): An individual id Returns: individual (puzzle.models.individual)
#vtb
async def request(
        self, method: str, endpoint: str, *, headers: dict = None,
        params: dict = None) -> dict:
    # NOTE: the URL template, header/parameter names, status codes and the error
    # message were string literals stripped in this dump
    url = .format(API_URL_SCAFFOLD, endpoint)
    if not headers:
        headers = {}
    headers.update({: self._api_key})
    if not params:
        params = {}
    params.update({
        : self.latitude,
        : self.longitude,
        : self.altitude
    })
    async with self._websession.request(
            method, url, headers=headers, params=params) as resp:
        try:
            resp.raise_for_status()
            return await resp.json(content_type=None)
        except client_exceptions.ClientError as err:
            if any(code in str(err) for code in (, )):
                raise InvalidApiKeyError()
            raise RequestError(.format(endpoint, err)) from None
Make a request against air-matters.com.
#vtb
def p_expr_BAND_expr(p):
    # NOTE: the operator-name literal was stripped in this dump; 'BAND' is restored from the rule name
    p[0] = make_binary(p.lineno(2), 'BAND', p[1], p[3], lambda x, y: x & y)
expr : expr BAND expr
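For context, ply.yacc reads the grammar production from the rule function's docstring, which is why the docstring column holds `expr : expr BAND expr`. A sketch of the rule as PLY would expect it (make_binary and the token definitions are assumed from the surrounding compiler):

def p_expr_BAND_expr(p):
    """expr : expr BAND expr"""
    # ply.yacc parses the docstring above as the production; p[1]/p[3] are the
    # operand sub-expressions and p.lineno(2) is the line of the BAND token.
    p[0] = make_binary(p.lineno(2), 'BAND', p[1], p[3], lambda x, y: x & y)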
#vtb
def attribute_rewrite_map(self):
    rewrite_map = dict()
    token_rewrite_map = self.generate_attribute_token_rewrite_map()
    for attribute_name, type_instance in self.getmembers():
        if isinstance(type_instance, DataType):
            # the separator/empty-string literals were stripped in this dump;
            # '_' and '' are restored from the docstring example (long_name -> a_b)
            attribute_tokens = attribute_name.split('_')
            rewritten_attribute_name = ''
            for token in attribute_tokens:
                rewritten_attribute_name += token_rewrite_map[token] + "_"
            rewritten_attribute_name = rewritten_attribute_name[:-1]
            rewrite_map[attribute_name] = rewritten_attribute_name
    return rewrite_map
Example: long_name -> a_b :return: the rewrite map :rtype: dict
#vtb
def delete(self, request, key):
    # NOTE: the parameter names passed to request.DELETE.get() were stripped in this dump
    request.DELETE = http.QueryDict(request.body)
    email_addr = request.DELETE.get()
    user_id = request.DELETE.get()
    if not email_addr:
        return http.HttpResponseBadRequest()
    try:
        email = EmailAddressValidation.objects.get(address=email_addr,
                                                   user_id=user_id)
    except EmailAddressValidation.DoesNotExist:
        pass
    else:
        email.delete()
        return http.HttpResponse(status=204)
    try:
        email = EmailAddress.objects.get(address=email_addr, user_id=user_id)
    except EmailAddress.DoesNotExist:
        raise http.Http404
    email.user = None
    email.save()
    return http.HttpResponse(status=204)
Remove an email address, validated or not.
#vtb
def join_room(self, room_id_or_alias):
    if not room_id_or_alias:
        raise MatrixError("No alias or room ID to join.")
    path = "/join/%s" % quote(room_id_or_alias)
    return self._send("POST", path)
Performs /join/$room_id Args: room_id_or_alias (str): The room ID or room alias to join.
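A hedged usage sketch with the matrix_client API wrapper; the homeserver URL, access token, and room alias are placeholders:

from matrix_client.api import MatrixHttpApi

api = MatrixHttpApi("https://matrix.org", token="ACCESS_TOKEN")  # placeholders
response = api.join_room("#example:matrix.org")  # accepts a room alias or a room ID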
#vtb
def private_download_url(self, url, expires=3600):
    # NOTE: the string literals were stripped in this dump; the query-string pieces below
    # ('?', '&', 'e=', '&token=') are restored as an assumption from the qiniu URL scheme
    deadline = int(time.time()) + expires
    if '?' in url:
        url += '&'
    else:
        url += '?'
    url = '{0}e={1}'.format(url, str(deadline))
    token = self.token(url)
    return '{0}&token={1}'.format(url, token)
Generate a download link for a private resource. Args: url: the original URL of the resource in the private bucket expires: validity period of the download token in seconds, defaults to 3600 Returns: the download link for the private resource
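A hedged usage sketch with the qiniu SDK; the credentials and the private-bucket URL are placeholders:

from qiniu import Auth

q = Auth("ACCESS_KEY", "SECRET_KEY")  # placeholder credentials
base_url = "http://example-bucket.example.com/key.txt"  # placeholder private-bucket URL
signed_url = q.private_download_url(base_url, expires=3600)
print(signed_url)  # the original URL with the deadline and token appended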
#vtb
def read(self):
    # NOTE: string literals were stripped in this dump; '' (EOF) and 'timestamp' are
    # restored from context, and the DataSourceError message is lost
    line = self.trace_file.readline()
    if line == '':
        if self.loop:
            self._reopen_file()
        else:
            self.trace_file.close()
            self.trace_file = None
            raise DataSourceError()
    message = JsonFormatter.deserialize(line)
    timestamp = message.get('timestamp', None)
    if self.realtime and timestamp is not None:
        self._store_timestamp(timestamp)
        self._wait(self.starting_time, self.first_timestamp, timestamp)
    return line + "\x00"
Read a line of data from the input source at a time.
#vtb
def create(self, req, **kwargs):
    response = requests.post(self.url, json=req, **self.req_args())
    return self.parse_response(response)
Uses POST to send a first metadata statement signing request to a signing service. :param req: The metadata statement that the entity wants signed :return: returns a dictionary with 'sms' and 'loc' as keys.
#vtb
def _to_graph(self, contexts):
    prev = None
    for context in contexts:
        if prev is None:
            prev = context
            continue
        yield prev[0], context[1], context[0]
        prev = context
This is an iterator that returns each edge of our graph with its two nodes
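A standalone sketch of the same sliding-window idea, with illustrative names, to make the yielded (node, edge, node) triples concrete:

def pairs_to_edges(contexts):
    # contexts is a sequence of (node, edge_label) tuples; each adjacent pair becomes
    # an edge (previous_node, label_of_current, current_node).
    prev = None
    for context in contexts:
        if prev is None:
            prev = context
            continue
        yield prev[0], context[1], context[0]
        prev = context

print(list(pairs_to_edges([("a", 1), ("b", 2), ("c", 3)])))
# [('a', 2, 'b'), ('b', 3, 'c')]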
#vtb
def to_igraph(self, weighted=None):
    # NOTE: the body that builds the igraph object appears to have been lost in this
    # dump; `ig` is never constructed here
    return ig
Converts this Graph object to an igraph-compatible object. Requires the python-igraph library.
#vtb
def doi(self, doi, only_message=True):
    # NOTE: string literals were stripped in this dump; '/', 'get' and 'message' are
    # restored from the Crossref REST API conventions
    request_url = build_url_endpoint('/'.join([self.ENDPOINT, doi]))
    request_params = {}
    result = self.do_http_request(
        'get',
        request_url,
        data=request_params,
        custom_header=str(self.etiquette)
    )
    if result.status_code == 404:
        return
    result = result.json()
    return result['message'] if only_message is True else result
This method retrieve the DOI metadata related to a given DOI number. args: Crossref DOI id (String) return: JSON Example: >>> from crossref.restful import Works >>> works = Works() >>> works.doi('10.1590/S0004-28032013005000001') {'is-referenced-by-count': 6, 'reference-count': 216, 'DOI': '10.1590/s0004-28032013005000001', 'subtitle': [], 'issued': {'date-parts': [[2013, 4, 19]]}, 'source': 'Crossref', 'short-container-title': ['Arq. Gastroenterol.'], 'references-count': 216, 'short-title': [], 'deposited': {'timestamp': 1495911725000, 'date-time': '2017-05-27T19:02:05Z', 'date-parts': [[2017, 5, 27]]}, 'ISSN': ['0004-2803'], 'type': 'journal-article', 'URL': 'http://dx.doi.org/10.1590/s0004-28032013005000001', 'indexed': {'timestamp': 1496034748592, 'date-time': '2017-05-29T05:12:28Z', 'date-parts': [[2017, 5, 29]]}, 'content-domain': {'crossmark-restriction': False, 'domain': []}, 'created': {'timestamp': 1374613284000, 'date-time': '2013-07-23T21:01:24Z', 'date-parts': [[2013, 7, 23]]}, 'issn-type': [{'value': '0004-2803', 'type': 'electronic'}], 'page': '81-96', 'volume': '50', 'original-title': [], 'subject': ['Gastroenterology'], 'relation': {}, 'container-title': ['Arquivos de Gastroenterologia'], 'member': '530', 'prefix': '10.1590', 'published-print': {'date-parts': [[2013, 4, 19]]}, 'title': ['3rd BRAZILIAN CONSENSUS ON Helicobacter pylori'], 'publisher': 'FapUNIFESP (SciELO)', 'alternative-id': ['S0004-28032013000200081'], 'abstract': '<jats:p>Significant abstract data..... .</jats:p>', 'author': [{'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}], 'family': 'Coelho', 'given': 'Luiz Gonzaga'}, {'affiliation': [ {'name': 'Universidade Federal do Rio Grande do Sul, Brazil'}], 'family': 'Maguinilk', 'given': 'Ismael'}, {'affiliation': [ {'name': 'Presidente de Honra do Núcleo Brasileiro para Estudo do Helicobacter, Brazil'}], 'family': 'Zaterka', 'given': 'Schlioma'}, {'affiliation': [ {'name': 'Universidade Federal do Piauí, Brasil'}], 'family': 'Parente', 'given': 'José Miguel'}, {'affiliation': [{'name': 'Universidade Federal de Minas Gerais, BRAZIL'}], 'family': 'Passos', 'given': 'Maria do Carmo Friche'}, {'affiliation': [ {'name': 'Universidade de São Paulo, Brasil'}], 'family': 'Moraes-Filho', 'given': 'Joaquim Prado P.'}], 'score': 1.0, 'issue': '2'}
#vtb
def zonal_stats(raster, vector):
    # NOTE: this cell lost a large number of string literals (dictionary keys, field
    # names and log messages) in the dump; they are left blank rather than guessed,
    # and the indentation below is a best-effort reconstruction
    output_layer_name = zonal_stats_steps[]
    exposure = raster.keywords[]
    if raster.crs().authid() != vector.crs().authid():
        layer = reproject(vector, raster.crs())
        output_layer = create_memory_layer(
            output_layer_name, vector.geometryType(), vector.crs(), vector.fields())
        copy_layer(vector, output_layer)
    else:
        layer = create_memory_layer(
            output_layer_name, vector.geometryType(), vector.crs(), vector.fields())
        copy_layer(vector, layer)
    input_band = layer.keywords.get(, 1)
    analysis = QgsZonalStatistics(
        layer, raster, , input_band, QgsZonalStatistics.Sum)
    result = analysis.calculateStatistics(None)
    LOGGER.debug(tr( % (raster.source(), result)))
    output_field = exposure_count_field[] % exposure
    if raster.crs().authid() != vector.crs().authid():
        output_layer.startEditing()
        field = create_field_from_definition(exposure_count_field, exposure)
        output_layer.addAttribute(field)
        new_index = output_layer.fields().lookupField(field.name())
        old_index = layer.fields().lookupField()
        for feature_input, feature_output in zip(
                layer.getFeatures(), output_layer.getFeatures()):
            output_layer.changeAttributeValue(
                feature_input.id(), new_index, feature_input[old_index])
        output_layer.commitChanges()
        layer = output_layer
    else:
        fields_to_rename = {: output_field}
        if qgis_version() >= 21600:
            rename_fields(layer, fields_to_rename)
        else:
            copy_fields(layer, fields_to_rename)
            remove_fields(layer, list(fields_to_rename.keys()))
        layer.commitChanges()
    layer.startEditing()
    request = QgsFeatureRequest()
    expression = % output_field
    request.setFilterExpression(expression)
    request.setFlags(QgsFeatureRequest.NoGeometry)
    index = layer.fields().lookupField(output_field)
    for feature in layer.getFeatures():
        if feature[output_field] is None:
            layer.changeAttributeValue(feature.id(), index, 0)
    layer.commitChanges()
    layer.keywords = raster.keywords.copy()
    layer.keywords[] = vector.keywords[].copy()
    layer.keywords[] = (raster.keywords[].copy())
    key = exposure_count_field[] % raster.keywords[]
    layer.keywords[][key] = output_field
    layer.keywords[][total_field[]] = output_field
    layer.keywords[] = raster.keywords.copy()
    layer.keywords[] = vector.keywords[].copy()
    layer.keywords[] = (vector.keywords[])
    layer.keywords[] = (layer_purpose_aggregate_hazard_impacted[])
    layer.keywords[] = output_layer_name
    check_layer(layer)
    return layer
Reclassify a continuous raster layer. Issue https://github.com/inasafe/inasafe/issues/3190 The algorithm will take care of projections. We don't want to reproject the raster layer, so if the CRS are different we reproject the vector layer and then do a lookup from the reprojected layer to the original vector layer. :param raster: The raster layer. :type raster: QgsRasterLayer :param vector: The vector layer. :type vector: QgsVectorLayer :return: The output of the zonal stats. :rtype: QgsVectorLayer .. versionadded:: 4.0
#vtb
def guard_submit(analysis):
    if not analysis.getResult():
        return False
    for interim in analysis.getInterimFields():
        if not interim.get("value", ""):
            return False
    if not analysis.getAttachment():
        # NOTE: the attachment-option literal was stripped in this dump; 'r' ("required")
        # is restored as an assumption
        if analysis.getAttachmentOption() == 'r':
            return False
    if IRequestAnalysis.providedBy(analysis):
        point_of_capture = analysis.getPointOfCapture()
        if point_of_capture == "lab" and not analysis.isSampleReceived():
            return False
        if point_of_capture == "field" and not analysis.isSampleSampled():
            return False
    if not analysis.bika_setup.getAllowToSubmitNotAssigned():
        if not user_has_super_roles():
            if not analysis.getAnalyst():
                return False
            if analysis.getAnalyst() != api.get_current_user().getId():
                return False
    for dependency in analysis.getDependencies():
        if not is_submitted_or_submittable(dependency):
            return False
    return True
Return whether the transition "submit" can be performed or not
#vtb
def get_argument_parser():
    # NOTE: the description, option names, default value and help strings were
    # stripped in this dump and are left blank
    desc = 
    parser = cli.get_argument_parser(desc=desc)
    g = parser.add_argument_group()
    g.add_argument(, , type=cli.str_type, required=True,
                   metavar=cli.file_mv, help=)
    g.add_argument(, , type=cli.str_type, required=True,
                   metavar=cli.file_mv, help=)
    g.add_argument(, , type=cli.str_type, metavar=cli.str_mv,
                   default=, help=)
    cli.add_reporting_args(parser)
    return parser
Create the argument parser for the script. Parameters ---------- Returns ------- `argparse.ArgumentParser` The argument parser.
#vtb
def deferred(timeout=None):
    reactor, reactor_thread = threaded_reactor()
    if reactor is None:
        raise ImportError("twisted is not available or could not be imported")
    try:
        timeout is None or timeout + 0
    except TypeError:
        raise TypeError(" argument must be a number or None")

    def decorate(func):
        def wrapper(*args, **kargs):
            q = Queue()

            def callback(value):
                q.put(None)

            def errback(failure):
                try:
                    failure.raiseException()
                except:
                    q.put(sys.exc_info())

            def g():
                try:
                    d = func(*args, **kargs)
                    try:
                        d.addCallbacks(callback, errback)
                    except AttributeError:
                        raise TypeError("you must return a twisted Deferred "
                                        "from your test case!")
                except:
                    q.put(sys.exc_info())

            reactor.callFromThread(g)
            try:
                error = q.get(timeout=timeout)
            except Empty:
                raise TimeExpired("timeout expired before end of test (%f s.)"
                                  % timeout)
            if error is not None:
                exc_type, exc_value, tb = error
                # the source used the Python 2 three-argument raise; re-raise with the traceback for Python 3
                raise exc_value.with_traceback(tb)

        wrapper = make_decorator(func)(wrapper)
        return wrapper

    return decorate
By wrapping a test function with this decorator, you can return a twisted Deferred and the test will wait for the deferred to be triggered. The whole test function will run inside the Twisted event loop. The optional timeout parameter specifies the maximum duration of the test. The difference with timed() is that timed() will still wait for the test to end, while deferred() will stop the test when its timeout has expired. The latter is more desireable when dealing with network tests, because the result may actually never arrive. If the callback is triggered, the test has passed. If the errback is triggered or the timeout expires, the test has failed. Example:: @deferred(timeout=5.0) def test_resolve(): return reactor.resolve("www.python.org") Attention! If you combine this decorator with other decorators (like "raises"), deferred() must be called *first*! In other words, this is good:: @raises(DNSLookupError) @deferred() def test_error(): return reactor.resolve("xxxjhjhj.biz") and this is bad:: @deferred() @raises(DNSLookupError) def test_error(): return reactor.resolve("xxxjhjhj.biz")
#vtb
def invert(interval):
    interval.reverse()
    res = list(interval)
    interval.reverse()
    return res
Invert an interval. Example: >>> invert(['C', 'E']) ['E', 'C']
#vtb
def pos_tag_sents(
    sentences: List[List[str]], engine: str = "perceptron", corpus: str = "orchid"
) -> List[List[Tuple[str, str]]]:
    if not sentences:
        return []
    return [pos_tag(sent, engine=engine, corpus=corpus) for sent in sentences]
Part of Speech tagging Sentence function. :param list sentences: a list of lists of tokenized words :param str engine: * unigram - unigram tagger * perceptron - perceptron tagger (default) * artagger - RDR POS tagger :param str corpus: * orchid - annotated Thai academic articles (default) * orchid_ud - annotated Thai academic articles using Universal Dependencies Tags * pud - Parallel Universal Dependencies (PUD) treebanks :return: returns a list of labels regarding which part of speech it is
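A hedged usage sketch with PyThaiNLP; the example sentences are arbitrary and the printed tags are illustrative, not exact output:

from pythainlp.tag import pos_tag_sents

sents = [["ผม", "รัก", "คุณ"], ["ฝน", "ตก", "หนัก"]]
print(pos_tag_sents(sents, engine="perceptron", corpus="orchid"))
# e.g. [[('ผม', 'PPRS'), ('รัก', 'VACT'), ('คุณ', 'PPRS')], ...]  (tags illustrative)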
#vtb
def can_unsubscribe_from_topic(self, topic, user):
    # NOTE: the permission codename passed to the basic permission check was stripped in this dump
    return (
        user.is_authenticated and
        topic.has_subscriber(user) and
        self._perform_basic_permission_check(topic.forum, user, )
    )
Given a topic, checks whether the user can remove it from their subscription list.
#vtb
def discover_setup_packages():
    logger = logging.getLogger(__name__)
    import eups
    eups_client = eups.Eups()
    products = eups_client.getSetupProducts()
    packages = {}
    for package in products:
        name = package.name
        # the 'dir'/'version' keys are restored from the docstring; the debug
        # message template was stripped in this dump
        info = {
            'dir': package.dir,
            'version': package.version
        }
        packages[name] = info
        logger.debug(.format(name=name, **info))
    return packages
Summarize packages currently set up by EUPS, listing their set up directories and EUPS version names. Returns ------- packages : `dict` Dictionary with keys that are EUPS package names. Values are dictionaries with fields: - ``'dir'``: absolute directory path of the set up package. - ``'version'``: EUPS version string for package. Notes ----- This function imports the ``eups`` Python package, which is assumed to be available in the build environment. This function is designed to encapsulate all direct EUPS interactions needed by the stack documentation build process.
#vtb
def show_help(name):
    # NOTE: the usage text passed to these print() calls was stripped in this dump;
    # only two example hex strings survive, with their quotes lost
    print(.format(name))
    print()
    print()
    print()
    print()
    print()
    print(DEADBEEF DEADBEEF DEADBEEF DEADBEEF\)
    print(ABABABAB CDCDCDCD EFEFEFEF AEAEAEAE\)
    print()
Show help and basic usage
#vtb
def _back_compatible_gemini(conf_files, data):
    if vcfanno.is_human(data, builds=["37"]):
        for f in conf_files:
            if f and os.path.basename(f) == "gemini.conf" and os.path.exists(f):
                with open(f) as in_handle:
                    for line in in_handle:
                        if line.startswith("file"):
                            # the replace() arguments were stripped in this dump; stripping
                            # the surrounding quotes ('"' -> '') is assumed here
                            fname = line.strip().split("=")[-1].replace('"', '').strip()
                            if fname.find(".tidy.") > 0:
                                return install.get_gemini_dir(data)
    return None
Provide old install directory for configuration with GEMINI supplied tidy VCFs. Handles new style (bcbio installed) and old style (GEMINI installed) configuration and data locations.
#vtb
def fit(self, X, *args, **kwargs):
    self.constant_value = self._get_constant_value(X)
    if self.constant_value is None:
        if self.unfittable_model:
            self.model = getattr(scipy.stats, self.model_class)(*args, **kwargs)
        else:
            self.model = getattr(scipy.stats, self.model_class)(X, *args, **kwargs)
        for name in self.METHOD_NAMES:
            attribute = getattr(self.__class__, name)
            if isinstance(attribute, str):
                setattr(self, name, getattr(self.model, attribute))
            elif attribute is None:
                setattr(self, name, missing_method_scipy_wrapper(lambda x: x))
    else:
        self._replace_constant_methods()
    self.fitted = True
Fit scipy model to an array of values. Args: X(`np.ndarray` or `pd.DataFrame`): Datapoints to be estimated from. Must be 1-d Returns: None
#vtb
def set(self, data=None):
    self.__data = data
    self.__exception = None
    self.__event.set()
Sets the event
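A self-contained sketch of the pattern this setter belongs to, assuming a wrapper around threading.Event that carries a payload; the class and its wait() method are illustrative, not the library's own:

import threading

class DataEvent:
    # Illustrative wrapper: setting the event also stores a payload (or an exception)
    # for the waiting side to pick up.
    def __init__(self):
        self.__data = None
        self.__exception = None
        self.__event = threading.Event()

    def set(self, data=None):
        self.__data = data
        self.__exception = None
        self.__event.set()

    def wait(self, timeout=None):
        self.__event.wait(timeout)
        if self.__exception is not None:
            raise self.__exception
        return self.__data

evt = DataEvent()
threading.Timer(0.1, evt.set, args=("payload",)).start()
print(evt.wait())  # -> 'payload'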
#vtb
def get_permissions_for_registration(self):
    qs = Permission.objects.none()
    for instance in self.modeladmin_instances:
        qs = qs | instance.get_permissions_for_registration()
    return qs
Utilised by Wagtail's 'register_permissions' hook to allow permissions for all models grouped by this class to be assigned to Groups in settings.
#vtb
def _pre_analysis(self):
    # NOTE: the state-mode literal and the error-message template were stripped in this
    # dump; 'fastpath' is restored as an assumption, the message is left blank
    for item in self._starts:
        callstack = None
        if isinstance(item, tuple):
            ip = item[0]
            state = self._create_initial_state(item[0], item[1])
        elif isinstance(item, SimState):
            state = item.copy()
            ip = state.solver.eval_one(state.ip)
            self._reset_state_mode(state, 'fastpath')
        else:
            raise AngrCFGError( % str(type(item)))
        self._symbolic_function_initial_state[ip] = state
        path_wrapper = CFGJob(ip, state, self._context_sensitivity_level, None, None,
                              call_stack=callstack)
        key = path_wrapper.block_id
        if key not in self._start_keys:
            self._start_keys.append(key)
        self._insert_job(path_wrapper)
        self._register_analysis_job(path_wrapper.func_addr, path_wrapper)
Initialization work. Executed prior to the analysis. :return: None
#vtb
def timeseries(self):
    # NOTE: several string literals were stripped in this dump; the 'p'/'q' column
    # names are restored from the docstring, the join mode is left blank
    if self._timeseries is None:
        if isinstance(self.grid.network.timeseries.generation_fluctuating.columns,
                      pd.MultiIndex):
            if self.weather_cell_id:
                try:
                    timeseries = self.grid.network.timeseries.generation_fluctuating[
                        self.type, self.weather_cell_id].to_frame()
                except KeyError:
                    logger.exception("No time series for type {} and "
                                     "weather cell ID {} given.".format(
                                         self.type, self.weather_cell_id))
                    raise
            else:
                logger.exception("No weather cell ID provided for "
                                 "fluctuating generator {}.".format(repr(self)))
                raise KeyError
        else:
            try:
                timeseries = self.grid.network.timeseries.generation_fluctuating[
                    self.type].to_frame()
            except KeyError:
                logger.exception("No time series for type {} given.".format(self.type))
                raise
        timeseries = timeseries * self.nominal_capacity
        if self.curtailment is not None:
            timeseries = timeseries.join(self.curtailment.to_frame(), how=)
            timeseries.p = timeseries.p - timeseries.curtailment.fillna(0)
        if self.timeseries_reactive is not None:
            timeseries['q'] = self.timeseries_reactive
        else:
            timeseries['q'] = timeseries['p'] * self.q_sign * tan(acos(self.power_factor))
        return timeseries
    else:
        return self._timeseries.loc[self.grid.network.timeseries.timeindex, :]
Feed-in time series of generator It returns the actual time series used in power flow analysis. If :attr:`_timeseries` is not :obj:`None`, it is returned. Otherwise, :meth:`timeseries` looks for generation and curtailment time series of the according type of technology (and weather cell) in :class:`~.grid.network.TimeSeries`. Returns ------- :pandas:`pandas.DataFrame<dataframe>` DataFrame containing active power in kW in column 'p' and reactive power in kVA in column 'q'.
#vtb def from_schema(self, schema_node): params = [] for param_schema in schema_node.children: location = param_schema.name if location is : name = param_schema.__class__.__name__ if name == : name = schema_node.__class__.__name__ + param = self.parameter_converter(location, param_schema) param[] = name if self.ref: param = self._ref(param) params.append(param) elif location in ((, , , , )): for node_schema in param_schema.children: param = self.parameter_converter(location, node_schema) if self.ref: param = self._ref(param) params.append(param) return params
Creates a list of Swagger params from a colander request schema. :param schema_node: Request schema to be transformed into Swagger. :param validators: Validators used in colander with the schema. :rtype: list :returns: List of Swagger parameters.
### Input: Creates a list of Swagger params from a colander request schema. :param schema_node: Request schema to be transformed into Swagger. :param validators: Validators used in colander with the schema. :rtype: list :returns: List of Swagger parameters. ### Response: #vtb def from_schema(self, schema_node): params = [] for param_schema in schema_node.children: location = param_schema.name if location is : name = param_schema.__class__.__name__ if name == : name = schema_node.__class__.__name__ + param = self.parameter_converter(location, param_schema) param[] = name if self.ref: param = self._ref(param) params.append(param) elif location in ((, , , , )): for node_schema in param_schema.children: param = self.parameter_converter(location, node_schema) if self.ref: param = self._ref(param) params.append(param) return params
#vtb def select_candidates(config): download_candidates = [] for group in config.group: summary_file = get_summary(config.section, group, config.uri, config.use_cache) entries = parse_summary(summary_file) for entry in filter_entries(entries, config): download_candidates.append((entry, group)) return download_candidates
Select candidates to download. Parameters ---------- config: NgdConfig Runtime configuration object Returns ------- list of (<candidate entry>, <taxonomic group>)
### Input: Select candidates to download. Parameters ---------- config: NgdConfig Runtime configuration object Returns ------- list of (<candidate entry>, <taxonomic group>) ### Response: #vtb def select_candidates(config): download_candidates = [] for group in config.group: summary_file = get_summary(config.section, group, config.uri, config.use_cache) entries = parse_summary(summary_file) for entry in filter_entries(entries, config): download_candidates.append((entry, group)) return download_candidates
#vtb def process_dividends(self, next_session, asset_finder, adjustment_reader): position_tracker = self.position_tracker held_sids = set(position_tracker.positions) if held_sids: cash_dividends = adjustment_reader.get_dividends_with_ex_date( held_sids, next_session, asset_finder ) stock_dividends = ( adjustment_reader.get_stock_dividends_with_ex_date( held_sids, next_session, asset_finder ) ) position_tracker.earn_dividends( cash_dividends, stock_dividends, ) self._cash_flow( position_tracker.pay_dividends( next_session, ), )
Process dividends for the next session. This will earn us any dividends whose ex-date is the next session as well as paying out any dividends whose pay-date is the next session
### Input: Process dividends for the next session. This will earn us any dividends whose ex-date is the next session as well as paying out any dividends whose pay-date is the next session ### Response: #vtb def process_dividends(self, next_session, asset_finder, adjustment_reader): position_tracker = self.position_tracker held_sids = set(position_tracker.positions) if held_sids: cash_dividends = adjustment_reader.get_dividends_with_ex_date( held_sids, next_session, asset_finder ) stock_dividends = ( adjustment_reader.get_stock_dividends_with_ex_date( held_sids, next_session, asset_finder ) ) position_tracker.earn_dividends( cash_dividends, stock_dividends, ) self._cash_flow( position_tracker.pay_dividends( next_session, ), )
#vtb def reset_trial(self, trial, new_config, new_experiment_tag): trial.experiment_tag = new_experiment_tag trial.config = new_config trainable = trial.runner with warn_if_slow("reset_config"): reset_val = ray.get(trainable.reset_config.remote(new_config)) return reset_val
Tries to invoke `Trainable.reset_config()` to reset trial. Args: trial (Trial): Trial to be reset. new_config (dict): New configuration for Trial trainable. new_experiment_tag (str): New experiment name for trial. Returns: True if `reset_config` is successful else False.
### Input: Tries to invoke `Trainable.reset_config()` to reset trial. Args: trial (Trial): Trial to be reset. new_config (dict): New configuration for Trial trainable. new_experiment_tag (str): New experiment name for trial. Returns: True if `reset_config` is successful else False. ### Response: #vtb def reset_trial(self, trial, new_config, new_experiment_tag): trial.experiment_tag = new_experiment_tag trial.config = new_config trainable = trial.runner with warn_if_slow("reset_config"): reset_val = ray.get(trainable.reset_config.remote(new_config)) return reset_val
#vtb def RegisterTextKey(cls, key, atomid): def getter(tags, key): return tags[atomid] def setter(tags, key, value): tags[atomid] = value def deleter(tags, key): del(tags[atomid]) cls.RegisterKey(key, getter, setter, deleter)
Register a text key. If the key you need to register is a simple one-to-one mapping of MP4 atom name to EasyMP4Tags key, then you can use this function:: EasyMP4Tags.RegisterTextKey("artist", "\xa9ART")
### Input: Register a text key. If the key you need to register is a simple one-to-one mapping of MP4 atom name to EasyMP4Tags key, then you can use this function:: EasyMP4Tags.RegisterTextKey("artist", "\xa9ART") ### Response: #vtb def RegisterTextKey(cls, key, atomid): def getter(tags, key): return tags[atomid] def setter(tags, key, value): tags[atomid] = value def deleter(tags, key): del(tags[atomid]) cls.RegisterKey(key, getter, setter, deleter)
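A minimal usage sketch for the key-registration pattern above, assuming the mutagen package is installed; the "conductor" key name, its atom string, and the file path are illustrative placeholders, not part of the original row.

from mutagen.easymp4 import EasyMP4, EasyMP4Tags

# Map a human-readable key to an MP4 atom name (atom string chosen for illustration).
EasyMP4Tags.RegisterTextKey("conductor", "\xa9con")

audio = EasyMP4("song.m4a")          # placeholder path
audio["conductor"] = ["Jane Doe"]    # read/write through the registered key
audio.save()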
#vtb def _to_dict(self): _dict = {} if hasattr(self, ) and self.class_name is not None: _dict[] = self.class_name if hasattr(self, ) and self.score is not None: _dict[] = self.score if hasattr(self, ) and self.type_hierarchy is not None: _dict[] = self.type_hierarchy return _dict
Return a json dictionary representing this model.
### Input: Return a json dictionary representing this model. ### Response: #vtb def _to_dict(self): _dict = {} if hasattr(self, ) and self.class_name is not None: _dict[] = self.class_name if hasattr(self, ) and self.score is not None: _dict[] = self.score if hasattr(self, ) and self.type_hierarchy is not None: _dict[] = self.type_hierarchy return _dict
#vtb def ticker(ctx, market): market = Market(market, bitshares_instance=ctx.bitshares) ticker = market.ticker() t = [["key", "value"]] for key in ticker: t.append([key, str(ticker[key])]) print_table(t)
Show ticker of a market
### Input: Show ticker of a market ### Response: #vtb def ticker(ctx, market): market = Market(market, bitshares_instance=ctx.bitshares) ticker = market.ticker() t = [["key", "value"]] for key in ticker: t.append([key, str(ticker[key])]) print_table(t)
#vtb def worker_collectionfinish(self, node, ids): if self.shuttingdown: return self.config.hook.pytest_xdist_node_collection_finished(node=node, ids=ids) self._session.testscollected = len(ids) self.sched.add_node_collection(node, ids) if self.terminal: self.trdist.setstatus(node.gateway.spec, "[%d]" % (len(ids))) if self.sched.collection_is_completed: if self.terminal and not self.sched.has_pending: self.trdist.ensure_show_status() self.terminal.write_line("") if self.config.option.verbose > 0: self.terminal.write_line( "scheduling tests via %s" % (self.sched.__class__.__name__) ) self.sched.schedule()
worker has finished test collection. This adds the collection for this node to the scheduler. If the scheduler indicates collection is finished (i.e. all initial nodes have submitted their collections), then tells the scheduler to schedule the collected items. When initiating scheduling the first time it logs which scheduler is in use.
### Input: worker has finished test collection. This adds the collection for this node to the scheduler. If the scheduler indicates collection is finished (i.e. all initial nodes have submitted their collections), then tells the scheduler to schedule the collected items. When initiating scheduling the first time it logs which scheduler is in use. ### Response: #vtb def worker_collectionfinish(self, node, ids): if self.shuttingdown: return self.config.hook.pytest_xdist_node_collection_finished(node=node, ids=ids) self._session.testscollected = len(ids) self.sched.add_node_collection(node, ids) if self.terminal: self.trdist.setstatus(node.gateway.spec, "[%d]" % (len(ids))) if self.sched.collection_is_completed: if self.terminal and not self.sched.has_pending: self.trdist.ensure_show_status() self.terminal.write_line("") if self.config.option.verbose > 0: self.terminal.write_line( "scheduling tests via %s" % (self.sched.__class__.__name__) ) self.sched.schedule()
#vtb def rounding_accuracy(predictions, labels, weights_fn=common_layers.weights_nonzero): outputs = tf.squeeze(tf.to_int32(predictions)) labels = tf.squeeze(labels) weights = weights_fn(labels) labels = tf.to_int32(labels) return tf.to_float(tf.equal(outputs, labels)), weights
Rounding accuracy for L1/L2 losses: round down the predictions to ints.
### Input: Rounding accuracy for L1/L2 losses: round down the predictions to ints. ### Response: #vtb def rounding_accuracy(predictions, labels, weights_fn=common_layers.weights_nonzero): outputs = tf.squeeze(tf.to_int32(predictions)) labels = tf.squeeze(labels) weights = weights_fn(labels) labels = tf.to_int32(labels) return tf.to_float(tf.equal(outputs, labels)), weights
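For clarity, a framework-free sketch of the same metric using NumPy instead of TensorFlow; treating nonzero labels as the weights mirrors the weights_nonzero reference above and is an assumption about the intended behaviour.

import numpy as np

def rounding_accuracy_np(predictions, labels):
    # Truncate predictions to integers and compare against integer labels,
    # counting only positions where the label is nonzero.
    preds = np.trunc(np.squeeze(predictions)).astype(int)
    labels = np.squeeze(labels).astype(int)
    weights = (labels != 0).astype(float)
    correct = (preds == labels).astype(float)
    return (correct * weights).sum() / max(weights.sum(), 1.0)

print(rounding_accuracy_np([1.2, 2.9, 0.4], [1, 2, 3]))  # 2 of 3 weighted positions match -> ~0.667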
#vtb def _repr_png_(self): app.process_events() QApplication.processEvents() img = read_pixels() return bytes(_make_png(img))
This is used by ipython to plot inline.
### Input: This is used by ipython to plot inline. ### Response: #vtb def _repr_png_(self): app.process_events() QApplication.processEvents() img = read_pixels() return bytes(_make_png(img))
#vtb def bots_create(self, bot): self.client.bots(_method="POST", _json=bot.to_json(), _params=dict(userToken=self.token))
Save new bot :param bot: bot object to save :type bot: Bot
### Input: Save new bot :param bot: bot object to save :type bot: Bot ### Response: #vtb def bots_create(self, bot): self.client.bots(_method="POST", _json=bot.to_json(), _params=dict(userToken=self.token))
#vtb def raise_for_execution_errors(nb, output_path): error = None for cell in nb.cells: if cell.get("outputs") is None: continue for output in cell.outputs: if output.output_type == "error": error = PapermillExecutionError( exec_count=cell.execution_count, source=cell.source, ename=output.ename, evalue=output.evalue, traceback=output.traceback, ) break if error: error_msg = ERROR_MESSAGE_TEMPLATE % str(error.exec_count) error_msg_cell = nbformat.v4.new_code_cell( source="%%html\n" + error_msg, outputs=[ nbformat.v4.new_output(output_type="display_data", data={"text/html": error_msg}) ], metadata={"inputHidden": True, "hide_input": True}, ) nb.cells = [error_msg_cell] + nb.cells write_ipynb(nb, output_path) raise error
Assigned parameters into the appropriate place in the input notebook Parameters ---------- nb : NotebookNode Executable notebook object output_path : str Path to write executed notebook
### Input: Assigned parameters into the appropriate place in the input notebook Parameters ---------- nb : NotebookNode Executable notebook object output_path : str Path to write executed notebook ### Response: #vtb def raise_for_execution_errors(nb, output_path): error = None for cell in nb.cells: if cell.get("outputs") is None: continue for output in cell.outputs: if output.output_type == "error": error = PapermillExecutionError( exec_count=cell.execution_count, source=cell.source, ename=output.ename, evalue=output.evalue, traceback=output.traceback, ) break if error: error_msg = ERROR_MESSAGE_TEMPLATE % str(error.exec_count) error_msg_cell = nbformat.v4.new_code_cell( source="%%html\n" + error_msg, outputs=[ nbformat.v4.new_output(output_type="display_data", data={"text/html": error_msg}) ], metadata={"inputHidden": True, "hide_input": True}, ) nb.cells = [error_msg_cell] + nb.cells write_ipynb(nb, output_path) raise error
#vtb def read(self, pos, size, **kwargs): short_reads = kwargs.pop(, None) if self.write_mode is None: self.write_mode = False elif self.write_mode is True: raise SimFileError("Cannot read and write to the same SimPackets") if pos is None: pos = len(self.content) if pos < 0: raise SimFileError("SimPacket.read(%d): Negative packet number?" % pos) elif pos > len(self.content): raise SimFileError("SimPacket.read(%d): Packet number is past frontier of %d?" % (pos, len(self.content))) elif pos != len(self.content): _, realsize = self.content[pos] self.state.solver.add(size <= realsize) if not self.state.solver.satisfiable(): raise SimFileError("Packet read size constraint made state unsatisfiable???") return self.content[pos] + (pos+1,) if type(size) is int: size = self.state.solver.BVV(size, self.state.arch.bits) packet = (data, size) self.content.append(packet) return packet + (pos+1,)
Read a packet from the stream. :param int pos: The packet number to read from the sequence of the stream. May be None to append to the stream. :param size: The size to read. May be symbolic. :param short_reads: Whether to replace the size with a symbolic value constrained to less than or equal to the original size. If unspecified, will be chosen based on the state option. :return: A tuple of the data read (a bitvector of the length that is the maximum length of the read) and the actual size of the read.
### Input: Read a packet from the stream. :param int pos: The packet number to read from the sequence of the stream. May be None to append to the stream. :param size: The size to read. May be symbolic. :param short_reads: Whether to replace the size with a symbolic value constrained to less than or equal to the original size. If unspecified, will be chosen based on the state option. :return: A tuple of the data read (a bitvector of the length that is the maximum length of the read) and the actual size of the read. ### Response: #vtb def read(self, pos, size, **kwargs): short_reads = kwargs.pop(, None) if self.write_mode is None: self.write_mode = False elif self.write_mode is True: raise SimFileError("Cannot read and write to the same SimPackets") if pos is None: pos = len(self.content) if pos < 0: raise SimFileError("SimPacket.read(%d): Negative packet number?" % pos) elif pos > len(self.content): raise SimFileError("SimPacket.read(%d): Packet number is past frontier of %d?" % (pos, len(self.content))) elif pos != len(self.content): _, realsize = self.content[pos] self.state.solver.add(size <= realsize) if not self.state.solver.satisfiable(): raise SimFileError("Packet read size constraint made state unsatisfiable???") return self.content[pos] + (pos+1,) if type(size) is int: size = self.state.solver.BVV(size, self.state.arch.bits) packet = (data, size) self.content.append(packet) return packet + (pos+1,)
#vtb def simxGetDistanceHandle(clientID, distanceObjectName, operationMode): handle = ct.c_int() if (sys.version_info[0] == 3) and (type(distanceObjectName) is str): distanceObjectName=distanceObjectName.encode() return c_GetDistanceHandle(clientID, distanceObjectName, ct.byref(handle), operationMode), handle.value
Please have a look at the function description/documentation in the V-REP user manual
### Input: Please have a look at the function description/documentation in the V-REP user manual ### Response: #vtb def simxGetDistanceHandle(clientID, distanceObjectName, operationMode): handle = ct.c_int() if (sys.version_info[0] == 3) and (type(distanceObjectName) is str): distanceObjectName=distanceObjectName.encode() return c_GetDistanceHandle(clientID, distanceObjectName, ct.byref(handle), operationMode), handle.value
#vtb def _new_masterpassword(self, password): if self.config_key in self.config and self.config[self.config_key]: raise Exception("Storage already has a masterpassword!") self.decrypted_master = hexlify(os.urandom(32)).decode("ascii") self.password = password self._save_encrypted_masterpassword() return self.masterkey
Generate a new random masterkey, encrypt it with the password and store it in the store. :param str password: Password to use for en-/de-cryption
### Input: Generate a new random masterkey, encrypt it with the password and store it in the store. :param str password: Password to use for en-/de-cryption ### Response: #vtb def _new_masterpassword(self, password): if self.config_key in self.config and self.config[self.config_key]: raise Exception("Storage already has a masterpassword!") self.decrypted_master = hexlify(os.urandom(32)).decode("ascii") self.password = password self._save_encrypted_masterpassword() return self.masterkey
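The random-key step above in isolation, as a tiny standalone sketch; the password-based encryption and storage parts are omitted here.

import os
from binascii import hexlify

# 32 random bytes rendered as a 64-character hex string, as in the method above.
master_key = hexlify(os.urandom(32)).decode("ascii")
print(len(master_key))  # 64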
#vtb def has_ocsp_must_staple_extension(certificate: cryptography.x509.Certificate) -> bool: has_ocsp_must_staple = False try: tls_feature_ext = certificate.extensions.get_extension_for_oid(ExtensionOID.TLS_FEATURE) for feature_type in tls_feature_ext.value: if feature_type == cryptography.x509.TLSFeatureType.status_request: has_ocsp_must_staple = True break except ExtensionNotFound: pass return has_ocsp_must_staple
Return True if the certificate has the OCSP Must-Staple extension defined in RFC 6066.
### Input: Return True if the certificate has the OCSP Must-Staple extension defined in RFC 6066. ### Response: #vtb def has_ocsp_must_staple_extension(certificate: cryptography.x509.Certificate) -> bool: has_ocsp_must_staple = False try: tls_feature_ext = certificate.extensions.get_extension_for_oid(ExtensionOID.TLS_FEATURE) for feature_type in tls_feature_ext.value: if feature_type == cryptography.x509.TLSFeatureType.status_request: has_ocsp_must_staple = True break except ExtensionNotFound: pass return has_ocsp_must_staple
#vtb def _fix_set_options(cls, options): optional_set_options = (, ) mandatory_set_options = (, ) def _get_set(value_str): return cls._expand_error_codes(set(value_str.split()) - {}) for opt in optional_set_options: value = getattr(options, opt) if value is not None: setattr(options, opt, _get_set(value)) for opt in mandatory_set_options: value = getattr(options, opt) if value is None: value = if not isinstance(value, Set): value = _get_set(value) setattr(options, opt, value) return options
Alter the set options from None/strings to sets in place.
### Input: Alter the set options from None/strings to sets in place. ### Response: #vtb def _fix_set_options(cls, options): optional_set_options = (, ) mandatory_set_options = (, ) def _get_set(value_str): return cls._expand_error_codes(set(value_str.split()) - {}) for opt in optional_set_options: value = getattr(options, opt) if value is not None: setattr(options, opt, _get_set(value)) for opt in mandatory_set_options: value = getattr(options, opt) if value is None: value = if not isinstance(value, Set): value = _get_set(value) setattr(options, opt, value) return options
#vtb def update_md5(filenames): import re for name in filenames: base = os.path.basename(name) f = open(name,) md5_data[base] = md5(f.read()).hexdigest() f.close() data = [" %r: %r,\n" % it for it in md5_data.items()] data.sort() repl = "".join(data) import inspect srcfile = inspect.getsourcefile(sys.modules[__name__]) f = open(srcfile, ); src = f.read(); f.close() match = re.search("\nmd5_data = {\n([^}]+)}", src) if not match: print >>sys.stderr, "Internal error!" sys.exit(2) src = src[:match.start(1)] + repl + src[match.end(1):] f = open(srcfile,) f.write(src) f.close()
Update our built-in md5 registry
### Input: Update our built-in md5 registry ### Response: #vtb def update_md5(filenames): import re for name in filenames: base = os.path.basename(name) f = open(name,) md5_data[base] = md5(f.read()).hexdigest() f.close() data = [" %r: %r,\n" % it for it in md5_data.items()] data.sort() repl = "".join(data) import inspect srcfile = inspect.getsourcefile(sys.modules[__name__]) f = open(srcfile, ); src = f.read(); f.close() match = re.search("\nmd5_data = {\n([^}]+)}", src) if not match: print >>sys.stderr, "Internal error!" sys.exit(2) src = src[:match.start(1)] + repl + src[match.end(1):] f = open(srcfile,) f.write(src) f.close()
#vtb def auth(name, nodes, pcsuser=, pcspasswd=, extra_args=None): pcs cluster auth\ ret = {: name, : True, : , : {}} auth_required = False authorized = __salt__[](nodes=nodes) log.trace(, authorized) authorized_dict = {} for line in authorized[].splitlines(): node = line.split()[0].strip() auth_state = line.split()[1].strip() if node in nodes: authorized_dict.update({node: auth_state}) log.trace(, authorized_dict) for node in nodes: if node in authorized_dict and authorized_dict[node] == : ret[] += .format(node) else: auth_required = True if __opts__[]: ret[] += .format(node) if not auth_required: return ret if __opts__[]: ret[] = None return ret if not isinstance(extra_args, (list, tuple)): extra_args = [] if not in extra_args: extra_args += [] authorize = __salt__[](nodes=nodes, pcsuser=pcsuser, pcspasswd=pcspasswd, extra_args=extra_args) log.trace(, authorize) authorize_dict = {} for line in authorize[].splitlines(): node = line.split()[0].strip() auth_state = line.split()[1].strip() if node in nodes: authorize_dict.update({node: auth_state}) log.trace(, authorize_dict) for node in nodes: if node in authorize_dict and authorize_dict[node] == : ret[] += .format(node) ret[].update({node: {: , : }}) else: ret[] = False if node in authorized_dict: ret[] += .format(node, authorized_dict[node]) if node in authorize_dict: ret[] += .format(node, authorize_dict[node]) return ret
Ensure all nodes are authorized to the cluster name Irrelevant, not used (recommended: pcs_auth__auth) nodes a list of nodes which should be authorized to the cluster pcsuser user for communication with pcs (default: hacluster) pcspasswd password for pcsuser (default: hacluster) extra_args list of extra args for the \'pcs cluster auth\' command Example: .. code-block:: yaml pcs_auth__auth: pcs.auth: - nodes: - node1.example.com - node2.example.com - pcsuser: hacluster - pcspasswd: hoonetorg - extra_args: []
### Input: Ensure all nodes are authorized to the cluster name Irrelevant, not used (recommended: pcs_auth__auth) nodes a list of nodes which should be authorized to the cluster pcsuser user for communication with pcs (default: hacluster) pcspasswd password for pcsuser (default: hacluster) extra_args list of extra args for the \'pcs cluster auth\' command Example: .. code-block:: yaml pcs_auth__auth: pcs.auth: - nodes: - node1.example.com - node2.example.com - pcsuser: hacluster - pcspasswd: hoonetorg - extra_args: [] ### Response: #vtb def auth(name, nodes, pcsuser=, pcspasswd=, extra_args=None): pcs cluster auth\ ret = {: name, : True, : , : {}} auth_required = False authorized = __salt__[](nodes=nodes) log.trace(, authorized) authorized_dict = {} for line in authorized[].splitlines(): node = line.split()[0].strip() auth_state = line.split()[1].strip() if node in nodes: authorized_dict.update({node: auth_state}) log.trace(, authorized_dict) for node in nodes: if node in authorized_dict and authorized_dict[node] == : ret[] += .format(node) else: auth_required = True if __opts__[]: ret[] += .format(node) if not auth_required: return ret if __opts__[]: ret[] = None return ret if not isinstance(extra_args, (list, tuple)): extra_args = [] if not in extra_args: extra_args += [] authorize = __salt__[](nodes=nodes, pcsuser=pcsuser, pcspasswd=pcspasswd, extra_args=extra_args) log.trace(, authorize) authorize_dict = {} for line in authorize[].splitlines(): node = line.split()[0].strip() auth_state = line.split()[1].strip() if node in nodes: authorize_dict.update({node: auth_state}) log.trace(, authorize_dict) for node in nodes: if node in authorize_dict and authorize_dict[node] == : ret[] += .format(node) ret[].update({node: {: , : }}) else: ret[] = False if node in authorized_dict: ret[] += .format(node, authorized_dict[node]) if node in authorize_dict: ret[] += .format(node, authorize_dict[node]) return ret
#vtb def reaction_charge(reaction, compound_charge): charge_sum = 0.0 for compound, value in reaction.compounds: charge = compound_charge.get(compound.name, float()) charge_sum += charge * float(value) return charge_sum
Calculate the overall charge for the specified reaction. Args: reaction: :class:`psamm.reaction.Reaction`. compound_charge: a map from each compound to charge values.
### Input: Calculate the overall charge for the specified reaction. Args: reaction: :class:`psamm.reaction.Reaction`. compound_charge: a map from each compound to charge values. ### Response: #vtb def reaction_charge(reaction, compound_charge): charge_sum = 0.0 for compound, value in reaction.compounds: charge = compound_charge.get(compound.name, float()) charge_sum += charge * float(value) return charge_sum
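A self-contained sketch of the same charge-balance idea without the psamm reaction objects; the compound names and charges below are made up for illustration.

def overall_charge(stoichiometry, compound_charge):
    # stoichiometry: iterable of (compound_name, coefficient) pairs,
    # negative coefficients for substrates, positive for products.
    # Unknown compounds contribute NaN, as in the function above.
    return sum(compound_charge.get(name, float("nan")) * float(coeff)
               for name, coeff in stoichiometry)

charges = {"atp": -4, "h2o": 0, "adp": -3, "pi": -2, "h": 1}
reaction = [("atp", -1), ("h2o", -1), ("adp", 1), ("pi", 1), ("h", 1)]
print(overall_charge(reaction, charges))  # 4 - 3 - 2 + 1 = 0.0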
#vtb def gen_mapname(): filepath = None while (filepath is None) or (os.path.exists(os.path.join(config[], filepath))): filepath = % _gen_string() return filepath
Generate a unique mapfile pathname.
### Input: Generate a unique mapfile pathname. ### Response: #vtb def gen_mapname(): filepath = None while (filepath is None) or (os.path.exists(os.path.join(config[], filepath))): filepath = % _gen_string() return filepath
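A standalone sketch of the same collision-avoiding name generation; the directory and the ".map" suffix are placeholders, since the original string literals are not shown in the row above.

import os, uuid

def gen_unique_name(directory, suffix=".map"):
    # Keep drawing random names until one does not already exist in the directory.
    path = None
    while path is None or os.path.exists(os.path.join(directory, path)):
        path = uuid.uuid4().hex + suffix
    return path

print(gen_unique_name("/tmp"))  # e.g. '3f2b9c...a1d.map'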
#vtb def backpropagate_3d(uSin, angles, res, nm, lD=0, coords=None, weight_angles=True, onlyreal=False, padding=(True, True), padfac=1.75, padval=None, intp_order=2, dtype=None, num_cores=ncores, save_memory=False, copy=True, count=None, max_count=None, verbose=0): r A = angles.size if len(uSin.shape) != 3: raise ValueError("Input data `uSin` must have shape (A,Ny,Nx).") if len(uSin) != A: raise ValueError("`len(angles)` must be equal to `len(uSin)`.") if len(list(padding)) != 2: raise ValueError("`padding` must be boolean tuple of length 2!") if np.array(padding).dtype is not np.dtype(bool): raise ValueError("Parameter `padding` must be boolean tuple.") if coords is not None: raise NotImplementedError("Setting coordinates is not yet supported.") if num_cores > ncores: raise ValueError("`num_cores` must not exceed number " + "of physical cores: {}".format(ncores)) if dtype is None: dtype = np.float_ dtype = np.dtype(dtype) if dtype.name not in ["float32", "float64"]: raise ValueError("dtype must be float32 or float64!") dtype_complex = np.dtype("complex{}".format( 2 * np.int(dtype.name.strip("float")))) ct_dt_map = {np.dtype(np.float32): ctypes.c_float, np.dtype(np.float64): ctypes.c_double } if max_count is not None: max_count.value += A + 2 ne.set_num_threads(num_cores) uSin = np.array(uSin, copy=copy) lny, lnx = uSin.shape[1], uSin.shape[2] ln = lnx orderx = np.int(max(64., 2**np.ceil(np.log(lnx * padfac) / np.log(2)))) ordery = np.int(max(64., 2**np.ceil(np.log(lny * padfac) / np.log(2)))) if padding[0]: padx = orderx - lnx else: padx = 0 if padding[1]: pady = ordery - lny else: pady = 0 padyl = np.int(np.ceil(pady / 2)) padyr = pady - padyl padxl = np.int(np.ceil(padx / 2)) padxr = padx - padxl lNx, lNy = lnx + padx, lny + pady lNz = ln if verbose > 0: print("......Image size (x,y): {}x{}, padded: {}x{}".format( lnx, lny, lNx, lNy)) if weight_angles: weights = util.compute_angle_weights_1d(angles).reshape(-1, 1, 1) uSin *= weights km = (2 * np.pi * nm) / res fx = np.fft.fftfreq(lNx) fy = np.fft.fftfreq(lNy) kx = 2 * np.pi * fx ky = 2 * np.pi * fy dphi0 = 2 * np.pi / A kx = kx.reshape(1, -1) ky = ky.reshape(-1, 1) filter_klp = (kx**2 + ky**2 < km**2) M = 1. 
/ km * np.sqrt((km**2 - kx**2 - ky**2) * filter_klp) prefactor = -1j * km / (2 * np.pi) prefactor *= dphi0 prefactor *= np.abs(kx) * filter_klp prefactor *= np.exp(-1j * km * (M-1) * lD) if count is not None: count.value += 1 center = lNz / 2.0 z = np.linspace(-center, center, lNz, endpoint=False) zv = z.reshape(-1, 1, 1) Mp = M.reshape(lNy, lNx) f2_exp_fac = 1j * km * (Mp - 1) if save_memory: pass else: filter2 = ne.evaluate("exp(factor * zv)", local_dict={"factor": f2_exp_fac, "zv": zv}) if count is not None: count.value += 1 if onlyreal: outarr = np.zeros((ln, lny, lnx), dtype=dtype) else: outarr = np.zeros((ln, lny, lnx), dtype=dtype_complex) oneslice = pyfftw.empty_aligned((lNy, lNx), dtype_complex) myfftw_plan = pyfftw.FFTW(oneslice, oneslice, threads=num_cores, flags=["FFTW_ESTIMATE"], axes=(0, 1)) inarr = pyfftw.empty_aligned((lNy, lNx), dtype_complex) myifftw_plan = pyfftw.FFTW(inarr, inarr, threads=num_cores, axes=(0, 1), direction="FFTW_BACKWARD", flags=["FFTW_MEASURE"]) shared_array = mp.RawArray(ct_dt_map[dtype], ln * lny * lnx) arr = np.frombuffer(shared_array, dtype=dtype).reshape(ln, lny, lnx) pool4loop = mp.Pool(processes=num_cores, initializer=_init_worker, initargs=(shared_array, (ln, lny, lnx), dtype)) filtered_proj = np.zeros((ln, lny, lnx), dtype=dtype_complex) for aa in np.arange(A): if padval is None: oneslice[:] = np.pad(uSin[aa], ((padyl, padyr), (padxl, padxr)), mode="edge") else: oneslice[:] = np.pad(uSin[aa], ((padyl, padyr), (padxl, padxr)), mode="linear_ramp", end_values=(padval,)) myfftw_plan.execute() oneslice *= prefactor / (lNx * lNy) for p in range(len(zv)): if save_memory: ne.evaluate("exp(factor * zvp) * projectioni", local_dict={"zvp": zv[p], "projectioni": oneslice, "factor": f2_exp_fac}, out=inarr) else: np.multiply(filter2[p], oneslice, out=inarr) myifftw_plan.execute() filtered_proj[p, :, :] = inarr[padyl:lny+padyl, padxl:lnx+padxl] arr[:] = filtered_proj.real phi0 = np.rad2deg(angles[aa]) if not onlyreal: filtered_proj_imag = filtered_proj.imag _mprotate(phi0, lny, pool4loop, intp_order) outarr.real += arr if not onlyreal: arr[:] = filtered_proj_imag _mprotate(phi0, lny, pool4loop, intp_order) outarr.imag += arr if count is not None: count.value += 1 pool4loop.terminate() pool4loop.join() _cleanup_worker() return outarr
r"""3D backpropagation Three-dimensional diffraction tomography reconstruction algorithm for scattering of a plane wave :math:`u_0(\mathbf{r}) = u_0(x,y,z)` by a dielectric object with refractive index :math:`n(x,y,z)`. This method implements the 3D backpropagation algorithm :cite:`Mueller2015arxiv`. .. math:: f(\mathbf{r}) = -\frac{i k_\mathrm{m}}{2\pi} \sum_{j=1}^{N} \! \Delta \phi_0 D_{-\phi_j} \!\! \left \{ \text{FFT}^{-1}_{\mathrm{2D}} \left \{ \left| k_\mathrm{Dx} \right| \frac{\text{FFT}_{\mathrm{2D}} \left \{ u_{\mathrm{B},\phi_j}(x_\mathrm{D}, y_\mathrm{D}) \right \}} {u_0(l_\mathrm{D})} \exp \! \left[i k_\mathrm{m}(M - 1) \cdot (z_{\phi_j}-l_\mathrm{D}) \right] \right \} \right \} with the forward :math:`\text{FFT}_{\mathrm{2D}}` and inverse :math:`\text{FFT}^{-1}_{\mathrm{2D}}` 2D fast Fourier transform, the rotational operator :math:`D_{-\phi_j}`, the angular distance between the projections :math:`\Delta \phi_0`, the ramp filter in Fourier space :math:`|k_\mathrm{Dx}|`, and the propagation distance :math:`(z_{\phi_j}-l_\mathrm{D})`. Parameters ---------- uSin: (A, Ny, Nx) ndarray Three-dimensional sinogram of plane recordings :math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D}, y_\mathrm{D})` divided by the incident plane wave :math:`u_0(l_\mathrm{D})` measured at the detector. angles: (A,) ndarray Angular positions :math:`\phi_j` of `uSin` in radians. res: float Vacuum wavelength of the light :math:`\lambda` in pixels. nm: float Refractive index of the surrounding medium :math:`n_\mathrm{m}`. lD: float Distance from center of rotation to detector plane :math:`l_\mathrm{D}` in pixels. coords: None [(3, M) ndarray] Only compute the output image at these coordinates. This keyword is reserved for future versions and is not implemented yet. weight_angles: bool If `True`, weights each backpropagated projection with a factor proportional to the angular distance between the neighboring projections. .. math:: \Delta \phi_0 \longmapsto \Delta \phi_j = \frac{\phi_{j+1} - \phi_{j-1}}{2} .. versionadded:: 0.1.1 onlyreal: bool If `True`, only the real part of the reconstructed image will be returned. This saves computation time. padding: tuple of bool Pad the input data to the second next power of 2 before Fourier transforming. This reduces artifacts and speeds up the process for input image sizes that are not powers of 2. The default is padding in x and y: `padding=(True, True)`. For padding only in x-direction (e.g. for cylindrical symmetries), set `padding` to `(True, False)`. To turn off padding, set it to `(False, False)`. padfac: float Increase padding size of the input data. A value greater than one will trigger padding to the second-next power of two. For example, a value of 1.75 will lead to a padded size of 256 for an initial size of 144, whereas it will lead to a padded size of 512 for an initial size of 150. Values geater than 2 are allowed. This parameter may greatly increase memory usage! padval: float The value used for padding. This is important for the Rytov approximation, where an approximat zero in the phase might translate to 2πi due to the unwrapping algorithm. In that case, this value should be a multiple of 2πi. If `padval` is `None`, then the edge values are used for padding (see documentation of :func:`numpy.pad`). intp_order: int between 0 and 5 Order of the interpolation for rotation. See :func:`scipy.ndimage.interpolation.rotate` for details. dtype: dtype object or argument for :func:`numpy.dtype` The data type that is used for calculations (float or double). 
Defaults to `numpy.float_`. num_cores: int The number of cores to use for parallel operations. This value defaults to the number of cores on the system. save_memory: bool Saves memory at the cost of longer computation time. .. versionadded:: 0.1.5 copy: bool Copy input sinogram `uSin` for data processing. If `copy` is set to `False`, then `uSin` will be overridden. .. versionadded:: 0.1.5 count, max_count: multiprocessing.Value or `None` Can be used to monitor the progress of the algorithm. Initially, the value of `max_count.value` is incremented by the total number of steps. At each step, the value of `count.value` is incremented. verbose: int Increment to increase verbosity. Returns ------- f: ndarray of shape (Nx, Ny, Nx), complex if `onlyreal==False` Reconstructed object function :math:`f(\mathbf{r})` as defined by the Helmholtz equation. :math:`f(x,z) = k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)` See Also -------- odt_to_ri: conversion of the object function :math:`f(\mathbf{r})` to refractive index :math:`n(\mathbf{r})` Notes ----- Do not use the parameter `lD` in combination with the Rytov approximation - the propagation is not correctly described. Instead, numerically refocus the sinogram prior to converting it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`) with a numerical focusing algorithm (available in the Python package :py:mod:`nrefocus`).
### Input: r"""3D backpropagation Three-dimensional diffraction tomography reconstruction algorithm for scattering of a plane wave :math:`u_0(\mathbf{r}) = u_0(x,y,z)` by a dielectric object with refractive index :math:`n(x,y,z)`. This method implements the 3D backpropagation algorithm :cite:`Mueller2015arxiv`. .. math:: f(\mathbf{r}) = -\frac{i k_\mathrm{m}}{2\pi} \sum_{j=1}^{N} \! \Delta \phi_0 D_{-\phi_j} \!\! \left \{ \text{FFT}^{-1}_{\mathrm{2D}} \left \{ \left| k_\mathrm{Dx} \right| \frac{\text{FFT}_{\mathrm{2D}} \left \{ u_{\mathrm{B},\phi_j}(x_\mathrm{D}, y_\mathrm{D}) \right \}} {u_0(l_\mathrm{D})} \exp \! \left[i k_\mathrm{m}(M - 1) \cdot (z_{\phi_j}-l_\mathrm{D}) \right] \right \} \right \} with the forward :math:`\text{FFT}_{\mathrm{2D}}` and inverse :math:`\text{FFT}^{-1}_{\mathrm{2D}}` 2D fast Fourier transform, the rotational operator :math:`D_{-\phi_j}`, the angular distance between the projections :math:`\Delta \phi_0`, the ramp filter in Fourier space :math:`|k_\mathrm{Dx}|`, and the propagation distance :math:`(z_{\phi_j}-l_\mathrm{D})`. Parameters ---------- uSin: (A, Ny, Nx) ndarray Three-dimensional sinogram of plane recordings :math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D}, y_\mathrm{D})` divided by the incident plane wave :math:`u_0(l_\mathrm{D})` measured at the detector. angles: (A,) ndarray Angular positions :math:`\phi_j` of `uSin` in radians. res: float Vacuum wavelength of the light :math:`\lambda` in pixels. nm: float Refractive index of the surrounding medium :math:`n_\mathrm{m}`. lD: float Distance from center of rotation to detector plane :math:`l_\mathrm{D}` in pixels. coords: None [(3, M) ndarray] Only compute the output image at these coordinates. This keyword is reserved for future versions and is not implemented yet. weight_angles: bool If `True`, weights each backpropagated projection with a factor proportional to the angular distance between the neighboring projections. .. math:: \Delta \phi_0 \longmapsto \Delta \phi_j = \frac{\phi_{j+1} - \phi_{j-1}}{2} .. versionadded:: 0.1.1 onlyreal: bool If `True`, only the real part of the reconstructed image will be returned. This saves computation time. padding: tuple of bool Pad the input data to the second next power of 2 before Fourier transforming. This reduces artifacts and speeds up the process for input image sizes that are not powers of 2. The default is padding in x and y: `padding=(True, True)`. For padding only in x-direction (e.g. for cylindrical symmetries), set `padding` to `(True, False)`. To turn off padding, set it to `(False, False)`. padfac: float Increase padding size of the input data. A value greater than one will trigger padding to the second-next power of two. For example, a value of 1.75 will lead to a padded size of 256 for an initial size of 144, whereas it will lead to a padded size of 512 for an initial size of 150. Values geater than 2 are allowed. This parameter may greatly increase memory usage! padval: float The value used for padding. This is important for the Rytov approximation, where an approximat zero in the phase might translate to 2πi due to the unwrapping algorithm. In that case, this value should be a multiple of 2πi. If `padval` is `None`, then the edge values are used for padding (see documentation of :func:`numpy.pad`). intp_order: int between 0 and 5 Order of the interpolation for rotation. See :func:`scipy.ndimage.interpolation.rotate` for details. dtype: dtype object or argument for :func:`numpy.dtype` The data type that is used for calculations (float or double). 
Defaults to `numpy.float_`. num_cores: int The number of cores to use for parallel operations. This value defaults to the number of cores on the system. save_memory: bool Saves memory at the cost of longer computation time. .. versionadded:: 0.1.5 copy: bool Copy input sinogram `uSin` for data processing. If `copy` is set to `False`, then `uSin` will be overridden. .. versionadded:: 0.1.5 count, max_count: multiprocessing.Value or `None` Can be used to monitor the progress of the algorithm. Initially, the value of `max_count.value` is incremented by the total number of steps. At each step, the value of `count.value` is incremented. verbose: int Increment to increase verbosity. Returns ------- f: ndarray of shape (Nx, Ny, Nx), complex if `onlyreal==False` Reconstructed object function :math:`f(\mathbf{r})` as defined by the Helmholtz equation. :math:`f(x,z) = k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)` See Also -------- odt_to_ri: conversion of the object function :math:`f(\mathbf{r})` to refractive index :math:`n(\mathbf{r})` Notes ----- Do not use the parameter `lD` in combination with the Rytov approximation - the propagation is not correctly described. Instead, numerically refocus the sinogram prior to converting it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`) with a numerical focusing algorithm (available in the Python package :py:mod:`nrefocus`). ### Response: #vtb def backpropagate_3d(uSin, angles, res, nm, lD=0, coords=None, weight_angles=True, onlyreal=False, padding=(True, True), padfac=1.75, padval=None, intp_order=2, dtype=None, num_cores=ncores, save_memory=False, copy=True, count=None, max_count=None, verbose=0): r A = angles.size if len(uSin.shape) != 3: raise ValueError("Input data `uSin` must have shape (A,Ny,Nx).") if len(uSin) != A: raise ValueError("`len(angles)` must be equal to `len(uSin)`.") if len(list(padding)) != 2: raise ValueError("`padding` must be boolean tuple of length 2!") if np.array(padding).dtype is not np.dtype(bool): raise ValueError("Parameter `padding` must be boolean tuple.") if coords is not None: raise NotImplementedError("Setting coordinates is not yet supported.") if num_cores > ncores: raise ValueError("`num_cores` must not exceed number " + "of physical cores: {}".format(ncores)) if dtype is None: dtype = np.float_ dtype = np.dtype(dtype) if dtype.name not in ["float32", "float64"]: raise ValueError("dtype must be float32 or float64!") dtype_complex = np.dtype("complex{}".format( 2 * np.int(dtype.name.strip("float")))) ct_dt_map = {np.dtype(np.float32): ctypes.c_float, np.dtype(np.float64): ctypes.c_double } if max_count is not None: max_count.value += A + 2 ne.set_num_threads(num_cores) uSin = np.array(uSin, copy=copy) lny, lnx = uSin.shape[1], uSin.shape[2] ln = lnx orderx = np.int(max(64., 2**np.ceil(np.log(lnx * padfac) / np.log(2)))) ordery = np.int(max(64., 2**np.ceil(np.log(lny * padfac) / np.log(2)))) if padding[0]: padx = orderx - lnx else: padx = 0 if padding[1]: pady = ordery - lny else: pady = 0 padyl = np.int(np.ceil(pady / 2)) padyr = pady - padyl padxl = np.int(np.ceil(padx / 2)) padxr = padx - padxl lNx, lNy = lnx + padx, lny + pady lNz = ln if verbose > 0: print("......Image size (x,y): {}x{}, padded: {}x{}".format( lnx, lny, lNx, lNy)) if weight_angles: weights = util.compute_angle_weights_1d(angles).reshape(-1, 1, 1) uSin *= weights km = (2 * np.pi * nm) / res fx = np.fft.fftfreq(lNx) fy = np.fft.fftfreq(lNy) kx = 2 * np.pi * fx ky = 2 * np.pi * fy dphi0 = 2 * np.pi / A kx = kx.reshape(1, -1) ky 
= ky.reshape(-1, 1) filter_klp = (kx**2 + ky**2 < km**2) M = 1. / km * np.sqrt((km**2 - kx**2 - ky**2) * filter_klp) prefactor = -1j * km / (2 * np.pi) prefactor *= dphi0 prefactor *= np.abs(kx) * filter_klp prefactor *= np.exp(-1j * km * (M-1) * lD) if count is not None: count.value += 1 center = lNz / 2.0 z = np.linspace(-center, center, lNz, endpoint=False) zv = z.reshape(-1, 1, 1) Mp = M.reshape(lNy, lNx) f2_exp_fac = 1j * km * (Mp - 1) if save_memory: pass else: filter2 = ne.evaluate("exp(factor * zv)", local_dict={"factor": f2_exp_fac, "zv": zv}) if count is not None: count.value += 1 if onlyreal: outarr = np.zeros((ln, lny, lnx), dtype=dtype) else: outarr = np.zeros((ln, lny, lnx), dtype=dtype_complex) oneslice = pyfftw.empty_aligned((lNy, lNx), dtype_complex) myfftw_plan = pyfftw.FFTW(oneslice, oneslice, threads=num_cores, flags=["FFTW_ESTIMATE"], axes=(0, 1)) inarr = pyfftw.empty_aligned((lNy, lNx), dtype_complex) myifftw_plan = pyfftw.FFTW(inarr, inarr, threads=num_cores, axes=(0, 1), direction="FFTW_BACKWARD", flags=["FFTW_MEASURE"]) shared_array = mp.RawArray(ct_dt_map[dtype], ln * lny * lnx) arr = np.frombuffer(shared_array, dtype=dtype).reshape(ln, lny, lnx) pool4loop = mp.Pool(processes=num_cores, initializer=_init_worker, initargs=(shared_array, (ln, lny, lnx), dtype)) filtered_proj = np.zeros((ln, lny, lnx), dtype=dtype_complex) for aa in np.arange(A): if padval is None: oneslice[:] = np.pad(uSin[aa], ((padyl, padyr), (padxl, padxr)), mode="edge") else: oneslice[:] = np.pad(uSin[aa], ((padyl, padyr), (padxl, padxr)), mode="linear_ramp", end_values=(padval,)) myfftw_plan.execute() oneslice *= prefactor / (lNx * lNy) for p in range(len(zv)): if save_memory: ne.evaluate("exp(factor * zvp) * projectioni", local_dict={"zvp": zv[p], "projectioni": oneslice, "factor": f2_exp_fac}, out=inarr) else: np.multiply(filter2[p], oneslice, out=inarr) myifftw_plan.execute() filtered_proj[p, :, :] = inarr[padyl:lny+padyl, padxl:lnx+padxl] arr[:] = filtered_proj.real phi0 = np.rad2deg(angles[aa]) if not onlyreal: filtered_proj_imag = filtered_proj.imag _mprotate(phi0, lny, pool4loop, intp_order) outarr.real += arr if not onlyreal: arr[:] = filtered_proj_imag _mprotate(phi0, lny, pool4loop, intp_order) outarr.imag += arr if count is not None: count.value += 1 pool4loop.terminate() pool4loop.join() _cleanup_worker() return outarr
#vtb def replace_config(config, name): global static_stages if static_stages is None: static_stages = PipelineStages() stages = static_stages if in config: path = config[] if not os.path.isabs(path) and config.get(): path = os.path.join(config[], path) try: stages.load_external_stages(config[]) except IOError: return streamcorpus_pipeline if in config: for mod in config[]: try: stages.load_module_stages(mod) except ImportError: return streamcorpus_pipeline else: stages = static_stages new_sub_modules = set(stage for stage in stages.itervalues() if hasattr(stage, )) return NewSubModules(streamcorpus_pipeline, new_sub_modules)
Replace the top-level pipeline configurable object. This investigates a number of sources, including `external_stages_path` and `external_stages_modules` configuration and `streamcorpus_pipeline.stages` entry points, and uses these to find the actual :data:`sub_modules` for :mod:`streamcorpus_pipeline`.
### Input: Replace the top-level pipeline configurable object. This investigates a number of sources, including `external_stages_path` and `external_stages_modules` configuration and `streamcorpus_pipeline.stages` entry points, and uses these to find the actual :data:`sub_modules` for :mod:`streamcorpus_pipeline`. ### Response: #vtb def replace_config(config, name): global static_stages if static_stages is None: static_stages = PipelineStages() stages = static_stages if in config: path = config[] if not os.path.isabs(path) and config.get(): path = os.path.join(config[], path) try: stages.load_external_stages(config[]) except IOError: return streamcorpus_pipeline if in config: for mod in config[]: try: stages.load_module_stages(mod) except ImportError: return streamcorpus_pipeline else: stages = static_stages new_sub_modules = set(stage for stage in stages.itervalues() if hasattr(stage, )) return NewSubModules(streamcorpus_pipeline, new_sub_modules)
#vtb def _ret8(ins): output = _8bit_oper(ins.quad[1]) output.append() output.append( % str(ins.quad[2])) return output
Returns an 8-bit value from a procedure / function
### Input: Returns an 8-bit value from a procedure / function ### Response: #vtb def _ret8(ins): output = _8bit_oper(ins.quad[1]) output.append() output.append( % str(ins.quad[2])) return output
#vtb def children(self, p_todo, p_only_direct=False): children = \ self._depgraph.outgoing_neighbors(hash(p_todo), not p_only_direct) return [self._tododict[child] for child in children]
Returns a list of child todos that the given todo (in)directly depends on.
### Input: Returns a list of child todos that the given todo (in)directly depends on. ### Response: #vtb def children(self, p_todo, p_only_direct=False): children = \ self._depgraph.outgoing_neighbors(hash(p_todo), not p_only_direct) return [self._tododict[child] for child in children]
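An illustrative stand-in for the dependency lookup above, using a plain adjacency dict instead of the todo-graph objects; the task names are invented.

def descendants(graph, node, only_direct=False):
    # graph: dict mapping a node to the nodes it points to (its direct children).
    if only_direct:
        return list(graph.get(node, []))
    seen, stack = [], list(graph.get(node, []))
    while stack:
        child = stack.pop()
        if child not in seen:
            seen.append(child)
            stack.extend(graph.get(child, []))
    return seen

deps = {"release": ["build", "docs"], "build": ["tests"], "docs": [], "tests": []}
print(descendants(deps, "release"))        # ['docs', 'build', 'tests']
print(descendants(deps, "release", True))  # ['build', 'docs']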
#vtb def is_businessdate(in_date): if not isinstance(in_date, BaseDate): try: in_date = BusinessDate(in_date) except: return False y, m, d, = in_date.to_ymd() return is_valid_ymd(y, m, d)
checks whether the provided date is a date :param BusinessDate, int or float in_date: :return bool:
### Input: checks whether the provided date is a date :param BusinessDate, int or float in_date: :return bool: ### Response: #vtb def is_businessdate(in_date): if not isinstance(in_date, BaseDate): try: in_date = BusinessDate(in_date) except: return False y, m, d, = in_date.to_ymd() return is_valid_ymd(y, m, d)
#vtb def convert(self, vroot, entry_variables): self.graph_info = GraphInfo(vroot) self.entry_variables = entry_variables cnt = 0 with nn.parameter_scope(self.name): for t, func in enumerate(self.graph_info.funcs): if func.name == "BatchNormalization": bn_func = func if bn_func.info.args["batch_stat"] == False: o = self._bn_linear_conversion(bn_func, cnt) cnt += 1 continue o = self._identity_conversion(func) self.end_variable = o return self.end_variable
All functions are replaced with the same `new` function. Args: vroot (:obj:`Variable`): NNabla Variable entry_variables (:obj:`Variable`): Entry variable from which the conversion starts.
### Input: All functions are replaced with the same `new` function. Args: vroot (:obj:`Variable`): NNabla Variable entry_variables (:obj:`Variable`): Entry variable from which the conversion starts. ### Response: #vtb def convert(self, vroot, entry_variables): self.graph_info = GraphInfo(vroot) self.entry_variables = entry_variables cnt = 0 with nn.parameter_scope(self.name): for t, func in enumerate(self.graph_info.funcs): if func.name == "BatchNormalization": bn_func = func if bn_func.info.args["batch_stat"] == False: o = self._bn_linear_conversion(bn_func, cnt) cnt += 1 continue o = self._identity_conversion(func) self.end_variable = o return self.end_variable
#vtb def get_init_kwargs(self): init_kwargs = {} for k in self.init_kwargs: if k in self.core_property_set: init_kwargs[k] = getattr(self, k) elif k in self: init_kwargs[k] = self[k] return init_kwargs
Generates keyword arguments for creating a new Docker client instance. :return: Keyword arguments as defined through this configuration. :rtype: dict
### Input: Generates keyword arguments for creating a new Docker client instance. :return: Keyword arguments as defined through this configuration. :rtype: dict ### Response: #vtb def get_init_kwargs(self): init_kwargs = {} for k in self.init_kwargs: if k in self.core_property_set: init_kwargs[k] = getattr(self, k) elif k in self: init_kwargs[k] = self[k] return init_kwargs
#vtb def current(self, value): current = min(max(self._min, value), self._max) self._current = current if current > self._stop : self._stop = current self._start = current-self._width elif current < self._start : self._start = current self._stop = current + self._width if abs(self._start - self._min) <= self._sticky_lenght : self._start = self._min if abs(self._stop - self._max) <= self._sticky_lenght : self._stop = self._max
set current cursor position
### Input: set current cursor position ### Response: #vtb def current(self, value): current = min(max(self._min, value), self._max) self._current = current if current > self._stop : self._stop = current self._start = current-self._width elif current < self._start : self._start = current self._stop = current + self._width if abs(self._start - self._min) <= self._sticky_lenght : self._start = self._min if abs(self._stop - self._max) <= self._sticky_lenght : self._stop = self._max
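A minimal, framework-free sketch of the same clamp-and-slide behaviour for a cursor inside a viewing window; the attribute names are simplified stand-ins for the ones above.

class Cursor:
    def __init__(self, lo, hi, width, sticky=2):
        self.lo, self.hi, self.width, self.sticky = lo, hi, width, sticky
        self.start, self.stop = lo, lo + width
        self.pos = lo

    def move(self, value):
        # Clamp to [lo, hi], slide the window so the cursor stays visible,
        # and snap the window to the extremes when close enough ("sticky" edges).
        self.pos = min(max(self.lo, value), self.hi)
        if self.pos > self.stop:
            self.stop = self.pos
            self.start = self.pos - self.width
        elif self.pos < self.start:
            self.start = self.pos
            self.stop = self.pos + self.width
        if abs(self.start - self.lo) <= self.sticky:
            self.start = self.lo
        if abs(self.stop - self.hi) <= self.sticky:
            self.stop = self.hi

c = Cursor(0, 100, 10)
c.move(57)
print(c.start, c.stop, c.pos)  # 47 57 57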
#vtb def _release_lock(self): if not self._has_lock(): return lfp = self._lock_file_path() try: if os.name == : os.chmod(lfp, 0777) os.remove(lfp) except OSError: pass self._owns_lock = False
Release our lock if we have one
### Input: Release our lock if we have one ### Response: #vtb def _release_lock(self): if not self._has_lock(): return lfp = self._lock_file_path() try: if os.name == : os.chmod(lfp, 0777) os.remove(lfp) except OSError: pass self._owns_lock = False
#vtb def trace(msg): if os.environ.get() == : print(, msg, file=sys.stderr)
Print a trace message to stderr if environment variable is set.
### Input: Print a trace message to stderr if environment variable is set. ### Response: #vtb def trace(msg): if os.environ.get() == : print(, msg, file=sys.stderr)
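A small sketch of the same environment-gated logging idea; the variable name and the value it is compared against are assumptions, since the original literals are not shown.

import os, sys

def trace(msg):
    # Only emit the message when the (hypothetical) APP_TRACE variable is set to "1".
    if os.environ.get("APP_TRACE") == "1":
        print("trace:", msg, file=sys.stderr)

os.environ["APP_TRACE"] = "1"
trace("loading configuration")  # printed to stderr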
#vtb def random_variants( count, genome_name="GRCh38", deletions=True, insertions=True, random_seed=None): rng = random.Random(random_seed) ensembl = genome_for_reference_name(genome_name) if ensembl in _transcript_ids_cache: transcript_ids = _transcript_ids_cache[ensembl] else: transcript_ids = ensembl.transcript_ids() _transcript_ids_cache[ensembl] = transcript_ids variants = [] for _ in range(count * 100): if len(variants) < count: transcript_id = rng.choice(transcript_ids) transcript = ensembl.transcript_by_id(transcript_id) if not transcript.complete: continue exon = rng.choice(transcript.exons) base1_genomic_position = rng.randint(exon.start, exon.end) transcript_offset = transcript.spliced_offset(base1_genomic_position) seq = transcript.sequence ref = str(seq[transcript_offset]) if transcript.on_backward_strand: ref = reverse_complement(ref) alt_nucleotides = [x for x in STANDARD_NUCLEOTIDES if x != ref] if insertions: nucleotide_pairs = [ x + y for x in STANDARD_NUCLEOTIDES for y in STANDARD_NUCLEOTIDES ] alt_nucleotides.extend(nucleotide_pairs) if deletions: alt_nucleotides.append("") alt = rng.choice(alt_nucleotides) variant = Variant( transcript.contig, base1_genomic_position, ref=ref, alt=alt, ensembl=ensembl) variants.append(variant) else: return VariantCollection(variants) raise ValueError( ("Unable to generate %d random variants, " "there may be a problem with PyEnsembl") % count)
Generate a VariantCollection with random variants that overlap at least one complete coding transcript.
### Input: Generate a VariantCollection with random variants that overlap at least one complete coding transcript. ### Response: #vtb def random_variants( count, genome_name="GRCh38", deletions=True, insertions=True, random_seed=None): rng = random.Random(random_seed) ensembl = genome_for_reference_name(genome_name) if ensembl in _transcript_ids_cache: transcript_ids = _transcript_ids_cache[ensembl] else: transcript_ids = ensembl.transcript_ids() _transcript_ids_cache[ensembl] = transcript_ids variants = [] for _ in range(count * 100): if len(variants) < count: transcript_id = rng.choice(transcript_ids) transcript = ensembl.transcript_by_id(transcript_id) if not transcript.complete: continue exon = rng.choice(transcript.exons) base1_genomic_position = rng.randint(exon.start, exon.end) transcript_offset = transcript.spliced_offset(base1_genomic_position) seq = transcript.sequence ref = str(seq[transcript_offset]) if transcript.on_backward_strand: ref = reverse_complement(ref) alt_nucleotides = [x for x in STANDARD_NUCLEOTIDES if x != ref] if insertions: nucleotide_pairs = [ x + y for x in STANDARD_NUCLEOTIDES for y in STANDARD_NUCLEOTIDES ] alt_nucleotides.extend(nucleotide_pairs) if deletions: alt_nucleotides.append("") alt = rng.choice(alt_nucleotides) variant = Variant( transcript.contig, base1_genomic_position, ref=ref, alt=alt, ensembl=ensembl) variants.append(variant) else: return VariantCollection(variants) raise ValueError( ("Unable to generate %d random variants, " "there may be a problem with PyEnsembl") % count)
#vtb def check_crystal_equivalence(crystal_a, crystal_b): cryst_a = spglib.get_symmetry_dataset(ase_to_spgcell(crystal_a), symprec=1e-5, angle_tolerance=-1.0, hall_number=0) cryst_b = spglib.get_symmetry_dataset(ase_to_spgcell(crystal_b), symprec=1e-5, angle_tolerance=-1.0, hall_number=0) samecell = np.allclose(cryst_a[], cryst_b[], atol=1e-5) samenatoms = len(cryst_a[]) == len(cryst_b[]) samespg = cryst_a[] == cryst_b[] def test_rotations_translations(cryst_a, cryst_b, repeat): cell = cryst_a[] pristine = crystal(, [(0, 0., 0.)], spacegroup=int(cryst_a[]), cellpar=[cell[0]/repeat[0], cell[1]/repeat[1], cell[2]/repeat[2]]).repeat(repeat) sym_set_p = spglib.get_symmetry_dataset(ase_to_spgcell(pristine), symprec=1e-5, angle_tolerance=-1.0, hall_number=0) for _,trans in enumerate(zip(sym_set_p[], sym_set_p[])): pnew=(np.matmul(trans[0],cryst_a[].T).T + trans[1]) % 1.0 fulln = np.concatenate([cryst_a[][:, None], pnew], axis=1) fullb = np.concatenate([cryst_b[][:, None], cryst_b[]], axis=1) sorted_n = np.array(sorted([ list(row) for row in list(fulln) ])) sorted_b = np.array(sorted([ list(row) for row in list(fullb) ])) if np.allclose(sorted_n, sorted_b, atol=1e-5): return True return False if samecell and samenatoms and samespg: cell = cryst_a[] rng1 = range(1, int(norm(cell[0])/2.)) rng2 = range(1, int(norm(cell[1])/2.)) rng3 = range(1, int(norm(cell[2])/2.)) for repeat in itertools.product(rng1, rng2, rng3): if test_rotations_translations(cryst_a, cryst_b, repeat): return True return False
Function that identifies whether two crystals are equivalent
### Input: Function that identifies whether two crystals are equivalent ### Response: #vtb def check_crystal_equivalence(crystal_a, crystal_b): cryst_a = spglib.get_symmetry_dataset(ase_to_spgcell(crystal_a), symprec=1e-5, angle_tolerance=-1.0, hall_number=0) cryst_b = spglib.get_symmetry_dataset(ase_to_spgcell(crystal_b), symprec=1e-5, angle_tolerance=-1.0, hall_number=0) samecell = np.allclose(cryst_a[], cryst_b[], atol=1e-5) samenatoms = len(cryst_a[]) == len(cryst_b[]) samespg = cryst_a[] == cryst_b[] def test_rotations_translations(cryst_a, cryst_b, repeat): cell = cryst_a[] pristine = crystal(, [(0, 0., 0.)], spacegroup=int(cryst_a[]), cellpar=[cell[0]/repeat[0], cell[1]/repeat[1], cell[2]/repeat[2]]).repeat(repeat) sym_set_p = spglib.get_symmetry_dataset(ase_to_spgcell(pristine), symprec=1e-5, angle_tolerance=-1.0, hall_number=0) for _,trans in enumerate(zip(sym_set_p[], sym_set_p[])): pnew=(np.matmul(trans[0],cryst_a[].T).T + trans[1]) % 1.0 fulln = np.concatenate([cryst_a[][:, None], pnew], axis=1) fullb = np.concatenate([cryst_b[][:, None], cryst_b[]], axis=1) sorted_n = np.array(sorted([ list(row) for row in list(fulln) ])) sorted_b = np.array(sorted([ list(row) for row in list(fullb) ])) if np.allclose(sorted_n, sorted_b, atol=1e-5): return True return False if samecell and samenatoms and samespg: cell = cryst_a[] rng1 = range(1, int(norm(cell[0])/2.)) rng2 = range(1, int(norm(cell[1])/2.)) rng3 = range(1, int(norm(cell[2])/2.)) for repeat in itertools.product(rng1, rng2, rng3): if test_rotations_translations(cryst_a, cryst_b, repeat): return True return False
#vtb def simple_moving_matrix(x, n=10): if x.ndim > 1 and len(x[0]) > 1: x = np.average(x, axis=1) h = n // 2 o = 0 if h * 2 == n else 1 xx = [] for i in range(h, len(x) - h): xx.append(x[i-h:i+h+o]) return np.array(xx) # note: floor division so the slice bounds stay integers
Create a simple moving matrix. Parameters ---------- x : ndarray A numpy array n : integer The number of sample points used to make the average Returns ------- ndarray An n x n numpy array which will be useful for calculating the confidence interval of the simple moving average
### Input: Create a simple moving matrix. Parameters ---------- x : ndarray A numpy array n : integer The number of sample points used to make the average Returns ------- ndarray An n x n numpy array which will be useful for calculating the confidence interval of the simple moving average ### Response: #vtb def simple_moving_matrix(x, n=10): if x.ndim > 1 and len(x[0]) > 1: x = np.average(x, axis=1) h = n // 2 o = 0 if h * 2 == n else 1 xx = [] for i in range(h, len(x) - h): xx.append(x[i-h:i+h+o]) return np.array(xx) # note: floor division so the slice bounds stay integers
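A short usage sketch for simple_moving_matrix, assuming numpy is imported as np and the floor-division fix above; the confidence-interval formula here is illustrative, not taken from the source.
import numpy as np
x = np.sin(np.linspace(0, 4 * np.pi, 200)) + 0.1 * np.random.randn(200)
M = simple_moving_matrix(x, n=10)            # each row holds one 10-sample window
sma = M.mean(axis=1)                         # simple moving average per window
half_width = 1.96 * M.std(axis=1) / np.sqrt(M.shape[1])   # rough 95% half-width per window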
#vtb def char(i): i = ord(i) if i not in font: return [(0,)] * 8 return [(ord(row),) for row in font[i].decode()]
Get image data for the character `i` (a one character string). Returned as a list of rows. Each row is a tuple containing the packed pixels.
### Input: Get image data for the character `i` (a one character string). Returned as a list of rows. Each row is a tuple containing the packed pixels. ### Response: #vtb def char(i): i = ord(i) if i not in font: return [(0,)] * 8 return [(ord(row),) for row in font[i].decode()]
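A small rendering sketch for char; it assumes the module-level font dict is available and that each row packs 8 pixels, most significant bit first (neither assumption is stated in the source).
rows = char("A")                             # 8 rows, each a 1-tuple of packed pixels
for (packed,) in rows:
    print("".join("#" if packed & (1 << (7 - bit)) else "." for bit in range(8)))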
#vtb def casefold_with_i_dots(text): text = unicodedata.normalize('NFC', text).replace('İ', 'i').replace('I', 'ı') return text.casefold() # note: elided literals restored as 'NFC', 'İ'->'i', 'I'->'ı', inferred from the docstring's Turkish casing rule
Convert capital I's and capital dotted İ's to lowercase in the way that's appropriate for Turkish and related languages, then case-fold the rest of the letters.
### Input: Convert capital I's and capital dotted İ's to lowercase in the way that's appropriate for Turkish and related languages, then case-fold the rest of the letters. ### Response: #vtb def casefold_with_i_dots(text): text = unicodedata.normalize('NFC', text).replace('İ', 'i').replace('I', 'ı') return text.casefold() # note: elided literals restored as 'NFC', 'İ'->'i', 'I'->'ı', inferred from the docstring's Turkish casing rule
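A quick check of the Turkish-aware folding, assuming the restored 'İ'->'i' and 'I'->'ı' mapping above:
print(casefold_with_i_dots("DİYARBAKIR"))    # -> 'diyarbakır'
print(casefold_with_i_dots("Istanbul"))      # -> 'ıstanbul' (dotless ı, per the Turkish rule)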
#vtb def query_publishers(self, publisher_query): content = self._serialize.body(publisher_query, ) response = self._send(http_method=, location_id=, version=, content=content) return self._deserialize(, response)
QueryPublishers. [Preview API] :param :class:`<PublisherQuery> <azure.devops.v5_0.gallery.models.PublisherQuery>` publisher_query: :rtype: :class:`<PublisherQueryResult> <azure.devops.v5_0.gallery.models.PublisherQueryResult>`
### Input: QueryPublishers. [Preview API] :param :class:`<PublisherQuery> <azure.devops.v5_0.gallery.models.PublisherQuery>` publisher_query: :rtype: :class:`<PublisherQueryResult> <azure.devops.v5_0.gallery.models.PublisherQueryResult>` ### Response: #vtb def query_publishers(self, publisher_query): content = self._serialize.body(publisher_query, ) response = self._send(http_method=, location_id=, version=, content=content) return self._deserialize(, response)
#vtb def addFilter(self, *lstFilters, **dctFilters) : "add a new filter to the query" dstF = {} if len(lstFilters) > 0 : if type(lstFilters[0]) is types.DictType : dstF = lstFilters[0] lstFilters = lstFilters[1:] if len(dctFilters) > 0 : dstF = dict(dstF, **dctFilters) filts = {} for k, v in dstF.iteritems() : sk = k.split() if len(sk) == 2 : operator = sk[-1].strip().upper() if operator not in self.operators : raise ValueError( % operator) kk = % (self.rabaClass.__name__, k) elif len(sk) == 1 : operator = "=" kk = % (self.rabaClass.__name__, k) else : raise ValueError( % k) if isRabaObject(v) : vv = v.getJsonEncoding() else : vv = v if sk[0].find() > -1 : kk = self._parseJoint(sk[0], operator) filts[kk] = vv for lt in lstFilters : for l in lt : match = self.fieldPattern.match(l) if match == None : raise ValueError("RabaQuery Error: Invalid filter " % l) field = match.group(1) operator = match.group(2) value = match.group(4) if field.find() > -1 : joink = self._parseJoint(field, operator, value) filts[joink] = value else : filts[ %(self.rabaClass.__name__, field, operator)] = value self.filters.append(filts)
add a new filter to the query
### Input: add a new filter to the query ### Response: #vtb def addFilter(self, *lstFilters, **dctFilters) : "add a new filter to the query" dstF = {} if len(lstFilters) > 0 : if type(lstFilters[0]) is types.DictType : dstF = lstFilters[0] lstFilters = lstFilters[1:] if len(dctFilters) > 0 : dstF = dict(dstF, **dctFilters) filts = {} for k, v in dstF.iteritems() : sk = k.split() if len(sk) == 2 : operator = sk[-1].strip().upper() if operator not in self.operators : raise ValueError( % operator) kk = % (self.rabaClass.__name__, k) elif len(sk) == 1 : operator = "=" kk = % (self.rabaClass.__name__, k) else : raise ValueError( % k) if isRabaObject(v) : vv = v.getJsonEncoding() else : vv = v if sk[0].find() > -1 : kk = self._parseJoint(sk[0], operator) filts[kk] = vv for lt in lstFilters : for l in lt : match = self.fieldPattern.match(l) if match == None : raise ValueError("RabaQuery Error: Invalid filter " % l) field = match.group(1) operator = match.group(2) value = match.group(4) if field.find() > -1 : joink = self._parseJoint(field, operator, value) filts[joink] = value else : filts[ %(self.rabaClass.__name__, field, operator)] = value self.filters.append(filts)
#vtb def trigger_actions(self, subsystem): for py3_module, trigger_action in self.udev_consumers[subsystem]: if trigger_action in ON_TRIGGER_ACTIONS: self.py3_wrapper.log( "%s udev event, refresh consumer %s" % (subsystem, py3_module.module_full_name) ) py3_module.force_update()
Refresh all modules which subscribed to the given subsystem.
### Input: Refresh all modules which subscribed to the given subsystem. ### Response: #vtb def trigger_actions(self, subsystem): for py3_module, trigger_action in self.udev_consumers[subsystem]: if trigger_action in ON_TRIGGER_ACTIONS: self.py3_wrapper.log( "%s udev event, refresh consumer %s" % (subsystem, py3_module.module_full_name) ) py3_module.force_update()
#vtb def post_status(self, body="", id="", parentid="", stashid=""): if self.standard_grant_type != "authorization_code": raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.") response = self._req(, post_data={ "body":body, "id":id, "parentid":parentid, "stashid":stashid }) return response[]
Post a status :param body: The body of the status :param id: The id of the object you wish to share :param parentid: The parentid of the object you wish to share :param stashid: The stashid of the object you wish to add to the status
### Input: Post a status :param body: The body of the status :param id: The id of the object you wish to share :param parentid: The parentid of the object you wish to share :param stashid: The stashid of the object you wish to add to the status ### Response: #vtb def post_status(self, body="", id="", parentid="", stashid=""): if self.standard_grant_type != "authorization_code": raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.") response = self._req(, post_data={ "body":body, "id":id, "parentid":parentid, "stashid":stashid }) return response[]
#vtb def make_clusters(span_tree, cut_value): iv0, iv1 = span_tree.nonzero() match_dict = {} for i0, i1 in zip(iv0, iv1): d = span_tree[i0, i1] if d > cut_value: continue imin = int(min(i0, i1)) imax = int(max(i0, i1)) if imin in match_dict: match_dict[imin][imax] = True else: match_dict[imin] = {imax: True} working = True while working: working = False rev_dict = make_rev_dict_unique(match_dict) k_sort = rev_dict.keys() k_sort.sort() for k in k_sort: v = rev_dict[k] if len(v) > 1: working = True v_sort = v.keys() v_sort.sort() cluster_idx = v_sort[0] for vv in v_sort[1:]: try: to_merge = match_dict.pop(vv) except: continue try: match_dict[cluster_idx].update(to_merge) match_dict[cluster_idx][vv] = True except: continue try: match_dict[cluster_idx].pop(cluster_idx) except: pass cdict = {} for k, v in match_dict.items(): cdict[k] = v.keys() rdict = make_reverse_dict(cdict) return cdict, rdict
Find clusters from the spanning tree Parameters ---------- span_tree : a sparse nsrcs x nsrcs array Filled with zeros except for the active edges, which are filled with the edge measures (either distances or sigmas) cut_value : float Value used to form the clusters. All links with measures above this value will be cut. Returns ------- dict(int:[int,...]) A dictionary of clusters. Each cluster is a source index and the list of other sources in the cluster.
### Input: Find clusters from the spanning tree Parameters ---------- span_tree : a sparse nsrcs x nsrcs array Filled with zeros except for the active edges, which are filled with the edge measures (either distances or sigmas) cut_value : float Value used to form the clusters. All links with measures above this value will be cut. Returns ------- dict(int:[int,...]) A dictionary of clusters. Each cluster is a source index and the list of other sources in the cluster. ### Response: #vtb def make_clusters(span_tree, cut_value): iv0, iv1 = span_tree.nonzero() match_dict = {} for i0, i1 in zip(iv0, iv1): d = span_tree[i0, i1] if d > cut_value: continue imin = int(min(i0, i1)) imax = int(max(i0, i1)) if imin in match_dict: match_dict[imin][imax] = True else: match_dict[imin] = {imax: True} working = True while working: working = False rev_dict = make_rev_dict_unique(match_dict) k_sort = rev_dict.keys() k_sort.sort() for k in k_sort: v = rev_dict[k] if len(v) > 1: working = True v_sort = v.keys() v_sort.sort() cluster_idx = v_sort[0] for vv in v_sort[1:]: try: to_merge = match_dict.pop(vv) except: continue try: match_dict[cluster_idx].update(to_merge) match_dict[cluster_idx][vv] = True except: continue try: match_dict[cluster_idx].pop(cluster_idx) except: pass cdict = {} for k, v in match_dict.items(): cdict[k] = v.keys() rdict = make_reverse_dict(cdict) return cdict, rdict
#vtb def to_netcdf(dataset, path_or_file=None, mode=, format=None, group=None, engine=None, encoding=None, unlimited_dims=None, compute=True, multifile=False): if isinstance(path_or_file, Path): path_or_file = str(path_or_file) if encoding is None: encoding = {} if path_or_file is None: if engine is None: engine = elif engine != : raise ValueError( "or engine= is supported" % engine) if not compute: raise NotImplementedError( ) elif isinstance(path_or_file, str): if engine is None: engine = _get_default_engine(path_or_file) path_or_file = _normalize_path(path_or_file) else: engine = _validate_dataset_names(dataset) _validate_attrs(dataset) try: store_open = WRITEABLE_STORES[engine] except KeyError: raise ValueError( % engine) if format is not None: format = format.upper() scheduler = _get_scheduler() have_chunks = any(v.chunks for v in dataset.variables.values()) autoclose = have_chunks and scheduler in [, ] if autoclose and engine == : raise NotImplementedError("Writing netCDF files with the %s backend " "is not currently supported with daskunlimited_dims', None) if isinstance(unlimited_dims, str): unlimited_dims = [unlimited_dims] writer = ArrayWriter() try: dump_to_store(dataset, store, writer, encoding=encoding, unlimited_dims=unlimited_dims) if autoclose: store.close() if multifile: return writer, store writes = writer.sync(compute=compute) if path_or_file is None: store.sync() return target.getvalue() finally: if not multifile and compute: store.close() if not compute: import dask return dask.delayed(_finalize_store)(writes, store)
This function creates an appropriate datastore for writing a dataset to disk as a netCDF file See `Dataset.to_netcdf` for full API docs. The ``multifile`` argument is only for the private use of save_mfdataset.
### Input: This function creates an appropriate datastore for writing a dataset to disk as a netCDF file See `Dataset.to_netcdf` for full API docs. The ``multifile`` argument is only for the private use of save_mfdataset. ### Response: #vtb def to_netcdf(dataset, path_or_file=None, mode=, format=None, group=None, engine=None, encoding=None, unlimited_dims=None, compute=True, multifile=False): if isinstance(path_or_file, Path): path_or_file = str(path_or_file) if encoding is None: encoding = {} if path_or_file is None: if engine is None: engine = elif engine != : raise ValueError( "or engine= is supported" % engine) if not compute: raise NotImplementedError( ) elif isinstance(path_or_file, str): if engine is None: engine = _get_default_engine(path_or_file) path_or_file = _normalize_path(path_or_file) else: engine = _validate_dataset_names(dataset) _validate_attrs(dataset) try: store_open = WRITEABLE_STORES[engine] except KeyError: raise ValueError( % engine) if format is not None: format = format.upper() scheduler = _get_scheduler() have_chunks = any(v.chunks for v in dataset.variables.values()) autoclose = have_chunks and scheduler in [, ] if autoclose and engine == : raise NotImplementedError("Writing netCDF files with the %s backend " "is not currently supported with daskunlimited_dims', None) if isinstance(unlimited_dims, str): unlimited_dims = [unlimited_dims] writer = ArrayWriter() try: dump_to_store(dataset, store, writer, encoding=encoding, unlimited_dims=unlimited_dims) if autoclose: store.close() if multifile: return writer, store writes = writer.sync(compute=compute) if path_or_file is None: store.sync() return target.getvalue() finally: if not multifile and compute: store.close() if not compute: import dask return dask.delayed(_finalize_store)(writes, store)
#vtb def interpret_header(self): if in self.header: self.date = self.header[] elif in self.header: self.date = self.header[] else: raise Exception("Image does not have a DATE_OBS or DATE-OBS field") self.cy, self.cx = self.header[], self.header[] sun_radius_angular = sun.solar_semidiameter_angular_size(t=time.parse_time(self.date)).arcsec arcsec_per_pixel = self.header[] self.sun_radius_pixel = (sun_radius_angular / arcsec_per_pixel)
Read pertinent information from the image headers, especially the location and radius of the Sun, to calculate the default thematic map :return: sets self.date, self.cy, self.cx, and self.sun_radius_pixel
### Input: Read pertinent information from the image headers, especially the location and radius of the Sun, to calculate the default thematic map :return: sets self.date, self.cy, self.cx, and self.sun_radius_pixel ### Response: #vtb def interpret_header(self): if in self.header: self.date = self.header[] elif in self.header: self.date = self.header[] else: raise Exception("Image does not have a DATE_OBS or DATE-OBS field") self.cy, self.cx = self.header[], self.header[] sun_radius_angular = sun.solar_semidiameter_angular_size(t=time.parse_time(self.date)).arcsec arcsec_per_pixel = self.header[] self.sun_radius_pixel = (sun_radius_angular / arcsec_per_pixel)
#vtb def _computeStatus(self, dfile, service): if service: if not dfile[].has_key(service): return self.ST_UNTRACKED else: return dfile[][service][] first_service_key=dfile[].keys()[0] first_status=dfile[][first_service_key][] all_status_match=True for service in dfile[]: if dfile[][service][]!=first_status: return self.ST_COMPLICATED return first_status
Computes the status for a file. If more than one service handles the file and their statuses differ, it will place a 'C' (for complicated); otherwise, if the status matches across all services, it will place that status.
### Input: Computes the status for a file. If more than one service handles the file and their statuses differ, it will place a 'C' (for complicated); otherwise, if the status matches across all services, it will place that status. ### Response: #vtb def _computeStatus(self, dfile, service): if service: if not dfile[].has_key(service): return self.ST_UNTRACKED else: return dfile[][service][] first_service_key=dfile[].keys()[0] first_status=dfile[][first_service_key][] all_status_match=True for service in dfile[]: if dfile[][service][]!=first_status: return self.ST_COMPLICATED return first_status
#vtb def _handle_state_change_msg(self, new_helper): assert self.my_pplan_helper is not None assert self.my_instance is not None and self.my_instance.py_class is not None if self.my_pplan_helper.get_topology_state() != new_helper.get_topology_state(): self.my_pplan_helper = new_helper if new_helper.is_topology_running(): if not self.is_instance_started: self.start_instance_if_possible() self.my_instance.py_class.invoke_activate() elif new_helper.is_topology_paused(): self.my_instance.py_class.invoke_deactivate() else: raise RuntimeError("Unexpected TopologyState update: %s" % new_helper.get_topology_state()) else: Log.info("Topology state remains the same.")
Called when state change is commanded by stream manager
### Input: Called when state change is commanded by stream manager ### Response: #vtb def _handle_state_change_msg(self, new_helper): assert self.my_pplan_helper is not None assert self.my_instance is not None and self.my_instance.py_class is not None if self.my_pplan_helper.get_topology_state() != new_helper.get_topology_state(): self.my_pplan_helper = new_helper if new_helper.is_topology_running(): if not self.is_instance_started: self.start_instance_if_possible() self.my_instance.py_class.invoke_activate() elif new_helper.is_topology_paused(): self.my_instance.py_class.invoke_deactivate() else: raise RuntimeError("Unexpected TopologyState update: %s" % new_helper.get_topology_state()) else: Log.info("Topology state remains the same.")
#vtb def is_valid_group(group_name, nova_creds): valid_groups = [] for key, value in nova_creds.items(): supernova_groups = value.get(, []) if hasattr(supernova_groups, ): supernova_groups = [supernova_groups] valid_groups.extend(supernova_groups) valid_groups.append() if group_name in valid_groups: return True else: return False
Checks to see if the configuration file contains a SUPERNOVA_GROUP configuration option.
### Input: Checks to see if the configuration file contains a SUPERNOVA_GROUP configuration option. ### Response: #vtb def is_valid_group(group_name, nova_creds): valid_groups = [] for key, value in nova_creds.items(): supernova_groups = value.get(, []) if hasattr(supernova_groups, ): supernova_groups = [supernova_groups] valid_groups.extend(supernova_groups) valid_groups.append() if group_name in valid_groups: return True else: return False
#vtb def _draw_fold_indicator(self, top, mouse_over, collapsed, painter): rect = QtCore.QRect(0, top, self.sizeHint().width(), self.sizeHint().height()) if self._native: if os.environ[].lower() not in PYQT5_API: opt = QtGui.QStyleOptionViewItemV2() else: opt = QtWidgets.QStyleOptionViewItem() opt.rect = rect opt.state = (QtWidgets.QStyle.State_Active | QtWidgets.QStyle.State_Item | QtWidgets.QStyle.State_Children) if not collapsed: opt.state |= QtWidgets.QStyle.State_Open if mouse_over: opt.state |= (QtWidgets.QStyle.State_MouseOver | QtWidgets.QStyle.State_Enabled | QtWidgets.QStyle.State_Selected) opt.palette.setBrush(QtGui.QPalette.Window, self.palette().highlight()) opt.rect.translate(-2, 0) self.style().drawPrimitive(QtWidgets.QStyle.PE_IndicatorBranch, opt, painter, self) else: index = 0 if not collapsed: index = 2 if mouse_over: index += 1 QtGui.QIcon(self._custom_indicators[index]).paint(painter, rect)
Draw the fold indicator/trigger (arrow). :param top: Top position :param mouse_over: Whether the mouse is over the indicator :param collapsed: Whether the trigger is collapsed or not. :param painter: QPainter
### Input: Draw the fold indicator/trigger (arrow). :param top: Top position :param mouse_over: Whether the mouse is over the indicator :param collapsed: Whether the trigger is collapsed or not. :param painter: QPainter ### Response: #vtb def _draw_fold_indicator(self, top, mouse_over, collapsed, painter): rect = QtCore.QRect(0, top, self.sizeHint().width(), self.sizeHint().height()) if self._native: if os.environ[].lower() not in PYQT5_API: opt = QtGui.QStyleOptionViewItemV2() else: opt = QtWidgets.QStyleOptionViewItem() opt.rect = rect opt.state = (QtWidgets.QStyle.State_Active | QtWidgets.QStyle.State_Item | QtWidgets.QStyle.State_Children) if not collapsed: opt.state |= QtWidgets.QStyle.State_Open if mouse_over: opt.state |= (QtWidgets.QStyle.State_MouseOver | QtWidgets.QStyle.State_Enabled | QtWidgets.QStyle.State_Selected) opt.palette.setBrush(QtGui.QPalette.Window, self.palette().highlight()) opt.rect.translate(-2, 0) self.style().drawPrimitive(QtWidgets.QStyle.PE_IndicatorBranch, opt, painter, self) else: index = 0 if not collapsed: index = 2 if mouse_over: index += 1 QtGui.QIcon(self._custom_indicators[index]).paint(painter, rect)
#vtb def get_spark_context(conf=None): if hasattr(SparkContext, "getOrCreate"): with SparkContext._lock: if SparkContext._active_spark_context is None: spark_conf = create_spark_conf() if conf is None else conf return SparkContext.getOrCreate(spark_conf) else: return SparkContext.getOrCreate() else: if SparkContext._active_spark_context is None: spark_conf = create_spark_conf() if conf is None else conf return SparkContext(conf=spark_conf) else: return SparkContext._active_spark_context
Get the current active spark context and create one if no active instance :param conf: combining bigdl configs into spark conf :return: SparkContext
### Input: Get the current active spark context and create one if no active instance :param conf: combining bigdl configs into spark conf :return: SparkContext ### Response: #vtb def get_spark_context(conf=None): if hasattr(SparkContext, "getOrCreate"): with SparkContext._lock: if SparkContext._active_spark_context is None: spark_conf = create_spark_conf() if conf is None else conf return SparkContext.getOrCreate(spark_conf) else: return SparkContext.getOrCreate() else: if SparkContext._active_spark_context is None: spark_conf = create_spark_conf() if conf is None else conf return SparkContext(conf=spark_conf) else: return SparkContext._active_spark_context
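A call sketch for get_spark_context; it assumes pyspark is installed and that create_spark_conf is importable from the surrounding module, and the app name and master values are illustrative.
from pyspark import SparkConf
conf = SparkConf().setAppName("bigdl-demo").setMaster("local[2]")
sc = get_spark_context(conf)                 # reuses an active SparkContext if one exists, else creates one
print(sc.master)                             # -> local[2]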
#vtb def get_distutils_display_options(): short_display_opts = set( + o[1] for o in Distribution.display_options if o[1]) long_display_opts = set( + o[0] for o in Distribution.display_options) short_display_opts.add() long_display_opts.add() display_commands = set([ , , , , , ]) return short_display_opts.union(long_display_opts.union(display_commands))
Returns a set of all the distutils display options in their long and short forms. These are the setup.py arguments such as --name or --version which print the project's metadata and then exit. Returns ------- opts : set The long and short form display option arguments, including the - or --
### Input: Returns a set of all the distutils display options in their long and short forms. These are the setup.py arguments such as --name or --version which print the project's metadata and then exit. Returns ------- opts : set The long and short form display option arguments, including the - or -- ### Response: #vtb def get_distutils_display_options(): short_display_opts = set( + o[1] for o in Distribution.display_options if o[1]) long_display_opts = set( + o[0] for o in Distribution.display_options) short_display_opts.add() long_display_opts.add() display_commands = set([ , , , , , ]) return short_display_opts.union(long_display_opts.union(display_commands))
#vtb def cluster(x, cluster=, n_clusters=3, ndims=None, format_data=True): if cluster == None: return x elif (isinstance(cluster, six.string_types) and cluster==) or \ (isinstance(cluster, dict) and cluster[]==): if not _has_hdbscan: raise ImportError() if ndims != None: warnings.warn() if format_data: x = formatter(x, ppca=True) if isinstance(cluster, six.string_types): model = models[cluster] if cluster != : model_params = { : n_clusters } else: model_params = {} elif type(cluster) is dict: if isinstance(cluster[], six.string_types): model = models[cluster[]] model_params = cluster[] model = model(**model_params) model.fit(np.vstack(x)) return list(model.labels_)
Performs clustering analysis and returns a list of cluster labels Parameters ---------- x : A Numpy array, Pandas Dataframe or list of arrays/dfs The data to be clustered. You can pass a single array/df or a list. If a list is passed, the arrays will be stacked and the clustering will be performed across all lists (i.e. not within each list). cluster : str or dict Model to use to discover clusters. Supported algorithms are: KMeans, MiniBatchKMeans, AgglomerativeClustering, Birch, FeatureAgglomeration, SpectralClustering and HDBSCAN (default: KMeans). Can be passed as a string, but for finer control of the model parameters, pass as a dictionary, e.g. cluster={'model' : 'KMeans', 'params' : {'max_iter' : 100}}. See scikit-learn specific model docs for details on parameters supported for each model. n_clusters : int Number of clusters to discover. Not required for HDBSCAN. format_data : bool Whether or not to first call the format_data function (default: True). ndims : None Deprecated argument. Please use new analyze function to perform combinations of transformations Returns ---------- cluster_labels : list A list of cluster labels
### Input: Performs clustering analysis and returns a list of cluster labels Parameters ---------- x : A Numpy array, Pandas Dataframe or list of arrays/dfs The data to be clustered. You can pass a single array/df or a list. If a list is passed, the arrays will be stacked and the clustering will be performed across all lists (i.e. not within each list). cluster : str or dict Model to use to discover clusters. Supported algorithms are: KMeans, MiniBatchKMeans, AgglomerativeClustering, Birch, FeatureAgglomeration, SpectralClustering and HDBSCAN (default: KMeans). Can be passed as a string, but for finer control of the model parameters, pass as a dictionary, e.g. cluster={'model' : 'KMeans', 'params' : {'max_iter' : 100}}. See scikit-learn specific model docs for details on parameters supported for each model. n_clusters : int Number of clusters to discover. Not required for HDBSCAN. format_data : bool Whether or not to first call the format_data function (default: True). ndims : None Deprecated argument. Please use new analyze function to perform combinations of transformations Returns ---------- cluster_labels : list A list of cluster labels ### Response: #vtb def cluster(x, cluster=, n_clusters=3, ndims=None, format_data=True): if cluster == None: return x elif (isinstance(cluster, six.string_types) and cluster==) or \ (isinstance(cluster, dict) and cluster[]==): if not _has_hdbscan: raise ImportError() if ndims != None: warnings.warn() if format_data: x = formatter(x, ppca=True) if isinstance(cluster, six.string_types): model = models[cluster] if cluster != : model_params = { : n_clusters } else: model_params = {} elif type(cluster) is dict: if isinstance(cluster[], six.string_types): model = models[cluster[]] model_params = cluster[] model = model(**model_params) model.fit(np.vstack(x)) return list(model.labels_)
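A usage sketch of the documented call signatures for cluster; it assumes the elided pieces of the extracted code (the models registry lookup and the 'model'/'params' dict keys named in the docstring) are restored.
import numpy as np
X = np.vstack([np.random.randn(50, 4) + 3, np.random.randn(50, 4) - 3])
labels = cluster(X, cluster='KMeans', n_clusters=2)                                         # string form
labels_mb = cluster(X, cluster={'model': 'MiniBatchKMeans', 'params': {'n_clusters': 2}})   # dict form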
#vtb def gemini_query(self, query_id): logger.debug("Looking for query with id {0}".format(query_id)) return self.query(GeminiQuery).filter_by(id=query_id).first()
Return a gemini query Args: query_id (str)
### Input: Return a gemini query Args: query_id (str) ### Response: #vtb def gemini_query(self, query_id): logger.debug("Looking for query with id {0}".format(query_id)) return self.query(GeminiQuery).filter_by(id=query_id).first()
#vtb def deprecated(msg=''): def wrapper(func): @functools.wraps(func) def new_func(*args, **kwargs): warning_string = "Call to deprecated function or property `%s`." % func.__name__ warning_string = warning_string + ' ' + msg warnings.warn( warning_string, category=DeprecationWarning, ) return func(*args, **kwargs) return new_func return wrapper # note: the elided default and separator are restored as '' and ' ' (assumed)
This is a decorator which can be used to mark functions as deprecated. It will result in a warning being emitted when the function is used. :param msg: Additional message added to the warning.
### Input: This is a decorator which can be used to mark functions as deprecated. It will result in a warning being emitted when the function is used. :param msg: Additional message added to the warning. ### Response: #vtb def deprecated(msg=''): def wrapper(func): @functools.wraps(func) def new_func(*args, **kwargs): warning_string = "Call to deprecated function or property `%s`." % func.__name__ warning_string = warning_string + ' ' + msg warnings.warn( warning_string, category=DeprecationWarning, ) return func(*args, **kwargs) return new_func return wrapper # note: the elided default and separator are restored as '' and ' ' (assumed)
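A usage sketch for the deprecated decorator, consistent with the restored '' default and single-space separator above; old_api and new_api are hypothetical names.
import warnings

@deprecated(msg="Use new_api() instead.")
def old_api():
    return 42

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    old_api()
    print(caught[0].message)                 # Call to deprecated function or property `old_api`. Use new_api() instead.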
#vtb def template_sphere_shell(outer_radius, inner_radius=0): img = _template_sphere_disc(dim=3, outer_radius=outer_radius, inner_radius=inner_radius) return img
This method generates an image array of a sphere-shell. It is useful for passing to Cubic networks as a ``template`` to make spherically shaped networks. Parameters ---------- outer_radius : int Number of nodes in the outer radius of the sphere. inner_radius : int Number of nodes in the inner radius of the shell. A value of 0 will result in a solid sphere. Returns ------- A Numpy array containing 1's to demarcate the sphere-shell, and 0's elsewhere.
### Input: This method generates an image array of a sphere-shell. It is useful for passing to Cubic networks as a ``template`` to make spherically shaped networks. Parameters ---------- outer_radius : int Number of nodes in the outer radius of the sphere. inner_radius : int Number of nodes in the inner radius of the shell. A value of 0 will result in a solid sphere. Returns ------- A Numpy array containing 1's to demarcate the sphere-shell, and 0's elsewhere. ### Response: #vtb def template_sphere_shell(outer_radius, inner_radius=0): img = _template_sphere_disc(dim=3, outer_radius=outer_radius, inner_radius=inner_radius) return img
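A minimal call sketch for template_sphere_shell; it only runs inside the source module because it relies on the private _template_sphere_disc helper, and the exact image shape depends on that helper.
import numpy as np
shell = template_sphere_shell(outer_radius=10, inner_radius=6)
print(shell.shape, int(np.sum(shell)))       # cubic image; the sum counts the voxels marked 1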
#vtb def get_PolyFromPolyFileObj(PolyFileObj, SavePathInp=None, units=, comments=, skiprows=0, shape0=2): assert type(PolyFileObj) in [list,str] or hasattr(PolyFileObj,"Poly") or np.asarray(PolyFileObj).ndim==2, "Arg PolyFileObj must be str (PathFileExt), a ToFu object with attribute Poly or an iterable convertible to 2d np.ndarray !" addInfo = {} if type(PolyFileObj) in [list,str]: PathFileExt = get_FileFromInfos(Path=SavePathInp, Name=PolyFileObj) addInfo = {:PathFileExt} PolyFileObj = np.loadtxt(PathFileExt, dtype=float, comments=comments, delimiter=None, converters=None, skiprows=skiprows, usecols=None, unpack=False, ndmin=2) elif hasattr(PolyFileObj,"Poly"): addInfo = {:PolyFileObj.Id.SaveName} PolyFileObj = PolyFileObj.Poly Poly = np.asarray(PolyFileObj) assert Poly.ndim==2 and shape0 in Poly.shape and max(Poly.shape)>=3 and not np.any(np.isnan(Poly)), "Arg np.asarray(PolyFileObj) must be a (2,N) or (N,2) np.ndarray with non NaNs !" Poly = Poly if Poly.shape[0]==shape0 else Poly.T Poly = convert_units(Poly, In=units, Out=) return Poly, addInfo
Return a polygon as a np.ndarray, extracted from a txt file or from a ToFu object, with appropriate units Useful for :meth:`tofu.plugins.AUG.Ves._create()` Parameters ---------- PolyFileObj : str / :mod:`tofu.geom` object / np.ndarray The source where the polygon is to be found, either: - str: the name of a file containing the coordinates of a polygon to be loaded with :meth:`numpy.loadtxt()` - A :mod:`tofu.geom` object: with attribute 'Poly' - np.ndarray: a 2-dimensional array containing the 2D cartesian coordinates of a polygon SavePathInp : str / None The absolute path where the input file is stored units : str Flag indicating in which units the polygon coordinates are expressed in the input file / object / array (will be converted to meters) comments : str Parameter to be fed to :meth:`numpy.loadtxt()` if PolyFileObj is a file name skiprows : int Parameter to be fed to :meth:`numpy.loadtxt()` if PolyFileObj is a file name shape0 : int Specifies whether the loaded array is a (2,N) or (3,N) array (transposes it if necessary) Returns ------- Poly : np.ndarray (2,N) np.ndarray containing the 2D cartesian coordinates of the polygon, where N is the number of points addInfo : dict Dictionary containing information on the origin of the polygon, for the record (e.g.: the name and absolute path of the file from which it was extracted)
### Input: Return a polygon as a np.ndarray, extracted from a txt file or from a ToFu object, with appropriate units Useful for :meth:`tofu.plugins.AUG.Ves._create()` Parameters ---------- PolyFileObj : str / :mod:`tofu.geom` object / np.ndarray The source where the polygon is to be found, either: - str: the name of a file containing the coordinates of a polygon to be loaded with :meth:`numpy.loadtxt()` - A :mod:`tofu.geom` object: with attribute 'Poly' - np.ndarray: a 2-dimensional array containing the 2D cartesian coordinates of a polygon SavePathInp : str / None The absolute path where the input file is stored units : str Flag indicating in which units the polygon coordinates are expressed in the input file / object / array (will be converted to meters) comments : str Parameter to be fed to :meth:`numpy.loadtxt()` if PolyFileObj is a file name skiprows : int Parameter to be fed to :meth:`numpy.loadtxt()` if PolyFileObj is a file name shape0 : int Specifies whether the loaded array is a (2,N) or (3,N) array (transposes it if necessary) Returns ------- Poly : np.ndarray (2,N) np.ndarray containing the 2D cartesian coordinates of the polygon, where N is the number of points addInfo : dict Dictionary containing information on the origin of the polygon, for the record (e.g.: the name and absolute path of the file from which it was extracted) ### Response: #vtb def get_PolyFromPolyFileObj(PolyFileObj, SavePathInp=None, units=, comments=, skiprows=0, shape0=2): assert type(PolyFileObj) in [list,str] or hasattr(PolyFileObj,"Poly") or np.asarray(PolyFileObj).ndim==2, "Arg PolyFileObj must be str (PathFileExt), a ToFu object with attribute Poly or an iterable convertible to 2d np.ndarray !" addInfo = {} if type(PolyFileObj) in [list,str]: PathFileExt = get_FileFromInfos(Path=SavePathInp, Name=PolyFileObj) addInfo = {:PathFileExt} PolyFileObj = np.loadtxt(PathFileExt, dtype=float, comments=comments, delimiter=None, converters=None, skiprows=skiprows, usecols=None, unpack=False, ndmin=2) elif hasattr(PolyFileObj,"Poly"): addInfo = {:PolyFileObj.Id.SaveName} PolyFileObj = PolyFileObj.Poly Poly = np.asarray(PolyFileObj) assert Poly.ndim==2 and shape0 in Poly.shape and max(Poly.shape)>=3 and not np.any(np.isnan(Poly)), "Arg np.asarray(PolyFileObj) must be a (2,N) or (N,2) np.ndarray with non NaNs !" Poly = Poly if Poly.shape[0]==shape0 else Poly.T Poly = convert_units(Poly, In=units, Out=) return Poly, addInfo
#vtb def get_upgrades(self, remove_applied=True): if self.upgrades is None: plugins = self._load_upgrades(remove_applied=remove_applied) self.upgrades = self.order_upgrades(plugins, self.history) return self.upgrades
Get upgrades (ordered according to their dependencies). :param remove_applied: Set to False to return all upgrades, otherwise already applied upgrades are removed from their graph (incl. all their dependencies).
### Input: Get upgrades (ordered according to their dependencies). :param remove_applied: Set to False to return all upgrades, otherwise already applied upgrades are removed from their graph (incl. all their dependencies). ### Response: #vtb def get_upgrades(self, remove_applied=True): if self.upgrades is None: plugins = self._load_upgrades(remove_applied=remove_applied) self.upgrades = self.order_upgrades(plugins, self.history) return self.upgrades
#vtb def _upload_file(compute, project_id, file_path, path): path = "/projects/{}/files/{}".format(project_id, path.replace("\\", "/")) with open(file_path, "rb") as f: yield from compute.http_query("POST", path, f, timeout=None)
Upload a file to a remote project :param file_path: File path on the controller file system :param path: File path on the remote system relative to project directory
### Input: Upload a file to a remote project :param file_path: File path on the controller file system :param path: File path on the remote system relative to project directory ### Response: #vtb def _upload_file(compute, project_id, file_path, path): path = "/projects/{}/files/{}".format(project_id, path.replace("\\", "/")) with open(file_path, "rb") as f: yield from compute.http_query("POST", path, f, timeout=None)
#vtb def admeig(classname, f, m_u, m_d, m_s, m_c, m_b, m_e, m_mu, m_tau): args = f, m_u, m_d, m_s, m_c, m_b, m_e, m_mu, m_tau A = getattr(adm, + classname)(*args) perm_keys = get_permissible_wcs(classname, f) if perm_keys != : A = A[perm_keys][:, perm_keys] w, v = np.linalg.eig(A.T) return w, v
Compute the eigenvalues and eigenvectors for a QCD anomalous dimension matrix that is defined in `adm.adm_s_X` where X is the name of the sector. Supports memoization. Output analogous to `np.linalg.eig`.
### Input: Compute the eigenvalues and eigenvectors for a QCD anomalous dimension matrix that is defined in `adm.adm_s_X` where X is the name of the sector. Supports memoization. Output analogous to `np.linalg.eig`. ### Response: #vtb def admeig(classname, f, m_u, m_d, m_s, m_c, m_b, m_e, m_mu, m_tau): args = f, m_u, m_d, m_s, m_c, m_b, m_e, m_mu, m_tau A = getattr(adm, + classname)(*args) perm_keys = get_permissible_wcs(classname, f) if perm_keys != : A = A[perm_keys][:, perm_keys] w, v = np.linalg.eig(A.T) return w, v