code (string, 64 to 7.01k chars) | docstring (string, 2 to 15.8k chars)
---|---
#vtb
def add_disk_encryption_passwords(self, ids, passwords, clear_on_suspend):
if not isinstance(ids, list):
raise TypeError("ids can only be an instance of type list")
for a in ids[:10]:
if not isinstance(a, basestring):
raise TypeError(
"array can only contain objects of type basestring")
if not isinstance(passwords, list):
raise TypeError("passwords can only be an instance of type list")
for a in passwords[:10]:
if not isinstance(a, basestring):
raise TypeError(
"array can only contain objects of type basestring")
if not isinstance(clear_on_suspend, bool):
raise TypeError("clear_on_suspend can only be an instance of type bool")
self._call("addDiskEncryptionPasswords",
in_p=[ids, passwords, clear_on_suspend])
|
Adds a password used for hard disk encryption/decryption.
in ids of type str
List of identifiers for the passwords. Must match the identifier
used when the encrypted medium was created.
in passwords of type str
List of passwords.
in clear_on_suspend of type bool
Flag whether to clear the given passwords on VM suspend (due to a suspending host
for example). The passwords must be supplied again before the VM can resume.
raises :class:`VBoxErrorPasswordIncorrect`
The password provided wasn't correct for at least one disk using the provided
ID.
|
#vtb
def cli(ctx, env):
env.out("Welcome to the SoftLayer shell.")
env.out("")
formatter = formatting.HelpFormatter()
commands = []
shell_commands = []
for name in cli_core.cli.list_commands(ctx):
command = cli_core.cli.get_command(ctx, name)
if command.short_help is None:
command.short_help = command.help
details = (name, command.short_help)
if name in dict(routes.ALL_ROUTES):
shell_commands.append(details)
else:
commands.append(details)
with formatter.section():
formatter.write_dl(shell_commands)
with formatter.section():
formatter.write_dl(commands)
for line in formatter.buffer:
env.out(line, newline=False)
|
Print shell help text.
|
#vtb
def find_repositories_with_locate(path):
    # NOTE: the byte-string literals were lost in extraction; the 'locate -0'
    # invocation and NUL-delimiter handling below are plausible reconstructions.
    command = [b'locate', b'-0']
    for dotdir in DOTDIRS:
        command.append(br'%s/%s' % (escape(path), escape(dotdir)))
        command.append(br'%s/*/%s' % (escape(path), escape(dotdir)))
    try:
        paths = check_output(command).strip(b'\0').split(b'\0')
except CalledProcessError:
return []
return [os.path.split(p) for p in paths
if not os.path.islink(p) and os.path.isdir(p)]
|
Use locate to return a sequence of (directory, dotdir) pairs.
|
#vtb
def div(self, key, value=2):
    return uwsgi.cache_div(key, value, self.timeout, self.name)
|
Divides the specified key value by the specified value.
:param str|unicode key:
:param int value:
:rtype: bool
|
#vtb
def post(self, path, data=None, json=None, headers=None, **kwargs):
if headers is not None:
merger = jsonmerge.Merger(SCHEMA)
kwargs["headers"] = merger.merge(self.defaultHeaders, headers)
else:
kwargs["headers"] = self.defaultHeaders
url = combine_urls(self.host, path)
if self.cert is not None:
kwargs["cert"] = self.cert
self.logger.debug("Trying to send HTTP POST to {}".format(url))
try:
resp = requests.post(url, data, json, **kwargs)
self._log_response(resp)
except requests.RequestException as es:
self._log_exception(es)
raise
return resp
|
Sends a POST request to host/path.
:param path: String, resource path on server
:param data: Dictionary, bytes or file-like object to send in the body of the request
:param json: JSON formatted data to send in the body of the request
:param headers: Dictionary of HTTP headers to be sent with the request,
overwrites default headers if there is overlap
:param kwargs: Other arguments used in the requests.request call
valid parameters in kwargs are the optional parameters of Requests.Request
http://docs.python-requests.org/en/master/api/
:return: requests.Response
:raises: RequestException
|
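A minimal sketch of the header-merge precedence implemented above: per-request headers win over the defaults. The empty SCHEMA stand-in below is an assumption; jsonmerge's default object strategy already lets the second argument's keys override the first.

from jsonmerge import Merger

# Hypothetical stand-in for the module-level SCHEMA used by post().
SCHEMA = {}

default_headers = {"Accept": "application/json", "User-Agent": "client/1.0"}
request_headers = {"Accept": "text/html"}

merged = Merger(SCHEMA).merge(default_headers, request_headers)
print(merged)  # {'Accept': 'text/html', 'User-Agent': 'client/1.0'}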
#vtb
def _get_server(self):
with self._lock:
inactive_server_count = len(self._inactive_servers)
for i in range(inactive_server_count):
try:
ts, server, message = heapq.heappop(self._inactive_servers)
except IndexError:
pass
else:
if (ts + self.retry_interval) > time():
heapq.heappush(self._inactive_servers,
(ts, server, message))
else:
self._active_servers.append(server)
logger.warn("Restored server %s into active pool",
server)
if not self._active_servers:
ts, server, message = heapq.heappop(self._inactive_servers)
self._active_servers.append(server)
logger.info("Restored server %s into active pool", server)
server = self._active_servers[0]
self._roundrobin()
return server
|
Get server to use for request.
Also process inactive server list, re-add them after given interval.
|
#vtb
def pyobj_role(make_node, name, rawtext, text, lineno, inliner, options=None, content=None):
if options is None:
options = {}
if content is None:
content = []
try:
prefixed_name, obj, parent, modname = import_by_name(text)
except ImportError:
msg = inliner.reporter.error("Could not locate Python object {}".format(text), line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
app = inliner.document.settings.env.app
node = make_node(rawtext, app, prefixed_name, obj, parent, modname, options)
return [node], []
|
Include Python object value, rendering it to text using str.
Returns a 2-part tuple containing the list of nodes to insert into the
document and a list of system messages. Both are allowed to be
empty.
:param make_node: A callable which accepts (rawtext, app, prefixed_name, obj, parent, modname, options) and which returns a node
:param name: The role name used in the document.
:param rawtext: The entire markup snippet, with role.
:param text: The text marked with the role.
:param lineno: The line number where rawtext appears in the input.
:param inliner: The inliner instance that called us.
:param options: Directive options for customization.
:param content: The directive content for customization.
|
#vtb
def read_csv(self, file: str, table: str = '_csv', libref: str = '', results: str = '',
             opts: dict = None) -> 'SASdata':
    # NOTE: the default string literals and return annotation were lost in
    # extraction; '_csv', '' and 'SASdata' are plausible reconstructions.
opts = opts if opts is not None else {}
    if results == '':
results = self.results
self._io.read_csv(file, table, libref, self.nosub, opts)
if self.exist(table, libref):
return SASdata(self, libref, table, results)
else:
return None
|
:param file: either the OS filesystem path of the file, or HTTP://... for a url accessible file
:param table: the name of the SAS Data Set to create
:param libref: the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned
:param results: format of results, SASsession.results is default, PANDAS, HTML or TEXT are the alternatives
:param opts: a dictionary containing any of the following Proc Import options(datarow, delimiter, getnames, guessingrows)
:return: SASdata object
|
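A hedged usage sketch, assuming a configured SAS installation reachable from saspy; the file path, table name and libref are made up.

import saspy

sas = saspy.SASsession()
cars = sas.read_csv('./cars.csv', table='cars', libref='work')
if cars is not None:
    print(cars.table)  # 'cars', now loaded via PROC IMPORT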
#vtb
def get_sentence_xpath_tuples(filename_url_or_filelike,
xpath_to_text=TEXT_FINDER_XPATH):
parsed_html = get_html_tree(filename_url_or_filelike)
try:
xpath_finder = parsed_html.getroot().getroottree().getpath
except(AttributeError):
xpath_finder = parsed_html.getroottree().getpath
nodes_with_text = parsed_html.xpath(xpath_to_text)
    sent_xpath_pairs = [
        # NOTE: string literals lost in extraction; the '\n\n' prefix, empty
        # replacement string, and './/text()' xpath are plausible reconstructions.
        ('\n\n' + s, xpath_finder(n)) if e == 0
        else (s, xpath_finder(n))
        for n in nodes_with_text
        for e, s in enumerate(SENTENCE_TOKEN_PATTERN.split(
            BRACKET_PATTERN.sub('', ''.join(n.xpath('.//text()')))))
        if s.endswith(tuple(SENTENCE_ENDING))
    ]
return sent_xpath_pairs
|
Given a url and xpath, this function will download, parse, then
iterate through queried text-nodes. From the resulting text-nodes,
extract a list of (text, exact-xpath) tuples.
|
#vtb
def validate(self):
if not isinstance(self.fold_scope_location, FoldScopeLocation):
        # NOTE: the message literal was lost in extraction and is reconstructed here.
        raise TypeError(u'Expected FoldScopeLocation fold_scope_location, got: '
                        u'{} {}'.format(type(self.fold_scope_location), self.fold_scope_location))
|
Ensure the Fold block is valid.
|
#vtb
def close_session(self):
if not self._session.closed:
if self._session._connector_owner:
self._session._connector.close()
self._session._connector = None
|
Close current session.
|
#vtb
def update_filenames(self):
self.sky_file = os.path.abspath(os.path.join(os.path.join(self.input_path, ),
+ self.sky_state + + str(
self.sky_zenith) + + str(
self.sky_azimuth) + + str(
self.num_bands) + + self.ds_code))
|
Does nothing currently. May not need this method
|
#vtb
def detranslify(text):
try:
res = translit.detranslify(text)
except Exception as err:
        # NOTE: dict-key literals ('error', 'value') reconstructed after extraction loss.
        res = default_value % {'error': err, 'value': text}
return res
|
Detranslify Russian text
|
#vtb
def score(self):
"The total score for the words found, according to the rules."
return sum([self.scores[len(w)] for w in self.words()])
|
The total score for the words found, according to the rules.
|
#vtb
def eval_option_value(self, option):
try:
value = eval(option, {}, {})
except (SyntaxError, NameError, TypeError):
return option
if type(value) in (str, bool, int, float):
return value
elif type(value) in (list, tuple):
for v in value:
if type(v) not in (str, bool, int, float):
self._write_error("Value of element of list object has wrong type %s" % v)
return value
return option
|
Evaluates an option
:param option: a string
:return: an object of type str, bool, int, float or list
|
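The coercion this method performs can be sketched stand-alone with ast.literal_eval, a safer cousin of the eval call above; eval_option_value_sketch is a hypothetical helper for illustration only.

import ast

def eval_option_value_sketch(option):
    # Only accepts Python literals; anything else stays a plain string.
    try:
        value = ast.literal_eval(option)
    except (ValueError, SyntaxError):
        return option
    if type(value) in (str, bool, int, float, list, tuple):
        return value
    return option

print(eval_option_value_sketch("3.14"))    # 3.14 (float)
print(eval_option_value_sketch("[1, 2]"))  # [1, 2] (list)
print(eval_option_value_sketch("hello"))   # 'hello' (unchanged string)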
#vtb
def login(self, **kwargs):
    # NOTE: the string literals in this method were lost in extraction; the
    # key names, content type, URL and status field below are plausible
    # reconstructions, not confirmed against the original source.
    payload = {
        'username': self.username,
        'password': self.password,
    }
    headers = kwargs.setdefault('headers', {})
    headers.setdefault(
        'Content-Type',
        'application/json',
    )
    url = '/login'
    response = self.request(url, 'POST', json=payload, **kwargs)
    r_json = response.json()
    return r_json['code'] == 0
|
Log in.
|
#vtb
def object(self, object):
if object is None:
raise ValueError("Invalid value for `object`, must not be `None`")
allowed_values = ["service-package-quota-history"]
if object not in allowed_values:
raise ValueError(
"Invalid value for `object` ({0}), must be one of {1}"
.format(object, allowed_values)
)
self._object = object
|
Sets the object of this ServicePackageQuotaHistoryResponse.
Always set to 'service-package-quota-history'.
:param object: The object of this ServicePackageQuotaHistoryResponse.
:type: str
|
#vtb
def _train_model(
self, train_data, loss_fn, valid_data=None, log_writer=None, restore_state={}
):
self.train()
train_config = self.config["train_config"]
train_loader = self._create_data_loader(train_data)
valid_loader = self._create_data_loader(valid_data)
epoch_size = len(train_loader.dataset)
if self.config["verbose"] and self.config["device"] != "cpu":
print("Using GPU...")
self.to(self.config["device"])
self._set_writer(train_config)
self._set_logger(train_config, epoch_size)
self._set_checkpointer(train_config)
self._set_optimizer(train_config)
self._set_scheduler(train_config)
if restore_state:
start_iteration = self._restore_training_state(restore_state)
else:
start_iteration = 0
metrics_hist = {}
for epoch in range(start_iteration, train_config["n_epochs"]):
progress_bar = (
train_config["progress_bar"]
and self.config["verbose"]
and self.logger.log_unit == "epochs"
)
t = tqdm(
enumerate(train_loader),
total=len(train_loader),
disable=(not progress_bar),
)
self.running_loss = 0.0
self.running_examples = 0
for batch_num, data in t:
batch_size = len(data[0])
if self.config["device"] != "cpu":
data = place_on_gpu(data)
self.optimizer.zero_grad()
loss = loss_fn(*data)
if torch.isnan(loss):
msg = "Loss is NaN. Consider reducing learning rate."
raise Exception(msg)
loss.backward()
self.optimizer.step()
metrics_dict = self._execute_logging(
train_loader, valid_loader, loss, batch_size
)
metrics_hist.update(metrics_dict)
t.set_postfix(loss=metrics_dict["train/loss"])
self._update_scheduler(epoch, metrics_hist)
self.eval()
if self.checkpointer:
self.checkpointer.load_best_model(model=self)
if self.writer:
if self.writer.include_config:
self.writer.add_config(self.config)
self.writer.close()
if self.config["verbose"]:
print("Finished Training")
if valid_loader is not None:
self.score(
valid_loader,
metric=train_config["validation_metric"],
verbose=True,
print_confusion_matrix=True,
)
|
The internal training routine called by train_model() after setup
Args:
train_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of
X (data) and Y (labels) for the train split
loss_fn: the loss function to minimize (maps *data -> loss)
valid_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of
X (data) and Y (labels) for the dev split
restore_state: a dictionary containing model weights (optimizer, main network) and training information
If valid_data is not provided, then no checkpointing or
evaluation on the dev set will occur.
|
#vtb
def get_object_reference(obj: Object) -> str:
resource_name = obj.title
if resource_name is None:
class_name = obj.__name__
resource_name = class_name_to_resource_name(class_name)
ALL_RESOURCES[resource_name] = obj
    # NOTE: the reference-format literals were lost in extraction; this is a
    # plausible Sphinx cross-reference reconstruction.
    return ':ref:`{}`'.format(
        '-'.join(resource_name.split()).lower().strip())
|
Gets an object reference string from the obj instance.
This adds the object type to ALL_RESOURCES so that it gets documented and
returns a str which contains a sphinx reference to the documented object.
:param obj: The Object instance.
:returns: A sphinx docs reference str.
|
#vtb
def get_mouse_pos(self, window_pos=None):
window_pos = window_pos or pygame.mouse.get_pos()
window_pt = point.Point(*window_pos) + 0.5
for surf in reversed(self._surfaces):
if (surf.surf_type != SurfType.CHROME and
surf.surf_rect.contains_point(window_pt)):
surf_rel_pt = window_pt - surf.surf_rect.tl
world_pt = surf.world_to_surf.back_pt(surf_rel_pt)
return MousePos(world_pt, surf)
|
Return a MousePos filled with the world position and surf it hit.
|
#vtb
def write_numeric_array(fd, header, array):
bd = BytesIO()
write_var_header(bd, header)
    # NOTE: dict-key literals ('dims', 'mtp') reconstructed after extraction loss.
    if not isinstance(array, basestring) and header['dims'][0] > 1:
        array = list(chain.from_iterable(izip(*array)))
    write_elements(bd, header['mtp'], array)
data = bd.getvalue()
bd.close()
write_var_data(fd, data)
|
Write the numeric array
|
#vtb
def require_at_least_one_query_parameter(*query_parameter_names):
def outer_wrapper(view):
@wraps(view)
def wrapper(request, *args, **kwargs):
requirement_satisfied = False
for query_parameter_name in query_parameter_names:
query_parameter_values = request.query_params.getlist(query_parameter_name)
kwargs[query_parameter_name] = query_parameter_values
if query_parameter_values:
requirement_satisfied = True
if not requirement_satisfied:
                # NOTE: detail message reconstructed; the original literal was lost.
                raise ValidationError(
                    detail='You must provide at least one of the following '
                           'query parameters: {params}'.format(
                               params=', '.join(query_parameter_names)
                           )
                )
return view(request, *args, **kwargs)
return wrapper
return outer_wrapper
|
Ensure at least one of the specified query parameters are included in the request.
This decorator checks for the existence of at least one of the specified query
parameters and passes the values as function parameters to the decorated view.
If none of the specified query parameters are included in the request, a
ValidationError is raised.
Usage::
@require_at_least_one_query_parameter('program_uuids', 'course_run_ids')
def my_view(request, program_uuids, course_run_ids):
# Some functionality ...
|
#vtb
def count_(self):
try:
num = len(self.df.index)
except Exception as e:
self.err(e, "Can not count data")
return
return num
|
Returns the number of rows of the main dataframe
|
#vtb
def attach_volume_to_device(self, volume_id, device_id):
try:
volume = self.manager.get_volume(volume_id)
volume.attach(device_id)
except packet.baseapi.Error as msg:
raise PacketManagerException(msg)
return volume
|
Attaches the created Volume to a Device.
|
#vtb
def mock_attr(self, *args, **kwargs):
self.path.extend(args)
self.qs.update(kwargs)
return self
|
Empty method to call to slurp up args and kwargs.
`args` get pushed onto the url path.
`kwargs` are converted to a query string and appended to the URL.
|
#vtb
def open_handle(self, dwDesiredAccess = win32.THREAD_ALL_ACCESS):
hThread = win32.OpenThread(dwDesiredAccess, win32.FALSE, self.dwThreadId)
self.close_handle()
self.hThread = hThread
|
Opens a new handle to the thread, closing the previous one.
The new handle is stored in the L{hThread} property.
@warn: Normally you should call L{get_handle} instead, since it's much
"smarter" and tries to reuse handles and merge access rights.
@type dwDesiredAccess: int
@param dwDesiredAccess: Desired access rights.
Defaults to L{win32.THREAD_ALL_ACCESS}.
See: U{http://msdn.microsoft.com/en-us/library/windows/desktop/ms686769(v=vs.85).aspx}
@raise WindowsError: It's not possible to open a handle to the thread
with the requested access rights. This typically happens because
the target thread belongs to a system process and the debugger is not
running with administrative rights.
|
#vtb
async def nextset(self):
conn = self._get_db()
current_result = self._result
if current_result is None or current_result is not conn._result:
return
if not current_result.has_next:
return
self._result = None
self._clear_result()
await conn.next_result()
await self._do_get_result()
return True
|
Get the next query set
|
#vtb
def _set_output_arguments(self):
group = self.parser.add_argument_group()
    # NOTE: option-name literals were lost in extraction; '-o/--output' and
    # '--json-line' below are plausible reconstructions.
    group.add_argument('-o', '--output', type=argparse.FileType('w'),
                       dest='outfile', default=sys.stdout,
                       help="output file")
    group.add_argument('--json-line', dest='json_line', action='store_true',
                       help="produce a JSON line for each output item")
|
Activate output arguments parsing
|
#vtb
def savorSessionCookie(self, request):
cookieValue = request.getSession().uid
    request.addCookie(
        self.cookieKey, cookieValue, path='/',
        max_age=PERSISTENT_SESSION_LIFETIME,
        domain=self.cookieDomainForRequest(request))
|
Make the session cookie last as long as the persistent session.
@type request: L{nevow.inevow.IRequest}
@param request: The HTTP request object for the guard login URL.
|
#vtb
def ContextTupleToDict(context):
d = {}
if not context:
return d
for k, v in zip(ExceptionWithContext.CONTEXT_PARTS, context):
        if v != '' and v != None:
d[k] = v
return d
|
Convert a tuple representing a context into a dict of (key, value) pairs
|
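A self-contained sketch of the zip-and-filter pattern; CONTEXT_PARTS here is a hypothetical stand-in for ExceptionWithContext.CONTEXT_PARTS.

CONTEXT_PARTS = ['file_name', 'row_num', 'column_name', 'value']

def context_tuple_to_dict(context):
    # Pair each context slot with its name, dropping empty and missing values.
    return {k: v for k, v in zip(CONTEXT_PARTS, context or ())
            if v != '' and v is not None}

print(context_tuple_to_dict(('stops.txt', 12, '', None)))
# {'file_name': 'stops.txt', 'row_num': 12}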
#vtb
def churn_rate(user, summary='default', **kwargs):
if len(user.records) == 0:
return statistics([], summary=summary)
query = {
: ,
: OrderedDict([
(, []),
(, [])
]),
: ,
: True,
: True
}
rv = grouping_query(user, query)
weekly_positions = rv[0][1]
all_positions = list(set(p for l in weekly_positions for p in l))
frequencies = {}
cos_dist = []
for week, week_positions in enumerate(weekly_positions):
count = Counter(week_positions)
total = sum(count.values())
frequencies[week] = [count.get(p, 0) / total for p in all_positions]
all_indexes = range(len(all_positions))
for f_1, f_2 in pairwise(list(frequencies.values())):
num = sum(f_1[a] * f_2[a] for a in all_indexes)
denom_1 = sum(f ** 2 for f in f_1)
denom_2 = sum(f ** 2 for f in f_2)
cos_dist.append(1 - num / (denom_1 ** .5 * denom_2 ** .5))
return statistics(cos_dist, summary=summary)
|
Computes the frequency spent at every tower each week, and returns the
distribution of the cosine distance between two consecutive weeks.
.. note:: The churn rate is always computed between pairs of weeks.
|
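A worked instance of the per-pair computation above: the cosine distance between two (made-up) weekly tower-frequency vectors.

f_1 = [0.6, 0.4, 0.0]   # week 1: fraction of records per tower
f_2 = [0.5, 0.3, 0.2]   # week 2

num = sum(a * b for a, b in zip(f_1, f_2))
denom_1 = sum(f ** 2 for f in f_1)
denom_2 = sum(f ** 2 for f in f_2)
print(1 - num / (denom_1 ** .5 * denom_2 ** .5))  # ~0.055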
#vtb
def get_branch_info(self):
branch_info = None
if os.path.exists(constants.cached_branch_info):
        # NOTE: the log-message, encoding and mode string literals in this
        # method were lost in extraction; the strings below are plausible
        # reconstructions based on the surrounding logic.
        logger.debug(u'Reading branch info from cached file.')
        ctime = datetime.utcfromtimestamp(
            os.path.getctime(constants.cached_branch_info))
        if datetime.utcnow() < (ctime + timedelta(minutes=5)):
            with io.open(constants.cached_branch_info, encoding='utf8', mode='r') as f:
                branch_info = json.load(f)
            return branch_info
        else:
            logger.debug(u'Cached branch info is older than 5 minutes.')
    logger.debug(u'Obtaining branch information from %s',
                 self.branch_info_url)
    net_logger.info(u'GET %s', self.branch_info_url)
    response = self.session.get(self.branch_info_url,
                                timeout=self.config.http_timeout)
    logger.debug(u'GET branch_info status: %s', response.status_code)
    if response.status_code != 200:
        logger.debug("There was an error obtaining branch information.")
        logger.debug(u'Bad status from server: %s', response.status_code)
        logger.debug("Assuming default branch information %s" % self.branch_info)
        return False
    branch_info = response.json()
    logger.debug(u'Branch information: %s', json.dumps(branch_info))
    if ((branch_info[u'remote_branch'] is not -1 and
         branch_info[u'remote_leaf'] is -1)):
        self.get_satellite5_info(branch_info)
    logger.debug(u'Saving branch info to file.')
    with io.open(constants.cached_branch_info, encoding='utf8', mode='w') as f:
bi_str = json.dumps(branch_info, ensure_ascii=False)
f.write(bi_str)
self.branch_info = branch_info
return branch_info
|
Retrieve branch_info from Satellite Server
|
#vtb
def apply_perturbations(i, j, X, increase, theta, clip_min, clip_max):
warnings.warn(
"This function is dead code and will be removed on or after 2019-07-18")
if increase:
X[0, i] = np.minimum(clip_max, X[0, i] + theta)
X[0, j] = np.minimum(clip_max, X[0, j] + theta)
else:
X[0, i] = np.maximum(clip_min, X[0, i] - theta)
X[0, j] = np.maximum(clip_min, X[0, j] - theta)
return X
|
TensorFlow implementation for applying perturbations to input features based
on saliency maps
:param i: index of first selected feature
:param j: index of second selected feature
:param X: a matrix containing our input features for our sample
:param increase: boolean; true if we are increasing pixels, false otherwise
:param theta: delta for each feature adjustment
:param clip_min: minimum value for a feature in our sample
:param clip_max: maximum value for a feature in our sample
:return: a perturbed input feature matrix for a target class
|
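A small numpy demo of the 'increase' branch on a 1 x 4 feature row; the values are arbitrary.

import numpy as np

X = np.array([[0.2, 0.9, 0.5, 0.1]])
i, j, theta, clip_min, clip_max = 0, 1, 0.3, 0.0, 1.0

# Both selected features move up by theta, clipped at clip_max.
X[0, i] = np.minimum(clip_max, X[0, i] + theta)
X[0, j] = np.minimum(clip_max, X[0, j] + theta)
print(X)  # [[0.5 1.  0.5 0.1]]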
#vtb
def _get_media(media_types):
get_mapped_media = (lambda x: maps.VIRTUAL_MEDIA_TYPES_MAP[x]
if x in maps.VIRTUAL_MEDIA_TYPES_MAP else None)
return list(map(get_mapped_media, media_types))
|
Helper method to map the media types.
|
#vtb
def __get_zero_seq_indexes(self, message: str, following_zeros: int):
result = []
if following_zeros > len(message):
return result
zero_counter = 0
for i in range(0, len(message)):
if message[i] == "0":
zero_counter += 1
else:
if zero_counter >= following_zeros:
result.append((i - zero_counter, i))
zero_counter = 0
if zero_counter >= following_zeros:
result.append((len(message) - 1 - following_zeros, len(message) - 1))
return result
|
:rtype: list[tuple of int]
|
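A stand-alone copy of the scan (module-level, for demonstration only), giving a quick concrete check of the returned index pairs.

def zero_seq_indexes(message, following_zeros):
    result = []
    if following_zeros > len(message):
        return result
    zero_counter = 0
    for i in range(0, len(message)):
        if message[i] == "0":
            zero_counter += 1
        else:
            # A run of zeros just ended at index i.
            if zero_counter >= following_zeros:
                result.append((i - zero_counter, i))
            zero_counter = 0
    if zero_counter >= following_zeros:
        result.append((len(message) - 1 - following_zeros, len(message) - 1))
    return result

print(zero_seq_indexes("110000110", 3))  # [(2, 6)]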
#vtb
def inner(tensor0: BKTensor, tensor1: BKTensor) -> BKTensor:
return np.vdot(tensor0, tensor1)
|
Return the inner product between two tensors
|
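A minimal check of np.vdot's conjugating behavior, which is what makes inner() an inner product rather than a plain dot product on complex tensors.

import numpy as np

t0 = np.array([1 + 1j, 2.0])
print(np.vdot(t0, t0))  # (6+0j): |1+1j|**2 + |2|**2, the squared norm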
#vtb
def get_volume_options(volumes):
if not isinstance(volumes, list):
volumes = [volumes]
volumes = [Volume.create_from_tuple(v) for v in volumes]
result = []
for v in volumes:
result += ["-v", str(v)]
return result
|
Generates volume options to run methods.
:param volumes: tuple or list of tuples in the form `target`, `source,target`, or `source,target,mode`.
:return: list of the form ["-v", "/source:/target", "-v", "/other/source:/destination:z", ...]
|
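A hedged sketch of the expansion, with plain strings standing in for Volume objects (str(v) is what lands after each "-v").

volumes = ["/src:/target", "/other/src:/dest:z"]

result = []
for v in volumes:
    result += ["-v", str(v)]
print(result)  # ['-v', '/src:/target', '-v', '/other/src:/dest:z']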
#vtb
def compute_elementary_effects(model_inputs, model_outputs, trajectory_size,
delta):
num_vars = model_inputs.shape[1]
num_rows = model_inputs.shape[0]
num_trajectories = int(num_rows / trajectory_size)
ee = np.zeros((num_trajectories, num_vars), dtype=np.float)
ip_vec = model_inputs.reshape(num_trajectories, trajectory_size, num_vars)
ip_cha = np.subtract(ip_vec[:, 1:, :], ip_vec[:, 0:-1, :])
up = (ip_cha > 0)
lo = (ip_cha < 0)
op_vec = model_outputs.reshape(num_trajectories, trajectory_size)
result_up = get_increased_values(op_vec, up, lo)
result_lo = get_decreased_values(op_vec, up, lo)
ee = np.subtract(result_up, result_lo)
np.divide(ee, delta, out=ee)
return ee
|
Arguments
---------
model_inputs : matrix of inputs to the model under analysis.
x-by-r where x is the number of variables and
r is the number of rows (a function of x and num_trajectories)
model_outputs
an r-length vector of model outputs
trajectory_size
a scalar indicating the number of rows in a trajectory
delta : float
scaling factor computed from `num_levels`
|
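A worked one-variable instance of the elementary-effect formula the function vectorizes: the output change across a step, divided by delta.

delta = 2.0 / 3.0             # typical delta for a 4-level Morris design
y_before, y_after = 1.0, 1.9  # made-up model outputs on either side of the step
ee = (y_after - y_before) / delta
print(ee)                     # 1.35 (approximately)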
#vtb
def mergeGraphs(targetGraph, addedGraph, incrementedNodeVal = 'count', incrementedEdgeVal = 'weight'):
for addedNode, attribs in addedGraph.nodes(data = True):
if incrementedNodeVal:
try:
targetGraph.node[addedNode][incrementedNodeVal] += attribs[incrementedNodeVal]
except KeyError:
targetGraph.add_node(addedNode, **attribs)
else:
if not targetGraph.has_node(addedNode):
targetGraph.add_node(addedNode, **attribs)
for edgeNode1, edgeNode2, attribs in addedGraph.edges(data = True):
if incrementedEdgeVal:
try:
targetGraph.edges[edgeNode1, edgeNode2][incrementedEdgeVal] += attribs[incrementedEdgeVal]
except KeyError:
targetGraph.add_edge(edgeNode1, edgeNode2, **attribs)
else:
            if not targetGraph.has_edge(edgeNode1, edgeNode2):
targetGraph.add_edge(edgeNode1, edgeNode2, **attribs)
|
A quick way of merging graphs, this is meant to be quick and is only intended for graphs generated by metaknowledge. This does not check anything and as such may cause unexpected results if the source and target were not generated by the same method.
**mergeGraphs**() will **modify** _targetGraph_ in place by adding the nodes and edges found in the second, _addedGraph_. If a node or edge exists _targetGraph_ is given precedence, but the edge and node attributes given by _incrementedNodeVal_ and incrementedEdgeVal are added instead of being overwritten.
# Parameters
_targetGraph_ : `networkx Graph`
> the graph to be modified, it has precedence.
_addedGraph_ : `networkx Graph`
> the graph that is unmodified, it is added and does **not** have precedence.
_incrementedNodeVal_ : `optional [str]`
> default `'count'`, the name of the count attribute for the graph's nodes. When merging this attribute will be the sum of the values in the input graphs, instead of _targetGraph_'s value.
_incrementedEdgeVal_ : `optional [str]`
> default `'weight'`, the name of the weight attribute for the graph's edges. When merging this attribute will be the sum of the values in the input graphs, instead of _targetGraph_'s value.
|
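The edge branch of the merge, shown runnable on current networkx with two toy graphs.

import networkx as nx

g1 = nx.Graph()
g1.add_edge("a", "b", weight=1)
g2 = nx.Graph()
g2.add_edge("a", "b", weight=4)

# Increment-on-merge for edges, the core of mergeGraphs:
for u, v, attribs in g2.edges(data=True):
    try:
        g1.edges[u, v]["weight"] += attribs["weight"]
    except KeyError:
        g1.add_edge(u, v, **attribs)
print(g1.edges["a", "b"]["weight"])  # 5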
#vtb
def deliver_tx(self, raw_transaction):
self.abort_if_abci_chain_is_not_synced()
    # NOTE: debug-message literals reconstructed; the originals were lost in extraction.
    logger.debug('deliver_tx: %s', raw_transaction)
    transaction = self.bigchaindb.is_valid_transaction(
        decode_transaction(raw_transaction), self.block_transactions)
    if not transaction:
        logger.debug('deliver_tx: INVALID')
        return ResponseDeliverTx(code=CodeTypeError)
    else:
        logger.debug('storing tx')
self.block_txn_ids.append(transaction.id)
self.block_transactions.append(transaction)
return ResponseDeliverTx(code=CodeTypeOk)
|
Validate the transaction before mutating the state.
Args:
raw_tx: a raw string (in bytes) transaction.
|
#vtb
def deepgetattr(obj, attr, default=AttributeError):
try:
        return reduce(getattr, attr.split('.'), obj)
except AttributeError:
if default is not AttributeError:
return default
raise
|
Recurses through an attribute chain to get the ultimate value (obj/data/member/value)
from: http://pingfive.typepad.com/blog/2010/04/deep-getattr-python-function.html
>>> class Universe(object):
... def __init__(self, galaxy):
... self.galaxy = galaxy
...
>>> class Galaxy(object):
... def __init__(self, solarsystem):
... self.solarsystem = solarsystem
...
>>> class SolarSystem(object):
... def __init__(self, planet):
... self.planet = planet
...
>>> class Planet(object):
... def __init__(self, name):
... self.name = name
...
>>> universe = Universe(Galaxy(SolarSystem(Planet('Earth'))))
>>> deepgetattr(universe, 'galaxy.solarsystem.planet.name')
'Earth'
>>> deepgetattr(universe, 'solarsystem.planet.name', default=TypeError)
<class 'TypeError'>
|
#vtb
def run_solr_text_on(solrInstance, category, q, qf, fields, optionals):
if optionals == None:
optionals = ""
query = solrInstance.value + "select?q=" + q + "&qf=" + qf + "&fq=document_category:\"" + category.value + "\"&fl=" + fields + "&wt=json&indent=on" + optionals
response = requests.get(query)
    return response.json()['response']['docs']
|
Return the result of a solr query on the given solrInstance (Enum ESOLR), for a certain document_category (ESOLRDoc) and id
|
#vtb
def radec2azel(ra_deg: float, dec_deg: float,
lat_deg: float, lon_deg: float,
time: datetime, usevallado: bool = False) -> Tuple[float, float]:
if usevallado or Time is None:
return vradec2azel(ra_deg, dec_deg, lat_deg, lon_deg, time)
lat = np.atleast_1d(lat_deg)
lon = np.atleast_1d(lon_deg)
ra = np.atleast_1d(ra_deg)
dec = np.atleast_1d(dec_deg)
obs = EarthLocation(lat=lat * u.deg,
lon=lon * u.deg)
points = SkyCoord(Angle(ra, unit=u.deg),
Angle(dec, unit=u.deg),
                      equinox='J2000.0')
altaz = points.transform_to(AltAz(location=obs, obstime=Time(str2dt(time))))
return altaz.az.degree, altaz.alt.degree
|
sky coordinates (ra, dec) to viewing angle (az, el)
Parameters
----------
ra_deg : float or numpy.ndarray of float
ecliptic right ascension (degrees)
dec_deg : float or numpy.ndarray of float
ecliptic declination (degrees)
lat_deg : float
observer latitude [-90, 90]
lon_deg : float
observer longitude [-180, 180] (degrees)
time : datetime.datetime
time of observation
usevallado : bool, optional
default use astropy. If true, use Vallado algorithm
Returns
-------
az_deg : float or numpy.ndarray of float
azimuth [degrees clockwise from North]
el_deg : float or numpy.ndarray of float
elevation [degrees above horizon (neglecting aberration)]
|
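A hedged usage sketch (requires astropy); the coordinates and time are arbitrary.

from datetime import datetime

az, el = radec2azel(ra_deg=166.0, dec_deg=65.0,
                    lat_deg=42.0, lon_deg=-82.0,
                    time=datetime(2014, 4, 6, 8, 0, 0))
print(az, el)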
### Input:
sky coordinates (ra, dec) to viewing angle (az, el)
Parameters
----------
ra_deg : float or numpy.ndarray of float
ecliptic right ascension (degress)
dec_deg : float or numpy.ndarray of float
ecliptic declination (degrees)
lat_deg : float
observer latitude [-90, 90]
lon_deg : float
observer longitude [-180, 180] (degrees)
time : datetime.datetime
time of observation
usevallado : bool, optional
default use astropy. If true, use Vallado algorithm
Returns
-------
az_deg : float or numpy.ndarray of float
azimuth [degrees clockwize from North]
el_deg : float or numpy.ndarray of float
elevation [degrees above horizon (neglecting aberration)]
### Response:
#vtb
def radec2azel(ra_deg: float, dec_deg: float,
lat_deg: float, lon_deg: float,
time: datetime, usevallado: bool = False) -> Tuple[float, float]:
if usevallado or Time is None:
return vradec2azel(ra_deg, dec_deg, lat_deg, lon_deg, time)
lat = np.atleast_1d(lat_deg)
lon = np.atleast_1d(lon_deg)
ra = np.atleast_1d(ra_deg)
dec = np.atleast_1d(dec_deg)
obs = EarthLocation(lat=lat * u.deg,
lon=lon * u.deg)
points = SkyCoord(Angle(ra, unit=u.deg),
Angle(dec, unit=u.deg),
                      equinox='J2000.0')
altaz = points.transform_to(AltAz(location=obs, obstime=Time(str2dt(time))))
return altaz.az.degree, altaz.alt.degree
|
#vtb
def _simulate_coef_from_bootstraps(
self, n_draws, coef_bootstraps, cov_bootstraps):
random_bootstrap_indices = np.random.choice(
np.arange(len(coef_bootstraps)), size=n_draws, replace=True)
bootstrap_index_to_draw_indices = defaultdict(list)
for draw_index, bootstrap_index in enumerate(random_bootstrap_indices):
bootstrap_index_to_draw_indices[bootstrap_index].append(draw_index)
coef_draws = np.empty((n_draws, len(self.coef_)))
for bootstrap, draw_indices in bootstrap_index_to_draw_indices.items():
coef_draws[draw_indices] = np.random.multivariate_normal(
coef_bootstraps[bootstrap], cov_bootstraps[bootstrap],
size=len(draw_indices))
return coef_draws
|
Simulate coefficients using bootstrap samples.
|
### Input:
Simulate coefficients using bootstrap samples.
### Response:
#vtb
def _simulate_coef_from_bootstraps(
self, n_draws, coef_bootstraps, cov_bootstraps):
random_bootstrap_indices = np.random.choice(
np.arange(len(coef_bootstraps)), size=n_draws, replace=True)
bootstrap_index_to_draw_indices = defaultdict(list)
for draw_index, bootstrap_index in enumerate(random_bootstrap_indices):
bootstrap_index_to_draw_indices[bootstrap_index].append(draw_index)
coef_draws = np.empty((n_draws, len(self.coef_)))
for bootstrap, draw_indices in bootstrap_index_to_draw_indices.items():
coef_draws[draw_indices] = np.random.multivariate_normal(
coef_bootstraps[bootstrap], cov_bootstraps[bootstrap],
size=len(draw_indices))
return coef_draws
|
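The draw logic above is a mixture of Gaussians, one component per bootstrap fit: component indices are sampled uniformly with replacement, then all draws sharing a component are generated in a single multivariate-normal call. A self-contained toy run (shapes are illustrative only):
import numpy as np
from collections import defaultdict

np.random.seed(0)
coef_bootstraps = [np.zeros(3), np.ones(3)]          # two bootstrap coefficient vectors
cov_bootstraps = [np.eye(3) * 0.1, np.eye(3) * 0.2]  # matching covariance estimates
idx = np.random.choice(len(coef_bootstraps), size=5, replace=True)
groups = defaultdict(list)
for draw, b in enumerate(idx):
    groups[b].append(draw)
draws = np.empty((5, 3))
for b, rows in groups.items():
    draws[rows] = np.random.multivariate_normal(
        coef_bootstraps[b], cov_bootstraps[b], size=len(rows))
print(draws.shape)  # (5, 3)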
#vtb
def run_shell_command(commands, **kwargs):
p = subprocess.Popen(commands,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**kwargs)
output, error = p.communicate()
return p.returncode, output, error
|
Run a shell command.
|
### Input:
Run a shell command.
### Response:
#vtb
def run_shell_command(commands, **kwargs):
p = subprocess.Popen(commands,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**kwargs)
output, error = p.communicate()
return p.returncode, output, error
|
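A quick usage sketch, assuming the function above is in scope; `commands` is an argument list because `shell=True` is not set, and `communicate()` returns bytes:
rc, out, err = run_shell_command(['echo', 'hello'])
print(rc, out.decode().strip())  # 0 hello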
#vtb
def db_create(name,
user=None,
host=None,
port=None,
maintenance_db=None,
password=None,
tablespace=None,
encoding=None,
lc_collate=None,
lc_ctype=None,
owner=None,
template=None,
runas=None):
    query = 'CREATE DATABASE "{0}"'.format(name)
    with_args = salt.utils.odict.OrderedDict([
        ('TABLESPACE', _quote_ddl_value(tablespace, '"')),
|
    Adds a database to the Postgres server.
CLI Example:
.. code-block:: bash
salt '*' postgres.db_create 'dbname'
salt '*' postgres.db_create 'dbname' template=template_postgis
|
### Input:
Adds a database to the Postgres server.
CLI Example:
.. code-block:: bash
salt '*' postgres.db_create 'dbname'
salt '*' postgres.db_create 'dbname' template=template_postgis
### Response:
#vtb
def db_create(name,
user=None,
host=None,
port=None,
maintenance_db=None,
password=None,
tablespace=None,
encoding=None,
lc_collate=None,
lc_ctype=None,
owner=None,
template=None,
runas=None):
    query = 'CREATE DATABASE "{0}"'.format(name)
    with_args = salt.utils.odict.OrderedDict([
        ('TABLESPACE', _quote_ddl_value(tablespace, '"')),
|
#vtb
def addcomment(self, comment, private=False):
vals = self.bugzilla.build_update(comment=comment,
comment_private=private)
log.debug("addcomment: update=%s", vals)
return self.bugzilla.update_bugs(self.bug_id, vals)
|
Add the given comment to this bug. Set private to True to mark this
comment as private.
|
### Input:
Add the given comment to this bug. Set private to True to mark this
comment as private.
### Response:
#vtb
def addcomment(self, comment, private=False):
vals = self.bugzilla.build_update(comment=comment,
comment_private=private)
log.debug("addcomment: update=%s", vals)
return self.bugzilla.update_bugs(self.bug_id, vals)
|
#vtb
def _safe_dump(data):
    custom_dumper = __utils__['yaml.get_dumper']('SafeOrderedDumper')
def boto_listelement_presenter(dumper, data):
return dumper.represent_list(list(data))
yaml.add_representer(boto.ec2.cloudwatch.listelement.ListElement,
boto_listelement_presenter,
Dumper=custom_dumper)
def dimension_presenter(dumper, data):
return dumper.represent_dict(dict(data))
yaml.add_representer(boto.ec2.cloudwatch.dimension.Dimension,
dimension_presenter, Dumper=custom_dumper)
    return __utils__['yaml.dump'](data, Dumper=custom_dumper)
|
this presenter magic makes yaml.safe_dump
work with the objects returned from
boto.describe_alarms()
|
### Input:
this presenter magic makes yaml.safe_dump
work with the objects returned from
boto.describe_alarms()
### Response:
#vtb
def _safe_dump(data):
    custom_dumper = __utils__['yaml.get_dumper']('SafeOrderedDumper')
def boto_listelement_presenter(dumper, data):
return dumper.represent_list(list(data))
yaml.add_representer(boto.ec2.cloudwatch.listelement.ListElement,
boto_listelement_presenter,
Dumper=custom_dumper)
def dimension_presenter(dumper, data):
return dumper.represent_dict(dict(data))
yaml.add_representer(boto.ec2.cloudwatch.dimension.Dimension,
dimension_presenter, Dumper=custom_dumper)
    return __utils__['yaml.dump'](data, Dumper=custom_dumper)
|
#vtb
def remove_tmp_prefix_from_filename(filename):
if not filename.startswith(dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX):
raise RuntimeError(ERROR_MESSAGES[] % {: filename})
return filename[len(dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX):]
|
Remove tmp prefix from filename.
|
### Input:
Remove tmp prefix from filename.
### Response:
#vtb
def remove_tmp_prefix_from_filename(filename):
if not filename.startswith(dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX):
raise RuntimeError(ERROR_MESSAGES[] % {: filename})
return filename[len(dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX):]
|
#vtb
def from_ZNM(cls, Z, N, M, name=''):
    df = pd.DataFrame.from_dict({'Z': Z, 'N': N, 'M': M}).set_index(['Z', 'N'])['M']
    df.name = name
    return cls(df=df, name=name)
|
Creates a table from arrays Z, N and M
Example:
________
>>> Z = [82, 82, 83]
>>> N = [126, 127, 130]
>>> M = [-21.34, -18.0, -14.45]
>>> Table.from_ZNM(Z, N, M, name='Custom Table')
Z N
82 126 -21.34
127 -18.00
83 130 -14.45
Name: Custom Table, dtype: float64
|
### Input:
Creates a table from arrays Z, N and M
Example:
________
>>> Z = [82, 82, 83]
>>> N = [126, 127, 130]
>>> M = [-21.34, -18.0, -14.45]
>>> Table.from_ZNM(Z, N, M, name='Custom Table')
Z N
82 126 -21.34
127 -18.00
83 130 -14.45
Name: Custom Table, dtype: float64
### Response:
#vtb
def from_ZNM(cls, Z, N, M, name=''):
    df = pd.DataFrame.from_dict({'Z': Z, 'N': N, 'M': M}).set_index(['Z', 'N'])['M']
    df.name = name
    return cls(df=df, name=name)
|
#vtb
def p_plus_assignment(self, t):
self.accu.add(Term(, [self.name,"gen(\""+t[1]+"\")","1"]))
|
plus_assignment : IDENT EQ PLUS
|
### Input:
plus_assignment : IDENT EQ PLUS
### Response:
#vtb
def p_plus_assignment(self, t):
self.accu.add(Term(, [self.name,"gen(\""+t[1]+"\")","1"]))
|
#vtb
def url(self, pattern, method=None, type_cast=None):
if not type_cast:
type_cast = {}
def decorator(function):
self.add(pattern, function, method, type_cast)
return function
return decorator
|
Decorator for registering a path pattern.
Args:
pattern (str): Regex pattern to match a certain path
method (str, optional): Usually used to define one of GET, POST,
PUT, DELETE. You may use whatever fits your situation though.
Defaults to None.
type_cast (dict, optional): Mapping between the param name and
one of `int`, `float` or `bool`. The value reflected by the
            provided param name will then be cast to the given type.
Defaults to None.
|
### Input:
Decorator for registering a path pattern.
Args:
pattern (str): Regex pattern to match a certain path
method (str, optional): Usually used to define one of GET, POST,
PUT, DELETE. You may use whatever fits your situation though.
Defaults to None.
type_cast (dict, optional): Mapping between the param name and
one of `int`, `float` or `bool`. The value reflected by the
            provided param name will then be cast to the given type.
Defaults to None.
### Response:
#vtb
def url(self, pattern, method=None, type_cast=None):
if not type_cast:
type_cast = {}
def decorator(function):
self.add(pattern, function, method, type_cast)
return function
return decorator
|
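A registration sketch; `Router` here is a hypothetical class exposing the `url`/`add` methods shown above, not a specific library:
router = Router()

@router.url(r'^/users/(?P<user_id>\d+)$', method='GET', type_cast={'user_id': int})
def get_user(user_id):
    return {'id': user_id}  # user_id arrives as int thanks to type_cast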
#vtb
def load_table_from_config(input_dir, config):
    path = pathlib.Path(input_dir).joinpath(config['path'])
    kwargs = config['pd_read_kwargs']
    return pd.read_csv(path, **kwargs)
|
Load table from table config dict
Args:
input_dir (path-like): directory containing input files
config (dict): mapping with keys 'name', 'path', and 'pd_read_kwargs'.
Returns:
pd.DataFrame
|
### Input:
Load table from table config dict
Args:
input_dir (path-like): directory containing input files
config (dict): mapping with keys 'name', 'path', and 'pd_read_kwargs'.
Returns:
pd.DataFrame
### Response:
#vtb
def load_table_from_config(input_dir, config):
    path = pathlib.Path(input_dir).joinpath(config['path'])
    kwargs = config['pd_read_kwargs']
    return pd.read_csv(path, **kwargs)
|
#vtb
def _get_k(self):
if not self.ready:
self.k.create()
self.ready = True
return self.k
|
Accessing self.k indirectly allows for creating the kvstore table
if necessary.
|
### Input:
Accessing self.k indirectly allows for creating the kvstore table
if necessary.
### Response:
#vtb
def _get_k(self):
if not self.ready:
self.k.create()
self.ready = True
return self.k
|
#vtb
def has_project_permissions(user: 'User', project: 'Project', request_method: str) -> bool:
if user.is_staff or user.is_superuser or project.user == user:
return True
return request_method in permissions.SAFE_METHODS and project.is_public
|
    This logic is extracted here so it can also be used with the Sanic API.
|
### Input:
This logic is extracted here so it can also be used with the Sanic API.
### Response:
#vtb
def has_project_permissions(user: 'User', project: 'Project', request_method: str) -> bool:
if user.is_staff or user.is_superuser or project.user == user:
return True
return request_method in permissions.SAFE_METHODS and project.is_public
|
#vtb
def divide(self, phi1, inplace=True):
phi = self if inplace else self.copy()
phi1 = phi1.copy()
if set(phi1.variables) - set(phi.variables):
raise ValueError("Scope of divisor should be a subset of dividend")
extra_vars = set(phi.variables) - set(phi1.variables)
if extra_vars:
slice_ = [slice(None)] * len(phi1.variables)
slice_.extend([np.newaxis] * len(extra_vars))
phi1.values = phi1.values[tuple(slice_)]
phi1.variables.extend(extra_vars)
for axis in range(phi.values.ndim):
exchange_index = phi1.variables.index(phi.variables[axis])
phi1.variables[axis], phi1.variables[exchange_index] = phi1.variables[exchange_index], phi1.variables[axis]
phi1.values = phi1.values.swapaxes(axis, exchange_index)
phi.values = phi.values / phi1.values
phi.values[np.isnan(phi.values)] = 0
if not inplace:
return phi
|
DiscreteFactor division by `phi1`.
Parameters
----------
phi1 : `DiscreteFactor` instance
The denominator for division.
inplace: boolean
If inplace=True it will modify the factor itself, else would return
a new factor.
Returns
-------
DiscreteFactor or None: if inplace=True (default) returns None
if inplace=False returns a new `DiscreteFactor` instance.
Examples
--------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
    >>> phi2 = DiscreteFactor(['x3', 'x1'], [2, 2], range(1, 5))
>>> phi1.divide(phi2)
>>> phi1.variables
['x1', 'x2', 'x3']
>>> phi1.cardinality
array([2, 3, 2])
>>> phi1.values
array([[[ 0. , 0.33333333],
[ 2. , 1. ],
[ 4. , 1.66666667]],
[[ 3. , 1.75 ],
[ 4. , 2.25 ],
[ 5. , 2.75 ]]])
|
### Input:
DiscreteFactor division by `phi1`.
Parameters
----------
phi1 : `DiscreteFactor` instance
The denominator for division.
inplace: boolean
If inplace=True it will modify the factor itself, else would return
a new factor.
Returns
-------
DiscreteFactor or None: if inplace=True (default) returns None
if inplace=False returns a new `DiscreteFactor` instance.
Examples
--------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
    >>> phi2 = DiscreteFactor(['x3', 'x1'], [2, 2], range(1, 5))
>>> phi1.divide(phi2)
>>> phi1.variables
['x1', 'x2', 'x3']
>>> phi1.cardinality
array([2, 3, 2])
>>> phi1.values
array([[[ 0. , 0.33333333],
[ 2. , 1. ],
[ 4. , 1.66666667]],
[[ 3. , 1.75 ],
[ 4. , 2.25 ],
[ 5. , 2.75 ]]])
### Response:
#vtb
def divide(self, phi1, inplace=True):
phi = self if inplace else self.copy()
phi1 = phi1.copy()
if set(phi1.variables) - set(phi.variables):
raise ValueError("Scope of divisor should be a subset of dividend")
extra_vars = set(phi.variables) - set(phi1.variables)
if extra_vars:
slice_ = [slice(None)] * len(phi1.variables)
slice_.extend([np.newaxis] * len(extra_vars))
phi1.values = phi1.values[tuple(slice_)]
phi1.variables.extend(extra_vars)
for axis in range(phi.values.ndim):
exchange_index = phi1.variables.index(phi.variables[axis])
phi1.variables[axis], phi1.variables[exchange_index] = phi1.variables[exchange_index], phi1.variables[axis]
phi1.values = phi1.values.swapaxes(axis, exchange_index)
phi.values = phi.values / phi1.values
phi.values[np.isnan(phi.values)] = 0
if not inplace:
return phi
|
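The axis gymnastics above pad the divisor with `np.newaxis` for each variable it lacks, then swap axes until its variable order matches the dividend before broadcasting the division. A minimal standalone illustration of that idea (array contents are arbitrary):
import numpy as np

a = np.arange(12, dtype=float).reshape(2, 3, 2)  # dividend over (x1, x2, x3)
b = np.array([[1.0, 3.0], [2.0, 4.0]])           # divisor over (x3, x1)
b = b[:, :, np.newaxis]                          # add an axis for the missing x2
b = b.swapaxes(0, 1).swapaxes(1, 2)              # reorder to (x1, x2, x3)
out = a / b
out[np.isnan(out)] = 0                           # guard 0/0, as the method does
print(out.shape)  # (2, 3, 2)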
#vtb
def get_archive(self, archive_name, default_version=None):
auth, archive_name = self._normalize_archive_name(archive_name)
res = self.manager.get_archive(archive_name)
if default_version is None:
default_version = self._default_versions.get(archive_name, None)
if (auth is not None) and (auth != res[]):
raise ValueError(
.format(archive_name, auth) +
.format(
res[], archive_name))
return self._ArchiveConstructor(
api=self,
default_version=default_version,
**res)
|
Retrieve a data archive
Parameters
----------
archive_name: str
Name of the archive to retrieve
default_version: version
str or :py:class:`~distutils.StrictVersion` giving the default
version number to be used on read operations
Returns
-------
archive: object
New :py:class:`~datafs.core.data_archive.DataArchive` object
Raises
------
KeyError:
A KeyError is raised when the ``archive_name`` is not found
|
### Input:
Retrieve a data archive
Parameters
----------
archive_name: str
Name of the archive to retrieve
default_version: version
str or :py:class:`~distutils.StrictVersion` giving the default
version number to be used on read operations
Returns
-------
archive: object
New :py:class:`~datafs.core.data_archive.DataArchive` object
Raises
------
KeyError:
A KeyError is raised when the ``archive_name`` is not found
### Response:
#vtb
def get_archive(self, archive_name, default_version=None):
auth, archive_name = self._normalize_archive_name(archive_name)
res = self.manager.get_archive(archive_name)
if default_version is None:
default_version = self._default_versions.get(archive_name, None)
if (auth is not None) and (auth != res[]):
raise ValueError(
.format(archive_name, auth) +
.format(
res[], archive_name))
return self._ArchiveConstructor(
api=self,
default_version=default_version,
**res)
|
#vtb
def walk(self):
if self.verbose > 1:
        print_('\t' + self._id + ' Running Walk proposal kernel')
phi = self.phi
theta = self.walk_theta
u = random(len(phi))
z = (theta / (1 + theta)) * (theta * u ** 2 + 2 * u - 1)
if self._prime:
xp, x = self.values
else:
x, xp = self.values
if self.verbose > 1:
        print_('\t' + 'Current value = ' + str(x))
x = x + phi * (x - xp) * z
if self.verbose > 1:
        print_('\t' + 'Proposed value = ' + str(x))
self.stochastic.value = x
self.hastings_factor = 0.0
|
Walk proposal kernel
|
### Input:
Walk proposal kernel
### Response:
#vtb
def walk(self):
if self.verbose > 1:
        print_('\t' + self._id + ' Running Walk proposal kernel')
phi = self.phi
theta = self.walk_theta
u = random(len(phi))
z = (theta / (1 + theta)) * (theta * u ** 2 + 2 * u - 1)
if self._prime:
xp, x = self.values
else:
x, xp = self.values
if self.verbose > 1:
        print_('\t' + 'Current value = ' + str(x))
x = x + phi * (x - xp) * z
if self.verbose > 1:
        print_('\t' + 'Proposed value = ' + str(x))
self.stochastic.value = x
self.hastings_factor = 0.0
|
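For u in [0, 1), the multiplier z = theta/(1+theta) * (theta*u**2 + 2*u - 1) ranges over [-theta/(1+theta), theta), so the proposal perturbs x along (x - xp) by a bounded, theta-controlled factor. A quick numeric check of that range:
import numpy as np

theta = 1.5
u = np.random.random(100000)
z = (theta / (1 + theta)) * (theta * u ** 2 + 2 * u - 1)
print(z.min() >= -theta / (1 + theta), z.max() < theta)  # True True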
#vtb
def from_string(cls, string, relpath=None, encoding=None, is_sass=None):
if isinstance(string, six.text_type):
if encoding is None:
encoding = determine_encoding(string)
byte_contents = string.encode(encoding)
text_contents = string
elif isinstance(string, six.binary_type):
encoding = determine_encoding(string)
byte_contents = string
text_contents = string.decode(encoding)
else:
raise TypeError("Expected text or bytes, got {0!r}".format(string))
origin = None
if relpath is None:
m = hashlib.sha256()
m.update(byte_contents)
relpath = repr("string:{0}:{1}".format(
m.hexdigest()[:16], text_contents[:100]))
return cls(
origin, relpath, text_contents, encoding=encoding,
is_sass=is_sass,
)
|
Read Sass source from the contents of a string.
The origin is always None. `relpath` defaults to "string:...".
|
### Input:
Read Sass source from the contents of a string.
The origin is always None. `relpath` defaults to "string:...".
### Response:
#vtb
def from_string(cls, string, relpath=None, encoding=None, is_sass=None):
if isinstance(string, six.text_type):
if encoding is None:
encoding = determine_encoding(string)
byte_contents = string.encode(encoding)
text_contents = string
elif isinstance(string, six.binary_type):
encoding = determine_encoding(string)
byte_contents = string
text_contents = string.decode(encoding)
else:
raise TypeError("Expected text or bytes, got {0!r}".format(string))
origin = None
if relpath is None:
m = hashlib.sha256()
m.update(byte_contents)
relpath = repr("string:{0}:{1}".format(
m.hexdigest()[:16], text_contents[:100]))
return cls(
origin, relpath, text_contents, encoding=encoding,
is_sass=is_sass,
)
|
#vtb
def abbreviations(text):
return PreProcessorRegex(
search_args=symbols.ABBREVIATIONS,
search_func=lambda x: r"(?<={})(?=\.).".format(x),
        repl='', flags=re.IGNORECASE).run(text)
|
Remove periods after an abbreviation from a list of known
    abbreviations that can be spoken the same without that period. This
prevents having to handle tokenization of that period.
Note:
Could potentially remove the ending period of a sentence.
Note:
Abbreviations that Google Translate can't pronounce without
(or even with) a period should be added as a word substitution with a
:class:`PreProcessorSub` pre-processor. Ex.: 'Esq.', 'Esquire'.
|
### Input:
Remove periods after an abbreviation from a list of known
abbreviations that can be spoken the same without that period. This
prevents having to handle tokenization of that period.
Note:
Could potentially remove the ending period of a sentence.
Note:
Abbreviations that Google Translate can't pronounce without
(or even with) a period should be added as a word substitution with a
:class:`PreProcessorSub` pre-processor. Ex.: 'Esq.', 'Esquire'.
### Response:
#vtb
def abbreviations(text):
return PreProcessorRegex(
search_args=symbols.ABBREVIATIONS,
search_func=lambda x: r"(?<={})(?=\.).".format(x),
        repl='', flags=re.IGNORECASE).run(text)
|
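The substitution drops only a period that immediately follows a known abbreviation, using a lookbehind/lookahead pair around the consumed character. A standalone regex demo (the abbreviation list here is illustrative, not gTTS's actual symbol table):
import re

abbreviations = ['dr', 'mr']
text = 'Dr. Smith met Mr. Jones.'
for abbr in abbreviations:
    text = re.sub(r"(?<={})(?=\.).".format(abbr), '', text, flags=re.IGNORECASE)
print(text)  # Dr Smith met Mr Jones.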
#vtb
def isDiurnal(self):
sun = self.getObject(const.SUN)
mc = self.getAngle(const.MC)
lat = self.pos.lat
sunRA, sunDecl = utils.eqCoords(sun.lon, sun.lat)
mcRA, mcDecl = utils.eqCoords(mc.lon, 0)
return utils.isAboveHorizon(sunRA, sunDecl, mcRA, lat)
|
Returns true if this chart is diurnal.
|
### Input:
Returns true if this chart is diurnal.
### Response:
#vtb
def isDiurnal(self):
sun = self.getObject(const.SUN)
mc = self.getAngle(const.MC)
lat = self.pos.lat
sunRA, sunDecl = utils.eqCoords(sun.lon, sun.lat)
mcRA, mcDecl = utils.eqCoords(mc.lon, 0)
return utils.isAboveHorizon(sunRA, sunDecl, mcRA, lat)
|
#vtb
def service_highstate(requires=True):
ret = {}
running = running_service_owners()
for service in running:
        ret[service] = {'service': ['running']}
        if requires:
            ret[service]['service'].append(
                {'require': {'pkg': running[service]}}
            )
    enabled = enabled_service_owners()
    for service in enabled:
        if service in ret:
            ret[service]['service'].append({'enable': True})
        else:
            ret[service] = {'service': [{'enable': True}]}
        if requires:
            exists = False
            for item in ret[service]['service']:
                if isinstance(item, dict) and next(six.iterkeys(item)) == 'require':
                    exists = True
            if not exists:
                ret[service]['service'].append(
                    {'require': {'pkg': enabled[service]}}
                )
return ret
|
Return running and enabled services in a highstate structure. By default
also returns package dependencies for those services, which means that
package definitions must be created outside this function. To drop the
package dependencies, set ``requires`` to False.
CLI Example:
salt myminion introspect.service_highstate
salt myminion introspect.service_highstate requires=False
|
### Input:
Return running and enabled services in a highstate structure. By default
also returns package dependencies for those services, which means that
package definitions must be created outside this function. To drop the
package dependencies, set ``requires`` to False.
CLI Example:
salt myminion introspect.service_highstate
salt myminion introspect.service_highstate requires=False
### Response:
#vtb
def service_highstate(requires=True):
ret = {}
running = running_service_owners()
for service in running:
        ret[service] = {'service': ['running']}
        if requires:
            ret[service]['service'].append(
                {'require': {'pkg': running[service]}}
            )
    enabled = enabled_service_owners()
    for service in enabled:
        if service in ret:
            ret[service]['service'].append({'enable': True})
        else:
            ret[service] = {'service': [{'enable': True}]}
        if requires:
            exists = False
            for item in ret[service]['service']:
                if isinstance(item, dict) and next(six.iterkeys(item)) == 'require':
                    exists = True
            if not exists:
                ret[service]['service'].append(
                    {'require': {'pkg': enabled[service]}}
                )
return ret
|
#vtb
def get_filters(cls, raw_filters, num_cols, columns_dict):
    filters = None
    for flt in raw_filters:
        col = flt.get('col')
        op = flt.get('op')
        eq = flt.get('val')
        if (
                not col or
                not op or
                (eq is None and op not in ('IS NULL', 'IS NOT NULL'))):
            continue
        column_def = columns_dict.get(col)
        dim_spec = column_def.dimension_spec if column_def else None
        extraction_fn = None
        if dim_spec and 'extractionFn' in dim_spec:
            (col, extraction_fn) = DruidDatasource._create_extraction_fn(dim_spec)
        cond = None
        is_numeric_col = col in num_cols
        is_list_target = op in ('in', 'not in')
        if op in ('in', 'not in'):
            eq = cls.filter_values_handler(
                eq, is_list_target=is_list_target,
                target_column_is_numeric=is_numeric_col)
            if extraction_fn is not None:
                cond = Filter(
                    dimension=col,
                    values=eq,
                    type='in',
                    extraction_function=extraction_fn,
                )
            elif len(eq) == 1:
                cond = Dimension(col) == eq[0]
            else:
                fields = []
                for s in eq:
                    fields.append(Dimension(col) == s)
                cond = Filter(type='or', fields=fields)
            if op == 'not in':
                cond = ~cond
        elif op == 'regex':
            cond = Filter(
                extraction_function=extraction_fn,
                type='regex',
                pattern=eq,
                dimension=col,
            )
        elif op == '>=':
            cond = Filter(
                type='bound',
                extraction_function=extraction_fn,
                dimension=col,
                lowerStrict=False,
                upperStrict=False,
                lower=eq,
                upper=None,
                alphaNumeric=is_numeric_col,
            )
        elif op == '<=':
            cond = Filter(
                type='bound',
                extraction_function=extraction_fn,
                dimension=col,
                lowerStrict=False,
                upperStrict=False,
                lower=None,
                upper=eq,
                alphaNumeric=is_numeric_col,
            )
        elif op == '>':
            cond = Filter(
                type='bound',
                extraction_function=extraction_fn,
                lowerStrict=True,
                upperStrict=False,
                dimension=col,
                lower=eq,
                upper=None,
                alphaNumeric=is_numeric_col,
            )
        elif op == '<':
            cond = Filter(
                type='bound',
                extraction_function=extraction_fn,
                upperStrict=True,
                lowerStrict=False,
                dimension=col,
                lower=None,
                upper=eq,
                alphaNumeric=is_numeric_col,
            )
        elif op == 'IS NULL':
            cond = Dimension(col) == None
        elif op == 'IS NOT NULL':
            cond = Dimension(col) != None
        if filters:
            filters = Filter(type='and', fields=[
                cond,
                filters,
            ])
        else:
            filters = cond
    return filters
|
Given Superset filter data structure, returns pydruid Filter(s)
|
### Input:
Given Superset filter data structure, returns pydruid Filter(s)
### Response:
#vtb
def get_filters(cls, raw_filters, num_cols, columns_dict):
    filters = None
    for flt in raw_filters:
        col = flt.get('col')
        op = flt.get('op')
        eq = flt.get('val')
        if (
                not col or
                not op or
                (eq is None and op not in ('IS NULL', 'IS NOT NULL'))):
            continue
        column_def = columns_dict.get(col)
        dim_spec = column_def.dimension_spec if column_def else None
        extraction_fn = None
        if dim_spec and 'extractionFn' in dim_spec:
            (col, extraction_fn) = DruidDatasource._create_extraction_fn(dim_spec)
        cond = None
        is_numeric_col = col in num_cols
        is_list_target = op in ('in', 'not in')
        if op in ('in', 'not in'):
            eq = cls.filter_values_handler(
                eq, is_list_target=is_list_target,
                target_column_is_numeric=is_numeric_col)
            if extraction_fn is not None:
                cond = Filter(
                    dimension=col,
                    values=eq,
                    type='in',
                    extraction_function=extraction_fn,
                )
            elif len(eq) == 1:
                cond = Dimension(col) == eq[0]
            else:
                fields = []
                for s in eq:
                    fields.append(Dimension(col) == s)
                cond = Filter(type='or', fields=fields)
            if op == 'not in':
                cond = ~cond
        elif op == 'regex':
            cond = Filter(
                extraction_function=extraction_fn,
                type='regex',
                pattern=eq,
                dimension=col,
            )
        elif op == '>=':
            cond = Filter(
                type='bound',
                extraction_function=extraction_fn,
                dimension=col,
                lowerStrict=False,
                upperStrict=False,
                lower=eq,
                upper=None,
                alphaNumeric=is_numeric_col,
            )
        elif op == '<=':
            cond = Filter(
                type='bound',
                extraction_function=extraction_fn,
                dimension=col,
                lowerStrict=False,
                upperStrict=False,
                lower=None,
                upper=eq,
                alphaNumeric=is_numeric_col,
            )
        elif op == '>':
            cond = Filter(
                type='bound',
                extraction_function=extraction_fn,
                lowerStrict=True,
                upperStrict=False,
                dimension=col,
                lower=eq,
                upper=None,
                alphaNumeric=is_numeric_col,
            )
        elif op == '<':
            cond = Filter(
                type='bound',
                extraction_function=extraction_fn,
                upperStrict=True,
                lowerStrict=False,
                dimension=col,
                lower=None,
                upper=eq,
                alphaNumeric=is_numeric_col,
            )
        elif op == 'IS NULL':
            cond = Dimension(col) == None
        elif op == 'IS NOT NULL':
            cond = Dimension(col) != None
        if filters:
            filters = Filter(type='and', fields=[
                cond,
                filters,
            ])
        else:
            filters = cond
    return filters
|
#vtb
def get_model_from_path_string(root_model, path):
    for path_section in path.split('__'):
if path_section:
try:
field, model, direct, m2m = _get_field_by_name(root_model, path_section)
except FieldDoesNotExist:
return root_model
if direct:
if _get_remote_field(field):
try:
root_model = _get_remote_field(field).parent_model()
except AttributeError:
root_model = _get_remote_field(field).model
else:
            if hasattr(field, 'related_model'):
root_model = field.related_model
else:
root_model = field.model
return root_model
|
Return a model class for a related model
root_model is the class of the initial model
path is like foo__bar where bar is related to foo
|
### Input:
Return a model class for a related model
root_model is the class of the initial model
path is like foo__bar where bar is related to foo
### Response:
#vtb
def get_model_from_path_string(root_model, path):
    for path_section in path.split('__'):
if path_section:
try:
field, model, direct, m2m = _get_field_by_name(root_model, path_section)
except FieldDoesNotExist:
return root_model
if direct:
if _get_remote_field(field):
try:
root_model = _get_remote_field(field).parent_model()
except AttributeError:
root_model = _get_remote_field(field).model
else:
            if hasattr(field, 'related_model'):
root_model = field.related_model
else:
root_model = field.model
return root_model
|
#vtb
def calc_steady_state_dist(R):
w, v = np.linalg.eig(R)
for i in range(4):
if np.abs(w[i] - 1) < 1e-8:
return np.real(v[:, i] / np.sum(v[:, i]))
return -1
|
Calculate the steady state dist of a 4 state markov transition matrix.
Parameters
----------
R : ndarray
Markov transition matrix
Returns
-------
p_ss : ndarray
Steady state probability distribution
|
### Input:
Calculate the steady state dist of a 4 state markov transition matrix.
Parameters
----------
R : ndarray
Markov transition matrix
Returns
-------
p_ss : ndarray
Steady state probability distribution
### Response:
#vtb
def calc_steady_state_dist(R):
w, v = np.linalg.eig(R)
for i in range(4):
if np.abs(w[i] - 1) < 1e-8:
return np.real(v[:, i] / np.sum(v[:, i]))
return -1
|
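A toy check, assuming R is column-stochastic so that the right eigenvector at eigenvalue 1 is the stationary distribution the function looks for:
import numpy as np

R = np.array([
    [0.9, 0.1, 0.0, 0.0],
    [0.1, 0.8, 0.1, 0.0],
    [0.0, 0.1, 0.8, 0.1],
    [0.0, 0.0, 0.1, 0.9],
])  # each column sums to 1
p_ss = calc_steady_state_dist(R)
print(np.allclose(R @ p_ss, p_ss))  # True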
#vtb
def logger(name=None, save=False):
logger = logging.getLogger(name)
if save:
        logformat = '%(asctime)s %(levelname)s %(message)s'  # assumed format; original literal was stripped
        log_file_path = 'debug.log'  # assumed path; original literal was stripped
        open(log_file_path, 'w').write('')
logger.setLevel(logging.DEBUG)
logger_handler = logging.FileHandler(log_file_path)
logger_handler.setFormatter(logging.Formatter(logformat))
else:
logger_handler = NullHandler()
logger.addHandler(logger_handler)
return logger
|
Init and configure logger.
|
### Input:
Init and configure logger.
### Response:
#vtb
def logger(name=None, save=False):
logger = logging.getLogger(name)
if save:
        logformat = '%(asctime)s %(levelname)s %(message)s'  # assumed format; original literal was stripped
        log_file_path = 'debug.log'  # assumed path; original literal was stripped
        open(log_file_path, 'w').write('')
logger.setLevel(logging.DEBUG)
logger_handler = logging.FileHandler(log_file_path)
logger_handler.setFormatter(logging.Formatter(logformat))
else:
logger_handler = NullHandler()
logger.addHandler(logger_handler)
return logger
|
#vtb
def find_usage(self):
logger.debug("Checking usage for service %s", self.service_name)
self.connect()
for lim in self.limits.values():
lim._reset_usage()
self._find_usage_nodes()
self._find_usage_subnet_groups()
self._find_usage_parameter_groups()
self._find_usage_security_groups()
self._have_usage = True
logger.debug("Done checking usage.")
|
Determine the current usage for each limit of this service,
and update corresponding Limit via
:py:meth:`~.AwsLimit._add_current_usage`.
|
### Input:
Determine the current usage for each limit of this service,
and update corresponding Limit via
:py:meth:`~.AwsLimit._add_current_usage`.
### Response:
#vtb
def find_usage(self):
logger.debug("Checking usage for service %s", self.service_name)
self.connect()
for lim in self.limits.values():
lim._reset_usage()
self._find_usage_nodes()
self._find_usage_subnet_groups()
self._find_usage_parameter_groups()
self._find_usage_security_groups()
self._have_usage = True
logger.debug("Done checking usage.")
|
#vtb
def parse_raxml(handle):
    s = ''.join(handle.readlines())
    result = {}
    try_set_fields(result, r'(?P<program>RAxML version [0-9.]+)', s)
    try_set_fields(result, r'(?P<datatype>DNA|RNA|AA)', s)
    result['empirical_frequencies'] = (
        result['datatype'] != 'AA' or
        re.search('empirical base frequencies', s, re.IGNORECASE) is not None)
    try_set_fields(result, r'Substitution Matrix: (?P<subs_model>\w+)', s)
    rates = {}
    if result['datatype'] != 'AA':
        try_set_fields(rates,
                       (r"rates\[0\] ac ag at cg ct gt: "
                        r"(?P<ac>[0-9.]+) (?P<ag>[0-9.]+) (?P<at>[0-9.]+) "
                        r"(?P<cg>[0-9.]+) (?P<ct>[0-9.]+) (?P<gt>[0-9.]+)"),
                       s, hook=float)
        try_set_fields(rates, r'rate A <-> C: (?P<ac>[0-9.]+)', s, hook=float)
        try_set_fields(rates, r'rate A <-> G: (?P<ag>[0-9.]+)', s, hook=float)
        try_set_fields(rates, r'rate A <-> T: (?P<at>[0-9.]+)', s, hook=float)
        try_set_fields(rates, r'rate C <-> G: (?P<cg>[0-9.]+)', s, hook=float)
        try_set_fields(rates, r'rate C <-> T: (?P<ct>[0-9.]+)', s, hook=float)
        try_set_fields(rates, r'rate G <-> T: (?P<gt>[0-9.]+)', s, hook=float)
    if len(rates) > 0:
        result['subs_rates'] = rates
    result['gamma'] = {'n_cats': 4}
    try_set_fields(result['gamma'],
                   r"alpha[\[\]0-9]*: (?P<alpha>[0-9.]+)", s, hook=float)
    result['ras_model'] = 'gamma'
    return result
|
Parse RAxML's summary output.
*handle* should be an open file handle containing the RAxML
output. It is parsed and a dictionary returned.
|
### Input:
Parse RAxML's summary output.
*handle* should be an open file handle containing the RAxML
output. It is parsed and a dictionary returned.
### Response:
#vtb
def parse_raxml(handle):
    s = ''.join(handle.readlines())
    result = {}
    try_set_fields(result, r'(?P<program>RAxML version [0-9.]+)', s)
    try_set_fields(result, r'(?P<datatype>DNA|RNA|AA)', s)
    result['empirical_frequencies'] = (
        result['datatype'] != 'AA' or
        re.search('empirical base frequencies', s, re.IGNORECASE) is not None)
    try_set_fields(result, r'Substitution Matrix: (?P<subs_model>\w+)', s)
    rates = {}
    if result['datatype'] != 'AA':
        try_set_fields(rates,
                       (r"rates\[0\] ac ag at cg ct gt: "
                        r"(?P<ac>[0-9.]+) (?P<ag>[0-9.]+) (?P<at>[0-9.]+) "
                        r"(?P<cg>[0-9.]+) (?P<ct>[0-9.]+) (?P<gt>[0-9.]+)"),
                       s, hook=float)
        try_set_fields(rates, r'rate A <-> C: (?P<ac>[0-9.]+)', s, hook=float)
        try_set_fields(rates, r'rate A <-> G: (?P<ag>[0-9.]+)', s, hook=float)
        try_set_fields(rates, r'rate A <-> T: (?P<at>[0-9.]+)', s, hook=float)
        try_set_fields(rates, r'rate C <-> G: (?P<cg>[0-9.]+)', s, hook=float)
        try_set_fields(rates, r'rate C <-> T: (?P<ct>[0-9.]+)', s, hook=float)
        try_set_fields(rates, r'rate G <-> T: (?P<gt>[0-9.]+)', s, hook=float)
    if len(rates) > 0:
        result['subs_rates'] = rates
    result['gamma'] = {'n_cats': 4}
    try_set_fields(result['gamma'],
                   r"alpha[\[\]0-9]*: (?P<alpha>[0-9.]+)", s, hook=float)
    result['ras_model'] = 'gamma'
    return result
|
#vtb
def current_changed(self, i):
m = self.model()
ri = self.rootModelIndex()
index = m.index(i, 0, ri)
self.new_root.emit(index)
|
Slot for when the current index changes.
Emits the :data:`AbstractLevel.new_root` signal.
    :param i: the new current index
:type index: int
:returns: None
:rtype: None
:raises: None
|
### Input:
Slot for when the current index changes.
Emits the :data:`AbstractLevel.new_root` signal.
    :param i: the new current index
:type index: int
:returns: None
:rtype: None
:raises: None
### Response:
#vtb
def current_changed(self, i):
m = self.model()
ri = self.rootModelIndex()
index = m.index(i, 0, ri)
self.new_root.emit(index)
|
#vtb
def line(self, serie, rescale=False):
serie_node = self.svg.serie(serie)
if rescale and self.secondary_series:
points = self._rescale(serie.points)
else:
points = serie.points
view_values = list(map(self.view, points))
if serie.show_dots:
for i, (x, y) in enumerate(view_values):
if None in (x, y):
continue
if self.logarithmic:
if points[i][1] is None or points[i][1] <= 0:
continue
if (serie.show_only_major_dots and self.x_labels
and i < len(self.x_labels)
and self.x_labels[i] not in self._x_labels_major):
continue
metadata = serie.metadata.get(i)
classes = []
                if x > self.view.width / 2:
                    classes.append('left')
                if y > self.view.height / 2:
                    classes.append('top')
                classes = ' '.join(classes)
self._confidence_interval(
                    serie_node['overlay'], x, y, serie.values[i], metadata
)
dots = decorate(
self.svg,
                    self.svg.node(serie_node['overlay'], class_="dots"),
metadata
)
val = self._format(serie, i)
alter(
self.svg.transposable_node(
dots,
                        'circle',
cx=x,
cy=y,
r=serie.dots_size,
                        class_='dot reactive tooltip-trigger'
), metadata
)
self._tooltip_data(
dots, val, x, y, xlabel=self._get_x_label(i)
)
self._static_value(
serie_node, val, x + self.style.value_font_size,
y + self.style.value_font_size, metadata
)
if serie.stroke:
if self.interpolate:
points = serie.interpolated
if rescale and self.secondary_series:
points = self._rescale(points)
view_values = list(map(self.view, points))
if serie.fill:
view_values = self._fill(view_values)
if serie.allow_interruptions:
sequences = []
cur_sequence = []
for x, y in view_values:
if y is None and len(cur_sequence) > 0:
sequences.append(cur_sequence)
cur_sequence = []
elif y is None:
continue
else:
cur_sequence.append((x, y))
if len(cur_sequence) > 0:
sequences.append(cur_sequence)
else:
sequences = [view_values]
if self.logarithmic:
for seq in sequences:
for ele in seq[::-1]:
y = points[seq.index(ele)][1]
if y is None or y <= 0:
del seq[seq.index(ele)]
for seq in sequences:
self.svg.line(
                    serie_node['plot'],
seq,
close=self._self_close,
                    class_='line reactive' +
                    (' nofill' if not serie.fill else '')
)
|
Draw the line serie
|
### Input:
Draw the line serie
### Response:
#vtb
def line(self, serie, rescale=False):
serie_node = self.svg.serie(serie)
if rescale and self.secondary_series:
points = self._rescale(serie.points)
else:
points = serie.points
view_values = list(map(self.view, points))
if serie.show_dots:
for i, (x, y) in enumerate(view_values):
if None in (x, y):
continue
if self.logarithmic:
if points[i][1] is None or points[i][1] <= 0:
continue
if (serie.show_only_major_dots and self.x_labels
and i < len(self.x_labels)
and self.x_labels[i] not in self._x_labels_major):
continue
metadata = serie.metadata.get(i)
classes = []
                if x > self.view.width / 2:
                    classes.append('left')
                if y > self.view.height / 2:
                    classes.append('top')
                classes = ' '.join(classes)
self._confidence_interval(
                    serie_node['overlay'], x, y, serie.values[i], metadata
)
dots = decorate(
self.svg,
                    self.svg.node(serie_node['overlay'], class_="dots"),
metadata
)
val = self._format(serie, i)
alter(
self.svg.transposable_node(
dots,
                        'circle',
cx=x,
cy=y,
r=serie.dots_size,
                        class_='dot reactive tooltip-trigger'
), metadata
)
self._tooltip_data(
dots, val, x, y, xlabel=self._get_x_label(i)
)
self._static_value(
serie_node, val, x + self.style.value_font_size,
y + self.style.value_font_size, metadata
)
if serie.stroke:
if self.interpolate:
points = serie.interpolated
if rescale and self.secondary_series:
points = self._rescale(points)
view_values = list(map(self.view, points))
if serie.fill:
view_values = self._fill(view_values)
if serie.allow_interruptions:
sequences = []
cur_sequence = []
for x, y in view_values:
if y is None and len(cur_sequence) > 0:
sequences.append(cur_sequence)
cur_sequence = []
elif y is None:
continue
else:
cur_sequence.append((x, y))
if len(cur_sequence) > 0:
sequences.append(cur_sequence)
else:
sequences = [view_values]
if self.logarithmic:
for seq in sequences:
for ele in seq[::-1]:
y = points[seq.index(ele)][1]
if y is None or y <= 0:
del seq[seq.index(ele)]
for seq in sequences:
self.svg.line(
                    serie_node['plot'],
seq,
close=self._self_close,
                    class_='line reactive' +
                    (' nofill' if not serie.fill else '')
)
|
#vtb
def as_dict(self, cache=None, fetch=True):
if not self._fetched and fetch:
info = self.fetch(cache)
elif self._use_cache(cache):
info = self._attrs.copy()
else:
info = {}
info.update(url=self.url)
return info
|
Return torrent properties as a dictionary.
Set the cache flag to False to disable the cache. On the other hand,
set the fetch flag to False to avoid fetching data if it's not cached.
|
### Input:
Return torrent properties as a dictionary.
Set the cache flag to False to disable the cache. On the other hand,
set the fetch flag to False to avoid fetching data if it's not cached.
### Response:
#vtb
def as_dict(self, cache=None, fetch=True):
if not self._fetched and fetch:
info = self.fetch(cache)
elif self._use_cache(cache):
info = self._attrs.copy()
else:
info = {}
info.update(url=self.url)
return info
|
#vtb
def create_ui(self):
builder = gtk.Builder()
        # resource path and object ids below are assumed; original literals were stripped
        glade_str = pkgutil.get_data(__name__,
                                     'glade/form_view_dialog.glade')
        builder.add_from_string(glade_str)
        self.window = builder.get_object('window')
        self.vbox_form = builder.get_object('vbox_form')
if self.title:
self.window.set_title(self.title)
if self.short_desc:
self.short_label = gtk.Label()
self.short_label.set_text(self.short_desc)
self.short_label.set_alignment(0, .5)
self.vbox_form.pack_start(self.short_label, expand=True, fill=True)
if self.long_desc:
self.long_label = gtk.Label()
self.long_label.set_text(self.long_desc)
self.long_label.set_alignment(.1, .5)
            self.long_expander = gtk.Expander(label='Details')  # label assumed
self.long_expander.set_spacing(5)
self.long_expander.add(self.long_label)
self.vbox_form.pack_start(self.long_expander, expand=True,
fill=True)
if self.parent is None:
self.parent = self.default_parent
self.window.set_default_response(gtk.RESPONSE_OK)
self.window.set_position(gtk.WIN_POS_CENTER_ON_PARENT)
if self.parent:
self.window.set_transient_for(self.parent)
self.window.show_all()
|
.. versionchanged:: 0.21.2
        Load the builder configuration file using :func:`pkgutil.get_data`,
which supports loading from `.zip` archives (e.g., in an app
packaged with Py2Exe).
|
### Input:
.. versionchanged:: 0.21.2
    Load the builder configuration file using :func:`pkgutil.get_data`,
which supports loading from `.zip` archives (e.g., in an app
packaged with Py2Exe).
### Response:
#vtb
def create_ui(self):
builder = gtk.Builder()
        # resource path and object ids below are assumed; original literals were stripped
        glade_str = pkgutil.get_data(__name__,
                                     'glade/form_view_dialog.glade')
        builder.add_from_string(glade_str)
        self.window = builder.get_object('window')
        self.vbox_form = builder.get_object('vbox_form')
if self.title:
self.window.set_title(self.title)
if self.short_desc:
self.short_label = gtk.Label()
self.short_label.set_text(self.short_desc)
self.short_label.set_alignment(0, .5)
self.vbox_form.pack_start(self.short_label, expand=True, fill=True)
if self.long_desc:
self.long_label = gtk.Label()
self.long_label.set_text(self.long_desc)
self.long_label.set_alignment(.1, .5)
            self.long_expander = gtk.Expander(label='Details')  # label assumed
self.long_expander.set_spacing(5)
self.long_expander.add(self.long_label)
self.vbox_form.pack_start(self.long_expander, expand=True,
fill=True)
if self.parent is None:
self.parent = self.default_parent
self.window.set_default_response(gtk.RESPONSE_OK)
self.window.set_position(gtk.WIN_POS_CENTER_ON_PARENT)
if self.parent:
self.window.set_transient_for(self.parent)
self.window.show_all()
|
#vtb
def which(cmd):
def is_exe(fp):
return os.path.isfile(fp) and os.access(fp, os.X_OK)
fpath, fname = os.path.split(cmd)
if fpath:
if is_exe(cmd):
return cmd
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, cmd)
if is_exe(exe_file):
return exe_file
return None
|
    Returns the full path to an executable.
Args:
cmd (str): Executable command to search for.
Returns:
(str) Full path to command. None if it is not found.
Example::
full_path_to_python = which("python")
|
### Input:
Returns the full path to an executable.
Args:
cmd (str): Executable command to search for.
Returns:
(str) Full path to command. None if it is not found.
Example::
full_path_to_python = which("python")
### Response:
#vtb
def which(cmd):
def is_exe(fp):
return os.path.isfile(fp) and os.access(fp, os.X_OK)
fpath, fname = os.path.split(cmd)
if fpath:
if is_exe(cmd):
return cmd
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, cmd)
if is_exe(exe_file):
return exe_file
return None
|
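Usage sketch; on Python 3.3+ the stdlib offers the same behavior via shutil.which:
import shutil

print(which('python'))         # e.g. /usr/bin/python, or None if absent
print(shutil.which('python'))  # stdlib equivalent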
#vtb
def find_n50(contig_lengths_dict, genome_length_dict):
n50_dict = dict()
for file_name, contig_lengths in contig_lengths_dict.items():
currentlength = 0
for contig_length in contig_lengths:
currentlength += contig_length
if currentlength >= genome_length_dict[file_name] * 0.5:
n50_dict[file_name] = contig_length
break
return n50_dict
|
Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total
genome size is contained in contigs equal to or larger than this contig
:param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
:param genome_length_dict: dictionary of strain name: total genome length
:return: n50_dict: dictionary of strain name: N50
|
### Input:
Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total
genome size is contained in contigs equal to or larger than this contig
:param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths
:param genome_length_dict: dictionary of strain name: total genome length
:return: n50_dict: dictionary of strain name: N50
### Response:
#vtb
def find_n50(contig_lengths_dict, genome_length_dict):
n50_dict = dict()
for file_name, contig_lengths in contig_lengths_dict.items():
currentlength = 0
for contig_length in contig_lengths:
currentlength += contig_length
if currentlength >= genome_length_dict[file_name] * 0.5:
n50_dict[file_name] = contig_length
break
return n50_dict
|
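A worked toy case: with contigs [50, 30, 20] (total 100), the running sum reaches half the genome at the first contig, so N50 = 50. The lists must already be sorted largest-first, as the docstring notes:
contig_lengths = {'strainA': [50, 30, 20]}
genome_lengths = {'strainA': 100}
print(find_n50(contig_lengths, genome_lengths))  # {'strainA': 50}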
#vtb
def transform(self, maps):
out = {}
out[parameters.mass1] = conversions.mass1_from_mchirp_eta(
maps[parameters.mchirp],
maps[parameters.eta])
out[parameters.mass2] = conversions.mass2_from_mchirp_eta(
maps[parameters.mchirp],
maps[parameters.eta])
return self.format_output(maps, out)
|
This function transforms from chirp mass and symmetric mass ratio to
component masses.
Parameters
----------
maps : a mapping object
Examples
--------
Convert a dict of numpy.array:
>>> import numpy
>>> from pycbc import transforms
>>> t = transforms.MchirpEtaToMass1Mass2()
>>> t.transform({'mchirp': numpy.array([10.]), 'eta': numpy.array([0.25])})
{'mass1': array([ 16.4375183]), 'mass2': array([ 8.21875915]),
'mchirp': array([ 10.]), 'eta': array([ 0.25])}
Returns
-------
out : dict
A dict with key as parameter name and value as numpy.array or float
of transformed values.
|
### Input:
This function transforms from chirp mass and symmetric mass ratio to
component masses.
Parameters
----------
maps : a mapping object
Examples
--------
Convert a dict of numpy.array:
>>> import numpy
>>> from pycbc import transforms
>>> t = transforms.MchirpEtaToMass1Mass2()
>>> t.transform({'mchirp': numpy.array([10.]), 'eta': numpy.array([0.25])})
{'mass1': array([ 16.4375183]), 'mass2': array([ 8.21875915]),
'mchirp': array([ 10.]), 'eta': array([ 0.25])}
Returns
-------
out : dict
A dict with key as parameter name and value as numpy.array or float
of transformed values.
### Response:
#vtb
def transform(self, maps):
out = {}
out[parameters.mass1] = conversions.mass1_from_mchirp_eta(
maps[parameters.mchirp],
maps[parameters.eta])
out[parameters.mass2] = conversions.mass2_from_mchirp_eta(
maps[parameters.mchirp],
maps[parameters.eta])
return self.format_output(maps, out)
|
#vtb
def splitterfields(data, commdct):
objkey = "Connector:Splitter".upper()
fieldlists = splittermixerfieldlists(data, commdct, objkey)
return extractfields(data, commdct, objkey, fieldlists)
|
get splitter fields to diagram it
|
### Input:
get splitter fields to diagram it
### Response:
#vtb
def splitterfields(data, commdct):
objkey = "Connector:Splitter".upper()
fieldlists = splittermixerfieldlists(data, commdct, objkey)
return extractfields(data, commdct, objkey, fieldlists)
|
#vtb
def report(*arrays, **kwargs):
name = kwargs.pop("name",None)
grouped = len(arrays) > 1
if grouped:
arr = N.concatenate(arrays)
components = [PCAOrientation(a)
for a in arrays]
else:
arr = arrays[0]
components = []
pca = PCAOrientation(arr)
distances = list(distance_from_group(components,pca))
kwargs = dict(
levels=[1,2,3],
alpha=[0.8,0.5,0.2],
linewidth=2)
kwargs = dict(n=500,levels=[1,2], ellipse=True)
stereonet_data = dict(
main=pca.error_coords(**kwargs),
components=[i.error_coords(**kwargs)
for i in components])
t = env.get_template("report.html")
return t.render(
name=name,
pca=pca,
stereonet_data=stereonet_data,
angular_errors=tuple(N.degrees(i)
for i in pca.angular_errors()[::-1]),
aligned=plot_aligned(pca),
distances=distances)
|
Outputs a standalone HTML 'report card' for a
measurement (or several grouped measurements),
including relevant statistical information.
|
### Input:
Outputs a standalone HTML 'report card' for a
measurement (or several grouped measurements),
including relevant statistical information.
### Response:
#vtb
def report(*arrays, **kwargs):
name = kwargs.pop("name",None)
grouped = len(arrays) > 1
if grouped:
arr = N.concatenate(arrays)
components = [PCAOrientation(a)
for a in arrays]
else:
arr = arrays[0]
components = []
pca = PCAOrientation(arr)
distances = list(distance_from_group(components,pca))
kwargs = dict(
levels=[1,2,3],
alpha=[0.8,0.5,0.2],
linewidth=2)
kwargs = dict(n=500,levels=[1,2], ellipse=True)
stereonet_data = dict(
main=pca.error_coords(**kwargs),
components=[i.error_coords(**kwargs)
for i in components])
t = env.get_template("report.html")
return t.render(
name=name,
pca=pca,
stereonet_data=stereonet_data,
angular_errors=tuple(N.degrees(i)
for i in pca.angular_errors()[::-1]),
aligned=plot_aligned(pca),
distances=distances)
|
#vtb
def get_timer(self, name=None):
return self.get_client(name=name, class_=statsd.Timer)
|
Shortcut for getting a :class:`~statsd.timer.Timer` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
|
### Input:
Shortcut for getting a :class:`~statsd.timer.Timer` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
### Response:
#vtb
def get_timer(self, name=None):
return self.get_client(name=name, class_=statsd.Timer)
|
#vtb
def write_to_conll_eval_file(prediction_file: TextIO,
gold_file: TextIO,
verb_index: Optional[int],
sentence: List[str],
prediction: List[str],
gold_labels: List[str]):
verb_only_sentence = ["-"] * len(sentence)
if verb_index:
verb_only_sentence[verb_index] = sentence[verb_index]
conll_format_predictions = convert_bio_tags_to_conll_format(prediction)
conll_format_gold_labels = convert_bio_tags_to_conll_format(gold_labels)
for word, predicted, gold in zip(verb_only_sentence,
conll_format_predictions,
conll_format_gold_labels):
prediction_file.write(word.ljust(15))
prediction_file.write(predicted.rjust(15) + "\n")
gold_file.write(word.ljust(15))
gold_file.write(gold.rjust(15) + "\n")
prediction_file.write("\n")
gold_file.write("\n")
|
Prints predicate argument predictions and gold labels for a single verbal
predicate in a sentence to two provided file references.
Parameters
----------
prediction_file : TextIO, required.
A file reference to print predictions to.
gold_file : TextIO, required.
A file reference to print gold labels to.
verb_index : Optional[int], required.
The index of the verbal predicate in the sentence which
the gold labels are the arguments for, or None if the sentence
contains no verbal predicate.
sentence : List[str], required.
The word tokens.
prediction : List[str], required.
The predicted BIO labels.
gold_labels : List[str], required.
The gold BIO labels.
|
### Input:
Prints predicate argument predictions and gold labels for a single verbal
predicate in a sentence to two provided file references.
Parameters
----------
prediction_file : TextIO, required.
A file reference to print predictions to.
gold_file : TextIO, required.
A file reference to print gold labels to.
verb_index : Optional[int], required.
The index of the verbal predicate in the sentence which
the gold labels are the arguments for, or None if the sentence
contains no verbal predicate.
sentence : List[str], required.
The word tokens.
prediction : List[str], required.
The predicted BIO labels.
gold_labels : List[str], required.
The gold BIO labels.
### Response:
#vtb
def write_to_conll_eval_file(prediction_file: TextIO,
gold_file: TextIO,
verb_index: Optional[int],
sentence: List[str],
prediction: List[str],
gold_labels: List[str]):
verb_only_sentence = ["-"] * len(sentence)
if verb_index:
verb_only_sentence[verb_index] = sentence[verb_index]
conll_format_predictions = convert_bio_tags_to_conll_format(prediction)
conll_format_gold_labels = convert_bio_tags_to_conll_format(gold_labels)
for word, predicted, gold in zip(verb_only_sentence,
conll_format_predictions,
conll_format_gold_labels):
prediction_file.write(word.ljust(15))
prediction_file.write(predicted.rjust(15) + "\n")
gold_file.write(word.ljust(15))
gold_file.write(gold.rjust(15) + "\n")
prediction_file.write("\n")
gold_file.write("\n")
|
#vtb
def _extract_properties(config):
general, options, sets = {}, {}, {}
for line in config.splitlines():
        if not line or not line[-1:] == ';' and ';' not in line:
            continue
        line = line[:-1].lstrip()
        if line[:6] == 'option':
            key, value = _extract_prop_option(line)
            options[key] = value
        elif line[:3] == 'set':
            key, value = _extract_prop_set(line)
            sets[key] = value
        else:
            key, value = _extract_prop_general(line)
            general[key] = value
return general, options, sets
|
Parse a line within a lease block
The line should basically match the expression:
>>> r"\s+(?P<key>(?:option|set)\s+\S+|\S+) (?P<value>[\s\S]+?);"
    For easier separation of the cases and faster parsing this is done using substrings etc.
:param config:
:return: tuple of properties dict, options dict and sets dict
|
### Input:
Parse a line within a lease block
The line should basically match the expression:
>>> r"\s+(?P<key>(?:option|set)\s+\S+|\S+) (?P<value>[\s\S]+?);"
For easier separation of the cases and faster parsing this is done using substrings etc.
:param config:
:return: tuple of properties dict, options dict and sets dict
### Response:
#vtb
def _extract_properties(config):
general, options, sets = {}, {}, {}
for line in config.splitlines():
        if not line or not line[-1:] == ';' and ';' not in line:
            continue
        line = line[:-1].lstrip()
        if line[:6] == 'option':
            key, value = _extract_prop_option(line)
            options[key] = value
        elif line[:3] == 'set':
            key, value = _extract_prop_set(line)
            sets[key] = value
        else:
            key, value = _extract_prop_general(line)
            general[key] = value
return general, options, sets
|
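The docstring's pattern can be exercised directly; a sketch matching one option line (the lease line is illustrative):
import re

pattern = r"\s+(?P<key>(?:option|set)\s+\S+|\S+) (?P<value>[\s\S]+?);"
m = re.match(pattern, '  option agent.circuit-id "eth0";')
print(m.group('key'), '->', m.group('value'))
# option agent.circuit-id -> "eth0"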
#vtb
def find(self, name):
result = None
for t in self.array:
if str(t) == name:
result = Tag(t.jobject)
break
return result
|
Returns the Tag that matches the name.
:param name: the string representation of the tag
:type name: str
:return: the tag, None if not found
:rtype: Tag
|
### Input:
Returns the Tag that matches the name.
:param name: the string representation of the tag
:type name: str
:return: the tag, None if not found
:rtype: Tag
### Response:
#vtb
def find(self, name):
result = None
for t in self.array:
if str(t) == name:
result = Tag(t.jobject)
break
return result
|
#vtb
def find_kernel_specs(self):
specs = self.find_kernel_specs_for_envs()
specs.update(super(EnvironmentKernelSpecManager,
self).find_kernel_specs())
return specs
|
Returns a dict mapping kernel names to resource directories.
|
### Input:
Returns a dict mapping kernel names to resource directories.
### Response:
#vtb
def find_kernel_specs(self):
specs = self.find_kernel_specs_for_envs()
specs.update(super(EnvironmentKernelSpecManager,
self).find_kernel_specs())
return specs
|
#vtb
def import_module(name, package=None):
    if name.startswith('.'):
        if not package:
            raise TypeError("relative imports require the 'package' argument")
        level = 0
        for character in name:
            if character != '.':
                break
            level += 1
        name = _resolve_name(name[level:], package, level)
__import__(name)
return sys.modules[name]
|
Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
|
### Input:
Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
### Response:
#vtb
def import_module(name, package=None):
    if name.startswith('.'):
        if not package:
            raise TypeError("relative imports require the 'package' argument")
        level = 0
        for character in name:
            if character != '.':
                break
            level += 1
        name = _resolve_name(name[level:], package, level)
__import__(name)
return sys.modules[name]
|
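For instance, a one-dot relative name resolves against the anchor package; using a stdlib module so the sketch actually runs, assuming the function and its _resolve_name helper are in scope:
mod = import_module('.path', package='os')  # resolves to 'os.path'
print(mod is import_module('os.path'))      # True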
#vtb
def url(self):
scheme = self.scheme
host = self.host
path = self.path
query = self.query
port = self.port
host_domain, host_port = Url.split_hostname_from_port(host)
if host_port:
port = host_port
controller_path = ""
if self.controller_info:
controller_path = self.controller_info.get("path", "")
u = Url(
scheme=scheme,
hostname=host,
path=path,
query=query,
port=port,
controller_path=controller_path,
)
return u
|
return the full request url as an Url() instance
|
### Input:
return the full request url as an Url() instance
### Response:
#vtb
def url(self):
scheme = self.scheme
host = self.host
path = self.path
query = self.query
port = self.port
host_domain, host_port = Url.split_hostname_from_port(host)
if host_port:
port = host_port
controller_path = ""
if self.controller_info:
controller_path = self.controller_info.get("path", "")
u = Url(
scheme=scheme,
hostname=host,
path=path,
query=query,
port=port,
controller_path=controller_path,
)
return u
|
#vtb
def converter_pm_log10(data):
indices_gt_zero = np.where(data > 0)
indices_lt_zero = np.where(data < 0)
data_converted = np.zeros(data.shape)
data_converted[indices_gt_zero] = np.log10(data[indices_gt_zero])
data_converted[indices_lt_zero] = -np.log10(-data[indices_lt_zero])
return indices_gt_zero, indices_lt_zero, data_converted
|
Convert the given data to:
log10(subdata) for subdata > 0
log10(-subdata') for subdata' < 0
0 for subdata'' == 0
Parameters
----------
data: array
input data
Returns
-------
array_converted: array
converted data
|
### Input:
Convert the given data to:
log10(subdata) for subdata > 0
log10(-subdata') for subdata' < 0
0 for subdata'' == 0
Parameters
----------
data: array
input data
Returns
-------
array_converted: array
converted data
### Response:
#vtb
def converter_pm_log10(data):
indices_gt_zero = np.where(data > 0)
indices_lt_zero = np.where(data < 0)
data_converted = np.zeros(data.shape)
data_converted[indices_gt_zero] = np.log10(data[indices_gt_zero])
data_converted[indices_lt_zero] = -np.log10(-data[indices_lt_zero])
return indices_gt_zero, indices_lt_zero, data_converted
|
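A quick check on mixed-sign input; zeros stay zero because only the strictly positive and strictly negative indices are written:
import numpy as np

data = np.array([100.0, -100.0, 0.0, 10.0])
_, _, converted = converter_pm_log10(data)
print(converted)  # [ 2. -2.  0.  1.]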
#vtb
def is_uniform_join_units(join_units):
return (
all(type(ju.block) is type(join_units[0].block) for ju in join_units) and
all(not ju.is_na or ju.block.is_extension for ju in join_units) and
all(not ju.indexers for ju in join_units) and
all(ju.block.ndim <= 2 for ju in join_units) and
len(join_units) > 1)
|
Check if the join units consist of blocks of uniform type that can
be concatenated using Block.concat_same_type instead of the generic
concatenate_join_units (which uses `_concat._concat_compat`).
|
### Input:
Check if the join units consist of blocks of uniform type that can
be concatenated using Block.concat_same_type instead of the generic
concatenate_join_units (which uses `_concat._concat_compat`).
### Response:
#vtb
def is_uniform_join_units(join_units):
return (
all(type(ju.block) is type(join_units[0].block) for ju in join_units) and
all(not ju.is_na or ju.block.is_extension for ju in join_units) and
all(not ju.indexers for ju in join_units) and
all(ju.block.ndim <= 2 for ju in join_units) and
len(join_units) > 1)
|
#vtb
def get_mutation_rates(transcripts, mut_dict, ensembl):
    rates = {'missense': 0, 'nonsense': 0, 'splice_lof': 0,
        'splice_region': 0, 'synonymous': 0}
    combined = None
    for tx_id in transcripts:
        try:
            tx = construct_gene_object(ensembl, tx_id)
        except ValueError:
            continue
        if len(tx.get_cds_sequence()) % 3 != 0:
            raise ValueError("anomalous_coding_sequence")
        if tx.get_chrom() == "MT":
            continue
        sites = SiteRates(tx, mut_dict, masked_sites=combined)
        combined = tx + combined
        for cq in ['missense', 'nonsense', 'splice_lof', 'splice_region', 'synonymous']:
            rates[cq] += sites[cq].get_summed_rate()
    if combined is None:
        raise ValueError('no suitable transcripts')
    length = combined.get_coding_distance(combined.get_cds_end())['pos']
return rates, combined, length
|
determines mutation rates per functional category for transcripts
Args:
transcripts: list of transcript IDs for a gene
mut_dict: dictionary of local sequence context mutation rates
ensembl: EnsemblRequest object, to retrieve information from Ensembl.
Returns:
tuple of (rates, merged transcript, and transcript CDS length)
|
### Input:
determines mutation rates per functional category for transcripts
Args:
transcripts: list of transcript IDs for a gene
mut_dict: dictionary of local sequence context mutation rates
ensembl: EnsemblRequest object, to retrieve information from Ensembl.
Returns:
tuple of (rates, merged transcript, and transcript CDS length)
### Response:
#vtb
def get_mutation_rates(transcripts, mut_dict, ensembl):
    # NOTE: the string keys were stripped during extraction; the consequence
    # categories below are reconstructed and may differ from the source.
    rates = {'missense': 0, 'nonsense': 0, 'splice_lof': 0,
        'splice_region': 0, 'synonymous': 0}
combined = None
for tx_id in transcripts:
try:
tx = construct_gene_object(ensembl, tx_id)
except ValueError:
continue
if len(tx.get_cds_sequence()) % 3 != 0:
raise ValueError("anomalous_coding_sequence")
if tx.get_chrom() == "MT":
continue
sites = SiteRates(tx, mut_dict, masked_sites=combined)
combined = tx + combined
        for cq in ['missense', 'nonsense', 'splice_lof', 'splice_region', 'synonymous']:
rates[cq] += sites[cq].get_summed_rate()
if combined is None:
        raise ValueError('no suitable transcripts found')  # message assumed
    length = combined.get_coding_distance(combined.get_cds_end())['pos']  # key assumed
return rates, combined, length
|
#vtb
def run(self):
try:
        # NOTE: string literals in this function were stripped during
        # extraction; the values below are reconstructed and assumed.
        self.job_state = 'PENDING'
        time.sleep(1)
        image = 'alpine:latest'
        cmd = '/bin/sh -c "for a in `seq 1 10`; do echo Hello World; sleep 1; done"'
        name = '{0}-{1}'.format(self.job_name, self.job_id)
        self.job_state = 'RUNNABLE'
        time.sleep(1)
        self.job_state = 'STARTING'
        container = self.docker_client.containers.run(
            image, cmd,
            detach=True,
            name=name
        )
        self.job_state = 'RUNNING'
        self.job_started_at = datetime.datetime.now()
        try:
            logs_stdout = []
            logs_stderr = []
            container.reload()
            now = datetime.datetime.now()
            i = 1
            while container.status == 'running' and not self.stop:
                time.sleep(0.15)
                if i % 10 == 0:
                    logs_stderr.extend(container.logs(stdout=False, stderr=True, timestamps=True, since=datetime2int(now)).decode().split('\n'))
                    logs_stdout.extend(container.logs(stdout=True, stderr=False, timestamps=True, since=datetime2int(now)).decode().split('\n'))
                    now = datetime.datetime.now()
                    container.reload()
                i += 1
            if container.status == 'running':
                container.kill()
            self.job_stopped_at = datetime.datetime.now()
            logs_stderr.extend(container.logs(stdout=False, stderr=True, timestamps=True, since=datetime2int(now)).decode().split('\n'))
            logs_stdout.extend(container.logs(stdout=True, stderr=False, timestamps=True, since=datetime2int(now)).decode().split('\n'))
            self.job_state = 'SUCCEEDED' if not self.stop else 'FAILED'
            logs_stdout = [x for x in logs_stdout if len(x) > 0]
            logs_stderr = [x for x in logs_stderr if len(x) > 0]
            logs = []
            for line in logs_stdout + logs_stderr:
                date, line = line.split(' ', 1)
                date = dateutil.parser.parse(date)
                date = int(date.timestamp())
                logs.append({'timestamp': date, 'message': line.strip()})
            log_group = '/aws/batch/job'
            stream_name = '{0}/default/{1}'.format(self.job_definition.name, self.job_id)
            self.log_stream_name = stream_name
            self._log_backend.ensure_log_group(log_group, None)
            self._log_backend.create_log_stream(log_group, stream_name)
            self._log_backend.put_log_events(log_group, stream_name, logs, None)
        except Exception as err:
            logger.error('Failed to run job {0}: {1}'.format(self.name, err))
            self.job_state = 'FAILED'
            container.kill()
        finally:
            container.remove()
    except Exception as err:
        logger.error('Failed to run job {0}: {1}'.format(self.name, err))
        self.job_state = 'FAILED'
self.job_stopped = True
self.job_stopped_at = datetime.datetime.now()
|
Run the container.
Logic is as follows:
Generate container info (eventually from task definition)
Start container
Loop whilst not asked to stop and the container is running.
Get all logs from container between the last time I checked and now.
Convert logs into cloudwatch format
Put logs into cloudwatch
:return:
|
### Input:
Run the container.
Logic is as follows:
Generate container info (eventually from task definition)
Start container
Loop whilst not asked to stop and the container is running.
Get all logs from container between the last time I checked and now.
Convert logs into cloudwatch format
Put logs into cloudwatch
:return:
### Response:
#vtb
def run(self):
try:
        # NOTE: string literals in this function were stripped during
        # extraction; the values below are reconstructed and assumed.
        self.job_state = 'PENDING'
        time.sleep(1)
        image = 'alpine:latest'
        cmd = '/bin/sh -c "for a in `seq 1 10`; do echo Hello World; sleep 1; done"'
        name = '{0}-{1}'.format(self.job_name, self.job_id)
        self.job_state = 'RUNNABLE'
        time.sleep(1)
        self.job_state = 'STARTING'
        container = self.docker_client.containers.run(
            image, cmd,
            detach=True,
            name=name
        )
        self.job_state = 'RUNNING'
        self.job_started_at = datetime.datetime.now()
        try:
            logs_stdout = []
            logs_stderr = []
            container.reload()
            now = datetime.datetime.now()
            i = 1
            while container.status == 'running' and not self.stop:
                time.sleep(0.15)
                if i % 10 == 0:
                    logs_stderr.extend(container.logs(stdout=False, stderr=True, timestamps=True, since=datetime2int(now)).decode().split('\n'))
                    logs_stdout.extend(container.logs(stdout=True, stderr=False, timestamps=True, since=datetime2int(now)).decode().split('\n'))
                    now = datetime.datetime.now()
                    container.reload()
                i += 1
            if container.status == 'running':
                container.kill()
            self.job_stopped_at = datetime.datetime.now()
            logs_stderr.extend(container.logs(stdout=False, stderr=True, timestamps=True, since=datetime2int(now)).decode().split('\n'))
            logs_stdout.extend(container.logs(stdout=True, stderr=False, timestamps=True, since=datetime2int(now)).decode().split('\n'))
            self.job_state = 'SUCCEEDED' if not self.stop else 'FAILED'
            logs_stdout = [x for x in logs_stdout if len(x) > 0]
            logs_stderr = [x for x in logs_stderr if len(x) > 0]
            logs = []
            for line in logs_stdout + logs_stderr:
                date, line = line.split(' ', 1)
                date = dateutil.parser.parse(date)
                date = int(date.timestamp())
                logs.append({'timestamp': date, 'message': line.strip()})
            log_group = '/aws/batch/job'
            stream_name = '{0}/default/{1}'.format(self.job_definition.name, self.job_id)
            self.log_stream_name = stream_name
            self._log_backend.ensure_log_group(log_group, None)
            self._log_backend.create_log_stream(log_group, stream_name)
            self._log_backend.put_log_events(log_group, stream_name, logs, None)
        except Exception as err:
            logger.error('Failed to run job {0}: {1}'.format(self.name, err))
            self.job_state = 'FAILED'
            container.kill()
        finally:
            container.remove()
    except Exception as err:
        logger.error('Failed to run job {0}: {1}'.format(self.name, err))
        self.job_state = 'FAILED'
self.job_stopped = True
self.job_stopped_at = datetime.datetime.now()
|
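The log-processing step above hinges on docker's timestamped log format: with timestamps=True each line is an RFC 3339 timestamp, a space, then the message. A standalone sketch of that conversion into CloudWatch-style events (dateutil is assumed available, as in the entry):

import dateutil.parser

def docker_lines_to_events(lines):
    # Convert 'TIMESTAMP message' lines into CloudWatch-style event dicts.
    events = []
    for line in lines:
        if not line:
            continue
        date, message = line.split(' ', 1)
        timestamp = int(dateutil.parser.parse(date).timestamp())
        events.append({'timestamp': timestamp, 'message': message.strip()})
    return events

print(docker_lines_to_events(['2021-01-01T00:00:00.000000Z Hello World']))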
#vtb
def _acquire_lock(self, identifier, atime=30, ltime=5):
conn = redis.Redis(connection_pool=self.pool)
end = time.time() + atime
while end > time.time():
if conn.set(self._lock_name, identifier, ex=ltime, nx=True):
return identifier
sleep_time = random.uniform(0, 3)
time.sleep(sleep_time)
    # log message stripped during extraction; reconstructed
    logger.warn('failed to acquire lock %s within %d seconds',
                self._lock_name, atime)
return False
|
Acquire a lock for a given identifier.
If the lock cannot be obtained immediately, keep trying at random
intervals, up to 3 seconds, until `atime` has passed. Once the
lock has been obtained, continue to hold it for `ltime`.
:param str identifier: lock token to write
:param int atime: maximum time (in seconds) to acquire lock
:param int ltime: maximum time (in seconds) to own lock
:return: `identifier` if the lock was obtained, :const:`False`
otherwise
|
### Input:
Acquire a lock for a given identifier.
If the lock cannot be obtained immediately, keep trying at random
intervals, up to 3 seconds, until `atime` has passed. Once the
lock has been obtained, continue to hold it for `ltime`.
:param str identifier: lock token to write
:param int atime: maximum time (in seconds) to acquire lock
:param int ltime: maximum time (in seconds) to own lock
:return: `identifier` if the lock was obtained, :const:`False`
otherwise
### Response:
#vtb
def _acquire_lock(self, identifier, atime=30, ltime=5):
conn = redis.Redis(connection_pool=self.pool)
end = time.time() + atime
while end > time.time():
if conn.set(self._lock_name, identifier, ex=ltime, nx=True):
return identifier
sleep_time = random.uniform(0, 3)
time.sleep(sleep_time)
    # log message stripped during extraction; reconstructed
    logger.warn('failed to acquire lock %s within %d seconds',
                self._lock_name, atime)
return False
|
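The acquisition above relies on redis-py's SET with nx=True/ex=... being atomic. The matching release must check the token before deleting, or one client can drop another's lock; a minimal sketch of the release side (connection details assumed):

import redis

def release_lock(conn, lock_name, identifier):
    # Only delete the lock if we still own it; a Lua script keeps the
    # compare-and-delete atomic on the server.
    script = """
    if redis.call('get', KEYS[1]) == ARGV[1] then
        return redis.call('del', KEYS[1])
    else
        return 0
    end
    """
    return conn.eval(script, 1, lock_name, identifier)

conn = redis.Redis()  # assumes a local Redis server
if conn.set('mylock', 'token-123', ex=5, nx=True):
    try:
        pass  # critical section
    finally:
        release_lock(conn, 'mylock', 'token-123')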
#vtb
def set_active_scalar(self, name, preference='cell'):  # default stripped in extraction; 'cell' assumed
_, field = get_scalar(self, name, preference=preference, info=True)
if field == POINT_DATA_FIELD:
self.GetPointData().SetActiveScalars(name)
elif field == CELL_DATA_FIELD:
self.GetCellData().SetActiveScalars(name)
else:
        raise RuntimeError('Data field ({}) not usable'.format(field))  # message assumed
self._active_scalar_info = [field, name]
|
Finds the scalar by name and appropriately sets it as active
|
### Input:
Finds the scalar by name and appropriately sets it as active
### Response:
#vtb
def set_active_scalar(self, name, preference='cell'):  # default stripped in extraction; 'cell' assumed
_, field = get_scalar(self, name, preference=preference, info=True)
if field == POINT_DATA_FIELD:
self.GetPointData().SetActiveScalars(name)
elif field == CELL_DATA_FIELD:
self.GetCellData().SetActiveScalars(name)
else:
        raise RuntimeError('Data field ({}) not usable'.format(field))  # message assumed
self._active_scalar_info = [field, name]
|
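This method appears to come from pyvista; a hedged usage sketch, assuming an older pyvista release in which point_arrays and set_active_scalar exist under these names:

import pyvista

mesh = pyvista.Sphere()
mesh.point_arrays['height'] = mesh.points[:, 2]  # add a named point-data array
mesh.set_active_scalar('height')                 # activate it by name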
#vtb
def create(cls, name, members):
NewEnum = type(name, (cls,), {})
if isinstance(members, dict):
members = members.items()
for member in members:
if isinstance(member, tuple):
name, value = member
setattr(NewEnum, name, value)
elif isinstance(member, EnumBase):
setattr(NewEnum, member.short_name, member.value)
else:
assert False, (
"members must be either a dict, "
+ "a list of (name, value) tuples, "
+ "or a list of EnumBase instances."
)
NewEnum.process()
try:
NewEnum.__module__ = sys._getframe(1).f_globals.get("__name__", "__main__")
except (AttributeError, ValueError):
pass
return NewEnum
|
Creates a new enum type based on this one (cls) and adds newly
passed members to the newly created subclass of cls.
This method helps to create enums having the same member values as
values of other enum(s).
:param name: name of the newly created type
:param members: 1) a dict or 2) a list of (name, value) tuples
and/or EnumBase instances describing new members
:return: newly created enum type.
|
### Input:
Creates a new enum type based on this one (cls) and adds newly
passed members to the newly created subclass of cls.
This method helps to create enums having the same member values as
values of other enum(s).
:param name: name of the newly created type
:param members: 1) a dict or 2) a list of (name, value) tuples
and/or EnumBase instances describing new members
:return: newly created enum type.
### Response:
#vtb
def create(cls, name, members):
NewEnum = type(name, (cls,), {})
if isinstance(members, dict):
members = members.items()
for member in members:
if isinstance(member, tuple):
name, value = member
setattr(NewEnum, name, value)
elif isinstance(member, EnumBase):
setattr(NewEnum, member.short_name, member.value)
else:
assert False, (
"members must be either a dict, "
+ "a list of (name, value) tuples, "
+ "or a list of EnumBase instances."
)
NewEnum.process()
try:
NewEnum.__module__ = sys._getframe(1).f_globals.get("__name__", "__main__")
except (AttributeError, ValueError):
pass
return NewEnum
|
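Usage of the factory above, with both accepted member formats (EnumBase comes from the entry's own codebase; the names here are illustrative):

# From a list of (name, value) tuples:
Color = EnumBase.create('Color', [('RED', 1), ('GREEN', 2), ('BLUE', 3)])

# Or from a dict; EnumBase instances are also accepted alongside tuples,
# which is how members of existing enums can be composed into a new one.
Status = EnumBase.create('Status', {'OK': 0, 'FAILED': 1})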
#vtb
def long_description():
changes = latest_changes()
    changes[0] = "`Changes for v{}".format(changes[0][1:])
    changes[1] = '-' * len(changes[0])  # RST underline; character assumed
    return "\n\n\n".join([
        read_file('README.rst'),  # filename stripped in extraction; assumed
        '\n'.join(changes),
        "`Full changelog <{}/en/develop/changelog.html>`__.".format(
            DOCUMENTATION_URL)])
|
Collates project README and latest changes.
|
### Input:
Collates project README and latest changes.
### Response:
#vtb
def long_description():
changes = latest_changes()
    changes[0] = "`Changes for v{}".format(changes[0][1:])
    changes[1] = '-' * len(changes[0])  # RST underline; character assumed
    return "\n\n\n".join([
        read_file('README.rst'),  # filename stripped in extraction; assumed
        '\n'.join(changes),
        "`Full changelog <{}/en/develop/changelog.html>`__.".format(
            DOCUMENTATION_URL)])
|
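The changes[1] line rebuilds the reStructuredText convention that a section title is followed by an underline of the same length; a tiny illustration:

title = "Changes for v1.2.3"
underline = '-' * len(title)
print(title + '\n' + underline)
# Changes for v1.2.3
# ------------------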
#vtb
def __set_basic_auth_string(self, username, password):
auth = b2handle.utilhandle.create_authentication_string(username, password)
self.__basic_authentication_string = auth
|
Creates and sets the authentication string for (write-)accessing the
Handle Server. No return, the string is set as an attribute to
the client instance.
:param username: Username handle with index: index:prefix/suffix.
:param password: The password contained in the index of the username
handle.
|
### Input:
Creates and sets the authentication string for (write-)accessing the
Handle Server. No return, the string is set as an attribute to
the client instance.
:param username: Username handle with index: index:prefix/suffix.
:param password: The password contained in the index of the username
handle.
### Response:
#vtb
def __set_basic_auth_string(self, username, password):
auth = b2handle.utilhandle.create_authentication_string(username, password)
self.__basic_authentication_string = auth
|
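The helper delegates to b2handle's utility, but the underlying HTTP Basic scheme is just base64 over "username:password"; a minimal sketch of what such a string looks like (not b2handle's actual implementation):

import base64

def create_authentication_string(username, password):
    # RFC 7617: credentials are base64("username:password")
    token = base64.b64encode('{}:{}'.format(username, password).encode('utf-8'))
    return token.decode('ascii')

print(create_authentication_string('300:11022/user', 'secret'))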
#vtb
def magic_set(obj):
def decorator(func):
is_class = isinstance(obj, six.class_types)
args, varargs, varkw, defaults = inspect.getargspec(func)
        if not args or args[0] not in ('self', 'cls', 'klass'):
if is_class:
replacement = staticmethod(func)
else:
replacement = func
        elif args[0] == 'self':
if is_class:
replacement = func
else:
def replacement(*args, **kw):
return func(obj, *args, **kw)
try:
replacement.__name__ = func.__name__
except:
pass
else:
if is_class:
replacement = classmethod(func)
else:
def replacement(*args, **kw):
return func(obj.__class__, *args, **kw)
try:
replacement.__name__ = func.__name__
except:
pass
setattr(obj, func.__name__, replacement)
return replacement
return decorator
|
Adds a function/method to an object. Uses the name of the first
argument as a hint about whether it is a method (``self``), class
method (``cls`` or ``klass``), or static method (anything else).
Works on both instances and classes.
>>> class color:
... def __init__(self, r, g, b):
... self.r, self.g, self.b = r, g, b
>>> c = color(0, 1, 0)
>>> c # doctest: +ELLIPSIS
<__main__.color instance at ...>
>>> @magic_set(color)
... def __repr__(self):
... return '<color %s %s %s>' % (self.r, self.g, self.b)
>>> c
<color 0 1 0>
>>> @magic_set(color)
... def red(cls):
... return cls(1, 0, 0)
>>> color.red()
<color 1 0 0>
>>> c.red()
<color 1 0 0>
>>> @magic_set(color)
... def name():
... return 'color'
>>> color.name()
'color'
>>> @magic_set(c)
... def name(self):
... return 'red'
>>> c.name()
'red'
>>> @magic_set(c)
... def name(cls):
... return cls.__name__
>>> c.name()
'color'
>>> @magic_set(c)
... def pr(obj):
... print obj
>>> c.pr(1)
1
|
### Input:
Adds a function/method to an object. Uses the name of the first
argument as a hint about whether it is a method (``self``), class
method (``cls`` or ``klass``), or static method (anything else).
Works on both instances and classes.
>>> class color:
... def __init__(self, r, g, b):
... self.r, self.g, self.b = r, g, b
>>> c = color(0, 1, 0)
>>> c # doctest: +ELLIPSIS
<__main__.color instance at ...>
>>> @magic_set(color)
... def __repr__(self):
... return '<color %s %s %s>' % (self.r, self.g, self.b)
>>> c
<color 0 1 0>
>>> @magic_set(color)
... def red(cls):
... return cls(1, 0, 0)
>>> color.red()
<color 1 0 0>
>>> c.red()
<color 1 0 0>
>>> @magic_set(color)
... def name():
... return 'color'
>>> color.name()
'color'
>>> @magic_set(c)
... def name(self):
... return 'red'
>>> c.name()
'red'
>>> @magic_set(c)
... def name(cls):
... return cls.__name__
>>> c.name()
'color'
>>> @magic_set(c)
... def pr(obj):
... print obj
>>> c.pr(1)
1
### Response:
#vtb
def magic_set(obj):
def decorator(func):
is_class = isinstance(obj, six.class_types)
args, varargs, varkw, defaults = inspect.getargspec(func)
        if not args or args[0] not in ('self', 'cls', 'klass'):
if is_class:
replacement = staticmethod(func)
else:
replacement = func
        elif args[0] == 'self':
if is_class:
replacement = func
else:
def replacement(*args, **kw):
return func(obj, *args, **kw)
try:
replacement.__name__ = func.__name__
except:
pass
else:
if is_class:
replacement = classmethod(func)
else:
def replacement(*args, **kw):
return func(obj.__class__, *args, **kw)
try:
replacement.__name__ = func.__name__
except:
pass
setattr(obj, func.__name__, replacement)
return replacement
return decorator
|
#vtb
def get_ntp_peers(self):
ntp_stats = self.get_ntp_stats()
return {
ntp_peer.get("remote"): {}
for ntp_peer in ntp_stats
if ntp_peer.get("remote")
}
|
Implementation of get_ntp_peers for IOS.
|
### Input:
Implementation of get_ntp_peers for IOS.
### Response:
#vtb
def get_ntp_peers(self):
ntp_stats = self.get_ntp_stats()
return {
ntp_peer.get("remote"): {}
for ntp_peer in ntp_stats
if ntp_peer.get("remote")
}
|
#vtb
def attendee(request, form, user_id=None):
if user_id is None and form.cleaned_data["user"] is not None:
user_id = form.cleaned_data["user"]
if user_id is None:
return attendee_list(request)
attendee = people.Attendee.objects.get(user__id=user_id)
name = attendee.attendeeprofilebase.attendee_name()
reports = []
profile_data = []
try:
profile = people.AttendeeProfileBase.objects.get_subclass(
attendee=attendee
)
fields = profile._meta.get_fields()
except people.AttendeeProfileBase.DoesNotExist:
fields = []
exclude = set(["attendeeprofilebase_ptr", "id"])
for field in fields:
if field.name in exclude:
continue
if not hasattr(field, "verbose_name"):
continue
value = getattr(profile, field.name)
if isinstance(field, models.ManyToManyField):
value = ", ".join(str(i) for i in value.all())
profile_data.append((field.verbose_name, value))
cart = CartController.for_user(attendee.user)
reservation = cart.cart.reservation_duration + cart.cart.time_last_updated
profile_data.append(("Current cart reserved until", reservation))
reports.append(ListReport("Profile", ["", ""], profile_data))
links = []
links.append((
reverse(views.badge, args=[user_id]),
"View badge",
))
links.append((
reverse(views.amend_registration, args=[user_id]),
"Amend current cart",
))
links.append((
reverse(views.extend_reservation, args=[user_id]),
"Extend reservation",
))
reports.append(Links("Actions for " + name, links))
ic = ItemController(attendee.user)
reports.append(ListReport(
"Paid Products",
["Product", "Quantity"],
[(pq.product, pq.quantity) for pq in ic.items_purchased()],
))
reports.append(ListReport(
"Unpaid Products",
["Product", "Quantity"],
[(pq.product, pq.quantity) for pq in ic.items_pending()],
))
invoices = commerce.Invoice.objects.filter(
user=attendee.user,
)
reports.append(QuerysetReport(
"Invoices",
["id", "get_status_display", "value"],
invoices,
headings=["Invoice ID", "Status", "Value"],
link_view=views.invoice,
))
credit_notes = commerce.CreditNote.objects.filter(
invoice__user=attendee.user,
).select_related("invoice", "creditnoteapplication", "creditnoterefund")
reports.append(QuerysetReport(
"Credit Notes",
["id", "status", "value"],
credit_notes,
link_view=views.credit_note,
))
payments = commerce.PaymentBase.objects.filter(
invoice__user=attendee.user,
).select_related("invoice")
reports.append(QuerysetReport(
"Payments",
["invoice__id", "id", "reference", "amount"],
payments,
link_view=views.invoice,
))
return reports
|
Returns a list of all manifested attendees if no attendee is specified,
else displays the attendee manifest.
|
### Input:
Returns a list of all manifested attendees if no attendee is specified,
else displays the attendee manifest.
### Response:
#vtb
def attendee(request, form, user_id=None):
if user_id is None and form.cleaned_data["user"] is not None:
user_id = form.cleaned_data["user"]
if user_id is None:
return attendee_list(request)
attendee = people.Attendee.objects.get(user__id=user_id)
name = attendee.attendeeprofilebase.attendee_name()
reports = []
profile_data = []
try:
profile = people.AttendeeProfileBase.objects.get_subclass(
attendee=attendee
)
fields = profile._meta.get_fields()
except people.AttendeeProfileBase.DoesNotExist:
fields = []
exclude = set(["attendeeprofilebase_ptr", "id"])
for field in fields:
if field.name in exclude:
continue
if not hasattr(field, "verbose_name"):
continue
value = getattr(profile, field.name)
if isinstance(field, models.ManyToManyField):
value = ", ".join(str(i) for i in value.all())
profile_data.append((field.verbose_name, value))
cart = CartController.for_user(attendee.user)
reservation = cart.cart.reservation_duration + cart.cart.time_last_updated
profile_data.append(("Current cart reserved until", reservation))
reports.append(ListReport("Profile", ["", ""], profile_data))
links = []
links.append((
reverse(views.badge, args=[user_id]),
"View badge",
))
links.append((
reverse(views.amend_registration, args=[user_id]),
"Amend current cart",
))
links.append((
reverse(views.extend_reservation, args=[user_id]),
"Extend reservation",
))
reports.append(Links("Actions for " + name, links))
ic = ItemController(attendee.user)
reports.append(ListReport(
"Paid Products",
["Product", "Quantity"],
[(pq.product, pq.quantity) for pq in ic.items_purchased()],
))
reports.append(ListReport(
"Unpaid Products",
["Product", "Quantity"],
[(pq.product, pq.quantity) for pq in ic.items_pending()],
))
invoices = commerce.Invoice.objects.filter(
user=attendee.user,
)
reports.append(QuerysetReport(
"Invoices",
["id", "get_status_display", "value"],
invoices,
headings=["Invoice ID", "Status", "Value"],
link_view=views.invoice,
))
credit_notes = commerce.CreditNote.objects.filter(
invoice__user=attendee.user,
).select_related("invoice", "creditnoteapplication", "creditnoterefund")
reports.append(QuerysetReport(
"Credit Notes",
["id", "status", "value"],
credit_notes,
link_view=views.credit_note,
))
payments = commerce.PaymentBase.objects.filter(
invoice__user=attendee.user,
).select_related("invoice")
reports.append(QuerysetReport(
"Payments",
["invoice__id", "id", "reference", "amount"],
payments,
link_view=views.invoice,
))
return reports
|
#vtb
def _QueryHash(self, digest):
if not self._url:
        # NOTE: format strings were stripped during extraction; reconstructed.
        self._url = '{0:s}://{1:s}:{2:d}/file/find'.format(
            self._protocol, self._host, self._port)
    request_data = {self.lookup_hash: digest}
    try:
        json_response = self.MakeRequestAndDecodeJSON(
            self._url, 'POST', data=request_data)
    except errors.ConnectionError as exception:
        json_response = None
        logger.error('Unable to query Viper with error: {0!s}.'.format(
            exception))
return json_response
|
Queries the Viper Server for a specific hash.
Args:
digest (str): hash to look up.
Returns:
dict[str, object]: JSON response or None on error.
|
### Input:
Queries the Viper Server for a specific hash.
Args:
digest (str): hash to look up.
Returns:
dict[str, object]: JSON response or None on error.
### Response:
#vtb
def _QueryHash(self, digest):
if not self._url:
        # NOTE: format strings were stripped during extraction; reconstructed.
        self._url = '{0:s}://{1:s}:{2:d}/file/find'.format(
            self._protocol, self._host, self._port)
    request_data = {self.lookup_hash: digest}
    try:
        json_response = self.MakeRequestAndDecodeJSON(
            self._url, 'POST', data=request_data)
    except errors.ConnectionError as exception:
        json_response = None
        logger.error('Unable to query Viper with error: {0!s}.'.format(
            exception))
return json_response
|
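The same lookup can be sketched with requests for readers outside the entry's codebase; the endpoint and field name are assumptions modeled on the entry, not a documented Viper API:

import requests

def query_hash(url, lookup_field, digest):
    # POST the hash to the analysis server and return the decoded JSON,
    # or None if the request fails.
    try:
        response = requests.post(url, data={lookup_field: digest}, timeout=10)
        response.raise_for_status()
        return response.json()
    except requests.RequestException:
        return None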
#vtb
def xAxisIsMajor(self):
return max(self.radius.x, self.radius.y) == self.radius.x
|
Returns True if the major axis is parallel to the X axis, boolean.
|
### Input:
Returns True if the major axis is parallel to the X axis, boolean.
### Response:
#vtb
def xAxisIsMajor(self):
return max(self.radius.x, self.radius.y) == self.radius.x
|
#vtb
def which(program, environ=None):
def is_exe(path):
return isfile(path) and os.access(path, os.X_OK)
if program is None:
raise CommandException("Invalid program name passed")
fpath, fname = split(program)
if fpath:
if is_exe(program):
return program
else:
if environ is None:
environ = os.environ
        for path in environ['PATH'].split(os.pathsep):
exe_file = join(path, program)
if is_exe(exe_file):
return exe_file
raise CommandException("Could not find %s" % program)
|
Find out if an executable exists in the supplied PATH.
If so, the absolute path to the executable is returned.
If not, an exception is raised.
:type string
:param program: Executable to be checked for
:param dict
:param environ: Any additional ENV variables required, specifically PATH
:return string|:class:`command.CommandException` Returns the location if found, otherwise raises exception
|
### Input:
Find out if an executable exists in the supplied PATH.
If so, the absolute path to the executable is returned.
If not, an exception is raised.
:type string
:param program: Executable to be checked for
:param dict
:param environ: Any additional ENV variables required, specifically PATH
:return string|:class:`command.CommandException` Returns the location if found, otherwise raises exception
### Response:
#vtb
def which(program, environ=None):
def is_exe(path):
return isfile(path) and os.access(path, os.X_OK)
if program is None:
raise CommandException("Invalid program name passed")
fpath, fname = split(program)
if fpath:
if is_exe(program):
return program
else:
if environ is None:
environ = os.environ
        for path in environ['PATH'].split(os.pathsep):
exe_file = join(path, program)
if is_exe(exe_file):
return exe_file
raise CommandException("Could not find %s" % program)
|
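Since Python 3.3 the standard library covers the common case of this helper; shutil.which returns the absolute path or None rather than raising:

import shutil

path = shutil.which('python3')
if path is None:
    raise RuntimeError('python3 not found on PATH')
print(path)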
#vtb
def get_newest_app_version() -> Version:
    # NOTE: string literals were stripped during extraction; values assumed.
    with urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where()) as p_man:
        pypi_json = p_man.urlopen('GET', static_data.PYPI_JSON_URL).data.decode()
    releases = json.loads(pypi_json).get('releases', [])
    online_version = Version('0.0.0')  # packaging's Version requires a string
for release in releases:
cur_version = Version(release)
if not cur_version.is_prerelease:
online_version = max(online_version, cur_version)
return online_version
|
Download the version tag from remote.
:return: version from remote
:rtype: ~packaging.version.Version
|
### Input:
Download the version tag from remote.
:return: version from remote
:rtype: ~packaging.version.Version
### Response:
#vtb
def get_newest_app_version() -> Version:
    # NOTE: string literals were stripped during extraction; values assumed.
    with urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where()) as p_man:
        pypi_json = p_man.urlopen('GET', static_data.PYPI_JSON_URL).data.decode()
    releases = json.loads(pypi_json).get('releases', [])
    online_version = Version('0.0.0')  # packaging's Version requires a string
for release in releases:
cur_version = Version(release)
if not cur_version.is_prerelease:
online_version = max(online_version, cur_version)
return online_version
|
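The same check can be written against PyPI's public JSON API with requests; the package name below is a placeholder:

import requests
from packaging.version import Version

def newest_release(package):
    # Query https://pypi.org/pypi/<package>/json and keep the highest
    # non-prerelease version string.
    data = requests.get('https://pypi.org/pypi/{}/json'.format(package), timeout=10).json()
    versions = [Version(v) for v in data.get('releases', {})]
    return max((v for v in versions if not v.is_prerelease), default=Version('0.0.0'))

print(newest_release('requests'))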