text (string, lengths 75-104k) | code_tokens (sequence) | avg_line_len (float64, 7.91-980) | score (float64, 0-0.18)
---|---|---|---|
def _handle_command(self, command):
    """
    Handle command. This will run in a separate thread, in order not
    to block the event loop.
    """
    logger.info('Handle command %r', command)

    def in_executor():
        self.handling_command = True
        try:
            if self.callback is not None:
                self.callback(self, command)
        finally:
            self.server.call_from_executor(done)

    def done():
        self.handling_command = False

        # Reset state and draw again. (If the connection is still open --
        # the application could have called TelnetConnection.close().)
        if not self.closed:
            self.cli.reset()
            self.cli.buffers[DEFAULT_BUFFER].reset()
            self.cli.renderer.request_absolute_cursor_position()
            self.vt100_output.flush()
            self.cli._redraw()

    self.server.run_in_executor(in_executor) | [
"def",
"_handle_command",
"(",
"self",
",",
"command",
")",
":",
"logger",
".",
"info",
"(",
"'Handle command %r'",
",",
"command",
")",
"def",
"in_executor",
"(",
")",
":",
"self",
".",
"handling_command",
"=",
"True",
"try",
":",
"if",
"self",
".",
"callback",
"is",
"not",
"None",
":",
"self",
".",
"callback",
"(",
"self",
",",
"command",
")",
"finally",
":",
"self",
".",
"server",
".",
"call_from_executor",
"(",
"done",
")",
"def",
"done",
"(",
")",
":",
"self",
".",
"handling_command",
"=",
"False",
"# Reset state and draw again. (If the connection is still open --",
"# the application could have called TelnetConnection.close()",
"if",
"not",
"self",
".",
"closed",
":",
"self",
".",
"cli",
".",
"reset",
"(",
")",
"self",
".",
"cli",
".",
"buffers",
"[",
"DEFAULT_BUFFER",
"]",
".",
"reset",
"(",
")",
"self",
".",
"cli",
".",
"renderer",
".",
"request_absolute_cursor_position",
"(",
")",
"self",
".",
"vt100_output",
".",
"flush",
"(",
")",
"self",
".",
"cli",
".",
"_redraw",
"(",
")",
"self",
".",
"server",
".",
"run_in_executor",
"(",
"in_executor",
")"
] | 34.857143 | 0.001994 |
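The record above illustrates a common event-loop pattern: run the blocking handler on a worker thread, then hop back onto the loop thread to reset and redraw. A minimal sketch of the same idea using stdlib asyncio; the names here are illustrative stand-ins, not prompt_toolkit's API:

import asyncio

def blocking_handler(command):
    # stands in for self.callback(self, command) in the record
    print('handling', command)

async def handle_command(command):
    loop = asyncio.get_running_loop()
    # run the blocking callback off the loop thread...
    await loop.run_in_executor(None, blocking_handler, command)
    # ...then continue on the loop thread, as done() does with the redraw
    print('redraw')

asyncio.run(handle_command('ls'))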
def delete_connection():
    """
    Stop and destroy Bloomberg connection
    """
    if _CON_SYM_ in globals():
        con = globals().pop(_CON_SYM_)
        if not getattr(con, '_session').start():
            con.stop() | [
"def",
"delete_connection",
"(",
")",
":",
"if",
"_CON_SYM_",
"in",
"globals",
"(",
")",
":",
"con",
"=",
"globals",
"(",
")",
".",
"pop",
"(",
"_CON_SYM_",
")",
"if",
"not",
"getattr",
"(",
"con",
",",
"'_session'",
")",
".",
"start",
"(",
")",
":",
"con",
".",
"stop",
"(",
")"
] | 29.428571 | 0.009434 |
def download(self):
  """Method which downloads submission to local directory."""
  # Structure of the download directory:
  # submission_dir=LOCAL_SUBMISSIONS_DIR/submission_id
  # submission_dir/s.ext <-- archived submission
  # submission_dir/extracted <-- extracted submission

  # Check whether submission is already there
  if self.extracted_submission_dir:
    return
  self.submission_dir = os.path.join(LOCAL_SUBMISSIONS_DIR,
                                     self.submission_id)
  if (os.path.isdir(self.submission_dir)
      and os.path.isdir(os.path.join(self.submission_dir, 'extracted'))):
    # submission already there, just re-read metadata
    self.extracted_submission_dir = os.path.join(self.submission_dir,
                                                 'extracted')
    with open(os.path.join(self.extracted_submission_dir, 'metadata.json'),
              'r') as f:
      meta_json = json.load(f)
    self.container_name = str(meta_json[METADATA_CONTAINER])
    self.entry_point = str(meta_json[METADATA_ENTRY_POINT])
    return
  # figure out submission location in the Cloud and determine extractor
  submission_cloud_path = os.path.join('gs://', self.storage_bucket,
                                       self.submission.path)
  extract_command_tmpl = None
  extension = None
  for k, v in iteritems(EXTRACT_COMMAND):
    if submission_cloud_path.endswith(k):
      extension = k
      extract_command_tmpl = v
      break
  if not extract_command_tmpl:
    raise WorkerError('Unsupported submission extension')
  # download archive
  try:
    os.makedirs(self.submission_dir)
    tmp_extract_dir = os.path.join(self.submission_dir, 'tmp')
    os.makedirs(tmp_extract_dir)
    download_path = os.path.join(self.submission_dir, 's' + extension)
    try:
      logging.info('Downloading submission from %s to %s',
                   submission_cloud_path, download_path)
      shell_call(['gsutil', 'cp', submission_cloud_path, download_path])
    except subprocess.CalledProcessError as e:
      raise WorkerError("Can't copy submission locally", e)
    # extract archive
    try:
      shell_call(extract_command_tmpl,
                 src=download_path, dst=tmp_extract_dir)
    except subprocess.CalledProcessError as e:
      # proceed even if extraction returned non-zero error code,
      # sometimes it's just a warning
      logging.warning('Submission extraction returned non-zero error code. '
                      'It may be just a warning, continuing execution. '
                      'Error: %s', e)
    try:
      make_directory_writable(tmp_extract_dir)
    except subprocess.CalledProcessError as e:
      raise WorkerError("Can't make submission directory writable", e)
    # determine root of the submission
    tmp_root_dir = tmp_extract_dir
    root_dir_content = [d for d in os.listdir(tmp_root_dir)
                        if d != '__MACOSX']
    if (len(root_dir_content) == 1
        and os.path.isdir(os.path.join(tmp_root_dir, root_dir_content[0]))):
      tmp_root_dir = os.path.join(tmp_root_dir, root_dir_content[0])
    # move files to extract subdirectory
    self.extracted_submission_dir = os.path.join(self.submission_dir,
                                                 'extracted')
    try:
      shell_call(['mv', os.path.join(tmp_root_dir),
                  self.extracted_submission_dir])
    except subprocess.CalledProcessError as e:
      raise WorkerError("Can't move submission files", e)
    # read metadata file
    try:
      with open(os.path.join(self.extracted_submission_dir, 'metadata.json'),
                'r') as f:
        meta_json = json.load(f)
    except IOError as e:
      raise WorkerError(
          "Can't read metadata.json for submission \"{0}\"".format(
              self.submission_id),
          e)
    try:
      self.container_name = str(meta_json[METADATA_CONTAINER])
      self.entry_point = str(meta_json[METADATA_ENTRY_POINT])
      type_from_meta = METADATA_JSON_TYPE_TO_TYPE[meta_json[METADATA_TYPE]]
    except KeyError as e:
      raise WorkerError('Invalid metadata.json file', e)
    if type_from_meta != self.type:
      raise WorkerError('Inconsistent submission type in metadata: '
                        + type_from_meta + ' vs ' + self.type)
  except WorkerError as e:
    self.extracted_submission_dir = None
    sudo_remove_dirtree(self.submission_dir)
    raise | [
"def",
"download",
"(",
"self",
")",
":",
"# Structure of the download directory:",
"# submission_dir=LOCAL_SUBMISSIONS_DIR/submission_id",
"# submission_dir/s.ext <-- archived submission",
"# submission_dir/extracted <-- extracted submission",
"# Check whether submission is already there",
"if",
"self",
".",
"extracted_submission_dir",
":",
"return",
"self",
".",
"submission_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"LOCAL_SUBMISSIONS_DIR",
",",
"self",
".",
"submission_id",
")",
"if",
"(",
"os",
".",
"path",
".",
"isdir",
"(",
"self",
".",
"submission_dir",
")",
"and",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"submission_dir",
",",
"'extracted'",
")",
")",
")",
":",
"# submission already there, just re-read metadata",
"self",
".",
"extracted_submission_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"submission_dir",
",",
"'extracted'",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"extracted_submission_dir",
",",
"'metadata.json'",
")",
",",
"'r'",
")",
"as",
"f",
":",
"meta_json",
"=",
"json",
".",
"load",
"(",
"f",
")",
"self",
".",
"container_name",
"=",
"str",
"(",
"meta_json",
"[",
"METADATA_CONTAINER",
"]",
")",
"self",
".",
"entry_point",
"=",
"str",
"(",
"meta_json",
"[",
"METADATA_ENTRY_POINT",
"]",
")",
"return",
"# figure out submission location in the Cloud and determine extractor",
"submission_cloud_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'gs://'",
",",
"self",
".",
"storage_bucket",
",",
"self",
".",
"submission",
".",
"path",
")",
"extract_command_tmpl",
"=",
"None",
"extension",
"=",
"None",
"for",
"k",
",",
"v",
"in",
"iteritems",
"(",
"EXTRACT_COMMAND",
")",
":",
"if",
"submission_cloud_path",
".",
"endswith",
"(",
"k",
")",
":",
"extension",
"=",
"k",
"extract_command_tmpl",
"=",
"v",
"break",
"if",
"not",
"extract_command_tmpl",
":",
"raise",
"WorkerError",
"(",
"'Unsupported submission extension'",
")",
"# download archive",
"try",
":",
"os",
".",
"makedirs",
"(",
"self",
".",
"submission_dir",
")",
"tmp_extract_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"submission_dir",
",",
"'tmp'",
")",
"os",
".",
"makedirs",
"(",
"tmp_extract_dir",
")",
"download_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"submission_dir",
",",
"'s'",
"+",
"extension",
")",
"try",
":",
"logging",
".",
"info",
"(",
"'Downloading submission from %s to %s'",
",",
"submission_cloud_path",
",",
"download_path",
")",
"shell_call",
"(",
"[",
"'gsutil'",
",",
"'cp'",
",",
"submission_cloud_path",
",",
"download_path",
"]",
")",
"except",
"subprocess",
".",
"CalledProcessError",
"as",
"e",
":",
"raise",
"WorkerError",
"(",
"'Can'",
"'t copy submission locally'",
",",
"e",
")",
"# extract archive",
"try",
":",
"shell_call",
"(",
"extract_command_tmpl",
",",
"src",
"=",
"download_path",
",",
"dst",
"=",
"tmp_extract_dir",
")",
"except",
"subprocess",
".",
"CalledProcessError",
"as",
"e",
":",
"# proceed even if extraction returned non zero error code,",
"# sometimes it's just warning",
"logging",
".",
"warning",
"(",
"'Submission extraction returned non-zero error code. '",
"'It may be just a warning, continuing execution. '",
"'Error: %s'",
",",
"e",
")",
"try",
":",
"make_directory_writable",
"(",
"tmp_extract_dir",
")",
"except",
"subprocess",
".",
"CalledProcessError",
"as",
"e",
":",
"raise",
"WorkerError",
"(",
"'Can'",
"'t make submission directory writable'",
",",
"e",
")",
"# determine root of the submission",
"tmp_root_dir",
"=",
"tmp_extract_dir",
"root_dir_content",
"=",
"[",
"d",
"for",
"d",
"in",
"os",
".",
"listdir",
"(",
"tmp_root_dir",
")",
"if",
"d",
"!=",
"'__MACOSX'",
"]",
"if",
"(",
"len",
"(",
"root_dir_content",
")",
"==",
"1",
"and",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"tmp_root_dir",
",",
"root_dir_content",
"[",
"0",
"]",
")",
")",
")",
":",
"tmp_root_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_root_dir",
",",
"root_dir_content",
"[",
"0",
"]",
")",
"# move files to extract subdirectory",
"self",
".",
"extracted_submission_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"submission_dir",
",",
"'extracted'",
")",
"try",
":",
"shell_call",
"(",
"[",
"'mv'",
",",
"os",
".",
"path",
".",
"join",
"(",
"tmp_root_dir",
")",
",",
"self",
".",
"extracted_submission_dir",
"]",
")",
"except",
"subprocess",
".",
"CalledProcessError",
"as",
"e",
":",
"raise",
"WorkerError",
"(",
"'Can'",
"'t move submission files'",
",",
"e",
")",
"# read metadata file",
"try",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"extracted_submission_dir",
",",
"'metadata.json'",
")",
",",
"'r'",
")",
"as",
"f",
":",
"meta_json",
"=",
"json",
".",
"load",
"(",
"f",
")",
"except",
"IOError",
"as",
"e",
":",
"raise",
"WorkerError",
"(",
"'Can'",
"'t read metadata.json for submission \"{0}\"'",
".",
"format",
"(",
"self",
".",
"submission_id",
")",
",",
"e",
")",
"try",
":",
"self",
".",
"container_name",
"=",
"str",
"(",
"meta_json",
"[",
"METADATA_CONTAINER",
"]",
")",
"self",
".",
"entry_point",
"=",
"str",
"(",
"meta_json",
"[",
"METADATA_ENTRY_POINT",
"]",
")",
"type_from_meta",
"=",
"METADATA_JSON_TYPE_TO_TYPE",
"[",
"meta_json",
"[",
"METADATA_TYPE",
"]",
"]",
"except",
"KeyError",
"as",
"e",
":",
"raise",
"WorkerError",
"(",
"'Invalid metadata.json file'",
",",
"e",
")",
"if",
"type_from_meta",
"!=",
"self",
".",
"type",
":",
"raise",
"WorkerError",
"(",
"'Inconsistent submission type in metadata: '",
"+",
"type_from_meta",
"+",
"' vs '",
"+",
"self",
".",
"type",
")",
"except",
"WorkerError",
"as",
"e",
":",
"self",
".",
"extracted_submission_dir",
"=",
"None",
"sudo_remove_dirtree",
"(",
"self",
".",
"submission_dir",
")",
"raise"
] | 45.373737 | 0.008932 |
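One reusable step in the record above is root detection: if the archive extracted to a single top-level directory (ignoring macOS '__MACOSX' cruft), descend into it. A self-contained sketch of just that step; effective_root is a hypothetical helper name:

import os

def effective_root(extract_dir):
    # Mirrors the record's logic: unwrap a lone top-level directory.
    entries = [d for d in os.listdir(extract_dir) if d != '__MACOSX']
    if len(entries) == 1 and os.path.isdir(os.path.join(extract_dir, entries[0])):
        return os.path.join(extract_dir, entries[0])
    return extract_dir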
def child_task(self):
    '''child process - this holds all the GUI elements'''
    mp_util.child_close_fds()
    from MAVProxy.modules.lib import wx_processguard
    from MAVProxy.modules.lib.wx_loader import wx
    from MAVProxy.modules.mavproxy_map.mp_slipmap_ui import MPSlipMapFrame

    state = self
    self.mt = mp_tile.MPTile(download=self.download,
                             service=self.service,
                             tile_delay=self.tile_delay,
                             debug=self.debug,
                             max_zoom=self.max_zoom)
    state.layers = {}
    state.info = {}
    state.need_redraw = True
    self.app = wx.App(False)
    self.app.SetExitOnFrameDelete(True)
    self.app.frame = MPSlipMapFrame(state=self)
    self.app.frame.Show()
    self.app.MainLoop() | [
"def",
"child_task",
"(",
"self",
")",
":",
"mp_util",
".",
"child_close_fds",
"(",
")",
"from",
"MAVProxy",
".",
"modules",
".",
"lib",
"import",
"wx_processguard",
"from",
"MAVProxy",
".",
"modules",
".",
"lib",
".",
"wx_loader",
"import",
"wx",
"from",
"MAVProxy",
".",
"modules",
".",
"mavproxy_map",
".",
"mp_slipmap_ui",
"import",
"MPSlipMapFrame",
"state",
"=",
"self",
"self",
".",
"mt",
"=",
"mp_tile",
".",
"MPTile",
"(",
"download",
"=",
"self",
".",
"download",
",",
"service",
"=",
"self",
".",
"service",
",",
"tile_delay",
"=",
"self",
".",
"tile_delay",
",",
"debug",
"=",
"self",
".",
"debug",
",",
"max_zoom",
"=",
"self",
".",
"max_zoom",
")",
"state",
".",
"layers",
"=",
"{",
"}",
"state",
".",
"info",
"=",
"{",
"}",
"state",
".",
"need_redraw",
"=",
"True",
"self",
".",
"app",
"=",
"wx",
".",
"App",
"(",
"False",
")",
"self",
".",
"app",
".",
"SetExitOnFrameDelete",
"(",
"True",
")",
"self",
".",
"app",
".",
"frame",
"=",
"MPSlipMapFrame",
"(",
"state",
"=",
"self",
")",
"self",
".",
"app",
".",
"frame",
".",
"Show",
"(",
")",
"self",
".",
"app",
".",
"MainLoop",
"(",
")"
] | 35.833333 | 0.002265 |
def update_login_profile(self, user_name, password):
    """
    Resets the password associated with the user's login profile.

    :type user_name: string
    :param user_name: The name of the user

    :type password: string
    :param password: The new password for the user
    """
    params = {'UserName': user_name,
              'Password': password}
    return self.get_response('UpdateLoginProfile', params) | [
"def",
"update_login_profile",
"(",
"self",
",",
"user_name",
",",
"password",
")",
":",
"params",
"=",
"{",
"'UserName'",
":",
"user_name",
",",
"'Password'",
":",
"password",
"}",
"return",
"self",
".",
"get_response",
"(",
"'UpdateLoginProfile'",
",",
"params",
")"
] | 31.928571 | 0.008696 |
def choose_intronic_effect_class(
        variant,
        nearest_exon,
        distance_to_exon):
    """
    Infer effect of variant which does not overlap any exon of
    the given transcript.
    """
    assert distance_to_exon > 0, \
        "Expected intronic effect to have distance_to_exon > 0, got %d" % (
            distance_to_exon,)
    if nearest_exon.strand == "+":
        # if exon on positive strand
        start_before = variant.trimmed_base1_start < nearest_exon.start
        start_same = variant.trimmed_base1_start == nearest_exon.start
        before_exon = start_before or (variant.is_insertion and start_same)
    else:
        # if exon on negative strand
        end_after = variant.trimmed_base1_end > nearest_exon.end
        end_same = variant.trimmed_base1_end == nearest_exon.end
        before_exon = end_after or (variant.is_insertion and end_same)

    # distance cutoffs based on consensus splice sequences from
    # http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2947103/
    # 5' splice site: MAG|GURAGU consensus
    #   M is A or C; R is purine; | is the exon-intron boundary
    # 3' splice site: YAG|R
    if distance_to_exon <= 2:
        if before_exon:
            # 2 last nucleotides of intron before exon are the splice
            # acceptor site, typically "AG"
            return SpliceAcceptor
        else:
            # 2 first nucleotides of intron after exon are the splice donor
            # site, typically "GT"
            return SpliceDonor
    elif not before_exon and distance_to_exon <= 6:
        # variants in nucleotides 3-6 at start of intron aren't as certain
        # to cause problems as nucleotides 1-2 but still implicated in
        # alternative splicing
        return IntronicSpliceSite
    elif before_exon and distance_to_exon <= 3:
        # nucleotide -3 before exon is part of the 3' splicing
        # motif but allows for more degeneracy than the -2, -1 nucleotides
        return IntronicSpliceSite
    else:
        # intronic mutation unrelated to splicing
        return Intronic | [
"def",
"choose_intronic_effect_class",
"(",
"variant",
",",
"nearest_exon",
",",
"distance_to_exon",
")",
":",
"assert",
"distance_to_exon",
">",
"0",
",",
"\"Expected intronic effect to have distance_to_exon > 0, got %d\"",
"%",
"(",
"distance_to_exon",
",",
")",
"if",
"nearest_exon",
".",
"strand",
"==",
"\"+\"",
":",
"# if exon on positive strand",
"start_before",
"=",
"variant",
".",
"trimmed_base1_start",
"<",
"nearest_exon",
".",
"start",
"start_same",
"=",
"variant",
".",
"trimmed_base1_start",
"==",
"nearest_exon",
".",
"start",
"before_exon",
"=",
"start_before",
"or",
"(",
"variant",
".",
"is_insertion",
"and",
"start_same",
")",
"else",
":",
"# if exon on negative strand",
"end_after",
"=",
"variant",
".",
"trimmed_base1_end",
">",
"nearest_exon",
".",
"end",
"end_same",
"=",
"variant",
".",
"trimmed_base1_end",
"==",
"nearest_exon",
".",
"end",
"before_exon",
"=",
"end_after",
"or",
"(",
"variant",
".",
"is_insertion",
"and",
"end_same",
")",
"# distance cutoffs based on consensus splice sequences from",
"# http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2947103/",
"# 5' splice site: MAG|GURAGU consensus",
"# M is A or C; R is purine; | is the exon-intron boundary",
"# 3' splice site: YAG|R",
"if",
"distance_to_exon",
"<=",
"2",
":",
"if",
"before_exon",
":",
"# 2 last nucleotides of intron before exon are the splice",
"# acceptor site, typically \"AG\"",
"return",
"SpliceAcceptor",
"else",
":",
"# 2 first nucleotides of intron after exon are the splice donor",
"# site, typically \"GT\"",
"return",
"SpliceDonor",
"elif",
"not",
"before_exon",
"and",
"distance_to_exon",
"<=",
"6",
":",
"# variants in nucleotides 3-6 at start of intron aren't as certain",
"# to cause problems as nucleotides 1-2 but still implicated in",
"# alternative splicing",
"return",
"IntronicSpliceSite",
"elif",
"before_exon",
"and",
"distance_to_exon",
"<=",
"3",
":",
"# nucleotide -3 before exon is part of the 3' splicing",
"# motif but allows for more degeneracy than the -2, -1 nucleotides",
"return",
"IntronicSpliceSite",
"else",
":",
"# intronic mutation unrelated to splicing",
"return",
"Intronic"
] | 41.265306 | 0.000483 |
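The distance cutoffs in the record reduce to a small decision table. A toy version with plain strings standing in for the varcode effect classes; the thresholds (2, 6, 3) mirror the record:

def classify_intronic(distance_to_exon, before_exon):
    # before_exon: True if the variant sits on the exon's upstream side.
    assert distance_to_exon > 0
    if distance_to_exon <= 2:
        return 'SpliceAcceptor' if before_exon else 'SpliceDonor'
    if not before_exon and distance_to_exon <= 6:
        return 'IntronicSpliceSite'
    if before_exon and distance_to_exon <= 3:
        return 'IntronicSpliceSite'
    return 'Intronic'

assert classify_intronic(1, True) == 'SpliceAcceptor'
assert classify_intronic(5, False) == 'IntronicSpliceSite'
assert classify_intronic(10, True) == 'Intronic'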
def list_keys(user=None, gnupghome=None):
    '''
    List keys in GPG keychain

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.list_keys
    '''
    _keys = []
    for _key in _list_keys(user, gnupghome):
        tmp = {'keyid': _key['keyid'],
               'fingerprint': _key['fingerprint'],
               'uids': _key['uids']}
        expires = _key.get('expires', None)
        date = _key.get('date', None)
        length = _key.get('length', None)
        owner_trust = _key.get('ownertrust', None)
        trust = _key.get('trust', None)
        if expires:
            tmp['expires'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['expires'])))
        if date:
            tmp['created'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['date'])))
        if length:
            tmp['keyLength'] = _key['length']
        if owner_trust:
            tmp['ownerTrust'] = LETTER_TRUST_DICT[_key['ownertrust']]
        if trust:
            tmp['trust'] = LETTER_TRUST_DICT[_key['trust']]
        _keys.append(tmp)
    return _keys | [
"def",
"list_keys",
"(",
"user",
"=",
"None",
",",
"gnupghome",
"=",
"None",
")",
":",
"_keys",
"=",
"[",
"]",
"for",
"_key",
"in",
"_list_keys",
"(",
"user",
",",
"gnupghome",
")",
":",
"tmp",
"=",
"{",
"'keyid'",
":",
"_key",
"[",
"'keyid'",
"]",
",",
"'fingerprint'",
":",
"_key",
"[",
"'fingerprint'",
"]",
",",
"'uids'",
":",
"_key",
"[",
"'uids'",
"]",
"}",
"expires",
"=",
"_key",
".",
"get",
"(",
"'expires'",
",",
"None",
")",
"date",
"=",
"_key",
".",
"get",
"(",
"'date'",
",",
"None",
")",
"length",
"=",
"_key",
".",
"get",
"(",
"'length'",
",",
"None",
")",
"owner_trust",
"=",
"_key",
".",
"get",
"(",
"'ownertrust'",
",",
"None",
")",
"trust",
"=",
"_key",
".",
"get",
"(",
"'trust'",
",",
"None",
")",
"if",
"expires",
":",
"tmp",
"[",
"'expires'",
"]",
"=",
"time",
".",
"strftime",
"(",
"'%Y-%m-%d'",
",",
"time",
".",
"localtime",
"(",
"float",
"(",
"_key",
"[",
"'expires'",
"]",
")",
")",
")",
"if",
"date",
":",
"tmp",
"[",
"'created'",
"]",
"=",
"time",
".",
"strftime",
"(",
"'%Y-%m-%d'",
",",
"time",
".",
"localtime",
"(",
"float",
"(",
"_key",
"[",
"'date'",
"]",
")",
")",
")",
"if",
"length",
":",
"tmp",
"[",
"'keyLength'",
"]",
"=",
"_key",
"[",
"'length'",
"]",
"if",
"owner_trust",
":",
"tmp",
"[",
"'ownerTrust'",
"]",
"=",
"LETTER_TRUST_DICT",
"[",
"_key",
"[",
"'ownertrust'",
"]",
"]",
"if",
"trust",
":",
"tmp",
"[",
"'trust'",
"]",
"=",
"LETTER_TRUST_DICT",
"[",
"_key",
"[",
"'trust'",
"]",
"]",
"_keys",
".",
"append",
"(",
"tmp",
")",
"return",
"_keys"
] | 31.177778 | 0.001382 |
def add_from_file(self, filename, handler_decorator=None):
    """
    Wrapper around add() that reads the handlers from the
    file with the given name. The file is a Python script containing
    a list named 'commands' of tuples that map command names to
    handlers.

    :type  filename: str
    :param filename: The name of the file containing the tuples.
    :type  handler_decorator: function
    :param handler_decorator: A function that is used to decorate
        each of the handlers in the file.
    """
    args = {}
    execfile(filename, args)
    commands = args.get('commands')
    if commands is None:
        raise Exception(filename + ' has no variable named "commands"')
    elif not hasattr(commands, '__iter__'):
        raise Exception(filename + ': "commands" is not iterable')
    for key, handler in commands:
        if handler_decorator:
            handler = handler_decorator(handler)
        self.add(key, handler) | [
"def",
"add_from_file",
"(",
"self",
",",
"filename",
",",
"handler_decorator",
"=",
"None",
")",
":",
"args",
"=",
"{",
"}",
"execfile",
"(",
"filename",
",",
"args",
")",
"commands",
"=",
"args",
".",
"get",
"(",
"'commands'",
")",
"if",
"commands",
"is",
"None",
":",
"raise",
"Exception",
"(",
"filename",
"+",
"' has no variable named \"commands\"'",
")",
"elif",
"not",
"hasattr",
"(",
"commands",
",",
"'__iter__'",
")",
":",
"raise",
"Exception",
"(",
"filename",
"+",
"': \"commands\" is not iterable'",
")",
"for",
"key",
",",
"handler",
"in",
"commands",
":",
"if",
"handler_decorator",
":",
"handler",
"=",
"handler_decorator",
"(",
"handler",
")",
"self",
".",
"add",
"(",
"key",
",",
"handler",
")"
] | 42.333333 | 0.001925 |
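Note that execfile() in the record is Python 2 only. A hypothetical Python 3 replacement for the load step (load_commands is an illustrative name, not part of the original API) would be:

def load_commands(filename):
    # Python 3 equivalent of execfile(filename, args) in the record above.
    namespace = {}
    with open(filename) as f:
        exec(compile(f.read(), filename, 'exec'), namespace)
    return namespace.get('commands')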
def sendNotification(snmpDispatcher, authData, transportTarget,
                     notifyType, *varBinds, **options):
    """Creates a generator to send one or more SNMP notifications.

    On each iteration, a new SNMP TRAP or INFORM notification is sent
    (:RFC:`1905#section-4.2.6`). The iterator blocks waiting for an
    INFORM acknowledgement to arrive or an error to occur.

    Parameters
    ----------
    snmpDispatcher: :py:class:`~pysnmp.hlapi.v1arch.asyncore.SnmpDispatcher`
        Class instance representing asyncore-based asynchronous event loop and
        associated state information.

    authData: :py:class:`~pysnmp.hlapi.CommunityData` or :py:class:`~pysnmp.hlapi.UsmUserData`
        Class instance representing SNMP credentials.

    transportTarget: :py:class:`~pysnmp.hlapi.asyncore.UdpTransportTarget` or
        :py:class:`~pysnmp.hlapi.asyncore.Udp6TransportTarget`
        Class instance representing transport type along with SNMP peer
        address.

    notifyType: str
        Indicates type of notification to be sent. Recognized literal
        values are *trap* or *inform*.

    \*varBinds: :class:`tuple` of OID-value pairs or :py:class:`~pysnmp.smi.rfc1902.ObjectType` or :py:class:`~pysnmp.smi.rfc1902.NotificationType`
        One or more objects representing MIB variables to place
        into SNMP notification. It could be tuples of OID-values,
        :py:class:`~pysnmp.smi.rfc1902.ObjectType` class instances
        or :py:class:`~pysnmp.smi.rfc1902.NotificationType` objects.

        Besides user variable-bindings, SNMP Notification PDU requires at
        least two variable-bindings to be present:

        0. SNMPv2-MIB::sysUpTime.0 = <agent uptime>
        1. SNMPv2-SMI::snmpTrapOID.0 = <notification ID>

        When sending SNMPv1 TRAP, more variable-bindings could be present:

        2. SNMP-COMMUNITY-MIB::snmpTrapAddress.0 = <agent-IP>
        3. SNMP-COMMUNITY-MIB::snmpTrapCommunity.0 = <snmp-community-name>
        4. SNMP-COMMUNITY-MIB::snmpTrapEnterprise.0 = <enterprise-OID>

        If user does not supply some or any of the above variable-bindings or
        if they are at the wrong positions, the system will add/reorder the
        missing ones automatically.

        On top of that, some notification types imply including some additional
        variable-bindings providing additional details on the event being
        reported. Therefore it is generally easier to use
        :py:class:`~pysnmp.smi.rfc1902.NotificationType` object which will
        help adding relevant variable-bindings.

    Other Parameters
    ----------------
    \*\*options:
        Request options:

        * `lookupMib` - load MIB and resolve response MIB variables at
          the cost of slightly reduced performance. Default is `False`.

    Yields
    ------
    errorIndication: str
        True value indicates local SNMP error.
    errorStatus: str
        True value indicates SNMP PDU error reported by remote.
    errorIndex: int
        Non-zero value refers to `varBinds[errorIndex-1]`
    varBinds: tuple
        A sequence of :py:class:`~pysnmp.smi.rfc1902.ObjectType` class
        instances representing MIB variables returned in SNMP response.

    Raises
    ------
    PySnmpError
        Or its derivative indicating that an error occurred while
        performing SNMP operation.

    Notes
    -----
    The `sendNotification` generator will be exhausted immediately unless
    an instance of :py:class:`~pysnmp.smi.rfc1902.NotificationType` class
    or a sequence of :py:class:`~pysnmp.smi.rfc1902.ObjectType` `varBinds`
    are sent back into the running generator (supported since Python 2.6).

    Examples
    --------
    >>> from pysnmp.hlapi.v1arch import *
    >>>
    >>> g = sendNotification(SnmpDispatcher(),
    >>>                      CommunityData('public'),
    >>>                      UdpTransportTarget(('demo.snmplabs.com', 162)),
    >>>                      'trap',
    >>>                      NotificationType(ObjectIdentity('IF-MIB', 'linkDown')))
    >>> next(g)
    (None, 0, 0, [])
    """

    def cbFun(*args, **kwargs):
        response[:] = args

    options['cbFun'] = cbFun

    errorIndication, errorStatus, errorIndex = None, 0, 0
    response = [None, 0, 0, []]

    while True:
        if varBinds:
            ntforg.sendNotification(snmpDispatcher, authData, transportTarget,
                                    notifyType, *varBinds, **options)

            snmpDispatcher.transportDispatcher.runDispatcher()

            errorIndication, errorStatus, errorIndex, varBinds = response

        varBinds = (yield errorIndication, errorStatus, errorIndex, varBinds)

        if not varBinds:
            break | [
"def",
"sendNotification",
"(",
"snmpDispatcher",
",",
"authData",
",",
"transportTarget",
",",
"notifyType",
",",
"*",
"varBinds",
",",
"*",
"*",
"options",
")",
":",
"def",
"cbFun",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"response",
"[",
":",
"]",
"=",
"args",
"options",
"[",
"'cbFun'",
"]",
"=",
"cbFun",
"errorIndication",
",",
"errorStatus",
",",
"errorIndex",
"=",
"None",
",",
"0",
",",
"0",
"response",
"=",
"[",
"None",
",",
"0",
",",
"0",
",",
"[",
"]",
"]",
"while",
"True",
":",
"if",
"varBinds",
":",
"ntforg",
".",
"sendNotification",
"(",
"snmpDispatcher",
",",
"authData",
",",
"transportTarget",
",",
"notifyType",
",",
"*",
"varBinds",
",",
"*",
"*",
"options",
")",
"snmpDispatcher",
".",
"transportDispatcher",
".",
"runDispatcher",
"(",
")",
"errorIndication",
",",
"errorStatus",
",",
"errorIndex",
",",
"varBinds",
"=",
"response",
"varBinds",
"=",
"(",
"yield",
"errorIndication",
",",
"errorStatus",
",",
"errorIndex",
",",
"varBinds",
")",
"if",
"not",
"varBinds",
":",
"break"
] | 38.14876 | 0.001689 |
def make_archive(self, path):
    """Create archive of directory and write to ``path``.

    :param path: Path to archive

    Ignored::

        * build/* - This is used for packing the charm itself and any
                    similar tasks.
        * */.*    - Hidden files are all ignored for now. This will most
                    likely be changed into a specific ignore list
                    (.bzr, etc)
    """
    zf = zipfile.ZipFile(path, 'w', zipfile.ZIP_DEFLATED)
    for dirpath, dirnames, filenames in os.walk(self.path):
        relative_path = dirpath[len(self.path) + 1:]
        if relative_path and not self._ignore(relative_path):
            zf.write(dirpath, relative_path)
        for name in filenames:
            archive_name = os.path.join(relative_path, name)
            if not self._ignore(archive_name):
                real_path = os.path.join(dirpath, name)
                self._check_type(real_path)
                if os.path.islink(real_path):
                    self._check_link(real_path)
                    self._write_symlink(
                        zf, os.readlink(real_path), archive_name)
                else:
                    zf.write(real_path, archive_name)
    zf.close()
    return path | [
"def",
"make_archive",
"(",
"self",
",",
"path",
")",
":",
"zf",
"=",
"zipfile",
".",
"ZipFile",
"(",
"path",
",",
"'w'",
",",
"zipfile",
".",
"ZIP_DEFLATED",
")",
"for",
"dirpath",
",",
"dirnames",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"self",
".",
"path",
")",
":",
"relative_path",
"=",
"dirpath",
"[",
"len",
"(",
"self",
".",
"path",
")",
"+",
"1",
":",
"]",
"if",
"relative_path",
"and",
"not",
"self",
".",
"_ignore",
"(",
"relative_path",
")",
":",
"zf",
".",
"write",
"(",
"dirpath",
",",
"relative_path",
")",
"for",
"name",
"in",
"filenames",
":",
"archive_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"relative_path",
",",
"name",
")",
"if",
"not",
"self",
".",
"_ignore",
"(",
"archive_name",
")",
":",
"real_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dirpath",
",",
"name",
")",
"self",
".",
"_check_type",
"(",
"real_path",
")",
"if",
"os",
".",
"path",
".",
"islink",
"(",
"real_path",
")",
":",
"self",
".",
"_check_link",
"(",
"real_path",
")",
"self",
".",
"_write_symlink",
"(",
"zf",
",",
"os",
".",
"readlink",
"(",
"real_path",
")",
",",
"archive_name",
")",
"else",
":",
"zf",
".",
"write",
"(",
"real_path",
",",
"archive_name",
")",
"zf",
".",
"close",
"(",
")",
"return",
"path"
] | 41.59375 | 0.001468 |
def getgoal(path, opts=None):
    '''
    Return goal(s) for a file or directory

    CLI Example:

    .. code-block:: bash

        salt '*' moosefs.getgoal /path/to/file [-[n][h|H]]
        salt '*' moosefs.getgoal /path/to/dir/ [-[n][h|H][r]]
    '''
    cmd = 'mfsgetgoal'
    ret = {}
    if opts:
        cmd += ' -' + opts
    else:
        opts = ''
    cmd += ' ' + path
    out = __salt__['cmd.run_all'](cmd, python_shell=False)
    output = out['stdout'].splitlines()
    if 'r' not in opts:
        goal = output[0].split(': ')
        ret = {
            'goal': goal[1],
        }
    else:
        for line in output:
            if not line:
                continue
            if path in line:
                continue
            comps = line.split()
            keytext = comps[0] + ' with goal'
            if keytext not in ret:
                ret[keytext] = {}
            ret[keytext][comps[3]] = comps[5]
    return ret | [
"def",
"getgoal",
"(",
"path",
",",
"opts",
"=",
"None",
")",
":",
"cmd",
"=",
"'mfsgetgoal'",
"ret",
"=",
"{",
"}",
"if",
"opts",
":",
"cmd",
"+=",
"' -'",
"+",
"opts",
"else",
":",
"opts",
"=",
"''",
"cmd",
"+=",
"' '",
"+",
"path",
"out",
"=",
"__salt__",
"[",
"'cmd.run_all'",
"]",
"(",
"cmd",
",",
"python_shell",
"=",
"False",
")",
"output",
"=",
"out",
"[",
"'stdout'",
"]",
".",
"splitlines",
"(",
")",
"if",
"'r'",
"not",
"in",
"opts",
":",
"goal",
"=",
"output",
"[",
"0",
"]",
".",
"split",
"(",
"': '",
")",
"ret",
"=",
"{",
"'goal'",
":",
"goal",
"[",
"1",
"]",
",",
"}",
"else",
":",
"for",
"line",
"in",
"output",
":",
"if",
"not",
"line",
":",
"continue",
"if",
"path",
"in",
"line",
":",
"continue",
"comps",
"=",
"line",
".",
"split",
"(",
")",
"keytext",
"=",
"comps",
"[",
"0",
"]",
"+",
"' with goal'",
"if",
"keytext",
"not",
"in",
"ret",
":",
"ret",
"[",
"keytext",
"]",
"=",
"{",
"}",
"ret",
"[",
"keytext",
"]",
"[",
"comps",
"[",
"3",
"]",
"]",
"=",
"comps",
"[",
"5",
"]",
"return",
"ret"
] | 23.947368 | 0.001056 |
def geometric_delay(sig, dur, copies, pamp=.5):
    """
    Delay effect by copying data (with Streamix).

    Parameters
    ----------
    sig:
        Input signal (an iterable).
    dur:
        Duration, in samples.
    copies:
        Number of times the signal will be replayed in the given duration. The
        signal is played copies + 1 times.
    pamp:
        The relative remaining amplitude fraction for the next played Stream,
        based on the idea that total amplitude should sum to 1. Defaults to 0.5.
    """
    out = Streamix()
    sig = thub(sig, copies + 1)
    out.add(0, sig * pamp)  # Original
    remain = 1 - pamp
    for unused in xrange(copies):
        gain = remain * pamp
        out.add(dur / copies, sig * gain)
        remain -= gain
    return out | [
"def",
"geometric_delay",
"(",
"sig",
",",
"dur",
",",
"copies",
",",
"pamp",
"=",
".5",
")",
":",
"out",
"=",
"Streamix",
"(",
")",
"sig",
"=",
"thub",
"(",
"sig",
",",
"copies",
"+",
"1",
")",
"out",
".",
"add",
"(",
"0",
",",
"sig",
"*",
"pamp",
")",
"# Original",
"remain",
"=",
"1",
"-",
"pamp",
"for",
"unused",
"in",
"xrange",
"(",
"copies",
")",
":",
"gain",
"=",
"remain",
"*",
"pamp",
"out",
".",
"add",
"(",
"dur",
"/",
"copies",
",",
"sig",
"*",
"gain",
")",
"remain",
"-=",
"gain",
"return",
"out"
] | 25.777778 | 0.012465 |
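The gain schedule above is a geometric series: each echo keeps a pamp fraction of the remaining amplitude, so the gains approach a total of 1. A quick, self-contained check of just that arithmetic (delay_gains is an illustrative helper, not part of the library):

def delay_gains(copies, pamp=.5):
    # Reproduces the gain bookkeeping from the record above.
    gains = [pamp]
    remain = 1 - pamp
    for _ in range(copies):
        gains.append(remain * pamp)
        remain -= gains[-1]
    return gains

print(delay_gains(3))  # [0.5, 0.25, 0.125, 0.0625]; 0.0625 of the amplitude remains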
def get_hash(file_path, checksum='sha1'):
    """
    Generate a hash for the given file

    Args:
        file_path (str): Path to the file to generate the hash for
        checksum (str): hash to apply, one of the supported by hashlib, for
            example sha1 or sha512

    Returns:
        str: hash for that file
    """
    sha = getattr(hashlib, checksum)()
    with open(file_path) as file_descriptor:
        while True:
            chunk = file_descriptor.read(65536)
            if not chunk:
                break
            sha.update(chunk)
    return sha.hexdigest() | [
"def",
"get_hash",
"(",
"file_path",
",",
"checksum",
"=",
"'sha1'",
")",
":",
"sha",
"=",
"getattr",
"(",
"hashlib",
",",
"checksum",
")",
"(",
")",
"with",
"open",
"(",
"file_path",
")",
"as",
"file_descriptor",
":",
"while",
"True",
":",
"chunk",
"=",
"file_descriptor",
".",
"read",
"(",
"65536",
")",
"if",
"not",
"chunk",
":",
"break",
"sha",
".",
"update",
"(",
"chunk",
")",
"return",
"sha",
".",
"hexdigest",
"(",
")"
] | 27.095238 | 0.001698 |
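One caveat with the record above: on Python 3, hashlib operates on bytes, so the file must be opened in binary mode. A self-contained sketch of the same chunked-hashing pattern (file_digest is a hypothetical name):

import hashlib

def file_digest(path, algorithm='sha1', chunk_size=65536):
    # Incremental hashing: large files never load fully into memory.
    digest = hashlib.new(algorithm)
    with open(path, 'rb') as fh:
        while True:
            chunk = fh.read(chunk_size)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()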
def setup_logger(log_level, log_file=None, logger_name=None):
    """setup logger

    @param log_level: debug/info/warning/error/critical
    @param log_file: log file path
    @param logger_name: the name of logger, default is 'root' if not specified
    """
    applogger = AppLog(logger_name)
    level = getattr(logging, log_level.upper(), None)
    if not level:
        color_print("Invalid log level: %s" % log_level, "RED")
        sys.exit(1)
    # hide traceback when log level is INFO/WARNING/ERROR/CRITICAL
    if level >= logging.INFO:
        sys.tracebacklimit = 0
    if log_file:
        applogger._handle2file(log_file)
    else:
        applogger._handle2screen(color=True)
    applogger.logger.setLevel(level) | [
"def",
"setup_logger",
"(",
"log_level",
",",
"log_file",
"=",
"None",
",",
"logger_name",
"=",
"None",
")",
":",
"applogger",
"=",
"AppLog",
"(",
"logger_name",
")",
"level",
"=",
"getattr",
"(",
"logging",
",",
"log_level",
".",
"upper",
"(",
")",
",",
"None",
")",
"if",
"not",
"level",
":",
"color_print",
"(",
"\"Invalid log level: %s\"",
"%",
"log_level",
",",
"\"RED\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"# hide traceback when log level is INFO/WARNING/ERROR/CRITICAL\r",
"if",
"level",
">=",
"logging",
".",
"INFO",
":",
"sys",
".",
"tracebacklimit",
"=",
"0",
"if",
"log_file",
":",
"applogger",
".",
"_handle2file",
"(",
"log_file",
")",
"else",
":",
"applogger",
".",
"_handle2screen",
"(",
"color",
"=",
"True",
")",
"applogger",
".",
"logger",
".",
"setLevel",
"(",
"level",
")"
] | 37.818182 | 0.009379 |
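The level lookup in the record is the usual trick for mapping a CLI string onto a logging constant. A standalone sketch of just that step (parse_level is an illustrative name), with an isinstance check since getattr could also pick up non-level module attributes:

import logging

def parse_level(name):
    # logging.DEBUG/INFO/... are plain ints; anything else is invalid input.
    level = getattr(logging, name.upper(), None)
    if not isinstance(level, int):
        raise ValueError('Invalid log level: %s' % name)
    return level

assert parse_level('debug') == logging.DEBUG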
def get_config(config, default_config):
    '''Load configuration from file if one is given, else use the default'''
    if not config:
        logging.warning('Using default config: %s', default_config)
        config = default_config
    try:
        with open(config, 'r') as config_file:
            return yaml.load(config_file)
    except (yaml.reader.ReaderError,
            yaml.parser.ParserError,
            yaml.scanner.ScannerError) as e:
        raise ConfigError('Invalid yaml file: \n %s' % str(e)) | [
"def",
"get_config",
"(",
"config",
",",
"default_config",
")",
":",
"if",
"not",
"config",
":",
"logging",
".",
"warning",
"(",
"'Using default config: %s'",
",",
"default_config",
")",
"config",
"=",
"default_config",
"try",
":",
"with",
"open",
"(",
"config",
",",
"'r'",
")",
"as",
"config_file",
":",
"return",
"yaml",
".",
"load",
"(",
"config_file",
")",
"except",
"(",
"yaml",
".",
"reader",
".",
"ReaderError",
",",
"yaml",
".",
"parser",
".",
"ParserError",
",",
"yaml",
".",
"scanner",
".",
"ScannerError",
")",
"as",
"e",
":",
"raise",
"ConfigError",
"(",
"'Invalid yaml file: \\n %s'",
"%",
"str",
"(",
"e",
")",
")"
] | 38.230769 | 0.001965 |
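Bare yaml.load() is deprecated since PyYAML 5.1 and unsafe on untrusted input. A modern sketch of the same function would use safe_load and the common yaml.YAMLError base class (which covers the reader, parser and scanner errors caught above); ConfigError here stands in for the caller's own exception type, as in the record:

import logging
import yaml

class ConfigError(Exception):
    pass

def get_config(config, default_config):
    # Same flow as the record, with safe_load instead of load.
    if not config:
        logging.warning('Using default config: %s', default_config)
        config = default_config
    try:
        with open(config, 'r') as config_file:
            return yaml.safe_load(config_file)
    except yaml.YAMLError as e:
        raise ConfigError('Invalid yaml file: \n %s' % str(e))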
def grab_file_url(file_url, appname='utool', download_dir=None, delay=None,
                  spoof=False, fname=None, verbose=True, redownload=False,
                  check_hash=False):
    r"""
    Downloads a file and returns the local path of the file.

    The resulting file is cached, so multiple calls to this function do not
    result in multiple downloads.

    Args:
        file_url (str): url to the file
        appname (str): (default = 'utool')
        download_dir (str): custom download directory (default = None)
        delay (None): delay time before download (default = None)
        spoof (bool): (default = False)
        fname (str): custom file name (default = None)
        verbose (bool): verbosity flag (default = True)
        redownload (bool): if True forces redownload of the file
            (default = False)
        check_hash (bool or iterable): if True, defaults to checking 4 hashes
            (in order): custom, md5, sha1, sha256. These hashes are checked
            for remote copies and, if found, will check the local file. You may
            also specify a list of hashes to check, for example ['md5', 'sha256']
            in the specified order. The first verified hash to be found is used
            (default = False)

    Returns:
        str: fpath - file path string

    CommandLine:
        python -m utool.util_grabdata --test-grab_file_url:0
        python -m utool.util_grabdata --test-grab_file_url:1

    Example0:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_grabdata import *  # NOQA
        >>> import utool as ut  # NOQA
        >>> from os.path import basename
        >>> ut.exec_funckw(ut.grab_file_url, locals())
        >>> file_url = 'http://i.imgur.com/JGrqMnV.png'
        >>> redownload = True
        >>> fname = 'lena.png'
        >>> lena_fpath = ut.grab_file_url(file_url, fname=fname,
        >>>                               redownload=redownload)
        >>> result = basename(lena_fpath)
        >>> print(result)
        lena.png

    Example1:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_grabdata import *  # NOQA
        >>> import utool as ut  # NOQA
        >>> ut.exec_funckw(ut.grab_file_url, locals())
        >>> file_url = 'https://lev.cs.rpi.edu/public/models/detect.yolo.12.classes'
        >>> fname = 'detect.yolo.12.classes'
        >>> check_hash = True
        >>> fpath = ut.grab_file_url(file_url, fname=fname, check_hash=check_hash)
    """
    file_url = clean_dropbox_link(file_url)
    if fname is None:
        fname = basename(file_url)
    # Download zipfile to
    if download_dir is None:
        download_dir = util_cplat.get_app_cache_dir(appname)
    # Zipfile should unzip to:
    fpath = join(download_dir, fname)
    # If check hash, get remote hash and assert local copy is the same
    if check_hash:
        if isinstance(check_hash, (list, tuple)):
            hash_list = check_hash
        else:
            hash_list = ['md5']
            # hash_list = ['sha1.custom', 'md5', 'sha1', 'sha256']
        # Get expected remote file
        hash_remote, hash_tag_remote = grab_file_remote_hash(file_url, hash_list, verbose=verbose)
        hash_list = [hash_tag_remote]
        # We have a valid candidate hash from remote, check for same hash locally
        hash_local, hash_tag_local = get_file_local_hash(fpath, hash_list, verbose=verbose)
        if verbose:
            print('[utool] Pre Local Hash: %r' % (hash_local, ))
            print('[utool] Pre Remote Hash: %r' % (hash_remote, ))
        # Check all 4 hash conditions
        if hash_remote is None:
            # No remote hash provided, turn off post-download hash check
            check_hash = False
        elif hash_local is None:
            if verbose:
                print('[utool] Remote hash provided but local hash missing, redownloading.')
            redownload = True
        elif hash_local == hash_remote:
            assert hash_tag_local == hash_tag_remote, ('hash tag disagreement')
        else:
            if verbose:
                print('[utool] Both hashes provided, but they disagree, redownloading.')
            redownload = True
    # Download
    util_path.ensurepath(download_dir)
    if redownload or not exists(fpath):
        # Download testdata
        if verbose:
            print('[utool] Downloading file %s' % fpath)
        if delay is not None:
            print('[utool] delay download by %r seconds' % (delay,))
            time.sleep(delay)
        download_url(file_url, fpath, spoof=spoof)
    else:
        if verbose:
            print('[utool] Already have file %s' % fpath)
    util_path.assert_exists(fpath)
    # Post-download local hash verification
    if check_hash:
        # File has been successfully downloaded, write remote hash to local hash file
        hash_fpath = '%s.%s' % (fpath, hash_tag_remote, )
        with open(hash_fpath, 'w') as hash_file:
            hash_file.write(hash_remote)
        # For sanity check (custom) and file verification (hashing), get local hash again
        hash_local, hash_tag_local = get_file_local_hash(fpath, hash_list, verbose=verbose)
        if verbose:
            print('[utool] Post Local Hash: %r' % (hash_local, ))
        assert hash_local == hash_remote, 'Post-download hash disagreement'
        assert hash_tag_local == hash_tag_remote, 'Post-download hash tag disagreement'
    return fpath | [
"def",
"grab_file_url",
"(",
"file_url",
",",
"appname",
"=",
"'utool'",
",",
"download_dir",
"=",
"None",
",",
"delay",
"=",
"None",
",",
"spoof",
"=",
"False",
",",
"fname",
"=",
"None",
",",
"verbose",
"=",
"True",
",",
"redownload",
"=",
"False",
",",
"check_hash",
"=",
"False",
")",
":",
"file_url",
"=",
"clean_dropbox_link",
"(",
"file_url",
")",
"if",
"fname",
"is",
"None",
":",
"fname",
"=",
"basename",
"(",
"file_url",
")",
"# Download zipfile to",
"if",
"download_dir",
"is",
"None",
":",
"download_dir",
"=",
"util_cplat",
".",
"get_app_cache_dir",
"(",
"appname",
")",
"# Zipfile should unzip to:",
"fpath",
"=",
"join",
"(",
"download_dir",
",",
"fname",
")",
"# If check hash, get remote hash and assert local copy is the same",
"if",
"check_hash",
":",
"if",
"isinstance",
"(",
"check_hash",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"hash_list",
"=",
"check_hash",
"else",
":",
"hash_list",
"=",
"[",
"'md5'",
"]",
"# hash_list = ['sha1.custom', 'md5', 'sha1', 'sha256']",
"# Get expected remote file",
"hash_remote",
",",
"hash_tag_remote",
"=",
"grab_file_remote_hash",
"(",
"file_url",
",",
"hash_list",
",",
"verbose",
"=",
"verbose",
")",
"hash_list",
"=",
"[",
"hash_tag_remote",
"]",
"# We have a valid candidate hash from remote, check for same hash locally",
"hash_local",
",",
"hash_tag_local",
"=",
"get_file_local_hash",
"(",
"fpath",
",",
"hash_list",
",",
"verbose",
"=",
"verbose",
")",
"if",
"verbose",
":",
"print",
"(",
"'[utool] Pre Local Hash: %r'",
"%",
"(",
"hash_local",
",",
")",
")",
"print",
"(",
"'[utool] Pre Remote Hash: %r'",
"%",
"(",
"hash_remote",
",",
")",
")",
"# Check all 4 hash conditions",
"if",
"hash_remote",
"is",
"None",
":",
"# No remote hash provided, turn off post-download hash check",
"check_hash",
"=",
"False",
"elif",
"hash_local",
"is",
"None",
":",
"if",
"verbose",
":",
"print",
"(",
"'[utool] Remote hash provided but local hash missing, redownloading.'",
")",
"redownload",
"=",
"True",
"elif",
"hash_local",
"==",
"hash_remote",
":",
"assert",
"hash_tag_local",
"==",
"hash_tag_remote",
",",
"(",
"'hash tag disagreement'",
")",
"else",
":",
"if",
"verbose",
":",
"print",
"(",
"'[utool] Both hashes provided, but they disagree, redownloading.'",
")",
"redownload",
"=",
"True",
"# Download",
"util_path",
".",
"ensurepath",
"(",
"download_dir",
")",
"if",
"redownload",
"or",
"not",
"exists",
"(",
"fpath",
")",
":",
"# Download testdata",
"if",
"verbose",
":",
"print",
"(",
"'[utool] Downloading file %s'",
"%",
"fpath",
")",
"if",
"delay",
"is",
"not",
"None",
":",
"print",
"(",
"'[utool] delay download by %r seconds'",
"%",
"(",
"delay",
",",
")",
")",
"time",
".",
"sleep",
"(",
"delay",
")",
"download_url",
"(",
"file_url",
",",
"fpath",
",",
"spoof",
"=",
"spoof",
")",
"else",
":",
"if",
"verbose",
":",
"print",
"(",
"'[utool] Already have file %s'",
"%",
"fpath",
")",
"util_path",
".",
"assert_exists",
"(",
"fpath",
")",
"# Post-download local hash verification",
"if",
"check_hash",
":",
"# File has been successfuly downloaded, write remote hash to local hash file",
"hash_fpath",
"=",
"'%s.%s'",
"%",
"(",
"fpath",
",",
"hash_tag_remote",
",",
")",
"with",
"open",
"(",
"hash_fpath",
",",
"'w'",
")",
"as",
"hash_file",
":",
"hash_file",
".",
"write",
"(",
"hash_remote",
")",
"# For sanity check (custom) and file verification (hashing), get local hash again",
"hash_local",
",",
"hash_tag_local",
"=",
"get_file_local_hash",
"(",
"fpath",
",",
"hash_list",
",",
"verbose",
"=",
"verbose",
")",
"if",
"verbose",
":",
"print",
"(",
"'[utool] Post Local Hash: %r'",
"%",
"(",
"hash_local",
",",
")",
")",
"assert",
"hash_local",
"==",
"hash_remote",
",",
"'Post-download hash disagreement'",
"assert",
"hash_tag_local",
"==",
"hash_tag_remote",
",",
"'Post-download hash tag disagreement'",
"return",
"fpath"
] | 42.717742 | 0.001845 |
def add_my_api_key_to_groups(self, body, **kwargs):  # noqa: E501
    """Add API key to a list of groups.  # noqa: E501

    An endpoint for adding API key to groups. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/api-keys/me/groups -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'`  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.add_my_api_key_to_groups(body, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param list[str] body: A list of IDs of the groups to be updated. (required)
    :return: UpdatedResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('asynchronous'):
        return self.add_my_api_key_to_groups_with_http_info(body, **kwargs)  # noqa: E501
    else:
        (data) = self.add_my_api_key_to_groups_with_http_info(body, **kwargs)  # noqa: E501
        return data | [
"def",
"add_my_api_key_to_groups",
"(",
"self",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'asynchronous'",
")",
":",
"return",
"self",
".",
"add_my_api_key_to_groups_with_http_info",
"(",
"body",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"add_my_api_key_to_groups_with_http_info",
"(",
"body",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
] | 58.904762 | 0.001591 |
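To see the token stream the checker below walks over, the stdlib tokenize module can be run on a snippet directly; this runs as-is:

import io
import tokenize

src = 'x = 1,\n'
for tok in tokenize.generate_tokens(io.StringIO(src).readline):
    print(tokenize.tok_name[tok.exact_type], repr(tok.string))
# A trailing COMMA followed by NEWLINE on an assignment line is exactly
# the shape _is_trailing_comma() in the next record looks for.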
def _is_trailing_comma(tokens, index):
    """Check if the given token is a trailing comma

    :param tokens: Sequence of modules tokens
    :type tokens: list[tokenize.TokenInfo]
    :param int index: Index of token under check in tokens
    :returns: True if the token is a comma which trails an expression
    :rtype: bool
    """
    token = tokens[index]
    if token.exact_type != tokenize.COMMA:
        return False
    # Must have remaining tokens on the same line such as NEWLINE
    left_tokens = itertools.islice(tokens, index + 1, None)
    same_line_remaining_tokens = list(
        itertools.takewhile(
            lambda other_token, _token=token: other_token.start[0] == _token.start[0],
            left_tokens,
        )
    )
    # Note: If the newline is tokenize.NEWLINE and not tokenize.NL
    # then the newline denotes the end of expression
    is_last_element = all(
        other_token.type in (tokenize.NEWLINE, tokenize.COMMENT)
        for other_token in same_line_remaining_tokens
    )
    if not same_line_remaining_tokens or not is_last_element:
        return False

    def get_curline_index_start():
        """Get the index denoting the start of the current line"""
        for subindex, token in enumerate(reversed(tokens[:index])):
            # See Lib/tokenize.py and Lib/token.py in cpython for more info
            if token.type in (tokenize.NEWLINE, tokenize.NL):
                return index - subindex
        return 0

    curline_start = get_curline_index_start()
    expected_tokens = {"return", "yield"}
    for prevtoken in tokens[curline_start:index]:
        if "=" in prevtoken.string or prevtoken.string in expected_tokens:
            return True
    return False | [
"def",
"_is_trailing_comma",
"(",
"tokens",
",",
"index",
")",
":",
"token",
"=",
"tokens",
"[",
"index",
"]",
"if",
"token",
".",
"exact_type",
"!=",
"tokenize",
".",
"COMMA",
":",
"return",
"False",
"# Must have remaining tokens on the same line such as NEWLINE",
"left_tokens",
"=",
"itertools",
".",
"islice",
"(",
"tokens",
",",
"index",
"+",
"1",
",",
"None",
")",
"same_line_remaining_tokens",
"=",
"list",
"(",
"itertools",
".",
"takewhile",
"(",
"lambda",
"other_token",
",",
"_token",
"=",
"token",
":",
"other_token",
".",
"start",
"[",
"0",
"]",
"==",
"_token",
".",
"start",
"[",
"0",
"]",
",",
"left_tokens",
",",
")",
")",
"# Note: If the newline is tokenize.NEWLINE and not tokenize.NL",
"# then the newline denotes the end of expression",
"is_last_element",
"=",
"all",
"(",
"other_token",
".",
"type",
"in",
"(",
"tokenize",
".",
"NEWLINE",
",",
"tokenize",
".",
"COMMENT",
")",
"for",
"other_token",
"in",
"same_line_remaining_tokens",
")",
"if",
"not",
"same_line_remaining_tokens",
"or",
"not",
"is_last_element",
":",
"return",
"False",
"def",
"get_curline_index_start",
"(",
")",
":",
"\"\"\"Get the index denoting the start of the current line\"\"\"",
"for",
"subindex",
",",
"token",
"in",
"enumerate",
"(",
"reversed",
"(",
"tokens",
"[",
":",
"index",
"]",
")",
")",
":",
"# See Lib/tokenize.py and Lib/token.py in cpython for more info",
"if",
"token",
".",
"type",
"in",
"(",
"tokenize",
".",
"NEWLINE",
",",
"tokenize",
".",
"NL",
")",
":",
"return",
"index",
"-",
"subindex",
"return",
"0",
"curline_start",
"=",
"get_curline_index_start",
"(",
")",
"expected_tokens",
"=",
"{",
"\"return\"",
",",
"\"yield\"",
"}",
"for",
"prevtoken",
"in",
"tokens",
"[",
"curline_start",
":",
"index",
"]",
":",
"if",
"\"=\"",
"in",
"prevtoken",
".",
"string",
"or",
"prevtoken",
".",
"string",
"in",
"expected_tokens",
":",
"return",
"True",
"return",
"False"
] | 39.069767 | 0.001161 |
def mt_deconvolve(data_a, data_b, delta, nfft=None, time_bandwidth=None,
                  number_of_tapers=None, weights="adaptive", demean=True,
                  fmax=0.0):
    """
    Deconvolve two time series using multitapers.

    This uses the eigencoefficients and the weights from the multitaper
    spectral estimations and more or less follows this paper:

    .. |br| raw:: html

        <br />

    **Receiver Functions from Multiple-Taper Spectral Correlation Estimates**
    *Jeffrey Park, Vadim Levin* |br|
    Bulletin of the Seismological Society of America Dec 2000,
    90 (6) 1507-1520
    http://dx.doi.org/10.1785/0119990122

    :type data_a: :class:`numpy.ndarray`
    :param data_a: Data for first time series.
    :type data_b: :class:`numpy.ndarray`
    :param data_b: Data for second time series.
    :type delta: float
    :param delta: Sample spacing of the data.
    :type nfft: int
    :param nfft: Number of points for the FFT. If ``nfft == None``, no zero
        padding will be applied before the FFT.
    :type time_bandwidth: float
    :param time_bandwidth: Time-bandwidth product. Common values are 2, 3, 4,
        and numbers in between.
    :type number_of_tapers: int
    :param number_of_tapers: Number of tapers to use. Defaults to
        ``int(2*time_bandwidth) - 1``. This is the maximum sensible amount.
        More tapers will have no great influence on the final spectrum but
        increase the calculation time. Use fewer tapers for a faster
        calculation.
    :type weights: str
    :param weights: ``"adaptive"`` or ``"constant"`` weights.
    :type demean: bool
    :param demean: Force the complex TF to be demeaned.
    :type fmax: float
    :param fmax: Maximum frequency for lowpass cosine filter. Set this to
        zero to not have a filter.
    :return: Returns a dictionary with 5 :class:`numpy.ndarray`'s. See the
        note below.

    .. note::

        Returns a dictionary with five arrays:

        * ``"deconvolved"``: Deconvolved time series.
        * ``"spectrum_a"``: Spectrum of the first time series.
        * ``"spectrum_b"``: Spectrum of the second time series.
        * ``"spectral_ratio"``: The ratio of both spectra.
        * ``"frequencies"``: The used frequency bins for the spectra.
    """
    npts = len(data_a)
    if len(data_b) != npts:
        raise ValueError("Input arrays must have the same length!")
    if nfft is None:
        nfft = npts
    elif nfft < npts:
        raise ValueError("nfft must be larger than the number of samples in "
                         "the array.")
    # Deconvolution utilizes the 32bit version.
    mt = _MtspecType("float32")
    # Use the optimal number of tapers in case no number is specified.
    if number_of_tapers is None:
        number_of_tapers = int(2 * time_bandwidth) - 1
    # Transform the data to work with the library.
    data_a = np.require(data_a, mt.float, requirements=[mt.order])
    data_b = np.require(data_b, mt.float, requirements=[mt.order])
    nf = nfft // 2 + 1
    # Internally uses integers
    if demean:
        demean = 1
    else:
        demean = 0
    # iad = 0 are adaptive, iad = 1 are constant weight - this is
    # counter intuitive.
    if weights == "constant":
        adaptive = 1
    elif weights == "adaptive":
        adaptive = 0
    else:
        raise ValueError('Weights must be either "adaptive" or "constant".')
    tfun = mt.empty(nfft)
    freq = mt.empty(nf)
    spec_ratio = mt.empty(nf)
    speci = mt.empty(nf)
    specj = mt.empty(nf)
    mtspeclib.mt_deconv_(
        C.byref(C.c_int(int(npts))),
        C.byref(C.c_int(int(nfft))),
        C.byref(C.c_float(float(delta))),
        mt.p(data_a),
        mt.p(data_b),
        C.byref(C.c_float(float(time_bandwidth))),
        C.byref(C.c_int(int(number_of_tapers))),
        C.byref(C.c_int(int(nf))),
        C.byref(C.c_int(adaptive)),
        mt.p(freq),
        mt.p(tfun),
        mt.p(spec_ratio),
        mt.p(speci),
        mt.p(specj),
        C.byref(C.c_int(demean)),
        C.byref(C.c_float(fmax)))
    return {
        "frequencies": freq,
        "deconvolved": tfun,
        "spectral_ratio": spec_ratio,
        "spectrum_a": speci,
        "spectrum_b": specj
    } | [
"def",
"mt_deconvolve",
"(",
"data_a",
",",
"data_b",
",",
"delta",
",",
"nfft",
"=",
"None",
",",
"time_bandwidth",
"=",
"None",
",",
"number_of_tapers",
"=",
"None",
",",
"weights",
"=",
"\"adaptive\"",
",",
"demean",
"=",
"True",
",",
"fmax",
"=",
"0.0",
")",
":",
"npts",
"=",
"len",
"(",
"data_a",
")",
"if",
"len",
"(",
"data_b",
")",
"!=",
"npts",
":",
"raise",
"ValueError",
"(",
"\"Input arrays must have the same length!\"",
")",
"if",
"nfft",
"is",
"None",
":",
"nfft",
"=",
"npts",
"elif",
"nfft",
"<",
"npts",
":",
"raise",
"ValueError",
"(",
"\"nfft must be larger then the number of samples in \"",
"\"the array.\"",
")",
"# Deconvolution utilizes the 32bit version.",
"mt",
"=",
"_MtspecType",
"(",
"\"float32\"",
")",
"# Use the optimal number of tapers in case no number is specified.",
"if",
"number_of_tapers",
"is",
"None",
":",
"number_of_tapers",
"=",
"int",
"(",
"2",
"*",
"time_bandwidth",
")",
"-",
"1",
"# Transform the data to work with the library.",
"data_a",
"=",
"np",
".",
"require",
"(",
"data_a",
",",
"mt",
".",
"float",
",",
"requirements",
"=",
"[",
"mt",
".",
"order",
"]",
")",
"data_b",
"=",
"np",
".",
"require",
"(",
"data_b",
",",
"mt",
".",
"float",
",",
"requirements",
"=",
"[",
"mt",
".",
"order",
"]",
")",
"nf",
"=",
"nfft",
"//",
"2",
"+",
"1",
"# Internally uses integers",
"if",
"demean",
":",
"demean",
"=",
"1",
"else",
":",
"demean",
"=",
"0",
"# iad = 0 are adaptive, iad = 1 are constant weight - this is",
"# counter intuitive.",
"if",
"weights",
"==",
"\"constant\"",
":",
"adaptive",
"=",
"1",
"elif",
"weights",
"==",
"\"adaptive\"",
":",
"adaptive",
"=",
"0",
"else",
":",
"raise",
"ValueError",
"(",
"'Weights must be either \"adaptive\" or \"constant\".'",
")",
"tfun",
"=",
"mt",
".",
"empty",
"(",
"nfft",
")",
"freq",
"=",
"mt",
".",
"empty",
"(",
"nf",
")",
"spec_ratio",
"=",
"mt",
".",
"empty",
"(",
"nf",
")",
"speci",
"=",
"mt",
".",
"empty",
"(",
"nf",
")",
"specj",
"=",
"mt",
".",
"empty",
"(",
"nf",
")",
"mtspeclib",
".",
"mt_deconv_",
"(",
"C",
".",
"byref",
"(",
"C",
".",
"c_int",
"(",
"int",
"(",
"npts",
")",
")",
")",
",",
"C",
".",
"byref",
"(",
"C",
".",
"c_int",
"(",
"int",
"(",
"nfft",
")",
")",
")",
",",
"C",
".",
"byref",
"(",
"C",
".",
"c_float",
"(",
"float",
"(",
"delta",
")",
")",
")",
",",
"mt",
".",
"p",
"(",
"data_a",
")",
",",
"mt",
".",
"p",
"(",
"data_b",
")",
",",
"C",
".",
"byref",
"(",
"C",
".",
"c_float",
"(",
"float",
"(",
"time_bandwidth",
")",
")",
")",
",",
"C",
".",
"byref",
"(",
"C",
".",
"c_int",
"(",
"int",
"(",
"number_of_tapers",
")",
")",
")",
",",
"C",
".",
"byref",
"(",
"C",
".",
"c_int",
"(",
"int",
"(",
"nf",
")",
")",
")",
",",
"C",
".",
"byref",
"(",
"C",
".",
"c_int",
"(",
"adaptive",
")",
")",
",",
"mt",
".",
"p",
"(",
"freq",
")",
",",
"mt",
".",
"p",
"(",
"tfun",
")",
",",
"mt",
".",
"p",
"(",
"spec_ratio",
")",
",",
"mt",
".",
"p",
"(",
"speci",
")",
",",
"mt",
".",
"p",
"(",
"specj",
")",
",",
"C",
".",
"byref",
"(",
"C",
".",
"c_int",
"(",
"demean",
")",
")",
",",
"C",
".",
"byref",
"(",
"C",
".",
"c_float",
"(",
"fmax",
")",
")",
")",
"return",
"{",
"\"frequencies\"",
":",
"freq",
",",
"\"deconvolved\"",
":",
"tfun",
",",
"\"spectral_ratio\"",
":",
"spec_ratio",
",",
"\"spectrum_a\"",
":",
"speci",
",",
"\"spectrum_b\"",
":",
"specj",
"}"
] | 32.472441 | 0.000235 |
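For intuition only: frequency-domain deconvolution in its crudest single-taper form, with water-level regularization standing in for the multitaper weighting used above. This is not the mtspec algorithm, just the underlying idea, and assumes both inputs have the same length:

import numpy as np

def naive_deconvolve(a, b, water_level=1e-6):
    # Estimate the transfer function t such that a is approximately t convolved with b.
    A, B = np.fft.rfft(a), np.fft.rfft(b)
    power = (B * np.conj(B)).real
    # Clip small spectral power to avoid division blow-ups (water level).
    power = np.maximum(power, water_level * power.max())
    return np.fft.irfft(A * np.conj(B) / power, n=len(a))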
def from_clause(cls, clause):
    """ Factory method """
    [_, field, operator, val] = clause
    return cls(field, operator, resolve(val)) | [
"def",
"from_clause",
"(",
"cls",
",",
"clause",
")",
":",
"[",
"_",
",",
"field",
",",
"operator",
",",
"val",
"]",
"=",
"clause",
"return",
"cls",
"(",
"field",
",",
"operator",
",",
"resolve",
"(",
"val",
")",
")"
] | 37.5 | 0.013072 |
def pupatizeElements(self):
    """Transform all raba objects into pupas"""
    for i in range(len(self)):
        self[i] = self[i].pupa() | [
"def",
"pupatizeElements",
"(",
"self",
")",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
")",
")",
":",
"self",
"[",
"i",
"]",
"=",
"self",
"[",
"i",
"]",
".",
"pupa",
"(",
")"
] | 32 | 0.053435 |
def _joinclass(codtuple):
"""
The opposite of splitclass(). Joins a (service, major, minor) class-of-
device tuple into a whole class of device value.
"""
if not isinstance(codtuple, tuple):
raise TypeError("argument must be tuple, was %s" % type(codtuple))
if len(codtuple) != 3:
raise ValueError("tuple must have 3 items, has %d" % len(codtuple))
serviceclass = codtuple[0] << 2 << 11
majorclass = codtuple[1] << 2 << 6
minorclass = codtuple[2] << 2
return (serviceclass | majorclass | minorclass) | [
"def",
"_joinclass",
"(",
"codtuple",
")",
":",
"if",
"not",
"isinstance",
"(",
"codtuple",
",",
"tuple",
")",
":",
"raise",
"TypeError",
"(",
"\"argument must be tuple, was %s\"",
"%",
"type",
"(",
"codtuple",
")",
")",
"if",
"len",
"(",
"codtuple",
")",
"!=",
"3",
":",
"raise",
"ValueError",
"(",
"\"tuple must have 3 items, has %d\"",
"%",
"len",
"(",
"codtuple",
")",
")",
"serviceclass",
"=",
"codtuple",
"[",
"0",
"]",
"<<",
"2",
"<<",
"11",
"majorclass",
"=",
"codtuple",
"[",
"1",
"]",
"<<",
"2",
"<<",
"6",
"minorclass",
"=",
"codtuple",
"[",
"2",
"]",
"<<",
"2",
"return",
"(",
"serviceclass",
"|",
"majorclass",
"|",
"minorclass",
")"
] | 38.785714 | 0.001799 |
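For illustration, a self-contained sketch of the same bit packing; the combined shifts collapse to 13, 8 and 2, and the class-of-device numbers below are invented:

def joinclass(codtuple):
    service, major, minor = codtuple
    # service bits start at bit 13, major class at bit 8, minor class at bit 2
    return (service << 13) | (major << 8) | (minor << 2)

assert joinclass((0, 4, 1)) == 0x404   # major class 4, minor class 1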
def generate_sitemap(sitemap: typing.Mapping, prefix: list=None):
"""Create a sitemap template from the given sitemap.
The `sitemap` should be a mapping where the key is a string which
represents a single URI segment, and the value is either another mapping
or a callable (e.g. function) object.
Args:
sitemap: The definition of the routes and their views
prefix: The base url segment which gets prepended to the given map.
Examples:
The sitemap should follow the following format:
>>> {
>>> 'string_literal': {
>>> '': func1,
>>> '{arg}': func2,
>>> },
>>> }
The key points here are thus:
- Any string key not matched by the following rule will be matched
literally
- Any string key surrounded by curly brackets matches a url segment
which represents a parameter whose name is the enclosed string
(i.e. should be a valid keyword argument)
- *note* a side effect of this is that an empty string key will
match all routes leading up to the current given mapping
The above sitemap would compile to the following url mappings:
- /string_literal/ -> calls `func1()`
- /string_literal/{arg}/ -> calls `func2(arg=<the matched value>)`
"""
# Ensures all generated urls are prefixed with a the prefix string
if prefix is None:
prefix = []
for segment, sub_segment in sitemap.items():
if isinstance(sub_segment, collections.abc.Mapping):
yield from generate_sitemap(sub_segment, prefix + [segment])
elif isinstance(sub_segment, collections.abc.Callable):
if segment:
prefix = prefix + [segment]
yield (prefix, sub_segment)
else:
raise ValueError('Invalid datatype for sitemap') | [
"def",
"generate_sitemap",
"(",
"sitemap",
":",
"typing",
".",
"Mapping",
",",
"prefix",
":",
"list",
"=",
"None",
")",
":",
"# Ensures all generated urls are prefixed with a the prefix string",
"if",
"prefix",
"is",
"None",
":",
"prefix",
"=",
"[",
"]",
"for",
"segment",
",",
"sub_segment",
"in",
"sitemap",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"sub_segment",
",",
"collections",
".",
"abc",
".",
"Mapping",
")",
":",
"yield",
"from",
"generate_sitemap",
"(",
"sub_segment",
",",
"prefix",
"+",
"[",
"segment",
"]",
")",
"elif",
"isinstance",
"(",
"sub_segment",
",",
"collections",
".",
"abc",
".",
"Callable",
")",
":",
"if",
"segment",
":",
"prefix",
"=",
"prefix",
"+",
"[",
"segment",
"]",
"yield",
"(",
"prefix",
",",
"sub_segment",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Invalid datatype for sitemap'",
")"
] | 41.152174 | 0.001548 |
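A hedged usage sketch of the generator above. Note that the loop rebinds prefix once a non-empty segment maps to a callable, so within each mapping the empty-string key should come before sibling parameter keys:

def home(): pass
def user_detail(id): pass

sitemap = {
    'users': {
        '': home,             # matches /users/
        '{id}': user_detail,  # matches /users/{id}/
    },
}
for segments, view in generate_sitemap(sitemap):
    print('/' + '/'.join(segments) + '/', view.__name__)
# -> /users/ home
# -> /users/{id}/ user_detail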
def html_to_plain_text(html):
"""Converts html code into formatted plain text."""
# Use BeautifulSoup to normalize the html
soup = BeautifulSoup(html, "html.parser")
# Init the parser
parser = HTML2PlainParser()
parser.feed(str(soup.encode('utf-8')))
# Strip the end of the plain text
result = parser.text.rstrip()
# Add footnotes
if parser.links:
result += '\n\n'
for link in parser.links:
result += '[{}]: {}\n'.format(link[0], link[1])
return result | [
"def",
"html_to_plain_text",
"(",
"html",
")",
":",
"# Use BeautifulSoup to normalize the html",
"soup",
"=",
"BeautifulSoup",
"(",
"html",
",",
"\"html.parser\"",
")",
"# Init the parser",
"parser",
"=",
"HTML2PlainParser",
"(",
")",
"parser",
".",
"feed",
"(",
"str",
"(",
"soup",
".",
"encode",
"(",
"'utf-8'",
")",
")",
")",
"# Strip the end of the plain text",
"result",
"=",
"parser",
".",
"text",
".",
"rstrip",
"(",
")",
"# Add footnotes",
"if",
"parser",
".",
"links",
":",
"result",
"+=",
"'\\n\\n'",
"for",
"link",
"in",
"parser",
".",
"links",
":",
"result",
"+=",
"'[{}]: {}\\n'",
".",
"format",
"(",
"link",
"[",
"0",
"]",
",",
"link",
"[",
"1",
"]",
")",
"return",
"result"
] | 34 | 0.001908 |
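A hypothetical call; HTML2PlainParser is defined elsewhere, and the exact footnote layout shown is an assumption inferred from the code above:

text = html_to_plain_text('<p>See the <a href="https://example.com">docs</a>.</p>')
# expected shape, if the parser numbers links as it collects them:
# See the docs[1].
#
# [1]: https://example.com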
def post_report(coverage, args):
"""Post coverage report to coveralls.io."""
response = requests.post(URL, files={'json_file': json.dumps(coverage)},
verify=(not args.skip_ssl_verify))
try:
result = response.json()
except ValueError:
result = {'error': 'Failure to submit data. '
'Response [%(status)s]: %(text)s' % {
'status': response.status_code,
'text': response.text}}
print(result)
if 'error' in result:
return result['error']
return 0 | [
"def",
"post_report",
"(",
"coverage",
",",
"args",
")",
":",
"response",
"=",
"requests",
".",
"post",
"(",
"URL",
",",
"files",
"=",
"{",
"'json_file'",
":",
"json",
".",
"dumps",
"(",
"coverage",
")",
"}",
",",
"verify",
"=",
"(",
"not",
"args",
".",
"skip_ssl_verify",
")",
")",
"try",
":",
"result",
"=",
"response",
".",
"json",
"(",
")",
"except",
"ValueError",
":",
"result",
"=",
"{",
"'error'",
":",
"'Failure to submit data. '",
"'Response [%(status)s]: %(text)s'",
"%",
"{",
"'status'",
":",
"response",
".",
"status_code",
",",
"'text'",
":",
"response",
".",
"text",
"}",
"}",
"print",
"(",
"result",
")",
"if",
"'error'",
"in",
"result",
":",
"return",
"result",
"[",
"'error'",
"]",
"return",
"0"
] | 38 | 0.001712 |
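A sketch of a minimal call. The payload keys follow the public coveralls.io job API (repo_token, service_name, source_files), which is an assumption, since the code above only forwards the dict:

class Args:                # stand-in: only skip_ssl_verify is read
    skip_ssl_verify = False

coverage = {
    'repo_token': 'your-token-here',   # hypothetical token
    'service_name': 'local',
    'source_files': [],
}
exit_code = post_report(coverage, Args())   # 0 on success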
def assemble_caption(begin_line, begin_index, end_line, end_index, lines):
"""
Take the caption of a picture and put it all together
in a nice way. If it spans multiple lines, put it on one line. If it
contains controlled characters, strip them out. If it has tags we don't
want to worry about, get rid of them, etc.
:param: begin_line (int): the index of the line where the caption begins
:param: begin_index (int): the index within the line where the caption
begins
:param: end_line (int): the index of the line where the caption ends
:param: end_index (int): the index within the line where the caption ends
:param: lines ([string, string, ...]): the line strings of the text
:return: caption (string): the caption, formatted and pieced together
"""
# stuff we don't like
label_head = '\\label{'
# reassemble that sucker
if end_line > begin_line:
# our caption spanned multiple lines
caption = lines[begin_line][begin_index:]
for included_line_index in range(begin_line + 1, end_line):
caption = caption + ' ' + lines[included_line_index]
caption = caption + ' ' + lines[end_line][:end_index]
caption = caption.replace('\n', ' ')
        caption = caption.replace('  ', ' ')
else:
# it fit on one line
caption = lines[begin_line][begin_index:end_index]
# clean out a label tag, if there is one
label_begin = caption.find(label_head)
if label_begin > -1:
# we know that our caption is only one line, so if there's a label
# tag in it, it will be all on one line. so we make up some args
dummy_start, dummy_start_line, label_end, dummy_end = \
find_open_and_close_braces(0, label_begin, '{', [caption])
caption = caption[:label_begin] + caption[label_end + 1:]
caption = caption.strip()
if len(caption) > 1 and caption[0] == '{' and caption[-1] == '}':
caption = caption[1:-1]
return caption | [
"def",
"assemble_caption",
"(",
"begin_line",
",",
"begin_index",
",",
"end_line",
",",
"end_index",
",",
"lines",
")",
":",
"# stuff we don't like",
"label_head",
"=",
"'\\\\label{'",
"# reassemble that sucker",
"if",
"end_line",
">",
"begin_line",
":",
"# our caption spanned multiple lines",
"caption",
"=",
"lines",
"[",
"begin_line",
"]",
"[",
"begin_index",
":",
"]",
"for",
"included_line_index",
"in",
"range",
"(",
"begin_line",
"+",
"1",
",",
"end_line",
")",
":",
"caption",
"=",
"caption",
"+",
"' '",
"+",
"lines",
"[",
"included_line_index",
"]",
"caption",
"=",
"caption",
"+",
"' '",
"+",
"lines",
"[",
"end_line",
"]",
"[",
":",
"end_index",
"]",
"caption",
"=",
"caption",
".",
"replace",
"(",
"'\\n'",
",",
"' '",
")",
"caption",
"=",
"caption",
".",
"replace",
"(",
"' '",
",",
"' '",
")",
"else",
":",
"# it fit on one line",
"caption",
"=",
"lines",
"[",
"begin_line",
"]",
"[",
"begin_index",
":",
"end_index",
"]",
"# clean out a label tag, if there is one",
"label_begin",
"=",
"caption",
".",
"find",
"(",
"label_head",
")",
"if",
"label_begin",
">",
"-",
"1",
":",
"# we know that our caption is only one line, so if there's a label",
"# tag in it, it will be all on one line. so we make up some args",
"dummy_start",
",",
"dummy_start_line",
",",
"label_end",
",",
"dummy_end",
"=",
"find_open_and_close_braces",
"(",
"0",
",",
"label_begin",
",",
"'{'",
",",
"[",
"caption",
"]",
")",
"caption",
"=",
"caption",
"[",
":",
"label_begin",
"]",
"+",
"caption",
"[",
"label_end",
"+",
"1",
":",
"]",
"caption",
"=",
"caption",
".",
"strip",
"(",
")",
"if",
"len",
"(",
"caption",
")",
">",
"1",
"and",
"caption",
"[",
"0",
"]",
"==",
"'{'",
"and",
"caption",
"[",
"-",
"1",
"]",
"==",
"'}'",
":",
"caption",
"=",
"caption",
"[",
"1",
":",
"-",
"1",
"]",
"return",
"caption"
] | 39.62 | 0.000493 |
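A minimal worked example with a single-line caption and no \label tag; the indices point just inside the braces of \caption{...}:

lines = ['\\caption{A simple caption} trailing text']
assemble_caption(0, 9, 0, 25, lines)   # -> 'A simple caption'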
def path(self):
"""str: URL path for the model's APIs."""
return "/projects/%s/datasets/%s/models/%s" % (
self._proto.project_id,
self._proto.dataset_id,
self._proto.model_id,
) | [
"def",
"path",
"(",
"self",
")",
":",
"return",
"\"/projects/%s/datasets/%s/models/%s\"",
"%",
"(",
"self",
".",
"_proto",
".",
"project_id",
",",
"self",
".",
"_proto",
".",
"dataset_id",
",",
"self",
".",
"_proto",
".",
"model_id",
",",
")"
] | 33 | 0.008439 |
def Overlay_highlightQuad(self, quad, **kwargs):
"""
Function path: Overlay.highlightQuad
Domain: Overlay
Method name: highlightQuad
Parameters:
Required arguments:
'quad' (type: DOM.Quad) -> Quad to highlight
Optional arguments:
'color' (type: DOM.RGBA) -> The highlight fill color (default: transparent).
'outlineColor' (type: DOM.RGBA) -> The highlight outline color (default: transparent).
No return value.
Description: Highlights given quad. Coordinates are absolute with respect to the main frame viewport.
"""
expected = ['color', 'outlineColor']
passed_keys = list(kwargs.keys())
assert all([(key in expected) for key in passed_keys]
), "Allowed kwargs are ['color', 'outlineColor']. Passed kwargs: %s" % passed_keys
subdom_funcs = self.synchronous_command('Overlay.highlightQuad', quad=
quad, **kwargs)
return subdom_funcs | [
"def",
"Overlay_highlightQuad",
"(",
"self",
",",
"quad",
",",
"*",
"*",
"kwargs",
")",
":",
"expected",
"=",
"[",
"'color'",
",",
"'outlineColor'",
"]",
"passed_keys",
"=",
"list",
"(",
"kwargs",
".",
"keys",
"(",
")",
")",
"assert",
"all",
"(",
"[",
"(",
"key",
"in",
"expected",
")",
"for",
"key",
"in",
"passed_keys",
"]",
")",
",",
"\"Allowed kwargs are ['color', 'outlineColor']. Passed kwargs: %s\"",
"%",
"passed_keys",
"subdom_funcs",
"=",
"self",
".",
"synchronous_command",
"(",
"'Overlay.highlightQuad'",
",",
"quad",
"=",
"quad",
",",
"*",
"*",
"kwargs",
")",
"return",
"subdom_funcs"
] | 38.304348 | 0.03876 |
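A hypothetical invocation; client stands in for whatever object exposes these generated methods, and a DOM.Quad is eight numbers giving four x,y vertex pairs:

quad = [10, 10, 110, 10, 110, 60, 10, 60]   # x1,y1 ... x4,y4
client.Overlay_highlightQuad(
    quad,
    color={'r': 255, 'g': 0, 'b': 0, 'a': 0.4},   # DOM.RGBA shape
)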
def load_stl_ascii(file_obj):
"""
Load an ASCII STL file from a file object.
Parameters
----------
    file_obj: open file-like object
Returns
----------
loaded: kwargs for a Trimesh constructor with keys:
vertices: (n,3) float, vertices
faces: (m,3) int, indexes of vertices
face_normals: (m,3) float, normal vector of each face
"""
# the first line is the header
header = file_obj.readline()
# make sure header is a string, not bytes
if hasattr(header, 'decode'):
try:
header = header.decode('utf-8')
except BaseException:
header = ''
# save header to metadata
metadata = {'header': header}
# read all text into one string
text = file_obj.read()
# convert bytes to string
if hasattr(text, 'decode'):
text = text.decode('utf-8')
# split by endsolid keyword
text = text.lower().split('endsolid')[0]
# create array of splits
blob = np.array(text.strip().split())
# there are 21 'words' in each face
face_len = 21
# length of blob should be multiple of face_len
if (len(blob) % face_len) != 0:
raise HeaderError('Incorrect length STL file!')
face_count = int(len(blob) / face_len)
# this offset is to be added to a fixed set of tiled indices
offset = face_len * np.arange(face_count).reshape((-1, 1))
normal_index = np.tile([2, 3, 4], (face_count, 1)) + offset
vertex_index = np.tile([8, 9, 10,
12, 13, 14,
16, 17, 18], (face_count, 1)) + offset
# faces are groups of three sequential vertices
faces = np.arange(face_count * 3).reshape((-1, 3))
face_normals = blob[normal_index].astype('<f8')
vertices = blob[vertex_index.reshape((-1, 3))].astype('<f8')
return {'vertices': vertices,
'faces': faces,
'metadata': metadata,
'face_normals': face_normals} | [
"def",
"load_stl_ascii",
"(",
"file_obj",
")",
":",
"# the first line is the header",
"header",
"=",
"file_obj",
".",
"readline",
"(",
")",
"# make sure header is a string, not bytes",
"if",
"hasattr",
"(",
"header",
",",
"'decode'",
")",
":",
"try",
":",
"header",
"=",
"header",
".",
"decode",
"(",
"'utf-8'",
")",
"except",
"BaseException",
":",
"header",
"=",
"''",
"# save header to metadata",
"metadata",
"=",
"{",
"'header'",
":",
"header",
"}",
"# read all text into one string",
"text",
"=",
"file_obj",
".",
"read",
"(",
")",
"# convert bytes to string",
"if",
"hasattr",
"(",
"text",
",",
"'decode'",
")",
":",
"text",
"=",
"text",
".",
"decode",
"(",
"'utf-8'",
")",
"# split by endsolid keyword",
"text",
"=",
"text",
".",
"lower",
"(",
")",
".",
"split",
"(",
"'endsolid'",
")",
"[",
"0",
"]",
"# create array of splits",
"blob",
"=",
"np",
".",
"array",
"(",
"text",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
")",
"# there are 21 'words' in each face",
"face_len",
"=",
"21",
"# length of blob should be multiple of face_len",
"if",
"(",
"len",
"(",
"blob",
")",
"%",
"face_len",
")",
"!=",
"0",
":",
"raise",
"HeaderError",
"(",
"'Incorrect length STL file!'",
")",
"face_count",
"=",
"int",
"(",
"len",
"(",
"blob",
")",
"/",
"face_len",
")",
"# this offset is to be added to a fixed set of tiled indices",
"offset",
"=",
"face_len",
"*",
"np",
".",
"arange",
"(",
"face_count",
")",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
")",
")",
"normal_index",
"=",
"np",
".",
"tile",
"(",
"[",
"2",
",",
"3",
",",
"4",
"]",
",",
"(",
"face_count",
",",
"1",
")",
")",
"+",
"offset",
"vertex_index",
"=",
"np",
".",
"tile",
"(",
"[",
"8",
",",
"9",
",",
"10",
",",
"12",
",",
"13",
",",
"14",
",",
"16",
",",
"17",
",",
"18",
"]",
",",
"(",
"face_count",
",",
"1",
")",
")",
"+",
"offset",
"# faces are groups of three sequential vertices",
"faces",
"=",
"np",
".",
"arange",
"(",
"face_count",
"*",
"3",
")",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"3",
")",
")",
"face_normals",
"=",
"blob",
"[",
"normal_index",
"]",
".",
"astype",
"(",
"'<f8'",
")",
"vertices",
"=",
"blob",
"[",
"vertex_index",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"3",
")",
")",
"]",
".",
"astype",
"(",
"'<f8'",
")",
"return",
"{",
"'vertices'",
":",
"vertices",
",",
"'faces'",
":",
"faces",
",",
"'metadata'",
":",
"metadata",
",",
"'face_normals'",
":",
"face_normals",
"}"
] | 31.868852 | 0.000499 |
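The index arithmetic above (words 2-4 for the normal, 8-10/12-14/16-18 for the vertices) follows from the fixed 21-word layout of one ASCII STL facet:

facet normal nx ny nz      # words 0..4
  outer loop               # words 5..6
    vertex x1 y1 z1        # words 7..10
    vertex x2 y2 z2        # words 11..14
    vertex x3 y3 z3        # words 15..18
  endloop                  # word 19
endfacet                   # word 20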
def set_tags(self, tags):
"""Sets this object's tags to only those tags.
* tags: a sequence of tag names or Tag objects.
"""
c_old_tags = []
old_tags = []
c_new_tags = []
new_tags = []
to_remove = []
to_add = []
tags_on_server = self.get_tags()
for tag in tags_on_server:
c_old_tags.append(tag.get_name().lower())
old_tags.append(tag.get_name())
for tag in tags:
c_new_tags.append(tag.lower())
new_tags.append(tag)
for i in range(0, len(old_tags)):
if not c_old_tags[i] in c_new_tags:
to_remove.append(old_tags[i])
for i in range(0, len(new_tags)):
if not c_new_tags[i] in c_old_tags:
to_add.append(new_tags[i])
self.remove_tags(to_remove)
self.add_tags(to_add) | [
"def",
"set_tags",
"(",
"self",
",",
"tags",
")",
":",
"c_old_tags",
"=",
"[",
"]",
"old_tags",
"=",
"[",
"]",
"c_new_tags",
"=",
"[",
"]",
"new_tags",
"=",
"[",
"]",
"to_remove",
"=",
"[",
"]",
"to_add",
"=",
"[",
"]",
"tags_on_server",
"=",
"self",
".",
"get_tags",
"(",
")",
"for",
"tag",
"in",
"tags_on_server",
":",
"c_old_tags",
".",
"append",
"(",
"tag",
".",
"get_name",
"(",
")",
".",
"lower",
"(",
")",
")",
"old_tags",
".",
"append",
"(",
"tag",
".",
"get_name",
"(",
")",
")",
"for",
"tag",
"in",
"tags",
":",
"c_new_tags",
".",
"append",
"(",
"tag",
".",
"lower",
"(",
")",
")",
"new_tags",
".",
"append",
"(",
"tag",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"old_tags",
")",
")",
":",
"if",
"not",
"c_old_tags",
"[",
"i",
"]",
"in",
"c_new_tags",
":",
"to_remove",
".",
"append",
"(",
"old_tags",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"new_tags",
")",
")",
":",
"if",
"not",
"c_new_tags",
"[",
"i",
"]",
"in",
"c_old_tags",
":",
"to_add",
".",
"append",
"(",
"new_tags",
"[",
"i",
"]",
")",
"self",
".",
"remove_tags",
"(",
"to_remove",
")",
"self",
".",
"add_tags",
"(",
"to_add",
")"
] | 26.333333 | 0.00222 |
def expr_items(expr):
"""
Returns a set() of all items (symbols and choices) that appear in the
expression 'expr'.
"""
res = set()
def rec(subexpr):
if subexpr.__class__ is tuple:
# AND, OR, NOT, or relation
rec(subexpr[1])
# NOTs only have a single operand
if subexpr[0] is not NOT:
rec(subexpr[2])
else:
# Symbol or choice
res.add(subexpr)
rec(expr)
return res | [
"def",
"expr_items",
"(",
"expr",
")",
":",
"res",
"=",
"set",
"(",
")",
"def",
"rec",
"(",
"subexpr",
")",
":",
"if",
"subexpr",
".",
"__class__",
"is",
"tuple",
":",
"# AND, OR, NOT, or relation",
"rec",
"(",
"subexpr",
"[",
"1",
"]",
")",
"# NOTs only have a single operand",
"if",
"subexpr",
"[",
"0",
"]",
"is",
"not",
"NOT",
":",
"rec",
"(",
"subexpr",
"[",
"2",
"]",
")",
"else",
":",
"# Symbol or choice",
"res",
".",
"add",
"(",
"subexpr",
")",
"rec",
"(",
"expr",
")",
"return",
"res"
] | 20.041667 | 0.001984 |
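An illustrative call with Kconfiglib-style tuple expressions; the AND/NOT sentinels below are stand-ins for the module's real operator constants:

AND, NOT = object(), object()
expr = (AND, 'FOO', (NOT, 'BAR'))
expr_items(expr)   # -> {'FOO', 'BAR'}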
def count_characters(root, out):
"""Count the occurrances of the different characters in the files"""
if os.path.isfile(root):
with open(root, 'rb') as in_f:
for line in in_f:
for char in line:
if char not in out:
out[char] = 0
out[char] = out[char] + 1
elif os.path.isdir(root):
for filename in os.listdir(root):
count_characters(os.path.join(root, filename), out) | [
"def",
"count_characters",
"(",
"root",
",",
"out",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"root",
")",
":",
"with",
"open",
"(",
"root",
",",
"'rb'",
")",
"as",
"in_f",
":",
"for",
"line",
"in",
"in_f",
":",
"for",
"char",
"in",
"line",
":",
"if",
"char",
"not",
"in",
"out",
":",
"out",
"[",
"char",
"]",
"=",
"0",
"out",
"[",
"char",
"]",
"=",
"out",
"[",
"char",
"]",
"+",
"1",
"elif",
"os",
".",
"path",
".",
"isdir",
"(",
"root",
")",
":",
"for",
"filename",
"in",
"os",
".",
"listdir",
"(",
"root",
")",
":",
"count_characters",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"filename",
")",
",",
"out",
")"
] | 40.5 | 0.002012 |
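A short usage sketch. Because files are opened in binary mode, on Python 3 the dictionary keys are byte values (ints) rather than one-character strings:

counts = {}
count_characters('/path/to/project', counts)   # hypothetical path
total_bytes = sum(counts.values())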
def readArray(self):
"""
Reads an array from the stream.
@warning: There is a very specific problem with AMF3 where the first
three bytes of an encoded empty C{dict} will mirror that of an encoded
C{{'': 1, '2': 2}}
"""
size = self.readInteger(False)
if size & REFERENCE_BIT == 0:
return self.context.getObject(size >> 1)
size >>= 1
key = self.readBytes()
if key == '':
# integer indexes only -> python list
result = []
self.context.addObject(result)
for i in xrange(size):
result.append(self.readElement())
return result
result = pyamf.MixedArray()
self.context.addObject(result)
while key:
result[key] = self.readElement()
key = self.readBytes()
for i in xrange(size):
el = self.readElement()
result[i] = el
return result | [
"def",
"readArray",
"(",
"self",
")",
":",
"size",
"=",
"self",
".",
"readInteger",
"(",
"False",
")",
"if",
"size",
"&",
"REFERENCE_BIT",
"==",
"0",
":",
"return",
"self",
".",
"context",
".",
"getObject",
"(",
"size",
">>",
"1",
")",
"size",
">>=",
"1",
"key",
"=",
"self",
".",
"readBytes",
"(",
")",
"if",
"key",
"==",
"''",
":",
"# integer indexes only -> python list",
"result",
"=",
"[",
"]",
"self",
".",
"context",
".",
"addObject",
"(",
"result",
")",
"for",
"i",
"in",
"xrange",
"(",
"size",
")",
":",
"result",
".",
"append",
"(",
"self",
".",
"readElement",
"(",
")",
")",
"return",
"result",
"result",
"=",
"pyamf",
".",
"MixedArray",
"(",
")",
"self",
".",
"context",
".",
"addObject",
"(",
"result",
")",
"while",
"key",
":",
"result",
"[",
"key",
"]",
"=",
"self",
".",
"readElement",
"(",
")",
"key",
"=",
"self",
".",
"readBytes",
"(",
")",
"for",
"i",
"in",
"xrange",
"(",
"size",
")",
":",
"el",
"=",
"self",
".",
"readElement",
"(",
")",
"result",
"[",
"i",
"]",
"=",
"el",
"return",
"result"
] | 24.615385 | 0.002004 |
def get_host(environ):
# type: (Dict[str, str]) -> str
"""Return the host for the given WSGI environment. Yanked from Werkzeug."""
if environ.get("HTTP_HOST"):
rv = environ["HTTP_HOST"]
if environ["wsgi.url_scheme"] == "http" and rv.endswith(":80"):
rv = rv[:-3]
elif environ["wsgi.url_scheme"] == "https" and rv.endswith(":443"):
rv = rv[:-4]
elif environ.get("SERVER_NAME"):
rv = environ["SERVER_NAME"]
if (environ["wsgi.url_scheme"], environ["SERVER_PORT"]) not in (
("https", "443"),
("http", "80"),
):
rv += ":" + environ["SERVER_PORT"]
else:
# In spite of the WSGI spec, SERVER_NAME might not be present.
rv = "unknown"
return rv | [
"def",
"get_host",
"(",
"environ",
")",
":",
"# type: (Dict[str, str]) -> str",
"if",
"environ",
".",
"get",
"(",
"\"HTTP_HOST\"",
")",
":",
"rv",
"=",
"environ",
"[",
"\"HTTP_HOST\"",
"]",
"if",
"environ",
"[",
"\"wsgi.url_scheme\"",
"]",
"==",
"\"http\"",
"and",
"rv",
".",
"endswith",
"(",
"\":80\"",
")",
":",
"rv",
"=",
"rv",
"[",
":",
"-",
"3",
"]",
"elif",
"environ",
"[",
"\"wsgi.url_scheme\"",
"]",
"==",
"\"https\"",
"and",
"rv",
".",
"endswith",
"(",
"\":443\"",
")",
":",
"rv",
"=",
"rv",
"[",
":",
"-",
"4",
"]",
"elif",
"environ",
".",
"get",
"(",
"\"SERVER_NAME\"",
")",
":",
"rv",
"=",
"environ",
"[",
"\"SERVER_NAME\"",
"]",
"if",
"(",
"environ",
"[",
"\"wsgi.url_scheme\"",
"]",
",",
"environ",
"[",
"\"SERVER_PORT\"",
"]",
")",
"not",
"in",
"(",
"(",
"\"https\"",
",",
"\"443\"",
")",
",",
"(",
"\"http\"",
",",
"\"80\"",
")",
",",
")",
":",
"rv",
"+=",
"\":\"",
"+",
"environ",
"[",
"\"SERVER_PORT\"",
"]",
"else",
":",
"# In spite of the WSGI spec, SERVER_NAME might not be present.",
"rv",
"=",
"\"unknown\"",
"return",
"rv"
] | 36.380952 | 0.001276 |
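Two illustrative environ dicts and the hosts they resolve to:

get_host({'wsgi.url_scheme': 'https',
          'HTTP_HOST': 'example.com:443'})    # -> 'example.com'
get_host({'wsgi.url_scheme': 'http',
          'SERVER_NAME': 'example.com',
          'SERVER_PORT': '8080'})             # -> 'example.com:8080'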
def polfit_residuals_with_sigma_rejection(
x, y, deg, times_sigma_reject,
color='b', size=75,
xlim=None, ylim=None,
xlabel=None, ylabel=None, title=None,
use_r=None,
geometry=(0,0,640,480),
debugplot=0):
"""Polynomial fit with iterative rejection of points.
This function makes use of function polfit_residuals for display
purposes.
Parameters
----------
x : 1d numpy array, float
X coordinates of the data being fitted.
y : 1d numpy array, float
Y coordinates of the data being fitted.
deg : int
Degree of the fitting polynomial.
times_sigma_reject : float or None
Number of times the standard deviation to reject points
iteratively. If None, the fit does not reject any point.
color : single character or 1d numpy array of characters
Color for all the symbols (single character) or for each
individual symbol (array of color names with the same length as
'x' or 'y'). If 'color' is a single character, the rejected
points are displayed in red color, whereas when 'color' is an
array of color names, rejected points are displayed with the
color provided in this array.
size : int
Marker size for all the symbols (single character) or for each
individual symbol (array of integers with the same length as
'x' or 'y').
xlim : tuple (floats)
Plot limits in the X axis.
ylim : tuple (floats)
Plot limits in the Y axis.
xlabel : string
Character string for label in X axis.
ylabel : string
Character string for label in y axis.
title : string
Character string for graph title.
use_r : bool
If True, the function computes several fits, using R, to
polynomials of degree deg, deg+1 and deg+2 (when possible).
geometry : tuple (4 integers) or None
x, y, dx, dy values employed to set the window geometry.
debugplot : int
Determines whether intermediate computations and/or plots
are displayed. The valid codes are defined in
numina.array.display.pause_debugplot.
Return
------
poly : instance of Polynomial (numpy)
Result from the polynomial fit using numpy Polynomial. Only
points not flagged as rejected are employed in the fit.
yres : 1d numpy array, float
Residuals from polynomial fit. Note that the residuals are
computed for all the points, including the rejected ones. In
this way the dimension of this array is the same as the
dimensions of the input 'x' and 'y' arrays.
reject : 1d numpy array, bool
Boolean array indicating rejected points.
"""
# protections
if type(x) is not np.ndarray:
raise ValueError("x=" + str(x) + " must be a numpy.ndarray")
elif x.ndim != 1:
raise ValueError("x.ndim=" + str(x.ndim) + " must be 1")
if type(y) is not np.ndarray:
raise ValueError("y=" + str(y) + " must be a numpy.ndarray")
elif y.ndim != 1:
raise ValueError("y.ndim=" + str(y.ndim) + " must be 1")
npoints = x.size
if npoints != y.size:
raise ValueError("x.size != y.size")
if type(deg) not in [np.int, np.int64]:
raise ValueError("deg=" + str(deg) +
" is not a valid integer")
if deg >= npoints:
raise ValueError("Polynomial degree=" + str(deg) +
" can't be fitted with npoints=" + str(npoints))
# initialize boolean rejection array
reject = np.zeros(npoints, dtype=np.bool)
# if there is no room to remove any point, compute a fit without
# rejection
if deg == npoints - 1:
poly, yres = polfit_residuals(x=x, y=y, deg=deg, reject=None,
color=color, size=size,
xlim=xlim, ylim=ylim,
xlabel=xlabel, ylabel=ylabel,
title=title,
use_r=use_r,
geometry=geometry,
debugplot=debugplot)
return poly, yres, reject
# main loop to reject points iteratively
loop_to_reject_points = True
poly = None
yres = None
while loop_to_reject_points:
if abs(debugplot) in [21, 22]:
poly, yres = polfit_residuals(x=x, y=y, deg=deg, reject=reject,
color=color, size=size,
xlim=xlim, ylim=ylim,
xlabel=xlabel, ylabel=ylabel,
title=title,
use_r=use_r,
geometry=geometry,
debugplot=debugplot)
else:
poly, yres = polfit_residuals(x=x, y=y, deg=deg, reject=reject)
# check that there is room to remove a point with the current
# polynomial degree
npoints_effective = npoints - np.sum(reject)
if deg < npoints_effective - 1:
            # determine robust standard deviation, excluding points
# already rejected
# --- method 1 ---
# yres_fitted = yres[np.logical_not(reject)]
# q25, q75 = np.percentile(yres_fitted, q=[25.0, 75.0])
# rms = 0.7413 * (q75 - q25)
# --- method 2 ---
yres_fitted = np.abs(yres[np.logical_not(reject)])
rms = np.median(yres_fitted)
if abs(debugplot) >= 10:
print("--> robust rms:", rms)
# reject fitted point exceeding the threshold with the
# largest deviation (note: with this method only one point
# is removed in each iteration of the loop; this allows the
# recomputation of the polynomial fit which, sometimes,
# transforms deviant points into good ones)
index_to_remove = []
for i in range(npoints):
if not reject[i]:
if np.abs(yres[i]) > times_sigma_reject * rms:
index_to_remove.append(i)
if abs(debugplot) >= 10:
print('--> suspicious point #', i + 1)
if len(index_to_remove) == 0:
if abs(debugplot) >= 10:
print('==> no need to remove any point')
loop_to_reject_points = False
else:
imax = np.argmax(np.abs(yres[index_to_remove]))
reject[index_to_remove[imax]] = True
if abs(debugplot) >= 10:
print('==> removing point #', index_to_remove[imax] + 1)
else:
loop_to_reject_points = False
# plot final fit in case it has not been already shown
if abs(debugplot) % 10 != 0:
if abs(debugplot) not in [21, 22]:
poly, yres = polfit_residuals(x=x, y=y, deg=deg, reject=reject,
color=color, size=size,
xlim=xlim, ylim=ylim,
xlabel=xlabel, ylabel=ylabel,
title=title,
use_r=use_r,
geometry=geometry,
debugplot=debugplot)
else:
if abs(debugplot) >= 10:
print(' ')
# return result
return poly, yres, reject | [
"def",
"polfit_residuals_with_sigma_rejection",
"(",
"x",
",",
"y",
",",
"deg",
",",
"times_sigma_reject",
",",
"color",
"=",
"'b'",
",",
"size",
"=",
"75",
",",
"xlim",
"=",
"None",
",",
"ylim",
"=",
"None",
",",
"xlabel",
"=",
"None",
",",
"ylabel",
"=",
"None",
",",
"title",
"=",
"None",
",",
"use_r",
"=",
"None",
",",
"geometry",
"=",
"(",
"0",
",",
"0",
",",
"640",
",",
"480",
")",
",",
"debugplot",
"=",
"0",
")",
":",
"# protections",
"if",
"type",
"(",
"x",
")",
"is",
"not",
"np",
".",
"ndarray",
":",
"raise",
"ValueError",
"(",
"\"x=\"",
"+",
"str",
"(",
"x",
")",
"+",
"\" must be a numpy.ndarray\"",
")",
"elif",
"x",
".",
"ndim",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"x.ndim=\"",
"+",
"str",
"(",
"x",
".",
"ndim",
")",
"+",
"\" must be 1\"",
")",
"if",
"type",
"(",
"y",
")",
"is",
"not",
"np",
".",
"ndarray",
":",
"raise",
"ValueError",
"(",
"\"y=\"",
"+",
"str",
"(",
"y",
")",
"+",
"\" must be a numpy.ndarray\"",
")",
"elif",
"y",
".",
"ndim",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"y.ndim=\"",
"+",
"str",
"(",
"y",
".",
"ndim",
")",
"+",
"\" must be 1\"",
")",
"npoints",
"=",
"x",
".",
"size",
"if",
"npoints",
"!=",
"y",
".",
"size",
":",
"raise",
"ValueError",
"(",
"\"x.size != y.size\"",
")",
"if",
"type",
"(",
"deg",
")",
"not",
"in",
"[",
"np",
".",
"int",
",",
"np",
".",
"int64",
"]",
":",
"raise",
"ValueError",
"(",
"\"deg=\"",
"+",
"str",
"(",
"deg",
")",
"+",
"\" is not a valid integer\"",
")",
"if",
"deg",
">=",
"npoints",
":",
"raise",
"ValueError",
"(",
"\"Polynomial degree=\"",
"+",
"str",
"(",
"deg",
")",
"+",
"\" can't be fitted with npoints=\"",
"+",
"str",
"(",
"npoints",
")",
")",
"# initialize boolean rejection array",
"reject",
"=",
"np",
".",
"zeros",
"(",
"npoints",
",",
"dtype",
"=",
"np",
".",
"bool",
")",
"# if there is no room to remove any point, compute a fit without",
"# rejection",
"if",
"deg",
"==",
"npoints",
"-",
"1",
":",
"poly",
",",
"yres",
"=",
"polfit_residuals",
"(",
"x",
"=",
"x",
",",
"y",
"=",
"y",
",",
"deg",
"=",
"deg",
",",
"reject",
"=",
"None",
",",
"color",
"=",
"color",
",",
"size",
"=",
"size",
",",
"xlim",
"=",
"xlim",
",",
"ylim",
"=",
"ylim",
",",
"xlabel",
"=",
"xlabel",
",",
"ylabel",
"=",
"ylabel",
",",
"title",
"=",
"title",
",",
"use_r",
"=",
"use_r",
",",
"geometry",
"=",
"geometry",
",",
"debugplot",
"=",
"debugplot",
")",
"return",
"poly",
",",
"yres",
",",
"reject",
"# main loop to reject points iteratively",
"loop_to_reject_points",
"=",
"True",
"poly",
"=",
"None",
"yres",
"=",
"None",
"while",
"loop_to_reject_points",
":",
"if",
"abs",
"(",
"debugplot",
")",
"in",
"[",
"21",
",",
"22",
"]",
":",
"poly",
",",
"yres",
"=",
"polfit_residuals",
"(",
"x",
"=",
"x",
",",
"y",
"=",
"y",
",",
"deg",
"=",
"deg",
",",
"reject",
"=",
"reject",
",",
"color",
"=",
"color",
",",
"size",
"=",
"size",
",",
"xlim",
"=",
"xlim",
",",
"ylim",
"=",
"ylim",
",",
"xlabel",
"=",
"xlabel",
",",
"ylabel",
"=",
"ylabel",
",",
"title",
"=",
"title",
",",
"use_r",
"=",
"use_r",
",",
"geometry",
"=",
"geometry",
",",
"debugplot",
"=",
"debugplot",
")",
"else",
":",
"poly",
",",
"yres",
"=",
"polfit_residuals",
"(",
"x",
"=",
"x",
",",
"y",
"=",
"y",
",",
"deg",
"=",
"deg",
",",
"reject",
"=",
"reject",
")",
"# check that there is room to remove a point with the current",
"# polynomial degree",
"npoints_effective",
"=",
"npoints",
"-",
"np",
".",
"sum",
"(",
"reject",
")",
"if",
"deg",
"<",
"npoints_effective",
"-",
"1",
":",
"# determine robuts standard deviation, excluding points",
"# already rejected",
"# --- method 1 ---",
"# yres_fitted = yres[np.logical_not(reject)]",
"# q25, q75 = np.percentile(yres_fitted, q=[25.0, 75.0])",
"# rms = 0.7413 * (q75 - q25)",
"# --- method 2 ---",
"yres_fitted",
"=",
"np",
".",
"abs",
"(",
"yres",
"[",
"np",
".",
"logical_not",
"(",
"reject",
")",
"]",
")",
"rms",
"=",
"np",
".",
"median",
"(",
"yres_fitted",
")",
"if",
"abs",
"(",
"debugplot",
")",
">=",
"10",
":",
"print",
"(",
"\"--> robust rms:\"",
",",
"rms",
")",
"# reject fitted point exceeding the threshold with the",
"# largest deviation (note: with this method only one point",
"# is removed in each iteration of the loop; this allows the",
"# recomputation of the polynomial fit which, sometimes,",
"# transforms deviant points into good ones)",
"index_to_remove",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"npoints",
")",
":",
"if",
"not",
"reject",
"[",
"i",
"]",
":",
"if",
"np",
".",
"abs",
"(",
"yres",
"[",
"i",
"]",
")",
">",
"times_sigma_reject",
"*",
"rms",
":",
"index_to_remove",
".",
"append",
"(",
"i",
")",
"if",
"abs",
"(",
"debugplot",
")",
">=",
"10",
":",
"print",
"(",
"'--> suspicious point #'",
",",
"i",
"+",
"1",
")",
"if",
"len",
"(",
"index_to_remove",
")",
"==",
"0",
":",
"if",
"abs",
"(",
"debugplot",
")",
">=",
"10",
":",
"print",
"(",
"'==> no need to remove any point'",
")",
"loop_to_reject_points",
"=",
"False",
"else",
":",
"imax",
"=",
"np",
".",
"argmax",
"(",
"np",
".",
"abs",
"(",
"yres",
"[",
"index_to_remove",
"]",
")",
")",
"reject",
"[",
"index_to_remove",
"[",
"imax",
"]",
"]",
"=",
"True",
"if",
"abs",
"(",
"debugplot",
")",
">=",
"10",
":",
"print",
"(",
"'==> removing point #'",
",",
"index_to_remove",
"[",
"imax",
"]",
"+",
"1",
")",
"else",
":",
"loop_to_reject_points",
"=",
"False",
"# plot final fit in case it has not been already shown",
"if",
"abs",
"(",
"debugplot",
")",
"%",
"10",
"!=",
"0",
":",
"if",
"abs",
"(",
"debugplot",
")",
"not",
"in",
"[",
"21",
",",
"22",
"]",
":",
"poly",
",",
"yres",
"=",
"polfit_residuals",
"(",
"x",
"=",
"x",
",",
"y",
"=",
"y",
",",
"deg",
"=",
"deg",
",",
"reject",
"=",
"reject",
",",
"color",
"=",
"color",
",",
"size",
"=",
"size",
",",
"xlim",
"=",
"xlim",
",",
"ylim",
"=",
"ylim",
",",
"xlabel",
"=",
"xlabel",
",",
"ylabel",
"=",
"ylabel",
",",
"title",
"=",
"title",
",",
"use_r",
"=",
"use_r",
",",
"geometry",
"=",
"geometry",
",",
"debugplot",
"=",
"debugplot",
")",
"else",
":",
"if",
"abs",
"(",
"debugplot",
")",
">=",
"10",
":",
"print",
"(",
"' '",
")",
"# return result",
"return",
"poly",
",",
"yres",
",",
"reject"
] | 42.519774 | 0.000519 |
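A hedged usage sketch; polfit_residuals is defined elsewhere in the module, and times_sigma_reject=3 is a common choice rather than a mandated one:

import numpy as np

x = np.linspace(0.0, 10.0, 25)
y = 2.0 * x + 1.0 + np.random.normal(scale=0.1, size=x.size)
y[7] += 5.0   # inject a single outlier
poly, yres, reject = polfit_residuals_with_sigma_rejection(
    x, y, deg=1, times_sigma_reject=3)
# reject[7] should come back True once its residual exceeds
# 3 times the robust (median-based) rms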
def blob_containers(self):
"""Instance depends on the API version:
* 2018-02-01: :class:`BlobContainersOperations<azure.mgmt.storage.v2018_02_01.operations.BlobContainersOperations>`
* 2018-03-01-preview: :class:`BlobContainersOperations<azure.mgmt.storage.v2018_03_01_preview.operations.BlobContainersOperations>`
* 2018-07-01: :class:`BlobContainersOperations<azure.mgmt.storage.v2018_07_01.operations.BlobContainersOperations>`
"""
api_version = self._get_api_version('blob_containers')
if api_version == '2018-02-01':
from .v2018_02_01.operations import BlobContainersOperations as OperationClass
elif api_version == '2018-03-01-preview':
from .v2018_03_01_preview.operations import BlobContainersOperations as OperationClass
elif api_version == '2018-07-01':
from .v2018_07_01.operations import BlobContainersOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) | [
"def",
"blob_containers",
"(",
"self",
")",
":",
"api_version",
"=",
"self",
".",
"_get_api_version",
"(",
"'blob_containers'",
")",
"if",
"api_version",
"==",
"'2018-02-01'",
":",
"from",
".",
"v2018_02_01",
".",
"operations",
"import",
"BlobContainersOperations",
"as",
"OperationClass",
"elif",
"api_version",
"==",
"'2018-03-01-preview'",
":",
"from",
".",
"v2018_03_01_preview",
".",
"operations",
"import",
"BlobContainersOperations",
"as",
"OperationClass",
"elif",
"api_version",
"==",
"'2018-07-01'",
":",
"from",
".",
"v2018_07_01",
".",
"operations",
"import",
"BlobContainersOperations",
"as",
"OperationClass",
"else",
":",
"raise",
"NotImplementedError",
"(",
"\"APIVersion {} is not available\"",
".",
"format",
"(",
"api_version",
")",
")",
"return",
"OperationClass",
"(",
"self",
".",
"_client",
",",
"self",
".",
"config",
",",
"Serializer",
"(",
"self",
".",
"_models_dict",
"(",
"api_version",
")",
")",
",",
"Deserializer",
"(",
"self",
".",
"_models_dict",
"(",
"api_version",
")",
")",
")"
] | 70.411765 | 0.008244 |
def config(self, configlet, plane, **attributes):
"""Apply config to the device."""
try:
config_text = configlet.format(**attributes)
except KeyError as exp:
raise CommandSyntaxError("Configuration template error: {}".format(str(exp)))
return self.driver.config(config_text, plane) | [
"def",
"config",
"(",
"self",
",",
"configlet",
",",
"plane",
",",
"*",
"*",
"attributes",
")",
":",
"try",
":",
"config_text",
"=",
"configlet",
".",
"format",
"(",
"*",
"*",
"attributes",
")",
"except",
"KeyError",
"as",
"exp",
":",
"raise",
"CommandSyntaxError",
"(",
"\"Configuration template error: {}\"",
".",
"format",
"(",
"str",
"(",
"exp",
")",
")",
")",
"return",
"self",
".",
"driver",
".",
"config",
"(",
"config_text",
",",
"plane",
")"
] | 41.375 | 0.008876 |
def get_name():
'''Get desktop environment or OS.
Get the OS name or desktop environment.
**List of Possible Values**
+-------------------------+---------------+
| Windows | windows |
+-------------------------+---------------+
| Mac OS X | mac |
+-------------------------+---------------+
| GNOME 3+ | gnome |
+-------------------------+---------------+
| GNOME 2 | gnome2 |
+-------------------------+---------------+
| XFCE | xfce4 |
+-------------------------+---------------+
| KDE | kde |
+-------------------------+---------------+
| Unity | unity |
+-------------------------+---------------+
| LXDE | lxde |
+-------------------------+---------------+
| i3wm | i3 |
+-------------------------+---------------+
| \*box | \*box |
+-------------------------+---------------+
| Trinity (KDE 3 fork) | trinity |
+-------------------------+---------------+
| MATE | mate |
+-------------------------+---------------+
| IceWM | icewm |
+-------------------------+---------------+
| Pantheon (elementaryOS) | pantheon |
+-------------------------+---------------+
| LXQt | lxqt |
+-------------------------+---------------+
| Awesome WM | awesome |
+-------------------------+---------------+
| Enlightenment | enlightenment |
+-------------------------+---------------+
| AfterStep | afterstep |
+-------------------------+---------------+
| WindowMaker | windowmaker |
+-------------------------+---------------+
| [Other] | unknown |
+-------------------------+---------------+
Returns:
str: The name of the desktop environment or OS.
'''
if sys.platform in ['win32', 'cygwin']:
return 'windows'
elif sys.platform == 'darwin':
return 'mac'
else:
desktop_session = os.environ.get(
'XDG_CURRENT_DESKTOP') or os.environ.get('DESKTOP_SESSION')
if desktop_session is not None:
desktop_session = desktop_session.lower()
# Fix for X-Cinnamon etc
if desktop_session.startswith('x-'):
desktop_session = desktop_session.replace('x-', '')
if desktop_session in ['gnome', 'unity', 'cinnamon', 'mate',
'xfce4', 'lxde', 'fluxbox',
'blackbox', 'openbox', 'icewm', 'jwm',
'afterstep', 'trinity', 'kde', 'pantheon',
'i3', 'lxqt', 'awesome', 'enlightenment']:
return desktop_session
#-- Special cases --#
# Canonical sets environment var to Lubuntu rather than
# LXDE if using LXDE.
# There is no guarantee that they will not do the same
# with the other desktop environments.
elif 'xfce' in desktop_session:
return 'xfce4'
elif desktop_session.startswith('ubuntu'):
return 'unity'
elif desktop_session.startswith('xubuntu'):
return 'xfce4'
elif desktop_session.startswith('lubuntu'):
return 'lxde'
elif desktop_session.startswith('kubuntu'):
return 'kde'
elif desktop_session.startswith('razor'):
return 'razor-qt'
elif desktop_session.startswith('wmaker'):
return 'windowmaker'
if os.environ.get('KDE_FULL_SESSION') == 'true':
return 'kde'
elif os.environ.get('GNOME_DESKTOP_SESSION_ID'):
if not 'deprecated' in os.environ.get('GNOME_DESKTOP_SESSION_ID'):
return 'gnome2'
elif is_running('xfce-mcs-manage'):
return 'xfce4'
elif is_running('ksmserver'):
return 'kde'
return 'unknown' | [
"def",
"get_name",
"(",
")",
":",
"if",
"sys",
".",
"platform",
"in",
"[",
"'win32'",
",",
"'cygwin'",
"]",
":",
"return",
"'windows'",
"elif",
"sys",
".",
"platform",
"==",
"'darwin'",
":",
"return",
"'mac'",
"else",
":",
"desktop_session",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'XDG_CURRENT_DESKTOP'",
")",
"or",
"os",
".",
"environ",
".",
"get",
"(",
"'DESKTOP_SESSION'",
")",
"if",
"desktop_session",
"is",
"not",
"None",
":",
"desktop_session",
"=",
"desktop_session",
".",
"lower",
"(",
")",
"# Fix for X-Cinnamon etc",
"if",
"desktop_session",
".",
"startswith",
"(",
"'x-'",
")",
":",
"desktop_session",
"=",
"desktop_session",
".",
"replace",
"(",
"'x-'",
",",
"''",
")",
"if",
"desktop_session",
"in",
"[",
"'gnome'",
",",
"'unity'",
",",
"'cinnamon'",
",",
"'mate'",
",",
"'xfce4'",
",",
"'lxde'",
",",
"'fluxbox'",
",",
"'blackbox'",
",",
"'openbox'",
",",
"'icewm'",
",",
"'jwm'",
",",
"'afterstep'",
",",
"'trinity'",
",",
"'kde'",
",",
"'pantheon'",
",",
"'i3'",
",",
"'lxqt'",
",",
"'awesome'",
",",
"'enlightenment'",
"]",
":",
"return",
"desktop_session",
"#-- Special cases --#",
"# Canonical sets environment var to Lubuntu rather than",
"# LXDE if using LXDE.",
"# There is no guarantee that they will not do the same",
"# with the other desktop environments.",
"elif",
"'xfce'",
"in",
"desktop_session",
":",
"return",
"'xfce4'",
"elif",
"desktop_session",
".",
"startswith",
"(",
"'ubuntu'",
")",
":",
"return",
"'unity'",
"elif",
"desktop_session",
".",
"startswith",
"(",
"'xubuntu'",
")",
":",
"return",
"'xfce4'",
"elif",
"desktop_session",
".",
"startswith",
"(",
"'lubuntu'",
")",
":",
"return",
"'lxde'",
"elif",
"desktop_session",
".",
"startswith",
"(",
"'kubuntu'",
")",
":",
"return",
"'kde'",
"elif",
"desktop_session",
".",
"startswith",
"(",
"'razor'",
")",
":",
"return",
"'razor-qt'",
"elif",
"desktop_session",
".",
"startswith",
"(",
"'wmaker'",
")",
":",
"return",
"'windowmaker'",
"if",
"os",
".",
"environ",
".",
"get",
"(",
"'KDE_FULL_SESSION'",
")",
"==",
"'true'",
":",
"return",
"'kde'",
"elif",
"os",
".",
"environ",
".",
"get",
"(",
"'GNOME_DESKTOP_SESSION_ID'",
")",
":",
"if",
"not",
"'deprecated'",
"in",
"os",
".",
"environ",
".",
"get",
"(",
"'GNOME_DESKTOP_SESSION_ID'",
")",
":",
"return",
"'gnome2'",
"elif",
"is_running",
"(",
"'xfce-mcs-manage'",
")",
":",
"return",
"'xfce4'",
"elif",
"is_running",
"(",
"'ksmserver'",
")",
":",
"return",
"'kde'",
"return",
"'unknown'"
] | 27.882353 | 0.031141 |
def children(self):
"""
Returns the children in this group.
:return [<QtGui.QListWidgetItem>, ..]
"""
new_refs = set()
output = []
for ref in self._children:
item = ref()
if item is not None:
output.append(item)
new_refs.add(ref)
self._children = new_refs
return output | [
"def",
"children",
"(",
"self",
")",
":",
"new_refs",
"=",
"set",
"(",
")",
"output",
"=",
"[",
"]",
"for",
"ref",
"in",
"self",
".",
"_children",
":",
"item",
"=",
"ref",
"(",
")",
"if",
"item",
"is",
"not",
"None",
":",
"output",
".",
"append",
"(",
"item",
")",
"new_refs",
".",
"add",
"(",
"ref",
")",
"self",
".",
"_children",
"=",
"new_refs",
"return",
"output"
] | 26.1875 | 0.009217 |
def get_catalog(self, locale):
"""Create Django translation catalogue for `locale`."""
with translation.override(locale):
translation_engine = DjangoTranslation(locale, domain=self.domain, localedirs=self.paths)
trans_cat = translation_engine._catalog
trans_fallback_cat = translation_engine._fallback._catalog if translation_engine._fallback else {}
return trans_cat, trans_fallback_cat | [
"def",
"get_catalog",
"(",
"self",
",",
"locale",
")",
":",
"with",
"translation",
".",
"override",
"(",
"locale",
")",
":",
"translation_engine",
"=",
"DjangoTranslation",
"(",
"locale",
",",
"domain",
"=",
"self",
".",
"domain",
",",
"localedirs",
"=",
"self",
".",
"paths",
")",
"trans_cat",
"=",
"translation_engine",
".",
"_catalog",
"trans_fallback_cat",
"=",
"translation_engine",
".",
"_fallback",
".",
"_catalog",
"if",
"translation_engine",
".",
"_fallback",
"else",
"{",
"}",
"return",
"trans_cat",
",",
"trans_fallback_cat"
] | 49.444444 | 0.00883 |
def apply(self, events):
"""
EventManager.apply(events) -> Takes an object with methods, and applies
them to EventManager.
Example:
class TestEvents(object):
@staticmethod
def test_method():
pass
e = TestEvents()
em = EventManager()
em.apply(e)
# em now has an event called test_method,
# and e.test_method as handler."""
for method in dir(events):
# Skip attributes
if not callable(getattr(events, method)):
continue
# Skip "trash" functions
if method.startswith("_"):
continue
if not hasattr(self, method): # Didn't have such an event already
self[method] = Event() # So we create it
self[method].add_handler(getattr(events, method)) | [
"def",
"apply",
"(",
"self",
",",
"events",
")",
":",
"for",
"method",
"in",
"dir",
"(",
"events",
")",
":",
"# Skip attributes\r",
"if",
"not",
"callable",
"(",
"getattr",
"(",
"events",
",",
"method",
")",
")",
":",
"continue",
"# Skip \"trash\" functions\r",
"if",
"method",
".",
"startswith",
"(",
"\"_\"",
")",
":",
"continue",
"if",
"not",
"hasattr",
"(",
"self",
",",
"method",
")",
":",
"# Didn't have such an event already\r",
"self",
"[",
"method",
"]",
"=",
"Event",
"(",
")",
"# So we create it\r",
"self",
"[",
"method",
"]",
".",
"add_handler",
"(",
"getattr",
"(",
"events",
",",
"method",
")",
")"
] | 34.222222 | 0.002105 |
def _get_session_for_table(self, base_session):
"""
Only present session for modeling when doses were dropped if it's succesful;
otherwise show the original modeling session.
"""
if base_session.recommended_model is None and base_session.doses_dropped > 0:
return base_session.doses_dropped_sessions[0]
return base_session | [
"def",
"_get_session_for_table",
"(",
"self",
",",
"base_session",
")",
":",
"if",
"base_session",
".",
"recommended_model",
"is",
"None",
"and",
"base_session",
".",
"doses_dropped",
">",
"0",
":",
"return",
"base_session",
".",
"doses_dropped_sessions",
"[",
"0",
"]",
"return",
"base_session"
] | 46.875 | 0.010471 |
def translate_table(data):
""" Translates data where data["Type"]=="Table" """
headers = sorted(data.get("Headers", []))
table = '\\FloatBarrier \n \\section{$NAME} \n'.replace('$NAME', data.get("Title", "table"))
table += '\\begin{table}[!ht] \n \\begin{center}'
# Set the number of columns
n_cols = "c"*(len(headers)+1)
table += '\n \\begin{tabular}{$NCOLS} \n'.replace("$NCOLS", n_cols)
# Put in the headers
for header in headers:
table += ' $HEADER &'.replace('$HEADER', header).replace('%', '\%')
table = table[:-1] + ' \\\\ \n \hline \n'
# Put in the data
for header in headers:
table += ' $VAL &'.replace("$VAL", str(data["Data"][header]))
table = table[:-1] + ' \\\\ \n \hline'
table += '\n \end{tabular} \n \end{center} \n \end{table}\n'
return table | [
"def",
"translate_table",
"(",
"data",
")",
":",
"headers",
"=",
"sorted",
"(",
"data",
".",
"get",
"(",
"\"Headers\"",
",",
"[",
"]",
")",
")",
"table",
"=",
"'\\\\FloatBarrier \\n \\\\section{$NAME} \\n'",
".",
"replace",
"(",
"'$NAME'",
",",
"data",
".",
"get",
"(",
"\"Title\"",
",",
"\"table\"",
")",
")",
"table",
"+=",
"'\\\\begin{table}[!ht] \\n \\\\begin{center}'",
"# Set the number of columns",
"n_cols",
"=",
"\"c\"",
"*",
"(",
"len",
"(",
"headers",
")",
"+",
"1",
")",
"table",
"+=",
"'\\n \\\\begin{tabular}{$NCOLS} \\n'",
".",
"replace",
"(",
"\"$NCOLS\"",
",",
"n_cols",
")",
"# Put in the headers",
"for",
"header",
"in",
"headers",
":",
"table",
"+=",
"' $HEADER &'",
".",
"replace",
"(",
"'$HEADER'",
",",
"header",
")",
".",
"replace",
"(",
"'%'",
",",
"'\\%'",
")",
"table",
"=",
"table",
"[",
":",
"-",
"1",
"]",
"+",
"' \\\\\\\\ \\n \\hline \\n'",
"# Put in the data",
"for",
"header",
"in",
"headers",
":",
"table",
"+=",
"' $VAL &'",
".",
"replace",
"(",
"\"$VAL\"",
",",
"str",
"(",
"data",
"[",
"\"Data\"",
"]",
"[",
"header",
"]",
")",
")",
"table",
"=",
"table",
"[",
":",
"-",
"1",
"]",
"+",
"' \\\\\\\\ \\n \\hline'",
"table",
"+=",
"'\\n \\end{tabular} \\n \\end{center} \\n \\end{table}\\n'",
"return",
"table"
] | 38.952381 | 0.009547 |
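A small worked example; headers are sorted, so columns come out in alphabetical order, and the tabular is declared with one spare column:

data = {
    'Title': 'Results',
    'Headers': ['x', 'y'],
    'Data': {'x': 1, 'y': 2},
}
print(translate_table(data))
# -> \section{Results} followed by a table with header row
#    'x & y \\' and data row '1 & 2 \\'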
def set_layout_settings(self, settings):
"""Restore layout state"""
size = settings.get('size')
if size is not None:
self.resize( QSize(*size) )
self.window_size = self.size()
pos = settings.get('pos')
if pos is not None:
self.move( QPoint(*pos) )
hexstate = settings.get('hexstate')
if hexstate is not None:
self.restoreState( QByteArray().fromHex(
str(hexstate).encode('utf-8')) )
if settings.get('is_maximized'):
self.setWindowState(Qt.WindowMaximized)
if settings.get('is_fullscreen'):
self.setWindowState(Qt.WindowFullScreen)
splitsettings = settings.get('splitsettings')
if splitsettings is not None:
self.editorwidget.editorsplitter.set_layout_settings(splitsettings) | [
"def",
"set_layout_settings",
"(",
"self",
",",
"settings",
")",
":",
"size",
"=",
"settings",
".",
"get",
"(",
"'size'",
")",
"if",
"size",
"is",
"not",
"None",
":",
"self",
".",
"resize",
"(",
"QSize",
"(",
"*",
"size",
")",
")",
"self",
".",
"window_size",
"=",
"self",
".",
"size",
"(",
")",
"pos",
"=",
"settings",
".",
"get",
"(",
"'pos'",
")",
"if",
"pos",
"is",
"not",
"None",
":",
"self",
".",
"move",
"(",
"QPoint",
"(",
"*",
"pos",
")",
")",
"hexstate",
"=",
"settings",
".",
"get",
"(",
"'hexstate'",
")",
"if",
"hexstate",
"is",
"not",
"None",
":",
"self",
".",
"restoreState",
"(",
"QByteArray",
"(",
")",
".",
"fromHex",
"(",
"str",
"(",
"hexstate",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
")",
"if",
"settings",
".",
"get",
"(",
"'is_maximized'",
")",
":",
"self",
".",
"setWindowState",
"(",
"Qt",
".",
"WindowMaximized",
")",
"if",
"settings",
".",
"get",
"(",
"'is_fullscreen'",
")",
":",
"self",
".",
"setWindowState",
"(",
"Qt",
".",
"WindowFullScreen",
")",
"splitsettings",
"=",
"settings",
".",
"get",
"(",
"'splitsettings'",
")",
"if",
"splitsettings",
"is",
"not",
"None",
":",
"self",
".",
"editorwidget",
".",
"editorsplitter",
".",
"set_layout_settings",
"(",
"splitsettings",
")"
] | 43.3 | 0.00904 |
def duration(self, value):
"""
Setter for **self.__duration** attribute.
:param value: Attribute value.
:type value: int
"""
if value is not None:
assert type(value) is int, "'{0}' attribute: '{1}' type is not 'int'!".format("duration", value)
            assert value >= 0, "'{0}' attribute: '{1}' must be positive or zero!".format("duration", value)
self.__duration = value | [
"def",
"duration",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"assert",
"type",
"(",
"value",
")",
"is",
"int",
",",
"\"'{0}' attribute: '{1}' type is not 'int'!\"",
".",
"format",
"(",
"\"duration\"",
",",
"value",
")",
"assert",
"value",
">=",
"0",
",",
"\"'{0}' attribute: '{1}' need to be exactly positive!\"",
".",
"format",
"(",
"\"duration\"",
",",
"value",
")",
"self",
".",
"__duration",
"=",
"value"
] | 36.416667 | 0.008929 |
def flush(self):
"""Flushes the current queue by notifying the :func:`sender` via the :func:`flush_notification` event.
"""
self._flush_notification.set()
if self.sender:
self.sender.start() | [
"def",
"flush",
"(",
"self",
")",
":",
"self",
".",
"_flush_notification",
".",
"set",
"(",
")",
"if",
"self",
".",
"sender",
":",
"self",
".",
"sender",
".",
"start",
"(",
")"
] | 38.166667 | 0.012821 |
def get_session(key=None, username=None, password=None, cache=True,
cache_expiry=datetime.timedelta(days=7), cookie_path=COOKIE_PATH, backend='memory',
version=VERSION_GLOBAL):
"""Get Voobly API session."""
class VooblyAuth(AuthBase): # pylint: disable=too-few-public-methods
"""Voobly authorization storage."""
def __init__(self, key, username, password, cookie_path, version):
"""Init."""
self.key = key
self.username = username
self.password = password
self.cookie_path = cookie_path
self.base_url = BASE_URLS[version]
def __call__(self, r):
"""Call is no-op."""
return r
if version not in BASE_URLS:
raise ValueError('unsupported voobly version')
session = requests.session()
if cache:
session = requests_cache.core.CachedSession(expire_after=cache_expiry, backend=backend)
session.auth = VooblyAuth(key, username, password, cookie_path, version)
if os.path.exists(cookie_path):
_LOGGER.info("cookie found at: %s", cookie_path)
session.cookies = _load_cookies(cookie_path)
return session | [
"def",
"get_session",
"(",
"key",
"=",
"None",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
",",
"cache",
"=",
"True",
",",
"cache_expiry",
"=",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"7",
")",
",",
"cookie_path",
"=",
"COOKIE_PATH",
",",
"backend",
"=",
"'memory'",
",",
"version",
"=",
"VERSION_GLOBAL",
")",
":",
"class",
"VooblyAuth",
"(",
"AuthBase",
")",
":",
"# pylint: disable=too-few-public-methods",
"\"\"\"Voobly authorization storage.\"\"\"",
"def",
"__init__",
"(",
"self",
",",
"key",
",",
"username",
",",
"password",
",",
"cookie_path",
",",
"version",
")",
":",
"\"\"\"Init.\"\"\"",
"self",
".",
"key",
"=",
"key",
"self",
".",
"username",
"=",
"username",
"self",
".",
"password",
"=",
"password",
"self",
".",
"cookie_path",
"=",
"cookie_path",
"self",
".",
"base_url",
"=",
"BASE_URLS",
"[",
"version",
"]",
"def",
"__call__",
"(",
"self",
",",
"r",
")",
":",
"\"\"\"Call is no-op.\"\"\"",
"return",
"r",
"if",
"version",
"not",
"in",
"BASE_URLS",
":",
"raise",
"ValueError",
"(",
"'unsupported voobly version'",
")",
"session",
"=",
"requests",
".",
"session",
"(",
")",
"if",
"cache",
":",
"session",
"=",
"requests_cache",
".",
"core",
".",
"CachedSession",
"(",
"expire_after",
"=",
"cache_expiry",
",",
"backend",
"=",
"backend",
")",
"session",
".",
"auth",
"=",
"VooblyAuth",
"(",
"key",
",",
"username",
",",
"password",
",",
"cookie_path",
",",
"version",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"cookie_path",
")",
":",
"_LOGGER",
".",
"info",
"(",
"\"cookie found at: %s\"",
",",
"cookie_path",
")",
"session",
".",
"cookies",
"=",
"_load_cookies",
"(",
"cookie_path",
")",
"return",
"session"
] | 39.433333 | 0.002475 |
def _convert_num(self, sign):
"""
Converts number registered in get_number_from_sign.
input = ["a2", "☉", "be3"]
output = ["a₂", "☉", "be₃"]
:param sign: string
:return sign: string
"""
# Check if there's a number at the end
new_sign, num = self._get_number_from_sign(sign)
if num < 2: # "ab" -> "ab"
return new_sign.replace(str(num),
self._convert_number_to_subscript(num))
if num > 3: # "buru14" -> "buru₁₄"
return new_sign.replace(str(num),
self._convert_number_to_subscript(num))
if self.two_three: # pylint: disable=no-else-return
return new_sign.replace(str(num),
self._convert_number_to_subscript(num))
else:
# "bad3" -> "bàd"
for i, character in enumerate(new_sign):
new_vowel = ''
if character in VOWELS:
if num == 2:
# noinspection PyUnusedLocal
new_vowel = character + chr(0x0301)
elif num == 3:
new_vowel = character + chr(0x0300)
break
return new_sign[:i] + normalize('NFC', new_vowel) + \
new_sign[i+1:].replace(str(num), '') | [
"def",
"_convert_num",
"(",
"self",
",",
"sign",
")",
":",
"# Check if there's a number at the end",
"new_sign",
",",
"num",
"=",
"self",
".",
"_get_number_from_sign",
"(",
"sign",
")",
"if",
"num",
"<",
"2",
":",
"# \"ab\" -> \"ab\"",
"return",
"new_sign",
".",
"replace",
"(",
"str",
"(",
"num",
")",
",",
"self",
".",
"_convert_number_to_subscript",
"(",
"num",
")",
")",
"if",
"num",
">",
"3",
":",
"# \"buru14\" -> \"buru₁₄\"",
"return",
"new_sign",
".",
"replace",
"(",
"str",
"(",
"num",
")",
",",
"self",
".",
"_convert_number_to_subscript",
"(",
"num",
")",
")",
"if",
"self",
".",
"two_three",
":",
"# pylint: disable=no-else-return",
"return",
"new_sign",
".",
"replace",
"(",
"str",
"(",
"num",
")",
",",
"self",
".",
"_convert_number_to_subscript",
"(",
"num",
")",
")",
"else",
":",
"# \"bad3\" -> \"bàd\"",
"for",
"i",
",",
"character",
"in",
"enumerate",
"(",
"new_sign",
")",
":",
"new_vowel",
"=",
"''",
"if",
"character",
"in",
"VOWELS",
":",
"if",
"num",
"==",
"2",
":",
"# noinspection PyUnusedLocal",
"new_vowel",
"=",
"character",
"+",
"chr",
"(",
"0x0301",
")",
"elif",
"num",
"==",
"3",
":",
"new_vowel",
"=",
"character",
"+",
"chr",
"(",
"0x0300",
")",
"break",
"return",
"new_sign",
"[",
":",
"i",
"]",
"+",
"normalize",
"(",
"'NFC'",
",",
"new_vowel",
")",
"+",
"new_sign",
"[",
"i",
"+",
"1",
":",
"]",
".",
"replace",
"(",
"str",
"(",
"num",
")",
",",
"''",
")"
] | 40.558824 | 0.002125 |
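Illustrative conversions implied by the code above; the first two depend on the instance's two_three flag, and unsuffixed signs are assumed to come back from _get_number_from_sign with num < 2:

# with self.two_three = False:
#   'bad2'   -> 'bád'      (acute accent marks index 2)
#   'bad3'   -> 'bàd'      (grave accent marks index 3)
# regardless of the flag:
#   'buru14' -> 'buru₁₄'   (indices above 3 become subscripts)
#   'ab'     -> 'ab'       (no trailing number, unchanged)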
def post_status(self, body="", id="", parentid="", stashid=""):
"""Post a status
        :param body: The body of the status
:param id: The id of the object you wish to share
:param parentid: The parentid of the object you wish to share
:param stashid: The stashid of the object you wish to add to the status
"""
    if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/user/statuses/post', post_data={
"body":body,
"id":id,
"parentid":parentid,
"stashid":stashid
})
return response['statusid'] | [
"def",
"post_status",
"(",
"self",
",",
"body",
"=",
"\"\"",
",",
"id",
"=",
"\"\"",
",",
"parentid",
"=",
"\"\"",
",",
"stashid",
"=",
"\"\"",
")",
":",
"if",
"self",
".",
"standard_grant_type",
"is",
"not",
"\"authorization_code\"",
":",
"raise",
"DeviantartError",
"(",
"\"Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.\"",
")",
"response",
"=",
"self",
".",
"_req",
"(",
"'/user/statuses/post'",
",",
"post_data",
"=",
"{",
"\"body\"",
":",
"body",
",",
"\"id\"",
":",
"id",
",",
"\"parentid\"",
":",
"parentid",
",",
"\"stashid\"",
":",
"stashid",
"}",
")",
"return",
"response",
"[",
"'statusid'",
"]"
] | 36.571429 | 0.008883 |
def titles2marc(self, key, values):
"""Populate the ``246`` MARC field.
Also populates the ``245`` MARC field through side effects.
"""
first, rest = values[0], values[1:]
self.setdefault('245', []).append({
'a': first.get('title'),
'b': first.get('subtitle'),
'9': first.get('source'),
})
return [
{
'a': value.get('title'),
'b': value.get('subtitle'),
'9': value.get('source'),
} for value in rest
] | [
"def",
"titles2marc",
"(",
"self",
",",
"key",
",",
"values",
")",
":",
"first",
",",
"rest",
"=",
"values",
"[",
"0",
"]",
",",
"values",
"[",
"1",
":",
"]",
"self",
".",
"setdefault",
"(",
"'245'",
",",
"[",
"]",
")",
".",
"append",
"(",
"{",
"'a'",
":",
"first",
".",
"get",
"(",
"'title'",
")",
",",
"'b'",
":",
"first",
".",
"get",
"(",
"'subtitle'",
")",
",",
"'9'",
":",
"first",
".",
"get",
"(",
"'source'",
")",
",",
"}",
")",
"return",
"[",
"{",
"'a'",
":",
"value",
".",
"get",
"(",
"'title'",
")",
",",
"'b'",
":",
"value",
".",
"get",
"(",
"'subtitle'",
")",
",",
"'9'",
":",
"value",
".",
"get",
"(",
"'source'",
")",
",",
"}",
"for",
"value",
"in",
"rest",
"]"
] | 24.65 | 0.001953 |
def basis(self, n):
"""
Chebyshev basis functions T_n.
"""
if n == 0:
return self(np.array([1.]))
vals = np.ones(n+1)
vals[1::2] = -1
return self(vals) | [
"def",
"basis",
"(",
"self",
",",
"n",
")",
":",
"if",
"n",
"==",
"0",
":",
"return",
"self",
"(",
"np",
".",
"array",
"(",
"[",
"1.",
"]",
")",
")",
"vals",
"=",
"np",
".",
"ones",
"(",
"n",
"+",
"1",
")",
"vals",
"[",
"1",
":",
":",
"2",
"]",
"=",
"-",
"1",
"return",
"self",
"(",
"vals",
")"
] | 23.444444 | 0.009132 |
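The alternating +/-1 values are consistent with T_n sampled at its n + 1 extreme points x_k = cos(k*pi/n), where T_n(x_k) = (-1)^k; a quick numerical check under that assumption:

import numpy as np

n = 4
x = np.cos(np.arange(n + 1) * np.pi / n)  # Chebyshev extreme points
t_n = np.cos(n * np.arccos(x))            # T_n(x) = cos(n * arccos(x))
assert np.allclose(t_n, [1, -1, 1, -1, 1])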
def _subprocess_method(self, command):
"""Use the subprocess module to execute ipmitool commands
    and set status
"""
p = subprocess.Popen([self._ipmitool_path] + self.args + command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.output, self.error = p.communicate()
self.status = p.returncode | [
"def",
"_subprocess_method",
"(",
"self",
",",
"command",
")",
":",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"self",
".",
"_ipmitool_path",
"]",
"+",
"self",
".",
"args",
"+",
"command",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"self",
".",
"output",
",",
"self",
".",
"error",
"=",
"p",
".",
"communicate",
"(",
")",
"self",
".",
"status",
"=",
"p",
".",
"returncode"
] | 49.142857 | 0.008571 |
def max(self, default=None):
"""
Calculate the maximum value over the time series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the maximum value or `None`.
"""
return numpy.asscalar(numpy.max(self.values)) if self.values else default | [
"def",
"max",
"(",
"self",
",",
"default",
"=",
"None",
")",
":",
"return",
"numpy",
".",
"asscalar",
"(",
"numpy",
".",
"max",
"(",
"self",
".",
"values",
")",
")",
"if",
"self",
".",
"values",
"else",
"default"
] | 43 | 0.011396 |
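Note that numpy.asscalar was deprecated in NumPy 1.16 and removed in 1.23; on current NumPy an equivalent one-liner, assuming values is a plain list, is:

import numpy

values = [1.5, 3.2, 2.0]
result = numpy.max(values).item() if values else None  # -> 3.2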
def _make_container_root(name):
'''
Make the container root directory
'''
path = _root(name)
if os.path.exists(path):
__context__['retcode'] = salt.defaults.exitcodes.SALT_BUILD_FAIL
raise CommandExecutionError(
'Container {0} already exists'.format(name)
)
else:
try:
os.makedirs(path)
return path
except OSError as exc:
raise CommandExecutionError(
'Unable to make container root directory {0}: {1}'
.format(name, exc)
) | [
"def",
"_make_container_root",
"(",
"name",
")",
":",
"path",
"=",
"_root",
"(",
"name",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"__context__",
"[",
"'retcode'",
"]",
"=",
"salt",
".",
"defaults",
".",
"exitcodes",
".",
"SALT_BUILD_FAIL",
"raise",
"CommandExecutionError",
"(",
"'Container {0} already exists'",
".",
"format",
"(",
"name",
")",
")",
"else",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"path",
")",
"return",
"path",
"except",
"OSError",
"as",
"exc",
":",
"raise",
"CommandExecutionError",
"(",
"'Unable to make container root directory {0}: {1}'",
".",
"format",
"(",
"name",
",",
"exc",
")",
")"
] | 29.473684 | 0.00173 |
def get_without_extra(marker):
"""Build a new marker without the `extra == ...` part.
    The implementation relies very deeply on packaging's internals, but I don't
have a better way now (except implementing the whole thing myself).
This could return `None` if the `extra == ...` part is the only one in the
input marker.
"""
# TODO: Why is this very deep in the internals? Why is a better solution
# implementing it yourself when someone is already maintaining a codebase
# for this? It's literally a grammar implementation that is required to
# meet the demands of a pep... -d
if not marker:
return None
marker = Marker(str(marker))
elements = marker._markers
_strip_extra(elements)
if elements:
return marker
return None | [
"def",
"get_without_extra",
"(",
"marker",
")",
":",
"# TODO: Why is this very deep in the internals? Why is a better solution",
"# implementing it yourself when someone is already maintaining a codebase",
"# for this? It's literally a grammar implementation that is required to",
"# meet the demands of a pep... -d",
"if",
"not",
"marker",
":",
"return",
"None",
"marker",
"=",
"Marker",
"(",
"str",
"(",
"marker",
")",
")",
"elements",
"=",
"marker",
".",
"_markers",
"_strip_extra",
"(",
"elements",
")",
"if",
"elements",
":",
"return",
"marker",
"return",
"None"
] | 37.238095 | 0.001247 |
def _cb_inform_interface_change(self, msg):
"""Update the sensors and requests available."""
self._logger.debug('cb_inform_interface_change(%s)', msg)
self._interface_changed.set() | [
"def",
"_cb_inform_interface_change",
"(",
"self",
",",
"msg",
")",
":",
"self",
".",
"_logger",
".",
"debug",
"(",
"'cb_inform_interface_change(%s)'",
",",
"msg",
")",
"self",
".",
"_interface_changed",
".",
"set",
"(",
")"
] | 50.25 | 0.009804 |
def doit(self):
"""Do (most of) it function of the model class."""
print(' . doit')
lines = Lines()
lines.add(1, 'cpdef inline void doit(self, int idx) %s:' % _nogil)
lines.add(2, 'self.idx_sim = idx')
if getattr(self.model.sequences, 'inputs', None) is not None:
lines.add(2, 'self.load_data()')
if self.model.INLET_METHODS:
lines.add(2, 'self.update_inlets()')
if hasattr(self.model, 'solve'):
lines.add(2, 'self.solve()')
else:
lines.add(2, 'self.run()')
if getattr(self.model.sequences, 'states', None) is not None:
lines.add(2, 'self.new2old()')
if self.model.OUTLET_METHODS:
lines.add(2, 'self.update_outlets()')
return lines | [
"def",
"doit",
"(",
"self",
")",
":",
"print",
"(",
"' . doit'",
")",
"lines",
"=",
"Lines",
"(",
")",
"lines",
".",
"add",
"(",
"1",
",",
"'cpdef inline void doit(self, int idx) %s:'",
"%",
"_nogil",
")",
"lines",
".",
"add",
"(",
"2",
",",
"'self.idx_sim = idx'",
")",
"if",
"getattr",
"(",
"self",
".",
"model",
".",
"sequences",
",",
"'inputs'",
",",
"None",
")",
"is",
"not",
"None",
":",
"lines",
".",
"add",
"(",
"2",
",",
"'self.load_data()'",
")",
"if",
"self",
".",
"model",
".",
"INLET_METHODS",
":",
"lines",
".",
"add",
"(",
"2",
",",
"'self.update_inlets()'",
")",
"if",
"hasattr",
"(",
"self",
".",
"model",
",",
"'solve'",
")",
":",
"lines",
".",
"add",
"(",
"2",
",",
"'self.solve()'",
")",
"else",
":",
"lines",
".",
"add",
"(",
"2",
",",
"'self.run()'",
")",
"if",
"getattr",
"(",
"self",
".",
"model",
".",
"sequences",
",",
"'states'",
",",
"None",
")",
"is",
"not",
"None",
":",
"lines",
".",
"add",
"(",
"2",
",",
"'self.new2old()'",
")",
"if",
"self",
".",
"model",
".",
"OUTLET_METHODS",
":",
"lines",
".",
"add",
"(",
"2",
",",
"'self.update_outlets()'",
")",
"return",
"lines"
] | 42.315789 | 0.002433 |
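For orientation, the lines emitted above would assemble into a Cython block roughly like the following, assuming _nogil is the string 'nogil' and a model with input sequences, inlet/outlet methods, states, and no solver:

cpdef inline void doit(self, int idx) nogil:
    self.idx_sim = idx
    self.load_data()
    self.update_inlets()
    self.run()
    self.new2old()
    self.update_outlets()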
def _getEngineVersionHash(self):
"""
Computes the SHA-256 hash of the JSON version details for the latest installed version of UE4
"""
versionDetails = self._getEngineVersionDetails()
hash = hashlib.sha256()
hash.update(json.dumps(versionDetails, sort_keys=True, indent=0).encode('utf-8'))
return hash.hexdigest() | [
"def",
"_getEngineVersionHash",
"(",
"self",
")",
":",
"versionDetails",
"=",
"self",
".",
"_getEngineVersionDetails",
"(",
")",
"hash",
"=",
"hashlib",
".",
"sha256",
"(",
")",
"hash",
".",
"update",
"(",
"json",
".",
"dumps",
"(",
"versionDetails",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"0",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"return",
"hash",
".",
"hexdigest",
"(",
")"
] | 40 | 0.033639 |
def read_all(self):
"""Fetches all messages in the database.
:rtype: Generator[can.Message]
"""
result = self._cursor.execute("SELECT * FROM {}".format(self.table_name)).fetchall()
return (SqliteReader._assemble_message(frame) for frame in result) | [
"def",
"read_all",
"(",
"self",
")",
":",
"result",
"=",
"self",
".",
"_cursor",
".",
"execute",
"(",
"\"SELECT * FROM {}\"",
".",
"format",
"(",
"self",
".",
"table_name",
")",
")",
".",
"fetchall",
"(",
")",
"return",
"(",
"SqliteReader",
".",
"_assemble_message",
"(",
"frame",
")",
"for",
"frame",
"in",
"result",
")"
] | 40.285714 | 0.010417 |
def clear(self):
"""
        Clear all contents in the replay memory
"""
self.states[:] = 0
self.actions[:] = 0
self.rewards[:] = 0
self.terminate_flags[:] = 0
self.top = 0
self.size = 0 | [
"def",
"clear",
"(",
"self",
")",
":",
"self",
".",
"states",
"[",
":",
"]",
"=",
"0",
"self",
".",
"actions",
"[",
":",
"]",
"=",
"0",
"self",
".",
"rewards",
"[",
":",
"]",
"=",
"0",
"self",
".",
"terminate_flags",
"[",
":",
"]",
"=",
"0",
"self",
".",
"top",
"=",
"0",
"self",
".",
"size",
"=",
"0"
] | 24 | 0.008032 |
def has_publish_permission(self, request, obj=None):
"""
Determines if the user has permissions to publish.
:param request: Django request object.
:param obj: The object to determine if the user has
permissions to publish.
:return: Boolean.
"""
# If auto-publishing is enabled, no user has "permission" to publish
# because it happens automatically
if is_automatic_publishing_enabled(self.model):
return False
user_obj = request.user
if not user_obj.is_active:
return False
if user_obj.is_superuser:
return True
# Normal user with `can_publish` permission can always publish
if user_obj.has_perm('%s.can_publish' % self.opts.app_label):
return True
# Normal user with `can_republish` permission can only publish if the
# item is already published.
if user_obj.has_perm('%s.can_republish' % self.opts.app_label) and \
obj and getattr(obj, 'has_been_published', False):
return True
        # User does not meet any publishing permission requirements; reject!
return False | [
"def",
"has_publish_permission",
"(",
"self",
",",
"request",
",",
"obj",
"=",
"None",
")",
":",
"# If auto-publishing is enabled, no user has \"permission\" to publish",
"# because it happens automatically",
"if",
"is_automatic_publishing_enabled",
"(",
"self",
".",
"model",
")",
":",
"return",
"False",
"user_obj",
"=",
"request",
".",
"user",
"if",
"not",
"user_obj",
".",
"is_active",
":",
"return",
"False",
"if",
"user_obj",
".",
"is_superuser",
":",
"return",
"True",
"# Normal user with `can_publish` permission can always publish",
"if",
"user_obj",
".",
"has_perm",
"(",
"'%s.can_publish'",
"%",
"self",
".",
"opts",
".",
"app_label",
")",
":",
"return",
"True",
"# Normal user with `can_republish` permission can only publish if the",
"# item is already published.",
"if",
"user_obj",
".",
"has_perm",
"(",
"'%s.can_republish'",
"%",
"self",
".",
"opts",
".",
"app_label",
")",
"and",
"obj",
"and",
"getattr",
"(",
"obj",
",",
"'has_been_published'",
",",
"False",
")",
":",
"return",
"True",
"# User does not meet any publishing permisison requirements; reject!",
"return",
"False"
] | 41.821429 | 0.001669 |
def write_frame(self):
""" Writes a single frame to the movie file """
if not hasattr(self, 'mwriter'):
raise AssertionError('This plotter has not opened a movie or GIF file.')
self.mwriter.append_data(self.image) | [
"def",
"write_frame",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'mwriter'",
")",
":",
"raise",
"AssertionError",
"(",
"'This plotter has not opened a movie or GIF file.'",
")",
"self",
".",
"mwriter",
".",
"append_data",
"(",
"self",
".",
"image",
")"
] | 49 | 0.012048 |
def _convert_schema(bundle):
""" Converts schema of the dataset to resource dict ready to save to CKAN. """
# http://docs.ckan.org/en/latest/api/#ckan.logic.action.create.resource_create
schema_csv = None
for f in bundle.dataset.files:
if f.path.endswith('schema.csv'):
contents = f.unpacked_contents
if isinstance(contents, six.binary_type):
contents = contents.decode('utf-8')
schema_csv = six.StringIO(contents)
schema_csv.seek(0)
break
ret = {
'package_id': bundle.dataset.vid.lower(),
'url': 'http://example.com',
'revision_id': '',
'description': 'Schema of the dataset tables.',
'format': 'text/csv',
'hash': '',
'name': 'schema',
'upload': schema_csv,
}
return ret | [
"def",
"_convert_schema",
"(",
"bundle",
")",
":",
"# http://docs.ckan.org/en/latest/api/#ckan.logic.action.create.resource_create",
"schema_csv",
"=",
"None",
"for",
"f",
"in",
"bundle",
".",
"dataset",
".",
"files",
":",
"if",
"f",
".",
"path",
".",
"endswith",
"(",
"'schema.csv'",
")",
":",
"contents",
"=",
"f",
".",
"unpacked_contents",
"if",
"isinstance",
"(",
"contents",
",",
"six",
".",
"binary_type",
")",
":",
"contents",
"=",
"contents",
".",
"decode",
"(",
"'utf-8'",
")",
"schema_csv",
"=",
"six",
".",
"StringIO",
"(",
"contents",
")",
"schema_csv",
".",
"seek",
"(",
"0",
")",
"break",
"ret",
"=",
"{",
"'package_id'",
":",
"bundle",
".",
"dataset",
".",
"vid",
".",
"lower",
"(",
")",
",",
"'url'",
":",
"'http://example.com'",
",",
"'revision_id'",
":",
"''",
",",
"'description'",
":",
"'Schema of the dataset tables.'",
",",
"'format'",
":",
"'text/csv'",
",",
"'hash'",
":",
"''",
",",
"'name'",
":",
"'schema'",
",",
"'upload'",
":",
"schema_csv",
",",
"}",
"return",
"ret"
] | 33.04 | 0.002353 |
def unsubscribe_list(self, list_id):
"""
        Unsubscribe from a list
:param list_id: list ID number
:return: :class:`~responsebot.models.List` object
"""
return List(tweepy_list_to_json(self._client.unsubscribe_list(list_id=list_id))) | [
"def",
"unsubscribe_list",
"(",
"self",
",",
"list_id",
")",
":",
"return",
"List",
"(",
"tweepy_list_to_json",
"(",
"self",
".",
"_client",
".",
"unsubscribe_list",
"(",
"list_id",
"=",
"list_id",
")",
")",
")"
] | 33.75 | 0.01083 |
def mslike(pop, **kwargs):
"""
Function to establish default parameters
for a single-locus simulation for standard pop-gen
modeling scenarios.
    :param pop: An instance of :class:`fwdpy11.DiploidPopulation`
    :param kwargs: Keyword arguments.
"""
import fwdpy11
if isinstance(pop, fwdpy11.DiploidPopulation) is False:
raise ValueError("incorrect pop type: " + str(type(pop)))
defaults = {'simlen': 10*pop.N,
'beg': 0.0,
'end': 1.0,
'theta': 100.0,
'pneutral': 1.0,
'rho': 100.0,
'dfe': None
}
for key, value in kwargs.items():
if key in defaults:
defaults[key] = value
import numpy as np
params = {'demography': np.array([pop.N]*defaults['simlen'],
dtype=np.uint32),
'nregions': [fwdpy11.Region(defaults['beg'],
defaults['end'], 1.0)],
'recregions': [fwdpy11.Region(defaults['beg'],
defaults['end'], 1.0)],
'rates': ((defaults['pneutral']*defaults['theta'])/(4.0*pop.N),
((1.0-defaults['pneutral'])*defaults['theta']) /
(4.0*pop.N),
defaults['rho']/(4.0*float(pop.N))),
'gvalue': fwdpy11.Multiplicative(2.0)
}
if defaults['dfe'] is None:
params['sregions'] = []
else:
params['sregions'] = [defaults['dfe']]
return params | [
"def",
"mslike",
"(",
"pop",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"fwdpy11",
"if",
"isinstance",
"(",
"pop",
",",
"fwdpy11",
".",
"DiploidPopulation",
")",
"is",
"False",
":",
"raise",
"ValueError",
"(",
"\"incorrect pop type: \"",
"+",
"str",
"(",
"type",
"(",
"pop",
")",
")",
")",
"defaults",
"=",
"{",
"'simlen'",
":",
"10",
"*",
"pop",
".",
"N",
",",
"'beg'",
":",
"0.0",
",",
"'end'",
":",
"1.0",
",",
"'theta'",
":",
"100.0",
",",
"'pneutral'",
":",
"1.0",
",",
"'rho'",
":",
"100.0",
",",
"'dfe'",
":",
"None",
"}",
"for",
"key",
",",
"value",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"key",
"in",
"defaults",
":",
"defaults",
"[",
"key",
"]",
"=",
"value",
"import",
"numpy",
"as",
"np",
"params",
"=",
"{",
"'demography'",
":",
"np",
".",
"array",
"(",
"[",
"pop",
".",
"N",
"]",
"*",
"defaults",
"[",
"'simlen'",
"]",
",",
"dtype",
"=",
"np",
".",
"uint32",
")",
",",
"'nregions'",
":",
"[",
"fwdpy11",
".",
"Region",
"(",
"defaults",
"[",
"'beg'",
"]",
",",
"defaults",
"[",
"'end'",
"]",
",",
"1.0",
")",
"]",
",",
"'recregions'",
":",
"[",
"fwdpy11",
".",
"Region",
"(",
"defaults",
"[",
"'beg'",
"]",
",",
"defaults",
"[",
"'end'",
"]",
",",
"1.0",
")",
"]",
",",
"'rates'",
":",
"(",
"(",
"defaults",
"[",
"'pneutral'",
"]",
"*",
"defaults",
"[",
"'theta'",
"]",
")",
"/",
"(",
"4.0",
"*",
"pop",
".",
"N",
")",
",",
"(",
"(",
"1.0",
"-",
"defaults",
"[",
"'pneutral'",
"]",
")",
"*",
"defaults",
"[",
"'theta'",
"]",
")",
"/",
"(",
"4.0",
"*",
"pop",
".",
"N",
")",
",",
"defaults",
"[",
"'rho'",
"]",
"/",
"(",
"4.0",
"*",
"float",
"(",
"pop",
".",
"N",
")",
")",
")",
",",
"'gvalue'",
":",
"fwdpy11",
".",
"Multiplicative",
"(",
"2.0",
")",
"}",
"if",
"defaults",
"[",
"'dfe'",
"]",
"is",
"None",
":",
"params",
"[",
"'sregions'",
"]",
"=",
"[",
"]",
"else",
":",
"params",
"[",
"'sregions'",
"]",
"=",
"[",
"defaults",
"[",
"'dfe'",
"]",
"]",
"return",
"params"
] | 35.97619 | 0.000644 |
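A hedged usage sketch; the constructor signatures below are assumptions and vary across fwdpy11 releases:

import fwdpy11

pop = fwdpy11.DiploidPopulation(1000, 1.0)  # N diploids, assumed genome length 1.0
params = mslike(pop, simlen=100,
                dfe=fwdpy11.ExpS(0.0, 1.0, 1.0, -0.05))  # hypothetical DFE region
# params['rates'] holds the (neutral, selected, recombination) rate triple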
def match_host(host, pattern):
''' Match a host string against a pattern
Args:
host (str)
A hostname to compare to the given pattern
pattern (str)
A string representing a hostname pattern, possibly including
wildcards for ip address octets or ports.
This function will return ``True`` if the hostname matches the pattern,
including any wildcards. If the pattern contains a port, the host string
must also contain a matching port.
Returns:
bool
Examples:
>>> match_host('192.168.0.1:80', '192.168.0.1:80')
True
>>> match_host('192.168.0.1:80', '192.168.0.1')
True
>>> match_host('192.168.0.1:80', '192.168.0.1:8080')
False
>>> match_host('192.168.0.1', '192.168.0.2')
False
>>> match_host('192.168.0.1', '192.168.*.*')
True
>>> match_host('alice', 'alice')
True
>>> match_host('alice:80', 'alice')
True
>>> match_host('alice', 'bob')
False
>>> match_host('foo.example.com', 'foo.example.com.net')
False
>>> match_host('alice', '*')
True
>>> match_host('alice', '*:*')
True
>>> match_host('alice:80', '*')
True
>>> match_host('alice:80', '*:80')
True
>>> match_host('alice:8080', '*:80')
False
'''
if ':' in host:
host, host_port = host.rsplit(':', 1)
else:
host_port = None
if ':' in pattern:
pattern, pattern_port = pattern.rsplit(':', 1)
if pattern_port == '*':
pattern_port = None
else:
pattern_port = None
if pattern_port is not None and host_port != pattern_port:
return False
host = host.split('.')
pattern = pattern.split('.')
if len(pattern) > len(host):
return False
for h, p in zip(host, pattern):
if h == p or p == '*':
continue
else:
return False
return True | [
"def",
"match_host",
"(",
"host",
",",
"pattern",
")",
":",
"if",
"':'",
"in",
"host",
":",
"host",
",",
"host_port",
"=",
"host",
".",
"rsplit",
"(",
"':'",
",",
"1",
")",
"else",
":",
"host_port",
"=",
"None",
"if",
"':'",
"in",
"pattern",
":",
"pattern",
",",
"pattern_port",
"=",
"pattern",
".",
"rsplit",
"(",
"':'",
",",
"1",
")",
"if",
"pattern_port",
"==",
"'*'",
":",
"pattern_port",
"=",
"None",
"else",
":",
"pattern_port",
"=",
"None",
"if",
"pattern_port",
"is",
"not",
"None",
"and",
"host_port",
"!=",
"pattern_port",
":",
"return",
"False",
"host",
"=",
"host",
".",
"split",
"(",
"'.'",
")",
"pattern",
"=",
"pattern",
".",
"split",
"(",
"'.'",
")",
"if",
"len",
"(",
"pattern",
")",
">",
"len",
"(",
"host",
")",
":",
"return",
"False",
"for",
"h",
",",
"p",
"in",
"zip",
"(",
"host",
",",
"pattern",
")",
":",
"if",
"h",
"==",
"p",
"or",
"p",
"==",
"'*'",
":",
"continue",
"else",
":",
"return",
"False",
"return",
"True"
] | 25.571429 | 0.000489 |
def maybe_convert_to_index_date_type(index, date):
"""Convert a datetime-like object to the index's date type.
Datetime indexing in xarray can be done using either a pandas
DatetimeIndex or a CFTimeIndex. Both support partial-datetime string
indexing regardless of the calendar type of the underlying data;
therefore if a string is passed as a date, we return it unchanged. If a
datetime-like object is provided, it will be converted to the underlying
date type of the index. For a DatetimeIndex that is np.datetime64; for a
CFTimeIndex that is an object of type cftime.datetime specific to the
calendar used.
Parameters
----------
index : pd.Index
Input time index
date : datetime-like object or str
Input datetime
Returns
-------
date of the type appropriate for the time index of the Dataset
"""
if isinstance(date, str):
return date
if isinstance(index, pd.DatetimeIndex):
if isinstance(date, np.datetime64):
return date
else:
return np.datetime64(str(date))
else:
date_type = index.date_type
if isinstance(date, date_type):
return date
else:
if isinstance(date, np.datetime64):
# Convert to datetime.date or datetime.datetime object
date = date.item()
if isinstance(date, datetime.date):
# Convert to a datetime.datetime object
date = datetime.datetime.combine(
date, datetime.datetime.min.time())
return date_type(date.year, date.month, date.day, date.hour,
date.minute, date.second, date.microsecond) | [
"def",
"maybe_convert_to_index_date_type",
"(",
"index",
",",
"date",
")",
":",
"if",
"isinstance",
"(",
"date",
",",
"str",
")",
":",
"return",
"date",
"if",
"isinstance",
"(",
"index",
",",
"pd",
".",
"DatetimeIndex",
")",
":",
"if",
"isinstance",
"(",
"date",
",",
"np",
".",
"datetime64",
")",
":",
"return",
"date",
"else",
":",
"return",
"np",
".",
"datetime64",
"(",
"str",
"(",
"date",
")",
")",
"else",
":",
"date_type",
"=",
"index",
".",
"date_type",
"if",
"isinstance",
"(",
"date",
",",
"date_type",
")",
":",
"return",
"date",
"else",
":",
"if",
"isinstance",
"(",
"date",
",",
"np",
".",
"datetime64",
")",
":",
"# Convert to datetime.date or datetime.datetime object",
"date",
"=",
"date",
".",
"item",
"(",
")",
"if",
"isinstance",
"(",
"date",
",",
"datetime",
".",
"date",
")",
":",
"# Convert to a datetime.datetime object",
"date",
"=",
"datetime",
".",
"datetime",
".",
"combine",
"(",
"date",
",",
"datetime",
".",
"datetime",
".",
"min",
".",
"time",
"(",
")",
")",
"return",
"date_type",
"(",
"date",
".",
"year",
",",
"date",
".",
"month",
",",
"date",
".",
"day",
",",
"date",
".",
"hour",
",",
"date",
".",
"minute",
",",
"date",
".",
"second",
",",
"date",
".",
"microsecond",
")"
] | 36.148936 | 0.000573 |
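A short usage sketch, assuming the function is importable from its module:

import datetime
import pandas as pd

idx = pd.date_range('2000-01-01', periods=3)
date = maybe_convert_to_index_date_type(idx, datetime.datetime(2000, 1, 2))
# -> numpy.datetime64('2000-01-02T00:00:00'); plain strings pass through unchanged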
def measure(self, geometry):
"""Measure the length or the area of a geometry.
:param geometry: The geometry.
:type geometry: QgsGeometry
:return: The geometric size in the expected exposure unit.
:rtype: float
"""
message = 'Size with NaN value : geometry valid={valid}, WKT={wkt}'
feature_size = 0
if geometry.isMultipart():
# Be careful, the size calculator is not working well on a
# multipart.
# So we compute the size part per part. See ticket #3812
for single in geometry.asGeometryCollection():
if self.geometry_type == QgsWkbTypes.LineGeometry:
geometry_size = self.calculator.measureLength(single)
else:
geometry_size = self.calculator.measureArea(single)
if not isnan(geometry_size):
feature_size += geometry_size
else:
LOGGER.debug(message.format(
valid=single.isGeosValid(),
wkt=single.asWkt()))
else:
if self.geometry_type == QgsWkbTypes.LineGeometry:
geometry_size = self.calculator.measureLength(geometry)
else:
geometry_size = self.calculator.measureArea(geometry)
if not isnan(geometry_size):
feature_size = geometry_size
else:
LOGGER.debug(message.format(
valid=geometry.isGeosValid(),
wkt=geometry.asWkt()))
feature_size = round(feature_size)
if self.output_unit:
if self.output_unit != self.default_unit:
feature_size = convert_unit(
feature_size, self.default_unit, self.output_unit)
return feature_size | [
"def",
"measure",
"(",
"self",
",",
"geometry",
")",
":",
"message",
"=",
"'Size with NaN value : geometry valid={valid}, WKT={wkt}'",
"feature_size",
"=",
"0",
"if",
"geometry",
".",
"isMultipart",
"(",
")",
":",
"# Be careful, the size calculator is not working well on a",
"# multipart.",
"# So we compute the size part per part. See ticket #3812",
"for",
"single",
"in",
"geometry",
".",
"asGeometryCollection",
"(",
")",
":",
"if",
"self",
".",
"geometry_type",
"==",
"QgsWkbTypes",
".",
"LineGeometry",
":",
"geometry_size",
"=",
"self",
".",
"calculator",
".",
"measureLength",
"(",
"single",
")",
"else",
":",
"geometry_size",
"=",
"self",
".",
"calculator",
".",
"measureArea",
"(",
"single",
")",
"if",
"not",
"isnan",
"(",
"geometry_size",
")",
":",
"feature_size",
"+=",
"geometry_size",
"else",
":",
"LOGGER",
".",
"debug",
"(",
"message",
".",
"format",
"(",
"valid",
"=",
"single",
".",
"isGeosValid",
"(",
")",
",",
"wkt",
"=",
"single",
".",
"asWkt",
"(",
")",
")",
")",
"else",
":",
"if",
"self",
".",
"geometry_type",
"==",
"QgsWkbTypes",
".",
"LineGeometry",
":",
"geometry_size",
"=",
"self",
".",
"calculator",
".",
"measureLength",
"(",
"geometry",
")",
"else",
":",
"geometry_size",
"=",
"self",
".",
"calculator",
".",
"measureArea",
"(",
"geometry",
")",
"if",
"not",
"isnan",
"(",
"geometry_size",
")",
":",
"feature_size",
"=",
"geometry_size",
"else",
":",
"LOGGER",
".",
"debug",
"(",
"message",
".",
"format",
"(",
"valid",
"=",
"geometry",
".",
"isGeosValid",
"(",
")",
",",
"wkt",
"=",
"geometry",
".",
"asWkt",
"(",
")",
")",
")",
"feature_size",
"=",
"round",
"(",
"feature_size",
")",
"if",
"self",
".",
"output_unit",
":",
"if",
"self",
".",
"output_unit",
"!=",
"self",
".",
"default_unit",
":",
"feature_size",
"=",
"convert_unit",
"(",
"feature_size",
",",
"self",
".",
"default_unit",
",",
"self",
".",
"output_unit",
")",
"return",
"feature_size"
] | 39.73913 | 0.001068 |
def parse_path(path):
"""
Get database name and database schema from path.
:param path: "/"-delimited path, parsed as
"/<database name>/<database schema>"
:return: tuple with (database or None, schema or None)
"""
if path is None:
raise ValueError("path must be a string")
parts = path.strip("/").split("/")
database = unquote_plus(parts[0]) if len(parts) else None
schema = parts[1] if len(parts) > 1 else None
return database, schema | [
"def",
"parse_path",
"(",
"path",
")",
":",
"if",
"path",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"path must be a string\"",
")",
"parts",
"=",
"path",
".",
"strip",
"(",
"\"/\"",
")",
".",
"split",
"(",
"\"/\"",
")",
"database",
"=",
"unquote_plus",
"(",
"parts",
"[",
"0",
"]",
")",
"if",
"len",
"(",
"parts",
")",
"else",
"None",
"schema",
"=",
"parts",
"[",
"1",
"]",
"if",
"len",
"(",
"parts",
")",
">",
"1",
"else",
"None",
"return",
"database",
",",
"schema"
] | 28 | 0.002033 |
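Behavior at a glance; the last case shows that the "or None" in the docstring only ever applies to the schema, because "".split("/") is ['']:

parse_path('/warehouse/public')  # -> ('warehouse', 'public')
parse_path('/my%2Bdb')           # -> ('my+db', None)
parse_path('/')                  # -> ('', None)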
def getCocktailSum(e0, e1, eCocktail, uCocktail):
"""get the cocktail sum for a given data bin range"""
# get mask and according indices
mask = (eCocktail >= e0) & (eCocktail <= e1)
# data bin range wider than single cocktail bin
if np.any(mask):
idx = getMaskIndices(mask)
# determine coinciding flags
eCl, eCu = eCocktail[idx[0]], eCocktail[idx[1]]
not_coinc_low, not_coinc_upp = (eCl != e0), (eCu != e1)
# get cocktail sum in data bin (always w/o last bin)
uCocktailSum = fsum(uCocktail[mask[:-1]][:-1])
logging.debug(' sum: {}'.format(uCocktailSum))
# get correction for non-coinciding edges
if not_coinc_low:
eCl_bw = eCl - eCocktail[idx[0]-1]
corr_low = (eCl - e0) / eCl_bw
abs_corr_low = float(corr_low) * uCocktail[idx[0]-1]
uCocktailSum += abs_corr_low
logging.debug((' low: %g == %g -> %g (%g) -> %g -> {} -> {}' % (
e0, eCl, eCl - e0, eCl_bw, corr_low
)).format(abs_corr_low, uCocktailSum))
if not_coinc_upp:
if idx[1]+1 < len(eCocktail):
eCu_bw = eCocktail[idx[1]+1] - eCu
corr_upp = (e1 - eCu) / eCu_bw
abs_corr_upp = float(corr_upp) * uCocktail[idx[1]]
            else:  # catch last index (quick fix!)
abs_corr_upp = eCu_bw = corr_upp = 0
uCocktailSum += abs_corr_upp
logging.debug((' upp: %g == %g -> %g (%g) -> %g -> {} -> {}' % (
e1, eCu, e1 - eCu, eCu_bw, corr_upp
)).format(abs_corr_upp, uCocktailSum))
else:
mask = (eCocktail >= e0)
idx = getMaskIndices(mask) # only use first index
# catch if already at last index
if idx[0] == idx[1] and idx[0] == len(eCocktail)-1:
corr = (e1 - e0) / (eCocktail[idx[0]] - eCocktail[idx[0]-1])
uCocktailSum = float(corr) * uCocktail[idx[0]-1]
else: # default case
corr = (e1 - e0) / (eCocktail[idx[0]+1] - eCocktail[idx[0]])
uCocktailSum = float(corr) * uCocktail[idx[0]]
logging.debug(' sum: {}'.format(uCocktailSum))
return uCocktailSum | [
"def",
"getCocktailSum",
"(",
"e0",
",",
"e1",
",",
"eCocktail",
",",
"uCocktail",
")",
":",
"# get mask and according indices",
"mask",
"=",
"(",
"eCocktail",
">=",
"e0",
")",
"&",
"(",
"eCocktail",
"<=",
"e1",
")",
"# data bin range wider than single cocktail bin",
"if",
"np",
".",
"any",
"(",
"mask",
")",
":",
"idx",
"=",
"getMaskIndices",
"(",
"mask",
")",
"# determine coinciding flags",
"eCl",
",",
"eCu",
"=",
"eCocktail",
"[",
"idx",
"[",
"0",
"]",
"]",
",",
"eCocktail",
"[",
"idx",
"[",
"1",
"]",
"]",
"not_coinc_low",
",",
"not_coinc_upp",
"=",
"(",
"eCl",
"!=",
"e0",
")",
",",
"(",
"eCu",
"!=",
"e1",
")",
"# get cocktail sum in data bin (always w/o last bin)",
"uCocktailSum",
"=",
"fsum",
"(",
"uCocktail",
"[",
"mask",
"[",
":",
"-",
"1",
"]",
"]",
"[",
":",
"-",
"1",
"]",
")",
"logging",
".",
"debug",
"(",
"' sum: {}'",
".",
"format",
"(",
"uCocktailSum",
")",
")",
"# get correction for non-coinciding edges",
"if",
"not_coinc_low",
":",
"eCl_bw",
"=",
"eCl",
"-",
"eCocktail",
"[",
"idx",
"[",
"0",
"]",
"-",
"1",
"]",
"corr_low",
"=",
"(",
"eCl",
"-",
"e0",
")",
"/",
"eCl_bw",
"abs_corr_low",
"=",
"float",
"(",
"corr_low",
")",
"*",
"uCocktail",
"[",
"idx",
"[",
"0",
"]",
"-",
"1",
"]",
"uCocktailSum",
"+=",
"abs_corr_low",
"logging",
".",
"debug",
"(",
"(",
"' low: %g == %g -> %g (%g) -> %g -> {} -> {}'",
"%",
"(",
"e0",
",",
"eCl",
",",
"eCl",
"-",
"e0",
",",
"eCl_bw",
",",
"corr_low",
")",
")",
".",
"format",
"(",
"abs_corr_low",
",",
"uCocktailSum",
")",
")",
"if",
"not_coinc_upp",
":",
"if",
"idx",
"[",
"1",
"]",
"+",
"1",
"<",
"len",
"(",
"eCocktail",
")",
":",
"eCu_bw",
"=",
"eCocktail",
"[",
"idx",
"[",
"1",
"]",
"+",
"1",
"]",
"-",
"eCu",
"corr_upp",
"=",
"(",
"e1",
"-",
"eCu",
")",
"/",
"eCu_bw",
"abs_corr_upp",
"=",
"float",
"(",
"corr_upp",
")",
"*",
"uCocktail",
"[",
"idx",
"[",
"1",
"]",
"]",
"else",
":",
"# catch last index (quick fix!)",
"abs_corr_upp",
"=",
"eCu_bw",
"=",
"corr_upp",
"=",
"0",
"uCocktailSum",
"+=",
"abs_corr_upp",
"logging",
".",
"debug",
"(",
"(",
"' upp: %g == %g -> %g (%g) -> %g -> {} -> {}'",
"%",
"(",
"e1",
",",
"eCu",
",",
"e1",
"-",
"eCu",
",",
"eCu_bw",
",",
"corr_upp",
")",
")",
".",
"format",
"(",
"abs_corr_upp",
",",
"uCocktailSum",
")",
")",
"else",
":",
"mask",
"=",
"(",
"eCocktail",
">=",
"e0",
")",
"idx",
"=",
"getMaskIndices",
"(",
"mask",
")",
"# only use first index",
"# catch if already at last index",
"if",
"idx",
"[",
"0",
"]",
"==",
"idx",
"[",
"1",
"]",
"and",
"idx",
"[",
"0",
"]",
"==",
"len",
"(",
"eCocktail",
")",
"-",
"1",
":",
"corr",
"=",
"(",
"e1",
"-",
"e0",
")",
"/",
"(",
"eCocktail",
"[",
"idx",
"[",
"0",
"]",
"]",
"-",
"eCocktail",
"[",
"idx",
"[",
"0",
"]",
"-",
"1",
"]",
")",
"uCocktailSum",
"=",
"float",
"(",
"corr",
")",
"*",
"uCocktail",
"[",
"idx",
"[",
"0",
"]",
"-",
"1",
"]",
"else",
":",
"# default case",
"corr",
"=",
"(",
"e1",
"-",
"e0",
")",
"/",
"(",
"eCocktail",
"[",
"idx",
"[",
"0",
"]",
"+",
"1",
"]",
"-",
"eCocktail",
"[",
"idx",
"[",
"0",
"]",
"]",
")",
"uCocktailSum",
"=",
"float",
"(",
"corr",
")",
"*",
"uCocktail",
"[",
"idx",
"[",
"0",
"]",
"]",
"logging",
".",
"debug",
"(",
"' sum: {}'",
".",
"format",
"(",
"uCocktailSum",
")",
")",
"return",
"uCocktailSum"
] | 43.666667 | 0.011946 |
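A toy illustration of the lower-edge correction only (not the full function): a data bin starting at e0 = 1.2 cuts into the cocktail bin [1.0, 1.5) holding content u, so only the overlapping fraction of u is counted.

e0, edge_lo, edge_hi, u = 1.2, 1.0, 1.5, 10.0
corr_low = (edge_hi - e0) / (edge_hi - edge_lo)  # 0.6, as in (eCl - e0) / eCl_bw
contribution = corr_low * u                      # 6.0 added to the running sum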
def compute_actor_handle_id(actor_handle_id, num_forks):
"""Deterministically compute an actor handle ID.
A new actor handle ID is generated when it is forked from another actor
handle. The new handle ID is computed as hash(old_handle_id || num_forks).
Args:
actor_handle_id (common.ObjectID): The original actor handle ID.
num_forks: The number of times the original actor handle has been
forked so far.
Returns:
An ID for the new actor handle.
"""
assert isinstance(actor_handle_id, ActorHandleID)
handle_id_hash = hashlib.sha1()
handle_id_hash.update(actor_handle_id.binary())
handle_id_hash.update(str(num_forks).encode("ascii"))
handle_id = handle_id_hash.digest()
return ActorHandleID(handle_id) | [
"def",
"compute_actor_handle_id",
"(",
"actor_handle_id",
",",
"num_forks",
")",
":",
"assert",
"isinstance",
"(",
"actor_handle_id",
",",
"ActorHandleID",
")",
"handle_id_hash",
"=",
"hashlib",
".",
"sha1",
"(",
")",
"handle_id_hash",
".",
"update",
"(",
"actor_handle_id",
".",
"binary",
"(",
")",
")",
"handle_id_hash",
".",
"update",
"(",
"str",
"(",
"num_forks",
")",
".",
"encode",
"(",
"\"ascii\"",
")",
")",
"handle_id",
"=",
"handle_id_hash",
".",
"digest",
"(",
")",
"return",
"ActorHandleID",
"(",
"handle_id",
")"
] | 38.8 | 0.001258 |
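The derivation itself needs nothing Ray-specific; a self-contained sketch with a stand-in for the parent handle bytes:

import hashlib

parent_id = b'\x00' * 20           # stand-in for actor_handle_id.binary()
h = hashlib.sha1()
h.update(parent_id)
h.update(str(3).encode('ascii'))   # third fork of this handle
child_id = h.digest()              # deterministic 20-byte child ID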
def from_dict(cls: typing.Type[T], dikt) -> T:
"""Returns the dict as a model"""
return util.deserialize_model(dikt, cls) | [
"def",
"from_dict",
"(",
"cls",
":",
"typing",
".",
"Type",
"[",
"T",
"]",
",",
"dikt",
")",
"->",
"T",
":",
"return",
"util",
".",
"deserialize_model",
"(",
"dikt",
",",
"cls",
")"
] | 45 | 0.014599 |
def set_translation(lang):
"""Set the translation used by (some) pywws modules.
This sets the translation object ``pywws.localisation.translation``
to use a particular language.
The ``lang`` parameter can be any string of the form ``en``,
``en_GB`` or ``en_GB.UTF-8``. Anything after a ``.`` character is
ignored. In the case of a string such as ``en_GB``, the routine
will search for an ``en_GB`` language file before searching for an
``en`` one.
:param lang: language code.
:type lang: string
:return: success status.
:rtype: bool
"""
global translation
# make list of possible languages, in order of preference
langs = list()
if lang:
if '.' in lang:
lang = lang.split('.')[0]
langs += [lang, lang[:2]]
# get translation object
path = pkg_resources.resource_filename('pywws', 'lang')
codeset = locale.getpreferredencoding()
if codeset == 'ASCII':
codeset = 'UTF-8'
try:
translation = gettext.translation(
'pywws', path, languages=langs, codeset=codeset)
# Python 3 translations don't have a ugettext method
if not hasattr(translation, 'ugettext'):
translation.ugettext = translation.gettext
except IOError:
return False
return True | [
"def",
"set_translation",
"(",
"lang",
")",
":",
"global",
"translation",
"# make list of possible languages, in order of preference",
"langs",
"=",
"list",
"(",
")",
"if",
"lang",
":",
"if",
"'.'",
"in",
"lang",
":",
"lang",
"=",
"lang",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"langs",
"+=",
"[",
"lang",
",",
"lang",
"[",
":",
"2",
"]",
"]",
"# get translation object",
"path",
"=",
"pkg_resources",
".",
"resource_filename",
"(",
"'pywws'",
",",
"'lang'",
")",
"codeset",
"=",
"locale",
".",
"getpreferredencoding",
"(",
")",
"if",
"codeset",
"==",
"'ASCII'",
":",
"codeset",
"=",
"'UTF-8'",
"try",
":",
"translation",
"=",
"gettext",
".",
"translation",
"(",
"'pywws'",
",",
"path",
",",
"languages",
"=",
"langs",
",",
"codeset",
"=",
"codeset",
")",
"# Python 3 translations don't have a ugettext method",
"if",
"not",
"hasattr",
"(",
"translation",
",",
"'ugettext'",
")",
":",
"translation",
".",
"ugettext",
"=",
"translation",
".",
"gettext",
"except",
"IOError",
":",
"return",
"False",
"return",
"True"
] | 33.025641 | 0.000754 |
def get_operation_type(self, operation_name):
# type: (Optional[str]) -> Optional[str]
"""
Returns the operation type ('query', 'mutation', 'subscription' or None)
for the given operation_name.
If no operation_name is provided (and only one operation exists) it will return the
operation type for that operation
"""
operations_map = self.operations_map
if not operation_name and len(operations_map) == 1:
return next(iter(operations_map.values()))
return operations_map.get(operation_name) | [
"def",
"get_operation_type",
"(",
"self",
",",
"operation_name",
")",
":",
"# type: (Optional[str]) -> Optional[str]",
"operations_map",
"=",
"self",
".",
"operations_map",
"if",
"not",
"operation_name",
"and",
"len",
"(",
"operations_map",
")",
"==",
"1",
":",
"return",
"next",
"(",
"iter",
"(",
"operations_map",
".",
"values",
"(",
")",
")",
")",
"return",
"operations_map",
".",
"get",
"(",
"operation_name",
")"
] | 47.5 | 0.008606 |
def GetIPAddresses(self):
"""Return a list of IP addresses."""
results = []
for address in self.addresses:
human_readable_address = address.human_readable_address
if human_readable_address is not None:
results.append(human_readable_address)
return results | [
"def",
"GetIPAddresses",
"(",
"self",
")",
":",
"results",
"=",
"[",
"]",
"for",
"address",
"in",
"self",
".",
"addresses",
":",
"human_readable_address",
"=",
"address",
".",
"human_readable_address",
"if",
"human_readable_address",
"is",
"not",
"None",
":",
"results",
".",
"append",
"(",
"human_readable_address",
")",
"return",
"results"
] | 31.555556 | 0.010274 |
def getColors_Triad(hue=None, sat = 1, val = 1, spread = 60):
"""
    Create a palette with one main color and two opposite colors evenly spread apart from the main one.
:param hue: A 0-1 float with the starting hue value.
:param sat: A 0-1 float with the palette saturation.
:param val: A 0-1 float with the palette value.
    :param spread: An int with the spread in degrees from the opposite color.
:rtype: A list of :py:class:`Color` objects.
"""
palette = list()
if hue==None:
leadHue = randFloat(0, 1)
else:
leadHue = hue
palette.append(Color(0,0,0,1).set_HSV(leadHue, sat, val))
palette.append(Color(0,0,0,1).set_HSV((leadHue + 0.5 + spread/360) % 1, sat, val))
palette.append(Color(0,0,0,1).set_HSV((leadHue + 0.5 - spread/360) % 1, sat, val))
return palette | [
"def",
"getColors_Triad",
"(",
"hue",
"=",
"None",
",",
"sat",
"=",
"1",
",",
"val",
"=",
"1",
",",
"spread",
"=",
"60",
")",
":",
"palette",
"=",
"list",
"(",
")",
"if",
"hue",
"==",
"None",
":",
"leadHue",
"=",
"randFloat",
"(",
"0",
",",
"1",
")",
"else",
":",
"leadHue",
"=",
"hue",
"palette",
".",
"append",
"(",
"Color",
"(",
"0",
",",
"0",
",",
"0",
",",
"1",
")",
".",
"set_HSV",
"(",
"leadHue",
",",
"sat",
",",
"val",
")",
")",
"palette",
".",
"append",
"(",
"Color",
"(",
"0",
",",
"0",
",",
"0",
",",
"1",
")",
".",
"set_HSV",
"(",
"(",
"leadHue",
"+",
"0.5",
"+",
"spread",
"/",
"360",
")",
"%",
"1",
",",
"sat",
",",
"val",
")",
")",
"palette",
".",
"append",
"(",
"Color",
"(",
"0",
",",
"0",
",",
"0",
",",
"1",
")",
".",
"set_HSV",
"(",
"(",
"leadHue",
"+",
"0.5",
"-",
"spread",
"/",
"360",
")",
"%",
"1",
",",
"sat",
",",
"val",
")",
")",
"return",
"palette"
] | 39.894737 | 0.051546 |
def save_license(license_code):
""" Grab license, save to LICENSE/LICENSE.txt file """
desc = _get_license_description(license_code)
fname = "LICENSE"
if sys.platform == "win32":
fname += ".txt" # Windows and file exts
with open(os.path.join(os.getcwd(), fname), "w") as afile:
afile.write(desc) | [
"def",
"save_license",
"(",
"license_code",
")",
":",
"desc",
"=",
"_get_license_description",
"(",
"license_code",
")",
"fname",
"=",
"\"LICENSE\"",
"if",
"sys",
".",
"platform",
"==",
"\"win32\"",
":",
"fname",
"+=",
"\".txt\"",
"# Windows and file exts",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"fname",
")",
",",
"\"w\"",
")",
"as",
"afile",
":",
"afile",
".",
"write",
"(",
"desc",
")"
] | 38.375 | 0.019108 |
def meta(self, meta):
'''Extract model metadata for lua script stdnet/lib/lua/odm.lua'''
data = meta.as_dict()
data['namespace'] = self.basekey(meta)
return data | [
"def",
"meta",
"(",
"self",
",",
"meta",
")",
":",
"data",
"=",
"meta",
".",
"as_dict",
"(",
")",
"data",
"[",
"'namespace'",
"]",
"=",
"self",
".",
"basekey",
"(",
"meta",
")",
"return",
"data"
] | 38.6 | 0.010152 |
def warnify(self, message, duration=3000, notification_clicked_slot=None, **kwargs):
"""
Displays an Application notification warning.
:param message: Notification message.
:type message: unicode
:param duration: Notification display duration.
:type duration: int
:param notification_clicked_slot: Notification clicked slot.
:type notification_clicked_slot: object
:param \*\*kwargs: Keywords arguments.
:type \*\*kwargs: \*\*
:return: Method success.
:rtype: bool
"""
return self.notify(message,
duration,
notification_clicked_slot,
message_level="Warning",
color=QColor(220, 128, 64),
background_color=QColor(32, 32, 32),
border_color=QColor(220, 128, 64),
**kwargs) | [
"def",
"warnify",
"(",
"self",
",",
"message",
",",
"duration",
"=",
"3000",
",",
"notification_clicked_slot",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"notify",
"(",
"message",
",",
"duration",
",",
"notification_clicked_slot",
",",
"message_level",
"=",
"\"Warning\"",
",",
"color",
"=",
"QColor",
"(",
"220",
",",
"128",
",",
"64",
")",
",",
"background_color",
"=",
"QColor",
"(",
"32",
",",
"32",
",",
"32",
")",
",",
"border_color",
"=",
"QColor",
"(",
"220",
",",
"128",
",",
"64",
")",
",",
"*",
"*",
"kwargs",
")"
] | 39.5 | 0.009269 |
def astype(self, dtype, copy=True):
"""
Cast to a NumPy array or IntegerArray with 'dtype'.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
copy : bool, default True
Whether to copy the data, even if not necessary. If False,
a copy is made only if the old dtype does not match the
new dtype.
Returns
-------
array : ndarray or IntegerArray
        NumPy ndarray or IntegerArray with 'dtype' for its dtype.
Raises
------
TypeError
if incompatible type with an IntegerDtype, equivalent of same_kind
casting
"""
# if we are astyping to an existing IntegerDtype we can fastpath
if isinstance(dtype, _IntegerDtype):
result = self._data.astype(dtype.numpy_dtype, copy=False)
return type(self)(result, mask=self._mask, copy=False)
# coerce
data = self._coerce_to_ndarray()
return astype_nansafe(data, dtype, copy=None) | [
"def",
"astype",
"(",
"self",
",",
"dtype",
",",
"copy",
"=",
"True",
")",
":",
"# if we are astyping to an existing IntegerDtype we can fastpath",
"if",
"isinstance",
"(",
"dtype",
",",
"_IntegerDtype",
")",
":",
"result",
"=",
"self",
".",
"_data",
".",
"astype",
"(",
"dtype",
".",
"numpy_dtype",
",",
"copy",
"=",
"False",
")",
"return",
"type",
"(",
"self",
")",
"(",
"result",
",",
"mask",
"=",
"self",
".",
"_mask",
",",
"copy",
"=",
"False",
")",
"# coerce",
"data",
"=",
"self",
".",
"_coerce_to_ndarray",
"(",
")",
"return",
"astype_nansafe",
"(",
"data",
",",
"dtype",
",",
"copy",
"=",
"None",
")"
] | 32.545455 | 0.001808 |
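A short usage sketch on a masked integer array (behavior as of the pandas line this code comes from):

import pandas as pd

arr = pd.array([1, 2, None], dtype='Int64')
arr.astype('Int32')    # fastpath: stays a masked integer array
arr.astype('float64')  # coerced to an ndarray; the missing value becomes NaN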
def Nu_cylinder_Perkins_Leppert_1964(Re, Pr, mu=None, muw=None):
r'''Calculates Nusselt number for crossflow across a single tube as shown
in [1]_ at a specified `Re` and `Pr`, both evaluated at the free stream
temperature. Recommends a viscosity exponent correction of 0.25, which is
applied only if provided. Also shown in [2]_.
.. math::
Nu = \left[0.31Re^{0.5} + 0.11Re^{0.67}\right]Pr^{0.4}
\left(\frac{\mu}{\mu_w}\right)^{0.25}
Parameters
----------
Re : float
Reynolds number with respect to cylinder diameter, [-]
Pr : float
Prandtl number at free stream temperature, [-]
mu : float, optional
Viscosity of fluid at the free stream temperature [Pa*s]
muw : float, optional
Viscosity of fluid at the wall temperature [Pa*s]
Returns
-------
Nu : float
Nusselt number with respect to cylinder diameter, [-]
Notes
-----
Considers new data since `Nu_cylinder_Perkins_Leppert_1962`, Re from 2E3 to
1.2E5, Pr from 1 to 7, and surface to bulk temperature differences of
11 to 66.
Examples
--------
>>> Nu_cylinder_Perkins_Leppert_1964(6071, 0.7)
53.61767038619986
References
----------
.. [1] Perkins Jr., H. C., and G. Leppert. "Local Heat-Transfer
Coefficients on a Uniformly Heated Cylinder." International Journal of
Heat and Mass Transfer 7, no. 2 (February 1964): 143-158.
doi:10.1016/0017-9310(64)90079-1.
.. [2] Sanitjai, S., and R. J. Goldstein. "Forced Convection Heat Transfer
from a Circular Cylinder in Crossflow to Air and Liquids." International
Journal of Heat and Mass Transfer 47, no. 22 (October 2004): 4795-4805.
doi:10.1016/j.ijheatmasstransfer.2004.05.012.
'''
Nu = (0.31*Re**0.5 + 0.11*Re**0.67)*Pr**0.4
if mu and muw:
Nu *= (mu/muw)**0.25
return Nu | [
"def",
"Nu_cylinder_Perkins_Leppert_1964",
"(",
"Re",
",",
"Pr",
",",
"mu",
"=",
"None",
",",
"muw",
"=",
"None",
")",
":",
"Nu",
"=",
"(",
"0.31",
"*",
"Re",
"**",
"0.5",
"+",
"0.11",
"*",
"Re",
"**",
"0.67",
")",
"*",
"Pr",
"**",
"0.4",
"if",
"mu",
"and",
"muw",
":",
"Nu",
"*=",
"(",
"mu",
"/",
"muw",
")",
"**",
"0.25",
"return",
"Nu"
] | 35.692308 | 0.000524 |
def gather_readme(self):
"""
        Return the contents of the readme file, or an empty string if it is missing.
"""
if not os.path.exists(self.paths["readme"]):
return ""
return utils.file_to_string(self.paths["readme"]) | [
"def",
"gather_readme",
"(",
"self",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"paths",
"[",
"\"readme\"",
"]",
")",
":",
"return",
"\"\"",
"return",
"utils",
".",
"file_to_string",
"(",
"self",
".",
"paths",
"[",
"\"readme\"",
"]",
")"
] | 25.875 | 0.009346 |
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, Callable)) | [
"def",
"register_hook",
"(",
"self",
",",
"event",
",",
"hook",
")",
":",
"if",
"event",
"not",
"in",
"self",
".",
"hooks",
":",
"raise",
"ValueError",
"(",
"'Unsupported event specified, with event name \"%s\"'",
"%",
"(",
"event",
")",
")",
"if",
"isinstance",
"(",
"hook",
",",
"Callable",
")",
":",
"self",
".",
"hooks",
"[",
"event",
"]",
".",
"append",
"(",
"hook",
")",
"elif",
"hasattr",
"(",
"hook",
",",
"'__iter__'",
")",
":",
"self",
".",
"hooks",
"[",
"event",
"]",
".",
"extend",
"(",
"h",
"for",
"h",
"in",
"hook",
"if",
"isinstance",
"(",
"h",
",",
"Callable",
")",
")"
] | 40.1 | 0.009756 |
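A hedged usage sketch, treating the method as a plain function and assuming the Callable check in the original module relies on collections.abc:

from collections.abc import Callable  # assumed import backing the isinstance check

client = type('Client', (), {})()
client.hooks = {'response': []}
register_hook(client, 'response', print)          # single callable appended
register_hook(client, 'response', (print, repr))  # iterable of callables extended
# register_hook(client, 'request', print)         # raises ValueError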
def verify_space_available(self, search_pattern=r"(\d+) \w+ free"):
"""Verify sufficient space is available on destination file system (return boolean)."""
if self.direction == "put":
space_avail = self.remote_space_available(search_pattern=search_pattern)
elif self.direction == "get":
space_avail = self.local_space_available()
if space_avail > self.file_size:
return True
return False | [
"def",
"verify_space_available",
"(",
"self",
",",
"search_pattern",
"=",
"r\"(\\d+) \\w+ free\"",
")",
":",
"if",
"self",
".",
"direction",
"==",
"\"put\"",
":",
"space_avail",
"=",
"self",
".",
"remote_space_available",
"(",
"search_pattern",
"=",
"search_pattern",
")",
"elif",
"self",
".",
"direction",
"==",
"\"get\"",
":",
"space_avail",
"=",
"self",
".",
"local_space_available",
"(",
")",
"if",
"space_avail",
">",
"self",
".",
"file_size",
":",
"return",
"True",
"return",
"False"
] | 50.555556 | 0.008639 |
def call_api(self, method_type, method_name,
valid_status_codes, resource, data,
uid, **kwargs):
"""
Make HTTP calls.
Args:
method_type: The HTTP method
method_name: The name of the python method making the HTTP call
valid_status_codes: A tuple of integer status codes
deemed acceptable as response statuses
resource: The resource class that will be generated
data: The post data being sent.
uid: The unique identifier of the resource.
            kwargs: Additional custom keyword arguments; these are passed
                through to the subclass methods:
                - get_url
                - prepare_http_request
                - get_http_headers
        Returns:
            The resource built by ``_handle_response`` from the HTTP
            response.
"""
url = resource.get_resource_url(
resource, base_url=self.Meta.base_url
)
if method_type in SINGLE_RESOURCE_METHODS:
if not uid and not kwargs:
raise MissingUidException
url = resource.get_url(
url=url, uid=uid, **kwargs)
params = {
'headers': self.get_http_headers(
self.Meta.name, method_name, **kwargs),
'url': url
}
if method_type in ['POST', 'PUT', 'PATCH'] and isinstance(data, dict):
params.update(json=data)
prepared_request = self.prepare_http_request(
method_type, params, **kwargs)
response = self.session.send(prepared_request)
return self._handle_response(response, valid_status_codes, resource) | [
"def",
"call_api",
"(",
"self",
",",
"method_type",
",",
"method_name",
",",
"valid_status_codes",
",",
"resource",
",",
"data",
",",
"uid",
",",
"*",
"*",
"kwargs",
")",
":",
"url",
"=",
"resource",
".",
"get_resource_url",
"(",
"resource",
",",
"base_url",
"=",
"self",
".",
"Meta",
".",
"base_url",
")",
"if",
"method_type",
"in",
"SINGLE_RESOURCE_METHODS",
":",
"if",
"not",
"uid",
"and",
"not",
"kwargs",
":",
"raise",
"MissingUidException",
"url",
"=",
"resource",
".",
"get_url",
"(",
"url",
"=",
"url",
",",
"uid",
"=",
"uid",
",",
"*",
"*",
"kwargs",
")",
"params",
"=",
"{",
"'headers'",
":",
"self",
".",
"get_http_headers",
"(",
"self",
".",
"Meta",
".",
"name",
",",
"method_name",
",",
"*",
"*",
"kwargs",
")",
",",
"'url'",
":",
"url",
"}",
"if",
"method_type",
"in",
"[",
"'POST'",
",",
"'PUT'",
",",
"'PATCH'",
"]",
"and",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"params",
".",
"update",
"(",
"json",
"=",
"data",
")",
"prepared_request",
"=",
"self",
".",
"prepare_http_request",
"(",
"method_type",
",",
"params",
",",
"*",
"*",
"kwargs",
")",
"response",
"=",
"self",
".",
"session",
".",
"send",
"(",
"prepared_request",
")",
"return",
"self",
".",
"_handle_response",
"(",
"response",
",",
"valid_status_codes",
",",
"resource",
")"
] | 38.255814 | 0.002371 |
def attr_fill_null(args):
"""
Assign the null sentinel value for all entities which do not have a value
for the given attributes.
see gs://broad-institute-gdac/GDAC_FC_NULL for more details
"""
NULL_SENTINEL = "gs://broad-institute-gdac/GDAC_FC_NULL"
attrs = args.attributes
if not attrs:
print("Error: provide at least one attribute to set")
return 1
if 'participant' in attrs or 'samples' in attrs:
print("Error: can't assign null to samples or participant")
return 1
# Set entity attributes
if args.entity_type is not None:
print("Collecting entity data...")
# Get existing attributes
entities = _entity_paginator(args.project, args.workspace,
args.entity_type,
page_size=1000, filter_terms=None,
sort_direction="asc")
# samples need participant_id as well
#TODO: This may need more fixing for other types
orig_attrs = list(attrs)
if args.entity_type == "sample":
attrs.insert(0, "participant_id")
header = "entity:" + args.entity_type + "_id\t" + "\t".join(attrs)
# Book keep the number of updates for each attribute
attr_update_counts = {a : 0 for a in orig_attrs}
# construct new entity data by inserting null sentinel, and counting
# the number of updates
entity_data = []
for entity_dict in entities:
name = entity_dict['name']
etype = entity_dict['entityType']
e_attrs = entity_dict['attributes']
line = name
altered = False
for attr in attrs:
if attr == "participant_id":
line += "\t" + e_attrs['participant']['entityName']
continue # This attribute is never updated by fill_null
if attr not in e_attrs:
altered = True
attr_update_counts[attr] += 1
line += "\t" + str(e_attrs.get(attr, NULL_SENTINEL))
# Improve performance by only updating records that have changed
if altered:
entity_data.append(line)
# Check to see if all entities are being set to null for any attributes
# This is usually a mistake, so warn the user
num_entities = len(entities)
prompt = "Continue? [Y\\n]: "
for attr in orig_attrs:
if num_entities == attr_update_counts[attr]:
message = "WARNING: no {0}s with attribute '{1}'\n".format(
args.entity_type, attr
)
if not args.yes and not _confirm_prompt(message, prompt):
return
# check to see if no sentinels are necessary
if not any(c != 0 for c in itervalues(attr_update_counts)):
print("No null sentinels required, exiting...")
return 0
if args.to_loadfile:
print("Saving loadfile to " + args.to_loadfile)
with open(args.to_loadfile, "w") as f:
f.write(header + '\n')
f.write("\n".join(entity_data))
return 0
updates_table = " count attribute\n"
for attr in sorted(attr_update_counts):
count = attr_update_counts[attr]
updates_table += "{0:>10} {1}\n".format(count, attr)
message = "WARNING: This will insert null sentinels for " \
"these attributes:\n" + updates_table
if not args.yes and not _confirm_prompt(message):
return 0
# Chunk the entities into batches of 500, and upload to FC
print("Batching " + str(len(entity_data)) + " updates to Firecloud...")
chunk_len = 500
total = int(len(entity_data) / chunk_len) + 1
batch = 0
for i in range(0, len(entity_data), chunk_len):
batch += 1
print("Updating samples {0}-{1}, batch {2}/{3}".format(
i+1, min(i+chunk_len, len(entity_data)), batch, total
))
this_data = header + '\n' + '\n'.join(entity_data[i:i+chunk_len])
# Now push the entity data back to firecloud
r = fapi.upload_entities(args.project, args.workspace, this_data)
fapi._check_response_code(r, 200)
return 0
else:
# TODO: set workspace attributes
print("attr_fill_null requires an entity type")
return 1 | [
"def",
"attr_fill_null",
"(",
"args",
")",
":",
"NULL_SENTINEL",
"=",
"\"gs://broad-institute-gdac/GDAC_FC_NULL\"",
"attrs",
"=",
"args",
".",
"attributes",
"if",
"not",
"attrs",
":",
"print",
"(",
"\"Error: provide at least one attribute to set\"",
")",
"return",
"1",
"if",
"'participant'",
"in",
"attrs",
"or",
"'samples'",
"in",
"attrs",
":",
"print",
"(",
"\"Error: can't assign null to samples or participant\"",
")",
"return",
"1",
"# Set entity attributes",
"if",
"args",
".",
"entity_type",
"is",
"not",
"None",
":",
"print",
"(",
"\"Collecting entity data...\"",
")",
"# Get existing attributes",
"entities",
"=",
"_entity_paginator",
"(",
"args",
".",
"project",
",",
"args",
".",
"workspace",
",",
"args",
".",
"entity_type",
",",
"page_size",
"=",
"1000",
",",
"filter_terms",
"=",
"None",
",",
"sort_direction",
"=",
"\"asc\"",
")",
"# samples need participant_id as well",
"#TODO: This may need more fixing for other types",
"orig_attrs",
"=",
"list",
"(",
"attrs",
")",
"if",
"args",
".",
"entity_type",
"==",
"\"sample\"",
":",
"attrs",
".",
"insert",
"(",
"0",
",",
"\"participant_id\"",
")",
"header",
"=",
"\"entity:\"",
"+",
"args",
".",
"entity_type",
"+",
"\"_id\\t\"",
"+",
"\"\\t\"",
".",
"join",
"(",
"attrs",
")",
"# Book keep the number of updates for each attribute",
"attr_update_counts",
"=",
"{",
"a",
":",
"0",
"for",
"a",
"in",
"orig_attrs",
"}",
"# construct new entity data by inserting null sentinel, and counting",
"# the number of updates",
"entity_data",
"=",
"[",
"]",
"for",
"entity_dict",
"in",
"entities",
":",
"name",
"=",
"entity_dict",
"[",
"'name'",
"]",
"etype",
"=",
"entity_dict",
"[",
"'entityType'",
"]",
"e_attrs",
"=",
"entity_dict",
"[",
"'attributes'",
"]",
"line",
"=",
"name",
"altered",
"=",
"False",
"for",
"attr",
"in",
"attrs",
":",
"if",
"attr",
"==",
"\"participant_id\"",
":",
"line",
"+=",
"\"\\t\"",
"+",
"e_attrs",
"[",
"'participant'",
"]",
"[",
"'entityName'",
"]",
"continue",
"# This attribute is never updated by fill_null",
"if",
"attr",
"not",
"in",
"e_attrs",
":",
"altered",
"=",
"True",
"attr_update_counts",
"[",
"attr",
"]",
"+=",
"1",
"line",
"+=",
"\"\\t\"",
"+",
"str",
"(",
"e_attrs",
".",
"get",
"(",
"attr",
",",
"NULL_SENTINEL",
")",
")",
"# Improve performance by only updating records that have changed",
"if",
"altered",
":",
"entity_data",
".",
"append",
"(",
"line",
")",
"# Check to see if all entities are being set to null for any attributes",
"# This is usually a mistake, so warn the user",
"num_entities",
"=",
"len",
"(",
"entities",
")",
"prompt",
"=",
"\"Continue? [Y\\\\n]: \"",
"for",
"attr",
"in",
"orig_attrs",
":",
"if",
"num_entities",
"==",
"attr_update_counts",
"[",
"attr",
"]",
":",
"message",
"=",
"\"WARNING: no {0}s with attribute '{1}'\\n\"",
".",
"format",
"(",
"args",
".",
"entity_type",
",",
"attr",
")",
"if",
"not",
"args",
".",
"yes",
"and",
"not",
"_confirm_prompt",
"(",
"message",
",",
"prompt",
")",
":",
"return",
"# check to see if no sentinels are necessary",
"if",
"not",
"any",
"(",
"c",
"!=",
"0",
"for",
"c",
"in",
"itervalues",
"(",
"attr_update_counts",
")",
")",
":",
"print",
"(",
"\"No null sentinels required, exiting...\"",
")",
"return",
"0",
"if",
"args",
".",
"to_loadfile",
":",
"print",
"(",
"\"Saving loadfile to \"",
"+",
"args",
".",
"to_loadfile",
")",
"with",
"open",
"(",
"args",
".",
"to_loadfile",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"header",
"+",
"'\\n'",
")",
"f",
".",
"write",
"(",
"\"\\n\"",
".",
"join",
"(",
"entity_data",
")",
")",
"return",
"0",
"updates_table",
"=",
"\" count attribute\\n\"",
"for",
"attr",
"in",
"sorted",
"(",
"attr_update_counts",
")",
":",
"count",
"=",
"attr_update_counts",
"[",
"attr",
"]",
"updates_table",
"+=",
"\"{0:>10} {1}\\n\"",
".",
"format",
"(",
"count",
",",
"attr",
")",
"message",
"=",
"\"WARNING: This will insert null sentinels for \"",
"\"these attributes:\\n\"",
"+",
"updates_table",
"if",
"not",
"args",
".",
"yes",
"and",
"not",
"_confirm_prompt",
"(",
"message",
")",
":",
"return",
"0",
"# Chunk the entities into batches of 500, and upload to FC",
"print",
"(",
"\"Batching \"",
"+",
"str",
"(",
"len",
"(",
"entity_data",
")",
")",
"+",
"\" updates to Firecloud...\"",
")",
"chunk_len",
"=",
"500",
"total",
"=",
"int",
"(",
"len",
"(",
"entity_data",
")",
"/",
"chunk_len",
")",
"+",
"1",
"batch",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"entity_data",
")",
",",
"chunk_len",
")",
":",
"batch",
"+=",
"1",
"print",
"(",
"\"Updating samples {0}-{1}, batch {2}/{3}\"",
".",
"format",
"(",
"i",
"+",
"1",
",",
"min",
"(",
"i",
"+",
"chunk_len",
",",
"len",
"(",
"entity_data",
")",
")",
",",
"batch",
",",
"total",
")",
")",
"this_data",
"=",
"header",
"+",
"'\\n'",
"+",
"'\\n'",
".",
"join",
"(",
"entity_data",
"[",
"i",
":",
"i",
"+",
"chunk_len",
"]",
")",
"# Now push the entity data back to firecloud",
"r",
"=",
"fapi",
".",
"upload_entities",
"(",
"args",
".",
"project",
",",
"args",
".",
"workspace",
",",
"this_data",
")",
"fapi",
".",
"_check_response_code",
"(",
"r",
",",
"200",
")",
"return",
"0",
"else",
":",
"# TODO: set workspace attributes",
"print",
"(",
"\"attr_fill_null requires an entity type\"",
")",
"return",
"1"
] | 39.274336 | 0.000879 |
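The batching loop in the row above is a generic chunked-upload pattern. A minimal standalone sketch follows; `upload_in_chunks` and the `upload` callable are hypothetical stand-ins (the latter for `fapi.upload_entities(project, workspace, data)`), and it uses ceiling division for the batch total, since the `int(len(entity_data) / chunk_len) + 1` form above overcounts by one when the length is an exact multiple of `chunk_len`.

# Minimal sketch of the chunked-upload pattern; `upload` is a hypothetical
# stand-in for fapi.upload_entities(project, workspace, data).
def upload_in_chunks(header, rows, upload, chunk_len=500):
    total = (len(rows) + chunk_len - 1) // chunk_len  # ceiling division
    for batch, i in enumerate(range(0, len(rows), chunk_len), start=1):
        chunk = rows[i:i + chunk_len]
        print("Uploading batch {0}/{1} ({2} rows)".format(batch, total, len(chunk)))
        upload(header + "\n" + "\n".join(chunk))

upload_in_chunks("entity:sample_id\tattr", ["s1\tv1", "s2\tv2"], print, chunk_len=1)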
def gen_reaction(args, resource, depletable=0):
"""
Returns a line of text to add to an environment file, initializing a
reaction that uses the resource specified in the first
argument to perform the associated task (resource names are expected to
be of the form "resTASK#" where "TASK" corresponds
to the task the resource is associated with and # is an integer uniquely
identifying that specific gradient resource. For
example, the first AND resource would be named resAND0). An optional
second argument (int) specifies whether or not the reaction
should deplete the resource (by default it will not).
"""
task = resource.lower()
if task[:3] == "res":
task = task[3:]
while task[-1].isdigit():
task = task[:-1]
name = resource[3:]
return "".join(["REACTION ", name, " ", task, " process:resource=",
resource, ":value=", str(args.taskValDict[task]), ":type=",
args.rxnType, ":frac=", str(args.frac), ":max=",
str(args.resMax), ":depletable=", str(int(depletable)),
" requisite:max_count=", str(args.maxCount), "\n"]) | [
"def",
"gen_reaction",
"(",
"args",
",",
"resource",
",",
"depletable",
"=",
"0",
")",
":",
"task",
"=",
"resource",
".",
"lower",
"(",
")",
"if",
"task",
"[",
":",
"3",
"]",
"==",
"\"res\"",
":",
"task",
"=",
"task",
"[",
"3",
":",
"]",
"while",
"task",
"[",
"-",
"1",
"]",
".",
"isdigit",
"(",
")",
":",
"task",
"=",
"task",
"[",
":",
"-",
"1",
"]",
"name",
"=",
"resource",
"[",
"3",
":",
"]",
"return",
"\"\"",
".",
"join",
"(",
"[",
"\"REACTION \"",
",",
"name",
",",
"\" \"",
",",
"task",
",",
"\" process:resource=\"",
",",
"resource",
",",
"\":value=\"",
",",
"str",
"(",
"args",
".",
"taskValDict",
"[",
"task",
"]",
")",
",",
"\":type=\"",
",",
"args",
".",
"rxnType",
",",
"\":frac=\"",
",",
"str",
"(",
"args",
".",
"frac",
")",
",",
"\":max=\"",
",",
"str",
"(",
"args",
".",
"resMax",
")",
",",
"\":depletable=\"",
",",
"str",
"(",
"int",
"(",
"depletable",
")",
")",
",",
"\" requisite:max_count=\"",
",",
"str",
"(",
"args",
".",
"maxCount",
")",
",",
"\"\\n\"",
"]",
")"
] | 48.125 | 0.000849 |
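A hypothetical invocation of gen_reaction, assuming an args namespace that carries the fields the function reads (taskValDict, rxnType, frac, resMax, maxCount); the trailing comment shows the line the string concatenation produces for the first AND resource:

from argparse import Namespace

args = Namespace(taskValDict={"and": 2.0}, rxnType="pow", frac=0.0025,
                 resMax=25, maxCount=1)
print(gen_reaction(args, "resAND0"), end="")
# REACTION AND0 and process:resource=resAND0:value=2.0:type=pow:frac=0.0025:max=25:depletable=0 requisite:max_count=1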
def template_gen(method, lowcut, highcut, samp_rate, filt_order,
length, prepick, swin, process_len=86400,
all_horiz=False, delayed=True, plot=False, debug=0,
return_event=False, min_snr=None, parallel=False,
num_cores=False, save_progress=False, **kwargs):
"""
Generate processed and cut waveforms for use as templates.
:type method: str
:param method:
Template generation method, must be one of ('from_client',
        'from_seishub', 'from_sac', 'from_meta_file'). Each method requires
        associated arguments; see note below.
:type lowcut: float
:param lowcut: Low cut (Hz), if set to None will not apply a lowcut.
:type highcut: float
:param highcut: High cut (Hz), if set to None will not apply a highcut.
:type samp_rate: float
:param samp_rate: New sampling rate in Hz.
:type filt_order: int
:param filt_order: Filter level (number of corners).
:type length: float
:param length: Extract length in seconds.
:type prepick: float
:param prepick: Pre-pick time in seconds
:type swin: str
:param swin:
P, S, P_all, S_all or all, defaults to all: see note in
:func:`eqcorrscan.core.template_gen.template_gen`
:type process_len: int
:param process_len: Length of data in seconds to download and process.
:type all_horiz: bool
:param all_horiz: To use both horizontal channels even if there is only \
a pick on one of them. Defaults to False.
:type delayed: bool
    :param delayed: If True, each channel will begin relative to its own \
        pick-time; if set to False, each channel will begin at the same time.
:type plot: bool
:param plot: Plot templates or not.
:type debug: int
:param debug: Level of debugging output, higher=more
:type return_event: bool
:param return_event: Whether to return the event and process length or not.
:type min_snr: float
:param min_snr:
Minimum signal-to-noise ratio for a channel to be included in the
template, where signal-to-noise ratio is calculated as the ratio of
the maximum amplitude in the template window to the rms amplitude in
the whole window given.
:type parallel: bool
:param parallel: Whether to process data in parallel or not.
:type num_cores: int
:param num_cores:
Number of cores to try and use, if False and parallel=True, will use
either all your cores, or as many traces as in the data (whichever is
smaller).
:type save_progress: bool
:param save_progress:
Whether to save the resulting party at every data step or not.
Useful for long-running processes.
:returns: List of :class:`obspy.core.stream.Stream` Templates
:rtype: list
.. note::
*Method specific arguments:*
- `from_client` requires:
:param str client_id: string passable by obspy to generate Client
:param `obspy.core.event.Catalog` catalog:
Catalog of events to generate template for
:param float data_pad: Pad length for data-downloads in seconds
- `from_seishub` requires:
:param str url: url to seishub database
:param `obspy.core.event.Catalog` catalog:
Catalog of events to generate template for
:param float data_pad: Pad length for data-downloads in seconds
- `from_sac` requires:
:param list sac_files:
            obspy.core.stream.Stream of sac waveforms, or list of paths to
sac waveforms.
- `from_meta_file` requires:
:param str meta_file: Path to obspy-readable event file.
:param `obspy.core.stream.Stream` st:
Stream containing waveform data for template. Note that this
should be the same length of stream as you will use for the
continuous detection, e.g. if you detect in day-long files,
give this a day-long file!
:param bool process:
Whether to process the data or not, defaults to True.
.. note::
process_len should be set to the same length as used when computing
detections using match_filter.match_filter, e.g. if you read
in day-long data for match_filter, process_len should be 86400.
.. rubric:: Example
>>> from obspy.clients.fdsn import Client
>>> from eqcorrscan.core.template_gen import template_gen
>>> client = Client('NCEDC')
>>> catalog = client.get_events(eventid='72572665', includearrivals=True)
>>> # We are only taking two picks for this example to speed up the
>>> # example, note that you don't have to!
>>> catalog[0].picks = catalog[0].picks[0:2]
>>> templates = template_gen(
... method='from_client', catalog=catalog, client_id='NCEDC',
... lowcut=2.0, highcut=9.0, samp_rate=20.0, filt_order=4, length=3.0,
... prepick=0.15, swin='all', process_len=300, all_horiz=True)
>>> templates[0].plot(equal_scale=False, size=(800,600)) # doctest: +SKIP
.. figure:: ../../plots/template_gen.from_client.png
.. rubric:: Example
>>> from obspy import read
>>> from eqcorrscan.core.template_gen import template_gen
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> st = read(TEST_PATH + '/WAV/TEST_/' +
... '2013-09-01-0410-35.DFDPC_024_00')
>>> quakeml = TEST_PATH + '/20130901T041115.xml'
>>> templates = template_gen(
... method='from_meta_file', meta_file=quakeml, st=st, lowcut=2.0,
... highcut=9.0, samp_rate=20.0, filt_order=3, length=2, prepick=0.1,
... swin='S', all_horiz=True)
>>> print(len(templates[0]))
10
>>> templates = template_gen(
... method='from_meta_file', meta_file=quakeml, st=st, lowcut=2.0,
... highcut=9.0, samp_rate=20.0, filt_order=3, length=2, prepick=0.1,
... swin='S_all', all_horiz=True)
>>> print(len(templates[0]))
15
.. rubric:: Example
>>> from eqcorrscan.core.template_gen import template_gen
>>> import glob
>>> # Get all the SAC-files associated with one event.
>>> sac_files = glob.glob(TEST_PATH + '/SAC/2014p611252/*')
>>> templates = template_gen(
... method='from_sac', sac_files=sac_files, lowcut=2.0, highcut=10.0,
... samp_rate=25.0, filt_order=4, length=2.0, swin='all', prepick=0.1,
... all_horiz=True)
>>> print(templates[0][0].stats.sampling_rate)
25.0
>>> print(len(templates[0]))
15
"""
client_map = {'from_client': 'fdsn', 'from_seishub': 'seishub'}
assert method in ('from_client', 'from_seishub', 'from_meta_file',
'from_sac')
if not isinstance(swin, list):
swin = [swin]
process = True
if method in ['from_client', 'from_seishub']:
catalog = kwargs.get('catalog', Catalog())
data_pad = kwargs.get('data_pad', 90)
# Group catalog into days and only download the data once per day
sub_catalogs = _group_events(
catalog=catalog, process_len=process_len, template_length=length,
data_pad=data_pad)
if method == 'from_client':
client = FDSNClient(kwargs.get('client_id', None))
available_stations = []
else:
client = SeisHubClient(kwargs.get('url', None), timeout=10)
available_stations = client.waveform.get_station_ids()
elif method == 'from_meta_file':
if isinstance(kwargs.get('meta_file'), Catalog):
catalog = kwargs.get('meta_file')
elif kwargs.get('meta_file'):
catalog = read_events(kwargs.get('meta_file'))
elif kwargs.get('catalog'):
catalog = kwargs.get('catalog')
sub_catalogs = [catalog]
st = kwargs.get('st', Stream())
process = kwargs.get('process', True)
elif method == 'from_sac':
sac_files = kwargs.get('sac_files')
if isinstance(sac_files, list):
if isinstance(sac_files[0], (Stream, Trace)):
# This is a list of streams...
st = Stream(sac_files[0])
for sac_file in sac_files[1:]:
st += sac_file
else:
sac_files = [read(sac_file)[0] for sac_file in sac_files]
st = Stream(sac_files)
else:
st = sac_files
# Make an event object...
catalog = Catalog([sactoevent(st, debug=debug)])
sub_catalogs = [catalog]
temp_list = []
process_lengths = []
if "P_all" in swin or "S_all" in swin or all_horiz:
all_channels = True
else:
all_channels = False
for sub_catalog in sub_catalogs:
if method in ['from_seishub', 'from_client']:
debug_print("Downloading data", 1, debug)
st = _download_from_client(
client=client, client_type=client_map[method],
catalog=sub_catalog, data_pad=data_pad,
process_len=process_len, available_stations=available_stations,
all_channels=all_channels, debug=debug)
debug_print('Pre-processing data', 0, debug)
st.merge()
if process:
data_len = max([len(tr.data) / tr.stats.sampling_rate
for tr in st])
if 80000 < data_len < 90000:
daylong = True
starttime = min([tr.stats.starttime for tr in st])
min_delta = min([tr.stats.delta for tr in st])
# Cope with the common starttime less than 1 sample before the
# start of day.
if (starttime + min_delta).date > starttime.date:
starttime = (starttime + min_delta)
# Check if this is stupid:
if abs(starttime - UTCDateTime(starttime.date)) > 600:
print(abs(starttime - UTCDateTime(starttime.date)))
daylong = False
starttime = starttime.date
else:
daylong = False
if daylong:
st = pre_processing.dayproc(
st=st, lowcut=lowcut, highcut=highcut,
filt_order=filt_order, samp_rate=samp_rate, debug=debug,
parallel=parallel, starttime=UTCDateTime(starttime),
num_cores=num_cores)
else:
st = pre_processing.shortproc(
st=st, lowcut=lowcut, highcut=highcut,
filt_order=filt_order, parallel=parallel,
samp_rate=samp_rate, debug=debug, num_cores=num_cores)
data_start = min([tr.stats.starttime for tr in st])
data_end = max([tr.stats.endtime for tr in st])
for event in sub_catalog:
stations, channels, st_stachans = ([], [], [])
if len(event.picks) == 0:
debug_print('No picks for event {0}'.format(event.resource_id),
2, debug)
continue
use_event = True
# Check that the event is within the data
for pick in event.picks:
if not data_start < pick.time < data_end:
debug_print("Pick outside of data span:\nPick time %s\n"
"Start time %s\nEnd time: %s" %
(str(pick.time), str(data_start),
str(data_end)), 0, debug)
use_event = False
if not use_event:
debug_print('Event is not within data time-span', 2, debug)
continue
# Read in pick info
debug_print("I have found the following picks", 0, debug)
for pick in event.picks:
if not pick.waveform_id:
debug_print(
'Pick not associated with waveforms, will not use:'
' {0}'.format(pick), 1, debug)
continue
debug_print(pick, 0, debug)
stations.append(pick.waveform_id.station_code)
channels.append(pick.waveform_id.channel_code)
# Check to see if all picks have a corresponding waveform
for tr in st:
st_stachans.append('.'.join([tr.stats.station,
tr.stats.channel]))
# Cut and extract the templates
template = _template_gen(
event.picks, st, length, swin, prepick=prepick, plot=plot,
debug=debug, all_horiz=all_horiz, delayed=delayed,
min_snr=min_snr)
process_lengths.append(len(st[0].data) / samp_rate)
temp_list.append(template)
if save_progress:
if not os.path.isdir("eqcorrscan_temporary_templates"):
os.makedirs("eqcorrscan_temporary_templates")
for template in temp_list:
template.write(
"eqcorrscan_temporary_templates{0}{1}.ms".format(
os.path.sep, template[0].stats.starttime),
format="MSEED")
del st
if return_event:
return temp_list, catalog, process_lengths
return temp_list | [
"def",
"template_gen",
"(",
"method",
",",
"lowcut",
",",
"highcut",
",",
"samp_rate",
",",
"filt_order",
",",
"length",
",",
"prepick",
",",
"swin",
",",
"process_len",
"=",
"86400",
",",
"all_horiz",
"=",
"False",
",",
"delayed",
"=",
"True",
",",
"plot",
"=",
"False",
",",
"debug",
"=",
"0",
",",
"return_event",
"=",
"False",
",",
"min_snr",
"=",
"None",
",",
"parallel",
"=",
"False",
",",
"num_cores",
"=",
"False",
",",
"save_progress",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"client_map",
"=",
"{",
"'from_client'",
":",
"'fdsn'",
",",
"'from_seishub'",
":",
"'seishub'",
"}",
"assert",
"method",
"in",
"(",
"'from_client'",
",",
"'from_seishub'",
",",
"'from_meta_file'",
",",
"'from_sac'",
")",
"if",
"not",
"isinstance",
"(",
"swin",
",",
"list",
")",
":",
"swin",
"=",
"[",
"swin",
"]",
"process",
"=",
"True",
"if",
"method",
"in",
"[",
"'from_client'",
",",
"'from_seishub'",
"]",
":",
"catalog",
"=",
"kwargs",
".",
"get",
"(",
"'catalog'",
",",
"Catalog",
"(",
")",
")",
"data_pad",
"=",
"kwargs",
".",
"get",
"(",
"'data_pad'",
",",
"90",
")",
"# Group catalog into days and only download the data once per day",
"sub_catalogs",
"=",
"_group_events",
"(",
"catalog",
"=",
"catalog",
",",
"process_len",
"=",
"process_len",
",",
"template_length",
"=",
"length",
",",
"data_pad",
"=",
"data_pad",
")",
"if",
"method",
"==",
"'from_client'",
":",
"client",
"=",
"FDSNClient",
"(",
"kwargs",
".",
"get",
"(",
"'client_id'",
",",
"None",
")",
")",
"available_stations",
"=",
"[",
"]",
"else",
":",
"client",
"=",
"SeisHubClient",
"(",
"kwargs",
".",
"get",
"(",
"'url'",
",",
"None",
")",
",",
"timeout",
"=",
"10",
")",
"available_stations",
"=",
"client",
".",
"waveform",
".",
"get_station_ids",
"(",
")",
"elif",
"method",
"==",
"'from_meta_file'",
":",
"if",
"isinstance",
"(",
"kwargs",
".",
"get",
"(",
"'meta_file'",
")",
",",
"Catalog",
")",
":",
"catalog",
"=",
"kwargs",
".",
"get",
"(",
"'meta_file'",
")",
"elif",
"kwargs",
".",
"get",
"(",
"'meta_file'",
")",
":",
"catalog",
"=",
"read_events",
"(",
"kwargs",
".",
"get",
"(",
"'meta_file'",
")",
")",
"elif",
"kwargs",
".",
"get",
"(",
"'catalog'",
")",
":",
"catalog",
"=",
"kwargs",
".",
"get",
"(",
"'catalog'",
")",
"sub_catalogs",
"=",
"[",
"catalog",
"]",
"st",
"=",
"kwargs",
".",
"get",
"(",
"'st'",
",",
"Stream",
"(",
")",
")",
"process",
"=",
"kwargs",
".",
"get",
"(",
"'process'",
",",
"True",
")",
"elif",
"method",
"==",
"'from_sac'",
":",
"sac_files",
"=",
"kwargs",
".",
"get",
"(",
"'sac_files'",
")",
"if",
"isinstance",
"(",
"sac_files",
",",
"list",
")",
":",
"if",
"isinstance",
"(",
"sac_files",
"[",
"0",
"]",
",",
"(",
"Stream",
",",
"Trace",
")",
")",
":",
"# This is a list of streams...",
"st",
"=",
"Stream",
"(",
"sac_files",
"[",
"0",
"]",
")",
"for",
"sac_file",
"in",
"sac_files",
"[",
"1",
":",
"]",
":",
"st",
"+=",
"sac_file",
"else",
":",
"sac_files",
"=",
"[",
"read",
"(",
"sac_file",
")",
"[",
"0",
"]",
"for",
"sac_file",
"in",
"sac_files",
"]",
"st",
"=",
"Stream",
"(",
"sac_files",
")",
"else",
":",
"st",
"=",
"sac_files",
"# Make an event object...",
"catalog",
"=",
"Catalog",
"(",
"[",
"sactoevent",
"(",
"st",
",",
"debug",
"=",
"debug",
")",
"]",
")",
"sub_catalogs",
"=",
"[",
"catalog",
"]",
"temp_list",
"=",
"[",
"]",
"process_lengths",
"=",
"[",
"]",
"if",
"\"P_all\"",
"in",
"swin",
"or",
"\"S_all\"",
"in",
"swin",
"or",
"all_horiz",
":",
"all_channels",
"=",
"True",
"else",
":",
"all_channels",
"=",
"False",
"for",
"sub_catalog",
"in",
"sub_catalogs",
":",
"if",
"method",
"in",
"[",
"'from_seishub'",
",",
"'from_client'",
"]",
":",
"debug_print",
"(",
"\"Downloading data\"",
",",
"1",
",",
"debug",
")",
"st",
"=",
"_download_from_client",
"(",
"client",
"=",
"client",
",",
"client_type",
"=",
"client_map",
"[",
"method",
"]",
",",
"catalog",
"=",
"sub_catalog",
",",
"data_pad",
"=",
"data_pad",
",",
"process_len",
"=",
"process_len",
",",
"available_stations",
"=",
"available_stations",
",",
"all_channels",
"=",
"all_channels",
",",
"debug",
"=",
"debug",
")",
"debug_print",
"(",
"'Pre-processing data'",
",",
"0",
",",
"debug",
")",
"st",
".",
"merge",
"(",
")",
"if",
"process",
":",
"data_len",
"=",
"max",
"(",
"[",
"len",
"(",
"tr",
".",
"data",
")",
"/",
"tr",
".",
"stats",
".",
"sampling_rate",
"for",
"tr",
"in",
"st",
"]",
")",
"if",
"80000",
"<",
"data_len",
"<",
"90000",
":",
"daylong",
"=",
"True",
"starttime",
"=",
"min",
"(",
"[",
"tr",
".",
"stats",
".",
"starttime",
"for",
"tr",
"in",
"st",
"]",
")",
"min_delta",
"=",
"min",
"(",
"[",
"tr",
".",
"stats",
".",
"delta",
"for",
"tr",
"in",
"st",
"]",
")",
"# Cope with the common starttime less than 1 sample before the",
"# start of day.",
"if",
"(",
"starttime",
"+",
"min_delta",
")",
".",
"date",
">",
"starttime",
".",
"date",
":",
"starttime",
"=",
"(",
"starttime",
"+",
"min_delta",
")",
"# Check if this is stupid:",
"if",
"abs",
"(",
"starttime",
"-",
"UTCDateTime",
"(",
"starttime",
".",
"date",
")",
")",
">",
"600",
":",
"print",
"(",
"abs",
"(",
"starttime",
"-",
"UTCDateTime",
"(",
"starttime",
".",
"date",
")",
")",
")",
"daylong",
"=",
"False",
"starttime",
"=",
"starttime",
".",
"date",
"else",
":",
"daylong",
"=",
"False",
"if",
"daylong",
":",
"st",
"=",
"pre_processing",
".",
"dayproc",
"(",
"st",
"=",
"st",
",",
"lowcut",
"=",
"lowcut",
",",
"highcut",
"=",
"highcut",
",",
"filt_order",
"=",
"filt_order",
",",
"samp_rate",
"=",
"samp_rate",
",",
"debug",
"=",
"debug",
",",
"parallel",
"=",
"parallel",
",",
"starttime",
"=",
"UTCDateTime",
"(",
"starttime",
")",
",",
"num_cores",
"=",
"num_cores",
")",
"else",
":",
"st",
"=",
"pre_processing",
".",
"shortproc",
"(",
"st",
"=",
"st",
",",
"lowcut",
"=",
"lowcut",
",",
"highcut",
"=",
"highcut",
",",
"filt_order",
"=",
"filt_order",
",",
"parallel",
"=",
"parallel",
",",
"samp_rate",
"=",
"samp_rate",
",",
"debug",
"=",
"debug",
",",
"num_cores",
"=",
"num_cores",
")",
"data_start",
"=",
"min",
"(",
"[",
"tr",
".",
"stats",
".",
"starttime",
"for",
"tr",
"in",
"st",
"]",
")",
"data_end",
"=",
"max",
"(",
"[",
"tr",
".",
"stats",
".",
"endtime",
"for",
"tr",
"in",
"st",
"]",
")",
"for",
"event",
"in",
"sub_catalog",
":",
"stations",
",",
"channels",
",",
"st_stachans",
"=",
"(",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
")",
"if",
"len",
"(",
"event",
".",
"picks",
")",
"==",
"0",
":",
"debug_print",
"(",
"'No picks for event {0}'",
".",
"format",
"(",
"event",
".",
"resource_id",
")",
",",
"2",
",",
"debug",
")",
"continue",
"use_event",
"=",
"True",
"# Check that the event is within the data",
"for",
"pick",
"in",
"event",
".",
"picks",
":",
"if",
"not",
"data_start",
"<",
"pick",
".",
"time",
"<",
"data_end",
":",
"debug_print",
"(",
"\"Pick outside of data span:\\nPick time %s\\n\"",
"\"Start time %s\\nEnd time: %s\"",
"%",
"(",
"str",
"(",
"pick",
".",
"time",
")",
",",
"str",
"(",
"data_start",
")",
",",
"str",
"(",
"data_end",
")",
")",
",",
"0",
",",
"debug",
")",
"use_event",
"=",
"False",
"if",
"not",
"use_event",
":",
"debug_print",
"(",
"'Event is not within data time-span'",
",",
"2",
",",
"debug",
")",
"continue",
"# Read in pick info",
"debug_print",
"(",
"\"I have found the following picks\"",
",",
"0",
",",
"debug",
")",
"for",
"pick",
"in",
"event",
".",
"picks",
":",
"if",
"not",
"pick",
".",
"waveform_id",
":",
"debug_print",
"(",
"'Pick not associated with waveforms, will not use:'",
"' {0}'",
".",
"format",
"(",
"pick",
")",
",",
"1",
",",
"debug",
")",
"continue",
"debug_print",
"(",
"pick",
",",
"0",
",",
"debug",
")",
"stations",
".",
"append",
"(",
"pick",
".",
"waveform_id",
".",
"station_code",
")",
"channels",
".",
"append",
"(",
"pick",
".",
"waveform_id",
".",
"channel_code",
")",
"# Check to see if all picks have a corresponding waveform",
"for",
"tr",
"in",
"st",
":",
"st_stachans",
".",
"append",
"(",
"'.'",
".",
"join",
"(",
"[",
"tr",
".",
"stats",
".",
"station",
",",
"tr",
".",
"stats",
".",
"channel",
"]",
")",
")",
"# Cut and extract the templates",
"template",
"=",
"_template_gen",
"(",
"event",
".",
"picks",
",",
"st",
",",
"length",
",",
"swin",
",",
"prepick",
"=",
"prepick",
",",
"plot",
"=",
"plot",
",",
"debug",
"=",
"debug",
",",
"all_horiz",
"=",
"all_horiz",
",",
"delayed",
"=",
"delayed",
",",
"min_snr",
"=",
"min_snr",
")",
"process_lengths",
".",
"append",
"(",
"len",
"(",
"st",
"[",
"0",
"]",
".",
"data",
")",
"/",
"samp_rate",
")",
"temp_list",
".",
"append",
"(",
"template",
")",
"if",
"save_progress",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"\"eqcorrscan_temporary_templates\"",
")",
":",
"os",
".",
"makedirs",
"(",
"\"eqcorrscan_temporary_templates\"",
")",
"for",
"template",
"in",
"temp_list",
":",
"template",
".",
"write",
"(",
"\"eqcorrscan_temporary_templates{0}{1}.ms\"",
".",
"format",
"(",
"os",
".",
"path",
".",
"sep",
",",
"template",
"[",
"0",
"]",
".",
"stats",
".",
"starttime",
")",
",",
"format",
"=",
"\"MSEED\"",
")",
"del",
"st",
"if",
"return_event",
":",
"return",
"temp_list",
",",
"catalog",
",",
"process_lengths",
"return",
"temp_list"
] | 44.110368 | 0.000074 |
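One heuristic from the row above is worth isolating: a trace whose duration falls between 80000 s and 90000 s is treated as a day-long file (a day is 86400 s). A minimal standalone sketch:

def is_daylong(n_samples, sampling_rate):
    data_len = n_samples / sampling_rate  # trace length in seconds
    return 80000 < data_len < 90000

print(is_daylong(n_samples=8640000, sampling_rate=100.0))  # True: 86400 s
print(is_daylong(n_samples=30000, sampling_rate=100.0))    # False: 300 s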
def new_tag(self, name: str, category: str=None) -> models.Tag:
"""Create a new tag."""
new_tag = self.Tag(name=name, category=category)
return new_tag | [
"def",
"new_tag",
"(",
"self",
",",
"name",
":",
"str",
",",
"category",
":",
"str",
"=",
"None",
")",
"->",
"models",
".",
"Tag",
":",
"new_tag",
"=",
"self",
".",
"Tag",
"(",
"name",
"=",
"name",
",",
"category",
"=",
"category",
")",
"return",
"new_tag"
] | 43 | 0.022857 |
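Illustrative usage only, assuming a hypothetical holder object `store` that exposes the method above and a Tag model with name/category fields:

tag = store.new_tag(name="urgent", category="priority")  # `store` is hypothetical
print(tag.name, tag.category)                            # urgent priority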
def get_solarposition(self, times, pressure=None, temperature=12,
**kwargs):
"""
Uses the :py:func:`solarposition.get_solarposition` function
to calculate the solar zenith, azimuth, etc. at this location.
Parameters
----------
times : DatetimeIndex
pressure : None, float, or array-like, default None
If None, pressure will be calculated using
:py:func:`atmosphere.alt2pres` and ``self.altitude``.
temperature : None, float, or array-like, default 12
kwargs
passed to :py:func:`solarposition.get_solarposition`
Returns
-------
solar_position : DataFrame
Columns depend on the ``method`` kwarg, but always include
``zenith`` and ``azimuth``.
"""
if pressure is None:
pressure = atmosphere.alt2pres(self.altitude)
return solarposition.get_solarposition(times, latitude=self.latitude,
longitude=self.longitude,
altitude=self.altitude,
pressure=pressure,
temperature=temperature,
**kwargs) | [
"def",
"get_solarposition",
"(",
"self",
",",
"times",
",",
"pressure",
"=",
"None",
",",
"temperature",
"=",
"12",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"pressure",
"is",
"None",
":",
"pressure",
"=",
"atmosphere",
".",
"alt2pres",
"(",
"self",
".",
"altitude",
")",
"return",
"solarposition",
".",
"get_solarposition",
"(",
"times",
",",
"latitude",
"=",
"self",
".",
"latitude",
",",
"longitude",
"=",
"self",
".",
"longitude",
",",
"altitude",
"=",
"self",
".",
"altitude",
",",
"pressure",
"=",
"pressure",
",",
"temperature",
"=",
"temperature",
",",
"*",
"*",
"kwargs",
")"
] | 40.96875 | 0.002235 |
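Example usage, assuming the row above is pvlib's Location.get_solarposition; per pvlib's documentation the returned DataFrame includes 'zenith' and 'azimuth' columns:

import pandas as pd
from pvlib.location import Location

loc = Location(latitude=32.2, longitude=-110.9, tz='US/Arizona', altitude=700)
times = pd.date_range('2019-06-01 06:00', '2019-06-01 18:00',
                      freq='1h', tz=loc.tz)
solpos = loc.get_solarposition(times)  # pressure derived from altitude
print(solpos[['zenith', 'azimuth']].head())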
def tool(self):
"""The tool that was in use during this event.
If the caller keeps a reference to a tool, the tool object will
compare equal to the previously obtained tool object.
Note:
Physical tool tracking requires hardware support. If unavailable,
libinput creates one tool per type per tablet. See
`Tracking unique tools`_ for more details.
Returns:
~libinput.define.TabletTool: The new tool triggering this event.
"""
htablettool = self._libinput.libinput_event_tablet_tool_get_tool(
self._handle)
return TabletTool(htablettool, self._libinput) | [
"def",
"tool",
"(",
"self",
")",
":",
"htablettool",
"=",
"self",
".",
"_libinput",
".",
"libinput_event_tablet_tool_get_tool",
"(",
"self",
".",
"_handle",
")",
"return",
"TabletTool",
"(",
"htablettool",
",",
"self",
".",
"_libinput",
")"
] | 33.470588 | 0.025641 |
def render_value(self, value, **options):
"""Render value"""
renderer = self.renderers.get(type(value), lambda value, **options: value)
return renderer(value, **options) | [
"def",
"render_value",
"(",
"self",
",",
"value",
",",
"*",
"*",
"options",
")",
":",
"renderer",
"=",
"self",
".",
"renderers",
".",
"get",
"(",
"type",
"(",
"value",
")",
",",
"lambda",
"value",
",",
"*",
"*",
"options",
":",
"value",
")",
"return",
"renderer",
"(",
"value",
",",
"*",
"*",
"options",
")"
] | 47.5 | 0.015544 |
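The row above is a type-dispatch pattern: a dict maps types to renderer callables, with a pass-through default for unregistered types. A standalone sketch with hypothetical renderers:

import datetime

renderers = {
    datetime.date: lambda value, **options: value.isoformat(),
    float: lambda value, precision=2, **options: round(value, precision),
}

def render_value(value, **options):
    renderer = renderers.get(type(value), lambda value, **options: value)
    return renderer(value, **options)

print(render_value(datetime.date(2020, 1, 2)))  # '2020-01-02'
print(render_value(3.14159, precision=3))       # 3.142
print(render_value("unchanged"))                # 'unchanged'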
def rm(path, recursive=False):
"""Delete a specified file or directory. This function does not recursively
delete by default.
>>> if rm('/tmp/build', recursive=True):
... print('OK')
OK
"""
try:
if recursive:
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
else:
if os.path.isfile(path):
os.remove(path)
else:
os.rmdir(path)
except OSError as error:
log.error('rm: execute failed: %s (%s)' % (path, error))
return False
return True | [
"def",
"rm",
"(",
"path",
",",
"recursive",
"=",
"False",
")",
":",
"try",
":",
"if",
"recursive",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"os",
".",
"remove",
"(",
"path",
")",
"else",
":",
"shutil",
".",
"rmtree",
"(",
"path",
")",
"else",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"os",
".",
"remove",
"(",
"path",
")",
"else",
":",
"os",
".",
"rmdir",
"(",
"path",
")",
"except",
"OSError",
"as",
"error",
":",
"log",
".",
"error",
"(",
"'rm: execute failed: %s (%s)'",
"%",
"(",
"path",
",",
"error",
")",
")",
"return",
"False",
"return",
"True"
] | 26.913043 | 0.00156 |
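A usage sketch, assuming the rm() above and its module imports (os, shutil, log) are available; only success paths are shown, so the error logger is never reached:

import os
import tempfile

d = tempfile.mkdtemp()
f = os.path.join(d, 'f.txt')
open(f, 'w').close()
print(rm(f))                  # True: a plain file is removed even without recursive
print(rm(d, recursive=True))  # True: shutil.rmtree handles the directory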
def _ldtpize_accessible(self, acc):
"""
        Get LDTP format accessible name
@param acc: Accessible handle
@type acc: object
@return: object type, stripped object name (associated / direct),
associated label
@rtype: tuple
"""
actual_role = self._get_role(acc)
label = self._get_title(acc)
if re.match("AXWindow", actual_role, re.M | re.U | re.L):
# Strip space and new line from window title
strip = r"( |\n)"
else:
# Strip space, colon, dot, underscore and new line from
# all other object types
strip = r"( |:|\.|_|\n)"
if label:
            # Return the role type (if not in the known list of roles,
            # return ukn - unknown), strip the above characters from the name
            # and also return the label_by string
label = re.sub(strip, u"", label)
role = abbreviated_roles.get(actual_role, "ukn")
if self._ldtp_debug and role == "ukn":
print(actual_role, acc)
return role, label | [
"def",
"_ldtpize_accessible",
"(",
"self",
",",
"acc",
")",
":",
"actual_role",
"=",
"self",
".",
"_get_role",
"(",
"acc",
")",
"label",
"=",
"self",
".",
"_get_title",
"(",
"acc",
")",
"if",
"re",
".",
"match",
"(",
"\"AXWindow\"",
",",
"actual_role",
",",
"re",
".",
"M",
"|",
"re",
".",
"U",
"|",
"re",
".",
"L",
")",
":",
"# Strip space and new line from window title",
"strip",
"=",
"r\"( |\\n)\"",
"else",
":",
"# Strip space, colon, dot, underscore and new line from",
"# all other object types",
"strip",
"=",
"r\"( |:|\\.|_|\\n)\"",
"if",
"label",
":",
"# Return the role type (if, not in the know list of roles,",
"# return ukn - unknown), strip the above characters from name",
"# also return labely_by string",
"label",
"=",
"re",
".",
"sub",
"(",
"strip",
",",
"u\"\"",
",",
"label",
")",
"role",
"=",
"abbreviated_roles",
".",
"get",
"(",
"actual_role",
",",
"\"ukn\"",
")",
"if",
"self",
".",
"_ldtp_debug",
"and",
"role",
"==",
"\"ukn\"",
":",
"print",
"(",
"actual_role",
",",
"acc",
")",
"return",
"role",
",",
"label"
] | 37.310345 | 0.001802 |
def make_parser(func_sig, description, epilog, add_nos):
'''
Given the signature of a function, create an ArgumentParser
'''
parser = ArgumentParser(description=description, epilog=epilog)
used_char_args = {'h'}
    # Arrange the params so that single-character arguments are first. This
    # ensures they don't have to get --long versions. sorted is stable, so the
    # parameters will otherwise still be in relative order.
params = sorted(
func_sig.parameters.values(),
key=lambda param: len(param.name) > 1)
for param in params:
_add_arguments(param, parser, used_char_args, add_nos)
return parser | [
"def",
"make_parser",
"(",
"func_sig",
",",
"description",
",",
"epilog",
",",
"add_nos",
")",
":",
"parser",
"=",
"ArgumentParser",
"(",
"description",
"=",
"description",
",",
"epilog",
"=",
"epilog",
")",
"used_char_args",
"=",
"{",
"'h'",
"}",
"# Arange the params so that single-character arguments are first. This",
"# esnures they don't have to get --long versions. sorted is stable, so the",
"# parameters will otherwise still be in relative order.",
"params",
"=",
"sorted",
"(",
"func_sig",
".",
"parameters",
".",
"values",
"(",
")",
",",
"key",
"=",
"lambda",
"param",
":",
"len",
"(",
"param",
".",
"name",
")",
">",
"1",
")",
"for",
"param",
"in",
"params",
":",
"_add_arguments",
"(",
"param",
",",
"parser",
",",
"used_char_args",
",",
"add_nos",
")",
"return",
"parser"
] | 33.842105 | 0.001513 |
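A standalone demo of the parameter ordering above: sorted() with a boolean key is stable, so single-character names come first while relative order is otherwise preserved:

import inspect

def func(verbose, x, output, n):
    pass

sig = inspect.signature(func)
params = sorted(sig.parameters.values(), key=lambda param: len(param.name) > 1)
print([p.name for p in params])  # ['x', 'n', 'verbose', 'output']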
def get_grade_systems_by_search(self, grade_system_query, grade_system_search):
"""Pass through to provider GradeSystemSearchSession.get_grade_systems_by_search"""
# Implemented from azosid template for -
# osid.resource.ResourceSearchSession.get_resources_by_search_template
if not self._can('search'):
raise PermissionDenied()
return self._provider_session.get_grade_systems_by_search(grade_system_query, grade_system_search) | [
"def",
"get_grade_systems_by_search",
"(",
"self",
",",
"grade_system_query",
",",
"grade_system_search",
")",
":",
"# Implemented from azosid template for -",
"# osid.resource.ResourceSearchSession.get_resources_by_search_template",
"if",
"not",
"self",
".",
"_can",
"(",
"'search'",
")",
":",
"raise",
"PermissionDenied",
"(",
")",
"return",
"self",
".",
"_provider_session",
".",
"get_grade_systems_by_search",
"(",
"grade_system_query",
",",
"grade_system_search",
")"
] | 67.571429 | 0.008351 |
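The row above is an instance of the authorization-proxy pattern: check a permission, then delegate to the wrapped provider session. A minimal sketch with hypothetical names:

class PermissionDenied(Exception):
    pass

class SearchProxy:
    def __init__(self, provider_session, can):
        self._provider_session = provider_session
        self._can = can  # callable: permission name -> bool

    def search(self, query):
        if not self._can('search'):
            raise PermissionDenied()
        return self._provider_session.search(query)

class _FakeProvider:
    def search(self, query):
        return ['grade-system-1']

proxy = SearchProxy(_FakeProvider(), can=lambda perm: perm == 'search')
print(proxy.search('math'))  # ['grade-system-1']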
def _misalign_split(self,alns):
"""Requires alignment strings have been set so for each exon we have
query, target and query_quality
_has_quality will specify whether or not the quality is meaningful
"""
total = []
z = 0
for x in alns:
z += 1
exon_num = z
if self._alignment.strand == '-':
exon_num = (len(alns)-z)+1
buffer = {'query':x['query'][0],'target':x['target'][0],'query_quality':x['query_quality'][0],'exon':exon_num}
if buffer['query'] == '-': buffer['nt'] = buffer['target']
elif buffer['target'] == '-': buffer['nt'] = buffer['query']
elif buffer['query'] == buffer['target']: buffer['nt'] = buffer['query']
elif buffer['query'] != buffer['target']: buffer['nt'] = '*'
else:
        sys.stderr.write("WARNING unknown case\n")
for i in range(1,len(x['query'])):
qchar = x['query'][i]
tchar = x['target'][i]
qualchar = x['query_quality'][i]
if qchar != tchar and (qchar != '-' and tchar != '-'):
#classic mismatch
#print 'mismatch'
#print buffer
total.append(buffer)
buffer = {'query':qchar,'target':tchar,'query_quality':qualchar,'exon':exon_num}
buffer['nt'] = '*'
elif qchar == buffer['nt'] or tchar == buffer['nt']:
          # it's a homopolymer match
buffer['query'] += qchar
buffer['target'] += tchar
buffer['query_quality'] += qualchar
#print 'homopoly'
else:
#print 'new thing'
#print buffer
total.append(buffer)
buffer = {'query':qchar,'target':tchar,'query_quality':qualchar,'exon':exon_num}
if qchar == '-': buffer['nt'] = tchar
else: buffer['nt'] = qchar
total.append(buffer)
result = [AlignmentErrors.HPAGroup(self,y) for y in total]
return result | [
"def",
"_misalign_split",
"(",
"self",
",",
"alns",
")",
":",
"total",
"=",
"[",
"]",
"z",
"=",
"0",
"for",
"x",
"in",
"alns",
":",
"z",
"+=",
"1",
"exon_num",
"=",
"z",
"if",
"self",
".",
"_alignment",
".",
"strand",
"==",
"'-'",
":",
"exon_num",
"=",
"(",
"len",
"(",
"alns",
")",
"-",
"z",
")",
"+",
"1",
"buffer",
"=",
"{",
"'query'",
":",
"x",
"[",
"'query'",
"]",
"[",
"0",
"]",
",",
"'target'",
":",
"x",
"[",
"'target'",
"]",
"[",
"0",
"]",
",",
"'query_quality'",
":",
"x",
"[",
"'query_quality'",
"]",
"[",
"0",
"]",
",",
"'exon'",
":",
"exon_num",
"}",
"if",
"buffer",
"[",
"'query'",
"]",
"==",
"'-'",
":",
"buffer",
"[",
"'nt'",
"]",
"=",
"buffer",
"[",
"'target'",
"]",
"elif",
"buffer",
"[",
"'target'",
"]",
"==",
"'-'",
":",
"buffer",
"[",
"'nt'",
"]",
"=",
"buffer",
"[",
"'query'",
"]",
"elif",
"buffer",
"[",
"'query'",
"]",
"==",
"buffer",
"[",
"'target'",
"]",
":",
"buffer",
"[",
"'nt'",
"]",
"=",
"buffer",
"[",
"'query'",
"]",
"elif",
"buffer",
"[",
"'query'",
"]",
"!=",
"buffer",
"[",
"'target'",
"]",
":",
"buffer",
"[",
"'nt'",
"]",
"=",
"'*'",
"else",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"WARNING unkonwn case\\n\"",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"x",
"[",
"'query'",
"]",
")",
")",
":",
"qchar",
"=",
"x",
"[",
"'query'",
"]",
"[",
"i",
"]",
"tchar",
"=",
"x",
"[",
"'target'",
"]",
"[",
"i",
"]",
"qualchar",
"=",
"x",
"[",
"'query_quality'",
"]",
"[",
"i",
"]",
"if",
"qchar",
"!=",
"tchar",
"and",
"(",
"qchar",
"!=",
"'-'",
"and",
"tchar",
"!=",
"'-'",
")",
":",
"#classic mismatch",
"#print 'mismatch'",
"#print buffer",
"total",
".",
"append",
"(",
"buffer",
")",
"buffer",
"=",
"{",
"'query'",
":",
"qchar",
",",
"'target'",
":",
"tchar",
",",
"'query_quality'",
":",
"qualchar",
",",
"'exon'",
":",
"exon_num",
"}",
"buffer",
"[",
"'nt'",
"]",
"=",
"'*'",
"elif",
"qchar",
"==",
"buffer",
"[",
"'nt'",
"]",
"or",
"tchar",
"==",
"buffer",
"[",
"'nt'",
"]",
":",
"# its a homopolymer match",
"buffer",
"[",
"'query'",
"]",
"+=",
"qchar",
"buffer",
"[",
"'target'",
"]",
"+=",
"tchar",
"buffer",
"[",
"'query_quality'",
"]",
"+=",
"qualchar",
"#print 'homopoly'",
"else",
":",
"#print 'new thing'",
"#print buffer",
"total",
".",
"append",
"(",
"buffer",
")",
"buffer",
"=",
"{",
"'query'",
":",
"qchar",
",",
"'target'",
":",
"tchar",
",",
"'query_quality'",
":",
"qualchar",
",",
"'exon'",
":",
"exon_num",
"}",
"if",
"qchar",
"==",
"'-'",
":",
"buffer",
"[",
"'nt'",
"]",
"=",
"tchar",
"else",
":",
"buffer",
"[",
"'nt'",
"]",
"=",
"qchar",
"total",
".",
"append",
"(",
"buffer",
")",
"result",
"=",
"[",
"AlignmentErrors",
".",
"HPAGroup",
"(",
"self",
",",
"y",
")",
"for",
"y",
"in",
"total",
"]",
"return",
"result"
] | 39.234043 | 0.037566 |
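A simplified sketch of the homopolymer grouping idea above: consecutive identical nucleotides collapse into one group, which itertools.groupby expresses directly for a plain sequence (the buffer logic above does the aligned, two-sequence analogue):

from itertools import groupby

def homopolymer_runs(seq):
    return [(nt, len(list(run))) for nt, run in groupby(seq)]

print(homopolymer_runs("AAACCGTT"))  # [('A', 3), ('C', 2), ('G', 1), ('T', 2)]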
def all(cls, sort=None, limit=None):
"""Returns all objects of this type. Alias for where() (without filter arguments).
See `where` for documentation on the `sort` and `limit` parameters.
"""
return cls.where(sort=sort, limit=limit) | [
"def",
"all",
"(",
"cls",
",",
"sort",
"=",
"None",
",",
"limit",
"=",
"None",
")",
":",
"return",
"cls",
".",
"where",
"(",
"sort",
"=",
"sort",
",",
"limit",
"=",
"limit",
")"
] | 43.333333 | 0.011321 |
def mass_integral(self, x, axis_ratio):
"""Routine to integrate an elliptical light profiles - set axis ratio to 1 to compute the luminosity within a \
circle"""
r = x * axis_ratio
return 2 * np.pi * r * self.convergence_func(x) | [
"def",
"mass_integral",
"(",
"self",
",",
"x",
",",
"axis_ratio",
")",
":",
"r",
"=",
"x",
"*",
"axis_ratio",
"return",
"2",
"*",
"np",
".",
"pi",
"*",
"r",
"*",
"self",
".",
"convergence_func",
"(",
"x",
")"
] | 51.2 | 0.011538 |
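A worked example of the integrand above with scipy: the enclosed quantity is the integral of 2*pi*r*kappa(x) with r = x*axis_ratio. The power-law convergence used here is hypothetical, not from the dataset row:

import numpy as np
from scipy import integrate

axis_ratio = 1.0                   # 1 reduces to a circular aperture
convergence = lambda x: x ** -1.0  # hypothetical kappa(x)
integrand = lambda x: 2 * np.pi * (x * axis_ratio) * convergence(x)

total, err = integrate.quad(integrand, 1e-4, 1.0)
print(total)  # ~6.2826, i.e. 2*pi*(1 - 1e-4) for this kappa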
def _fix(node):
"""Fix the naive construction of the adjont.
See `fixes.py` for details.
This function also returns the result of reaching definitions analysis so
that `split` mode can use this to carry over the state from primal to
adjoint.
Args:
node: A module with the primal and adjoint function definitions as returned
by `reverse_ad`.
Returns:
node: A module with the primal and adjoint function with additional
variable definitions and such added so that pushes onto the stack and
gradient accumulations are all valid.
defined: The variables defined at the end of the primal.
reaching: The variable definitions that reach the end of the primal.
"""
# Do reaching definitions analysis on primal and adjoint
pri_cfg = cfg.CFG.build_cfg(node.body[0])
defined = cfg.Defined()
defined.visit(pri_cfg.entry)
reaching = cfg.ReachingDefinitions()
reaching.visit(pri_cfg.entry)
cfg.forward(node.body[1], cfg.Defined())
cfg.forward(node.body[1], cfg.ReachingDefinitions())
# Remove pushes of variables that were never defined
fixes.CleanStack().visit(node)
fixes.FixStack().visit(node.body[0])
# Change accumulation into definition if possible
fixes.CleanGrad().visit(node.body[1])
# Define gradients that might or might not be defined
fixes.FixGrad().visit(node.body[1])
return node, defined.exit, reaching.exit | [
"def",
"_fix",
"(",
"node",
")",
":",
"# Do reaching definitions analysis on primal and adjoint",
"pri_cfg",
"=",
"cfg",
".",
"CFG",
".",
"build_cfg",
"(",
"node",
".",
"body",
"[",
"0",
"]",
")",
"defined",
"=",
"cfg",
".",
"Defined",
"(",
")",
"defined",
".",
"visit",
"(",
"pri_cfg",
".",
"entry",
")",
"reaching",
"=",
"cfg",
".",
"ReachingDefinitions",
"(",
")",
"reaching",
".",
"visit",
"(",
"pri_cfg",
".",
"entry",
")",
"cfg",
".",
"forward",
"(",
"node",
".",
"body",
"[",
"1",
"]",
",",
"cfg",
".",
"Defined",
"(",
")",
")",
"cfg",
".",
"forward",
"(",
"node",
".",
"body",
"[",
"1",
"]",
",",
"cfg",
".",
"ReachingDefinitions",
"(",
")",
")",
"# Remove pushes of variables that were never defined",
"fixes",
".",
"CleanStack",
"(",
")",
".",
"visit",
"(",
"node",
")",
"fixes",
".",
"FixStack",
"(",
")",
".",
"visit",
"(",
"node",
".",
"body",
"[",
"0",
"]",
")",
"# Change accumulation into definition if possible",
"fixes",
".",
"CleanGrad",
"(",
")",
".",
"visit",
"(",
"node",
".",
"body",
"[",
"1",
"]",
")",
"# Define gradients that might or might not be defined",
"fixes",
".",
"FixGrad",
"(",
")",
".",
"visit",
"(",
"node",
".",
"body",
"[",
"1",
"]",
")",
"return",
"node",
",",
"defined",
".",
"exit",
",",
"reaching",
".",
"exit"
] | 35 | 0.01283 |