function | label | info
---|---|---|
def main(self, folder, field_name, value, as_json=False):
fields = folder.get_fields()
if as_json:
try:
value = json.loads(value)
except ValueError:
raise JirafsError(
"Value '%s' could not be decoded as JSON." % (
value,
)
)
key_dotpath = None
if '.' in field_name:
field_name, key_dotpath = field_name.split('.', 1)
if field_name not in fields:
raise JirafsError("Field '%s' does not exist." % field_name)
if key_dotpath:
data = fields[field_name]
try:
cursor = data
dotpath_parts = key_dotpath.split('.')
last_key = len(dotpath_parts) - 1
for idx, component in enumerate(dotpath_parts):
if idx == last_key:
cursor[component] = value
break
elif not isinstance(cursor.get(component), dict):
raise JirafsError(
"Key '%s' (of dotpath '%s') is not an object "
"in field '%s'." % (
component,
key_dotpath,
field_name,
)
)
else:
if component not in data:
raise JirafsError(
"Key '%s' (of dotpath '%s') could not be found "
"in field '%s'." % (
component,
key_dotpath,
field_name,
)
)
cursor = cursor[component]
except (__HOLE__, TypeError):
raise JirafsError(
"Field '%s' could not be parsed as JSON for retrieving "
"dotpath '%s'." % (
field_name,
key_dotpath,
)
)
value = data
else:
data = value
fields[field_name] = data
fields.write() | ValueError | dataset/ETHPy150Open coddingtonbear/jirafs/jirafs/commands/setfield.py/Command.main |
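The masked exception in this row is `ValueError` (per the label); the snippet already catches it explicitly around `json.loads`, and the dotpath traversal guards against related lookup failures. A minimal, self-contained sketch of the same pattern, using illustrative names rather than Jirafs' actual API, in which lookup errors are surfaced to the caller as `ValueError`:

```python
import json

def set_dotpath(fields, dotpath, raw_value):
    """Decode raw_value as JSON and store it at a dotted key path inside fields."""
    try:
        value = json.loads(raw_value)
    except ValueError as exc:            # json.JSONDecodeError subclasses ValueError
        raise ValueError("Value %r could not be decoded as JSON: %s" % (raw_value, exc))
    cursor = fields
    parts = dotpath.split('.')
    try:
        for part in parts[:-1]:
            cursor = cursor[part]        # KeyError if a segment is missing
        cursor[parts[-1]] = value        # TypeError if cursor is not a dict
    except (KeyError, TypeError) as exc:
        raise ValueError("Cannot set dotpath %r: %s" % (dotpath, exc))
    return fields

print(set_dotpath({"customfield": {"flags": {}}}, "customfield.flags.urgent", "true"))
```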
def parse_cpp_files(test_directory):
"""Parse cpp files looking for snippet marks.
Args:
test_directory: Directory to look for cpp files in.
Returns:
Tuple of file_list and snippet_list where file_list is a list of files that need to be
updated with documentation and snippet_list is a dictionary of lists
indexed by string tokens / identifiers. The lists in the snippet_list dictionary
contain lines of code snippets to be inserted at points identified by the
token that indexes the set of lines.
"""
file_list = []
snippet_list = {}
for path, dirs, files in os.walk(test_directory):
for cpp_file in files:
if not re.match(r'.*\.c(pp|c)$', cpp_file): continue
parse_lines = False
snippet_lines = []
token = ''
md_file = ''
cpp_file_path = os.path.join(path, cpp_file)
try:
with open(cpp_file_path, 'r') as ccfile:
for line in ccfile:
match = CPPDOXYSNIPPETSTART_RE.match(line)
if match:
parse_lines = True
group_dict = match.groupdict()
md_file = group_dict['md_file']
token = group_dict['token']
if md_file not in file_list:
file_list.append(md_file)
elif DOXYSNIPPETEND_RE.match(line):
parse_lines = False
snippet_list[token] = snippet_lines
elif parse_lines:
snippet_lines.append(line)
except __HOLE__ as e:
print 'ERROR: Failed to open file %s: %s' % (cpp_file, e.strerror)
if parse_lines is True:
print 'WARNING: Could not find end of %s. Skipping.' % (token)
return (file_list, snippet_list) | IOError | dataset/ETHPy150Open google/fplutil/docs/update_code_snippets.py/parse_cpp_files |
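The hole here is `IOError`, raised when a source file cannot be opened. A stripped-down sketch of the same start/end-marker scan; the two regexes are hypothetical stand-ins for `CPPDOXYSNIPPETSTART_RE` and `DOXYSNIPPETEND_RE`:

```python
import re

START_RE = re.compile(r'// <snippet (?P<token>\w+)>')   # hypothetical marker format
END_RE = re.compile(r'// </snippet>')

def extract_snippets(path):
    snippets, token, lines, capturing = {}, None, [], False
    try:
        with open(path, 'r') as handle:
            for line in handle:
                start = START_RE.match(line)
                if start:
                    capturing, token, lines = True, start.group('token'), []
                elif END_RE.match(line):
                    capturing = False
                    snippets[token] = lines
                elif capturing:
                    lines.append(line)
    except IOError as err:               # the masked exception: unreadable file
        print('ERROR: Failed to open file %s: %s' % (path, err.strerror))
    if capturing:
        print('WARNING: Could not find end of %s. Skipping.' % token)
    return snippets
```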
def update_md_files(md_directory, file_list, snippet_list):
"""Update md files from snippets.
Args:
md_directory: Directory to look for md files in.
file_list: List of md files that need to be updated with snippets.
snippet_list: Dict of snippet line lists, keyed by token, to insert into the md files.
"""
for md_file in file_list:
path = find_file(md_file, md_directory)
if not path:
print >> sys.stderr, 'WARNING: Cannot find %s, skipping.' % md_file
continue
new_file_handle = tempfile.NamedTemporaryFile(delete=False)
temp_file_name = new_file_handle.name
write_lines = True
try:
with open(path, 'r') as mdfile:
for line in mdfile:
match = MDDOXYSNIPPETSTART_RE.match(line)
if match:
token = match.groupdict()['token']
new_file_handle.write(line)
if snippet_list.has_key(token):
write_lines = False
for snippet_line in snippet_list[token]:
new_file_handle.write(snippet_line)
elif DOXYSNIPPETEND_RE.match(line):
write_lines = True
new_file_handle.write(line)
elif write_lines:
new_file_handle.write(line)
except __HOLE__ as e:
print >> sys.stderr, (
'ERROR: Failed to open file %s: %s' % (md_file, e.strerror))
os.remove(path)
continue
if write_lines is False:
print >> sys.stderr, 'WARNING: Could not find end of %s.' % (token)
new_file_handle.close()
os.remove(path)
shutil.move(temp_file_name, path) | IOError | dataset/ETHPy150Open google/fplutil/docs/update_code_snippets.py/update_md_files |
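Same masked exception (`IOError`). The surrounding write-to-a-temp-file-then-`shutil.move` idiom is the usual way to rewrite a file without corrupting it halfway through; a compact sketch with a caller-supplied transform:

```python
import os
import shutil
import tempfile

def rewrite_file(path, transform):
    """Rewrite path line by line through transform(), then swap the result into place."""
    tmp = tempfile.NamedTemporaryFile('w', delete=False)
    try:
        with open(path, 'r') as source:
            for line in source:
                tmp.write(transform(line))
    except IOError as err:               # the masked exception: source missing or unreadable
        print('ERROR: Failed to open file %s: %s' % (path, err.strerror))
        tmp.close()
        os.remove(tmp.name)              # discard the partial temp file
        return False
    tmp.close()
    shutil.move(tmp.name, path)          # replace the original only on success
    return True
```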
def create_project(args):
print "Creating your project..."
project_name = args.name
create_dirs = [
project_name,
os.path.join(project_name, 'app/bundles'),
os.path.join(project_name, 'app/config')
]
for d in create_dirs:
try:
os.makedirs(d)
except __HOLE__ as exception:
if exception.errno != errno.EEXIST:
raise
# creating files is :TODO:
create_files = [
]
print "Directory structure created!" | OSError | dataset/ETHPy150Open gengo/decanter/decanter/decanter-admin.py/create_project |
def send_file(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None):
"""
Upload a file to a key into a bucket on S3.
:type fp: file
:param fp: The file pointer to upload. The file pointer must point
at the offset from which you wish to upload.
ie. if uploading the full file, it should point at the
start of the file. Normally when a file is opened for
reading, the fp will point at the first byte. See the
bytes parameter below for more info.
:type headers: dict
:param headers: The headers to pass along with the PUT request
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept
two integer parameters, the first representing the
number of bytes that have been successfully
transmitted to S3 and the second representing the
size of the to be transmitted object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter this parameter determines the granularity
of the callback by defining the maximum number of
times the callback will be called during the file
transfer. Providing a negative integer will cause
your callback to be called with each buffer read.
:type size: int
:param size: (optional) The Maximum number of bytes to read from
the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the
file up into different ranges to be uploaded. If not
specified, the default behaviour is to read all bytes
from the file pointer. Fewer bytes may be available.
"""
provider = self.bucket.connection.provider
try:
spos = fp.tell()
except __HOLE__:
spos = None
self.read_from_stream = False
def sender(http_conn, method, path, data, headers):
# This function is called repeatedly for temporary retries
# so we must be sure the file pointer is pointing at the
# start of the data.
if spos is not None and spos != fp.tell():
fp.seek(spos)
elif spos is None and self.read_from_stream:
# if seek is not supported, and we've read from this
# stream already, then we need to abort retries to
# avoid setting bad data.
raise provider.storage_data_error(
'Cannot retry failed request. fp does not support seeking.')
http_conn.putrequest(method, path)
for key in headers:
http_conn.putheader(key, headers[key])
http_conn.endheaders()
# Calculate all MD5 checksums on the fly, if not already computed
if not self.base64md5:
m = md5()
else:
m = None
save_debug = self.bucket.connection.debug
self.bucket.connection.debug = 0
# If the debuglevel < 3 we don't want to show connection
# payload, so turn off HTTP connection-level debug output (to
# be restored below).
# Use the getattr approach to allow this to work in AppEngine.
if getattr(http_conn, 'debuglevel', 0) < 3:
http_conn.set_debuglevel(0)
data_len = 0
if cb:
if size:
cb_size = size
elif self.size:
cb_size = self.size
else:
cb_size = 0
if chunked_transfer and cb_size == 0:
# For chunked Transfer, we call the cb for every 1MB
# of data transferred, except when we know size.
cb_count = (1024 * 1024)/self.BufferSize
elif num_cb > 1:
cb_count = int(math.ceil(cb_size/self.BufferSize/(num_cb-1.0)))
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = 0
cb(data_len, cb_size)
bytes_togo = size
if bytes_togo and bytes_togo < self.BufferSize:
chunk = fp.read(bytes_togo)
else:
chunk = fp.read(self.BufferSize)
if spos is None:
# read at least something from a non-seekable fp.
self.read_from_stream = True
while chunk:
chunk_len = len(chunk)
data_len += chunk_len
if chunked_transfer:
http_conn.send('%x;\r\n' % chunk_len)
http_conn.send(chunk)
http_conn.send('\r\n')
else:
http_conn.send(chunk)
if m:
m.update(chunk)
if bytes_togo:
bytes_togo -= chunk_len
if bytes_togo <= 0:
break
if cb:
i += 1
if i == cb_count or cb_count == -1:
cb(data_len, cb_size)
i = 0
if bytes_togo and bytes_togo < self.BufferSize:
chunk = fp.read(bytes_togo)
else:
chunk = fp.read(self.BufferSize)
self.size = data_len
if m:
# Use the chunked trailer for the digest
hd = m.hexdigest()
self.md5, self.base64md5 = self.get_md5_from_hexdigest(hd)
if chunked_transfer:
http_conn.send('0\r\n')
# http_conn.send("Content-MD5: %s\r\n" % self.base64md5)
http_conn.send('\r\n')
if cb and (cb_count <= 1 or i > 0) and data_len > 0:
cb(data_len, cb_size)
response = http_conn.getresponse()
body = response.read()
http_conn.set_debuglevel(save_debug)
self.bucket.connection.debug = save_debug
if ((response.status == 500 or response.status == 503 or
response.getheader('location')) and not chunked_transfer):
# we'll try again.
return response
elif response.status >= 200 and response.status <= 299:
self.etag = response.getheader('etag')
if self.etag != '"%s"' % self.md5:
raise provider.storage_data_error(
'ETag from S3 did not match computed MD5')
return response
else:
raise provider.storage_response_error(
response.status, response.reason, body)
if not headers:
headers = {}
else:
headers = headers.copy()
headers['User-Agent'] = UserAgent
if self.storage_class != 'STANDARD':
headers[provider.storage_class_header] = self.storage_class
if headers.has_key('Content-Encoding'):
self.content_encoding = headers['Content-Encoding']
if headers.has_key('Content-Type'):
# Some use cases need to suppress sending of the Content-Type
# header and depend on the receiving server to set the content
# type. This can be achieved by setting headers['Content-Type']
# to None when calling this method.
if headers['Content-Type'] is None:
# Delete null Content-Type value to skip sending that header.
del headers['Content-Type']
else:
self.content_type = headers['Content-Type']
elif self.path:
self.content_type = mimetypes.guess_type(self.path)[0]
if self.content_type == None:
self.content_type = self.DefaultContentType
headers['Content-Type'] = self.content_type
else:
headers['Content-Type'] = self.content_type
if self.base64md5:
headers['Content-MD5'] = self.base64md5
if chunked_transfer:
headers['Transfer-Encoding'] = 'chunked'
#if not self.base64md5:
# headers['Trailer'] = "Content-MD5"
else:
headers['Content-Length'] = str(self.size)
headers['Expect'] = '100-Continue'
headers = boto.utils.merge_meta(headers, self.metadata, provider)
resp = self.bucket.connection.make_request('PUT', self.bucket.name,
self.name, headers,
sender=sender,
query_args=query_args)
self.handle_version_headers(resp, force=True) | IOError | dataset/ETHPy150Open darcyliu/storyboard/boto/s3/key.py/Key.send_file |
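The hole in `send_file` is `IOError`: probing `fp.tell()` is how the method decides whether the stream is seekable and therefore whether a failed PUT can safely be retried. That probe on its own:

```python
import io

def seek_position(fp):
    """Return the current offset, or None if the stream cannot tell/seek."""
    try:
        return fp.tell()
    except IOError:                      # e.g. a pipe or socket wrapped as a file object
        return None

print(seek_position(io.BytesIO(b'payload')))   # 0: an in-memory stream is seekable
```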
def lex(self):
# Get lexer for language (use text as fallback)
try:
if self.language and unicode(self.language).lower() <> 'none':
lexer = get_lexer_by_name(self.language.lower(),
**self.custom_args
)
else:
lexer = get_lexer_by_name('text', **self.custom_args)
except __HOLE__:
log.info("no pygments lexer for %s, using 'text'" \
% self.language)
# what happens if pygments isn't present?
lexer = get_lexer_by_name('text')
return pygments.lex(self.code, lexer) | ValueError | dataset/ETHPy150Open rst2pdf/rst2pdf/rst2pdf/pygments_code_block_directive.py/DocutilsInterface.lex |
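The masked exception is `ValueError`: `get_lexer_by_name` raises `pygments.util.ClassNotFound`, a `ValueError` subclass, for unknown language names, and the snippet falls back to the plain-text lexer. A short sketch assuming Pygments is installed:

```python
from pygments.lexers import get_lexer_by_name

def lexer_or_text(language, **options):
    try:
        return get_lexer_by_name(language.lower(), **options)
    except ValueError:                   # ClassNotFound subclasses ValueError
        return get_lexer_by_name('text')

print(lexer_or_text('no-such-language'))   # falls back to the plain-text lexer
```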
def __iter__(self):
"""parse code string and yield "clasified" tokens
"""
try:
tokens = self.lex()
except __HOLE__:
log.info("Pygments lexer not found, using fallback")
# TODO: write message to INFO
yield ('', self.code)
return
for ttype, value in self.join(tokens):
yield (_get_ttype_class(ttype), value)
# code_block_directive
# --------------------
# :: | IOError | dataset/ETHPy150Open rst2pdf/rst2pdf/rst2pdf/pygments_code_block_directive.py/DocutilsInterface.__iter__ |
def code_block_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
"""Parse and classify content of a code_block."""
if 'include' in options:
try:
if 'encoding' in options:
encoding = options['encoding']
else:
encoding = 'utf-8'
content = codecs.open(options['include'], 'r', encoding).read().rstrip()
except (__HOLE__, UnicodeError): # no file or problem finding it or reading it
log.error('Error reading file: "%s" L %s' % (options['include'], lineno))
content = u''
line_offset = 0
if content:
# here we define the start-at and end-at options
# so that limit is included in extraction
# this is different than the start-after directive of docutils
# (docutils/parsers/rst/directives/misc.py L73+)
# which excludes the beginning
# the reason is we want to be able to define a start-at like
# def mymethod(self)
# and have such a definition included
after_text = options.get('start-at', None)
if after_text:
# skip content in include_text before *and NOT incl.* a matching text
after_index = content.find(after_text)
if after_index < 0:
raise state_machine.reporter.severe('Problem with "start-at" option of "%s" '
'code-block directive:\nText not found.' % options['start-at'])
# patch mmueller start
# Move the after_index to the beginning of the line with the
# match.
for char in content[after_index:0:-1]:
# codecs always opens binary. This works with '\n', '\r' and
# '\r\n'. We are going backwards, so '\n' is found first
# in '\r\n'.
# Going with .splitlines() seems more appropriate
# but needs a few more changes.
if char == u'\n' or char == u'\r':
break
after_index -= 1
# patch mmueller end
content = content[after_index:]
line_offset = len(content[:after_index].splitlines())
after_text = options.get('start-after', None)
if after_text:
# skip content in include_text before *and incl.* a matching text
after_index = content.find(after_text)
if after_index < 0:
raise state_machine.reporter.severe('Problem with "start-after" option of "%s" '
'code-block directive:\nText not found.' % options['start-after'])
line_offset = len(content[:after_index + len(after_text)].splitlines())
content = content[after_index + len(after_text):]
# same changes here for the same reason
before_text = options.get('end-at', None)
if before_text:
# skip content in include_text after *and incl.* a matching text
before_index = content.find(before_text)
if before_index < 0:
raise state_machine.reporter.severe('Problem with "end-at" option of "%s" '
'code-block directive:\nText not found.' % options['end-at'])
content = content[:before_index + len(before_text)]
before_text = options.get('end-before', None)
if before_text:
# skip content in include_text after *and NOT incl.* a matching text
before_index = content.find(before_text)
if before_index < 0:
raise state_machine.reporter.severe('Problem with "end-before" option of "%s" '
'code-block directive:\nText not found.' % options['end-before'])
content = content[:before_index]
else:
line_offset = options.get('linenos_offset')
content = u'\n'.join(content)
if 'tabsize' in options:
tabw = options['tabsize']
else:
tabw = int(options.get('tab-width', 8))
content = content.replace('\t',' '*tabw)
withln = "linenos" in options
if not "linenos_offset" in options:
line_offset = 0
language = arguments[0]
# create a literal block element and set class argument
code_block = nodes.literal_block(classes=["code", language])
if withln:
lineno = 1 + line_offset
total_lines = content.count('\n') + 1 + line_offset
lnwidth = len(str(total_lines))
fstr = "\n%%%dd " % lnwidth
code_block += nodes.inline(fstr[1:] % lineno, fstr[1:] % lineno, classes=['linenumber'])
# parse content with pygments and add to code_block element
for cls, value in DocutilsInterface(content, language, options):
if withln and "\n" in value:
# Split on the "\n"s
values = value.split("\n")
# The first piece, pass as-is
code_block += nodes.Text(values[0], values[0])
# On the second and later pieces, insert \n and linenos
linenos = range(lineno, lineno + len(values))
for chunk, ln in zip(values, linenos)[1:]:
if ln <= total_lines:
code_block += nodes.inline(fstr % ln, fstr % ln, classes=['linenumber'])
code_block += nodes.Text(chunk, chunk)
lineno += len(values) - 1
elif cls in unstyled_tokens:
# insert as Text to decrease the verbosity of the output.
code_block += nodes.Text(value, value)
else:
code_block += nodes.inline(value, value, classes=["pygments-" + cls])
return [code_block]
# Custom argument validators
# --------------------------
# ::
#
# Move to separated module?? | IOError | dataset/ETHPy150Open rst2pdf/rst2pdf/rst2pdf/pygments_code_block_directive.py/code_block_directive |
def get_form(self, request, name):
try:
ret = self.forms[name]
except __HOLE__:
ret = self.form
if isinstance(ret, basestring):
return import_string(ret)
else:
return ret | KeyError | dataset/ETHPy150Open IanLewis/kay/kay/generics/crud.py/CRUDViewGroup.get_form |
def _normalize_query(uri):
query = uri.query
try:
items = urlparse.parse_qsl(query, keep_blank_values=True, strict_parsing=True)
except __HOLE__:
# If we can't parse the query string, we better preserve it as it was.
return query
# Python sorts are stable, so preserving relative ordering of items with
# the same key doesn't require any work from us
items = sorted(items, key=lambda x: x[0])
# Remove query params that are blacklisted
items = [i for i in items if not _blacklisted_query_param(i[0])]
# Normalise percent-encoding for query items
query = _normalize_queryitems(items)
return query | ValueError | dataset/ETHPy150Open hypothesis/h/h/api/uri.py/_normalize_query |
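The hole is `ValueError`, which `parse_qsl(..., strict_parsing=True)` raises for malformed query strings; the snippet then preserves the query untouched. The same guard with the Python 3 `urllib.parse` names (the snippet itself uses the Python 2 `urlparse` module):

```python
from urllib.parse import parse_qsl, urlencode

def normalize_query(query):
    try:
        items = parse_qsl(query, keep_blank_values=True, strict_parsing=True)
    except ValueError:                   # unparseable query: keep it exactly as it was
        return query
    return urlencode(sorted(items, key=lambda item: item[0]))

print(normalize_query('b=2&a=1'))        # 'a=1&b=2'
print(normalize_query('oops&&a=1'))      # returned untouched
```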
def apply_updates(self, branch, initial, pin_unpinned, close_stale_prs=False):
InitialUpdateClass = self.req_bundle.get_initial_update_class()
if initial:
# get the list of pending updates
try:
_, _, _, updates = list(
self.req_bundle.get_updates(initial=initial, pin_unpinned=pin_unpinned)
)[0]
except __HOLE__:
# need to catch the index error here in case the initial update is completely
# empty
updates = False
# if this is the initial run and the update list is empty, the repo is already
# up to date. In this case, we create an issue letting the user know that the bot is
# now set up for this repo and return early.
if not updates:
self.create_issue(
title=InitialUpdateClass.get_title(),
body=InitialUpdateClass.get_empty_update_body()
)
return
# check if we have an initial PR open. If this is the case, we attach the initial PR
# to all updates and are done. The `Initial Update` has to be merged (or at least closed)
# before we continue to do anything here.
initial_pr = next(
(pr for pr in self.pull_requests if
pr.title == InitialUpdateClass.get_title() and pr.is_open),
False
)
for title, body, update_branch, updates in self.iter_updates(initial, pin_unpinned):
if initial_pr:
pull_request = initial_pr
elif title not in [pr.title for pr in self.pull_requests]:
pull_request = self.commit_and_pull(
initial=initial,
base_branch=branch,
new_branch=update_branch,
title=title,
body=body,
updates=updates
)
else:
pull_request = next((pr for pr in self.pull_requests if pr.title == title), None)
for update in updates:
update.requirement.pull_request = pull_request
if close_stale_prs and pull_request and not initial:
self.close_stale_prs(update, pull_request) | IndexError | dataset/ETHPy150Open pyupio/pyup/pyup/bot.py/Bot.apply_updates |
def perspective_persist(self, profile='jcli-prod', scope='all'):
try:
if scope in ['all', 'groups']:
# Persist groups configuration
path = '%s/%s.router-groups' % (self.config.store_path, profile)
self.log.info('Persisting current Groups configuration to [%s] profile in %s',
profile, path)
fh = open(path, 'w')
# Write configuration with datetime stamp
fh.write('Persisted on %s [Jasmin %s]\n' % (time.strftime("%c"), jasmin.get_release()))
fh.write(pickle.dumps(self.groups, self.pickleProtocol))
fh.close()
# Set persistence state to True
self.persistenceState['groups'] = True
if scope in ['all', 'users']:
# Persist users configuration
path = '%s/%s.router-users' % (self.config.store_path, profile)
self.log.info('Persisting current Users configuration to [%s] profile in %s',
profile, path)
fh = open(path, 'w')
# Write configuration with datetime stamp
fh.write('Persisted on %s [Jasmin %s]\n' % (time.strftime("%c"), jasmin.get_release()))
fh.write(pickle.dumps(self.users, self.pickleProtocol))
fh.close()
# Set persistence state to True
self.persistenceState['users'] = True
for u in self.users:
u.mt_credential.quotas_updated = False
if scope in ['all', 'moroutes']:
# Persist moroutes configuration
path = '%s/%s.router-moroutes' % (self.config.store_path, profile)
self.log.info('Persisting current MORoutingTable to [%s] profile in %s', profile, path)
fh = open(path, 'w')
# Write configuration with datetime stamp
fh.write('Persisted on %s [Jasmin %s]\n' % (time.strftime("%c"), jasmin.get_release()))
fh.write(pickle.dumps(self.mo_routing_table, self.pickleProtocol))
fh.close()
# Set persistence state to True
self.persistenceState['moroutes'] = True
if scope in ['all', 'mtroutes']:
# Persist mtroutes configuration
path = '%s/%s.router-mtroutes' % (self.config.store_path, profile)
self.log.info('Persisting current MTRoutingTable to [%s] profile in %s', profile, path)
fh = open(path, 'w')
# Write configuration with datetime stamp
fh.write('Persisted on %s [Jasmin %s]\n' % (time.strftime("%c"), jasmin.get_release()))
fh.write(pickle.dumps(self.mt_routing_table, self.pickleProtocol))
fh.close()
# Set persistence state to True
self.persistenceState['mtroutes'] = True
if scope in ['all', 'mointerceptors']:
# Persist mointerceptors configuration
path = '%s/%s.router-mointerceptors' % (self.config.store_path, profile)
self.log.info('Persisting current MOInterceptionTable to [%s] profile in %s',
profile, path)
fh = open(path, 'w')
# Write configuration with datetime stamp
fh.write('Persisted on %s [Jasmin %s]\n' % (time.strftime("%c"), jasmin.get_release()))
fh.write(pickle.dumps(self.mo_interception_table, self.pickleProtocol))
fh.close()
# Set persistence state to True
self.persistenceState['mointerceptors'] = True
if scope in ['all', 'mtinterceptors']:
# Persist mtinterceptors configuration
path = '%s/%s.router-mtinterceptors' % (self.config.store_path, profile)
self.log.info('Persisting current MTInterceptionTable to [%s] profile in %s',
profile, path)
fh = open(path, 'w')
# Write configuration with datetime stamp
fh.write('Persisted on %s [Jasmin %s]\n' % (time.strftime("%c"), jasmin.get_release()))
fh.write(pickle.dumps(self.mt_interception_table, self.pickleProtocol))
fh.close()
# Set persistence state to True
self.persistenceState['mtinterceptors'] = True
except __HOLE__:
self.log.error('Cannot persist to %s', path)
return False
except Exception, e:
self.log.error('Unknown error occurred while persisting configuration: %s', e)
return False
return True | IOError | dataset/ETHPy150Open jookies/jasmin/jasmin/routing/router.py/RouterPB.perspective_persist |
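The masked exception is `IOError`, covering any failure to write a profile file. Every scope above repeats the same write-a-timestamp-header-then-pickle pattern; condensed, with a generic path standing in for Jasmin's store layout:

```python
import pickle
import time

def persist(obj, path, release='0.0'):
    try:
        fh = open(path, 'wb')
        fh.write(('Persisted on %s [v%s]\n' % (time.strftime('%c'), release)).encode())
        fh.write(pickle.dumps(obj, protocol=2))
        fh.close()
    except IOError:                      # the masked exception: cannot write the profile
        return False
    return True

print(persist({'groups': [], 'users': []}, '/tmp/example.profile'))
```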
def perspective_load(self, profile='jcli-prod', scope='all'):
try:
if scope in ['all', 'groups']:
# Load groups configuration
path = '%s/%s.router-groups' % (self.config.store_path, profile)
self.log.info('Loading/Activating [%s] profile Groups configuration from %s',
profile, path)
# Load configuration from file
fh = open(path, 'r')
lines = fh.readlines()
fh.close()
# Init migrator
cf = ConfigurationMigrator(context='groups', header=lines[0], data=''.join(lines[1:]))
# Remove current configuration
self.log.info('Removing current Groups (%d)', len(self.groups))
self.perspective_group_remove_all()
# Adding new groups
self.groups = cf.getMigratedData()
self.log.info('Added new Groups (%d)', len(self.groups))
# Set persistence state to True
self.persistenceState['groups'] = True
if scope in ['all', 'users']:
# Load users configuration
path = '%s/%s.router-users' % (self.config.store_path, profile)
self.log.info('Loading/Activating [%s] profile Users configuration from %s',
profile, path)
# Load configuration from file
fh = open(path, 'r')
lines = fh.readlines()
fh.close()
# Init migrator
cf = ConfigurationMigrator(context='users', header=lines[0], data=''.join(lines[1:]))
# Remove current configuration
self.log.info('Removing current Users (%d)', len(self.users))
self.perspective_user_remove_all()
# Adding new users
self.users = cf.getMigratedData()
self.log.info('Added new Users (%d)', len(self.users))
# Set persistence state to True
self.persistenceState['users'] = True
for u in self.users:
u.mt_credential.quotas_updated = False
if scope in ['all', 'mointerceptors']:
# Load mointerceptors configuration
path = '%s/%s.router-mointerceptors' % (self.config.store_path, profile)
self.log.info('Loading/Activating [%s] profile MO Interceptors configuration from %s',
profile, path)
# Load configuration from file
fh = open(path, 'r')
lines = fh.readlines()
fh.close()
# Init migrator
cf = ConfigurationMigrator(context='mointerceptors',
header=lines[0], data=''.join(lines[1:]))
# Adding new MO Interceptors
self.mo_interception_table = cf.getMigratedData()
self.log.info('Added new MOInterceptionTable with %d routes',
len(self.mo_interception_table.getAll()))
# Set persistence state to True
self.persistenceState['mointerceptors'] = True
if scope in ['all', 'mtinterceptors']:
# Load mtinterceptors configuration
path = '%s/%s.router-mtinterceptors' % (self.config.store_path, profile)
self.log.info('Loading/Activating [%s] profile MT Interceptors configuration from %s',
profile, path)
# Load configuration from file
fh = open(path, 'r')
lines = fh.readlines()
fh.close()
# Init migrator
cf = ConfigurationMigrator(context='mtinterceptors',
header=lines[0], data=''.join(lines[1:]))
# Adding new MT Interceptors
self.mt_interception_table = cf.getMigratedData()
self.log.info('Added new MTInterceptionTable with %d routes',
len(self.mt_interception_table.getAll()))
# Set persistence state to True
self.persistenceState['mtinterceptors'] = True
if scope in ['all', 'moroutes']:
# Load moroutes configuration
path = '%s/%s.router-moroutes' % (self.config.store_path, profile)
self.log.info('Loading/Activating [%s] profile MO Routes configuration from %s',
profile, path)
# Load configuration from file
fh = open(path, 'r')
lines = fh.readlines()
fh.close()
# Init migrator
cf = ConfigurationMigrator(context='moroutes',
header=lines[0], data=''.join(lines[1:]))
# Adding new MO Routes
self.mo_routing_table = cf.getMigratedData()
self.log.info('Added new MORoutingTable with %d routes',
len(self.mo_routing_table.getAll()))
# Set persistence state to True
self.persistenceState['moroutes'] = True
if scope in ['all', 'mtroutes']:
# Load mtroutes configuration
path = '%s/%s.router-mtroutes' % (self.config.store_path, profile)
self.log.info('Loading/Activating [%s] profile MT Routes configuration from %s',
profile, path)
# Load configuration from file
fh = open(path, 'r')
lines = fh.readlines()
fh.close()
# Init migrator
cf = ConfigurationMigrator(context='mtroutes',
header=lines[0], data=''.join(lines[1:]))
# Adding new MT Routes
self.mt_routing_table = cf.getMigratedData()
self.log.info('Added new MTRoutingTable with %d routes',
len(self.mt_routing_table.getAll()))
# Set persistence state to True
self.persistenceState['mtroutes'] = True
except __HOLE__, e:
self.log.error('Cannot load configuration from %s: %s', path, str(e))
return False
except Exception, e:
self.log.error('Unknown error occurred while loading configuration: %s', e)
return False
return True | IOError | dataset/ETHPy150Open jookies/jasmin/jasmin/routing/router.py/RouterPB.perspective_load |
def test_listrecursion(self):
x = []
x.append(x)
try:
self.dumps(x)
except __HOLE__:
pass
else:
self.fail("didn't raise ValueError on list recursion")
x = []
y = [x]
x.append(y)
try:
self.dumps(x)
except ValueError:
pass
else:
self.fail("didn't raise ValueError on alternating list recursion")
y = []
x = [y, y]
# ensure that the marker is cleared
self.dumps(x) | ValueError | dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_json/test_recursion.py/TestRecursion.test_listrecursion |
def test_dictrecursion(self):
x = {}
x["test"] = x
try:
self.dumps(x)
except __HOLE__:
pass
else:
self.fail("didn't raise ValueError on dict recursion")
x = {}
y = {"a": x, "b": x}
# ensure that the marker is cleared
self.dumps(x) | ValueError | dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_json/test_recursion.py/TestRecursion.test_dictrecursion |
def test_defaultrecursion(self):
class RecursiveJSONEncoder(self.json.JSONEncoder):
recurse = False
def default(self, o):
if o is JSONTestObject:
if self.recurse:
return [JSONTestObject]
else:
return 'JSONTestObject'
return pyjson.JSONEncoder.default(o)
enc = RecursiveJSONEncoder()
self.assertEqual(enc.encode(JSONTestObject), '"JSONTestObject"')
enc.recurse = True
try:
enc.encode(JSONTestObject)
except __HOLE__:
pass
else:
self.fail("didn't raise ValueError on default recursion") | ValueError | dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/test/test_json/test_recursion.py/TestRecursion.test_defaultrecursion |
def __getitem__(self, value):
# The type conversion is required here because in templates Django
# performs a dictionary lookup before the attribute lokups
# (when a dot is encountered).
try:
value = int(value)
except (__HOLE__, ValueError):
# A TypeError says to django to continue with an attribute lookup.
raise TypeError
if 1 <= value <= len(self):
return self._endless_page(value)
raise IndexError('page list index out of range') | TypeError | dataset/ETHPy150Open shtalinberg/django-el-pagination/el_pagination/models.py/PageList.__getitem__ |
def _after_exec(self, conn, clause, multiparams, params, results):
""" SQLAlchemy event hook """
# calculate the query time
end_time = time.time()
start_time = getattr(conn, '_sqltap_query_start_time', end_time)
# get the user's context
context = (None if not self.user_context_fn
else self.user_context_fn(
conn, clause, multiparams, params, results))
try:
text = clause.compile(dialect=conn.engine.dialect)
except __HOLE__:
text = clause
params_dict = self._extract_parameters_from_results(results)
stack = traceback.extract_stack()[:-1]
qstats = QueryStats(text, stack, start_time, end_time,
context, params_dict, results)
self.collect_fn(qstats) | AttributeError | dataset/ETHPy150Open inconshreveable/sqltap/sqltap/sqltap.py/ProfilingSession._after_exec |
def download_all_files(data_folder="{}/astrometry/data".format(os.getenv('PANDIR'))):
download_IERS_A()
for i in range(4214, 4219):
fn = 'index-{}.fits'.format(i)
dest = "{}/{}".format(data_folder, fn)
if not os.path.exists(dest):
url = "http://data.astrometry.net/4200/{}".format(fn)
df = data.download_file(url)
try:
shutil.move(df, dest)
except __HOLE__ as e:
print("Problem saving. (Maybe permissions?): {}".format(e)) | OSError | dataset/ETHPy150Open panoptes/POCS/panoptes/utils/data.py/download_all_files |
def _samefile(src, dst):
# Macintosh, Unix.
if hasattr(os.path,'samefile'):
try:
return os.path.samefile(src, dst)
except __HOLE__:
return False
# All other platforms: check for same pathname.
return (os.path.normcase(os.path.abspath(src)) ==
os.path.normcase(os.path.abspath(dst))) | OSError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/core/files/move.py/_samefile |
def file_move_safe(old_file_name, new_file_name, chunk_size = 1024*64, allow_overwrite=False):
"""
Moves a file from one location to another in the safest way possible.
First, tries ``os.rename``, which is simple but will break across filesystems.
If that fails, streams manually from one file to another in pure Python.
If the destination file exists and ``allow_overwrite`` is ``False``, this
function will throw an ``IOError``.
"""
# There's no reason to move if we don't have to.
if _samefile(old_file_name, new_file_name):
return
try:
os.rename(old_file_name, new_file_name)
return
except __HOLE__:
# This will happen with os.rename if moving to another filesystem
# or when moving opened files on certain operating systems
pass
# first open the old file, so that it won't go away
with open(old_file_name, 'rb') as old_file:
# now open the new file, not forgetting allow_overwrite
fd = os.open(new_file_name, os.O_WRONLY | os.O_CREAT | getattr(os, 'O_BINARY', 0) |
(not allow_overwrite and os.O_EXCL or 0))
try:
locks.lock(fd, locks.LOCK_EX)
current_chunk = None
while current_chunk != b'':
current_chunk = old_file.read(chunk_size)
os.write(fd, current_chunk)
finally:
locks.unlock(fd)
os.close(fd)
copystat(old_file_name, new_file_name)
try:
os.remove(old_file_name)
except OSError as e:
# Certain operating systems (Cygwin and Windows)
# fail when deleting opened files, ignore it. (For the
# systems where this happens, temporary files will be auto-deleted
# on close anyway.)
if getattr(e, 'winerror', 0) != 32 and getattr(e, 'errno', 0) != 13:
raise | OSError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/core/files/move.py/file_move_safe |
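Both helpers mask `OSError`: `os.path.samefile` raises it for a missing path, and `os.rename` raises it (typically `EXDEV`) when the destination sits on a different filesystem, which is what pushes `file_move_safe` onto the manual copy path. The fallback in miniature:

```python
import os
import shutil

def move(src, dst):
    try:
        os.rename(src, dst)              # fast path: same filesystem, effectively atomic
    except OSError:                      # e.g. EXDEV (cross-device) or an open file on Windows
        shutil.copy2(src, dst)           # stream the bytes and copy metadata
        os.remove(src)
```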
def try_import(*module_names):
for module_name in module_names:
try:
return import_module(module_name)
except __HOLE__:
continue | ImportError | dataset/ETHPy150Open mbr/flask-appconfig/flask_appconfig/util.py/try_import |
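`try_import` masks `ImportError` and returns the first module that imports cleanly (or `None` when none do). A typical use, preferring a faster JSON backend when it is available:

```python
json_impl = try_import('simplejson', 'json')   # falls back to the stdlib json module
if json_impl is None:
    raise RuntimeError('no JSON implementation available')
print(json_impl.dumps({'ok': True}))
```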
def test_group_protocols(coordinator):
# Requires a subscription
try:
coordinator.group_protocols()
except __HOLE__:
pass
else:
assert False, 'Exception not raised when expected'
coordinator._subscription.subscribe(topics=['foobar'])
assert coordinator.group_protocols() == [
('range', ConsumerProtocolMemberMetadata(
RangePartitionAssignor.version,
['foobar'],
b'')),
('roundrobin', ConsumerProtocolMemberMetadata(
RoundRobinPartitionAssignor.version,
['foobar'],
b'')),
] | AssertionError | dataset/ETHPy150Open dpkp/kafka-python/test/test_coordinator.py/test_group_protocols |
def test_fetch_committed_offsets(mocker, coordinator):
# No partitions, no IO polling
mocker.patch.object(coordinator._client, 'poll')
assert coordinator.fetch_committed_offsets([]) == {}
assert coordinator._client.poll.call_count == 0
# general case -- send offset fetch request, get successful future
mocker.patch.object(coordinator, 'ensure_coordinator_known')
mocker.patch.object(coordinator, '_send_offset_fetch_request',
return_value=Future().success('foobar'))
partitions = [TopicPartition('foobar', 0)]
ret = coordinator.fetch_committed_offsets(partitions)
assert ret == 'foobar'
coordinator._send_offset_fetch_request.assert_called_with(partitions)
assert coordinator._client.poll.call_count == 1
# Failed future is raised if not retriable
coordinator._send_offset_fetch_request.return_value = Future().failure(AssertionError)
coordinator._client.poll.reset_mock()
try:
coordinator.fetch_committed_offsets(partitions)
except __HOLE__:
pass
else:
assert False, 'Exception not raised when expected'
assert coordinator._client.poll.call_count == 1
coordinator._client.poll.reset_mock()
coordinator._send_offset_fetch_request.side_effect = [
Future().failure(Errors.RequestTimedOutError),
Future().success('fizzbuzz')]
ret = coordinator.fetch_committed_offsets(partitions)
assert ret == 'fizzbuzz'
assert coordinator._client.poll.call_count == 2 # call + retry | AssertionError | dataset/ETHPy150Open dpkp/kafka-python/test/test_coordinator.py/test_fetch_committed_offsets |
def test_commit_offsets_sync(mocker, coordinator, offsets):
mocker.patch.object(coordinator, 'ensure_coordinator_known')
mocker.patch.object(coordinator, '_send_offset_commit_request',
return_value=Future().success('fizzbuzz'))
cli = coordinator._client
mocker.patch.object(cli, 'poll')
# No offsets, no calls
assert coordinator.commit_offsets_sync({}) is None
assert coordinator._send_offset_commit_request.call_count == 0
assert cli.poll.call_count == 0
ret = coordinator.commit_offsets_sync(offsets)
assert coordinator._send_offset_commit_request.call_count == 1
assert cli.poll.call_count == 1
assert ret == 'fizzbuzz'
# Failed future is raised if not retriable
coordinator._send_offset_commit_request.return_value = Future().failure(AssertionError)
coordinator._client.poll.reset_mock()
try:
coordinator.commit_offsets_sync(offsets)
except __HOLE__:
pass
else:
assert False, 'Exception not raised when expected'
assert coordinator._client.poll.call_count == 1
coordinator._client.poll.reset_mock()
coordinator._send_offset_commit_request.side_effect = [
Future().failure(Errors.RequestTimedOutError),
Future().success('fizzbuzz')]
ret = coordinator.commit_offsets_sync(offsets)
assert ret == 'fizzbuzz'
assert coordinator._client.poll.call_count == 2 # call + retry | AssertionError | dataset/ETHPy150Open dpkp/kafka-python/test/test_coordinator.py/test_commit_offsets_sync |
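Each of these tests masks `AssertionError` and checks for it with a try/except/else block. The same check is commonly written with `pytest.raises`; a self-contained toy version of the idiom (not kafka-python's actual code):

```python
import pytest

def must_be_positive(x):
    assert x > 0, 'x must be positive'
    return x

def test_rejects_zero():
    with pytest.raises(AssertionError):  # equivalent of the try/except/else + fail pattern
        must_be_positive(0)
```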
def fix_uri(self, text):
"""
Validates a text as URL format, also adjusting necessary stuff for a
clearer URL that will be fetched here.
Code based on Django source:
https://code.djangoproject.com/browser/django/trunk/django/forms/
fields.py?rev=17430#L610
Arguments:
* text: the URL string that is to be validated and fixed
"""
if not text:
raise exception.UrlException(text, _("URL can not be blank"))
try:
url_fields = list(urlparse.urlsplit(text))
except __HOLE__:
raise exception.UrlException(text, _("URL does not seem valid"))
if not url_fields[0]:
# If no URL scheme given, assume http://
url_fields[0] = 'http'
if not url_fields[1]:
# Assume that if no domain is provided, that the path segment
# contains the domain.
url_fields[1] = url_fields[2]
url_fields[2] = ''
# Rebuild the url_fields list, since the domain segment may now
# contain the path too.
try:
url_fields = list(urlparse.urlsplit(
urlparse.urlunsplit(url_fields)))
except ValueError:
raise exception.UrlException(text,
_("URL does not seem valid"))
if not url_fields[2]:
# the path portion may need to be added before query params
url_fields[2] = '/'
return urlparse.urlunsplit(url_fields) | ValueError | dataset/ETHPy150Open brunobraga/termsaver/termsaverlib/screen/helper/urlfetcher.py/URLFetcherHelperBase.fix_uri |
def fetch(self, uri):
"""
Executes the fetch action toward a specified URI. This will also
try to avoid unnecessary calls to the Internet by setting the flag
`__last_fetched`. If it can not fetch again, it will simply return
the `raw` data that was previously created by a previous fetch.
Arguments:
* uri: the path to be fetched
"""
# check if we can fetch again
if self.__last_fetched and not self.raw and \
time.time() - self.__last_fetched < \
constants.Settings.FETCH_INTERVAL_SECONDS:
return self.raw
headers = {'User-Agent': "%s/%s" % (constants.App.NAME,
constants.App.VERSION)}
# separate possible querystring data from plain URL
temp = uri.split('?')
url = temp[0]
if len(temp) > 1: # old style condition for old python compatibility
data = temp[1]
else:
data = None
self.log(_("Connecting to %s ... (this could take a while)") % uri)
# execute URL fetch
req = Request(url, data, headers)
resp = None
try:
resp = urlopen(req)
except __HOLE__, e:
raise exception.UrlException(uri,
_("Fetched URL returned error %d.") % e.code)
except URLError, e:
raise exception.UrlException(uri,
_("Could not fetch URL, because %s") % e.reason)
else:
self.__last_fetched = time.time()
self.raw = resp.read()
# make sure the content is not binary (eg. image)
if self.__is_response_binary(self.raw):
raise exception.UrlException(uri, _("Fetched data is binary."))
finally:
if resp:
resp.close()
return self.raw | HTTPError | dataset/ETHPy150Open brunobraga/termsaver/termsaverlib/screen/helper/urlfetcher.py/URLFetcherHelperBase.fetch |
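The masked exception is `HTTPError`; the snippet separates it from the broader `URLError` so it can report either the HTTP status code or the underlying reason. The same split with the Python 3 `urllib` names (the snippet uses the Python 2 `urllib2` equivalents):

```python
from urllib.error import HTTPError, URLError
from urllib.request import Request, urlopen

def fetch(url, user_agent='example-agent/1.0'):
    req = Request(url, headers={'User-Agent': user_agent})
    try:
        with urlopen(req, timeout=10) as resp:
            return resp.read()
    except HTTPError as err:             # order matters: HTTPError subclasses URLError
        raise RuntimeError('Fetched URL returned error %d.' % err.code)
    except URLError as err:              # DNS failure, refused connection, bad scheme, ...
        raise RuntimeError('Could not fetch URL, because %s' % err.reason)
```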
def save(self, delete_zip_import=True, *args, **kwargs):
"""
If a zip file is uploaded, extract any images from it and add
them to the gallery, before removing the zip file.
"""
super(BaseGallery, self).save(*args, **kwargs)
if self.zip_import:
zip_file = ZipFile(self.zip_import)
for name in zip_file.namelist():
data = zip_file.read(name)
try:
from PIL import Image
image = Image.open(BytesIO(data))
image.load()
image = Image.open(BytesIO(data))
image.verify()
except __HOLE__:
pass
except:
continue
name = os.path.split(name)[1]
# This is a way of getting around the broken nature of
# os.path.join on Python 2.x. See also the comment below.
if isinstance(name, bytes):
encoding = charsetdetect(name)['encoding']
tempname = name.decode(encoding)
else:
tempname = name
# A gallery with a slug of "/" tries to extract files
# to / on disk; see os.path.join docs.
slug = self.slug if self.slug != "/" else ""
path = os.path.join(GALLERIES_UPLOAD_DIR, slug, tempname)
try:
saved_path = default_storage.save(path, ContentFile(data))
except UnicodeEncodeError:
from warnings import warn
warn("A file was saved that contains unicode "
"characters in its path, but somehow the current "
"locale does not support utf-8. You may need to set "
"'LC_ALL' to a correct value, eg: 'en_US.UTF-8'.")
# The native() call is needed here around str because
# os.path.join() in Python 2.x (in posixpath.py)
# mixes byte-strings with unicode strings without
# explicit conversion, which raises a TypeError as it
# would on Python 3.
path = os.path.join(GALLERIES_UPLOAD_DIR, slug,
native(str(name, errors="ignore")))
saved_path = default_storage.save(path, ContentFile(data))
self.images.create(file=saved_path)
if delete_zip_import:
zip_file.close()
self.zip_import.delete(save=True) | ImportError | dataset/ETHPy150Open stephenmcd/mezzanine/mezzanine/galleries/models.py/BaseGallery.save |
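The hole is `ImportError`: Pillow may be absent, in which case the snippet skips image validation rather than failing the upload. The validation step on its own, assuming only Pillow's public `Image.open`/`verify` API:

```python
from io import BytesIO

def looks_like_image(data):
    """Best-effort check that a byte string is a decodable image."""
    try:
        from PIL import Image            # the masked exception: Pillow not installed
    except ImportError:
        return True                      # mirror the snippet: without PIL, accept the file
    try:
        Image.open(BytesIO(data)).verify()
    except Exception:                    # truncated or corrupt data, unknown format, ...
        return False
    return True
```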
@register.filter
def field(form, field):
try:
return form[field]
except __HOLE__:
return None | KeyError | dataset/ETHPy150Open nyaruka/smartmin/smartmin/templatetags/smartmin.py/field |
def restart(self, site_id, request):
filepath = MOD_WSGI_FILES.get(site_id)
if filepath:
try:
os.utime(filepath, None)
return True
except __HOLE__:
return False
return False | IOError | dataset/ETHPy150Open ojii/django-server-manager/server_manager/backends/mod_wsgi.py/ModWSGIBackend.restart |
def get_uptime(self, site_id, request):
filepath = MOD_WSGI_FILES.get(site_id)
if filepath:
try:
return datetime.fromtimestamp(os.stat(filepath)[-2])
except __HOLE__:
return None
return None | IOError | dataset/ETHPy150Open ojii/django-server-manager/server_manager/backends/mod_wsgi.py/ModWSGIBackend.get_uptime |
def set_repeated_blocks(parser):
""" helper function to initialize
the internal variable set on the parser.
"""
try:
parser._repeated_blocks
except __HOLE__:
parser._repeated_blocks = {} | AttributeError | dataset/ETHPy150Open nalourie/django-macros/build/lib/macros/templatetags/repeatedblocks.py/set_repeated_blocks |
@register.tag
def repeated_block(parser, token):
try:
tag_name, block_name = token.split_contents()
except __HOLE__:
raise template.TemplateSyntaxError(
'{0} tag takes only one argument'.format(
token.contents.split()[0]))
# initialize attribute storing block contents on parser
set_repeated_blocks(parser)
# do_block is the internal function for creating block tags
block_node = do_block(parser, token)
# store block in parser's attribute
parser._repeated_blocks[block_name] = block_node
# return a normal block node so that it behaves exactly
# as people would expect.
return block_node | ValueError | dataset/ETHPy150Open nalourie/django-macros/build/lib/macros/templatetags/repeatedblocks.py/repeated_block |
@register.tag
def repeat(parser, token):
try:
tag_name, block_name = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError(
'{0} tag takes only one argument'.format(
token.contents.split()[0]))
# try to fetch the stored block
try:
block_node = parser._repeated_blocks[block_name]
except (__HOLE__, KeyError):
raise template.TemplateSyntaxError(
"No repeated block {0} tag was found before the {1} tag".format(
block_name, tag_name))
# return the block to be repeated
return block_node | AttributeError | dataset/ETHPy150Open nalourie/django-macros/build/lib/macros/templatetags/repeatedblocks.py/repeat |
def month_number_from_arg(month, error):
try:
month_number = Months.options[month]
except __HOLE__:
error(
"Months should be e.g. Jan/January or numbers "
"in range 1 (January) to 12 (Dec), not %s" % month
)
return 1
else:
return month_number | KeyError | dataset/ETHPy150Open sahana/eden/modules/ClimateDataPortal/DSL/Check.py/month_number_from_arg |
def month_filter_number_from_arg(month, error):
try:
month_number = Months.options[month]
except __HOLE__:
error(
"Months should be e.g. PrevDec/PreviousDecember/Jan/January or numbers "
"in range 0 (PreviousDecember) to 12 (Dec), not %s" % month
)
return 1
else:
return month_number | KeyError | dataset/ETHPy150Open sahana/eden/modules/ClimateDataPortal/DSL/Check.py/month_filter_number_from_arg |
@check.implementation(To)
def To_check(to_date):
to_date.errors = []
error = to_date.errors.append
year = to_date.year
if to_date.month is None:
month = 12
else:
month = to_date.month
if to_date.day is None:
day = -1
else:
day = to_date.day
if not isinstance(year, int):
error("Year should be a whole number")
if not isinstance(day, int):
error("Day should be a whole number")
if not (1900 < year < 2500):
error("Year should be in range 1900 to 2500")
month_number = month_number_from_arg(month, error)
if day is -1:
# use last day of month
_, day = calendar.monthrange(year, month_number)
try:
to_date.date = datetime.date(year, month_number, day)
except __HOLE__:
error("Invalid date: datetime.date(%i, %i, %i)" % (year, month_number, day))
return to_date.errors | ValueError | dataset/ETHPy150Open sahana/eden/modules/ClimateDataPortal/DSL/Check.py/To_check |
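The hole in `To_check` is `ValueError`, raised by `datetime.date` for impossible dates; `calendar.monthrange` supplies the last day of the month when no day was given. Both pieces in a few lines:

```python
import calendar
import datetime

def last_day_of_month(year, month):
    _, day = calendar.monthrange(year, month)   # (weekday of day 1, days in month)
    return datetime.date(year, month, day)

print(last_day_of_month(2024, 2))               # 2024-02-29
try:
    datetime.date(2024, 2, 30)
except ValueError as exc:                       # the masked exception
    print('Invalid date:', exc)
```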
def get_resultspace_environment(result_space_path, base_env=None, quiet=False, cached=True, strict=True):
"""Get the environemt variables which result from sourcing another catkin
workspace's setup files as the string output of `cmake -E environment`.
This cmake command is used to be as portable as possible.
:param result_space_path: path to a Catkin result-space whose environment should be loaded, ``str``
:type result_space_path: str
:param quiet: don't throw exceptions, ``bool``
:type quiet: bool
:param cached: use the cached environment
:type cached: bool
:param strict: require the ``.catkin`` file exists in the resultspace
:type strict: bool
:returns: a dictionary of environment variables and their values
"""
# Set base environment to the current environment
if base_env is None:
base_env = dict(os.environ)
# Get the MD5 checksums for the current env hooks
# TODO: the env hooks path should be defined somewhere
env_hooks_path = os.path.join(result_space_path, 'etc', 'catkin', 'profile.d')
if os.path.exists(env_hooks_path):
env_hooks = [
md5(open(os.path.join(env_hooks_path, path)).read().encode('utf-8')).hexdigest()
for path in os.listdir(env_hooks_path)]
else:
env_hooks = []
# Check the cache first, if desired
if cached and result_space_path in _resultspace_env_cache:
(cached_base_env, cached_env_hooks, result_env) = _resultspace_env_cache.get(result_space_path)
if env_hooks == cached_env_hooks and cached_base_env == base_env:
return dict(result_env)
# Check to make sure result_space_path is a valid directory
if not os.path.isdir(result_space_path):
if quiet:
return dict()
raise IOError(
"Cannot load environment from resultspace \"%s\" because it does not "
"exist." % result_space_path
)
# Check to make sure result_space_path contains a `.catkin` file
# TODO: `.catkin` should be defined somewhere as an atom in catkin_pkg
if strict and not os.path.exists(os.path.join(result_space_path, '.catkin')):
if quiet:
return dict()
raise IOError(
"Cannot load environment from resultspace \"%s\" because it does not "
"appear to be a catkin-generated resultspace (missing .catkin marker "
"file)." % result_space_path
)
# Determine the shell to use to source the setup file
shell_path = os.environ.get('SHELL', None)
if shell_path is None:
shell_path = DEFAULT_SHELL
if not os.path.isfile(shell_path):
raise RuntimeError(
"Cannot determine shell executable. "
"The 'SHELL' environment variable is not set and "
"the default '{0}' does not exist.".format(shell_path)
)
(_, shell_name) = os.path.split(shell_path)
# Use fallback shell if using a non-standard shell
if shell_name not in ['bash', 'zsh']:
shell_name = 'bash'
# Check to make sure result_space_path contains the appropriate setup file
setup_file_path = os.path.join(result_space_path, 'env.sh')
if not os.path.exists(setup_file_path):
if quiet:
return dict()
raise IOError(
"Cannot load environment from resultspace \"%s\" because the "
"required setup file \"%s\" does not exist." % (result_space_path, setup_file_path)
)
# Construct a command list which sources the setup file and prints the env to stdout
norc_flags = {
'bash': '--norc',
'zsh': '-f'
}
command = ' '.join([
cmd_quote(setup_file_path),
shell_path,
norc_flags[shell_name],
'-c',
'"typeset -px"'
])
# Define some "blacklisted" environment variables which shouldn't be copied
blacklisted_keys = ('_', 'PWD')
env_dict = {}
try:
# Run the command synchronously to get the resultspace environment
if 0:
# NOTE: This sometimes fails to get all output (returns prematurely)
lines = ''
for ret in execute_process(command, cwd=os.getcwd(), env=base_env, emulate_tty=False, shell=True):
if type(ret) is bytes:
ret = ret.decode()
if isinstance(ret, string_type):
lines += ret
else:
p = subprocess.Popen(command, cwd=os.getcwd(), env=base_env, shell=True, stdout=subprocess.PIPE)
lines, _ = p.communicate()
# Extract the environment variables
env_dict = {
k: v
for k, v in parse_env_str(lines).items()
if k not in blacklisted_keys
}
# Check to make sure we got some kind of environment
if len(env_dict) > 0:
# Cache the result
_resultspace_env_cache[result_space_path] = (base_env, env_hooks, env_dict)
else:
print("WARNING: Sourced environment from `{}` has no environment variables. Something is wrong.".format(
setup_file_path))
except __HOLE__ as err:
print("WARNING: Failed to extract environment from resultspace: {}: {}".format(
result_space_path, str(err)),
file=sys.stderr)
return dict(env_dict) | IOError | dataset/ETHPy150Open catkin/catkin_tools/catkin_tools/resultspace.py/get_resultspace_environment |
def load_resultspace_environment(result_space_path, base_env=None, cached=True):
"""Load the environemt variables which result from sourcing another
workspace path into this process's environment.
:param result_space_path: path to a Catkin result-space whose environment should be loaded, ``str``
:type result_space_path: str
:param cached: use the cached environment
:type cached: bool
"""
env_dict = get_resultspace_environment(result_space_path, base_env=base_env, cached=cached)
try:
os.environ.update(env_dict)
except TypeError:
for k, v in env_dict.items():
try:
os.environ.update({k: v.decode()})
except __HOLE__ as err:
print({k: v})
raise err | TypeError | dataset/ETHPy150Open catkin/catkin_tools/catkin_tools/resultspace.py/load_resultspace_environment |
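The masked exceptions here are `IOError` (failing to run or read the shell that sources the result space) and, in `load_resultspace_environment`, `TypeError` from pushing bytes values into `os.environ` on Python 3. The decode-then-retry fallback reduces to:

```python
import os

def merge_env(env):
    """Merge a mapping whose values may be bytes into os.environ."""
    for key, value in env.items():
        if isinstance(value, bytes):
            value = value.decode()       # os.environ rejects bytes values on Python 3
        os.environ[key] = value

merge_env({'EXAMPLE_FLAG': b'1'})
print(os.environ['EXAMPLE_FLAG'])        # '1'
```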
def _CurrentKey(self):
"""Return the current key.
Returns:
str, like 'display_name'
"""
try:
return self._current_key[-1]
except __HOLE__:
if self._current_mode[-1] == 'dict':
# e.g. <string>foo</string> without <key>..</key> before it.
raise MalformedPlistError('Missing key element before value element')
else:
# Undefined error condition, traceback please.
raise | IndexError | dataset/ETHPy150Open google/simian/src/simian/mac/munki/plist.py/ApplePlist._CurrentKey |
def _BinLoadObject(self, ofs=0):
"""Load an object.
Args:
ofs: int, offset in binary
Returns:
any value of object (int, str, etc..)
"""
pos = ofs
if pos in self.__bin:
return self.__bin[pos]
objtype = ord(self._plist_bin[pos]) >> 4
objarg = ord(self._plist_bin[pos]) & 0xf
try:
x = self._type_lookup[objtype](pos, objtype, objarg)
self.__bin[pos] = x
except __HOLE__:
raise MalformedPlistError('Unknown binary objtype %d' % objtype)
except (ValueError, TypeError) as e:
raise MalformedPlistError(
'Binary struct problem offset %d: %s' % (pos, str(e)))
return x | KeyError | dataset/ETHPy150Open google/simian/src/simian/mac/munki/plist.py/ApplePlist._BinLoadObject |
def Equal(self, plist, ignore_keys=None):
"""Checks if a passed plist is the same, ignoring certain keys.
Args:
plist: ApplePlist object.
ignore_keys: optional, sequence, str keys to ignore.
Returns:
Boolean. True if the plist is the same, False otherwise.
Raises:
PlistNotParsedError: the plist was not parsed.
"""
if not hasattr(self, '_plist'):
raise PlistNotParsedError
if not ignore_keys:
return self == plist
for key in plist:
if key in ignore_keys:
continue
try:
if plist[key] != self._plist[key]:
return False
except __HOLE__:
return False
return True | KeyError | dataset/ETHPy150Open google/simian/src/simian/mac/munki/plist.py/ApplePlist.Equal |
def _test_database_name(self):
name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
try:
if self.connection.settings_dict['TEST_NAME']:
name = self.connection.settings_dict['TEST_NAME']
except __HOLE__:
pass
return name | AttributeError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/db/backends/oracle/creation.py/DatabaseCreation._test_database_name |
def _test_database_user(self):
name = TEST_DATABASE_PREFIX + self.connection.settings_dict['USER']
try:
if self.connection.settings_dict['TEST_USER']:
name = self.connection.settings_dict['TEST_USER']
except __HOLE__:
pass
return name | KeyError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/db/backends/oracle/creation.py/DatabaseCreation._test_database_user |
def _test_database_passwd(self):
name = PASSWORD
try:
if self.connection.settings_dict['TEST_PASSWD']:
name = self.connection.settings_dict['TEST_PASSWD']
except __HOLE__:
pass
return name | KeyError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/db/backends/oracle/creation.py/DatabaseCreation._test_database_passwd |
def _test_database_tblspace(self):
name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
try:
if self.connection.settings_dict['TEST_TBLSPACE']:
name = self.connection.settings_dict['TEST_TBLSPACE']
except __HOLE__:
pass
return name | KeyError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/db/backends/oracle/creation.py/DatabaseCreation._test_database_tblspace |
def _test_database_tblspace_tmp(self):
name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME'] + '_temp'
try:
if self.connection.settings_dict['TEST_TBLSPACE_TMP']:
name = self.connection.settings_dict['TEST_TBLSPACE_TMP']
except __HOLE__:
pass
return name | KeyError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/db/backends/oracle/creation.py/DatabaseCreation._test_database_tblspace_tmp |
def _restore_bytes(self, formatted_size):
if not formatted_size:
return 0
m = re.match(r'^(?P<size>\d+(?:\.\d+)?)\s+(?P<units>[a-zA-Z]+)', formatted_size)
if not m:
return 0
units = m.group('units')
try:
exponent = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'].index(units.upper())
except __HOLE__:
return 0
size = float(m.group('size'))
return int(size * (1024 ** exponent)) | ValueError | dataset/ETHPy150Open yasoob/youtube-dl-GUI/youtube_dl/extractor/channel9.py/Channel9IE._restore_bytes |
@flow.StateHandler(next_state="CheckHash")
def ReceiveFileHash(self, responses):
"""Add hash digest to tracker and check with filestore."""
# Support old clients which may not have the new client action in place yet.
# TODO(user): Deprecate once all clients have the HashFile action.
if not responses.success and responses.request.request.name == "HashFile":
logging.debug(
"HashFile action not available, falling back to FingerprintFile.")
self.CallClient("FingerprintFile", responses.request.request.payload,
next_state="ReceiveFileHash",
request_data=responses.request_data)
return
index = responses.request_data["index"]
if not responses.success:
self.Log("Failed to hash file: %s", responses.status)
self.state.pending_hashes.pop(index, None)
self.FileFetchFailed(responses.request.request.payload.pathspec,
responses.request.request.name,
request_data=responses.request_data)
return
self.state.files_hashed += 1
response = responses.First()
if response.HasField("hash"):
hash_obj = response.hash
else:
# Deprecate this method of returning hashes.
hash_obj = rdf_crypto.Hash()
if len(response.results) < 1 or response.results[0]["name"] != "generic":
self.Log("Failed to hash file: %s", self.state.indexed_pathspecs[index])
self.state.pending_hashes.pop(index, None)
return
result = response.results[0]
try:
for hash_type in ["md5", "sha1", "sha256"]:
value = result.GetItem(hash_type)
setattr(hash_obj, hash_type, value)
except AttributeError:
self.Log("Failed to hash file: %s", self.state.indexed_pathspecs[index])
self.state.pending_hashes.pop(index, None)
return
try:
tracker = self.state.pending_hashes[index]
except __HOLE__:
# TODO(user): implement a test for this and handle the failure
# gracefully: i.e. maybe we can continue with an empty StatEntry.
self.Error("Couldn't stat the file, but got the hash (%s): %s" %
(utils.SmartStr(index), utils.SmartStr(response.pathspec)))
return
tracker.hash_obj = hash_obj
tracker.bytes_read = response.bytes_read
self.state.files_hashed_since_check += 1
if self.state.files_hashed_since_check >= self.MIN_CALL_TO_FILE_STORE:
self._CheckHashesWithFileStore() | KeyError | dataset/ETHPy150Open google/grr/grr/lib/flows/general/transfer.py/MultiGetFileMixin.ReceiveFileHash |
@flow.StateHandler()
def LoadComponentAfterFlushOldComponent(self, responses):
"""Load the component."""
request_data = responses.request_data
name = request_data["name"]
version = request_data["version"]
next_state = request_data["next_state"]
# Get the component summary.
component_urn = config_lib.CONFIG.Get(
"Config.aff4_root").Add("components").Add("%s_%s" % (name, version))
try:
fd = aff4.FACTORY.Open(component_urn, aff4_type="ComponentObject",
mode="r", token=self.token)
except __HOLE__ as e:
raise IOError("Required component not found: %s" % e)
component_summary = fd.Get(fd.Schema.COMPONENT)
if component_summary is None:
raise RuntimeError("Component %s (%s) does not exist in data store." %
(name, version))
self.CallClient("LoadComponent", summary=component_summary,
next_state="ComponentLoaded", request_data=dict(
next_state=next_state)) | IOError | dataset/ETHPy150Open google/grr/grr/lib/flows/general/transfer.py/LoadComponentMixin.LoadComponentAfterFlushOldComponent |
def laplacian_spectrum(G, weight='weight'):
"""Return eigenvalues of the Laplacian of G
Parameters
----------
G : graph
A NetworkX graph
weight : string or None, optional (default='weight')
The edge data key used to compute each value in the matrix.
If None, then each edge has weight 1.
Returns
-------
evals : NumPy array
Eigenvalues
Notes
-----
For MultiGraph/MultiDiGraph, the edge weights are summed.
See to_numpy_matrix for other options.
See Also
--------
laplacian_matrix
"""
try:
import numpy as np
except __HOLE__:
raise ImportError(
"laplacian_spectrum() requires NumPy: http://scipy.org/ ")
return np.linalg.eigvals(nx.laplacian_matrix(G,weight=weight)) | ImportError | dataset/ETHPy150Open gkno/gkno_launcher/src/networkx/linalg/spectrum.py/laplacian_spectrum |
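A minimal usage sketch for the function above, assuming NetworkX and NumPy are installed; the graph is illustrative.
import networkx as nx
G = nx.path_graph(4)            # chain 0-1-2-3
evals = laplacian_spectrum(G)   # four eigenvalues of the graph Laplacian
# exactly one eigenvalue is (numerically) zero because the path graph is connected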
def adjacency_spectrum(G, weight='weight'):
"""Return eigenvalues of the adjacency matrix of G.
Parameters
----------
G : graph
A NetworkX graph
weight : string or None, optional (default='weight')
The edge data key used to compute each value in the matrix.
If None, then each edge has weight 1.
Returns
-------
evals : NumPy array
Eigenvalues
Notes
-----
For MultiGraph/MultiDiGraph, the edge weights are summed.
See to_numpy_matrix for other options.
See Also
--------
adjacency_matrix
"""
try:
import numpy as np
except __HOLE__:
raise ImportError(
"adjacency_spectrum() requires NumPy: http://scipy.org/ ")
return np.linalg.eigvals(nx.adjacency_matrix(G,weight=weight))
# fixture for nose tests | ImportError | dataset/ETHPy150Open gkno/gkno_launcher/src/networkx/linalg/spectrum.py/adjacency_spectrum |
def list_related(self, request, pk=None, field_name=None):
"""Fetch related object(s), as if sideloaded (used to support
link objects).
This method gets mapped to `/<resource>/<pk>/<field_name>/` by
DynamicRouter for all DynamicRelationField fields. Generally,
this method probably shouldn't be overridden.
An alternative implementation would be to generate reverse queries.
For an exploration of that approach, see:
https://gist.github.com/ryochiji/54687d675978c7d96503
"""
# Explicitly disable filtering support. Applying filters to this
# endpoint would require us to pass through sideload filters, which
# can have unintended consequences when applied asynchronously.
if self.get_request_feature(self.FILTER):
raise ValidationError(
"Filtering is not enabled on relation endpoints."
)
# Prefix include/exclude filters with field_name so it's scoped to
# the parent object.
field_prefix = field_name + '.'
self._prefix_inex_params(request, self.INCLUDE, field_prefix)
self._prefix_inex_params(request, self.EXCLUDE, field_prefix)
# Filter for parent object, include related field.
self.request.query_params.add('filter{pk}', pk)
self.request.query_params.add('include[]', field_prefix)
# Get serializer and field.
serializer = self.get_serializer()
field = serializer.fields.get(field_name)
if field is None:
raise ValidationError("Unknown field: %s" % field_name)
# Query for root object, with related field prefetched
queryset = self.get_queryset()
queryset = self.filter_queryset(queryset)
obj = queryset.first()
if not obj:
return Response("Not found", status=404)
# Serialize the related data. Use the field's serializer to ensure
# it's configured identically to the sideload case.
serializer = field.serializer
try:
# TODO(ryo): Probably should use field.get_attribute() but that
# seems to break a bunch of things. Investigate later.
serializer.instance = getattr(obj, field.source)
except __HOLE__:
# See:
# http://jsonapi.org/format/#fetching-relationships-responses-404
# This is a case where the "link URL exists but the relationship
# is empty" and therefore must return a 200.
return Response({}, status=200)
return Response(serializer.data) | ObjectDoesNotExist | dataset/ETHPy150Open AltSchool/dynamic-rest/dynamic_rest/viewsets.py/WithDynamicViewSetMixin.list_related |
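A hedged illustration of how this endpoint behaves; the resource and relation names below are hypothetical.
# GET /users/1/groups/  ->  routed here as list_related(request, pk='1', field_name='groups')
# The parent is fetched with filter{pk}=1 and include[]='groups.', then obj.groups is
# serialized with the relation's own serializer (the same configuration used for sideloading).
# A missing parent returns a 404; a null singular relation hits the ObjectDoesNotExist
# branch and returns {} with a 200, as the comment in the code explains.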
def _set_start_end_params(request, query):
format_date_for_mongo = lambda x, datetime: datetime.strptime(
x, '%y_%m_%d_%H_%M_%S').strftime('%Y-%m-%dT%H:%M:%S')
# check for start and end params
if 'start' in request.GET or 'end' in request.GET:
query = json.loads(query) \
if isinstance(query, six.string_types) else query
query[SUBMISSION_TIME] = {}
try:
if request.GET.get('start'):
query[SUBMISSION_TIME]['$gte'] = format_date_for_mongo(
request.GET['start'], datetime)
if request.GET.get('end'):
query[SUBMISSION_TIME]['$lte'] = format_date_for_mongo(
request.GET['end'], datetime)
except __HOLE__:
raise exceptions.ParseError(
_("Dates must be in the format YY_MM_DD_hh_mm_ss")
)
else:
query = json.dumps(query)
return query | ValueError | dataset/ETHPy150Open kobotoolbox/kobocat/onadata/apps/api/viewsets/xform_viewset.py/_set_start_end_params |
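A worked example of the date handling above; the request values are hypothetical.
# GET ...?start=15_06_01_00_00_00&end=15_06_30_23_59_59
# strptime('15_06_01_00_00_00', '%y_%m_%d_%H_%M_%S') -> datetime(2015, 6, 1, 0, 0)
# so the function sets
#   query[SUBMISSION_TIME] == {'$gte': '2015-06-01T00:00:00',
#                              '$lte': '2015-06-30T23:59:59'}
# before re-serializing the query with json.dumps; a value such as start=2015-06-01
# raises the ParseError above.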
def ScanForFileSystem(self, source_path_spec):
"""Scans the path specification for a supported file system format.
Args:
source_path_spec: the source path specification (instance of
dfvfs.PathSpec).
Returns:
The file system path specification (instance of dfvfs.PathSpec) or None
if no supported file system type was found.
Raises:
BackEndError: if the source cannot be scanned or more than one file
system type is found.
"""
try:
type_indicators = analyzer.Analyzer.GetFileSystemTypeIndicators(
source_path_spec, resolver_context=self._resolver_context)
except __HOLE__ as exception:
raise errors.BackEndError((
u'Unable to process source path specification with error: '
u'{0:s}').format(exception))
if not type_indicators:
return
type_indicator = type_indicators[0]
if len(type_indicators) > 1:
if definitions.PREFERRED_NTFS_BACK_END not in type_indicators:
raise errors.BackEndError(
u'Unsupported source found more than one file system types.')
type_indicator = definitions.PREFERRED_NTFS_BACK_END
# TODO: determine root location from file system or path specification.
if type_indicator == definitions.TYPE_INDICATOR_NTFS:
return path_spec_factory.Factory.NewPathSpec(
type_indicator, location=u'\\', parent=source_path_spec)
return path_spec_factory.Factory.NewPathSpec(
type_indicator, location=u'/', parent=source_path_spec) | RuntimeError | dataset/ETHPy150Open log2timeline/dfvfs/dfvfs/helpers/source_scanner.py/SourceScanner.ScanForFileSystem |
def ScanForStorageMediaImage(self, source_path_spec):
"""Scans the path specification for a supported storage media image format.
Args:
source_path_spec: the source path specification (instance of
dfvfs.PathSpec).
Returns:
The storage media image path specification (instance of dfvfs.PathSpec)
or None if no supported storage media image type was found.
Raises:
BackEndError: if the source cannot be scanned or more than one storage
media image type is found.
"""
try:
type_indicators = analyzer.Analyzer.GetStorageMediaImageTypeIndicators(
source_path_spec, resolver_context=self._resolver_context)
except __HOLE__ as exception:
raise errors.BackEndError((
u'Unable to process source path specification with error: '
u'{0:s}').format(exception))
if not type_indicators:
# The RAW storage media image type cannot be detected based on
# a signature so we try to detect it based on common file naming schemas.
file_system = resolver.Resolver.OpenFileSystem(
source_path_spec, resolver_context=self._resolver_context)
raw_path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_RAW, parent=source_path_spec)
try:
# The RAW glob function will raise a PathSpecError if the path
# specification is unsuitable for globbing.
glob_results = raw.RawGlobPathSpec(file_system, raw_path_spec)
except errors.PathSpecError:
glob_results = None
if not glob_results:
return
return raw_path_spec
if len(type_indicators) > 1:
raise errors.BackEndError(
u'Unsupported source found more than one storage media image types.')
return path_spec_factory.Factory.NewPathSpec(
type_indicators[0], parent=source_path_spec) | RuntimeError | dataset/ETHPy150Open log2timeline/dfvfs/dfvfs/helpers/source_scanner.py/SourceScanner.ScanForStorageMediaImage |
def ScanForVolumeSystem(self, source_path_spec):
"""Scans the path specification for a supported volume system format.
Args:
source_path_spec: the source path specification (instance of
dfvfs.PathSpec).
Returns:
The volume system path specification (instance of dfvfs.PathSpec) or
None if no supported volume system type was found.
Raises:
BackEndError: if the source cannot be scanned or more than one volume
system type is found.
"""
# It is technically possible to scan for VSS-in-VSS, but it makes no sense
# to do so.
if source_path_spec.type_indicator == definitions.TYPE_INDICATOR_VSHADOW:
return
# Check if we already have a volume system root path specification.
if source_path_spec.type_indicator in (
definitions.VOLUME_SYSTEM_TYPE_INDICATORS):
if getattr(source_path_spec, u'location', None) == u'/':
return source_path_spec
try:
type_indicators = analyzer.Analyzer.GetVolumeSystemTypeIndicators(
source_path_spec, resolver_context=self._resolver_context)
except (__HOLE__, RuntimeError) as exception:
raise errors.BackEndError((
u'Unable to process source path specification with error: '
u'{0:s}').format(exception))
if not type_indicators:
return
if len(type_indicators) > 1:
raise errors.BackEndError(
u'Unsupported source found more than one volume system types.')
if (type_indicators[0] == definitions.TYPE_INDICATOR_TSK_PARTITION and
source_path_spec.type_indicator in [
definitions.TYPE_INDICATOR_TSK_PARTITION]):
return
if type_indicators[0] in definitions.VOLUME_SYSTEM_TYPE_INDICATORS:
return path_spec_factory.Factory.NewPathSpec(
type_indicators[0], location=u'/', parent=source_path_spec)
return path_spec_factory.Factory.NewPathSpec(
type_indicators[0], parent=source_path_spec) | IOError | dataset/ETHPy150Open log2timeline/dfvfs/dfvfs/helpers/source_scanner.py/SourceScanner.ScanForVolumeSystem |
def apply_adaptive_noise(computation_graph,
cost,
variables,
num_examples,
parameters=None,
init_sigma=1e-6,
model_cost_coefficient=1.0,
seed=None,
gradients=None,
):
"""Add adaptive noise to parameters of a model.
Each of the given variables will be replaced by a normal
distribution with learned mean and standard deviation.
A model cost is computed based on the precision of the distributions
associated with each variable. It is added to the given cost used to
train the model.
See: A. Graves "Practical Variational Inference for Neural Networks",
NIPS 2011
Parameters
----------
computation_graph : instance of :class:`ComputationGraph`
The computation graph.
cost : :class:`~tensor.TensorVariable`
The cost without weight noise. It should be a member of the
computation_graph.
variables : :class:`~tensor.TensorVariable`
Variables to add noise to.
num_examples : int
Number of training examples. The cost of the model is divided by
the number of training examples, please see
A. Graves "Practical Variational Inference for Neural Networks"
for justification
parameters : list of :class:`~tensor.TensorVariable`
parameters of the model; if gradients are given, the list will not
be used. Otherwise, it will be used to compute the gradients
init_sigma : float,
initial standard deviation of noise variables
model_cost_coefficient : float,
the weight of the model cost
seed : int, optional
The seed with which
:class:`~theano.sandbox.rng_mrg.MRG_RandomStreams` is initialized,
is set to 1 by default.
gradients : dict, optional
Adaptive weight noise introduces new parameters for which new cost
and gradients must be computed. Unless the gradients parameter is
given, it will use theano.grad to get the gradients
Returns
-------
cost : :class:`~tensor.TensorVariable`
The new cost
computation_graph : instance of :class:`ComputationGraph`
new graph with added noise.
gradients : dict
a dictionary of gradients for all parameters: the original ones
and the adaptive noise ones
noise_brick : :class:`~lvsr.graph.NoiseBrick`
the brick that holds all noise parameters and whose .apply method
can be used to find variables added by adaptive noise
"""
if not seed:
seed = config.default_seed
rng = MRG_RandomStreams(seed)
try:
cost_index = computation_graph.outputs.index(cost)
except __HOLE__:
raise ValueError("cost is not part of the computation_graph")
if gradients is None:
if parameters is None:
raise ValueError("Either gradients or parameters must be given")
logger.info("Taking the cost gradient")
gradients = dict(equizip(parameters,
tensor.grad(cost, parameters)))
else:
if parameters is not None:
logger.warn("Both gradients and parameters given, will ignore "
"parameters")
parameters = gradients.keys()
gradients = OrderedDict(gradients)
log_sigma_scale = 2048.0
P_noisy = variables # We will add noise to these
Beta = [] # will hold means, log_stdev and stdevs
P_with_noise = [] # will hold params with added noise
# These don't change
P_clean = list(set(parameters).difference(P_noisy))
noise_brick = NoiseBrick()
for p in P_noisy:
p_u = p
p_val = p.get_value(borrow=True)
p_ls2 = theano.shared((numpy.zeros_like(p_val) +
numpy.log(init_sigma) * 2. / log_sigma_scale
).astype(dtype=numpy.float32))
p_ls2.name = __get_name(p_u)
noise_brick.parameters.append(p_ls2)
p_s2 = tensor.exp(p_ls2 * log_sigma_scale)
Beta.append((p_u, p_ls2, p_s2))
p_noisy = p_u + rng.normal(size=p_val.shape) * tensor.sqrt(p_s2)
p_noisy = tensor.patternbroadcast(p_noisy, p.type.broadcastable)
P_with_noise.append(p_noisy)
# compute the prior mean and variation
temp_sum = 0.0
temp_param_count = 0.0
for p_u, unused_p_ls2, unused_p_s2 in Beta:
temp_sum = temp_sum + p_u.sum()
temp_param_count = temp_param_count + p_u.shape.prod()
prior_u = tensor.cast(temp_sum / temp_param_count, 'float32')
temp_sum = 0.0
for p_u, unused_ls2, p_s2 in Beta:
temp_sum = temp_sum + (p_s2).sum() + (((p_u-prior_u)**2).sum())
prior_s2 = tensor.cast(temp_sum/temp_param_count, 'float32')
# convert everything to use the noisy parameters
full_computation_graph = ComputationGraph(computation_graph.outputs +
gradients.values())
full_computation_graph = full_computation_graph.replace(
dict(zip(P_noisy, P_with_noise)))
LC = 0.0 # model cost
for p_u, p_ls2, p_s2 in Beta:
LC = (LC +
0.5 * ((tensor.log(prior_s2) - p_ls2 * log_sigma_scale).sum()) +
1.0 / (2.0 * prior_s2) * (((p_u - prior_u)**2) + p_s2 - prior_s2
).sum()
)
LC = LC / num_examples * model_cost_coefficient
train_cost = noise_brick.apply(
full_computation_graph.outputs[cost_index].copy(), LC,
prior_u, prior_s2)
gradients = OrderedDict(
zip(gradients.keys(),
full_computation_graph.outputs[-len(gradients):]))
#
# Delete the gradients from the computational graph
#
del full_computation_graph.outputs[-len(gradients):]
new_grads = {p: gradients.pop(p) for p in P_clean}
#
# Warning!!!
# This only works for batch size 1 (we need the sum of squares
# to equal the square of the sum).
#
diag_hessian_estimate = {p: g**2 for p, g in gradients.iteritems()}
for p_u, p_ls2, p_s2 in Beta:
p_grad = gradients[p_u]
p_u_grad = (model_cost_coefficient * (p_u - prior_u) /
(num_examples*prior_s2) + p_grad)
p_ls2_grad = (numpy.float32(model_cost_coefficient *
0.5 / num_examples * log_sigma_scale) *
(p_s2/prior_s2 - 1.0) +
(0.5*log_sigma_scale) * p_s2 * diag_hessian_estimate[p_u]
)
new_grads[p_u] = p_u_grad
new_grads[p_ls2] = p_ls2_grad
return train_cost, full_computation_graph, new_grads, noise_brick | ValueError | dataset/ETHPy150Open rizar/attention-lvcsr/lvsr/graph.py/apply_adaptive_noise |
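A hedged usage sketch for the helper above; all model and variable names are hypothetical and a Blocks/Theano training setup is assumed.
# cg = ComputationGraph(cost)          # graph that contains `cost`
# weights = [W1, W2]                   # shared variables to perturb with noise
# train_cost, noisy_cg, grads, noise_brick = apply_adaptive_noise(
#     cg, cost, variables=weights, num_examples=50000,
#     parameters=cg.parameters, init_sigma=1e-6, model_cost_coefficient=1.0)
# `train_cost` and `grads` then replace the plain cost and gradients in the
# training algorithm (note the batch-size-1 caveat in the comments above).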
def serialWriteEvent(self):
try:
dataToWrite = self.outQueue.pop(0)
except __HOLE__:
self.writeInProgress = 0
return
else:
win32file.WriteFile(self._serial.hComPort, dataToWrite, self._overlappedWrite) | IndexError | dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/internet/_win32serialport.py/SerialPort.serialWriteEvent |
def run_and_read(view, cmd):
out, err = subprocess.Popen([cmd],
stdout=PIPE,
stderr=PIPE,
shell=True).communicate()
try:
return (out or err).decode('utf-8')
except __HOLE__:
return '' | AttributeError | dataset/ETHPy150Open guillermooo/Vintageous/ex/plat/linux.py/run_and_read |
def _compute_substitution_score(aln1_chars, aln2_chars, substitution_matrix,
gap_substitution_score, gap_chars):
substitution_score = 0
for aln1_char, aln2_char in product(aln1_chars, aln2_chars):
if aln1_char in gap_chars or aln2_char in gap_chars:
substitution_score += gap_substitution_score
else:
try:
substitution_score += \
substitution_matrix[aln1_char][aln2_char]
except __HOLE__:
offending_chars = \
[c for c in (aln1_char, aln2_char)
if c not in substitution_matrix]
raise ValueError(
"One of the sequences contains a character that is "
"not contained in the substitution matrix. Are you "
"using an appropriate substitution matrix for your "
"sequence type (e.g., a nucleotide substitution "
"matrix does not make sense for aligning protein "
"sequences)? Does your sequence contain invalid "
"characters? The offending character(s) is: "
" %s." % ', '.join(offending_chars))
substitution_score /= (len(aln1_chars) * len(aln2_chars))
return substitution_score | KeyError | dataset/ETHPy150Open biocore/scikit-bio/skbio/alignment/_pairwise.py/_compute_substitution_score |
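A small worked example of the averaging above, using a toy substitution matrix; the values are illustrative.
# submat = {'A': {'A': 2, 'C': 0}, 'C': {'A': 0, 'C': 2}}
# _compute_substitution_score(['A'], ['A'], submat, -2, set('-'))            -> 2.0
# _compute_substitution_score(['A', 'C'], ['A', 'C'], submat, -2, set('-'))  -> (2+0+0+2)/4 = 1.0
# _compute_substitution_score(['A'], ['-'], submat, -2, set('-'))            -> -2 (gap column)
# A character missing from submat triggers the ValueError described in the code.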
def __init__(self):
self.nodes = []
self.metadata = self.config.get('metadata', {})
try:
creator = self.metadata['creator']
except __HOLE__:
raise UsageError("Must specify creator metadata.")
if not creator.isalnum():
raise UsageError(
"Creator must be alphanumeric. Found {!r}".format(creator)
)
self.creator = creator
self.metadata.update(self.identity.metadata)
self.metadata['distribution'] = self.distribution
# Try to make names unique even if the same creator is starting
# multiple clusters at the same time. This lets other code use the
# name as a way to identify nodes. This is only necessary in one
# place, the node creation code, to perform cleanup when the create
# operation fails in a way such that it isn't clear if the instance has
# been created or not.
self.random_tag = b32encode(os.urandom(8)).lower().strip("\n=") | KeyError | dataset/ETHPy150Open ClusterHQ/flocker/admin/acceptance.py/LibcloudRunner.__init__ |
def dataset_backend(self):
"""
Get the storage driver the acceptance testing nodes will use.
:return: A ``BackendDescription`` matching the name of the backend
chosen by the command-line options.
"""
configuration = self.dataset_backend_configuration()
# Avoid requiring repetition of the backend name when it is the same as
# the name of the configuration section. But allow it so that there
# can be "great-openstack-provider" and "better-openstack-provider"
# sections side-by-side that both use "openstack" backend but configure
# it slightly differently.
dataset_backend_name = configuration.get(
"backend", self["dataset-backend"]
)
try:
return backend_loader.get(dataset_backend_name)
except __HOLE__:
raise UsageError(
"Unknown dataset backend: {}".format(
dataset_backend_name
)
) | ValueError | dataset/ETHPy150Open ClusterHQ/flocker/admin/acceptance.py/CommonOptions.dataset_backend |
def postOptions(self):
if self['distribution'] is None:
raise UsageError("Distribution required.")
if self['config-file'] is not None:
config_file = FilePath(self['config-file'])
self['config'] = yaml.safe_load(config_file.getContent())
else:
self['config'] = {}
if self.get('cert-directory') is None:
self['cert-directory'] = FilePath(mkdtemp())
if self.get('provider') is None:
raise UsageError("Provider required.")
provider = self['provider'].lower()
provider_config = self['config'].get(provider, {})
try:
get_runner = getattr(self, "_runner_" + provider.upper())
except __HOLE__:
raise UsageError(
"Provider {!r} not supported. Available providers: {}".format(
provider, ', '.join(
name.lower() for name in self._get_provider_names()
)
)
)
else:
self.runner = get_runner(
package_source=self.package_source(),
dataset_backend=self.dataset_backend(),
provider_config=provider_config,
) | AttributeError | dataset/ETHPy150Open ClusterHQ/flocker/admin/acceptance.py/CommonOptions.postOptions |
def _libcloud_runner(self, package_source, dataset_backend,
provider, provider_config):
"""
Run some nodes using ``libcloud``.
By default, two nodes are run. This can be overridden by using
the ``--number-of-nodes`` command line option.
:param PackageSource package_source: The source of omnibus packages.
:param BackendDescription dataset_backend: The description of the
dataset backend the nodes are configured with.
:param provider: The name of the cloud provider of nodes for the tests.
:param provider_config: The ``managed`` section of the acceptance testing configuration.
:returns: ``LibcloudRunner``.
"""
if provider_config is None:
self._provider_config_missing(provider)
provider_factory = CLOUD_PROVIDERS[provider]
try:
provisioner = provider_factory(**provider_config)
except __HOLE__:
try:
validate_signature_against_kwargs(provider_factory,
set(provider_config.keys()))
except InvalidSignature as e:
raise SystemExit(
"Missing or incorrect configuration for provider '{}'.\n"
"Missing Keys: {}\n"
"Unexpected Keys: {}\n"
"Optional Missing Keys: {}".format(
provider,
", ".join(e.missing_arguments) or "<None>",
", ".join(e.unexpected_arguments) or "<None>",
", ".join(e.missing_optional_arguments) or "<None>",
)
)
raise
return LibcloudRunner(
config=self['config'],
top_level=self.top_level,
distribution=self['distribution'],
package_source=package_source,
provisioner=provisioner,
dataset_backend=dataset_backend,
dataset_backend_configuration=self.dataset_backend_configuration(),
variants=self['variants'],
num_nodes=self['number-of-nodes'],
identity=self._make_cluster_identity(dataset_backend),
cert_path=self['cert-directory'],
) | TypeError | dataset/ETHPy150Open ClusterHQ/flocker/admin/acceptance.py/CommonOptions._libcloud_runner |
def parse_line(self, line):
"""
Given a line with an Eliot message, it inserts the hostname
and the system name into the message
:param line: The line read from the tail output that was identified
as an Eliot message
"""
try:
message = json.loads(line)
except __HOLE__:
# Docker log messages are not JSON
message = dict(message=line)
message[u"_HOSTNAME"] = self._host
message[u"_PROCESS_NAME"] = self._service
return message | ValueError | dataset/ETHPy150Open ClusterHQ/flocker/admin/acceptance.py/TailFormatter.parse_line |
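Illustrative behaviour of the parser above; the host and service values are hypothetical.
# parse_line('{"action_type": "agent:start"}')
#   -> {u'action_type': u'agent:start', u'_HOSTNAME': '198.51.100.5',
#       u'_PROCESS_NAME': 'flocker-dataset-agent'}
# parse_line('a plain (non-JSON) docker log line')
#   -> {'message': 'a plain (non-JSON) docker log line', u'_HOSTNAME': '198.51.100.5',
#       u'_PROCESS_NAME': 'flocker-dataset-agent'}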
def journald_json_formatter(output_file):
"""
Create an output handler which turns journald's export format back into
Eliot JSON with extra fields to identify the log origin.
"""
accumulated = {}
# XXX Factoring the parsing code separately from the IO would make this
# whole thing nicer.
def handle_output_line(line):
if line:
key, value = line.split(b"=", 1)
accumulated[key] = value
else:
if accumulated:
raw_message = accumulated.get(b"MESSAGE", b"{}")
try:
message = json.loads(raw_message)
except __HOLE__:
# Docker log messages are not JSON
message = dict(message=raw_message)
message[u"_HOSTNAME"] = accumulated.get(
b"_HOSTNAME", b"<no hostname>"
)
message[u"_PROCESS_NAME"] = accumulated.get(
b"_SYSTEMD_UNIT", b"<no unit>"
)
output_file.write(json.dumps(message) + b"\n")
accumulated.clear()
return handle_output_line | ValueError | dataset/ETHPy150Open ClusterHQ/flocker/admin/acceptance.py/journald_json_formatter |
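A worked example of the journald export format this handler consumes; the field values are illustrative.
# handle = journald_json_formatter(sys.stdout)
# handle('_HOSTNAME=node-1')
# handle('_SYSTEMD_UNIT=flocker-control.service')
# handle('MESSAGE={"action_type": "some:action"}')
# handle('')   # the blank line flushes the entry and writes one JSON line combining
#              # the decoded MESSAGE with _HOSTNAME ('node-1') and
#              # _PROCESS_NAME ('flocker-control.service')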
def read_basic_config(self):
"""Read basic options from the daemon config file"""
self.config_filename = self.options.config_filename
cp = ConfigParser.ConfigParser()
cp.read([self.config_filename])
self.config_parser = cp
try:
self.uid, self.gid = get_uid_gid(cp, self.section)
except __HOLE__, e:
sys.exit(str(e))
self.pidfile = cp.get(self.section, 'pidfile')
self.logfile = cp.get(self.section, 'logfile')
self.loglevel = cp.get(self.section, 'loglevel') | ValueError | dataset/ETHPy150Open kiberpipa/Intranet/pipa/ltsp/src/ltsp_usage_monitor/daemon.py/Daemon.read_basic_config |
def start(self):
"""Initialize and run the daemon"""
# The order of the steps below is chosen carefully.
# - don't proceed if another instance is already running.
self.check_pid()
# - start handling signals
self.add_signal_handlers()
# - create log file and pid file directories if they don't exist
self.prepare_dirs()
# - start_logging must come after check_pid so that two
# processes don't write to the same log file, but before
# setup_root so that work done with root privileges can be
# logged.
self.start_logging()
try:
# - set up with root privileges
self.setup_root()
# - drop privileges
self.set_uid()
# - check_pid_writable must come after set_uid in order to
# detect whether the daemon user can write to the pidfile
self.check_pid_writable()
# - set up with user privileges before daemonizing, so that
# startup failures can appear on the console
self.setup_user()
# - daemonize
if self.options.daemonize:
daemonize()
except:
logging.exception("failed to start due to an exception")
raise
# - write_pid must come after daemonizing since the pid of the
# long running process is known only after daemonizing
self.write_pid()
try:
logging.info("started")
try:
self.run()
except (__HOLE__, SystemExit):
pass
except:
logging.exception("stopping with an exception")
raise
finally:
self.remove_pid()
logging.info("stopped") | KeyboardInterrupt | dataset/ETHPy150Open kiberpipa/Intranet/pipa/ltsp/src/ltsp_usage_monitor/daemon.py/Daemon.start |
def stop(self):
"""Stop the running process"""
if self.pidfile and os.path.exists(self.pidfile):
pid = int(open(self.pidfile).read())
os.kill(pid, signal.SIGTERM)
# wait for a moment to see if the process dies
for n in range(10):
time.sleep(0.25)
try:
# poll the process state
os.kill(pid, 0)
except __HOLE__, why:
if why[0] == errno.ESRCH:
# process has died
break
else:
raise
else:
sys.exit("pid %d did not die" % pid)
else:
sys.exit("not running") | OSError | dataset/ETHPy150Open kiberpipa/Intranet/pipa/ltsp/src/ltsp_usage_monitor/daemon.py/Daemon.stop |
def set_uid(self):
"""Drop root privileges"""
if self.gid:
try:
os.setgid(self.gid)
except __HOLE__, (code, message):
sys.exit("can't setgid(%d): %s, %s" %
(self.gid, code, message))
if self.uid:
try:
os.setuid(self.uid)
except OSError, (code, message):
sys.exit("can't setuid(%d): %s, %s" %
(self.uid, code, message)) | OSError | dataset/ETHPy150Open kiberpipa/Intranet/pipa/ltsp/src/ltsp_usage_monitor/daemon.py/Daemon.set_uid |
def chown(self, fn):
"""Change the ownership of a file to match the daemon uid/gid"""
if self.uid or self.gid:
uid = self.uid
if not uid:
uid = os.stat(fn).st_uid
gid = self.gid
if not gid:
gid = os.stat(fn).st_gid
try:
os.chown(fn, uid, gid)
except __HOLE__, (code, message):
sys.exit("can't chown(%s, %d, %d): %s, %s" %
(repr(fn), uid, gid, code, message)) | OSError | dataset/ETHPy150Open kiberpipa/Intranet/pipa/ltsp/src/ltsp_usage_monitor/daemon.py/Daemon.chown |
def start_logging(self):
"""Configure the logging module"""
try:
level = int(self.loglevel)
except __HOLE__:
level = int(logging.getLevelName(self.loglevel.upper()))
handlers = []
if self.logfile:
handlers.append(logging.FileHandler(self.logfile))
self.chown(self.logfile)
if not self.options.daemonize:
# also log to stderr
handlers.append(logging.StreamHandler())
log = logging.getLogger()
log.setLevel(level)
for h in handlers:
h.setFormatter(logging.Formatter(
"%(asctime)s %(process)d %(levelname)s %(message)s"))
log.addHandler(h) | ValueError | dataset/ETHPy150Open kiberpipa/Intranet/pipa/ltsp/src/ltsp_usage_monitor/daemon.py/Daemon.start_logging |
def check_pid(self):
"""Check the pid file.
Stop, using sys.exit(), if another instance is already running.
If the pid file exists but no other instance is running,
delete the pid file.
"""
if not self.pidfile:
return
# based on twisted/scripts/twistd.py
if os.path.exists(self.pidfile):
try:
pid = int(open(self.pidfile).read().strip())
except __HOLE__:
msg = 'pidfile %s contains a non-integer value' % self.pidfile
sys.exit(msg)
try:
os.kill(pid, 0)
except OSError, (code, text):
if code == errno.ESRCH:
# The pid doesn't exist, so remove the stale pidfile.
os.remove(self.pidfile)
else:
msg = ("failed to check status of process %s "
"from pidfile %s: %s" % (pid, self.pidfile, text))
sys.exit(msg)
else:
msg = ('another instance seems to be running (pid %s), '
'exiting' % pid)
sys.exit(msg) | ValueError | dataset/ETHPy150Open kiberpipa/Intranet/pipa/ltsp/src/ltsp_usage_monitor/daemon.py/Daemon.check_pid |
def get_uid_gid(cp, section):
"""Get a numeric uid/gid from a configuration file.
May return an empty uid and gid.
"""
uid = cp.get(section, 'uid')
if uid:
try:
int(uid)
except __HOLE__:
# convert user name to uid
try:
uid = pwd.getpwnam(uid)[2]
except KeyError:
raise ValueError("user is not in password database: %s" % uid)
gid = cp.get(section, 'gid')
if gid:
try:
int(gid)
except ValueError:
# convert group name to gid
try:
gid = grp.getgrnam(gid)[2]
except KeyError:
raise ValueError("group is not in group database: %s" % gid)
return uid, gid | ValueError | dataset/ETHPy150Open kiberpipa/Intranet/pipa/ltsp/src/ltsp_usage_monitor/daemon.py/get_uid_gid |
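For reference, a hypothetical config section that `read_basic_config` and `get_uid_gid` above would accept; the section and file names are illustrative.
# [ltsp_usage_monitor]
# uid = nobody
# gid = nogroup
# pidfile = /var/run/usage_monitor.pid
# logfile = /var/log/usage_monitor.log
# loglevel = info
# Numeric uid/gid values are used as-is; user/group names are resolved through
# pwd.getpwnam/grp.getgrnam, and unknown names raise the ValueError above.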
def daemonize():
"""Detach from the terminal and continue as a daemon"""
# swiped from twisted/scripts/twistd.py
# See http://www.erlenstar.demon.co.uk/unix/faq_toc.html#TOC16
if os.fork(): # launch child and...
os._exit(0) # kill off parent
os.setsid()
if os.fork(): # launch child and...
os._exit(0) # kill off parent again.
os.umask(077)
null=os.open('/dev/null', os.O_RDWR)
for i in range(3):
try:
os.dup2(null, i)
except __HOLE__, e:
if e.errno != errno.EBADF:
raise
os.close(null) | OSError | dataset/ETHPy150Open kiberpipa/Intranet/pipa/ltsp/src/ltsp_usage_monitor/daemon.py/daemonize |
def assertListsEqual(self, list1, list2):
try:
# Python 3.4
self.assertCountEqual(list1, list2)
except __HOLE__:
# Python 2.7
self.assertItemsEqual(list1, list2) | AttributeError | dataset/ETHPy150Open mwarkentin/django-watchman/tests/test_utils.py/TestWatchman.assertListsEqual |
def show(self):
if not self.window:
self.init_ui()
try:
core.showWindow(self.window)
except __HOLE__:
self.init_ui()
core.showWindow(self.window)
core.window(self.window, edit=True, w=self.width, h=self.height) | RuntimeError | dataset/ETHPy150Open eoyilmaz/anima/anima/env/mayaEnv/ui.py/PrevisUI.show |
def _deactivate(method):
def deactivate(self):
try:
method(self)
finally:
try:
self.socket.shutdown(SHUT_RDWR)
except (EOFError, __HOLE__):
pass
if not self.is_active:
self.socket.close()
return deactivate | IOError | dataset/ETHPy150Open osrg/ryu/ryu/controller/controller.py/_deactivate |
@_deactivate
def _recv_loop(self):
buf = bytearray()
required_len = ofproto_common.OFP_HEADER_SIZE
count = 0
while self.state != DEAD_DISPATCHER:
ret = ""
try:
ret = self.socket.recv(required_len)
except SocketTimeout:
continue
except ssl.SSLError:
# eventlet throws SSLError (which is a subclass of IOError)
# on SSL socket read timeout; re-try the loop in this case.
continue
except (EOFError, __HOLE__):
break
if len(ret) == 0:
break
buf += ret
while len(buf) >= required_len:
(version, msg_type, msg_len, xid) = ofproto_parser.header(buf)
required_len = msg_len
if len(buf) < required_len:
break
msg = ofproto_parser.msg(
self, version, msg_type, msg_len, xid, buf[:msg_len])
# LOG.debug('queue msg %s cls %s', msg, msg.__class__)
if msg:
ev = ofp_event.ofp_msg_to_ev(msg)
self.ofp_brick.send_event_to_observers(ev, self.state)
dispatchers = lambda x: x.callers[ev.__class__].dispatchers
handlers = [handler for handler in
self.ofp_brick.get_handlers(ev) if
self.state in dispatchers(handler)]
for handler in handlers:
handler(ev)
buf = buf[required_len:]
required_len = ofproto_common.OFP_HEADER_SIZE
# We need to schedule other greenlets. Otherwise, ryu
# can't accept new switches or handle the existing
# switches. The limit is arbitrary. We need the better
# approach in the future.
count += 1
if count > 2048:
count = 0
hub.sleep(0) | IOError | dataset/ETHPy150Open osrg/ryu/ryu/controller/controller.py/Datapath._recv_loop |
def _send_loop(self):
try:
while self.state != DEAD_DISPATCHER:
buf = self.send_q.get()
self._send_q_sem.release()
self.socket.sendall(buf)
except SocketTimeout:
LOG.debug("Socket timed out while sending data to switch at address %s",
self.address)
except __HOLE__ as ioe:
# Convert ioe.errno to a string, just in case it was somehow set to None.
errno = "%s" % ioe.errno
LOG.debug("Socket error while sending data to switch at address %s: [%s] %s",
self.address, errno, ioe.strerror)
finally:
q = self.send_q
# First, clear self.send_q to prevent new references.
self.send_q = None
# Now, drain the send_q, releasing the associated semaphore for each entry.
# This should release all threads waiting to acquire the semaphore.
try:
while q.get(block=False):
self._send_q_sem.release()
except hub.QueueEmpty:
pass
# Finally, ensure the _recv_loop terminates.
self.close() | IOError | dataset/ETHPy150Open osrg/ryu/ryu/controller/controller.py/Datapath._send_loop |
def reserve_experiment(self, experiment_id, serialized_client_initial_data, serialized_consumer_data, client_address, core_server_universal_id ):
# Put user information in the session
self.get_user_information()
self._session['experiment_id'] = experiment_id
reservation_info = self._session['reservation_information'] = {}
reservation_info['user_agent'] = weblab_api.user_agent
reservation_info['referer'] = weblab_api.referer
reservation_info['mobile'] = weblab_api.is_mobile
reservation_info['locale'] = weblab_api.locale
reservation_info['facebook'] = weblab_api.is_facebook
reservation_info['route'] = self._server_route or 'no-route-found'
reservation_info['username'] = self.username
reservation_info['from_ip'] = client_address
reservation_info['from_direct_ip'] = client_address
# reservation_info['full_name'] = self._session['user_information'].full_name
reservation_info['role'] = self._session['db_session_id'].role
try:
client_initial_data = json.loads(serialized_client_initial_data)
except ValueError:
# TODO: to be tested
raise core_exc.WebLabCoreError( "Invalid client_initial_data provided: a json-serialized object expected" )
if self.is_access_forward_enabled():
try:
consumer_data = json.loads(serialized_consumer_data)
for forwarded_key in FORWARDED_KEYS:
if forwarded_key in consumer_data:
if consumer_data[forwarded_key] is not None:
reservation_info[forwarded_key] = consumer_data[forwarded_key]
server_uuids = consumer_data.get(SERVER_UUIDS, [])
for server_uuid, server_uuid_human in server_uuids:
if server_uuid == core_server_universal_id:
return 'replicated'
reservation_info[SERVER_UUIDS] = server_uuids
except __HOLE__:
raise core_exc.WebLabCoreError( "Invalid serialized_consumer_data provided: a json-serialized object expected" )
else:
consumer_data = {}
experiments = self._db_manager.list_experiments(self.username, experiment_id.exp_name, experiment_id.cat_name)
if len(experiments) == 0:
raise core_exc.UnknownExperimentIdError( "User can't access that experiment (or that experiment type does not exist)" )
experiment_allowed = experiments[0]
try:
# Retrieve the most restrictive values between what was requested and what was permitted:
#
# The smallest time allowed
time_allowed = min(experiment_allowed.time_allowed, consumer_data.get('time_allowed', experiment_allowed.time_allowed))
#
# The lowest priority (lower number is higher)
# TODO: whenever possible, there should be an argument in the permission as
# a parameter to the access_forward, such as:
# "how much you want to decrement the requested priority to this user"
priority = max(experiment_allowed.priority, consumer_data.get('priority', experiment_allowed.priority))
#
# Don't take into account initialization unless both agree
initialization_in_accounting = experiment_allowed.initialization_in_accounting and consumer_data.get('initialization_in_accounting', experiment_allowed.initialization_in_accounting)
reservation_info['permission_scope'] = experiment_allowed.permission_scope
reservation_info['permission_id'] = experiment_allowed.permission_id
status, reservation_id = self._coordinator.reserve_experiment(
experiment_allowed.experiment.to_experiment_id(),
time_allowed,
priority,
initialization_in_accounting,
client_initial_data,
reservation_info,
consumer_data
)
except coord_exc.ExperimentNotFoundError:
raise core_exc.NoAvailableExperimentFoundError(
"No experiment of type <%s,%s> is currently deployed" % (
experiment_id.exp_name,
experiment_id.cat_name
)
)
self._session['reservation_information'].pop('from_ip', None)
self._session['reservation_id'] = reservation_id
return status | ValueError | dataset/ETHPy150Open weblabdeusto/weblabdeusto/server/src/weblab/core/user_processor.py/UserProcessor.reserve_experiment |
def run_elastalert(self, rule, args):
""" Creates an ElastAlert instance and run's over for a specific rule using either real or mock data. """
# Mock configuration. Nothing here is used except run_every
conf = {'rules_folder': 'rules',
'run_every': datetime.timedelta(minutes=5),
'buffer_time': datetime.timedelta(minutes=45),
'alert_time_limit': datetime.timedelta(hours=24),
'es_host': 'es',
'es_port': 14900,
'writeback_index': 'wb',
'max_query_size': 10000,
'old_query_limit': datetime.timedelta(weeks=1),
'disable_rules_on_error': False}
# Load and instantiate rule
load_options(rule, conf)
load_modules(rule)
conf['rules'] = [rule]
# If using mock data, make sure it's sorted and find appropriate time range
timestamp_field = rule.get('timestamp_field', '@timestamp')
if args.json:
if not self.data:
return None
try:
self.data.sort(key=lambda x: x[timestamp_field])
starttime = ts_to_dt(self.data[0][timestamp_field])
endtime = self.data[-1][timestamp_field]
endtime = ts_to_dt(endtime) + datetime.timedelta(seconds=1)
except __HOLE__ as e:
print("All documents must have a timestamp and _id: %s" % (e), file=sys.stderr)
return None
# Create mock _id for documents if it's missing
used_ids = []
def get_id():
_id = ''.join([random.choice(string.letters) for i in range(16)])
if _id in used_ids:
return get_id()
used_ids.append(_id)
return _id
for doc in self.data:
doc.update({'_id': doc.get('_id', get_id())})
else:
endtime = ts_now()
starttime = endtime - datetime.timedelta(days=args.days)
# Set run_every to cover the entire time range unless use_count_query or use_terms_query is set
# This is to prevent query segmenting which unnecessarily slows down tests
if not rule.get('use_terms_query') and not rule.get('use_count_query'):
conf['run_every'] = endtime - starttime
# Instantiate ElastAlert to use mock config and special rule
with mock.patch('elastalert.elastalert.get_rule_hashes'):
with mock.patch('elastalert.elastalert.load_rules') as load_conf:
load_conf.return_value = conf
if args.alert:
client = ElastAlerter(['--verbose'])
else:
client = ElastAlerter(['--debug'])
# Replace get_hits_* functions to use mock data
if args.json:
self.mock_elastalert(client)
# Mock writeback for both real data and json data
client.writeback_es = None
with mock.patch.object(client, 'writeback') as mock_writeback:
client.run_rule(rule, endtime, starttime)
if mock_writeback.call_count:
print("\nWould have written the following documents to elastalert_status:\n")
for call in mock_writeback.call_args_list:
print("%s - %s\n" % (call[0][0], call[0][1])) | KeyError | dataset/ETHPy150Open Yelp/elastalert/elastalert/test_rule.py/MockElastAlerter.run_elastalert |
def port (tokeniser):
if not tokeniser.tokens:
raise ValueError('a port number is required')
value = tokeniser()
try:
return int(value)
except __HOLE__:
raise ValueError('"%s" is an invalid port' % value)
if value < 0:
raise ValueError('the port must positive')
if value >= pow(2,16):
raise ValueError('the port must be smaller than %d' % pow(2,16))
return value | ValueError | dataset/ETHPy150Open Exa-Networks/exabgp/lib/exabgp/configuration/parser.py/port |
def asn (tokeniser, value=None):
if value is None:
if not tokeniser.tokens:
raise ValueError('an asn is required')
value = tokeniser()
try:
if value.count('.'):
high,low = value.split('.',1)
as_number = (int(high) << 16) + int(low)
else:
as_number = int(value)
return ASN(as_number)
except __HOLE__:
raise ValueError('"%s" is an invalid ASN' % value) | ValueError | dataset/ETHPy150Open Exa-Networks/exabgp/lib/exabgp/configuration/parser.py/asn |
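A worked example of the AS number parsing above; the tokens are illustrative.
# '64512'  -> ASN(64512)                          # plain AS number
# '1.10'   -> ASN((1 << 16) + 10) == ASN(65546)   # "asdot" high.low notation
# '1.x'    -> ValueError('"1.x" is an invalid ASN')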
def ip (tokeniser):
if not tokeniser.tokens:
raise ValueError('an ip address is required')
value = tokeniser()
try:
return IP.create(value)
except (__HOLE__,ValueError,socket.error):
raise ValueError('"%s" is an invalid IP address' % value) | IndexError | dataset/ETHPy150Open Exa-Networks/exabgp/lib/exabgp/configuration/parser.py/ip |
def emit(self, record):
"""Emits logged message by delegating it
"""
try:
formated = self._serializable_record(record)
self._delegate_emit(formated)
except (KeyboardInterrupt, __HOLE__):
raise
# This should really catch every other exception!
except Exception: # pylint: disable=broad-except
self.handleError(record) | SystemExit | dataset/ETHPy150Open duerrp/pyexperiment/pyexperiment/Logger.py/MPRotLogHandler.emit |
def getOpenIDStore(filestore_path, table_prefix):
"""
Returns an OpenID association store object based on the database
engine chosen for this Django application.
* If no database engine is chosen, a filesystem-based store will
be used whose path is filestore_path.
* If a database engine is chosen, a store object for that database
type will be returned.
* If the chosen engine is not supported by the OpenID library,
raise ImproperlyConfigured.
* If a database store is used, this will create the tables
necessary to use it. The table names will be prefixed with
table_prefix. DO NOT use the same table prefix for both an
OpenID consumer and an OpenID server in the same database.
The result of this function should be passed to the Consumer
constructor as the store parameter.
"""
db_engine = settings.DATABASES['default']['ENGINE']
if not db_engine:
return FileOpenIDStore(filestore_path)
# Possible side-effect: create a database connection if one isn't
# already open.
connection.cursor()
# Create table names to specify for SQL-backed stores.
tablenames = {
'associations_table': table_prefix + 'openid_associations',
'nonces_table': table_prefix + 'openid_nonces',
}
types = {
'django.db.backends.postgresql_psycopg2': sqlstore.PostgreSQLStore,
'django.db.backends.mysql': sqlstore.MySQLStore,
'django.db.backends.sqlite3': sqlstore.SQLiteStore,
}
if db_engine not in types:
raise ImproperlyConfigured(
"Database engine %s not supported by OpenID library" % db_engine)
s = types[db_engine](connection.connection, **tablenames)
try:
s.createTables()
except (SystemExit, __HOLE__, MemoryError):
raise
except:
# XXX This is not the Right Way to do this, but because the
# underlying database implementation might differ in behavior
# at this point, we can't reliably catch the right
# exception(s) here. Ideally, the SQL store in the OpenID
# library would catch exceptions that it expects and fail
# silently, but that could be bad, too. More ideally, the SQL
# store would not attempt to create tables it knows already
# exists.
pass
return s | KeyboardInterrupt | dataset/ETHPy150Open necaris/python3-openid/examples/djopenid/util.py/getOpenIDStore |
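A hedged usage sketch for the helper above; the path and table prefix are illustrative.
# store = getOpenIDStore('/tmp/openid-filestore', 'consumer_')
# consumer = Consumer(request.session, store)   # per the docstring above
# With no 'default' database engine configured this is a FileOpenIDStore; otherwise it is a
# SQL-backed store using the tables 'consumer_openid_associations' and
# 'consumer_openid_nonces'.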
def _parseHeaders(self, header_file):
header_file.seek(0)
# Remove the status line from the beginning of the input
unused_http_status_line = header_file.readline()
lines = [line.strip() for line in header_file]
# and the blank line from the end
empty_line = lines.pop()
if empty_line:
raise HTTPError("No blank line at end of headers: %r" % (line,))
headers = {}
for line in lines:
try:
name, value = line.split(':', 1)
except __HOLE__:
raise HTTPError(
"Malformed HTTP header line in response: %r" % (line,))
value = value.strip()
# HTTP headers are case-insensitive
name = name.lower()
headers[name] = value
return headers | ValueError | dataset/ETHPy150Open CollabQ/CollabQ/openid/fetchers.py/CurlHTTPFetcher._parseHeaders |
def fetch(self, url, body=None, headers=None):
"""Perform an HTTP request
@raises Exception: Any exception that can be raised by httplib2
@see: C{L{HTTPFetcher.fetch}}
"""
if body:
method = 'POST'
else:
method = 'GET'
# httplib2 doesn't check to make sure that the URL's scheme is
# 'http' so we do it here.
if not (url.startswith('http://') or url.startswith('https://')):
raise ValueError('URL is not a HTTP URL: %r' % (url,))
httplib2_response, content = self.httplib2.request(
url, method, body=body, headers=headers)
# Translate the httplib2 response to our HTTP response abstraction
# When a 400 is returned, there is no "content-location"
# header set. This seems like a bug to me. I can't think of a
# case where we really care about the final URL when it is an
# error response, but being careful about it can't hurt.
try:
final_url = httplib2_response['content-location']
except __HOLE__:
# We're assuming that no redirects occurred
assert not httplib2_response.previous
# And this should never happen for a successful response
assert httplib2_response.status != 200
final_url = url
return HTTPResponse(
body=content,
final_url=final_url,
headers=dict(httplib2_response.items()),
status=httplib2_response.status,
) | KeyError | dataset/ETHPy150Open CollabQ/CollabQ/openid/fetchers.py/HTTPLib2Fetcher.fetch |
def _get_shift_key(self, ctx):
try:
return self._shift_keys[ctx._id_obj]
except __HOLE__:
pass
# create the net shift of self and the context's shift set (applying
# self to the ctx's shift set so any re-shifted nodes take the values
# from the shift set).
net_shift_set = cython.declare(dict)
net_shift_set = dict(ctx._shift_set)
net_shift_set.update(self)
# get the shift key and add it to the cache
shift_key = _make_shift_key(net_shift_set)
self._shift_keys[ctx._id_obj] = shift_key
return shift_key | KeyError | dataset/ETHPy150Open manahl/mdf/mdf/context.py/ShiftSet._get_shift_key |
def _shift(self, shift_set_, cache_context=True):
"""
see shift - this is the implementation called from C, split
out so other C functions can call this directly
"""
# if there's nothing to shift return this context
if not shift_set_:
return self
# check the type of shift_set_ and construct a new ShiftSet if required
shift_set = cython.declare(ShiftSet)
if not isinstance(shift_set_, ShiftSet):
shift_set = ShiftSet(shift_set_)
else:
shift_set = shift_set_
# if a context already exists for this shift set then return it.
parent = cython.declare(MDFContext)
parent = self._parent or self
try:
shift_key = shift_set._get_shift_key(self)
return parent._shifted_cache[shift_key]
except __HOLE__:
pass
# return a new shifted context
return MDFContext(self._now,
_shift_parent=self,
_shift_set=shift_set,
_cache_shifted=cache_context) | KeyError | dataset/ETHPy150Open manahl/mdf/mdf/context.py/MDFContext._shift |
def is_shift_of(self, other):
"""
returns True if this context's shift set is a super-set of
the other context's shift set
"""
try:
return self._is_shift_of_cache[other._id_obj]
except __HOLE__:
pass
if self._parent is not (other._parent or other) \
and self is not other:
return False
self_key = self._shift_key_set
other_key = other._shift_key_set
# if the keys are the same both contexts are shifts of each other
if self_key == other_key:
self._is_shift_of_cache[other._id_obj] = True
other._is_shift_of_cache[self._id_obj] = True
return True
# if self is a shift of other then other may not be a shift of self
if self_key.issuperset(other_key):
self._is_shift_of_cache[other._id_obj] = True
other._is_shift_of_cache[self._id_obj] = False
return True
# self is not a shift of other, but other may be a shift of self
self._is_shift_of_cache[other._id_obj] = False
return False | KeyError | dataset/ETHPy150Open manahl/mdf/mdf/context.py/MDFContext.is_shift_of |
def _set_date(self, date):
"""
implementation for set_date
"""
# unwrap if necessary
if isinstance(date, NowNodeValue):
tmp = cython.declare(NowNodeValue)
tmp = date
date = tmp.value
if date == self._now:
return
# remember the date before it's changed
prev_date = self._now
# don't allow the date to be changed on a shifted context as it will
# potentially update values in the context below
if self._shift_set and _now_node not in self._shift_set:
raise Exception("Can't change the date on a shifted context")
ctx = cython.declare(MDFContext)
parent = cython.declare(MDFContext)
prev_ctx = cython.declare(MDFContext)
shifted_ctx = cython.declare(MDFContext)
cookie = cython.declare(Cookie)
node = cython.declare(MDFNodeBase)
# get the prev_ctx and thread_id by activating the current context
cookie = self._activate(None, None)
thread_id = cookie.thread_id
prev_ctx = cookie.prev_context
self._deactivate(cookie)
# get a list of all the contexts that are shifted by the same now
# node as this context
parent = self._parent if self._parent is not None else self
all_shifted_contexts = parent.get_shifted_contexts()
# create all_contexts as a list with the max number of elements potentially required
all_contexts = cython.declare(list)
all_contexts = [None] * (len(all_shifted_contexts) + 1)
all_contexts[0] = self
num_contexts = cython.declare(int)
num_contexts = 1
contexts_with_set_date_callbacks = cython.declare(list, [])
have_on_set_date_callbacks = cython.declare(int, False)
if self._has_nodes_requiring_set_date_callback:
contexts_with_set_date_callbacks.append(self)
have_on_set_date_callbacks = True
if _now_node in self._shift_set:
shifted_now = self._shift_set[_now_node]
for shifted_ctx in all_shifted_contexts:
shifted_shift_set = shifted_ctx.get_shift_set()
try:
if shifted_shift_set[_now_node] is shifted_now:
if shifted_ctx is not self:
all_contexts[num_contexts] = shifted_ctx
num_contexts += 1
if shifted_ctx._has_nodes_requiring_set_date_callback:
contexts_with_set_date_callbacks.append(shifted_ctx)
have_on_set_date_callbacks = True
except __HOLE__:
pass
else:
# now hasn't been shifted in this context so include all
# other contexts also not shifted by now
for shifted_ctx in all_shifted_contexts:
if _now_node not in shifted_ctx.get_shift_set():
if shifted_ctx is not self:
all_contexts[num_contexts] = shifted_ctx
num_contexts += 1
if shifted_ctx._has_nodes_requiring_set_date_callback:
contexts_with_set_date_callbacks.append(shifted_ctx)
have_on_set_date_callbacks = True
# trim any unused slots
all_contexts = all_contexts[:num_contexts]
# call the 'on_set_date' callback on any nodes needing it before
# actually setting the date on the context.
# If on_set_date returns True that indicates the node will become dirty
# once the date has been changed.
on_set_date_dirty = cython.declare(list)
on_set_date_dirty_count = cython.declare(int, 0)
if have_on_set_date_callbacks:
on_set_date_dirty = []
for ctx in contexts_with_set_date_callbacks:
# get the calling node and activate the context once and for all nodes
calling_node = ctx._get_calling_node(prev_ctx)
cookie = ctx._activate(prev_ctx, thread_id)
try:
# call the callbacks (this may call other nodes and so might
# modify the set of nodes with callbacks)
for node in ctx._nodes_requiring_set_date_callback.keys():
cqueue_push(ctx._node_eval_stack, node)
try:
with ctx._profile(node) as timer:
dirty = node.on_set_date(ctx, date)
if dirty:
on_set_date_dirty.append((node, ctx))
on_set_date_dirty_count += 1
finally:
cqueue_pop(ctx._node_eval_stack)
finally:
ctx._deactivate(cookie)
# now all the on_set_date callbacks have been called update the date
# for each context and mark any incrementally updated nodes as dirty.
for ctx in all_contexts:
# set now on the context
ctx._now = date
# mark any incrementally updated nodes as dirty
if ctx._has_incrementally_updated_nodes:
for node in ctx._incrementally_updated_nodes.iterkeys():
node.set_dirty(ctx, DIRTY_FLAGS_TIME)
# mark any nodes that indicated they would become dirty after calling 'on_set_date'
if on_set_date_dirty_count > 0:
for node, ctx in on_set_date_dirty:
node.set_dirty(ctx, DIRTY_FLAGS_TIME)
# set the now node value in the least shifted context
# (anything dependent on now will be dependent on
# it in this context so no need to touch it in the shifted contexts)
alt_ctx = _now_node.get_alt_context(self)
alt_ctx.set_value(_now_node, date)
if date < prev_date:
# if setting the date to a date in the past clear any incrementally
# updated nodes so they'll start from their initial values again
for ctx in all_contexts:
# clear any cached values for the incrementally updated nodes
for node in ctx._incrementally_updated_nodes.iterkeys():
node.clear_value(ctx)
# these will be re-established as the date is incremented
ctx._incrementally_updated_nodes.clear()
ctx._has_incrementally_updated_nodes = False
ctx._nodes_requiring_set_date_callback.clear()
ctx._has_nodes_requiring_set_date_callback = False
return
# Evaluate any nodes that have to be updated incrementally each timestep.
# They're marked as dirty first as they should be called every timestep
# regardless of whether anything underneath has changed.
#
# this is done in two phases, setting flags then updating so that
# if one node depends on another and causes it to get evaluated
# it doesn't get called twice.
# (the flags are already set in the loop previous to this one)
for ctx in all_contexts:
if not ctx._has_incrementally_updated_nodes:
continue
# get the calling node and activate the context once and for all nodes
calling_node = ctx._get_calling_node(prev_ctx)
cookie = ctx._activate(prev_ctx, thread_id)
try:
# get the value to trigger the update
for node in ctx._incrementally_updated_nodes.keys():
ctx._get_node_value(node, calling_node, ctx, thread_id)
finally:
ctx._deactivate(cookie) | KeyError | dataset/ETHPy150Open manahl/mdf/mdf/context.py/MDFContext._set_date |
def _activate(self, prev_ctx=None, thread_id=None):
"""set self as the current context"""
# activate this context if different from the previous context
thread_id = thread_id if thread_id is not None else PyThread_get_thread_ident()
if prev_ctx is None:
try:
prev_ctx = _current_contexts[thread_id]
except __HOLE__:
pass
_current_contexts[thread_id] = self
return Cookie(thread_id, prev_ctx) | KeyError | dataset/ETHPy150Open manahl/mdf/mdf/context.py/MDFContext._activate |
def _get_calling_node(self, prev_ctx=None):
if prev_ctx is None:
thread_id = PyThread_get_thread_ident()
try:
prev_ctx = _current_contexts[thread_id]
except __HOLE__:
prev_ctx = None
if prev_ctx is None:
prev_ctx = self
if len(prev_ctx._node_eval_stack) > 0:
return prev_ctx._node_eval_stack[-1]
return None | KeyError | dataset/ETHPy150Open manahl/mdf/mdf/context.py/MDFContext._get_calling_node |
def _get_current_context(thread_id=None):
"""returns the current context during node evaluation"""
if thread_id is None:
thread_id = PyThread_get_thread_ident()
try:
ctx = _current_contexts[thread_id]
except __HOLE__:
ctx = None
if ctx is None:
raise NoCurrentContextError()
return ctx | KeyError | dataset/ETHPy150Open manahl/mdf/mdf/context.py/_get_current_context |
def __init__ (self,logfile=None,loggername=None,level=logging.INFO):
# default is to locate loggername from the logfile if avail.
if not logfile:
#loggername='console'
#handler=logging.StreamHandler()
#handler.setFormatter(logging.Formatter("%(levelname)s %(message)s"))
logfile = "/var/log/sfa.log"
if not loggername:
loggername=os.path.basename(logfile)
try:
handler=logging.handlers.RotatingFileHandler(logfile,maxBytes=1000000, backupCount=5)
except __HOLE__:
# This is usually a permissions error because the file is
# owned by root, but httpd is trying to access it.
tmplogfile=os.getenv("TMPDIR", "/tmp") + os.path.sep + os.path.basename(logfile)
# In strange uses, 2 users on same machine might use same code,
# meaning they would clobber each other's files
# We could (a) rename the tmplogfile, or (b)
# just log to the console in that case.
# Here we default to the console.
if os.path.exists(tmplogfile) and not os.access(tmplogfile, os.W_OK):
loggername = loggername + "-console"
handler = logging.StreamHandler()
else:
handler=logging.handlers.RotatingFileHandler(tmplogfile,maxBytes=1000000, backupCount=5)
handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
self.logger=logging.getLogger(loggername)
self.logger.setLevel(level)
# check if logger already has the handler we're about to add
handler_exists = False
for l_handler in self.logger.handlers:
            # compare via getattr so stream handlers (no baseFilename) don't raise
            if getattr(l_handler, 'baseFilename', None) == getattr(handler, 'baseFilename', None) \
               and l_handler.level == handler.level:
                handler_exists = True
if not handler_exists:
self.logger.addHandler(handler)
self.loggername=loggername | IOError | dataset/ETHPy150Open fp7-ofelia/ocf/ofam/src/src/ext/sfa/util/sfalogging.py/_SfaLogger.__init__ |
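
The interesting part of this constructor is the handler fallback chain: the requested log file, then a file under TMPDIR, then the console when even that is not writable. A condensed sketch of just that chain as a standalone helper; the helper name is illustrative, not part of the sfa API.

import logging
import logging.handlers
import os

def _choose_handler(logfile, max_bytes=1000000, backups=5):
    """Pick a log handler: requested file, else a TMPDIR file, else the console."""
    try:
        return logging.handlers.RotatingFileHandler(
            logfile, maxBytes=max_bytes, backupCount=backups)
    except IOError:
        # usually a permissions error on the requested file
        tmplogfile = os.path.join(os.getenv("TMPDIR", "/tmp"),
                                  os.path.basename(logfile))
        if os.path.exists(tmplogfile) and not os.access(tmplogfile, os.W_OK):
            return logging.StreamHandler()
        return logging.handlers.RotatingFileHandler(
            tmplogfile, maxBytes=max_bytes, backupCount=backups)
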
def pygments_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
try:
lexer = get_lexer_by_name(arguments[0])
except __HOLE__:
# no lexer found - use the text one instead of an exception
lexer = TextLexer()
# take an arbitrary option if more than one is given
formatter = options and VARIANTS[options.keys()[0]] or DEFAULT
parsed = highlight(u'\n'.join(content), lexer, formatter)
return [nodes.raw('', parsed, format='html')] | ValueError | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Pygments-1.3.1/external/rst-directive-old.py/pygments_directive |
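
Catching ValueError works above because get_lexer_by_name raises pygments.util.ClassNotFound, which is a subclass of ValueError. A small hedged sketch of the same fallback outside of docutils:

from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import TextLexer, get_lexer_by_name
from pygments.util import ClassNotFound

def highlight_or_plain(source, language):
    """Render source as HTML, falling back to plain text for unknown languages."""
    try:
        lexer = get_lexer_by_name(language)
    except ClassNotFound:  # equivalently: except ValueError
        # no lexer found - fall back to the plain-text lexer
        lexer = TextLexer()
    return highlight(source, lexer, HtmlFormatter(noclasses=True))

# print(highlight_or_plain("print('hi')", "python"))
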
def item(title, url, children=None, url_as_pattern=True, hint='', alias='', description='',
in_menu=True, in_breadcrumbs=True, in_sitetree=True,
access_loggedin=False, access_guest=False,
access_by_perms=None, perms_mode_all=True, **kwargs):
"""Dynamically creates and returns a sitetree item object.
    :param str title: item title shown to site visitors
    :param str url: item URL, or the name of a named URL if `url_as_pattern` is True
    :param list, set children: a list of children for the tree item. Children should also be created by the `item` function.
:param bool url_as_pattern: consider URL as a name of a named URL
:param str hint: hints are usually shown to users
:param str alias: item name to address it from templates
:param str description: additional information on item (usually is not shown to users)
:param bool in_menu: show this item in menus
:param bool in_breadcrumbs: show this item in breadcrumbs
:param bool in_sitetree: show this item in sitetrees
:param bool access_loggedin: show item to logged in users only
:param bool access_guest: show item to guest users only
:param list, str, int, Permission access_by_perms: restrict access to users with these permissions
    :param bool perms_mode_all: permissions set interpretation rule:
        True - user should have all of the chosen permissions;
        False - user should have any of the chosen permissions.
    :return: a dynamic (unsaved) tree item model instance
"""
item_obj = get_tree_item_model()(title=title, url=url, urlaspattern=url_as_pattern,
hint=hint, alias=alias, description=description, inmenu=in_menu,
insitetree=in_sitetree, inbreadcrumbs=in_breadcrumbs,
access_loggedin=access_loggedin, access_guest=access_guest, **kwargs)
item_obj.id = generate_id_for(item_obj)
item_obj.is_dynamic = True
item_obj.dynamic_children = []
cleaned_permissions = []
if access_by_perms:
# Make permissions a list if currently a single object
if not isinstance(access_by_perms, list):
access_by_perms = [access_by_perms]
for perm in access_by_perms:
if isinstance(perm, six.string_types):
# Get permission object from string
try:
app, codename = perm.split('.')
except __HOLE__:
raise ValueError(
'Wrong permission string format: supplied - `%s`; '
'expected - `<app_name>.<permission_name>`.' % perm)
try:
perm = Permission.objects.get(codename=codename, content_type__app_label=app)
except Permission.DoesNotExist:
raise ValueError('Permission `%s.%s` does not exist.' % (app, codename))
elif not isinstance(perm, (int, Permission)):
raise ValueError('Permissions must be given as strings, ints, or `Permission` instances.')
cleaned_permissions.append(perm)
item_obj.permissions = cleaned_permissions or []
item_obj.access_perm_type = item_obj.PERM_TYPE_ALL if perms_mode_all else item_obj.PERM_TYPE_ANY
if item_obj.permissions:
item_obj.access_restricted = True
if children is not None:
for child in children:
child.parent = item_obj
item_obj.dynamic_children.append(child)
return item_obj | ValueError | dataset/ETHPy150Open idlesign/django-sitetree/sitetree/utils.py/item |
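
A hedged usage sketch of `item` in an app-level sitetrees.py module follows; the companion `tree()` helper and the URL names used here are assumptions for illustration and may need adjusting to the project.

# Hedged usage sketch (typically placed in an app's sitetrees.py); the tree()
# helper and URL names ('index', 'users_list', 'login') are assumptions.
from sitetree.utils import item, tree

sitetrees = (
    tree('main', items=[
        item('Home', 'index', children=[
            # visible only to users holding the auth.change_user permission
            item('Users', 'users_list', access_by_perms='auth.change_user'),
            # visible to anonymous visitors only
            item('Sign in', 'login', access_guest=True),
        ]),
    ]),
)
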