function (stringlengths 79–138k) | label (stringclasses 20 values) | info (stringlengths 42–261) |
---|---|---|
def _update_variables(self, now):
# we must update the local variables on every cycle
self.gmt = time.gmtime(now)
now = time.localtime(now)
if now[3] < 12: self.ampm='(AM|am)'
else: self.ampm='(PM|pm)'
self.jan1 = time.localtime(time.mktime((now[0], 1, 1, 0, 0, 0, 0, 1, 0)))
try:
if now[8]: self.tz = time.tzname[1]
else: self.tz = time.tzname[0]
except __HOLE__:
self.tz = ''
if now[3] > 12: self.clock12 = now[3] - 12
elif now[3] > 0: self.clock12 = now[3]
else: self.clock12 = 12
self.now = now | AttributeError | dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_strftime.py/StrftimeTest._update_variables |
def setUp(self):
try:
import java
java.util.Locale.setDefault(java.util.Locale.US)
except __HOLE__:
import locale
locale.setlocale(locale.LC_TIME, 'C') | ImportError | dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_strftime.py/StrftimeTest.setUp |
def strftest1(self, now):
if test_support.verbose:
print "strftime test for", time.ctime(now)
now = self.now
# Make sure any characters that could be taken as regex syntax are
# escaped in escapestr()
expectations = (
('%a', calendar.day_abbr[now[6]], 'abbreviated weekday name'),
('%A', calendar.day_name[now[6]], 'full weekday name'),
('%b', calendar.month_abbr[now[1]], 'abbreviated month name'),
('%B', calendar.month_name[now[1]], 'full month name'),
# %c see below
('%d', '%02d' % now[2], 'day of month as number (00-31)'),
('%H', '%02d' % now[3], 'hour (00-23)'),
('%I', '%02d' % self.clock12, 'hour (01-12)'),
('%j', '%03d' % now[7], 'julian day (001-366)'),
('%m', '%02d' % now[1], 'month as number (01-12)'),
('%M', '%02d' % now[4], 'minute, (00-59)'),
('%p', self.ampm, 'AM or PM as appropriate'),
('%S', '%02d' % now[5], 'seconds of current time (00-60)'),
('%U', '%02d' % ((now[7] + self.jan1[6])//7),
'week number of the year (Sun 1st)'),
('%w', '0?%d' % ((1+now[6]) % 7), 'weekday as a number (Sun 1st)'),
('%W', '%02d' % ((now[7] + (self.jan1[6] - 1)%7)//7),
'week number of the year (Mon 1st)'),
# %x see below
('%X', '%02d:%02d:%02d' % (now[3], now[4], now[5]), '%H:%M:%S'),
('%y', '%02d' % (now[0]%100), 'year without century'),
('%Y', '%d' % now[0], 'year with century'),
# %Z see below
('%%', '%', 'single percent sign'),
)
for e in expectations:
# mustn't raise a value error
try:
result = time.strftime(e[0], now)
except __HOLE__, error:
self.fail("strftime '%s' format gave error: %s" % (e[0], error))
if re.match(escapestr(e[1], self.ampm), result):
continue
if not result or result[0] == '%':
self.fail("strftime does not support standard '%s' format (%s)"
% (e[0], e[2]))
else:
self.fail("Conflict for %s (%s): expected %s, but got %s"
% (e[0], e[2], e[1], result)) | ValueError | dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_strftime.py/StrftimeTest.strftest1 |
def strftest2(self, now):
nowsecs = str(long(now))[:-1]
now = self.now
nonstandard_expectations = (
# These are standard but don't have predictable output
('%c', fixasctime(time.asctime(now)), 'near-asctime() format'),
('%x', '%02d/%02d/%02d' % (now[1], now[2], (now[0]%100)),
'%m/%d/%y %H:%M:%S'),
('%Z', '%s' % self.tz, 'time zone name'),
# These are some platform specific extensions
('%D', '%02d/%02d/%02d' % (now[1], now[2], (now[0]%100)), 'mm/dd/yy'),
('%e', '%2d' % now[2], 'day of month as number, blank padded ( 0-31)'),
('%h', calendar.month_abbr[now[1]], 'abbreviated month name'),
('%k', '%2d' % now[3], 'hour, blank padded ( 0-23)'),
('%n', '\n', 'newline character'),
('%r', '%02d:%02d:%02d %s' % (self.clock12, now[4], now[5], self.ampm),
'%I:%M:%S %p'),
('%R', '%02d:%02d' % (now[3], now[4]), '%H:%M'),
('%s', nowsecs, 'seconds since the Epoch in UCT'),
('%t', '\t', 'tab character'),
('%T', '%02d:%02d:%02d' % (now[3], now[4], now[5]), '%H:%M:%S'),
('%3y', '%03d' % (now[0]%100),
'year without century rendered using fieldwidth'),
)
for e in nonstandard_expectations:
try:
result = time.strftime(e[0], now)
except __HOLE__, result:
msg = "Error for nonstandard '%s' format (%s): %s" % \
(e[0], e[2], str(result))
if test_support.verbose:
print msg
continue
if re.match(escapestr(e[1], self.ampm), result):
if test_support.verbose:
print "Supports nonstandard '%s' format (%s)" % (e[0], e[2])
elif not result or result[0] == '%':
if test_support.verbose:
print "Does not appear to support '%s' format (%s)" % \
(e[0], e[2])
else:
if test_support.verbose:
print "Conflict for nonstandard '%s' format (%s):" % \
(e[0], e[2])
print " Expected %s, but got %s" % (e[1], result) | ValueError | dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_strftime.py/StrftimeTest.strftest2 |
def _abstract_atom_init(deftype, defvalue):
"""Return a constructor for an abstract `Atom` class."""
defitemsize = split_type(deftype)[1]
def __init__(self, itemsize=defitemsize, shape=(), dflt=defvalue):
assert self.kind in atom_map
try:
atomclass = atom_map[self.kind][itemsize]
except __HOLE__:
raise _invalid_itemsize_error(self.kind, itemsize,
atom_map[self.kind])
self.__class__ = atomclass
atomclass.__init__(self, shape, dflt)
return __init__ | KeyError | dataset/ETHPy150Open PyTables/PyTables/tables/atom.py/_abstract_atom_init |
def _normalize_shape(shape):
"""Check that the `shape` is safe to be used and return it as a tuple."""
if isinstance(shape, (int, numpy.integer, int)):
if shape < 1:
raise ValueError("shape value must be greater than 0: %d"
% shape)
shape = (shape,) # N is a shorthand for (N,)
try:
shape = tuple(shape)
except __HOLE__:
raise TypeError("shape must be an integer or sequence: %r"
% (shape,))
# XXX Get from HDF5 library if possible.
# HDF5 does not support ranks greater than 32
if len(shape) > 32:
raise ValueError(
"shapes with rank > 32 are not supported: %r" % (shape,))
return tuple(SizeType(s) for s in shape) | TypeError | dataset/ETHPy150Open PyTables/PyTables/tables/atom.py/_normalize_shape |
def _normalize_default(value, dtype):
"""Return `value` as a valid default of NumPy type `dtype`."""
# Create NumPy objects as defaults
# This is better in order to serialize them as attributes
if value is None:
value = 0
basedtype = dtype.base
try:
default = numpy.array(value, dtype=basedtype)
except __HOLE__:
array = numpy.array(value)
if array.shape != basedtype.shape:
raise
# Maybe nested dtype with "scalar" value.
default = numpy.array(value, dtype=basedtype.base)
# 0-dim arrays will be represented as NumPy scalars
# (PyTables attribute convention)
if default.shape == ():
default = default[()]
return default | ValueError | dataset/ETHPy150Open PyTables/PyTables/tables/atom.py/_normalize_default |
def _cmp_dispatcher(other_method_name):
"""Dispatch comparisons to a method of the *other* object.
Returns a new *rich comparison* method which dispatches calls to
the method `other_method_name` of the *other* object. If there is
no such method in the object, ``False`` is returned.
This is part of the implementation of a double dispatch pattern.
"""
def dispatched_cmp(self, other):
try:
other_method = getattr(other, other_method_name)
except __HOLE__:
return False
return other_method(self)
return dispatched_cmp
# Helper classes
# ============== | AttributeError | dataset/ETHPy150Open PyTables/PyTables/tables/atom.py/_cmp_dispatcher |
def _get_init_args(self):
"""Get a dictionary of instance constructor arguments.
This implementation works on classes which use the same names
for both constructor arguments and instance attributes.
"""
# @COMPATIBILITY: inspect.getargspec has been deprecated since
# Python 3.5
try:
# inspect.signature is new in Python 3.5
signature = inspect.signature(self.__init__)
except __HOLE__:
args = inspect.getargspec(self.__init__)[0]
else:
parameters = signature.parameters
args = [arg for arg, p in parameters.items()
if p.kind is p.POSITIONAL_OR_KEYWORD]
return dict((arg, getattr(self, arg)) for arg in args if arg != 'self') | AttributeError | dataset/ETHPy150Open PyTables/PyTables/tables/atom.py/Atom._get_init_args |
def _checkbase(self, base):
"""Check the `base` storage atom."""
if base.kind == 'enum':
raise TypeError("can not use an enumerated atom "
"as a storage atom: %r" % base)
# Check whether the storage atom can represent concrete values
# in the enumeration...
basedtype = base.dtype
pyvalues = [value for (name, value) in self.enum]
try:
npgenvalues = numpy.array(pyvalues)
except __HOLE__:
raise TypeError("concrete values are not uniformly-shaped")
try:
npvalues = numpy.array(npgenvalues, dtype=basedtype.base)
except ValueError:
raise TypeError("storage atom type is incompatible with "
"concrete values in the enumeration")
if npvalues.shape[1:] != basedtype.shape:
raise TypeError("storage atom shape does not match that of "
"concrete values in the enumeration")
if npvalues.tolist() != npgenvalues.tolist():
raise TypeError("storage atom type lacks precision for "
"concrete values in the enumeration")
# ...with some implementation limitations.
if not npvalues.dtype.kind in ['i', 'u']:
raise NotImplementedError("only integer concrete values "
"are supported for the moment, sorry")
if len(npvalues.shape) > 1:
raise NotImplementedError("only scalar concrete values "
"are supported for the moment, sorry") | ValueError | dataset/ETHPy150Open PyTables/PyTables/tables/atom.py/EnumAtom._checkbase |
def sendMetrics(self, stats, prefix):
for key, value in stats.iteritems():
try:
float(value)
except (__HOLE__, ValueError):
continue
if key in ('version', 'pid'):
continue
path = '%s.%s' % (prefix, key)
self.protocol.sendMetric(path, value, time.time()) | TypeError | dataset/ETHPy150Open mochi/vor/vor/beanstalk.py/BeanstalkGraphiteService.sendMetrics |
def autodiscover():
"""
Goes and imports the permissions submodule of every app in INSTALLED_APPS
to make sure the permission set classes are registered correctly.
"""
global LOADING
if LOADING:
return
LOADING = True
import imp
from django.conf import settings
for app in settings.INSTALLED_APPS:
try:
__import__(app)
app_path = sys.modules[app].__path__
except AttributeError:
continue
try:
imp.find_module('permissions', app_path)
except __HOLE__:
continue
__import__("%s.permissions" % app)
app_path = sys.modules["%s.permissions" % app]
LOADING = False | ImportError | dataset/ETHPy150Open jazzband/django-authority/authority/__init__.py/autodiscover |
def to_python(self, value):
try:
return ObjectId(urlsafe_b64decode(value))
except (InvalidId, ValueError, __HOLE__):
raise ValidationError() | TypeError | dataset/ETHPy150Open pandemicsyn/stalker/stalkerweb/stalkerweb/stutils.py/ObjectIDConverter.to_python |
def __str__(self):
try:
import cStringIO as StringIO
except __HOLE__:
import StringIO
output = StringIO.StringIO()
output.write(Exception.__str__(self))
# Check if we wrapped an exception and print that too.
if hasattr(self, 'exc_info'):
import traceback
output.write('\n\nOriginal ')
e = self.exc_info
traceback.print_exception(e[0], e[1], e[2], 500, output)
return output.getvalue() | ImportError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/template/__init__.py/TemplateSyntaxError.__str__ |
def parse(self, parse_until=None):
if parse_until is None: parse_until = []
nodelist = self.create_nodelist()
while self.tokens:
token = self.next_token()
if token.token_type == TOKEN_TEXT:
self.extend_nodelist(nodelist, TextNode(token.contents), token)
elif token.token_type == TOKEN_VAR:
if not token.contents:
self.empty_variable(token)
filter_expression = self.compile_filter(token.contents)
var_node = self.create_variable_node(filter_expression)
self.extend_nodelist(nodelist, var_node,token)
elif token.token_type == TOKEN_BLOCK:
if token.contents in parse_until:
# put token back on token list so calling code knows why it terminated
self.prepend_token(token)
return nodelist
try:
command = token.contents.split()[0]
except __HOLE__:
self.empty_block_tag(token)
# execute callback function for this tag and append resulting node
self.enter_command(command, token)
try:
compile_func = self.tags[command]
except KeyError:
self.invalid_block_tag(token, command)
try:
compiled_result = compile_func(self, token)
except TemplateSyntaxError, e:
if not self.compile_function_error(token, e):
raise
self.extend_nodelist(nodelist, compiled_result, token)
self.exit_command()
if parse_until:
self.unclosed_block_tag(parse_until)
return nodelist | IndexError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/template/__init__.py/Parser.parse |
def args_check(name, func, provided):
provided = list(provided)
plen = len(provided)
# Check to see if a decorator is providing the real function.
func = getattr(func, '_decorated_function', func)
args, varargs, varkw, defaults = getargspec(func)
# First argument is filter input.
args.pop(0)
if defaults:
nondefs = args[:-len(defaults)]
else:
nondefs = args
# Args without defaults must be provided.
try:
for arg in nondefs:
provided.pop(0)
except IndexError:
# Not enough
raise TemplateSyntaxError, "%s requires %d arguments, %d provided" % (name, len(nondefs), plen)
# Defaults can be overridden.
defaults = defaults and list(defaults) or []
try:
for parg in provided:
defaults.pop(0)
except __HOLE__:
# Too many.
raise TemplateSyntaxError, "%s requires %d arguments, %d provided" % (name, len(nondefs), plen)
return True | IndexError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/template/__init__.py/FilterExpression.args_check |
def resolve_variable(path, context):
"""
Returns the resolved variable, which may contain attribute syntax, within
the given context. The variable may be a hard-coded string (if it begins
and ends with single or double quote marks).
>>> c = {'article': {'section':'News'}}
>>> resolve_variable('article.section', c)
'News'
>>> resolve_variable('article', c)
{'section': 'News'}
>>> class AClass: pass
>>> c = AClass()
>>> c.article = AClass()
>>> c.article.section = 'News'
>>> resolve_variable('article.section', c)
'News'
(The example assumes VARIABLE_ATTRIBUTE_SEPARATOR is '.')
"""
if number_re.match(path):
number_type = '.' in path and float or int
current = number_type(path)
elif path[0] in ('"', "'") and path[0] == path[-1]:
current = path[1:-1]
else:
current = context
bits = path.split(VARIABLE_ATTRIBUTE_SEPARATOR)
while bits:
try: # dictionary lookup
current = current[bits[0]]
except (TypeError, __HOLE__, KeyError):
try: # attribute lookup
current = getattr(current, bits[0])
if callable(current):
if getattr(current, 'alters_data', False):
current = settings.TEMPLATE_STRING_IF_INVALID
else:
try: # method call (assuming no args required)
current = current()
except TypeError: # arguments *were* required
# GOTCHA: This will also catch any TypeError
# raised in the function itself.
current = settings.TEMPLATE_STRING_IF_INVALID # invalid method call
except Exception, e:
if getattr(e, 'silent_variable_failure', False):
current = settings.TEMPLATE_STRING_IF_INVALID
else:
raise
except (TypeError, AttributeError):
try: # list-index lookup
current = current[int(bits[0])]
except (IndexError, # list index out of range
ValueError, # invalid literal for int()
KeyError, # current is a dict without `int(bits[0])` key
TypeError, # unsubscriptable object
):
raise VariableDoesNotExist("Failed lookup for key [%s] in %r", (bits[0], current)) # missing attribute
except Exception, e:
if getattr(e, 'silent_variable_failure', False):
current = settings.TEMPLATE_STRING_IF_INVALID
else:
raise
del bits[0]
return current | AttributeError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/template/__init__.py/resolve_variable |
def get_library(module_name):
lib = libraries.get(module_name, None)
if not lib:
try:
mod = __import__(module_name, {}, {}, [''])
except __HOLE__, e:
raise InvalidTemplateLibrary, "Could not load template library from %s, %s" % (module_name, e)
try:
lib = mod.register
libraries[module_name] = lib
except AttributeError:
raise InvalidTemplateLibrary, "Template library %s does not have a variable named 'register'" % module_name
return lib | ImportError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/template/__init__.py/get_library |
def RAW(self, message):
try:
#Join up the message parts
if isinstance(message, (list, tuple)):
message = ' '.join(message)
#Raw Send but don't allow empty spam
if message is not None:
#Clean up messages
message = re.sub(r'[\r\n]', '', message).expandtabs(4).rstrip()
if len(message):
self.client.socket.writeline(message)
#Fire raw send event for debug if exists [] instead of ()
self.events['IRC_RAW_SEND'](self, message)
except __HOLE__:
#Somebody tried to raw a None or something just print exception
print("Bad RAW message: %r" % repr(message))
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_tb(exc_traceback)
# Set our nick | TypeError | dataset/ETHPy150Open facebook/pyaib/pyaib/irc.py/Context.RAW |
def retrieve(self, url, destination, callback=None):
self.size = 0
time.clock()
try: urllib.urlretrieve(url, destination, self.progress)
except __HOLE__:
print '\n~ Download cancelled'
print '~'
for i in range(5):
try:
os.remove(destination)
break
except:
time.sleep(.1)
else: raise
if callback: callback()
sys.exit()
print ''
return self.size | KeyboardInterrupt | dataset/ETHPy150Open eBay/restcommander/play-1.2.4/framework/pym/play/commands/modulesrepo.py/Downloader.retrieve |
def create_default_config(config_dir, detect_location=True):
"""Create a default configuration file in given configuration directory.
Return path to new config file if success, None if failed.
"""
config_path = os.path.join(config_dir, YAML_CONFIG_FILE)
info = {attr: default for attr, default, _, _ in DEFAULT_CONFIG}
location_info = detect_location and loc_util.detect_location_info()
if location_info:
if location_info.use_fahrenheit:
info[CONF_TEMPERATURE_UNIT] = 'F'
for attr, default, prop, _ in DEFAULT_CONFIG:
if prop is None:
continue
info[attr] = getattr(location_info, prop) or default
# Writing files with YAML does not create the most human readable results
# So we're hard coding a YAML template.
try:
with open(config_path, 'w') as config_file:
config_file.write("homeassistant:\n")
for attr, _, _, description in DEFAULT_CONFIG:
if info[attr] is None:
continue
elif description:
config_file.write(" # {}\n".format(description))
config_file.write(" {}: {}\n".format(attr, info[attr]))
config_file.write("\n")
for component, description in DEFAULT_COMPONENTS.items():
config_file.write("# {}\n".format(description))
config_file.write("{}\n\n".format(component))
return config_path
except __HOLE__:
print('Unable to create default configuration file', config_path)
return None | IOError | dataset/ETHPy150Open home-assistant/home-assistant/homeassistant/config.py/create_default_config |
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
try:
return self[name]
except __HOLE__:
raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, name)) | KeyError | dataset/ETHPy150Open samuel/kokki/kokki/utils.py/AttributeDictionary.__getattr__ |
def main():
try:
cmd = TCeleryCommand()
cmd.execute_from_commandline()
except __HOLE__:
pass | KeyboardInterrupt | dataset/ETHPy150Open mher/tornado-celery/tcelery/__main__.py/main |
def _get_container(self, thread_id, document_html, container, index):
if not document_html:
document_html = self.get_thread(thread_id).get("html")
if not document_html:
return None
tree = self.parse_document_html(document_html)
lists = list(tree.iter(container))
if not lists:
return None
try:
return lists[index]
except __HOLE__:
return None | IndexError | dataset/ETHPy150Open quip/quip-api/python/quip.py/QuipClient._get_container |
def modified_time(self, name):
try:
modified = self.__get_blob_properties(name)['last-modified']
except (__HOLE__, KeyError):
return super(AzureStorage, self).modified_time(name)
modified = time.strptime(modified, '%a, %d %b %Y %H:%M:%S %Z')
modified = datetime.fromtimestamp(mktime(modified))
return modified | TypeError | dataset/ETHPy150Open jschneier/django-storages/storages/backends/azure_storage.py/AzureStorage.modified_time |
@staticmethod
def parse_spec(opts, spec):
"""Parse the comma-separated key=value configuration from the gen spec.
Names and semantics were inspired by a subset of mcsoda parameters."""
cfg = {'cur-ops': 0,
'cur-gets': 0,
'cur-sets': 0,
'cur-items': 0,
'exit-after-creates': 0,
'max-items': 10000,
'min-value-size': 10,
'prefix': "",
'ratio-sets': 0.05,
'json': 0}
for kv in spec[len("gen:"):].split(','):
if kv:
k = kv.split('=')[0].strip()
v = kv.split('=')[1].strip()
try:
if k in cfg:
cfg[k] = type(cfg[k])(v)
else:
return "error: unknown workload gen parameter: %s" % (k), None
except __HOLE__:
return "error: could not parse value from: %s" % (kv), None
return 0, cfg | ValueError | dataset/ETHPy150Open membase/membase-cli/pump_gen.py/GenSource.parse_spec |
def output(self, **kwargs):
options = dict(self.options)
options['outfile'] = kwargs['outfile']
infiles = []
for infile in kwargs['content_meta']:
# type, full_filename, relative_filename
# In debug mode we use the full path so that in development we see changes without having to call
# collectstatic. This breaks the sourcemaps. In production, we want sourcemaps to work so we
# use relative path which will take files from `staticfiles` automatically.
if settings.DEBUG:
infiles.append(infile[1])
else:
infiles.append(infile[2])
options['infiles'] = ' '.join(f for f in infiles)
options['mapfile'] = kwargs['outfile'].replace('.js', '.map.js')
options['mapurl'] = '{}{}'.format(
settings.STATIC_URL, options['mapfile']
)
options['maproot'] = settings.STATIC_URL
self.cwd = kwargs['root_location']
try:
command = fstr(self.command).format(**options)
proc = subprocess.Popen(
command, shell=True, cwd=self.cwd, stdout=self.stdout,
stdin=self.stdin, stderr=self.stderr)
err = proc.communicate()
except (IOError, __HOLE__), e:
raise FilterError('Unable to apply %s (%r): %s' %
(self.__class__.__name__, self.command, e))
else:
# If the process doesn't return a 0 success code, throw an error
if proc.wait() != 0:
if not err:
err = ('Unable to apply %s (%s)' %
(self.__class__.__name__, self.command))
raise FilterError(err)
if self.verbose:
self.logger.debug(err) | OSError | dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/style/uglify.py/UglifySourcemapFilter.output |
@register.filter
def JSON(obj):
# json.dumps does not properly convert QueryDict array parameter to json
if isinstance(obj, QueryDict):
obj = dict(obj)
try:
return mark_safe(escape_script_tags(json.dumps(obj, default=json_handler)))
except __HOLE__ as e:
msg = ("Unserializable data was sent to the `|JSON` template tag. "
"If DEBUG is off, Django will silently swallow this error. "
"{}".format(e.message))
soft_assert(notify_admins=True)(False, msg)
raise e | TypeError | dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/hqwebapp/templatetags/hq_shared_tags.py/JSON |
@register.filter
def BOOL(obj):
try:
obj = obj.to_json()
except __HOLE__:
pass
return 'true' if obj else 'false' | AttributeError | dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/hqwebapp/templatetags/hq_shared_tags.py/BOOL |
@register.tag(name='captureas')
def do_captureas(parser, token):
"""
Assign to a context variable from within a template
{% captureas my_context_var %}<!-- anything -->{% endcaptureas %}
<h1>Nice job capturing {{ my_context_var }}</h1>
"""
try:
tag_name, args = token.contents.split(None, 1)
except __HOLE__:
raise template.TemplateSyntaxError("'captureas' node requires a "
"variable name.")
nodelist = parser.parse(('endcaptureas',))
parser.delete_first_token()
return CaptureasNode(nodelist, args) | ValueError | dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/hqwebapp/templatetags/hq_shared_tags.py/do_captureas |
@register.simple_tag
def maintenance_alert():
try:
alert = (MaintenanceAlert.objects
.filter(active=True)
.order_by('-modified'))[0]
except __HOLE__:
return ''
else:
return format_html(
'<div class="alert alert-warning" style="text-align: center; margin-bottom: 0;">{}</div>',
mark_safe(alert.html),
) | IndexError | dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/hqwebapp/templatetags/hq_shared_tags.py/maintenance_alert |
def _transstat(status, grouppath, dictpath, line):
"""Executes processing steps when reading a line"""
if status == 0:
raise MTLParseError(
"Status should not be '%s' after reading line:\n%s"
% (STATUSCODE[status], line))
elif status == 1:
currentdict = dictpath[-1]
currentgroup = _getgroupname(line)
grouppath.append(currentgroup)
currentdict[currentgroup] = {}
dictpath.append(currentdict[currentgroup])
elif status == 2:
currentdict = dictpath[-1]
newkey, newval = _getmetadataitem(line)
# USGS has started quoting the scene center time. If this
# happens strip quotes before post processing.
if newkey == 'SCENE_CENTER_TIME' and newval.startswith('"') \
and newval.endswith('"'):
logging.warning('Strip quotes off SCENE_CENTER_TIME.')
newval = newval[1:-1]
currentdict[newkey] = _postprocess(newval)
elif status == 3:
oldgroup = _getendgroupname(line)
if oldgroup != grouppath[-1]:
raise MTLParseError(
"Reached line '%s' while reading group '%s'."
% (line.strip(), grouppath[-1]))
del grouppath[-1]
del dictpath[-1]
try:
currentgroup = grouppath[-1]
except __HOLE__:
currentgroup = None
elif status == 4:
if grouppath:
raise MTLParseError(
"Reached end before end of group '%s'" % grouppath[-1])
return grouppath, dictpath
# Identifying data type of a metadata item and | IndexError | dataset/ETHPy150Open landsat-pds/landsat_ingestor/ingestor/mtlutils.py/_transstat |
def _postprocess(valuestr):
"""
Takes value as str, returns str, int, float, date, datetime, or time
"""
# USGS has started quoting time sometimes. Grr, strip quotes in this case
intpattern = re.compile(r'^\-?\d+$')
floatpattern = re.compile(r'^\-?\d+\.\d+(E[+-]?\d\d+)?$')
datedtpattern = '%Y-%m-%d'
datedttimepattern = '%Y-%m-%dT%H:%M:%SZ'
timedtpattern = '%H:%M:%S.%f'
timepattern = re.compile(r'^\d{2}:\d{2}:\d{2}(\.\d{6})?')
if valuestr.startswith('"') and valuestr.endswith('"'):
# it's a string
return valuestr[1:-1]
elif re.match(intpattern, valuestr):
# it's an integer
return int(valuestr)
elif re.match(floatpattern, valuestr):
# floating point number
return float(valuestr)
# now let's try the datetime objects; throws exception if it doesn't match
try:
return datetime.datetime.strptime(valuestr, datedtpattern).date()
except __HOLE__:
pass
try:
return datetime.datetime.strptime(valuestr, datedttimepattern)
except ValueError:
pass
# time parsing is complicated: Python's datetime module only accepts
# fractions of a second only up to 6 digits
mat = re.match(timepattern, valuestr)
if mat:
test = mat.group(0)
try:
return datetime.datetime.strptime(test, timedtpattern).time()
except ValueError:
pass
# If we get here, we still haven't returned anything.
logging.info(
"The value %s couldn't be parsed as " % valuestr
+ "int, float, date, time, datetime. Returning it as string.")
return valuestr | ValueError | dataset/ETHPy150Open landsat-pds/landsat_ingestor/ingestor/mtlutils.py/_postprocess |
def check_honeypot(request, form):
"""
Make sure that the hidden form field is empty, using django-honeypot.
"""
try:
from honeypot.decorators import verify_honeypot_value
return verify_honeypot_value(request, '') is None
except __HOLE__: # pragma: no cover
return True | ImportError | dataset/ETHPy150Open zsiciarz/django-envelope/envelope/spam_filters.py/check_honeypot |
def stylesheet_call(self, path):
"""Return code to reference or embed stylesheet file `path`"""
if self.settings.embed_stylesheet:
try:
content = io.FileInput(source_path=path,
encoding='utf-8').read()
self.settings.record_dependencies.add(path)
except __HOLE__ as err:
msg = "Cannot embed stylesheet '%s': %s." % (
path, SafeString(err.strerror))
self.document.reporter.error(msg)
return '<--- %s --->\n' % msg
return self.embedded_stylesheet % content
# else link to style file:
if self.settings.stylesheet_path:
# adapt path relative to output (cf. config.html#stylesheet-path)
path = utils.relative_path(self.settings._destination, path)
return self.stylesheet_link % self.encode(path) | IOError | dataset/ETHPy150Open zackw/header-survey/sphinx/ext/html5_output.py/BaseTranslator.stylesheet_call |
def set_class_on_child(self, node, class_, index=0):
"""
Set class `class_` on the visible child no. index of `node`.
Do nothing if node has fewer children than `index`.
"""
children = [n for n in node if not isinstance(n, nodes.Invisible)]
try:
child = children[index]
except __HOLE__:
return
child['classes'].append(class_) | IndexError | dataset/ETHPy150Open zackw/header-survey/sphinx/ext/html5_output.py/BaseTranslator.set_class_on_child |
def visit_image(self, node):
atts = {}
uri = node['uri']
# SVG works in <img> now
# place SWF images in an <object> element
types = {'.swf': 'application/x-shockwave-flash'}
ext = os.path.splitext(uri)[1].lower()
if ext == '.swf':
atts['data'] = uri
atts['type'] = types[ext]
else:
atts['src'] = uri
atts['alt'] = node.get('alt', uri)
# image size
if 'width' in node:
atts['width'] = node['width']
if 'height' in node:
atts['height'] = node['height']
if 'scale' in node:
if (PIL and not ('width' in node and 'height' in node)
and self.settings.file_insertion_enabled):
imagepath = urllib.request.url2pathname(uri)
try:
img = PIL.Image.open(
imagepath.encode(sys.getfilesystemencoding()))
except (__HOLE__, UnicodeEncodeError):
pass # TODO: warn?
else:
self.settings.record_dependencies.add(
imagepath.replace('\\', '/'))
if 'width' not in atts:
atts['width'] = str(img.size[0])
if 'height' not in atts:
atts['height'] = str(img.size[1])
del img
for att_name in 'width', 'height':
if att_name in atts:
match = re.match(r'([0-9.]+)(\S*)$', atts[att_name])
assert match
atts[att_name] = '%s%s' % (
float(match.group(1)) * (float(node['scale']) / 100),
match.group(2))
style = []
for att_name in 'width', 'height':
if att_name in atts:
if re.match(r'^[0-9.]+$', atts[att_name]):
# Interpret unitless values as pixels.
atts[att_name] += 'px'
style.append('%s: %s;' % (att_name, atts[att_name]))
del atts[att_name]
if style:
atts['style'] = ' '.join(style)
if (isinstance(node.parent, nodes.TextElement) or
(isinstance(node.parent, nodes.reference) and
not isinstance(node.parent.parent, nodes.TextElement))):
# Inline context or surrounded by <a>...</a>.
suffix = ''
else:
suffix = '\n'
if 'align' in node:
atts['class'] = 'align-%s' % node['align']
self.context.append('')
if ext == '.swf': # place in an object element,
# do NOT use an empty tag: incorrect rendering in browsers
self.body.append(self.starttag(node, 'object', suffix, **atts) +
node.get('alt', uri) + '</object>' + suffix)
else:
self.body.append(self.emptytag(node, 'img', suffix, **atts)) | IOError | dataset/ETHPy150Open zackw/header-survey/sphinx/ext/html5_output.py/BaseTranslator.visit_image |
def visit_image(self, node):
olduri = node['uri']
# rewrite the URI if the environment knows about it
if olduri in self.builder.images:
node['uri'] = posixpath.join(self.builder.imgpath,
self.builder.images[olduri])
if 'scale' in node:
# Try to figure out image height and width. Docutils does that too,
# but it tries the final file name, which does not necessarily exist
# yet at the time the HTML file is written.
if PIL and not ('width' in node and 'height' in node):
try:
im = PIL.Image.open(os.path.join(self.builder.srcdir,
olduri))
except (__HOLE__, # Source image can't be found or opened
UnicodeError): # PIL doesn't like Unicode paths.
pass
else:
if 'width' not in node:
node['width'] = str(im.size[0])
if 'height' not in node:
node['height'] = str(im.size[1])
del im
BaseTranslator.visit_image(self, node) | IOError | dataset/ETHPy150Open zackw/header-survey/sphinx/ext/html5_output.py/HTML5Translator.visit_image |
def RetrieveIPInfo(self, ip):
if not ip:
return (IPInfo.UNKNOWN, "No ip information.")
ip_str = utils.SmartStr(ip)
try:
return self.cache.Get(ip_str)
except KeyError:
pass
try:
ip = ipaddr.IPAddress(ip_str)
except __HOLE__:
return (IPInfo.UNKNOWN, "No ip information.")
if ip.version == 6:
res = self.RetrieveIP6Info(ip)
else:
res = self.RetrieveIP4Info(ip)
self.cache.Put(ip_str, res)
return res | ValueError | dataset/ETHPy150Open google/grr/grr/lib/ip_resolver.py/IPResolver.RetrieveIPInfo |
def create(self, **metadata):
metadata['created_at'] = NOW_GLANCE_FORMAT
metadata['updated_at'] = NOW_GLANCE_FORMAT
self._images.append(FakeImage(metadata))
try:
image_id = str(metadata['id'])
except __HOLE__:
# auto-generate an id if one wasn't provided
image_id = str(len(self._images))
self._images[-1].id = image_id
return self._images[-1] | KeyError | dataset/ETHPy150Open openstack/ironic/ironic/tests/unit/stubs.py/StubGlanceClient.create |
def __getattr__(self, key):
try:
return self.__dict__['raw'][key]
except __HOLE__:
raise AttributeError(key) | KeyError | dataset/ETHPy150Open openstack/ironic/ironic/tests/unit/stubs.py/FakeImage.__getattr__ |
def __setattr__(self, key, value):
try:
self.__dict__['raw'][key] = value
except __HOLE__:
raise AttributeError(key) | KeyError | dataset/ETHPy150Open openstack/ironic/ironic/tests/unit/stubs.py/FakeImage.__setattr__ |
def __exit__(self, exc_type, exc_value, tb):
if exc_type is None:
try:
exc_name = self.expected.__name__
except __HOLE__:
exc_name = str(self.expected)
raise self.failureException(
"{0} not raised".format(exc_name))
if not issubclass(exc_type, self.expected):
# let unexpected exceptions pass through
return False
self.exception = exc_value # store for later retrieval
if self.expected_regexp is None:
return True
expected_regexp = self.expected_regexp
if not expected_regexp.search(str(exc_value)):
raise self.failureException(
'"%s" does not match "%s"' %
(expected_regexp.pattern, str(exc_value))
)
return True | AttributeError | dataset/ETHPy150Open wbond/oscrypto/tests/_unittest_compat.py/_AssertRaisesContext.__exit__ |
def handle_noargs(self, **options):
center = options['center']
pattern = options['pattern']
size = options['size']
speed = options['speed']
steps = options['steps']
wrap = options['wrap']
if pattern is None:
states = [[None] * size] * size
else:
states = self.parse_pattern(pattern, size, center)
clients = [run(row, col, size, wrap, speed, steps, states[row][col])
for row in range(size) for col in range(size)]
try:
asyncio.get_event_loop().run_until_complete(reset(size))
asyncio.get_event_loop().run_until_complete(asyncio.wait(clients))
except __HOLE__:
pass | KeyboardInterrupt | dataset/ETHPy150Open aaugustin/django-c10k-demo/gameoflife/management/commands/gameoflife.py/Command.handle_noargs |
def run(self):
self.sock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
self.sock.bind(('localhost',self.port))
except:
# If I can't bind, there is nothing I can do
return
self.sock.listen(5)
while True:
s, address = self.sock.accept()
print("Conection from %s" % str(address))
realio = sys.stdout, sys.stdin, sys.stderr
socketio = RtDebugger(s, realio)
sys.stdout,sys.stdin,sys.stderr = socketio,socketio,socketio
try:
try:
code.interact(banner=self.banner, local = {
'list' : servers, 'servers' : servers, 'get_server' : get_server, 'get' : get_server
})
finally:
sys.stdout, sys.stdin, sys.stderr = realio
except Exception as e:
print(e)
except __HOLE__ as e:
print(e)
try:
s.close()
except:
pass | SystemExit | dataset/ETHPy150Open weblabdeusto/weblabdeusto/server/src/voodoo/rt_debugger.py/Debugger.run |
def pgcli_line_magic(line):
_logger.debug('pgcli magic called: %r', line)
parsed = sql.parse.parse(line, {})
conn = sql.connection.Connection.get(parsed['connection'])
try:
# A corresponding pgcli object already exists
pgcli = conn._pgcli
_logger.debug('Reusing existing pgcli')
except __HOLE__:
# I can't figure out how to get the underylying psycopg2 connection
# from the sqlalchemy connection, so just grab the url and make a
# new connection
pgcli = PGCli()
u = conn.session.engine.url
_logger.debug('New pgcli: %r', str(u))
pgcli.connect(u.database, u.host, u.username, u.port, u.password)
conn._pgcli = pgcli
# For convenience, print the connection alias
print('Connected: {}'.format(conn.name))
try:
pgcli.run_cli()
except SystemExit:
pass
if not pgcli.query_history:
return
q = pgcli.query_history[-1]
if not q.successful:
_logger.debug('Unsuccessful query - ignoring')
return
if q.meta_changed or q.db_changed or q.path_changed:
_logger.debug('Dangerous query detected -- ignoring')
return
ipython = get_ipython()
return ipython.run_cell_magic('sql', line, q.query) | AttributeError | dataset/ETHPy150Open dbcli/pgcli/pgcli/magic.py/pgcli_line_magic |
def bayesdb_read_pandas_df(bdb, table, df, create=False, ifnotexists=False,
index=None):
"""Read data from a pandas dataframe into a table.
:param bayeslite.BayesDB bdb: BayesDB instance
:param str table: name of table
:param pandas.DataFrame df: pandas dataframe
:param bool create: if true and `table` does not exist, create it
:param bool ifnotexists: if true, and `create` is true` and `table`
exists, read data into it anyway
:param str index: name of column for index
If `index` is `None`, then the dataframe's index dtype must be
convertible to int64, and it is mapped to the table's rowids. If
the dataframe's index dtype is not convertible to int64, you must
specify `index` to give a primary key for the table.
"""
if not create:
if ifnotexists:
raise ValueError('Not creating table whether or not exists!')
column_names = [str(column) for column in df.columns]
if index is None:
create_column_names = column_names
insert_column_names = ['_rowid_'] + column_names
try:
key_index = df.index.astype('int64')
except __HOLE__:
raise ValueError('Must specify index name for non-integral index!')
else:
if index in df.columns:
raise ValueError('Index name collides with column name: %r'
% (index,))
create_column_names = [index] + column_names
insert_column_names = create_column_names
key_index = df.index
with bdb.savepoint():
if core.bayesdb_has_table(bdb, table):
if create and not ifnotexists:
raise ValueError('Table already exists: %s' % (repr(table),))
core.bayesdb_table_guarantee_columns(bdb, table)
unknown = set(name for name in create_column_names
if not core.bayesdb_table_has_column(bdb, table, name))
if len(unknown) != 0:
raise ValueError('Unknown columns: %s' % (list(unknown),))
elif create:
qccns = map(sqlite3_quote_name, create_column_names)
def column_schema(column_name, qcn):
if column_name == index:
return '%s NUMERIC PRIMARY KEY' % (qcn,)
else:
return '%s NUMERIC' % (qcn,)
schema = ','.join(column_schema(ccn, qccn)
for ccn, qccn in zip(create_column_names, qccns))
qt = sqlite3_quote_name(table)
bdb.sql_execute('CREATE TABLE %s(%s)' % (qt, schema))
core.bayesdb_table_guarantee_columns(bdb, table)
else:
raise ValueError('No such table: %s' % (repr(table),))
qt = sqlite3_quote_name(table)
qicns = map(sqlite3_quote_name, insert_column_names)
sql = 'INSERT INTO %s (%s) VALUES (%s)' % \
(qt, ','.join(qicns), ','.join('?' for _qicn in qicns))
for key, i in zip(key_index, df.index):
bdb.sql_execute(sql, (key,) + tuple(df.ix[i])) | ValueError | dataset/ETHPy150Open probcomp/bayeslite/src/read_pandas.py/bayesdb_read_pandas_df |
def save_rib(self, file_name, bg=0, resolution=None, resfactor=1.0):
"""Save scene to a RenderMan RIB file.
Keyword Arguments:
file_name -- File name to save to.
bg -- Optional background option. If 0 then no background is
saved. If non-None then a background is saved. If left alone
(defaults to None) it will result in a pop-up window asking
for yes/no.
resolution -- Specify the resolution of the generated image in
the form of a tuple (nx, ny).
resfactor -- The resolution factor which scales the resolution.
"""
if resolution is None:
# get present window size
Nx, Ny = self.render_window.size
else:
try:
Nx, Ny = resolution
except __HOLE__:
raise TypeError(
"Resolution (%s) should be a sequence with two elements"%resolution
)
if len(file_name) == 0:
return
f_pref = os.path.splitext(file_name)[0]
ex = tvtk.RIBExporter()
ex.size = int(resfactor*Nx), int(resfactor*Ny)
ex.file_prefix = f_pref
ex.texture_prefix = f_pref + "_tex"
self._lift()
ex.render_window = self._renwin
ex.background = bg
if VTK_VER[:3] in ['4.2', '4.4']:
# The vtkRIBExporter is broken in respect to VTK light
# types. Therefore we need to convert all lights into
# scene lights before the save and later convert them
# back.
########################################
# Internal functions
def x3to4(x):
# convert 3-vector to 4-vector (w=1 -> point in space)
return (x[0], x[1], x[2], 1.0 )
def x4to3(x):
# convert 4-vector to 3-vector
return (x[0], x[1], x[2])
def cameralight_transform(light, xform, light_type):
# transform light by 4x4 matrix xform
origin = x3to4(light.position)
focus = x3to4(light.focal_point)
neworigin = xform.multiply_point(origin)
newfocus = xform.multiply_point(focus)
light.position = x4to3(neworigin)
light.focal_point = x4to3(newfocus)
light.light_type = light_type
########################################
save_lights_type=[]
for light in self.light_manager.lights:
save_lights_type.append(light.source.light_type)
# Convert lights to scene lights.
cam = self.camera
xform = tvtk.Matrix4x4()
xform.deep_copy(cam.camera_light_transform_matrix)
for light in self.light_manager.lights:
cameralight_transform(light.source, xform, "scene_light")
# Write the RIB file.
self._exporter_write(ex)
# Now re-convert lights to camera lights.
xform.invert()
for i,light in enumerate(self.light_manager.lights):
cameralight_transform(light.source, xform, save_lights_type[i])
# Change the camera position. Otherwise VTK would render
# one broken frame after the export.
cam.roll(0.5)
cam.roll(-0.5)
else:
self._exporter_write(ex) | TypeError | dataset/ETHPy150Open enthought/mayavi/tvtk/pyface/tvtk_scene.py/TVTKScene.save_rib |
def cancel(self):
''' Cancels the callback if it was scheduled to be called.
'''
if self._is_triggered:
self._is_triggered = False
try:
self.clock._events[self.cid].remove(self)
except __HOLE__:
pass | ValueError | dataset/ETHPy150Open kivy/kivy/kivy/clock.py/ClockEvent.cancel |
def tick(self, curtime, remove):
# timeout happened ? (check also if we would miss from 5ms) this
# 5ms increase the accuracy if the timing of animation for
# example.
if curtime - self._last_dt < self.timeout - 0.005:
return True
# calculate current timediff for this event
self._dt = curtime - self._last_dt
self._last_dt = curtime
loop = self.loop
# get the callback
callback = self.get_callback()
if callback is None:
self._is_triggered = False
try:
remove(self)
except ValueError:
pass
return False
# if it's a trigger, allow to retrigger inside the callback
# we have to remove event here, otherwise, if we remove later, the user
# might have canceled in the callback and then re-triggered. That'd
# result in the removal of the re-trigger
if not loop:
self._is_triggered = False
try:
remove(self)
except ValueError:
pass
# call the callback
ret = callback(self._dt)
# if the user returns False explicitly, remove the event
if loop and ret is False:
self._is_triggered = False
try:
remove(self)
except __HOLE__:
pass
return False
return loop | ValueError | dataset/ETHPy150Open kivy/kivy/kivy/clock.py/ClockEvent.tick |
def check_dependencies(settings):
# Some of our checks require access to django.conf.settings, so
# tell Django about our settings.
#
from djblets.util.filesystem import is_exe_in_path
from reviewboard.admin.import_utils import has_module
dependency_error = settings.dependency_error
# Python 2.6
if sys.version_info[0] < 2 or \
(sys.version_info[0] == 2 and sys.version_info[1] < 6):
dependency_error('Python 2.6 or newer is required.')
# django-evolution
if not has_module('django_evolution'):
dependency_error("django_evolution is required.\n"
"http://code.google.com/p/django-evolution/")
# PIL
if not has_module('PIL') and not has_module('Image'):
dependency_error('The Python Imaging Library (Pillow or PIL) '
'is required.')
# The following checks are non-fatal warnings, since these dependencies are
# merely recommended, not required.
def dependency_warning(string):
sys.stderr.write('Warning: %s\n' % string)
global warnings_found
warnings_found += 1
if not has_module('pysvn') and not has_module('subvertpy'):
dependency_warning('Neither subvertpy nor pysvn found. '
'SVN integration will not work.')
if has_module('P4'):
try:
subprocess.call(['p4', '-h'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
except __HOLE__:
dependency_error('p4 command not found. Perforce integration '
'will not work.')
else:
dependency_warning('p4python (>=07.3) not found. Perforce integration '
'will not work.')
if not has_module('mercurial'):
dependency_warning('hg not found. Mercurial integration will not '
'work.')
if not has_module('bzrlib'):
dependency_warning('bzrlib not found. Bazaar integration will not '
'work.')
if not is_exe_in_path('cvs'):
dependency_warning('cvs binary not found. CVS integration '
'will not work.')
if not is_exe_in_path('git'):
dependency_warning('git binary not found. Git integration '
'will not work.')
if not is_exe_in_path('mtn'):
dependency_warning('mtn binary not found. Monotone integration '
'will not work.')
# Django will print warnings/errors for database backend modules and flup
# if the configuration requires it.
if warnings_found:
sys.stderr.write(settings.install_help)
sys.stderr.write('\n\n') | OSError | dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/manage.py/check_dependencies |
def run():
# Add the parent directory of 'manage.py' to the python path, so
# manage.py can be run from any directory.
# From http://www.djangosnippets.org/snippets/281/
sys.path.insert(0, dirname(dirname(abspath(__file__))))
# Python may insert the directory that manage.py is in into the Python
# path, which can cause conflicts with other modules (such as Python's
# "site" module). We don't want this, so it's important that we remove
# this directory from the path.
try:
sys.path.remove(dirname(abspath(__file__)))
except ValueError:
pass
if b'DJANGO_SETTINGS_MODULE' not in os.environ:
in_subprocess = False
os.environ.setdefault(b'DJANGO_SETTINGS_MODULE',
b'reviewboard.settings')
else:
in_subprocess = True
try:
from reviewboard import settings
except __HOLE__ as e:
sys.stderr.write("Error: Can't find the file 'settings.py' in the "
"directory containing %r. It appears you've "
"customized things.\n"
"You'll have to run django-admin.py, passing it your "
"settings module.\n"
"(If the file settings.py does indeed exist, it's "
"causing an ImportError somehow.)\n" % __file__)
sys.stderr.write("The error we got was: %s\n" % e)
sys.exit(1)
main(settings, in_subprocess) | ImportError | dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/manage.py/run |
def handle(self, *args, **options):
models_to_import = [Realm, Stream, UserProfile, Recipient, Subscription,
Client, Message, UserMessage, Huddle, DefaultStream, RealmAlias,
RealmFilter]
self.chunk_size = options["chunk_size"] # type: int # ignore mypy options bug
encoding = sys.getfilesystemencoding()
if len(args) == 0:
print("Please provide at least one database dump file name.")
exit(1)
if not options["destroy_rebuild_database"]:
for model in models_to_import:
self.new_instance_check(model)
else:
db_name = settings.DATABASES['default']['NAME']
self.do_destroy_and_rebuild_database(db_name)
# maps relationship between realm id and notifications_stream_id
# generally, there should be only one realm per dump, but the code
# doesn't make that assumption
realm_notification_map = dict() # type: Dict[int, int]
# mapping between table name and a total expected number of rows across
# all input json files
row_counter = dict() # type: Dict[str, int]
for file_name in args:
try:
fp = open(file_name, 'r')
except __HOLE__:
print("File not found: '%s'" % (file_name,))
exit(1)
print("Processing file: %s ..." % (file_name,))
# parse the database dump and load in memory
# TODO: change this to a streaming parser to support loads > RAM size
database_dump = json.load(fp, encoding)
for model in models_to_import:
self.increment_row_counter(row_counter, database_dump, model)
self.import_table(database_dump, realm_notification_map, model)
print("")
# set notifications_stream_id on realm objects to correct value now
# that foreign keys are in streams table
if len(realm_notification_map):
print("Setting realm notification stream...")
for id, notifications_stream_id in realm_notification_map.items():
Realm.objects \
.filter(id=id) \
.update(notifications_stream = notifications_stream_id)
print("")
print("Testing data import: ")
# test that everything from all json dumps made it into the database
for model in models_to_import:
self.test_table_row_count(row_counter, model) | IOError | dataset/ETHPy150Open zulip/zulip/zerver/management/commands/import_dump.py/Command.handle |
def processor_for(content_model_or_slug, exact_page=False):
"""
Decorator that registers the decorated function as a page
processor for the given content model or slug.
When a page exists that forms the prefix of custom urlpatterns
in a project (eg: the blog page and app), the page will be
added to the template context. Passing in ``True`` for the
``exact_page`` arg, will ensure that the page processor is not
run in this situation, requiring that the loaded page object
is for the exact URL currently being viewed.
"""
content_model = None
slug = ""
if isinstance(content_model_or_slug, (str, _str)):
try:
parts = content_model_or_slug.split(".", 1)
content_model = apps.get_model(*parts)
except (__HOLE__, ValueError, LookupError):
slug = content_model_or_slug
elif issubclass(content_model_or_slug, Page):
content_model = content_model_or_slug
else:
raise TypeError("%s is not a valid argument for page_processor, "
"which should be a model subclass of Page in class "
"or string form (app.model), or a valid slug" %
content_model_or_slug)
def decorator(func):
parts = (func, exact_page)
if content_model:
model_name = content_model._meta.object_name.lower()
processors[model_name].insert(0, parts)
else:
processors["slug:%s" % slug].insert(0, parts)
return func
return decorator | TypeError | dataset/ETHPy150Open stephenmcd/mezzanine/mezzanine/pages/page_processors.py/processor_for |
def autodiscover():
"""
Taken from ``django.contrib.admin.autodiscover`` and used to run
any calls to the ``processor_for`` decorator.
"""
global LOADED
if LOADED:
return
LOADED = True
for app in get_app_name_list():
try:
module = import_module(app)
except __HOLE__:
pass
else:
try:
import_module("%s.page_processors" % app)
except:
if module_has_submodule(module, "page_processors"):
raise | ImportError | dataset/ETHPy150Open stephenmcd/mezzanine/mezzanine/pages/page_processors.py/autodiscover |
def wrap(self, stream):
try:
while True:
ch = stream.next()
self.line.append(ch)
if ch == '\n':
for tok in self.emit_line():
yield tok
except __HOLE__:
for tok in self.emit_line():
yield tok | StopIteration | dataset/ETHPy150Open brehaut/picoparse/picoparse/text.py/TextDiagnostics.wrap |
def define_process_title(proc_title='twork'):
"""Define Custom Process Title
"""
try:
import setproctitle
setproctitle.setproctitle(proc_title)
except __HOLE__ as e:
gen_logger.error(e) | ImportError | dataset/ETHPy150Open bufferx/twork/twork/utils/common.py/define_process_title |
def rm_fstab(name, device, config='/etc/fstab'):
'''
Remove the mount point from the fstab
CLI Example:
.. code-block:: bash
salt '*' mount.rm_fstab /mnt/foo /dev/sdg
'''
modified = False
criteria = _fstab_entry(name=name, device=device)
lines = []
try:
with salt.utils.fopen(config, 'r') as ifile:
for line in ifile:
try:
if criteria.match(line):
modified = True
else:
lines.append(line)
except _fstab_entry.ParseError:
lines.append(line)
except (IOError, OSError) as exc:
msg = "Couldn't read from {0}: {1}"
raise CommandExecutionError(msg.format(config, str(exc)))
if modified:
try:
with salt.utils.fopen(config, 'w+') as ofile:
ofile.writelines(lines)
except (__HOLE__, OSError) as exc:
msg = "Couldn't write to {0}: {1}"
raise CommandExecutionError(msg.format(config, str(exc)))
# Note: not clear why we always return 'True'
# --just copying previous behavior at this point...
return True | IOError | dataset/ETHPy150Open saltstack/salt/salt/modules/mount.py/rm_fstab |
def set_fstab(
name,
device,
fstype,
opts='defaults',
dump=0,
pass_num=0,
config='/etc/fstab',
test=False,
match_on='auto',
**kwargs):
'''
Verify that this mount is represented in the fstab, change the mount
to match the data passed, or add the mount if it is not present.
CLI Example:
.. code-block:: bash
salt '*' mount.set_fstab /mnt/foo /dev/sdz1 ext4
'''
# Fix the opts type if it is a list
if isinstance(opts, list):
opts = ','.join(opts)
# preserve arguments for updating
entry_args = {
'name': name,
'device': device,
'fstype': fstype,
'opts': opts,
'dump': dump,
'pass_num': pass_num,
}
lines = []
ret = None
# Transform match_on into list--items will be checked later
if isinstance(match_on, list):
pass
elif not isinstance(match_on, six.string_types):
msg = 'match_on must be a string or list of strings'
raise CommandExecutionError(msg)
elif match_on == 'auto':
# Try to guess right criteria for auto....
# NOTE: missing some special fstypes here
specialFSes = frozenset([
'none',
'tmpfs',
'sysfs',
'proc',
'fusectl',
'debugfs',
'securityfs',
'devtmpfs',
'cgroup',
'btrfs'])
if fstype in specialFSes:
match_on = ['name']
else:
match_on = ['device']
else:
match_on = [match_on]
# generate entry and criteria objects, handle invalid keys in match_on
entry = _fstab_entry(**entry_args)
try:
criteria = entry.pick(match_on)
except KeyError:
filterFn = lambda key: key not in _fstab_entry.fstab_keys
invalid_keys = filter(filterFn, match_on)
msg = 'Unrecognized keys in match_on: "{0}"'.format(invalid_keys)
raise CommandExecutionError(msg)
# parse file, use ret to cache status
if not os.path.isfile(config):
raise CommandExecutionError('Bad config file "{0}"'.format(config))
try:
with salt.utils.fopen(config, 'r') as ifile:
for line in ifile:
try:
if criteria.match(line):
# Note: If ret isn't None here,
# we've matched multiple lines
ret = 'present'
if entry.match(line):
lines.append(line)
else:
ret = 'change'
lines.append(str(entry))
else:
lines.append(line)
except _fstab_entry.ParseError:
lines.append(line)
except (IOError, __HOLE__) as exc:
msg = 'Couldn\'t read from {0}: {1}'
raise CommandExecutionError(msg.format(config, str(exc)))
# add line if not present or changed
if ret is None:
lines.append(str(entry))
ret = 'new'
if ret != 'present': # ret in ['new', 'change']:
if not salt.utils.test_mode(test=test, **kwargs):
try:
with salt.utils.fopen(config, 'w+') as ofile:
# The line was changed, commit it!
ofile.writelines(lines)
except (IOError, OSError):
msg = 'File not writable {0}'
raise CommandExecutionError(msg.format(config))
return ret | OSError | dataset/ETHPy150Open saltstack/salt/salt/modules/mount.py/set_fstab |
def rm_automaster(name, device, config='/etc/auto_salt'):
'''
Remove the mount point from the auto_master
CLI Example:
.. code-block:: bash
salt '*' mount.rm_automaster /mnt/foo /dev/sdg
'''
contents = automaster(config)
if name not in contents:
return True
# The entry is present, get rid of it
lines = []
try:
with salt.utils.fopen(config, 'r') as ifile:
for line in ifile:
if line.startswith('#'):
# Commented
lines.append(line)
continue
if not line.strip():
# Blank line
lines.append(line)
continue
comps = line.split()
if len(comps) != 3:
# Invalid entry
lines.append(line)
continue
comps = line.split()
prefix = "/.."
name_chk = comps[0].replace(prefix, "")
device_fmt = comps[2].split(":")
if device:
if name_chk == name and device_fmt[1] == device:
continue
else:
if name_chk == name:
continue
lines.append(line)
except (IOError, __HOLE__) as exc:
msg = "Couldn't read from {0}: {1}"
raise CommandExecutionError(msg.format(config, str(exc)))
try:
with salt.utils.fopen(config, 'w+') as ofile:
ofile.writelines(lines)
except (IOError, OSError) as exc:
msg = "Couldn't write to {0}: {1}"
raise CommandExecutionError(msg.format(config, str(exc)))
# Update automount
__salt__['cmd.run']('automount -cv')
return True | OSError | dataset/ETHPy150Open saltstack/salt/salt/modules/mount.py/rm_automaster |
def set_automaster(
name,
device,
fstype,
opts='',
config='/etc/auto_salt',
test=False,
**kwargs):
'''
Verify that this mount is represented in the auto_salt, change the mount
to match the data passed, or add the mount if it is not present.
CLI Example:
.. code-block:: bash
salt '*' mount.set_automaster /mnt/foo /dev/sdz1 ext4
'''
# Fix the opts type if it is a list
if isinstance(opts, list):
opts = ','.join(opts)
lines = []
change = False
present = False
automaster_file = "/etc/auto_master"
if not os.path.isfile(config):
__salt__['file.touch'](config)
__salt__['file.append'](automaster_file, "/-\t\t\t{0}".format(config))
name = "/..{0}".format(name)
device_fmt = "{0}:{1}".format(fstype, device)
type_opts = "-fstype={0},{1}".format(fstype, opts)
if fstype == 'smbfs':
device_fmt = device_fmt.replace(fstype, "")
try:
with salt.utils.fopen(config, 'r') as ifile:
for line in ifile:
if line.startswith('#'):
# Commented
lines.append(line)
continue
if not line.strip():
# Blank line
lines.append(line)
continue
comps = line.split()
if len(comps) != 3:
# Invalid entry
lines.append(line)
continue
if comps[0] == name or comps[2] == device_fmt:
# check to see if there are changes
# and fix them if there are any
present = True
if comps[0] != name:
change = True
comps[0] = name
if comps[1] != type_opts:
change = True
comps[1] = type_opts
if comps[2] != device_fmt:
change = True
comps[2] = device_fmt
if change:
log.debug(
'auto_master entry for mount point {0} needs to be '
'updated'.format(name)
)
newline = (
'{0}\t{1}\t{2}\n'.format(
name, type_opts, device_fmt)
)
lines.append(newline)
else:
lines.append(line)
except (IOError, OSError) as exc:
msg = 'Couldn\'t read from {0}: {1}'
raise CommandExecutionError(msg.format(config, str(exc)))
if change:
if not salt.utils.test_mode(test=test, **kwargs):
try:
with salt.utils.fopen(config, 'w+') as ofile:
# The line was changed, commit it!
ofile.writelines(lines)
except (IOError, __HOLE__):
msg = 'File not writable {0}'
raise CommandExecutionError(msg.format(config))
return 'change'
if not change:
if present:
# The right entry is already here
return 'present'
else:
if not salt.utils.test_mode(test=test, **kwargs):
# The entry is new, add it to the end of the fstab
newline = (
'{0}\t{1}\t{2}\n'.format(
name, type_opts, device_fmt)
)
lines.append(newline)
try:
with salt.utils.fopen(config, 'w+') as ofile:
# The line was changed, commit it!
ofile.writelines(lines)
except (IOError, OSError):
raise CommandExecutionError(
'File not writable {0}'.format(
config
)
)
return 'new' | OSError | dataset/ETHPy150Open saltstack/salt/salt/modules/mount.py/set_automaster |
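# Illustrative sketch (not part of the module above): the auto_master line that
# set_automaster() assembles for a hypothetical NFS mount. All values below are
# made up for demonstration only.
name = "/../mnt/foo"                      # mount point, prefixed with "/.."
type_opts = "-fstype=nfs,noatime"         # "-fstype={fstype},{opts}"
device_fmt = "nfs:host:/export/foo"       # "{fstype}:{device}"
newline = '{0}\t{1}\t{2}\n'.format(name, type_opts, device_fmt)
# newline == "/../mnt/foo\t-fstype=nfs,noatime\tnfs:host:/export/foo\n"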
def parse_Config(config_path):
"""
Parse PETRglobals.ConfigFileName; the default is PETR_config.ini
in the working directory but this can be changed using the -c option in the command
line. Most of the entries are obvious (but will eventually be documented) with the
exception of
1. actorfile_list and textfile_list are comma-delimited lists. Per the usual rules
for Python config files, these can be continued on the next line provided the
first character is a space or tab.
2. If both textfile_list and textfile_name are present, textfile_list takes priority.
textfile_list should be the name of a file containing text file names; # is allowed
as a comment delimiter at the beginning of individual lines and following the file
name.
3. For additional info on config files, see
http://docs.python.org/3.4/library/configparser.html
or try Google, but basically, it is fairly simple, and you can probably just
follow the examples.
"""
def get_config_boolean(optname):
""" Checks for the option optname, prints outcome and returns the result.
If optname not present, returns False """
if parser.has_option('Options', optname):
try:
result = parser.getboolean('Options', optname)
print(optname, "=", result)
return result
except ValueError:
print(
"Error in config.ini: " +
optname +
" value must be `true' or `false'")
raise
else:
return False
print('\n', end=' ')
parser = ConfigParser()
confdat = parser.read(config_path)
if len(confdat) == 0:
print(
"\aError: Could not find the config file:",
PETRglobals.ConfigFileName)
print("Terminating program")
sys.exit()
try:
PETRglobals.VerbFileName = parser.get('Dictionaries', 'verbfile_name')
PETRglobals.AgentFileName = parser.get(
'Dictionaries',
'agentfile_name')
PETRglobals.DiscardFileName = parser.get(
'Dictionaries',
'discardfile_name')
direct = parser.get('StanfordNLP', 'stanford_dir')
PETRglobals.stanfordnlp = os.path.expanduser(direct)
filestring = parser.get('Dictionaries', 'actorfile_list')
PETRglobals.ActorFileList = filestring.split(', ')
# otherwise this was set in command line
if len(PETRglobals.TextFileList) == 0:
if parser.has_option('Options', 'textfile_list'): # takes priority
filestring = parser.get('Options', 'textfile_list')
PETRglobals.TextFileList = filestring.split(', ')
else:
filename = parser.get('Options', 'textfile_name')
try:
fpar = open(filename, 'r')
except IOError:
print(
"\aError: Could not find the text file list file:",
filename)
print("Terminating program")
sys.exit()
PETRglobals.TextFileList = []
line = fpar.readline()
while len(line) > 0: # go through the entire file
if '#' in line:
line = line[:line.find('#')]
line = line.strip()
if len(line) > 0:
PETRglobals.TextFileList.append(line)
line = fpar.readline()
fpar.close()
if parser.has_option('Dictionaries', 'issuefile_name'):
PETRglobals.IssueFileName = parser.get(
'Dictionaries',
'issuefile_name')
if parser.has_option('Options', 'new_actor_length'):
try:
PETRglobals.NewActorLength = parser.getint(
'Options',
'new_actor_length')
except __HOLE__:
print(
"Error in config.ini Option: new_actor_length value must be an integer")
raise
print("new_actor_length =", PETRglobals.NewActorLength)
PETRglobals.StoponError = get_config_boolean('stop_on_error')
PETRglobals.WriteActorRoot = get_config_boolean('write_actor_root')
PETRglobals.WriteActorText = get_config_boolean('write_actor_text')
if parser.has_option(
'Options', 'require_dyad'): # this one defaults to True
PETRglobals.RequireDyad = get_config_boolean('require_dyad')
else:
PETRglobals.RequireDyad = True
# otherwise this was set in command line
if len(PETRglobals.EventFileName) == 0:
PETRglobals.EventFileName = parser.get('Options', 'eventfile_name')
PETRglobals.CodeBySentence = parser.has_option(
'Options',
'code_by_sentence')
print("code-by-sentence", PETRglobals.CodeBySentence)
PETRglobals.PauseBySentence = parser.has_option(
'Options',
'pause_by_sentence')
print("pause_by_sentence", PETRglobals.PauseBySentence)
PETRglobals.PauseByStory = parser.has_option(
'Options',
'pause_by_story')
print("pause_by_story", PETRglobals.PauseByStory)
try:
if parser.has_option('Options', 'comma_min'):
PETRglobals.CommaMin = parser.getint('Options', 'comma_min')
elif parser.has_option('Options', 'comma_max'):
PETRglobals.CommaMax = parser.getint('Options', 'comma_max')
elif parser.has_option('Options', 'comma_bmin'):
PETRglobals.CommaBMin = parser.getint('Options', 'comma_bmin')
elif parser.has_option('Options', 'comma_bmax'):
PETRglobals.CommaBMax = parser.getint('Options', 'comma_bmax')
elif parser.has_option('Options', 'comma_emin'):
PETRglobals.CommaEMin = parser.getint('Options', 'comma_emin')
elif parser.has_option('Options', 'comma_emax'):
PETRglobals.CommaEMax = parser.getint('Options', 'comma_emax')
except ValueError:
print(
"Error in config.ini Option: comma_* value must be an integer")
raise
print("Comma-delimited clause elimination:")
print("Initial :", end=' ')
if PETRglobals.CommaBMax == 0:
print("deactivated")
else:
print(
"min =",
PETRglobals.CommaBMin,
" max =",
PETRglobals.CommaBMax)
print("Internal:", end=' ')
if PETRglobals.CommaMax == 0:
print("deactivated")
else:
print(
"min =",
PETRglobals.CommaMin,
" max =",
PETRglobals.CommaMax)
print("Terminal:", end=' ')
if PETRglobals.CommaEMax == 0:
print("deactivated")
else:
print(
"min =",
PETRglobals.CommaEMin,
" max =",
PETRglobals.CommaEMax)
except Exception as e:
print(
'parse_config() encountered an error: check the options in',
PETRglobals.ConfigFileName)
print("Terminating program")
sys.exit()
# ================== PRIMARY INPUT USING FIN ================== # | ValueError | dataset/ETHPy150Open openeventdata/petrarch/petrarch/PETRreader.py/parse_Config |
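# Illustrative sketch (not part of the module above): a minimal config file of the
# kind parse_Config() expects. Section and key names come from the parser.get()
# calls in the function; the file names are placeholders.
EXAMPLE_PETR_CONFIG = """
[Dictionaries]
verbfile_name    = CAMEO.verbs.txt
actorfile_list   = actors.international.txt, actors.domestic.txt
agentfile_name   = agents.txt
discardfile_name = discards.txt

[StanfordNLP]
stanford_dir = ~/stanford-corenlp

[Options]
textfile_list  = sentences.sample.txt
eventfile_name = events.output.txt
"""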
def open_FIN(filename, descrstr):
# opens the global input stream fin using filename;
# descrstr provides information about the file in the event it isn't found
global FIN
global FINline, FINnline, CurrentFINname
try:
FIN = io.open(filename, 'r', encoding='utf-8')
CurrentFINname = filename
FINnline = 0
except __HOLE__:
print("\aError: Could not find the", descrstr, "file:", filename)
print("Terminating program")
sys.exit() | IOError | dataset/ETHPy150Open openeventdata/petrarch/petrarch/PETRreader.py/open_FIN |
def close_FIN():
# closes the global input stream fin.
# IOError should only happen during debugging or if something has seriously gone wrong
# with the system, so exit if this occurs.
global FIN
try:
FIN.close()
except __HOLE__:
print("\aError: Could not close the input file")
print("Terminating program")
sys.exit() | IOError | dataset/ETHPy150Open openeventdata/petrarch/petrarch/PETRreader.py/close_FIN |
def read_verb_dictionary(verb_path):
""" Reads the verb dictionary from VerbFileName """
"""
======= VERB DICTIONARY ORGANIZATION =======
The verb dictionary consists of a set of synsets followed by a series of verb
synonyms and patterns.
VERB SYNONYM BLOCKS AND PATTERNS
A verb synonym block is a set of verbs which are synonymous (or close enough) with
respect to the patterns. The program automatically generates the regular forms of the
verb -- endings of 'S','ED' and 'ING' -- if it is regular (and, implicitly, English);
otherwise the irregular forms can be specified in {...} following the primary verb.
Note that if irregular forms are provided in {...}, ALL forms need to be included,
even if some of those are the same as the regular form (in other words, no
alternative forms are generated when {...} is present). An optional code for the
isolated verb can follow in [...].
The verb block begins with a comment of the form
--- <GENERAL DESCRIPTION> [<CODE>] ---
where the "---" signals the beginning of a new block. The code in [...] is the
primary code -- typically a two-digit+0 cue-category code -- for the block, and this
will be used for all other verbs unless these have their own code. If no code is
present, this defaults to the null code "---" which indicates that the isolated verb
does not generate an event. The null code also can be used as a secondary code.
This is followed by a set of patterns -- these begin with '-' -- which generally
follow the same syntax as TABARI patterns. The pattern set is terminated with a
blank line.
====== EXAMPLE =====
&CURRENCY
+DOLLARS
+EUROS
+AUSTRIAN FLORIN
+GOLDEN_GOBLIN_GALLEONS_
+PESO
+KRONER_
+YUM YENNYEN
+JAVANESE YEN
+SWISS FRANCS
+YEN
&ALTCURR
+BITCOIN
+PIRATE GOLD_
+LEPRECHAUN GOLD_
&AUXVERB3_
+HAVE
+HAS
+HAD
--- GRANT [070] ---
GRANT
GIVE {GAVE GIVEN GIVING } # jw 11/14/91
CONTRIBUTE # tony 3/12/91
- * &CURRENCY [903] # -PAS 12.01.12
- * &ALTCURR [904] # -PAS 14.05.08
- * RUPEES [071]
--- EXPLAIN_VERBAL [010] ---
EXPLAIN
COMMENT
ASSERT
SAY {SAID SAYING }
CLARIFY {CLARIFIES CLARIFIED CLARIFYING} [040]
CLEAR_UP
- * RESTORATION RELATIONS [050:050] # ANNOUNCE <ab 02 Dec 2005>
- * COMMIT &MILITARY TO + [0332] # SAY <sls 13 Mar 2008>
- * ATTACK ON + AS &CRIME [018] # DESCRIBE <ab 31 Dec 2005>
- * &CURRENCY DEBT_RELIEF [0331] # ANNOUNCE <ab 02 Dec 2005> , ANNOUNCE
- * WELCOMED OFFER FROM + [050] # ANNOUNCE <ab 02 Dec 2005>
- * + THAT $ WILL PULLOUT [0356] # INFORM <sms 30 Nov 2007>
- * POSSIBILITY OF &FIGHT [138] # MENTION <OY 11 Mar 2006>
- * AGREED JOIN COALITION [031] # ANNOUNCE <OY 15 Mar 2006>
- * TRACES RESPONSIBILITY [112] # REPORT
- CONFIRMED * OF BOMBINGS [010] # REPORT
- * INITIATIVE END &FIGHT [036] # ANNOUNCE <ab 02 Dec 2005>
&TESTSYN3
+TO THE END
+TO THE DEATH
+UNTIL HELL FREEZES OVER
&TESTSYN4
+TO THE END OF THE EARTH
+TO THE DEATH
--- VOW [170] ---
VOW ;tony 3/9/91
- * RESIST &TESTSYN3 [113] ; pas 4/20/03
- * RESIST &TESTSYN4 [115] ; pas 4/20/03
- * RESISTANCE TO THE INVADING [114] ; pas 4/20/03
- * RESIST [112] ;tony 4/29/91
- * WAR [173] ;tony 4/22/91
STORAGE STRUCTURE
The verbs are stored within the PETRglobals.VerbDict in two different subdictionaries.
The first, "verbs", contains the pattern of the base verb. For most verbs, this is just
the word itself, but some verbs like "cordon off" and "wire tap" are multi-word verbs that
function as single words. These words are stored as a search tree, with the base root as
the word that will be marked as a verb (e.g. cordon and tap). Following these will be a tree
structure of all the words in the compound verb that follow the verb, with a # on the end to
indicate the end of the search. After the #, all the words before the marked verb are in a similar
tree structure in reverse order. A dictionary containing the list [arrest, wire tap, cordon off]
would look like:
'CORDON' --- ' OFF' --- '#' --- '#'
/
/
/
PETRglobals.VerbDict --- 'verbs'------ 'ARREST' --- '#' --- '#'
\ \
| \
| \
| 'TAP --- '#' --- 'WIRE' --- '#'
|
|
'patterns'
After the final '#' there is a dictionary with two entries: meaning and code. The meaning
is used to find the entry in the patterns dictionary, and the code stores the specific code
for that verb if it differs from the code of the meaning. The patterns dictionary stores all
the pattern information contained in the files after the synonyms, in a similar
verb-after-#-before-#-info dictionary.
PROGRAMMING NOTES
Notes
1. TABARI allowed recursive synsets -- that is, synsetS embedded in patterns and other
synsets. It should be possible to do this fairly easily, at least with basic
synsets as elements (not as patterns) but a simple call in syn_match(isupperseq)
was not sufficient, so this needs more work.
2. For TABARI legacy purposes, the construction "XXXX_ " is converted to "XXXX ",
an open match. However, per the comments below, generally TABARI dictionaries
should be converted before being used with PETRARCH.
VERB DICTIONARY DIFFERENCES FROM TABARI
On the *very* remote chance -- see Note 1 -- that you are trying to modify a TABARI
.verbs dictionary to the PETRARCH format, the main thing you will need to eliminate
are stemmed words: PETRARCH only works with complete words. On the positive side,
PETRARCH will only look at string as a "verb" if it has been identified as such by
the parser -- that is, it is preceded with (VP and a tag that starts with (VB, so
the [numerous] patterns required for noun/verb disambiguation are no longer
needed. PETRARCH also does not allow disconjunctive sets in patterns: to accommodate
legacy dictionaries, patterns containing these are skipped, but in order to work,
these should be replaced with synsets. Also see additional remarks at the beginning
of the file.
The other big difference between PETRARCH and TABARI is verb-noun disambiguation:
the pattern-based approach of TABARI needed a lot of information to insure that a
word that *might* be a verb was, in fact, a verb (or was a noun that occurred in a
context where it indicated an event anyway: TABARI's [in]famous tendency to code the
right thing for the wrong reason). PETRARCH, in contrast, only looks as a verb when
the parsing has identified it as, in fact, a verb. This dramatically reduces false
positives and eliminates the need for any pattern which was required simply for
disambiguation, but it also means that PETRARCH is a lot more discriminating about
what actually constitutes an event. The big difference here is that verb-only
codes are the norm in PETRARCH dictionaries but the exception in TABARI dictionaries.
The active PETRARCH verbs dictionary has been extensively reorganized into both
verb and noun synonym sets, and you are probably better off adding vocabulary to
this [see Note 1] than converting a dictionary, but it can be done. An unconverted
TABARI dictionary, on the other hand, will generally not work particularly well with
PETRARCH.
Note 1.
Yeah, right. Every project we've encountered -- including those lavishly funded by
multiple millions of taxpayers' dollars and those allegedly producing multiple millions
of events -- has regarded the NSF-funded CAMEO verbs dictionaries as a sacred artifact
of the [infamous] Data Fairy, lowered from Asgaard along the lines of this
http://www.wikiart.org/en/jacob-jordaens/allegory-of-the-peace-of-westphalia-1654
[not exactly sure where the .verbs file is in that painting, but it must be in
there somewhere]
but then subsequently subjected said dictionaries to bitter complaints that they aren't
coding comprehensively.
Look, dudes and dudettes, these dictionaries have been open source for about as long
as the US has been at war in Afghanistan -- which is to say, a really long time -- and
if you don't like how the coding is being done, add some new open-source vocabulary
to the dictionaries instead of merely parasitizing the existing work. Dudes.
The *real* problem, one suspects, is embodied in the following nugget of wisdom:
Opportunity is missed by most people because it is dressed in overalls and looks
like work.
Thomas A. Edison
Dudes.
"""
global theverb, verb
PETRglobals.VerbDict = {'verbs': {}, 'phrases': {}}
def add_dict_tree(targ, verb, meaning="", code='---',
upper=[], synset=False, dict='phrases', line=""):
prev = verb
list = PETRglobals.VerbDict[dict].setdefault(verb, {})
while targ != []:
if targ[0] in [' ', '']:
targ = targ[1:]
continue
# Put synset trees in their own bin so we can
if targ[0][0] == '&':
list = list.setdefault(
'synsets',
{}) # consider this case later
list = list.setdefault(targ[0], {})
targ = targ[1:]
list["#"] = list.setdefault(
"#",
{}) # termination symbol for the lower phrase
list = list["#"]
targ = upper
while targ != []:
if targ[-1] in [' ', '']:
targ = targ[:-1]
continue
# Put synset trees in their own bin so we can
if targ[-1][0] == '&':
list = list.setdefault(
'synsets',
{}) # consider this case later
list = list.setdefault(targ[-1], {})
targ = targ[:-1]
list['#'] = {'meaning': meaning, 'code': code, 'line': line}
def make_phrase_list(thepat):
""" Converts a pattern phrase into a list of alternating words and connectors """
if len(thepat) == 0:
return []
phlist = []
start = 0
maxlen = len(thepat) + 1 # this is just a telltale
while start < len(thepat): # break phrase on ' ' and '_'
spfind = thepat.find(' ', start)
if spfind == -1:
spfind = maxlen
unfind = thepat.find('_', start)
if unfind == -1:
unfind = maxlen
# we just need the terminating point: break on whichever delimiter
# (space or underscore) comes first
if unfind < spfind:
phlist.append(thepat[start:unfind])
start = unfind + 1
else:
phlist.append(thepat[start:spfind])
phlist.append(' ')
start = spfind + 1
# check for missing synsets
ka = 0
while ka < len(phlist):
if len(phlist[ka]) > 0:
if (phlist[ka][0] == '&') and (
phlist[ka] not in PETRglobals.VerbDict):
print("WTF", phlist[ka])
print(sorted(PETRglobals.VerbDict.keys()))
exit()
logger.warning("Synset " + phlist[ka] +
" has not been defined; pattern skipped")
raise ValueError # this will do...
ka += 2
return phlist
def get_verb_forms(loccode, line):
""" Read the irregular forms of a verb. """
# need error checking here
global verb, theverb
forms = verb[verb.find('{') + 1:verb.find('}')].split()
for wrd in forms:
vscr = wrd
add_dict_tree([], vscr, theverb, loccode, dict='verbs', line=line)
def store_multi_word_verb(loccode, line):
""" Store a multi-word verb and optional irregular forms. """
global verb, theverb
if '{' in verb:
forms = verb[verb.find('{') + 1:verb.find('}')].split()
forms.append(verb[:verb.find('{')].strip())
else:
forms = [verb]
# add the regular forms to the verb designated by '+'
plind = verb.index('+') + 1
if verb.find('_', plind) > 0:
vroot = verb[plind:verb.find('_', plind)]
else:
vroot = verb[plind:]
forms.append(verb.replace(vroot, vroot + "S"))
if vroot[-1] == 'E': # root ends in 'E'
forms.append(verb.replace(vroot, vroot + "D"))
forms.append(verb.replace(vroot, vroot[:-1] + "ING"))
else:
forms.append(verb.replace(vroot, vroot + "ED"))
forms.append(verb.replace(vroot, vroot + "ING"))
for phrase in forms:
if '+' in phrase: # otherwise not in correct form so skip it
words = phrase.split('_')
multilist = []
upper = []
if words[0].startswith('+'):
multilist = [True]
for ka in range(1, len(words)):
multilist.append(words[ka])
targverb = words[0][1:]
else:
upper = [False]
for ka in range(2, len(words) + 1):
upper.append(words[len(words) - ka])
targverb = words[len(words) - 1][1:]
add_dict_tree(
multilist[
1:], targverb, theverb, loccode, upper=upper[
1:], dict='verbs', line=line)
else:
logger.warning(
'Error in read_verb_dictionary()/store_multi_word_verb(): ' +
phrase +
' in ' +
verb +
' is part of a multi-word verb and should contain a +; this was skipped')
def make_verb_forms(loccode, line):
""" Create the regular forms of a verb. """
global verb, theverb
vroot = verb
vscr = vroot + \
"S" if vroot[-1] not in ["S", "X", "Z"] else vroot + "ES"
add_dict_tree([], vscr, theverb, loccode, dict='verbs', line=line)
if vroot[-1] == 'E': # root ends in 'E'
vscr = vroot + "D"
add_dict_tree([], vscr, theverb, loccode, dict='verbs', line=line)
vscr = vroot[:-1] + "ING"
else:
vscr = vroot + "ED" # if vroot[-1] not == "Y" else vroot[-1]+"IES"
add_dict_tree([], vscr, theverb, loccode, dict='verbs', line=line)
vscr = vroot + "ING"
add_dict_tree([], vscr, theverb, loccode, dict='verbs', line=line)
def make_plural(st):
""" Create the plural of a synonym noun st """
if 'Y' == st[-1]:
return st[:-1] + 'IES' # space is added below
elif 'S' == st[-1]:
return st[:-1] + 'ES'
else:
return st + 'S'
# note that this will be ignored if there are no errors
logger = logging.getLogger('petr_log')
logger.info("Reading " + PETRglobals.VerbFileName)
open_FIN(verb_path, "verb")
theverb = ''
newblock = False
ka = 0 # primary verb count ( debug )
line = read_FIN_line()
while len(line) > 0: # loop through the file
if '[' in line:
part = line.partition('[')
verb = part[0].strip()
code = part[2][:part[2].find(']')]
else:
verb = line.strip()
code = ''
if verb.startswith('---'): # start of new block
if len(code) > 0:
primarycode = code
else:
primarycode = '---'
newblock = True
line = read_FIN_line()
elif verb[0] == '-': # pattern
# TABARI legacy: currently aren't processing these
if '{' in verb:
line = read_FIN_line()
continue
# resolve the ambiguous '_ ' construction to ' '
verb = verb.replace('_ ', ' ')
if verb[-1] == "_":
verb = verb[:-1]
targ = verb[1:].partition('*')
try:
highpat = make_phrase_list(targ[0].lstrip())
lowphrase = targ[2].rstrip()
if len(lowphrase) == 0:
lowpat = []
else:
lowpat = [targ[2][0]] # start with connector
loclist = make_phrase_list(lowphrase[1:])
lowpat.extend(loclist[:-1]) # don't need the final blank
add_dict_tree(lowpat, theverb, "", code, highpat, line=line)
except __HOLE__:
# just trap the error, which will skip the line containing it
pass
line = read_FIN_line()
elif verb[0] == '&': # Read and store a synset.
if verb[-1] == '_':
noplural = True
verb = verb[:-1] # remove final _
else:
noplural = False
PETRglobals.VerbDict[verb] = {}
line = read_FIN_line()
while line[0] == '+':
wordstr = line[1:].strip()
if noplural or wordstr[-1] == '_':
wordstr = wordstr.strip().replace('_', ' ')
add_dict_tree(
wordstr.split(),
verb,
synset=True,
dict='verbs',
line=line)
else:
wordstr = wordstr.replace('_', ' ')
add_dict_tree(
wordstr.split(),
verb,
synset=True,
dict='verbs',
line=line)
add_dict_tree(
make_plural(wordstr).split(),
verb,
synset=True,
dict='verbs',
line=line)
line = read_FIN_line()
else: # verb
if len(code) > 0:
curcode = code
else:
curcode = primarycode
if newblock:
if '{' in verb:
# theverb is the index to the pattern storage for the
# remainder of the block
theverb = verb[:verb.find('{')].strip()
else:
theverb = verb
add_dict_tree([], theverb, code=curcode, line=line)
newblock = False
if '_' in verb:
store_multi_word_verb(curcode, line)
else:
add_dict_tree(
[],
verb.split()[0],
theverb,
curcode,
dict='verbs',
line=line)
if '{' in verb:
get_verb_forms(curcode, line)
else:
make_verb_forms(curcode, line)
ka += 1 # counting primary verbs
line = read_FIN_line()
close_FIN() | ValueError | dataset/ETHPy150Open openeventdata/petrarch/petrarch/PETRreader.py/read_verb_dictionary |
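# Illustrative sketch (not part of the module above): the nested dictionary that
# add_dict_tree() builds for the three verbs used in the docstring diagram
# ("ARREST", "CORDON OFF", "WIRE TAP"). Leaf contents are abbreviated.
# PETRglobals.VerbDict['verbs'] would contain roughly:
#   {
#     'ARREST': {'#': {'#': {'meaning': ..., 'code': ..., 'line': ...}}},
#     'CORDON': {'OFF': {'#': {'#': {...}}}},      # words after the verb first
#     'TAP':    {'#': {'WIRE': {'#': {...}}}},     # then words before it, reversed
#   }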
def dstr_to_ordate(datestring):
""" Computes an ordinal date from a Gregorian calendar date string YYYYMMDD or YYMMDD."""
"""
This uses the 'ANSI date' with the base -- ordate == 1 -- of 1 Jan 1601. This derives
from [OMG!] COBOL (see http://en.wikipedia.org/wiki/Julian_day) but in fact should
work fairly well for our applications.
For consistency with KEDS and TABARI, YY years between 00 and 30 are interpreted as
20YY; otherwise 19YY is assumed.
Formatting and error checking:
1. YYMMDD dates *must* be <=7 characters, otherwise YYYYMMDD is assumed
2. If YYYYMMDD format is used, only the first 8 characters are checked so it is okay
to have junk at the end of the string.
3. Days are checked for validity according to the month and year, e.g. 20100931 is
never allowed; 20100229 is not valid but 20120229 is valid
4. Invalid dates raise DateError
Source of algorithm: http://en.wikipedia.org/wiki/Julian_day
Unit testing:
Julian dates from http://aa.usno.navy.mil/data/docs/JulianDate.php (set time to noon)
Results:
dstr_to_ordate("20130926") # 2456562
dstr_to_ordate("090120") # 2454852
dstr_to_ordate("510724") # 2433852
dstr_to_ordate("19411207") # 2430336
dstr_to_ordate("18631119") # 2401829
dstr_to_ordate("17760704") # 2369916
dstr_to_ordate("16010101") # 2305814
"""
# print datestring # debug
try:
if len(datestring) > 7:
year = int(datestring[:4])
month = int(datestring[4:6])
day = int(datestring[6:8])
else:
year = int(datestring[:2])
if year <= 30:
year += 2000
else:
year += 1900
month = int(datestring[2:4])
day = int(datestring[4:6])
except __HOLE__:
raise DateError
# print year, month, day # debug
if day <= 0:
raise DateError
if month == 2:
if year % 400 == 0:
if day > 29:
raise DateError
elif year % 100 == 0:
if day > 28:
raise DateError
elif year % 4 == 0:
if day > 29:
raise DateError
else:
if day > 28:
raise DateError
elif month in [4, 6, 9, 11]: # 30 days have September...
if day > 30:
raise DateError
else: # all the rest I don't remember...
if day > 31:
raise DateError
if (month < 3):
adj = 1
else:
adj = 0
yr = year + 4800 - adj
mo = month + (12 * adj) - 3
ordate = day + math.floor((153 * mo + 2) / 5) + 365 * yr
ordate += math.floor(yr / 4) - math.floor(yr / 100) + \
math.floor(yr / 400) - 32045 # pure Julian date
# print "Julian:", ordate # debug to cross-check for unit test
ordate -= 2305813 # adjust for ANSI date
# print ordate # debug
return int(ordate) | ValueError | dataset/ETHPy150Open openeventdata/petrarch/petrarch/PETRreader.py/dstr_to_ordate |
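# Illustrative cross-check (not part of the module above): the ANSI ordinal date
# described in the docstring can also be derived from Python's datetime module,
# which is handy when sanity-checking dstr_to_ordate().
from datetime import date

def ansi_ordate(year, month, day):
    # day 1 == 1 Jan 1601, matching the convention used above
    return date(year, month, day).toordinal() - date(1601, 1, 1).toordinal() + 1

# ansi_ordate(2013, 9, 26) == 150749 == 2456562 - 2305813, agreeing with the
# unit-test values listed in the docstring.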
def read_actor_dictionary(actorfile):
""" Reads a TABARI-style actor dictionary. """
"""
Actor dictionary list elements:
Actors are stored in a dictionary of a list of pattern lists keyed on the first word
of the phrase. The pattern lists are sorted by length.
The individual pattern lists begin with an integer index to the tuple of possible codes
(that is, with the possibility of date restrictions) in PETRglobals.ActorCodes,
followed by the connector from the key, and then a series of 2-tuples containing the
remaining words and connectors. A 2-tuple of the form ('', ' ') signals the end of the
list. <14.02.26: Except at the moment these are just 2-item lists, not tuples, but
this could be easily changed and presumably would be more efficient: these are not
changed so they don't need to be lists.<>
Connector:
blank: words can occur between the previous word and the next word
_ (underscore): words must be consecutive: no intervening words
The codes with possible date restrictions are stored as lists in a [genuine] tuple in
PETRglobals.ActorCodes in the following format where
'ordate' is an ordinal date:
[code] : unrestricted code
[0,ordate,code] : < restriction
[1,ordate,code] : > restriction
[2,ordate,ordate, code] : - (interval) restriction
If PETRglobals.WriteActorRoot is True, the final element of a PETRglobals.ActorCodes
list is the text of the actor at the beginning of the synonym list.
Synonyms simply use the integer code index to point to these tuples.
STRICT FORMATTING OF THE ACTOR DICTIONARY
[With some additional coding, this can be relaxed, but anything following these
rules should read correctly]
Basic structure is a series of records of the form
[primary phrase]
[optional synonym phrases beginning with '+']
[optional date restrictions beginning with '\t']
Material that is ignored
1. Anything following ';' (this is the old KEDS/TABARI format and should probably
be replaced with '#' for consistency)
2. Any line beginning with '#' or <!
3. Any null line (that is, a line consisting of only \n)
A "phrase string" is a set of character strings separated by either blanks or
underscores.
A "code" is a character string without blanks
A "date" has the form YYYYMMDD or YYMMDD. These can be mixed, e.g.
JAMES_BYRNES_ ; CountryInfo.txt
[USAELI 18970101-450703]
[USAGOV 450703-470121]
Primary phrase format:
phrase_string { optional [code] }
if the code is present, it becomes the default code if none of the date restrictions
are satisfied. If it is not present and none of the restrictions are satisfied,
this is equivalent to a null code
Synonym phrase
+phrase_string
Date restriction
\t[code restriction]
where restriction -- everything is interpret as 'or equal' -- takes the form
<date : applies to times before date
>date : applies to times after date
date-date: applies to times between dates
A date restriction of the form
\t[code]
is the same as a default restriction.
== Example ===
# .actor file produced by translate.countryinfo.pl from CountryInfo.120106.txt
# Generated at: Tue Jan 10 14:09:48 2012
# Version: CountryInfo.120106.txt
AFGHANISTAN_ [AFG]
+AFGHAN_
+AFGANISTAN_
+AFGHANESTAN_
+AFGHANYSTAN_
+KABUL_
+HERAT_
MOHAMMAD_ZAHIR_SHAH_ ; CountryInfo.txt
[AFGELI 320101-331108]
[AFGGOV 331108-730717]
[AFGELI 730717-070723]
ABDUL_QADIR_ ; CountryInfo.txt
+NUR_MOHAMMAD_TARAKI_ ; CountryInfo.txt
+HAFIZULLAH_AMIN_ ; CountryInfo.txt
[AFGELI 620101-780427]
[AFGGOV 780427-780430]
[AFGELI]
HAMID_KARZAI_ [AFGMIL]; CountryInfo.txt
+BABRAK_KARMAL_ ; CountryInfo.txt
+SIBGHATULLAH_MOJADEDI_ ; CountryInfo.txt
[AFGGOV 791227-861124]
[AFGGOV 791227-810611]
"""
dateerrorstr = "String in date restriction could not be interpreted; line skipped"
logger = logging.getLogger('petr_log')
logger.info("Reading " + actorfile)
open_FIN(actorfile, "actor")
# location where codes for current actor will be stored
codeindex = len(PETRglobals.ActorCodes)
# list of codes -- default and date restricted -- for current actor
curlist = []
line = read_FIN_line()
while len(line) > 0: # loop through the file
if '---STOP---' in line:
break
if line[0] == '\t': # deal with date restriction
# print "DR:",line, # debug
try:
brack = line.index('[')
except __HOLE__:
logger.warning(dateerrorstr)
line = read_FIN_line()
continue
part = line[brack + 1:].strip().partition(' ')
code = part[0].strip()
rest = part[2].lstrip()
if '<' in rest or '>' in rest:
# find an all-digit string: this is more robust than the TABARI
# equivalent
ka = 1
while (ka < len(rest)) and (not rest[ka].isdigit()):
# if this fails the length test, it will be caught as
# DateError
ka += 1
kb = ka + 6
while (kb < len(rest)) and (rest[kb].isdigit()):
kb += 1
try:
ord = dstr_to_ordate(rest[ka:kb])
except DateError:
logger.warning(dateerrorstr)
line = read_FIN_line()
continue
if rest[0] == '<':
curlist.append([0, ord, code])
else:
curlist.append([1, ord, code])
elif '-' in rest:
part = rest.partition('-')
try:
pt0 = part[0].strip()
ord1 = dstr_to_ordate(pt0)
part2 = part[2].partition(']')
pt2 = part2[0].strip()
ord2 = dstr_to_ordate(pt2)
except DateError:
logger.warning(dateerrorstr)
line = read_FIN_line()
continue
if ord2 < ord1:
logger.warning(
"End date in interval date restriction is less than starting date; line skipped")
line = read_FIN_line()
continue
curlist.append([2, ord1, ord2, code])
else: # replace default code
# list containing a single code
curlist.append([code[:code.find(']')]])
else:
if line[0] == '+': # deal with synonym
# print "Syn:",line,
part = line.partition(';') # split on comment, if any
actor = part[0][1:].strip() + ' '
else: # primary phrase with code
if len(curlist) > 0:
if PETRglobals.WriteActorRoot:
curlist.append(rootactor)
# print(curlist)
PETRglobals.ActorCodes.append(
tuple(curlist)) # store code from previous entry
"""print(PETRglobals.ActorCodes[-1])
thelist = PETRglobals.ActorCodes[-1]
for item in thelist:
if not isinstance(item,list):
print('== Actor',item)"""
codeindex = len(PETRglobals.ActorCodes)
curlist = []
if '[' in line: # code specified?
part = line.partition('[')
# list containing a single code
curlist.append([part[2].partition(']')[0].strip()])
else:
# no code, so don't update curlist
part = line.partition(';')
actor = part[0].strip() + ' '
rootactor = actor
nounlist = make_noun_list(actor)
keyword = nounlist[0][0]
phlist = [codeindex, nounlist[0][1]] + nounlist[1:]
# we don't need to store the first word, just the connector
if keyword in PETRglobals.ActorDict:
PETRglobals.ActorDict[keyword].append(phlist)
else:
PETRglobals.ActorDict[keyword] = [phlist]
if isinstance(phlist[0], str):
# save location of the list if this is a primary phrase
curlist = PETRglobals.ActorDict[keyword]
line = read_FIN_line()
close_FIN()
# <14.11.20: does this need to save the final entry? >
# sort the patterns by the number of words
# for lockey in list(PETRglobals.ActorDict.keys()):
# PETRglobals.ActorDict[lockey].sort(key=len, reverse=True) | ValueError | dataset/ETHPy150Open openeventdata/petrarch/petrarch/PETRreader.py/read_actor_dictionary |
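# Illustrative sketch (not part of the module above): evaluates the date-restricted
# code lists documented in the docstring ([code], [0,ordate,code], [1,ordate,code],
# [2,ordate,ordate,code]) against an ordinal date. The function name and usage are
# hypothetical; the real lookup lives elsewhere in PETRARCH.
def pick_code(code_list, ordate):
    default = '---'
    for item in code_list:
        if len(item) == 1:                                    # unrestricted default code
            default = item[0]
        elif item[0] == 0 and ordate <= item[1]:              # '<' restriction
            return item[2]
        elif item[0] == 1 and ordate >= item[1]:              # '>' restriction
            return item[2]
        elif item[0] == 2 and item[1] <= ordate <= item[2]:   # interval restriction
            return item[3]
    return default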
def read_pipeline_input(pipeline_list):
"""
Reads input from the processing pipeline and MongoDB and creates the global
holding dictionary. Please consult the documentation for more information
on the format of the global holding dictionary. The function iteratively
parses each file, so it is capable of processing large inputs without failing.
Parameters
----------
pipeline_list: List.
List of dictionaries as stored in the MongoDB instance.
These records are originally generated by the
`web scraper <https://github.com/openeventdata/scraper>`_.
Returns
-------
holding: Dictionary.
Global holding dictionary with StoryIDs as keys and various
sentence- and story-level attributes as the inner dictionaries.
Please refer to the documentation for greater information on
the format of this dictionary.
"""
holding = {}
for entry in pipeline_list:
entry_id = str(entry['_id'])
meta_content = {'date': utilities._format_datestr(entry['date']),
'date_added': entry['date_added'],
'source': entry['source'],
'story_title': entry['title'],
'url': entry['url']}
if 'parsed_sents' in entry:
parsetrees = entry['parsed_sents']
else:
parsetrees = ''
if 'corefs' in entry:
corefs = entry['corefs']
meta_content.update({'corefs': corefs})
split_sents = _sentence_segmenter(entry['content'])
# TODO Make the number of sents a setting
sent_dict = {}
for i, sent in enumerate(split_sents[:7]):
if parsetrees:
try:
tree = utilities._format_parsed_str(parsetrees[i])
except __HOLE__:
tree = ''
sent_dict[i] = {'content': sent, 'parsed': tree}
else:
sent_dict[i] = {'content': sent}
content_dict = {'sents': sent_dict, 'meta': meta_content}
holding[entry_id] = content_dict
return holding | IndexError | dataset/ETHPy150Open openeventdata/petrarch/petrarch/PETRreader.py/read_pipeline_input |
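# Illustrative sketch (not part of the module above): the minimal shape of one
# record in pipeline_list as read_pipeline_input() consumes it. Field values are
# placeholders; 'parsed_sents' and 'corefs' are optional.
example_entry = {
    '_id': 'abc123',
    'date': '20140101',
    'date_added': '20140102',
    'source': 'example_source',
    'title': 'Example story title',
    'url': 'http://example.com/story',
    'content': 'Sentence one. Sentence two.',
    # 'parsed_sents': [...],   # optional parse trees, one per sentence
    # 'corefs': [...],         # optional coreference information
}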
def main():
parser = argparse.ArgumentParser(prog='enjarify', description='Translates Dalvik bytecode (.dex or .apk) to Java bytecode (.jar)')
parser.add_argument('inputfile')
parser.add_argument('-o', '--output', help='Output .jar file. Default is [input-filename]-enjarify.jar.')
parser.add_argument('-f', '--force', action='store_true', help='Force overwrite. If output file already exists, this option is required to overwrite.')
parser.add_argument('--fast', action='store_true', help='Speed up translation at the expense of generated bytecode being less readable.')
args = parser.parse_args()
dexs = []
if args.inputfile.lower().endswith('.apk'):
with zipfile.ZipFile(args.inputfile, 'r') as z:
for name in z.namelist():
if name.startswith('classes') and name.endswith('.dex'):
dexs.append(z.read(name))
else:
dexs.append(read(args.inputfile))
# Exclusive mode requires 3.3+, so provide helpful error in this case
if not args.force:
try:
FileExistsError
except __HOLE__:
print('Overwrite protection requires Python 3.3+. Either pass -f or --force, or upgrade to a more recent version of Python. If you are using Pypy3 2.4, you need to switch to a nightly build or build from source. Or just pass -f.')
return
# Might as well open the output file early so we can detect existing file error
# before going to the trouble of translating everything
outname = args.output or args.inputfile.rpartition('/')[-1].rpartition('.')[0] + '-enjarify.jar'
try:
outfile = open(outname, mode=('wb' if args.force else 'xb'))
except FileExistsError:
print('Error, output file already exists and --force was not specified.')
print('To overwrite the output file, pass -f or --force.')
return
opts = options.NONE if args.fast else options.PRETTY
classes = collections.OrderedDict()
errors = collections.OrderedDict()
for data in dexs:
translate(data, opts=opts, classes=classes, errors=errors)
writeToJar(outfile, classes)
outfile.close()
print('Output written to', outname)
for name, error in sorted(errors.items()):
print(name, error)
print('{} classes translated successfully, {} classes had errors'.format(len(classes), len(errors))) | NameError | dataset/ETHPy150Open ajinabraham/Mobile-Security-Framework-MobSF/StaticAnalyzer/tools/enjarify/enjarify/main.py/main |
def check_version(branch, latest_hash=None):
if branch == 'master':
remote_dir = 'devel'
regex = ("(?<=This documentation is for version <b>\d{1}\.\d{1}\."
"\d{1}\.dev-)(\w{7})")
else:
remote_dir = 'stable'
regex = ("(?<=This documentation is for the <b>)(\d{1}\.\d{1}\.\d{1})"
"(?=</b> release.)")
base_url = 'http://statsmodels.sourceforge.net/{}'
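# scrape the published documentation page; the regexes above pull out either
# the dev build's git hash (master) or the release number (stable)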
page = urlopen(base_url.format(remote_dir)).read()
try:
version = re.search(regex, page).group()
except __HOLE__:
return True
if remote_dir == 'stable':
if last_release[1:] == version:
return False
else:
return True
# get the latest hash
if latest_hash == version:
return False
else:
return True | AttributeError | dataset/ETHPy150Open statsmodels/statsmodels/tools/update_web.py/check_version |
def use(wcspkg, raise_err=True):
"""Choose WCS package."""
global coord_types, wcs_configured, WCS, \
have_kapteyn, kapwcs, \
have_astlib, astWCS, astCoords, \
have_starlink, Ast, Atl, \
have_astropy, pywcs, pyfits, astropy, coordinates, units
if wcspkg == 'kapteyn':
try:
from kapteyn import wcs as kapwcs
coord_types = ['icrs', 'fk5', 'fk4', 'galactic', 'ecliptic']
have_kapteyn = True
wcs_configured = True
WCS = KapteynWCS
return True
except ImportError as e:
if raise_err:
raise
return False
elif wcspkg == 'starlink':
try:
import starlink.Ast as Ast
import starlink.Atl as Atl
coord_types = ['icrs', 'fk5', 'fk4', 'galactic', 'ecliptic']
have_starlink = True
wcs_configured = True
WCS = StarlinkWCS
return True
except ImportError as e:
if raise_err:
raise
return False
elif wcspkg == 'astlib':
try:
from astLib import astWCS, astCoords
# astlib requires pyfits (or astropy) in order
# to create a WCS object from a FITS header.
try:
from astropy.io import fits as pyfits
except ImportError:
try:
import pyfits
except ImportError:
raise ImportError("Need pyfits module to use astLib WCS")
astWCS.NUMPY_MODE = True
coord_types = ['j2000', 'b1950', 'galactic']
have_astlib = True
wcs_configured = True
WCS = AstLibWCS
return True
except ImportError as e:
if raise_err:
raise
return False
elif wcspkg == 'astropy2':
try:
import astropy
from distutils.version import LooseVersion
if LooseVersion(astropy.__version__) <= LooseVersion('1'):
raise ImportError("astropy2 wrapper requires version 1 of astropy")
import astropy.coordinates
import astropy.wcs as pywcs
from astropy.io import fits as pyfits
import astropy.units as u
from astropy.version import version
except ImportError:
if raise_err:
raise
return False
have_pywcs = True
have_astropy = True
wcs_configured = True
WCS = AstropyWCS2
try:
import sunpy.coordinates
except ImportError:
pass
coord_types = [f.name for f in astropy.coordinates.frame_transform_graph.frame_set]
return True
elif wcspkg == 'astropy':
try:
import astropy.wcs as pywcs
from astropy.io import fits as pyfits
have_pywcs = True
except ImportError:
try:
import pywcs
have_pywcs = True
except ImportError as e:
if raise_err:
raise
return False
try:
from astropy import coordinates
from astropy import units
have_astropy = True
wcs_configured = True
WCS = AstropyWCS
if hasattr(coordinates, 'SkyCoord'):
try:
import sunpy.coordinates
except ImportError:
pass
coord_types = [f.name for f in coordinates.frame_transform_graph.frame_set]
else:
coord_types = ['icrs', 'fk5', 'fk4', 'galactic']
return True
except __HOLE__ as e:
if raise_err:
raise
return False
elif wcspkg == 'barebones':
coord_types = ['fk5']
WCS = BareBonesWCS
wcs_configured = True
return True
return False | ImportError | dataset/ETHPy150Open ejeschke/ginga/ginga/util/wcsmod.py/use |
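# Illustrative usage (not part of the module above): select a WCS backend at
# runtime; 'astropy' here is one of the package names handled by use() above.
#   from ginga.util import wcsmod
#   wcsmod.use('astropy')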
def load_header(self, header, fobj=None):
from astropy.wcs.utils import wcs_to_celestial_frame
try:
# reconstruct a pyfits header, because otherwise we take an
# incredible performance hit in astropy.wcs
self.header = pyfits.Header(header.items())
self.logger.debug("Trying to make astropy wcs object")
self.wcs = pywcs.WCS(self.header, fobj=fobj, relax=True)
try:
self.coordframe = wcs_to_celestial_frame(self.wcs)
except __HOLE__:
sysname = get_coord_system_name(self.header)
if sysname in ('raw', 'pixel'):
self.coordframe = sysname
else:
raise
except Exception as e:
self.logger.error("Error making WCS object: %s" % (str(e)))
self.wcs = None | ValueError | dataset/ETHPy150Open ejeschke/ginga/ginga/util/wcsmod.py/AstropyWCS2.load_header |
def pixtocoords(self, idxs, system=None, coords='data'):
if self.coordsys == 'raw':
raise WCSError("No usable WCS")
if system is None:
system = 'icrs'
# Get a coordinates object based on ra/dec wcs transform
ra_deg, dec_deg = self.pixtoradec(idxs, coords=coords)
self.logger.debug("ra, dec = %f, %f" % (ra_deg, dec_deg))
if not self.new_coords:
# convert to astropy coord
try:
fromclass = self.coord_table[self.coordsys]
except KeyError:
raise WCSError("No such coordinate system available: '%s'" % (
self.coordsys))
coord = fromclass(ra_deg, dec_deg,
unit=(units.degree, units.degree))
if (system is None) or (system == self.coordsys):
return coord
# Now give it back to the user in the system requested
try:
toclass = self.coord_table[system]
except __HOLE__:
raise WCSError("No such coordinate system available: '%s'" % (
system))
coord = coord.transform_to(toclass)
else:
frameClass = coordinates.frame_transform_graph.lookup_name(self.coordsys)
coord = frameClass(ra_deg * units.degree, dec_deg * units.degree)
toClass = coordinates.frame_transform_graph.lookup_name(system)
# Skip in input and output is the same (no realize_frame
# call in astropy)
if toClass != frameClass:
coord = coord.transform_to(toClass)
return coord | KeyError | dataset/ETHPy150Open ejeschke/ginga/ginga/util/wcsmod.py/AstropyWCS.pixtocoords |
def get_pixel_coordinates(self):
try:
cd11 = float(self.get_keyword('CD1_1'))
cd12 = float(self.get_keyword('CD1_2'))
cd21 = float(self.get_keyword('CD2_1'))
cd22 = float(self.get_keyword('CD2_2'))
except Exception as e:
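# no CD matrix in the header: fall back to the PC matrix scaled by CDELT,
# trying the modern PCi_j keywords before the old PC00i00j form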
cdelt1 = float(self.get_keyword('CDELT1'))
cdelt2 = float(self.get_keyword('CDELT2'))
try:
cd11 = float(self.get_keyword('PC1_1')) * cdelt1
cd12 = float(self.get_keyword('PC1_2')) * cdelt1
cd21 = float(self.get_keyword('PC2_1')) * cdelt2
cd22 = float(self.get_keyword('PC2_2')) * cdelt2
except __HOLE__:
cd11 = float(self.get_keyword('PC001001')) * cdelt1
cd12 = float(self.get_keyword('PC001002')) * cdelt1
cd21 = float(self.get_keyword('PC002001')) * cdelt2
cd22 = float(self.get_keyword('PC002002')) * cdelt2
return (cd11, cd12, cd21, cd22) | KeyError | dataset/ETHPy150Open ejeschke/ginga/ginga/util/wcsmod.py/BareBonesWCS.get_pixel_coordinates |
def get_coord_system_name(header):
"""Return an appropriate key code for the axes coordinate system by
examining the FITS header.
"""
try:
ctype = header['CTYPE1'].strip().upper()
except KeyError:
try:
# see if we have an "RA" header
ra = header['RA']
try:
equinox = float(header['EQUINOX'])
if equinox < 1984.0:
radecsys = 'FK4'
else:
radecsys = 'FK5'
except KeyError:
radecsys = 'ICRS'
return radecsys.lower()
except KeyError:
return 'raw'
match = re.match(r'^GLON\-.*$', ctype)
if match:
return 'galactic'
match = re.match(r'^ELON\-.*$', ctype)
if match:
return 'ecliptic'
match = re.match(r'^RA\-\-\-.*$', ctype)
if match:
hdkey = 'RADECSYS'
try:
radecsys = header[hdkey]
except __HOLE__:
try:
hdkey = 'RADESYS'
radecsys = header[hdkey]
except KeyError:
# missing keyword
# RADESYS defaults to IRCS unless EQUINOX is given
# alone, in which case it defaults to FK4 prior to 1984
# and FK5 after 1984.
try:
equinox = float(header['EQUINOX'])
if equinox < 1984.0:
radecsys = 'FK4'
else:
radecsys = 'FK5'
except KeyError:
radecsys = 'ICRS'
radecsys = radecsys.strip()
return radecsys.lower()
match = re.match(r'^HPLN\-.*$', ctype)
if match:
return 'helioprojective'
match = re.match(r'^HGLT\-.*$', ctype)
if match:
return 'heliographicstonyhurst'
match = re.match(r'^PIXEL$', ctype)
if match:
return 'pixel'
match = re.match(r'^LINEAR$', ctype)
if match:
return 'pixel'
#raise WCSError("Cannot determine appropriate coordinate system from FITS header")
return 'icrs' | KeyError | dataset/ETHPy150Open ejeschke/ginga/ginga/util/wcsmod.py/get_coord_system_name |
def __init__(self, values=None, maximize=True):
if values is None:
values = []
self.values = values
try:
iter(maximize)
except __HOLE__:
maximize = [maximize for v in values]
self.maximize = maximize | TypeError | dataset/ETHPy150Open aarongarrett/inspyred/inspyred/ec/emo.py/Pareto.__init__ |
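# Note on the constructor above (not part of the source): the try/except around
# iter(maximize) broadcasts a scalar flag to one entry per objective, e.g.
#   Pareto([1.0, 2.0], maximize=True)          -> maximize == [True, True]
#   Pareto([1.0, 2.0], maximize=[True, False]) -> maximize kept as given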
def evolve(self, generator, evaluator, pop_size=1, seeds=None, maximize=True, bounder=None, **args):
final_pop = ec.EvolutionaryComputation.evolve(self, generator, evaluator, pop_size, seeds, maximize, bounder, **args)
try:
del self.archiver.grid_population
except AttributeError:
pass
try:
del self.archiver.global_smallest
except __HOLE__:
pass
try:
del self.archiver.global_largest
except AttributeError:
pass
return final_pop | AttributeError | dataset/ETHPy150Open aarongarrett/inspyred/inspyred/ec/emo.py/PAES.evolve |
def pop_frame():
"""
Pop a specific frame from the dictionary.
"""
try:
_thread_locals.d_stack.pop(get_tpid_key())
except __HOLE__:
print "[WARNING] Exception at 'expedient.common.middleware.threadlocals.ThreadLocals': tried to access to permittees stack when it was already empty. This may happen because an incorrect URL (request.url) was requested and Django's CommonMiddleware forwarded it to a new one without processing the request in this middleware."
pass | KeyError | dataset/ETHPy150Open fp7-ofelia/ocf/expedient/src/python/expedient/common/middleware/threadlocals.py/pop_frame |
def date(date_str):
try:
return dt.strptime(date_str, "%Y-%m-%d")
except __HOLE__:
msg = "Not a valid date: '{date_str}'. "\
"Expected format: YYYY-MM-DD.".format(date_str=date_str)
raise argparse.ArgumentTypeError(msg) | ValueError | dataset/ETHPy150Open PressLabs/silver/silver/management/commands/generate_docs.py/date |
def test_get_system_time(self):
'''
Test to get system time
'''
tm = datetime.strftime(datetime.now(), "%I:%M %p")
win_tm = win_system.get_system_time()
try:
self.assertEqual(win_tm, tm)
except __HOLE__:
# handle race condition
import re
self.assertTrue(re.search(r'^\d{2}:\d{2} \w{2}$', win_tm)) | AssertionError | dataset/ETHPy150Open saltstack/salt/tests/unit/modules/win_system_test.py/WinSystemTestCase.test_get_system_time |
def system_methodHelp(self, method_name):
"""system.methodHelp('add') => "Adds two integers together"
Returns a string containing documentation for the specified method."""
method = None
if method_name in self.funcs:
method = self.funcs[method_name]
elif self.instance is not None:
# Instance can implement _methodHelp to return help for a method
if hasattr(self.instance, '_methodHelp'):
return self.instance._methodHelp(method_name)
# if the instance has a _dispatch method then we
# don't have enough information to provide help
elif not hasattr(self.instance, '_dispatch'):
try:
method = resolve_dotted_attribute(
self.instance,
method_name,
self.allow_dotted_names
)
except __HOLE__:
pass
# Note that we aren't checking that the method actually
# be a callable object of some kind
if method is None:
return ""
else:
return pydoc.getdoc(method) | AttributeError | dataset/ETHPy150Open OpenCobolIDE/OpenCobolIDE/open_cobol_ide/extlibs/future/backports/xmlrpc/server.py/SimpleXMLRPCDispatcher.system_methodHelp |
def _dispatch(self, method, params):
"""Dispatches the XML-RPC method.
XML-RPC calls are forwarded to a registered function that
matches the called XML-RPC method name. If no such function
exists then the call is forwarded to the registered instance,
if available.
If the registered instance has a _dispatch method then that
method will be called with the name of the XML-RPC method and
its parameters as a tuple
e.g. instance._dispatch('add',(2,3))
If the registered instance does not have a _dispatch method
then the instance will be searched to find a matching method
and, if found, will be called.
Methods beginning with an '_' are considered private and will
not be called.
"""
func = None
try:
# check to see if a matching function has been registered
func = self.funcs[method]
except __HOLE__:
if self.instance is not None:
# check for a _dispatch method
if hasattr(self.instance, '_dispatch'):
return self.instance._dispatch(method, params)
else:
# call instance method directly
try:
func = resolve_dotted_attribute(
self.instance,
method,
self.allow_dotted_names
)
except AttributeError:
pass
if func is not None:
return func(*params)
else:
raise Exception('method "%s" is not supported' % method) | KeyError | dataset/ETHPy150Open OpenCobolIDE/OpenCobolIDE/open_cobol_ide/extlibs/future/backports/xmlrpc/server.py/SimpleXMLRPCDispatcher._dispatch |
def do_POST(self):
"""Handles the HTTP POST request.
Attempts to interpret all HTTP POST requests as XML-RPC calls,
which are forwarded to the server's _dispatch method for handling.
"""
# Check that the path is legal
if not self.is_rpc_path_valid():
self.report_404()
return
try:
# Get arguments by reading body of request.
# We read this in chunks to avoid straining
# socket.read(); around the 10 or 15Mb mark, some platforms
# begin to have problems (bug #792570).
max_chunk_size = 10*1024*1024
size_remaining = int(self.headers["content-length"])
L = []
while size_remaining:
chunk_size = min(size_remaining, max_chunk_size)
chunk = self.rfile.read(chunk_size)
if not chunk:
break
L.append(chunk)
size_remaining -= len(L[-1])
data = b''.join(L)
data = self.decode_request_content(data)
if data is None:
return #response has been sent
# In previous versions of SimpleXMLRPCServer, _dispatch
# could be overridden in this class, instead of in
# SimpleXMLRPCDispatcher. To maintain backwards compatibility,
# check to see if a subclass implements _dispatch and dispatch
# using that method if present.
response = self.server._marshaled_dispatch(
data, getattr(self, '_dispatch', None), self.path
)
except Exception as e: # This should only happen if the module is buggy
# internal error, report as HTTP server error
self.send_response(500)
# Send information about the exception if requested
if hasattr(self.server, '_send_traceback_header') and \
self.server._send_traceback_header:
self.send_header("X-exception", str(e))
trace = traceback.format_exc()
trace = str(trace.encode('ASCII', 'backslashreplace'), 'ASCII')
self.send_header("X-traceback", trace)
self.send_header("Content-length", "0")
self.end_headers()
else:
self.send_response(200)
self.send_header("Content-type", "text/xml")
if self.encode_threshold is not None:
if len(response) > self.encode_threshold:
q = self.accept_encodings().get("gzip", 0)
if q:
try:
response = gzip_encode(response)
self.send_header("Content-Encoding", "gzip")
except __HOLE__:
pass
self.send_header("Content-length", str(len(response)))
self.end_headers()
self.wfile.write(response) | NotImplementedError | dataset/ETHPy150Open OpenCobolIDE/OpenCobolIDE/open_cobol_ide/extlibs/future/backports/xmlrpc/server.py/SimpleXMLRPCRequestHandler.do_POST |
def decode_request_content(self, data):
#support gzip encoding of request
encoding = self.headers.get("content-encoding", "identity").lower()
if encoding == "identity":
return data
if encoding == "gzip":
try:
return gzip_decode(data)
except NotImplementedError:
self.send_response(501, "encoding %r not supported" % encoding)
except __HOLE__:
self.send_response(400, "error decoding gzip content")
else:
self.send_response(501, "encoding %r not supported" % encoding)
self.send_header("Content-length", "0")
self.end_headers() | ValueError | dataset/ETHPy150Open OpenCobolIDE/OpenCobolIDE/open_cobol_ide/extlibs/future/backports/xmlrpc/server.py/SimpleXMLRPCRequestHandler.decode_request_content |
def handle_request(self, request_text=None):
"""Handle a single XML-RPC request passed through a CGI post method.
If no XML data is given then it is read from stdin. The resulting
XML-RPC response is printed to stdout along with the correct HTTP
headers.
"""
if request_text is None and \
os.environ.get('REQUEST_METHOD', None) == 'GET':
self.handle_get()
else:
# POST data is normally available through stdin
try:
length = int(os.environ.get('CONTENT_LENGTH', None))
except (__HOLE__, TypeError):
length = -1
if request_text is None:
request_text = sys.stdin.read(length)
self.handle_xmlrpc(request_text)
# -----------------------------------------------------------------------------
# Self documenting XML-RPC Server. | ValueError | dataset/ETHPy150Open OpenCobolIDE/OpenCobolIDE/open_cobol_ide/extlibs/future/backports/xmlrpc/server.py/CGIXMLRPCRequestHandler.handle_request |
def generate_html_documentation(self):
"""generate_html_documentation() => html documentation for the server
Generates HTML documentation for the server using introspection for
installed functions and instances that do not implement the
_dispatch method. Alternatively, instances can choose to implement
the _get_method_argstring(method_name) method to provide the
argument string used in the documentation and the
_methodHelp(method_name) method to provide the help text used
in the documentation."""
methods = {}
for method_name in self.system_listMethods():
if method_name in self.funcs:
method = self.funcs[method_name]
elif self.instance is not None:
method_info = [None, None] # argspec, documentation
if hasattr(self.instance, '_get_method_argstring'):
method_info[0] = self.instance._get_method_argstring(method_name)
if hasattr(self.instance, '_methodHelp'):
method_info[1] = self.instance._methodHelp(method_name)
method_info = tuple(method_info)
if method_info != (None, None):
method = method_info
elif not hasattr(self.instance, '_dispatch'):
try:
method = resolve_dotted_attribute(
self.instance,
method_name
)
except __HOLE__:
method = method_info
else:
method = method_info
else:
assert 0, "Could not find method in self.functions and no "\
"instance installed"
methods[method_name] = method
documenter = ServerHTMLDoc()
documentation = documenter.docserver(
self.server_name,
self.server_documentation,
methods
)
return documenter.page(self.server_title, documentation) | AttributeError | dataset/ETHPy150Open OpenCobolIDE/OpenCobolIDE/open_cobol_ide/extlibs/future/backports/xmlrpc/server.py/XMLRPCDocGenerator.generate_html_documentation |
def get_filters(self):
filters = {'is_public': None}
filter_field = self.table.get_filter_field()
filter_string = self.table.get_filter_string()
filter_action = self.table._meta._filter_action
if filter_field and filter_string and (
filter_action.is_api_filter(filter_field)):
if filter_field in ['size_min', 'size_max']:
invalid_msg = ('API query is not valid and is ignored: %s=%s'
% (filter_field, filter_string))
try:
filter_string = long(float(filter_string) * (1024 ** 2))
if filter_string >= 0:
filters[filter_field] = filter_string
else:
LOG.warning(invalid_msg)
except __HOLE__:
LOG.warning(invalid_msg)
else:
filters[filter_field] = filter_string
return filters | ValueError | dataset/ETHPy150Open CiscoSystems/avos/openstack_dashboard/dashboards/admin/images/views.py/IndexView.get_filters |
def test_create_raise_exception_with_bad_keys(self):
try:
Address.create({"customer_id": "12345", "bad_key": "value"})
self.assertTrue(False)
except __HOLE__ as e:
self.assertEquals("'Invalid keys: bad_key'", str(e)) | KeyError | dataset/ETHPy150Open braintree/braintree_python/tests/unit/test_address.py/TestAddress.test_create_raise_exception_with_bad_keys |
def test_create_raises_error_if_no_customer_id_given(self):
try:
Address.create({"country_name": "United States of America"})
self.assertTrue(False)
except __HOLE__ as e:
self.assertEquals("'customer_id must be provided'", str(e)) | KeyError | dataset/ETHPy150Open braintree/braintree_python/tests/unit/test_address.py/TestAddress.test_create_raises_error_if_no_customer_id_given |
def test_create_raises_key_error_if_given_invalid_customer_id(self):
try:
Address.create({"customer_id": "!@#$%"})
self.assertTrue(False)
except __HOLE__ as e:
self.assertEquals("'customer_id contains invalid characters'", str(e)) | KeyError | dataset/ETHPy150Open braintree/braintree_python/tests/unit/test_address.py/TestAddress.test_create_raises_key_error_if_given_invalid_customer_id |
def test_update_raise_exception_with_bad_keys(self):
try:
Address.update("customer_id", "address_id", {"bad_key": "value"})
self.assertTrue(False)
except __HOLE__ as e:
self.assertEquals("'Invalid keys: bad_key'", str(e)) | KeyError | dataset/ETHPy150Open braintree/braintree_python/tests/unit/test_address.py/TestAddress.test_update_raise_exception_with_bad_keys |
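# The four tests above share the try/except/assertTrue(False) pattern for asserting
# that a KeyError is raised. A self-contained sketch of the equivalent assertRaises
# idiom, using a stand-in function rather than the Braintree SDK:
import unittest
def create_address(params):
    if 'customer_id' not in params:
        raise KeyError('customer_id must be provided')
    return params
class TestAssertRaisesIdiom(unittest.TestCase):
    def test_missing_customer_id_raises(self):
        with self.assertRaises(KeyError) as ctx:
            create_address({'country_name': 'United States of America'})
        self.assertEqual("'customer_id must be provided'", str(ctx.exception))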
def crawl_path(self, path_descriptor):
info = self.path_info[path_descriptor.key()] = {}
for path, dirs, files in os.walk(path_descriptor.path):
for file in [os.path.abspath(os.path.join(path, filename)) for filename in files]:
try:
info[file] = os.path.getmtime(file)
except __HOLE__:
# file was removed while we were trying to access it; don't include it
pass | OSError | dataset/ETHPy150Open tmc/mutter/mutter/watchers.py/ModTimeWatcher.crawl_path |
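# Minimal sketch of the pattern above: files listed by os.walk can disappear before
# getmtime runs, so the OSError from that race is swallowed. Names are illustrative.
import os
def snapshot_mtimes(root):
    info = {}
    for path, dirs, files in os.walk(root):
        for name in files:
            full = os.path.abspath(os.path.join(path, name))
            try:
                info[full] = os.path.getmtime(full)
            except OSError:
                pass  # file vanished between listing and stat; skip it
    return info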
@validator
def ipv6(value):
"""
Return whether or not given value is a valid IP version 6 address.
This validator is based on `WTForms IPAddress validator`_.
.. _WTForms IPAddress validator:
https://github.com/wtforms/wtforms/blob/master/wtforms/validators.py
Examples::
>>> ipv6('abcd:ef::42:1')
True
>>> ipv6('abc.0.0.1')
ValidationFailure(func=ipv6, args={'value': 'abc.0.0.1'})
.. versionadded:: 0.2
:param value: IP address string to validate
"""
parts = value.split(':')
if len(parts) > 8:
return False
num_blank = 0
for part in parts:
if not part:
num_blank += 1
else:
try:
value = int(part, 16)
except __HOLE__:
return False
else:
if value < 0 or value >= 65536:
return False
if num_blank < 2:
return True
elif num_blank == 2 and not parts[0] and not parts[1]:
return True
return False | ValueError | dataset/ETHPy150Open SickRage/SickRage/lib/validators/ip_address.py/ipv6 |
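# The masked ValueError above is raised by int(part, 16) for a non-hex block; the
# range test then rejects blocks outside 0..0xFFFF. A tiny standalone version of
# that check (illustrative helper name):
def is_hex_block(part):
    try:
        return 0 <= int(part, 16) < 65536
    except ValueError:
        return False
# e.g. is_hex_block('ef') is True, is_hex_block('zz') is False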
def buildReactor(self):
"""
Create and return a reactor using C{self.reactorFactory}.
"""
try:
from twisted.internet.cfreactor import CFReactor
from twisted.internet import reactor as globalReactor
except __HOLE__:
pass
else:
if (isinstance(globalReactor, CFReactor)
and self.reactorFactory is CFReactor):
raise SkipTest(
"CFReactor uses APIs which manipulate global state, "
"so it's not safe to run its own reactor-builder tests "
"under itself")
try:
reactor = self.reactorFactory()
except:
# Unfortunately, not all errors which result in a reactor
# being unusable are detectable without actually
# instantiating the reactor. So we catch some more here
# and skip the test if necessary. We also log it to aid
# with debugging, but flush the logged error so the test
# doesn't fail.
log.err(None, "Failed to install reactor")
self.flushLoggedErrors()
raise SkipTest(Failure().getErrorMessage())
else:
if self.requiredInterfaces is not None:
missing = filter(
lambda required: not required.providedBy(reactor),
self.requiredInterfaces)
if missing:
self.unbuildReactor(reactor)
raise SkipTest("%r does not provide %s" % (
reactor, ",".join([repr(x) for x in missing])))
self.addCleanup(self.unbuildReactor, reactor)
return reactor | ImportError | dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/internet/test/reactormixins.py/ReactorBuilder.buildReactor |
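# The masked ImportError above is the usual optional-import guard: the CFReactor
# special case only applies when the platform can import it at all. A generic
# sketch of that guard (assuming nothing beyond the import itself):
try:
    from twisted.internet.cfreactor import CFReactor
except ImportError:
    CFReactor = None  # no CoreFoundation support here; skip the related check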
def LinkFunc(target, source, env):
# Relative paths cause problems with symbolic links, so
# we use absolute paths, which may be a problem for people
# who want to move their soft-linked src-trees around. Those
# people should use the 'hard-copy' mode; soft links cannot be
# used for that (at least I have no idea how).
src = source[0].abspath
dest = target[0].abspath
dir, file = os.path.split(dest)
if dir and not target[0].fs.isdir(dir):
os.makedirs(dir)
if not Link_Funcs:
# Set a default order of link functions.
set_duplicate('hard-soft-copy')
fs = source[0].fs
# Now link the files with the previously specified order.
for func in Link_Funcs:
try:
func(fs, src, dest)
break
except (IOError, __HOLE__):
# An OSError indicates something happened like a permissions
# problem or an attempt to symlink across file-system
# boundaries. An IOError indicates something like the file
# not existing. In either case, keep trying additional
# functions in the list and only raise an error if the last
# one failed.
if func == Link_Funcs[-1]:
# exception of the last link method (copy) are fatal
raise
return 0 | OSError | dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Node/FS.py/LinkFunc |
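# Hedged sketch of the 'hard-soft-copy' duplication order relied on above: try a
# hard link, then a symlink, then a plain copy, and only let the last method's
# failure propagate. A simplified stand-in, not SCons' Link_Funcs machinery.
import os
import shutil
def duplicate(src, dest):
    methods = [os.link, os.symlink, shutil.copy2]
    for method in methods:
        try:
            method(src, dest)
            return
        except (IOError, OSError):
            if method is methods[-1]:
                raise  # even copying failed; nothing left to fall back on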
def do_diskcheck_match(node, predicate, errorfmt):
result = predicate()
try:
# If calling the predicate() cached a None value from stat(),
# remove it so it doesn't interfere with later attempts to
# build this Node as we walk the DAG. (This isn't a great way
# to do this, we're reaching into an interface that doesn't
# really belong to us, but it's all about performance, so
# for now we'll just document the dependency...)
if node._memo['stat'] is None:
del node._memo['stat']
except (__HOLE__, KeyError):
pass
if result:
raise TypeError(errorfmt % node.abspath) | AttributeError | dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Node/FS.py/do_diskcheck_match |
def do_diskcheck_rcs(node, name):
try:
rcs_dir = node.rcs_dir
except __HOLE__:
if node.entry_exists_on_disk('RCS'):
rcs_dir = node.Dir('RCS')
else:
rcs_dir = None
node.rcs_dir = rcs_dir
if rcs_dir:
return rcs_dir.entry_exists_on_disk(name+',v')
return None | AttributeError | dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Node/FS.py/do_diskcheck_rcs |
def do_diskcheck_sccs(node, name):
try:
sccs_dir = node.sccs_dir
except __HOLE__:
if node.entry_exists_on_disk('SCCS'):
sccs_dir = node.Dir('SCCS')
else:
sccs_dir = None
node.sccs_dir = sccs_dir
if sccs_dir:
return sccs_dir.entry_exists_on_disk('s.'+name)
return None | AttributeError | dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Node/FS.py/do_diskcheck_sccs |
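# The two helpers above use the same cache-on-first-use idiom: a per-node attribute
# is read, AttributeError means "not computed yet", and the computed value (possibly
# None) is stored back so later calls skip the disk check. A generic sketch with
# assumed method names on the node object:
def get_version_dir(node, attr_name, dir_name):
    try:
        vdir = getattr(node, attr_name)          # e.g. 'rcs_dir' or 'sccs_dir'
    except AttributeError:
        if node.entry_exists_on_disk(dir_name):  # e.g. 'RCS' or 'SCCS'
            vdir = node.Dir(dir_name)
        else:
            vdir = None
        setattr(node, attr_name, vdir)
    return vdir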
def __getattr__(self, name):
# This is how we implement the "special" attributes
# such as base, posix, srcdir, etc.
try:
attr_function = self.dictSpecialAttrs[name]
except KeyError:
try:
attr = SCons.Util.Proxy.__getattr__(self, name)
except __HOLE__, e:
# Raise our own AttributeError subclass with an
# overridden __str__() method that identifies the
# name of the entry that caused the exception.
raise EntryProxyAttributeError(self, name)
return attr
else:
return attr_function(self) | AttributeError | dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Node/FS.py/EntryProxy.__getattr__ |
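# Sketch of the proxy lookup above: special names come from a dispatch table,
# everything else is delegated, and a failed delegation is re-raised as an
# AttributeError subclass whose message can name the offending entry. Simplified
# stand-in types, not SCons' Proxy machinery.
class EntryLookupError(AttributeError):
    def __init__(self, entry, name):
        AttributeError.__init__(self, '%r has no attribute %r' % (entry, name))
class SimpleProxy(object):
    def __init__(self, subject, specials=None):
        self._subject = subject
        self._specials = specials or {}
    def __getattr__(self, name):
        try:
            handler = self._specials[name]
        except KeyError:
            try:
                return getattr(self._subject, name)
            except AttributeError:
                raise EntryLookupError(self._subject, name)
        return handler(self._subject)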
def _save_str(self):
try:
return self._memo['_save_str']
except __HOLE__:
pass
result = sys.intern(self._get_str())
self._memo['_save_str'] = result
return result | KeyError | dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Node/FS.py/Base._save_str |
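# Sketch of the _memo pattern above: results live in a per-instance dict, and the
# KeyError on a cache miss is the signal to compute, intern, and store. Illustrative
# stand-in class, not the SCons Node type.
import sys
class Memoized(object):
    def __init__(self, text):
        self._memo = {}
        self._text = text
    def save_str(self):
        try:
            return self._memo['save_str']
        except KeyError:
            pass
        result = sys.intern(self._text)
        self._memo['save_str'] = result
        return result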