function (string, lengths 79 – 138k) | label (20 classes) | info (string, lengths 42 – 261)
---|---|---|
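Each row below gives a Python function in which one exception class has been masked with the placeholder `__HOLE__`, followed by the label naming the masked class and the path of the originating file in the ETHPy150Open corpus. As a purely illustrative sketch of the convention (this snippet is not a row of the dataset), a function like the following:

    def lookup(mapping, key, default=None):
        # In a dataset row the "KeyError" below would appear as "__HOLE__",
        # and the row's label column would read: KeyError
        try:
            return mapping[key]
        except KeyError:
            return default

would be stored with its `except` type masked and labelled `KeyError`.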
def _bcompile(file, cfile=None, dfile=None, doraise=False):
    encoding = py_compile.read_encoding(file, "utf-8")
    f = open(file, 'U', encoding=encoding)
    try:
        timestamp = int(os.fstat(f.fileno()).st_mtime)
    except __HOLE__:
        timestamp = int(os.stat(file).st_mtime)
    codestring = f.read()
    f.close()
    if codestring and codestring[-1] != '\n':
        codestring = codestring + '\n'
    try:
        codeobject = builtins.compile(codestring, dfile or file, 'exec')
    except Exception as err:
        py_exc = py_compile.PyCompileError(err.__class__, err, dfile or file)
        if doraise:
            raise py_exc
        else:
            sys.stderr.write(py_exc.msg + '\n')
            return
    fc = io.BytesIO()
    try:
        fc.write(b'\0\0\0\0')
        py_compile.wr_long(fc, timestamp)
        marshal.dump(codeobject, fc)
        fc.flush()
        fc.seek(0, 0)
        fc.write(py_compile.MAGIC)
        return fc.getvalue()
    finally:
        fc.close()
|
AttributeError
|
dataset/ETHPy150Open cournape/Bento/bento/private/_bytecode_3.py/_bcompile
|
def _bcompile(file, cfile=None, dfile=None, doraise=False, optimize=-1):
    with tokenize.open(file) as f:
        try:
            timestamp = int(os.fstat(f.fileno()).st_mtime)
        except AttributeError:
            timestamp = int(os.stat(file).st_mtime)
        codestring = f.read()
    try:
        codeobject = builtins.compile(codestring, dfile or file, 'exec',
                                      optimize=optimize)
    except Exception as err:
        py_exc = py_compile.PyCompileError(err.__class__, err, dfile or file)
        if doraise:
            raise py_exc
        else:
            sys.stderr.write(py_exc.msg + '\n')
            return
    if cfile is None:
        if optimize >= 0:
            cfile = imp.cache_from_source(file, debug_override=not optimize)
        else:
            cfile = imp.cache_from_source(file)
    try:
        os.makedirs(os.path.dirname(cfile))
    except __HOLE__ as error:
        if error.errno != errno.EEXIST:
            raise
    fc = io.BytesIO()
    try:
        fc.write(b'\0\0\0\0')
        py_compile.wr_long(fc, timestamp)
        marshal.dump(codeobject, fc)
        fc.flush()
        fc.seek(0, 0)
        fc.write(py_compile.MAGIC)
        return fc.getvalue()
    finally:
        fc.close()
|
OSError
|
dataset/ETHPy150Open cournape/Bento/bento/private/_bytecode_3.py/_bcompile
|
def node_dictionary():
    """Return a dictionary containing node name as key and node class as
    value. This will be deprecated soon in favour of
    :func:`node_catalogue()`"""
    classes = node_subclasses(Node)
    dictionary = {}
    for c in classes:
        try:
            name = c.identifier()
            dictionary[name] = c
        except __HOLE__:
            # If node does not provide identifier, we consider it to be
            # private or abstract class
            pass
    return dictionary
|
AttributeError
|
dataset/ETHPy150Open Stiivi/brewery/brewery/nodes/base.py/node_dictionary
|
def node_catalogue():
    """Returns a dictionary of information about all available nodes. Keys are
    node identifiers, values are dictionaries. The information dictionary contains
    all the keys from the node's `node_info` dictionary plus keys: `factory`
    with node class, `type` (if not provided) is set to one of ``source``,
    ``processing`` or ``target``.
    """
    classes = node_subclasses(Node)
    catalogue = {}
    for node_class in classes:
        try:
            name = node_class.identifier()
        except __HOLE__:
            # If node does not provide identifier, we consider it to be
            # private or abstract class
            continue
        # Get copy of node info
        info = dict(get_node_info(node_class))
        info["name"] = name
        info["factory"] = node_class
        # Get node type based on superclass, if not provided
        if "type" not in info:
            if issubclass(node_class, SourceNode):
                info["type"] = "source"
            elif not issubclass(node_class, SourceNode) \
                    and not issubclass(node_class, TargetNode):
                info["type"] = "processing"
            elif issubclass(node_class, TargetNode):
                info["type"] = "target"
            else:
                info["type"] = "unknown"
        catalogue[name] = info
    return catalogue
|
AttributeError
|
dataset/ETHPy150Open Stiivi/brewery/brewery/nodes/base.py/node_catalogue
|
def node_subclasses(root, abstract=False):
    """Get all subclasses of node.
    :Parameters:
        * `abstract`: If set to ``True`` all abstract classes are included as well. Default is
          ``False``
    """
    classes = []
    for c in utils.subclass_iterator(root):
        try:
            info = get_node_info(c)
            node_type = info.get("type")
            if node_type != "abstract":
                classes.append(c)
        except __HOLE__:
            pass
    return classes
|
AttributeError
|
dataset/ETHPy150Open Stiivi/brewery/brewery/nodes/base.py/node_subclasses
|
def syncfile(src, dst):
    """Same as cp() but only do copying when source file is newer than target file"""
    if not os.path.isfile(src):
        raise Exception("No such file: %s" % src)
    try:
        dst_mtime = os.path.getmtime(dst)
        src_mtime = os.path.getmtime(src)
        # Only accept equal modification time as equal as copyFile()
        # syncs over the mtime from the source.
        if src_mtime == dst_mtime:
            return False
    except __HOLE__:
        # destination file does not exist, so mtime check fails
        pass
    return cp(src, dst)
|
OSError
|
dataset/ETHPy150Open zynga/jasy/jasy/core/File.py/syncfile
|
def get_pvalue(self, pvalue):
    """Gets the PValue's computed value from the runner's cache."""
    try:
        return self._cache.get_pvalue(pvalue)
    except __HOLE__:
        raise error.PValueError('PValue is not computed.')
|
KeyError
|
dataset/ETHPy150Open GoogleCloudPlatform/DataflowPythonSDK/google/cloud/dataflow/runners/direct_runner.py/DirectPipelineRunner.get_pvalue
|
def _cmp(self, other, op):
    try:
        diff = self - other
    except __HOLE__:
        return NotImplemented
    else:
        return op(diff.p, 0)
|
TypeError
|
dataset/ETHPy150Open sympy/sympy/sympy/polys/domains/pythonrational.py/PythonRational._cmp
|
def __pow__(self, value):
    try:
        float(value)
    except __HOLE__:
        raise ValueError("Non-numeric value supplied for boost")
    q = LuceneQuery()
    q.subqueries = [self]
    q._and = False
    q._pow = value
    return q
|
ValueError
|
dataset/ETHPy150Open lugensa/scorched/scorched/search.py/LuceneQuery.__pow__
|
def add(self, args, kwargs):
    self.normalized = False
    _args = []
    for arg in args:
        if isinstance(arg, LuceneQuery):
            self.subqueries.append(arg)
        else:
            _args.append(arg)
    args = _args
    try:
        terms_or_phrases = kwargs.pop("__terms_or_phrases")
    except __HOLE__:
        terms_or_phrases = None
    for value in args:
        self.add_exact(None, value, terms_or_phrases)
    for k, v in list(kwargs.items()):
        try:
            field_name, rel = k.split("__")
        except ValueError:
            field_name, rel = k, 'eq'
        if not field_name:
            if (k, v) != ("*", "*"):
                # the only case where wildcards in field names are allowed
                raise ValueError("%s is not a valid field name" % k)
        if rel == 'eq':
            self.add_exact(field_name, v, terms_or_phrases)
        else:
            self.add_range(field_name, rel, v)
|
KeyError
|
dataset/ETHPy150Open lugensa/scorched/scorched/search.py/LuceneQuery.add
|
def add_range(self, field_name, rel, value):
    if rel not in self.range_query_templates:
        raise scorched.exc.SolrError("No such relation '%s' defined" % rel)
    insts = (value,)
    if rel in ('range', 'rangeexc'):
        try:
            assert len(value) == 2
        except (__HOLE__, TypeError):
            raise scorched.exc.SolrError(
                "'%s__%s' argument must be a length-2 iterable" % (
                    field_name, rel))
    elif rel == 'any':
        if value is not True:
            raise scorched.exc.SolrError("'%s__%s' argument must be True")
        insts = ()
    self.ranges.add((field_name, rel, insts))
|
AssertionError
|
dataset/ETHPy150Open lugensa/scorched/scorched/search.py/LuceneQuery.add_range
|
def boost_relevancy(self, boost_score, **kwargs):
    if not self.query_obj:
        raise TypeError("Can't boost the relevancy of an empty query")
    try:
        float(boost_score)
    except __HOLE__:
        raise ValueError("Non-numeric boost value supplied")
    newself = self.clone()
    newself.query_obj.add_boost(kwargs, boost_score)
    return newself
|
ValueError
|
dataset/ETHPy150Open lugensa/scorched/scorched/search.py/BaseSearch.boost_relevancy
|
def update(self, **kwargs):
    checked_kwargs = self.check_opts(kwargs)
    for f in ('qf', 'pf'):
        field = kwargs.get(f, {})
        for k, v in list(field.items()):
            if v is not None:
                try:
                    v = float(v)
                except __HOLE__:
                    raise scorched.exc.SolrError(
                        "'%s' has non-numerical boost value" % k)
    self.kwargs.update(checked_kwargs)
|
ValueError
|
dataset/ETHPy150Open lugensa/scorched/scorched/search.py/DismaxOptions.update
|
def update(self, fields, query_fields=None, **kwargs):
    if fields is None:
        return
    if not is_iter(fields):
        fields = [fields]
    self.fields.update(fields)
    if query_fields is not None:
        for k, v in list(query_fields.items()):
            if k not in self.fields:
                raise scorched.exc.SolrError(
                    "'%s' specified in query_fields but not fields" % k)
            if v is not None:
                try:
                    v = float(v)
                except __HOLE__:
                    raise scorched.exc.SolrError(
                        "'%s' has non-numerical boost value" % k)
        self.query_fields.update(query_fields)
    checked_kwargs = self.check_opts(kwargs)
    self.kwargs.update(checked_kwargs)
|
ValueError
|
dataset/ETHPy150Open lugensa/scorched/scorched/search.py/MoreLikeThisOptions.update
|
def main():
parser = argparse.ArgumentParser(
description='An easier way to use cProfile.',
usage='%(prog)s [--version] [-a ADDRESS] [-p PORT] scriptfile [arg] ...',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--version', action='version', version=VERSION)
parser.add_argument('-a', '--address', type=str, default='127.0.0.1',
help='The address to listen on. (defaults to 127.0.0.1).')
parser.add_argument('-p', '--port', type=int, default=4000,
help='The port to listen on. (defaults to 4000).')
# Preserve v0 functionality using a flag.
parser.add_argument('-f', '--file', type=str,
help='cProfile output to view.\nIf specified, the scriptfile provided will be ignored.')
parser.add_argument('remainder', nargs=argparse.REMAINDER,
help='The python script file to run and profile.',
metavar="scriptfile")
args = parser.parse_args()
if not sys.argv[1:]:
parser.print_help()
sys.exit(2)
info = '[cProfileV]: cProfile output available at http://%s:%s' % \
(args.address, args.port)
# v0 mode: Render profile output.
if args.file:
# Note: The info message is sent to stderr to keep stdout clean in case
# the profiled script writes some output to stdout
sys.stderr.write(info + "\n")
cprofilev = CProfileV(args.file, title=args.file, address=args.address, port=args.port)
cprofilev.start()
return
# v1 mode: Start script and render profile output.
sys.argv[:] = args.remainder
if len(args.remainder) < 0:
parser.print_help()
sys.exit(2)
profile = cProfile.Profile()
progname = args.remainder[0]
sys.path.insert(0, os.path.dirname(progname))
with open(progname, 'rb') as fp:
try:
code = compile(fp.read(), progname, 'exec')
except (__HOLE__, SyntaxError) as e:
sys.stderr.write(
'[cProfileV]: there was a problem compiling your scriptfile.' + '\n\n'
)
parser.print_help()
sys.exit(2)
# Note: The info message is sent to stderr to keep stdout clean in case
# the profiled script writes some output to stdout
sys.stderr.write(info + "\n")
globs = {
'__file__': progname,
'__name__': '__main__',
'__package__': None,
}
# Start the given program in a separate thread.
progthread = threading.Thread(target=profile.runctx, args=(code, globs, None))
progthread.setDaemon(True)
progthread.start()
cprofilev = CProfileV(profile, title=progname, address=args.address, port=args.port)
cprofilev.start()
|
TypeError
|
dataset/ETHPy150Open ymichael/cprofilev/cprofilev.py/main
|
def __init__(self, watchers, endpoint, pubsub_endpoint, check_delay=1.0,
prereload_fn=None, context=None, loop=None, statsd=False,
stats_endpoint=None, statsd_close_outputs=False,
multicast_endpoint=None, plugins=None,
sockets=None, warmup_delay=0, httpd=False,
httpd_host='localhost', httpd_port=8080,
httpd_close_outputs=False, debug=False, debug_gc=False,
ssh_server=None, proc_name='circusd', pidfile=None,
loglevel=None, logoutput=None, loggerconfig=None,
fqdn_prefix=None, umask=None, endpoint_owner=None,
papa_endpoint=None):
self.watchers = watchers
self.endpoint = endpoint
self.check_delay = check_delay
self.prereload_fn = prereload_fn
self.pubsub_endpoint = pubsub_endpoint
self.multicast_endpoint = multicast_endpoint
self.proc_name = proc_name
self.ssh_server = ssh_server
self.evpub_socket = None
self.pidfile = pidfile
self.loglevel = loglevel
self.logoutput = logoutput
self.loggerconfig = loggerconfig
self.umask = umask
self.endpoint_owner = endpoint_owner
self._running = False
try:
# getfqdn appears to fail in Python3.3 in the unittest
# framework so fall back to gethostname
socket_fqdn = socket.getfqdn()
except __HOLE__:
socket_fqdn = socket.gethostname()
if fqdn_prefix is None:
fqdn = socket_fqdn
else:
fqdn = '{}@{}'.format(fqdn_prefix, socket_fqdn)
self.fqdn = fqdn
if papa_endpoint and papa:
if papa_endpoint.startswith('ipc:/'):
papa_endpoint = papa_endpoint[4:]
while papa_endpoint[:2] == '//':
papa_endpoint = papa_endpoint[1:]
papa.set_default_path(papa_endpoint)
elif papa_endpoint.startswith('tcp://'):
papa_endpoint = papa_endpoint[6:].partition(':')[2]
papa.set_default_port = papa_endpoint
self.ctrl = self.loop = None
self._provided_loop = False
self.socket_event = False
if loop is not None:
self._provided_loop = True
self.loop = loop
# initialize zmq context
self._init_context(context)
self.pid = os.getpid()
self._watchers_names = {}
self._stopping = False
self._restarting = False
self.debug = debug
self._exclusive_running_command = None
if self.debug:
self.stdout_stream = self.stderr_stream = {'class': 'StdoutStream'}
else:
self.stdout_stream = self.stderr_stream = None
self.debug_gc = debug_gc
if debug_gc:
gc.set_debug(gc.DEBUG_LEAK)
# initializing circusd-stats as a watcher when configured
self.statsd = statsd
self.stats_endpoint = stats_endpoint
if self.statsd:
cmd = "%s -c 'from circus import stats; stats.main()'" % \
sys.executable
cmd += ' --endpoint %s' % self.endpoint
cmd += ' --pubsub %s' % self.pubsub_endpoint
cmd += ' --statspoint %s' % self.stats_endpoint
if ssh_server is not None:
cmd += ' --ssh %s' % ssh_server
if debug:
cmd += ' --log-level DEBUG'
elif self.loglevel:
cmd += ' --log-level ' + self.loglevel
if self.logoutput:
cmd += ' --log-output ' + self.logoutput
stats_watcher = Watcher('circusd-stats', cmd, use_sockets=True,
singleton=True,
stdout_stream=self.stdout_stream,
stderr_stream=self.stderr_stream,
copy_env=True, copy_path=True,
close_child_stderr=statsd_close_outputs,
close_child_stdout=statsd_close_outputs)
self.watchers.append(stats_watcher)
# adding the httpd
if httpd:
# adding the socket
httpd_socket = CircusSocket(name='circushttpd', host=httpd_host,
port=httpd_port)
if sockets is None:
sockets = [httpd_socket]
else:
sockets.append(httpd_socket)
cmd = ("%s -c 'from circusweb import circushttpd; "
"circushttpd.main()'") % sys.executable
cmd += ' --endpoint %s' % self.endpoint
cmd += ' --fd $(circus.sockets.circushttpd)'
if ssh_server is not None:
cmd += ' --ssh %s' % ssh_server
# Adding the watcher
httpd_watcher = Watcher('circushttpd', cmd, use_sockets=True,
singleton=True,
stdout_stream=self.stdout_stream,
stderr_stream=self.stderr_stream,
copy_env=True, copy_path=True,
close_child_stderr=httpd_close_outputs,
close_child_stdout=httpd_close_outputs)
self.watchers.append(httpd_watcher)
# adding each plugin as a watcher
ch_stderr = self.stderr_stream is None
ch_stdout = self.stdout_stream is None
if plugins is not None:
for plugin in plugins:
fqn = plugin['use']
cmd = get_plugin_cmd(plugin, self.endpoint,
self.pubsub_endpoint, self.check_delay,
ssh_server, debug=self.debug,
loglevel=self.loglevel,
logoutput=self.logoutput)
plugin_cfg = dict(cmd=cmd, priority=1, singleton=True,
stdout_stream=self.stdout_stream,
stderr_stream=self.stderr_stream,
copy_env=True, copy_path=True,
close_child_stderr=ch_stderr,
close_child_stdout=ch_stdout)
plugin_cfg.update(plugin)
if 'name' not in plugin_cfg:
plugin_cfg['name'] = fqn
plugin_watcher = Watcher.load_from_config(plugin_cfg)
self.watchers.append(plugin_watcher)
self.sockets = CircusSockets(sockets)
self.warmup_delay = warmup_delay
|
KeyError
|
dataset/ETHPy150Open circus-tent/circus/circus/arbiter.py/Arbiter.__init__
|
@classmethod
def load_from_config(cls, config_file, loop=None):
    cfg = get_config(config_file)
    watchers = []
    for watcher in cfg.get('watchers', []):
        watchers.append(Watcher.load_from_config(watcher))
    sockets = []
    for socket_ in cfg.get('sockets', []):
        sockets.append(CircusSocket.load_from_config(socket_))
    httpd = cfg.get('httpd', False)
    if httpd:
        # controlling that we have what it takes to run the web UI
        # if something is missing this will tell the user
        try:
            import circusweb     # NOQA
        except __HOLE__:
            logger.error('You need to install circus-web')
            sys.exit(1)
    # creating arbiter
    arbiter = cls(watchers, cfg['endpoint'], cfg['pubsub_endpoint'],
                  check_delay=cfg.get('check_delay', 1.),
                  prereload_fn=cfg.get('prereload_fn'),
                  statsd=cfg.get('statsd', False),
                  stats_endpoint=cfg.get('stats_endpoint'),
                  multicast_endpoint=cfg.get('multicast_endpoint'),
                  plugins=cfg.get('plugins'), sockets=sockets,
                  warmup_delay=cfg.get('warmup_delay', 0),
                  httpd=httpd,
                  loop=loop,
                  httpd_host=cfg.get('httpd_host', 'localhost'),
                  httpd_port=cfg.get('httpd_port', 8080),
                  debug=cfg.get('debug', False),
                  debug_gc=cfg.get('debug_gc', False),
                  ssh_server=cfg.get('ssh_server', None),
                  pidfile=cfg.get('pidfile', None),
                  loglevel=cfg.get('loglevel', None),
                  logoutput=cfg.get('logoutput', None),
                  loggerconfig=cfg.get('loggerconfig', None),
                  fqdn_prefix=cfg.get('fqdn_prefix', None),
                  umask=cfg['umask'],
                  endpoint_owner=cfg.get('endpoint_owner', None))
    # store the cfg which will be used, so it can be used later
    # for checking if the cfg has been changed
    arbiter._cfg = cls.get_arbiter_config(cfg)
    arbiter.config_file = config_file
    return arbiter
|
ImportError
|
dataset/ETHPy150Open circus-tent/circus/circus/arbiter.py/Arbiter.load_from_config
|
def reap_processes(self):
    # map watcher to pids
    watchers_pids = {}
    for watcher in self.iter_watchers():
        if not watcher.is_stopped():
            for process in watcher.processes.values():
                watchers_pids[process.pid] = watcher
    # detect dead children
    if not IS_WINDOWS:
        while True:
            try:
                # wait for our child (so it's not a zombie)
                pid, status = os.waitpid(-1, os.WNOHANG)
                if not pid:
                    break
                if pid in watchers_pids:
                    watcher = watchers_pids[pid]
                    watcher.reap_process(pid, status)
            except __HOLE__ as e:
                if e.errno == errno.EAGAIN:
                    sleep(0)
                    continue
                elif e.errno == errno.ECHILD:
                    # process already reaped
                    return
                else:
                    raise
|
OSError
|
dataset/ETHPy150Open circus-tent/circus/circus/arbiter.py/Arbiter.reap_processes
|
def StructuredPropertyToProto(prop, index):
    """Converts a structured property to the corresponding message field.
    Args:
        prop: The NDB property to be converted.
        index: The index of the property within the message.
    Returns:
        A message field with attributes corresponding to those in prop, index
        corresponding to that which was passed in and with underlying message
        class equal to the message class produced by the model class, which
        should be a subclass of EndpointsModel.
    Raises:
        TypeError if the model class of the property does not have a callable
        ProtoModel method. This is because we expected a subclass of
        EndpointsModel set on the structured property.
    """
    modelclass = prop._modelclass
    try:
        property_proto_method = modelclass.ProtoModel
        property_proto = property_proto_method()
    except (AttributeError, __HOLE__):
        error_msg = ('Structured properties must receive a model class with a '
                     'callable ProtoModel attribute. The class %s has no such '
                     'attribute.' % (modelclass.__name__,))
        raise TypeError(error_msg)
    # No default for {MessageField}s
    kwargs = GetKeywordArgs(prop, include_default=False)
    return messages.MessageField(property_proto, index, **kwargs)
|
TypeError
|
dataset/ETHPy150Open GoogleCloudPlatform/endpoints-proto-datastore/endpoints_proto_datastore/ndb/utils.py/StructuredPropertyToProto
|
def callback(self, root, raw):
    tag, attrs = root
    if self._device_handler.perform_qualify_check():
        if tag != qualify("rpc-reply"):
            return
    for key in attrs:  # in the <rpc-reply> attributes
        if key == "message-id":  # if we found msgid attr
            id = attrs[key]  # get the msgid
            with self._lock:
                try:
                    rpc = self._id2rpc[id]  # the corresponding rpc
                    logger.debug("Delivering to %r" % rpc)
                    rpc.deliver_reply(raw)
                except __HOLE__:
                    raise OperationError("Unknown 'message-id': %s", id)
                # no catching other exceptions, fail loudly if must
                else:
                    # if no error delivering, can del the reference to the RPC
                    del self._id2rpc[id]
            break
    else:
        raise OperationError("Could not find 'message-id' attribute in <rpc-reply>")
|
KeyError
|
dataset/ETHPy150Open ncclient/ncclient/ncclient/operations/rpc.py/RPCReplyListener.callback
|
def __init__(self, session, device_handler, async=False, timeout=30, raise_mode=RaiseMode.NONE):
    """
    *session* is the :class:`~ncclient.transport.Session` instance
    *device_handler* is the :class:`~ncclient.devices.*.*DeviceHandler` instance
    *async* specifies whether the request is to be made asynchronously, see :attr:`is_async`
    *timeout* is the timeout for a synchronous request, see :attr:`timeout`
    *raise_mode* specifies the exception raising mode, see :attr:`raise_mode`
    """
    self._session = session
    try:
        for cap in self.DEPENDS:
            self._assert(cap)
    except __HOLE__:
        pass
    self._async = async
    self._timeout = timeout
    self._raise_mode = raise_mode
    self._id = uuid4().urn  # Keeps things simple instead of having a class attr with running ID that has to be locked
    self._listener = RPCReplyListener(session, device_handler)
    self._listener.register(self._id, self)
    self._reply = None
    self._error = None
    self._event = Event()
    self._device_handler = device_handler
|
AttributeError
|
dataset/ETHPy150Open ncclient/ncclient/ncclient/operations/rpc.py/RPC.__init__
|
def testFileRebuild(self):
from twisted.python.util import sibpath
import shutil, time
shutil.copyfile(sibpath(__file__, "myrebuilder1.py"),
os.path.join(self.fakelibPath, "myrebuilder.py"))
from twisted_rebuild_fakelib import myrebuilder
a = myrebuilder.A()
try:
object
except NameError:
pass
else:
from twisted.test import test_rebuild
b = myrebuilder.B()
class C(myrebuilder.B):
pass
test_rebuild.C = C
c = C()
i = myrebuilder.Inherit()
self.assertEquals(a.a(), 'a')
# necessary because the file has not "changed" if a second has not gone
# by in unix. This sucks, but it's not often that you'll be doing more
# than one reload per second.
time.sleep(1.1)
shutil.copyfile(sibpath(__file__, "myrebuilder2.py"),
os.path.join(self.fakelibPath, "myrebuilder.py"))
rebuild.rebuild(myrebuilder)
try:
object
except __HOLE__:
pass
else:
b2 = myrebuilder.B()
self.assertEquals(b2.b(), 'c')
self.assertEquals(b.b(), 'c')
self.assertEquals(i.a(), 'd')
self.assertEquals(a.a(), 'b')
# more work to be done on new-style classes
# self.assertEquals(c.b(), 'c')
|
NameError
|
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/test/test_rebuild.py/RebuildTestCase.testFileRebuild
|
def check_if_installed(self):
    self.log.debug('Check Siege: %s' % self.tool_path)
    try:
        shell_exec([self.tool_path, '-h'])
    except __HOLE__:
        return False
    return True
|
OSError
|
dataset/ETHPy150Open Blazemeter/taurus/bzt/modules/siege.py/Siege.check_if_installed
|
def _check_stat_op(self, name, alternate, check_objects=False,
check_allna=False):
import pandas.core.nanops as nanops
def testit():
f = getattr(Series, name)
# add some NaNs
self.series[5:15] = np.NaN
# idxmax, idxmin, min, and max are valid for dates
if name not in ['max', 'min']:
ds = Series(date_range('1/1/2001', periods=10))
self.assertRaises(TypeError, f, ds)
# skipna or no
self.assertTrue(notnull(f(self.series)))
self.assertTrue(isnull(f(self.series, skipna=False)))
# check the result is correct
nona = self.series.dropna()
assert_almost_equal(f(nona), alternate(nona.values))
assert_almost_equal(f(self.series), alternate(nona.values))
allna = self.series * nan
if check_allna:
# xref 9422
# bottleneck >= 1.0 give 0.0 for an allna Series sum
try:
self.assertTrue(nanops._USE_BOTTLENECK)
import bottleneck as bn # noqa
self.assertTrue(bn.__version__ >= LooseVersion('1.0'))
self.assertEqual(f(allna), 0.0)
except:
self.assertTrue(np.isnan(f(allna)))
# dtype=object with None, it works!
s = Series([1, 2, 3, None, 5])
f(s)
# 2888
l = [0]
l.extend(lrange(2 ** 40, 2 ** 40 + 1000))
s = Series(l, dtype='int64')
assert_almost_equal(float(f(s)), float(alternate(s.values)))
# check date range
if check_objects:
s = Series(bdate_range('1/1/2000', periods=10))
res = f(s)
exp = alternate(s)
self.assertEqual(res, exp)
# check on string data
if name not in ['sum', 'min', 'max']:
self.assertRaises(TypeError, f, Series(list('abc')))
# Invalid axis.
self.assertRaises(ValueError, f, self.series, axis=1)
# Unimplemented numeric_only parameter.
if 'numeric_only' in compat.signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
self.series, numeric_only=True)
testit()
try:
import bottleneck as bn # noqa
nanops._USE_BOTTLENECK = False
testit()
nanops._USE_BOTTLENECK = True
except __HOLE__:
pass
|
ImportError
|
dataset/ETHPy150Open pydata/pandas/pandas/tests/series/test_analytics.py/TestSeriesAnalytics._check_stat_op
|
def envFileAsDictionary(fname):
    retval = {}
    fp = open(fname, 'r')
    for line in fp:
        line = line.rstrip('\r\n')
        try:
            (k, v) = line.split('=', 1)
        except __HOLE__:
            continue
        retval[k] = v
    return retval
|
ValueError
|
dataset/ETHPy150Open Netflix/gcviz/root/apps/apache/htdocs/AdminGCViz/vmsgcvizutils.py/envFileAsDictionary
|
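The `envFileAsDictionary` row above illustrates skipping malformed lines by catching the exception that tuple-unpacking `line.split('=', 1)` raises when no `=` is present. A minimal standalone sketch of the same pattern (the sample input is hypothetical, not taken from the source repository):

    def parse_env_lines(lines):
        # Keep only well-formed KEY=VALUE lines; unpacking fails with ValueError otherwise.
        result = {}
        for line in lines:
            line = line.rstrip('\r\n')
            try:
                key, value = line.split('=', 1)
            except ValueError:
                continue
            result[key] = value
        return result

    # parse_env_lines(['PATH=/usr/bin', 'garbage', 'HOME=/root'])
    # -> {'PATH': '/usr/bin', 'HOME': '/root'}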
def setting(name, default=None):
    try:
        return getattr(settings, setting_name(name))
    except __HOLE__:
        return getattr(settings, name, default)
|
AttributeError
|
dataset/ETHPy150Open omab/python-social-auth/social/apps/django_app/utils.py/setting
|
def list_directory(project_tree, directory):
    """List Paths directly below the given path, relative to the ProjectTree.
    Raises an exception if the path is not a directory.
    :returns: A DirectoryListing.
    """
    try:
        path = normpath(directory.path)
        entries = [join(path, e) for e in project_tree.listdir(path)]
        return DirectoryListing(directory,
                                True,
                                tuple(Path(e) for e in entries))
    except (__HOLE__, OSError) as e:
        if e.errno == errno.ENOENT:
            return DirectoryListing(directory, False, tuple())
        else:
            raise e
|
IOError
|
dataset/ETHPy150Open pantsbuild/pants/src/python/pants/engine/exp/fs.py/list_directory
|
def file_content(project_tree, path):
    try:
        return FileContent(path.path, project_tree.content(path.path))
    except (IOError, __HOLE__) as e:
        if e.errno == errno.ENOENT:
            return FileContent(path.path, None)
        else:
            raise e
|
OSError
|
dataset/ETHPy150Open pantsbuild/pants/src/python/pants/engine/exp/fs.py/file_content
|
def quad(func, a, b, args=(), full_output=0, epsabs=1.49e-8, epsrel=1.49e-8,
limit=50, points=None, weight=None, wvar=None, wopts=None, maxp1=50,
limlst=50):
"""
Compute a definite integral.
Integrate func from `a` to `b` (possibly infinite interval) using a
technique from the Fortran library QUADPACK.
Parameters
----------
func : function
A Python function or method to integrate. If `func` takes many
arguments, it is integrated along the axis corresponding to the
first argument.
If the user desires improved integration performance, then f may
instead be a ``ctypes`` function of the form:
f(int n, double args[n]),
where ``args`` is an array of function arguments and ``n`` is the
length of ``args``. ``f.argtypes`` should be set to
``(c_int, c_double)``, and ``f.restype`` should be ``(c_double,)``.
a : float
Lower limit of integration (use -numpy.inf for -infinity).
b : float
Upper limit of integration (use numpy.inf for +infinity).
args : tuple, optional
Extra arguments to pass to `func`.
full_output : int, optional
Non-zero to return a dictionary of integration information.
If non-zero, warning messages are also suppressed and the
message is appended to the output tuple.
Returns
-------
y : float
The integral of func from `a` to `b`.
abserr : float
An estimate of the absolute error in the result.
infodict : dict
A dictionary containing additional information.
Run scipy.integrate.quad_explain() for more information.
message :
A convergence message.
explain :
Appended only with 'cos' or 'sin' weighting and infinite
integration limits, it contains an explanation of the codes in
infodict['ierlst']
Other Parameters
----------------
epsabs : float or int, optional
Absolute error tolerance.
epsrel : float or int, optional
Relative error tolerance.
limit : float or int, optional
An upper bound on the number of subintervals used in the adaptive
algorithm.
points : (sequence of floats,ints), optional
A sequence of break points in the bounded integration interval
where local difficulties of the integrand may occur (e.g.,
singularities, discontinuities). The sequence does not have
to be sorted.
weight : float or int, optional
String indicating weighting function. Full explanation for this
and the remaining arguments can be found below.
wvar : optional
Variables for use with weighting functions.
wopts : optional
Optional input for reusing Chebyshev moments.
maxp1 : float or int, optional
An upper bound on the number of Chebyshev moments.
limlst : int, optional
Upper bound on the number of cycles (>=3) for use with a sinusoidal
weighting and an infinite end-point.
See Also
--------
dblquad : double integral
tplquad : triple integral
nquad : n-dimensional integrals (uses `quad` recursively)
fixed_quad : fixed-order Gaussian quadrature
quadrature : adaptive Gaussian quadrature
odeint : ODE integrator
ode : ODE integrator
simps : integrator for sampled data
romb : integrator for sampled data
scipy.special : for coefficients and roots of orthogonal polynomials
Notes
-----
**Extra information for quad() inputs and outputs**
If full_output is non-zero, then the third output argument
(infodict) is a dictionary with entries as tabulated below. For
infinite limits, the range is transformed to (0,1) and the
optional outputs are given with respect to this transformed range.
Let M be the input argument limit and let K be infodict['last'].
The entries are:
'neval'
The number of function evaluations.
'last'
The number, K, of subintervals produced in the subdivision process.
'alist'
A rank-1 array of length M, the first K elements of which are the
left end points of the subintervals in the partition of the
integration range.
'blist'
A rank-1 array of length M, the first K elements of which are the
right end points of the subintervals.
'rlist'
A rank-1 array of length M, the first K elements of which are the
integral approximations on the subintervals.
'elist'
A rank-1 array of length M, the first K elements of which are the
moduli of the absolute error estimates on the subintervals.
'iord'
A rank-1 integer array of length M, the first L elements of
which are pointers to the error estimates over the subintervals
with ``L=K`` if ``K<=M/2+2`` or ``L=M+1-K`` otherwise. Let I be the
sequence ``infodict['iord']`` and let E be the sequence
``infodict['elist']``. Then ``E[I[1]], ..., E[I[L]]`` forms a
decreasing sequence.
If the input argument points is provided (i.e. it is not None),
the following additional outputs are placed in the output
dictionary. Assume the points sequence is of length P.
'pts'
A rank-1 array of length P+2 containing the integration limits
and the break points of the intervals in ascending order.
This is an array giving the subintervals over which integration
will occur.
'level'
A rank-1 integer array of length M (=limit), containing the
subdivision levels of the subintervals, i.e., if (aa,bb) is a
subinterval of ``(pts[1], pts[2])`` where ``pts[0]`` and ``pts[2]``
are adjacent elements of ``infodict['pts']``, then (aa,bb) has level l
if ``|bb-aa| = |pts[2]-pts[1]| * 2**(-l)``.
'ndin'
A rank-1 integer array of length P+2. After the first integration
over the intervals (pts[1], pts[2]), the error estimates over some
of the intervals may have been increased artificially in order to
put their subdivision forward. This array has ones in slots
corresponding to the subintervals for which this happens.
**Weighting the integrand**
The input variables, *weight* and *wvar*, are used to weight the
integrand by a select list of functions. Different integration
methods are used to compute the integral with these weighting
functions. The possible values of weight and the corresponding
weighting functions are.
========== =================================== =====================
``weight`` Weight function used ``wvar``
========== =================================== =====================
'cos' cos(w*x) wvar = w
'sin' sin(w*x) wvar = w
'alg' g(x) = ((x-a)**alpha)*((b-x)**beta) wvar = (alpha, beta)
'alg-loga' g(x)*log(x-a) wvar = (alpha, beta)
'alg-logb' g(x)*log(b-x) wvar = (alpha, beta)
'alg-log' g(x)*log(x-a)*log(b-x) wvar = (alpha, beta)
'cauchy' 1/(x-c) wvar = c
========== =================================== =====================
wvar holds the parameter w, (alpha, beta), or c depending on the weight
selected. In these expressions, a and b are the integration limits.
For the 'cos' and 'sin' weighting, additional inputs and outputs are
available.
For finite integration limits, the integration is performed using a
Clenshaw-Curtis method which uses Chebyshev moments. For repeated
calculations, these moments are saved in the output dictionary:
'momcom'
The maximum level of Chebyshev moments that have been computed,
i.e., if ``M_c`` is ``infodict['momcom']`` then the moments have been
computed for intervals of length ``|b-a| * 2**(-l)``,
``l=0,1,...,M_c``.
'nnlog'
A rank-1 integer array of length M(=limit), containing the
subdivision levels of the subintervals, i.e., an element of this
array is equal to l if the corresponding subinterval is
``|b-a|* 2**(-l)``.
'chebmo'
A rank-2 array of shape (25, maxp1) containing the computed
Chebyshev moments. These can be passed on to an integration
over the same interval by passing this array as the second
element of the sequence wopts and passing infodict['momcom'] as
the first element.
If one of the integration limits is infinite, then a Fourier integral is
computed (assuming w neq 0). If full_output is 1 and a numerical error
is encountered, besides the error message attached to the output tuple,
a dictionary is also appended to the output tuple which translates the
error codes in the array ``info['ierlst']`` to English messages. The
output information dictionary contains the following entries instead of
'last', 'alist', 'blist', 'rlist', and 'elist':
'lst'
The number of subintervals needed for the integration (call it ``K_f``).
'rslst'
A rank-1 array of length M_f=limlst, whose first ``K_f`` elements
contain the integral contribution over the interval
``(a+(k-1)c, a+kc)`` where ``c = (2*floor(|w|) + 1) * pi / |w|``
and ``k=1,2,...,K_f``.
'erlst'
A rank-1 array of length ``M_f`` containing the error estimate
corresponding to the interval in the same position in
``infodict['rslist']``.
'ierlst'
A rank-1 integer array of length ``M_f`` containing an error flag
corresponding to the interval in the same position in
``infodict['rslist']``. See the explanation dictionary (last entry
in the output tuple) for the meaning of the codes.
Examples
--------
Calculate :math:`\\int^4_0 x^2 dx` and compare with an analytic result
>>> from scipy import integrate
>>> x2 = lambda x: x**2
>>> integrate.quad(x2, 0, 4)
(21.333333333333332, 2.3684757858670003e-13)
>>> print(4**3 / 3.) # analytical result
21.3333333333
Calculate :math:`\\int^\\infty_0 e^{-x} dx`
>>> invexp = lambda x: np.exp(-x)
>>> integrate.quad(invexp, 0, np.inf)
(1.0, 5.842605999138044e-11)
>>> f = lambda x,a : a*x
>>> y, err = integrate.quad(f, 0, 1, args=(1,))
>>> y
0.5
>>> y, err = integrate.quad(f, 0, 1, args=(3,))
>>> y
1.5
Calculate :math:`\\int^1_0 x^2 + y^2 dx` with ctypes, holding
y parameter as 1::
testlib.c =>
double func(int n, double args[n]){
return args[0]*args[0] + args[1]*args[1];}
compile to library testlib.*
::
from scipy import integrate
import ctypes
lib = ctypes.CDLL('/home/.../testlib.*') #use absolute path
lib.func.restype = ctypes.c_double
lib.func.argtypes = (ctypes.c_int,ctypes.c_double)
integrate.quad(lib.func,0,1,(1))
#(1.3333333333333333, 1.4802973661668752e-14)
print((1.0**3/3.0 + 1.0) - (0.0**3/3.0 + 0.0)) #Analytic result
# 1.3333333333333333
"""
if not isinstance(args, tuple):
args = (args,)
if (weight is None):
retval = _quad(func, a, b, args, full_output, epsabs, epsrel, limit,
points)
else:
retval = _quad_weight(func, a, b, args, full_output, epsabs, epsrel,
limlst, limit, maxp1, weight, wvar, wopts)
ier = retval[-1]
if ier == 0:
return retval[:-1]
msgs = {80: "A Python error occurred possibly while calling the function.",
1: "The maximum number of subdivisions (%d) has been achieved.\n If increasing the limit yields no improvement it is advised to analyze \n the integrand in order to determine the difficulties. If the position of a \n local difficulty can be determined (singularity, discontinuity) one will \n probably gain from splitting up the interval and calling the integrator \n on the subranges. Perhaps a special-purpose integrator should be used." % limit,
2: "The occurrence of roundoff error is detected, which prevents \n the requested tolerance from being achieved. The error may be \n underestimated.",
3: "Extremely bad integrand behavior occurs at some points of the\n integration interval.",
4: "The algorithm does not converge. Roundoff error is detected\n in the extrapolation table. It is assumed that the requested tolerance\n cannot be achieved, and that the returned result (if full_output = 1) is \n the best which can be obtained.",
5: "The integral is probably divergent, or slowly convergent.",
6: "The input is invalid.",
7: "Abnormal termination of the routine. The estimates for result\n and error are less reliable. It is assumed that the requested accuracy\n has not been achieved.",
'unknown': "Unknown error."}
if weight in ['cos','sin'] and (b == Inf or a == -Inf):
msgs[1] = "The maximum number of cycles allowed has been achieved., e.e.\n of subintervals (a+(k-1)c, a+kc) where c = (2*int(abs(omega)+1))\n *pi/abs(omega), for k = 1, 2, ..., lst. One can allow more cycles by increasing the value of limlst. Look at info['ierlst'] with full_output=1."
msgs[4] = "The extrapolation table constructed for convergence acceleration\n of the series formed by the integral contributions over the cycles, \n does not converge to within the requested accuracy. Look at \n info['ierlst'] with full_output=1."
msgs[7] = "Bad integrand behavior occurs within one or more of the cycles.\n Location and type of the difficulty involved can be determined from \n the vector info['ierlist'] obtained with full_output=1."
explain = {1: "The maximum number of subdivisions (= limit) has been \n achieved on this cycle.",
2: "The occurrence of roundoff error is detected and prevents\n the tolerance imposed on this cycle from being achieved.",
3: "Extremely bad integrand behavior occurs at some points of\n this cycle.",
4: "The integral over this cycle does not converge (to within the required accuracy) due to roundoff in the extrapolation procedure invoked on this cycle. It is assumed that the result on this interval is the best which can be obtained.",
5: "The integral over this cycle is probably divergent or slowly convergent."}
try:
msg = msgs[ier]
except __HOLE__:
msg = msgs['unknown']
if ier in [1,2,3,4,5,7]:
if full_output:
if weight in ['cos','sin'] and (b == Inf or a == Inf):
return retval[:-1] + (msg, explain)
else:
return retval[:-1] + (msg,)
else:
warnings.warn(msg, IntegrationWarning)
return retval[:-1]
else:
raise ValueError(msg)
|
KeyError
|
dataset/ETHPy150Open scipy/scipy/scipy/integrate/quadpack.py/quad
|
def Exec(self, feedback_fn):
    jobs = []
    if self.op.group_name:
        groups = [self.op.group_name]
        depends_fn = lambda: None
    else:
        groups = self.cfg.GetNodeGroupList()
        # Verify global configuration
        jobs.append([
            opcodes.OpClusterVerifyConfig(ignore_errors=self.op.ignore_errors),
        ])
        # Always depend on global verification
        depends_fn = lambda: [(-len(jobs), [])]
    jobs.extend(
        [opcodes.OpClusterVerifyGroup(group_name=group,
                                      ignore_errors=self.op.ignore_errors,
                                      depends=depends_fn(),
                                      verify_clutter=self.op.verify_clutter)]
        for group in groups)
    # Fix up all parameters
    for op in itertools.chain(*jobs):  # pylint: disable=W0142
        op.debug_simulate_errors = self.op.debug_simulate_errors
        op.verbose = self.op.verbose
        op.error_codes = self.op.error_codes
        try:
            op.skip_checks = self.op.skip_checks
        except __HOLE__:
            assert not isinstance(op, opcodes.OpClusterVerifyGroup)
    return ResultWithJobs(jobs)
|
AttributeError
|
dataset/ETHPy150Open ganeti/ganeti/lib/cmdlib/cluster/verify.py/LUClusterVerify.Exec
|
def _VerifyNodeTime(self, ninfo, nresult,
                    nvinfo_starttime, nvinfo_endtime):
    """Check the node time.
    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nvinfo_starttime: the start time of the RPC call
    @param nvinfo_endtime: the end time of the RPC call
    """
    ntime = nresult.get(constants.NV_TIME, None)
    try:
        ntime_merged = utils.MergeTime(ntime)
    except (__HOLE__, TypeError):
        self._ErrorIf(True, constants.CV_ENODETIME, ninfo.name,
                      "Node returned invalid time")
        return
    if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
        ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
    elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
        ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
    else:
        ntime_diff = None
    self._ErrorIf(ntime_diff is not None, constants.CV_ENODETIME, ninfo.name,
                  "Node time diverges by at least %s from master node time",
                  ntime_diff)
|
ValueError
|
dataset/ETHPy150Open ganeti/ganeti/lib/cmdlib/cluster/verify.py/LUClusterVerifyGroup._VerifyNodeTime
|
def _VerifyAcceptedFileStoragePaths(self, ninfo, nresult, is_master):
    """Verifies paths in L{pathutils.FILE_STORAGE_PATHS_FILE}.
    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @type is_master: bool
    @param is_master: Whether node is the master node
    """
    cluster = self.cfg.GetClusterInfo()
    if (is_master and
            (cluster.IsFileStorageEnabled() or
             cluster.IsSharedFileStorageEnabled())):
        try:
            fspaths = nresult[constants.NV_ACCEPTED_STORAGE_PATHS]
        except __HOLE__:
            # This should never happen
            self._ErrorIf(True, constants.CV_ENODEFILESTORAGEPATHS, ninfo.name,
                          "Node did not return forbidden file storage paths")
        else:
            self._ErrorIf(fspaths, constants.CV_ENODEFILESTORAGEPATHS, ninfo.name,
                          "Found forbidden file storage paths: %s",
                          utils.CommaJoin(fspaths))
    else:
        self._ErrorIf(constants.NV_ACCEPTED_STORAGE_PATHS in nresult,
                      constants.CV_ENODEFILESTORAGEPATHS, ninfo.name,
                      "Node should not have returned forbidden file storage"
                      " paths")
|
KeyError
|
dataset/ETHPy150Open ganeti/ganeti/lib/cmdlib/cluster/verify.py/LUClusterVerifyGroup._VerifyAcceptedFileStoragePaths
|
def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
    """Verifies and computes a node information map
    @type ninfo: L{objects.Node}
    @param ninfo: the node to check
    @param nresult: the remote results for the node
    @param nimg: the node image object
    @param vg_name: the configured VG name
    """
    # try to read free memory (from the hypervisor)
    hv_info = nresult.get(constants.NV_HVINFO, None)
    test = not isinstance(hv_info, dict) or "memory_free" not in hv_info \
        or "memory_total" not in hv_info \
        or "memory_dom0" not in hv_info
    self._ErrorIf(test, constants.CV_ENODEHV, ninfo.name,
                  "rpc call to node failed (hvinfo)")
    if not test:
        try:
            nimg.mfree = int(hv_info["memory_free"])
            nimg.mtotal = int(hv_info["memory_total"])
            nimg.mdom0 = int(hv_info["memory_dom0"])
        except (__HOLE__, TypeError):
            self._ErrorIf(True, constants.CV_ENODERPC, ninfo.name,
                          "node returned invalid nodeinfo, check hypervisor")
    # FIXME: devise a free space model for file based instances as well
    if vg_name is not None:
        test = (constants.NV_VGLIST not in nresult or
                vg_name not in nresult[constants.NV_VGLIST])
        self._ErrorIf(test, constants.CV_ENODELVM, ninfo.name,
                      "node didn't return data for the volume group '%s'"
                      " - it is either missing or broken", vg_name)
        if not test:
            try:
                nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
            except (ValueError, TypeError):
                self._ErrorIf(True, constants.CV_ENODERPC, ninfo.name,
                              "node returned invalid LVM info, check LVM status")
|
ValueError
|
dataset/ETHPy150Open ganeti/ganeti/lib/cmdlib/cluster/verify.py/LUClusterVerifyGroup._UpdateNodeInfo
|
def _convert_dpid(self, dpid_str):
    try:
        return int(dpid_str, 16)
    except __HOLE__ as err:
        self.logger.error('Invalid dpid parameter. %s', err)
        self._test_end()
|
ValueError
|
dataset/ETHPy150Open osrg/ryu/ryu/tests/switch/tester.py/OfTester._convert_dpid
|
def _get_tests(self, path):
    with open(path, 'r') as fhandle:
        buf = fhandle.read()
    try:
        json_list = json.loads(buf)
        for test_json in json_list:
            if isinstance(test_json, six.text_type):
                self.description = test_json
            else:
                self._normalize_test_json(test_json)
                self.tests.append(Test(test_json))
    except (__HOLE__, TypeError) as e:
        result = (TEST_FILE_ERROR %
                  {'file': path, 'detail': str(e)})
        self.logger.warning(result)
|
ValueError
|
dataset/ETHPy150Open osrg/ryu/ryu/tests/switch/tester.py/TestFile._get_tests
|
@pytest.mark.slow
@pytest.mark.parametrize("module_name", _example_modules())
def test_example(example, module_name):
    try:
        main = getattr(import_module(module_name), 'main')
    except __HOLE__ as e:
        skip_exceptions = ["requires a GPU", "pylearn2", "dnn not available"]
        if any([text in str(e) for text in skip_exceptions]):
            pytest.skip(e)
        else:
            raise
    main(num_epochs=1)  # run the example for one iteration
|
ImportError
|
dataset/ETHPy150Open Lasagne/Lasagne/lasagne/tests/test_examples.py/test_example
|
def _get_by_id(self, resource_id, exclude_fields=None):
    try:
        resource_db = self.access.get(id=resource_id, exclude_fields=exclude_fields)
    except __HOLE__:
        resource_db = None
    return resource_db
|
ValidationError
|
dataset/ETHPy150Open StackStorm/st2/st2api/st2api/controllers/resource.py/ResourceController._get_by_id
|
def testTasklets_Raising(self):
    self.ExpectWarnings()
    @tasklets.tasklet
    def t1():
        f = t2(True)
        try:
            yield f
        except __HOLE__, err:
            self.assertEqual(f.get_exception(), err)
            raise tasklets.Return(str(err))
    @tasklets.tasklet
    def t2(error):
        if error:
            raise RuntimeError('hello')
        else:
            yield tasklets.Future()
    x = t1()
    y = x.get_result()
    self.assertEqual(y, 'hello')
|
RuntimeError
|
dataset/ETHPy150Open GoogleCloudPlatform/datastore-ndb-python/ndb/tasklets_test.py/TaskletTests.testTasklets_Raising
|
def testTasklet_YieldTupleTypeError(self):
    self.ExpectWarnings()
    @tasklets.tasklet
    def good():
        yield tasklets.sleep(0)
    @tasklets.tasklet
    def bad():
        raise ZeroDivisionError
        yield tasklets.sleep(0)
    @tasklets.tasklet
    def foo():
        try:
            yield good(), bad(), 42
        except __HOLE__:
            pass
        else:
            self.assertFalse('Should have raised TypeError')
    foo().check_success()
|
TypeError
|
dataset/ETHPy150Open GoogleCloudPlatform/datastore-ndb-python/ndb/tasklets_test.py/TaskletTests.testTasklet_YieldTupleTypeError
|
def testBasicError(self):
    self.ExpectWarnings()
    frames = [sys._getframe()]
    @tasklets.tasklet
    def level3():
        frames.append(sys._getframe())
        raise RuntimeError('hello')
        yield
    @tasklets.tasklet
    def level2():
        frames.append(sys._getframe())
        yield level3()
    @tasklets.tasklet
    def level1():
        frames.append(sys._getframe())
        yield level2()
    @tasklets.tasklet
    def level0():
        frames.append(sys._getframe())
        yield level1()
    fut = level0()
    try:
        fut.check_success()
    except __HOLE__, err:
        _, _, tb = sys.exc_info()
        self.assertEqual(str(err), 'hello')
        tbframes = []
        while tb is not None:
            # It's okay if some _help_tasklet_along frames are present.
            if tb.tb_frame.f_code.co_name != '_help_tasklet_along':
                tbframes.append(tb.tb_frame)
            tb = tb.tb_next
        self.assertEqual(frames, tbframes)
    else:
        self.fail('Expected RuntimeError not raised')
|
RuntimeError
|
dataset/ETHPy150Open GoogleCloudPlatform/datastore-ndb-python/ndb/tasklets_test.py/TracebackTests.testBasicError
|
def testYieldError(self):
    try:
        self.provoke_yield_error()
    except __HOLE__, err:
        self.assertTrue(re.match(
            "A tasklet should not yield a plain value: "
            ".*bad_user_code.*yielded 'abc'$",
            str(err)))
|
RuntimeError
|
dataset/ETHPy150Open GoogleCloudPlatform/datastore-ndb-python/ndb/tasklets_test.py/TracebackTests.testYieldError
|
def execute(self, arguments, updateCompletion, updateMessage, updateStats, updateLicense):
self.log.debug("Callable runner arguments: %s" % arguments)
execType = arguments['execType']
#
# Add locations to site path if needed
#
# TODO instead, we should start rez with a script to load the callable
try:
self.log.info("Add %d site dirs" % len(arguments.get('sysPath', 0)))
for path in arguments.get('sysPath', None):
site.addsitedir(path)
except Exception, e:
raise JobTypeImportError(e)
#
# Retrieve user_args and user_kwargs
#
try:
user_args = json.loads(arguments.get("user_args", None))
user_kwargs = json.loads(arguments.get("user_kwargs", None))
self.log.info("args = %s" % user_args)
self.log.info("kwargs = %s" % user_kwargs)
except Exception, e:
print("Problem retrieving args and kwargs: %s)" % e)
raise CommandError("Problem retrieving args and kwargs: (%s, %s)" % (user_args, user_kwargs))
#
# Execute simple function
#
if execType == 'function':
try:
moduleName = arguments['moduleName']
funcName = arguments['funcName']
except Exception:
raise JobTypeImportError("Missing function or module name in callable arguments: %s", arguments)
self.log.info("Preparing to load: %s from %s" % (funcName, moduleName))
#
# Import module with given moduleName and funcName
# NB: replace with import_lib.import in python2.7
#
try:
module = __import__(moduleName, fromlist=funcName)
except __HOLE__, error:
traceback.print_exc()
raise JobTypeImportError("No module '%s' on PYTHONPATH:\n%s. (%s)" % (moduleName, "\n".join(sys.path), error))
#
# Load target function in module
#
try:
func = getattr(module, funcName)
except Exception, e:
raise JobTypeImportError("No function '%s' defined in module %s (%s)" % (funcName, moduleName, e))
#
# Go!
#
try:
func(*user_args, **user_kwargs)
except Exception, e:
print "Problem when executing: %s (msg: %s)" % (func, e)
raise CommandError("Problem when executing: %s (msg: %s)" % (func, e))
#
# Execute instance method
#
elif execType == 'method':
self.log.debug("input: %s" % arguments)
try:
moduleName = arguments['moduleName']
className = arguments['className']
methodName = arguments['methodName']
except Exception:
raise JobTypeImportError("Missing function or module name in callable arguments: %s", arguments)
self.log.info("Preparing to load: %s.%s from %s" % (methodName, className, moduleName))
module = __import__(moduleName, fromlist=[className])
jobtype = getattr(module, className)
func = jobtype()
#
# Go!
#
try:
# params = json.loads(arguments.get("params", None))
# self.log.info("params: %s" % params)
getattr(func, methodName)(*user_args, **user_kwargs)
except Exception, e:
print "Problem when executing: %s (error: %s)" % (func, e)
raise CommandError("Problem when executing: %s" % func)
else:
raise CommandError("Callable not supported")
|
ImportError
|
dataset/ETHPy150Open mikrosimage/OpenRenderManagement/src/puliclient/runner.py/CallableRunner.execute
|
@classmethod
def executeWithOutput(cls, command, outputCallback=None):
    """
    | Starts a subprocess with the given command string. The subprocess is started without a shell
    | for safety reasons. stderr is sent to stdout, and stdout is either sent to a callback if given or
    | printed on stdout again.
    | If a callback is given for output, it will be called each time a line is printed.
    :param command: a string holding any command line
    :param outputCallback: any callable able to parse a line and retrieve useful info from it (usually in the runner)
    :raise CommandError: When any error occurs that should end the command with ERROR status
                         When a subprocess error is raised (OSError or ValueError usually)
    """
    if outputCallback is not None and not callable(outputCallback):
        raise CommandError("Invalid param: outputCallback=%s must be a callable or None" % outputCallback)
    shlex.split(command)
    try:
        p = subprocess.Popen(command.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, bufsize=1)
        for line in iter(p.stdout.readline, b''):
            if callable(outputCallback):
                outputCallback(line)
            else:
                sys.stdout.write(line)
        (stdout, stderr) = p.communicate()
        return p.returncode
    except ValueError as e:
        raise CommandError("%s" % e)
    except __HOLE__ as e:
        raise CommandError("Error during process exec: %s" % e)
    except Exception as e:
        raise e
|
OSError
|
dataset/ETHPy150Open mikrosimage/OpenRenderManagement/src/puliclient/runner.py/RunnerToolkit.executeWithOutput
|
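The `executeWithOutput` row above streams a subprocess's merged stdout/stderr through an optional per-line callback. A self-contained sketch of the same idea using only the standard library (the helper name and example command are made up for illustration, not part of the original puliclient API):

    import subprocess
    import sys

    def run_with_callback(command_args, on_line=None):
        # Merge stderr into stdout and hand each output line to the callback if one is given.
        proc = subprocess.Popen(command_args, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        for line in iter(proc.stdout.readline, b''):
            if callable(on_line):
                on_line(line)
            else:
                sys.stdout.write(line.decode(errors='replace'))
        proc.stdout.close()
        return proc.wait()

    # Example (hypothetical): run_with_callback(['echo', 'hello'], on_line=print)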
@classmethod
def runGraphInXterm(cls, graph, keepWindowOpen=True):
"""
Specifically runs a graph locally in a separate xterm windows and subprocess.
Several possibilities to render locally:
- use graph.execute() in current thread --> PB: blocks the GUI
- use graph.execute(detached=True) in a separate thread --> PB: force to quit maya to stop long process
- pickle graph to dump and reload it in xterm subprocess --> WARN: potential pb if dump is too long
When using first 2 methods, log output is in reversed order making it difficult to debug
Method details (pickle graph):
1. create a data temp file with the pickled graph
2. create a script temp with a minimal python code to load the data file and "execute" it
3. Start a command line process to open xterm and execute the script file
Precisely the command has 3 steps:
- open xterm with title "Running" and execution command
- change title to "finished" after command execution
- eventually starts a new shell in interactive mode to keep xterm opened
:param graph: a puliclient graph to execute
:raise CommandError: When any error occurred that should end the command with ERROR status
When a subprocess error is raised (OSError or ValueError usually)
"""
try:
from tempfile import NamedTemporaryFile
graphName = ''
with NamedTemporaryFile(mode='w', prefix='localGraph_', delete=False) as graphFile:
graphFile.write(pickle.dumps(graph))
graphName = graphFile.name
script = """
import pickle
import time
with open("__graphFile__") as file:
g = pickle.loads(file.read())
g.execute()
"""
script = script.replace("__graphFile__", graphName)
with NamedTemporaryFile(mode='w', prefix='localScript_', delete=False) as scriptFile:
scriptFile.write(script)
# args = 'xterm -title "Running" -e "python {script}'.format(script=scriptFile.name)
if keepWindowOpen:
args = 'xterm -title "Running" -e "python {script}' \
'; echo -ne \'\033]0;Finished\007\'' \
'; sh -i"'.format(script=scriptFile.name)
else:
args = 'xterm -title "Running" -e "python {script}"'.format(script=scriptFile.name)
print "Start local render, please check your terminal for details."
process = subprocess.Popen(args, shell=True)
print "Process started: {pid}".format(pid=process.pid)
except ValueError as e:
raise CommandError("%s" % e)
except __HOLE__ as e:
raise CommandError("Error during process exec: %s" % e)
except Exception as e:
raise e
|
OSError
|
dataset/ETHPy150Open mikrosimage/OpenRenderManagement/src/puliclient/runner.py/RunnerToolkit.runGraphInXterm
|
def test_max_recursion_error(self):
    """
    Overriding a method on a super class and then calling that method on
    the super class should not trigger infinite recursion. See #17011.
    """
    try:
        super(ClassDecoratedTestCase, self).test_max_recursion_error()
    except __HOLE__:
        self.fail()
|
RuntimeError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/tests/regressiontests/settings_tests/tests.py/ClassDecoratedTestCase.test_max_recursion_error
|
@run_in_both(Person, "['setreadonly', 'setemitter', '_setprop1', '_setprop2']")
def test_try_illegal_stuff(Person):
    # we cannot test delete, because deleting *is* possible
    # on JS
    res = []
    name = Person()
    try:
        name.full_name = 'john doe'
    except AttributeError:
        res.append('setreadonly')
    try:
        name.yell = 3
    except AttributeError:
        res.append('setemitter')
    try:
        name._set_prop(3, 3)  # Property name must be a string
    except __HOLE__:
        res.append('_setprop1')
    try:
        name._set_prop('spam', 3)  # MyObject has not spam property
    except AttributeError:
        res.append('_setprop2')
    return res
|
TypeError
|
dataset/ETHPy150Open zoofIO/flexx/flexx/event/tests/test_both.py/test_try_illegal_stuff
|
@run_in_both(Person, "['ok-label', 'ok-type']")
def test_emit_fail(Person):
    res = []
    name = Person()
    try:
        name.emit('first_name:xx', dict(old_value='x1', new_value='y1'))
    except ValueError:
        res.append('ok-label')
    try:
        name.emit('first_name', 4)
    except __HOLE__:
        res.append('ok-type')
    return res
|
TypeError
|
dataset/ETHPy150Open zoofIO/flexx/flexx/event/tests/test_both.py/test_emit_fail
|
@run_in_both(ConnectFail, "['ok']")
def test_handler_connection_fail1(ConnectFail):
    try:
        m = ConnectFail()
    except __HOLE__:
        return ['ok']
    return ['fail']
|
RuntimeError
|
dataset/ETHPy150Open zoofIO/flexx/flexx/event/tests/test_both.py/test_handler_connection_fail1
|
@run_in_both(Person, "['ok']")
def test_handler_connection_fail2(Person):
    name = Person()
    def handler(*events):
        pass
    try:
        name.connect(handler, 'foo.bar')
    except __HOLE__:
        return ['ok']
    return ['fail']
|
RuntimeError
|
dataset/ETHPy150Open zoofIO/flexx/flexx/event/tests/test_both.py/test_handler_connection_fail2
|
@run_in_both(HandlerFail, "', 1, 1]")
def test_handler_exception1(HandlerFail):
    res = []
    m = HandlerFail()
    def handler(*events):
        raise IndexError('bla')
    handler = m.connect(handler, 'foo')
    # Does not fail when triggered
    m.foo = 3
    m.foo = 42
    m.failing_handler.handle_now()
    handler.handle_now()
    # But calling directly fails
    try:
        m.failing_handler()
    except IndexError:
        res.append(1)
    try:
        handler()
    except __HOLE__:
        res.append(1)
    return ["!!!!"] + res  # trick to ditch stderr in JS
## Dynamism
|
IndexError
|
dataset/ETHPy150Open zoofIO/flexx/flexx/event/tests/test_both.py/test_handler_exception1
|
def handle(self, **options):
if len(settings.SOCKJS_CLASSES) > 1:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured(
"Multiple connections not yet supported"
)
module_name, cls_name = settings.SOCKJS_CLASSES[0].rsplit('.', 1)
module = import_module(module_name)
cls = getattr(module, cls_name)
channel = getattr(settings, 'SOCKJS_CHANNEL', '/echo')
if not channel.startswith('/'):
channel = '/%s' % channel
router = SockJSRouter(cls, channel)
app_settings = {
'debug': settings.DEBUG,
}
PORT = int(options['port'])
app = web.Application(router.urls, **app_settings)
app.listen(PORT, no_keep_alive=options['no_keep_alive'])
print "Running sock app on port", PORT, "with channel", channel
try:
ioloop.IOLoop.instance().start()
except __HOLE__:
# so you don't think you errored when ^C'ing out
pass
|
KeyboardInterrupt
|
dataset/ETHPy150Open peterbe/django-sockjs-tornado/django_sockjs_tornado/management/commands/socketserver.py/Command.handle
|
def retry(retries=4, delay_multiplier=3, backoff=2):
"""Retry calling the decorated function using an exponential backoff.
http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry
:param int retries: number of times to try (not retry) before giving up
:param int delay_multiplier: initial delay between retries in seconds
:param int backoff: backoff multiplier e.g. value of 2 will double the
delay each retry
"""
def retry_decorator(f):
@wraps(f)
def f_retry(*args, **kwargs):
if retries < 0:
raise ValueError('retries must be at least 0')
if delay_multiplier <= 0:
raise ValueError('delay_multiplier must be larger than 0')
if backoff <= 1:
raise ValueError('backoff must be greater than 1')
mtries, mdelay = retries, delay_multiplier
while mtries > 1:
try:
return f(*args, **kwargs)
except __HOLE__:
msg = "Retrying in {0} seconds...".format(mdelay)
lgr.warning(msg)
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
return f(*args, **kwargs)
return f_retry
return retry_decorator
# # utils.execute(sh.ls, 3, 2, [0], '/var/log')
# def execute(func, attempts=1, sleep=3, accepted_err_codes=[0], *args):
# if attempts < 1:
# raise RuntimeError('Attempts must be at least 1')
# if not sleep > 0:
# raise RuntimeError('Sleep must be larger than 0')
# for execution in xrange(attempts):
# outcome = func(*args, _ok_code=accepted_err_codes)
# if outcome.exit_code not in accepted_err_codes:
# lgr.warning('Failed to execute: {0}'.format(func))
# time.sleep(sleep)
# else:
# return outcome
# lgr.error('Failed to run command even after {0} attempts'
# ' with output: {1}'.format(execution, outcome))
# sys.exit(codes.mapping['failed_to_execute_command'])
|
SystemExit
|
dataset/ETHPy150Open cloudify-cosmo/packman/packman/utils.py/retry
|
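A minimal, self-contained sketch of the same exponential-backoff idea as the retry decorator above. Names such as simple_retry and flaky_read are made up for illustration, and the retried exception type is passed in explicitly rather than hard-coded.

import time
from functools import wraps

def simple_retry(retries=4, delay=3, backoff=2, exceptions=(Exception,)):
    """Retry the wrapped callable, multiplying the wait by `backoff` each time."""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            remaining, wait = retries, delay
            while remaining > 1:
                try:
                    return func(*args, **kwargs)
                except exceptions:
                    time.sleep(wait)      # back off before the next attempt
                    remaining -= 1
                    wait *= backoff
            return func(*args, **kwargs)  # final attempt is allowed to raise
        return wrapper
    return decorator

@simple_retry(retries=3, delay=0.1, exceptions=(IOError,))
def flaky_read(path):
    with open(path) as handle:
        return handle.read()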
def mkdir(self, dir):
"""creates (recursively) a directory
:param string dir: directory to create
"""
if not os.path.isdir(dir):
lgr.debug('Creating directory {0}'.format(dir))
try:
os.makedirs(dir)
except __HOLE__ as ex:
lgr.error('Failed to create {0} ({1})'.format(dir, str(ex)))
sys.exit(codes.mapping['failed_to_mkdir'])
else:
lgr.debug('Directory already exists, skipping.')
|
OSError
|
dataset/ETHPy150Open cloudify-cosmo/packman/packman/utils.py/Handler.mkdir
|
def cp(self, src, dst):
"""copies (recuresively or not) files or directories
:param string src: source to copy
:param string dst: destination to copy to
:param bool recurse: should the copying process be recursive?
"""
lgr.debug('Copying {0} to {1}'.format(src, dst))
try:
shutil.copytree(src, dst)
except __HOLE__ as e:
# If the error was caused because the source wasn't a directory
if e.errno == errno.ENOTDIR:
shutil.copy(src, dst)
else:
lgr.error('Copying failed. Error: {0}'.format(e))
return False
|
OSError
|
dataset/ETHPy150Open cloudify-cosmo/packman/packman/utils.py/Handler.cp
|
def HasSniSupport():
try:
import OpenSSL
return (distutils.version.StrictVersion(OpenSSL.__version__) >=
distutils.version.StrictVersion('0.13'))
except __HOLE__:
return False
|
ImportError
|
dataset/ETHPy150Open chromium/web-page-replay/platformsettings.py/HasSniSupport
|
def has_ipfw(self):
try:
self.ipfw('list')
return True
except __HOLE__ as e:
logging.warning('Failed to start ipfw command. '
'Error: %s' % e.message)
return False
|
AssertionError
|
dataset/ETHPy150Open chromium/web-page-replay/platformsettings.py/_BasePlatformSettings.has_ipfw
|
def _get_primary_nameserver(self):
try:
resolv_file = open(self.RESOLV_CONF)
except __HOLE__:
raise DnsReadError()
for line in resolv_file:
if line.startswith('nameserver '):
return line.split()[1]
raise DnsReadError()
|
IOError
|
dataset/ETHPy150Open chromium/web-page-replay/platformsettings.py/_FreeBSDPlatformSettings._get_primary_nameserver
|
def _get_primary_nameserver(self):
try:
resolv_file = open(self.RESOLV_CONF)
except __HOLE__:
raise DnsReadError()
for line in resolv_file:
if line.startswith('nameserver '):
return line.split()[1]
raise DnsReadError()
|
IOError
|
dataset/ETHPy150Open chromium/web-page-replay/platformsettings.py/_LinuxPlatformSettings._get_primary_nameserver
|
def _set_primary_nameserver(self, dns):
"""Replace the first nameserver entry with the one given."""
try:
self._write_resolve_conf(dns)
except __HOLE__, e:
if 'Permission denied' in e:
raise self._get_dns_update_error()
raise
|
OSError
|
dataset/ETHPy150Open chromium/web-page-replay/platformsettings.py/_LinuxPlatformSettings._set_primary_nameserver
|
def _perform_unique_checks(self, unique_checks):
errors = {}
for model_class, unique_check in unique_checks:
lookup_kwargs = {}
for field_name in unique_check:
f = self._meta.get_field(field_name)
lookup_value = getattr(self, f.attname)
if lookup_value is None:
continue
if f.primary_key and not self._state.adding:
continue
##########################################################################
# This is a modification to Django's native implementation of this method;
# we conditionally build a __in lookup if the value is an iterable.
lookup = str(field_name)
if isinstance(lookup_value, (list, set, tuple)):
lookup = "%s__in" % lookup
lookup_kwargs[lookup] = lookup_value
##########################################################################
# / end of changes
if len(unique_check) != len(lookup_kwargs):
continue
#######################################################
# Deal with long __in lookups by doing multiple queries in that case
# This is a bit hacky, but we really have no choice due to App Engine's
# 30 multi-query limit. This also means we can't support multiple list fields in
# a unique combination
#######################################################
if len([x for x in lookup_kwargs if x.endswith("__in") ]) > 1:
raise NotSupportedError("You cannot currently have two list fields in a unique combination")
# Split IN queries into multiple lookups if they are too long
lookups = []
for k, v in lookup_kwargs.iteritems():
if k.endswith("__in") and len(v) > 30:
v = list(v)
while v:
new_lookup = lookup_kwargs.copy()
new_lookup[k] = v[:30]
v = v[30:]
lookups.append(new_lookup)
break
else:
# Otherwise just use the one lookup
lookups = [ lookup_kwargs ]
for lookup_kwargs in lookups:
qs = model_class._default_manager.filter(**lookup_kwargs).values_list("pk", flat=True)
model_class_pk = self._get_pk_val(model_class._meta)
result = list(qs)
if not self._state.adding and model_class_pk is not None:
# If we are saving an instance, we ignore its PK in the result
try:
result.remove(model_class_pk)
except __HOLE__:
pass
if result:
if len(unique_check) == 1:
key = unique_check[0]
else:
key = NON_FIELD_ERRORS
errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))
break
return errors
|
ValueError
|
dataset/ETHPy150Open potatolondon/djangae/djangae/db/constraints.py/UniquenessMixin._perform_unique_checks
|
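The chunking of long __in lookups above (forced by App Engine's 30 multi-query limit) is easier to see in isolation. A plain-Python sketch with no Django involved; split_in_lookup is a hypothetical helper name.

def split_in_lookup(lookup_kwargs, max_items=30):
    """Split the first '<field>__in' entry longer than max_items into several lookups."""
    for key, values in lookup_kwargs.items():
        if key.endswith("__in") and len(values) > max_items:
            values = list(values)
            lookups = []
            while values:
                chunk = dict(lookup_kwargs)
                chunk[key] = values[:max_items]
                values = values[max_items:]
                lookups.append(chunk)
            return lookups
    return [lookup_kwargs]

# split_in_lookup({'tag__in': list(range(70)), 'owner': 1}) -> three lookups of
# 30, 30 and 10 ids, each keeping the 'owner' filter.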
def to_agraph(N):
"""Return a pygraphviz graph from a NetworkX graph N.
Parameters
----------
N : NetworkX graph
A graph created with NetworkX
Examples
--------
>>> K5=nx.complete_graph(5)
>>> A=nx.to_agraph(K5)
Notes
-----
If N has a dict N.graph_attr, an attempt will be made first
to copy properties attached to the graph (see from_agraph)
and then updated with the calling arguments if any.
"""
try:
import pygraphviz
except __HOLE__:
raise ImportError('requires pygraphviz ',
'http://networkx.lanl.gov/pygraphviz ',
'(not available for Python3)')
directed=N.is_directed()
strict=N.number_of_selfloops()==0 and not N.is_multigraph()
A=pygraphviz.AGraph(name=N.name,strict=strict,directed=directed)
# default graph attributes
A.graph_attr.update(N.graph.get('graph',{}))
A.node_attr.update(N.graph.get('node',{}))
A.edge_attr.update(N.graph.get('edge',{}))
# add nodes
for n,nodedata in N.nodes(data=True):
A.add_node(n,**nodedata)
# loop over edges
if N.is_multigraph():
for u,v,key,edgedata in N.edges_iter(data=True,keys=True):
str_edgedata=dict((k,str(v)) for k,v in edgedata.items())
A.add_edge(u,v,key=str(key),**str_edgedata)
else:
for u,v,edgedata in N.edges_iter(data=True):
str_edgedata=dict((k,str(v)) for k,v in edgedata.items())
A.add_edge(u,v,**str_edgedata)
return A
|
ImportError
|
dataset/ETHPy150Open gkno/gkno_launcher/src/networkx/drawing/nx_agraph.py/to_agraph
|
def write_dot(G,path):
"""Write NetworkX graph G to Graphviz dot format on path.
Parameters
----------
G : graph
A networkx graph
path : filename
Filename or file handle to write
"""
try:
import pygraphviz
except __HOLE__:
raise ImportError('requires pygraphviz ',
'http://networkx.lanl.gov/pygraphviz ',
'(not available for Python3)')
A=to_agraph(G)
A.write(path)
A.clear()
return
|
ImportError
|
dataset/ETHPy150Open gkno/gkno_launcher/src/networkx/drawing/nx_agraph.py/write_dot
|
def read_dot(path):
"""Return a NetworkX graph from a dot file on path.
Parameters
----------
path : file or string
File name or file handle to read.
"""
try:
import pygraphviz
except __HOLE__:
raise ImportError('read_dot() requires pygraphviz ',
'http://networkx.lanl.gov/pygraphviz ',
'(not available for Python3)')
A=pygraphviz.AGraph(file=path)
return from_agraph(A)
|
ImportError
|
dataset/ETHPy150Open gkno/gkno_launcher/src/networkx/drawing/nx_agraph.py/read_dot
|
def pygraphviz_layout(G,prog='neato',root=None, args=''):
"""Create node positions for G using Graphviz.
Parameters
----------
G : NetworkX graph
A graph created with NetworkX
prog : string
Name of Graphviz layout program
root : string, optional
Root node for twopi layout
args : string, optional
Extra arguments to Graphviz layout program
Returns : dictionary
Dictionary of x,y, positions keyed by node.
Examples
--------
>>> G=nx.petersen_graph()
>>> pos=nx.graphviz_layout(G)
>>> pos=nx.graphviz_layout(G,prog='dot')
"""
try:
import pygraphviz
except __HOLE__:
raise ImportError('requires pygraphviz ',
'http://networkx.lanl.gov/pygraphviz ',
'(not available for Python3)')
if root is not None:
args+="-Groot=%s"%root
A=to_agraph(G)
A.layout(prog=prog,args=args)
node_pos={}
for n in G:
node=pygraphviz.Node(A,n)
try:
xx,yy=node.attr["pos"].split(',')
node_pos[n]=(float(xx),float(yy))
except:
print("no position for node",n)
node_pos[n]=(0.0,0.0)
return node_pos
|
ImportError
|
dataset/ETHPy150Open gkno/gkno_launcher/src/networkx/drawing/nx_agraph.py/pygraphviz_layout
|
def _libvirt_creds():
'''
Returns the user and group that the disk images should be owned by
'''
g_cmd = 'grep ^\\s*group /etc/libvirt/qemu.conf'
u_cmd = 'grep ^\\s*user /etc/libvirt/qemu.conf'
try:
stdout = subprocess.Popen(g_cmd,
shell=True,
stdout=subprocess.PIPE).communicate()[0]
group = salt.utils.to_str(stdout).split('"')[1]
except __HOLE__:
group = 'root'
try:
stdout = subprocess.Popen(u_cmd,
shell=True,
stdout=subprocess.PIPE).communicate()[0]
user = salt.utils.to_str(stdout).split('"')[1]
except IndexError:
user = 'root'
return {'user': user, 'group': group}
|
IndexError
|
dataset/ETHPy150Open saltstack/salt/salt/modules/virt.py/_libvirt_creds
|
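For context, the IndexError handled in _libvirt_creds comes from split('"')[1] when the grepped line has no quoted value. A tiny stand-alone reproduction (quoted_value is a made-up helper name):

def quoted_value(line, default='root'):
    try:
        return line.split('"')[1]
    except IndexError:   # no quoted value on the line -> fall back
        return default

# quoted_value('user = "qemu"') -> 'qemu'
# quoted_value('')              -> 'root'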
def _qemu_image_info(path):
'''
Detect information for the image at path
'''
ret = {}
out = __salt__['cmd.run']('qemu-img info {0}'.format(path))
match_map = {'size': r'virtual size: \w+ \((\d+) byte[s]?\)',
'format': r'file format: (\w+)'}
for info, search in six.iteritems(match_map):
try:
ret[info] = re.search(search, out).group(1)
except __HOLE__:
continue
return ret
# TODO: this function is deprecated, should be replaced with
# _qemu_image_info()
|
AttributeError
|
dataset/ETHPy150Open saltstack/salt/salt/modules/virt.py/_qemu_image_info
|
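The AttributeError handled in _qemu_image_info is raised by calling .group() on the None that re.search() returns when a pattern does not match. A minimal reproduction against made-up qemu-img style output:

import re

sample = "file format: qcow2\n"   # no "virtual size" line in this sample
match_map = {'size': r'virtual size: \w+ \((\d+) byte[s]?\)',
             'format': r'file format: (\w+)'}

info = {}
for name, pattern in match_map.items():
    match = re.search(pattern, sample)
    if match is None:             # same situation the except clause above covers
        continue
    info[name] = match.group(1)

# info == {'format': 'qcow2'}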
def init(name,
cpu,
mem,
image=None,
nic='default',
hypervisor=VIRT_DEFAULT_HYPER,
start=True, # pylint: disable=redefined-outer-name
disk='default',
saltenv='base',
seed=True,
install=True,
pub_key=None,
priv_key=None,
seed_cmd='seed.apply',
enable_vnc=False,
**kwargs):
'''
Initialize a new vm
CLI Example:
.. code-block:: bash
salt 'hypervisor' virt.init vm_name 4 512 salt://path/to/image.raw
salt 'hypervisor' virt.init vm_name 4 512 nic=profile disk=profile
'''
hypervisor = __salt__['config.get']('libvirt:hypervisor', hypervisor)
log.debug('Using hypervisor {0}'.format(hypervisor))
nicp = _nic_profile(nic, hypervisor, **kwargs)
log.debug('NIC profile is {0}'.format(nicp))
diskp = None
seedable = False
if image: # with disk template image
log.debug('Image {0} will be used'.format(image))
# if image was used, assume only one disk, i.e. the
# 'default' disk profile
# TODO: make it possible to use disk profiles and use the
# template image as the system disk
diskp = _disk_profile('default', hypervisor, **kwargs)
log.debug('Disk profile is {0}'.format(diskp))
# When using a disk profile extract the sole dict key of the first
# array element as the filename for disk
disk_name = next(six.iterkeys(diskp[0]))
disk_type = diskp[0][disk_name]['format']
disk_file_name = '{0}.{1}'.format(disk_name, disk_type)
if hypervisor in ['esxi', 'vmware']:
# TODO: we should be copying the image file onto the ESX host
raise SaltInvocationError(
'virt.init does not support image template in '
'conjunction with esxi hypervisor'
)
elif hypervisor in ['qemu', 'kvm']:
img_dir = __salt__['config.option']('virt.images')
img_dest = os.path.join(
img_dir,
name,
disk_file_name
)
img_dir = os.path.dirname(img_dest)
sfn = __salt__['cp.cache_file'](image, saltenv)
log.debug('Image directory is {0}'.format(img_dir))
try:
os.makedirs(img_dir)
except __HOLE__:
pass
try:
log.debug('Copying {0} to {1}'.format(sfn, img_dest))
salt.utils.files.copyfile(sfn, img_dest)
mask = os.umask(0)
os.umask(mask)
# Apply umask and remove exec bit
mode = (0o0777 ^ mask) & 0o0666
os.chmod(img_dest, mode)
except (IOError, OSError) as e:
raise CommandExecutionError('problem copying image. {0} - {1}'.format(image, e))
seedable = True
else:
log.error('Unsupported hypervisor when handling disk image')
else:
# no disk template image specified, create disks based on disk profile
diskp = _disk_profile(disk, hypervisor, **kwargs)
log.debug('No image specified, disk profile will be used: {0}'.format(diskp))
if hypervisor in ['qemu', 'kvm']:
# TODO: we should be creating disks in the local filesystem with
# qemu-img
raise SaltInvocationError(
'virt.init does not support disk profiles in conjunction with '
'qemu/kvm at this time, use image template instead'
)
else:
# assume libvirt manages disks for us
for disk in diskp:
for disk_name, args in six.iteritems(disk):
log.debug('Generating libvirt XML for {0}'.format(disk))
xml = _gen_vol_xml(
name,
disk_name,
args['size'],
hypervisor,
)
define_vol_xml_str(xml)
log.debug('Generating VM XML')
kwargs['enable_vnc'] = enable_vnc
xml = _gen_xml(name, cpu, mem, diskp, nicp, hypervisor, **kwargs)
try:
define_xml_str(xml)
except libvirtError:
# This domain already exists
pass
if seed and seedable:
log.debug('Seed command is {0}'.format(seed_cmd))
__salt__[seed_cmd](
img_dest,
id_=name,
config=kwargs.get('config'),
install=install,
pub_key=pub_key,
priv_key=priv_key,
)
if start:
log.debug('Creating {0}'.format(name))
_get_domain(name).create()
return True
|
OSError
|
dataset/ETHPy150Open saltstack/salt/salt/modules/virt.py/init
|
def get_disks(vm_):
'''
Return the disks of a named vm
CLI Example:
.. code-block:: bash
salt '*' virt.get_disks <domain>
'''
disks = {}
doc = minidom.parse(_StringIO(get_xml(vm_)))
for elem in doc.getElementsByTagName('disk'):
sources = elem.getElementsByTagName('source')
targets = elem.getElementsByTagName('target')
if len(sources) > 0:
source = sources[0]
else:
continue
if len(targets) > 0:
target = targets[0]
else:
continue
if target.hasAttribute('dev'):
qemu_target = ''
if source.hasAttribute('file'):
qemu_target = source.getAttribute('file')
elif source.hasAttribute('dev'):
qemu_target = source.getAttribute('dev')
elif source.hasAttribute('protocol') and \
source.hasAttribute('name'): # For rbd network
qemu_target = '{0}:{1}'.format(
source.getAttribute('protocol'),
source.getAttribute('name'))
if qemu_target:
disks[target.getAttribute('dev')] = {
'file': qemu_target}
for dev in disks:
try:
hypervisor = __salt__['config.get']('libvirt:hypervisor', 'kvm')
if hypervisor not in ['qemu', 'kvm']:
break
output = []
stdout = subprocess.Popen(
['qemu-img', 'info', disks[dev]['file']],
shell=False,
stdout=subprocess.PIPE).communicate()[0]
qemu_output = salt.utils.to_str(stdout)
snapshots = False
columns = None
lines = qemu_output.strip().split('\n')
for line in lines:
if line.startswith('Snapshot list:'):
snapshots = True
continue
# If this is a copy-on-write image, then the backing file
# represents the base image
#
# backing file: base.qcow2 (actual path: /var/shared/base.qcow2)
elif line.startswith('backing file'):
matches = re.match(r'.*\(actual path: (.*?)\)', line)
if matches:
output.append('backing file: {0}'.format(matches.group(1)))
continue
elif snapshots:
if line.startswith('ID'): # Do not parse table headers
line = line.replace('VM SIZE', 'VMSIZE')
line = line.replace('VM CLOCK', 'TIME VMCLOCK')
columns = re.split(r'\s+', line)
columns = [c.lower() for c in columns]
output.append('snapshots:')
continue
fields = re.split(r'\s+', line)
for i, field in enumerate(fields):
sep = ' '
if i == 0:
sep = '-'
output.append(
'{0} {1}: "{2}"'.format(
sep, columns[i], field
)
)
continue
output.append(line)
output = '\n'.join(output)
disks[dev].update(yaml.safe_load(output))
except __HOLE__:
disks[dev].update(yaml.safe_load('image: Does not exist'))
return disks
|
TypeError
|
dataset/ETHPy150Open saltstack/salt/salt/modules/virt.py/get_disks
|
def is_kvm_hyper():
'''
Returns a bool whether or not this node is a KVM hypervisor
CLI Example:
.. code-block:: bash
salt '*' virt.is_kvm_hyper
'''
try:
if 'kvm_' not in salt.utils.fopen('/proc/modules').read():
return False
except __HOLE__:
# No /proc/modules? Are we on Windows? Or Solaris?
return False
return 'libvirtd' in __salt__['cmd.run'](__grains__['ps'])
|
IOError
|
dataset/ETHPy150Open saltstack/salt/salt/modules/virt.py/is_kvm_hyper
|
def is_xen_hyper():
'''
Returns a bool whether or not this node is a XEN hypervisor
CLI Example:
.. code-block:: bash
salt '*' virt.is_xen_hyper
'''
try:
if __grains__['virtual_subtype'] != 'Xen Dom0':
return False
except KeyError:
# virtual_subtype isn't set everywhere.
return False
try:
if 'xen_' not in salt.utils.fopen('/proc/modules').read():
return False
except __HOLE__:
# No /proc/modules? Are we on Windows? Or Solaris?
return False
return 'libvirtd' in __salt__['cmd.run'](__grains__['ps'])
|
IOError
|
dataset/ETHPy150Open saltstack/salt/salt/modules/virt.py/is_xen_hyper
|
def Next(self, this, celt, rgVar, pCeltFetched):
if not rgVar: return E_POINTER
if not pCeltFetched: pCeltFetched = [None]
pCeltFetched[0] = 0
try:
for index in range(celt):
item = self.seq.next()
p = item.QueryInterface(IDispatch)
rgVar[index].value = p
pCeltFetched[0] += 1
except __HOLE__:
pass
## except:
## # ReportException? return E_FAIL?
## import traceback
## traceback.print_exc()
if pCeltFetched[0] == celt:
return S_OK
return S_FALSE
|
StopIteration
|
dataset/ETHPy150Open enthought/comtypes/comtypes/server/automation.py/VARIANTEnumerator.Next
|
def Skip(self, this, celt):
# skip some elements.
try:
for _ in range(celt):
self.seq.next()
except __HOLE__:
return S_FALSE
return S_OK
|
StopIteration
|
dataset/ETHPy150Open enthought/comtypes/comtypes/server/automation.py/VARIANTEnumerator.Skip
|
def OnViewCreated(self, view):
print "OnViewCreated", view
# And if our demo view has been registered, it may well
# be that view!
try:
pyview = unwrap(view)
print "and look - its a Python implemented view!", pyview
except __HOLE__:
pass
|
ValueError
|
dataset/ETHPy150Open Jackeriss/Email_My_PC/shell/demos/explorer_browser.py/EventHandler.OnViewCreated
|
def make_dirs(*lpath):
"""Ensure the directories exist.
lpath: path fragments
"""
path = os.path.join(*lpath)
try:
os.makedirs(os.path.dirname(path))
except __HOLE__ as e:
if e.errno != errno.EEXIST:
raise
return os.path.abspath(path)
|
OSError
|
dataset/ETHPy150Open jpscaletti/Voodoo/voodoo/helpers.py/make_dirs
|
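On Python 3.2+ the same 'create the directory tree unless it already exists' idiom can be written without catching OSError/EEXIST, using the exist_ok flag. A small sketch (make_dirs_py3 is a hypothetical name):

import os

def make_dirs_py3(*lpath):
    path = os.path.join(*lpath)
    parent = os.path.dirname(path)
    if parent:
        os.makedirs(parent, exist_ok=True)   # no EEXIST handling needed
    return os.path.abspath(path)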
@classmethod
def socket_connect(cls, host, port, queryport, password=None, timeout=None):
"""Connect to socket.
Stores arguments on class variables to remember for reconnect.
Closes socket if open.
Returns:
Bool, Error
"""
if cls._socket:
cls._socket.close() # Reset socket in case this is a reconnect
if timeout is None:
timeout = socket.getdefaulttimeout()
cls.socket_host = host
cls.socket_port = port
cls.socket_query_port = queryport
cls.password = password
cls.socket_timeout = timeout
try:
cls._socket = socket.create_connection((cls.socket_host, cls.socket_port), timeout)
except socket.timeout as e: ### NEEDS TESTING.
return False, e
except __HOLE__ as e:
return False, e
return True, None
|
OSError
|
dataset/ETHPy150Open f4ble/pyarc/ark/steam/steam_socket_core.py/SteamSocketCore.socket_connect
|
@classmethod
def loop_communication(cls):
while True:
# Don't send stuff when we're not connected.
while not cls.is_connected:
time.sleep(1)
send_packet = None
try:
send_packet = cls.outgoing_queue.popleft()
# No items in queue. Sleep to avoid CPU drain
except __HOLE__:
time.sleep(1) # Performance. Dont spam the loop.
pass
if send_packet:
bytes_sent, err = cls.socket_send(send_packet)
if bytes_sent:
if not cls.wait_for_response(send_packet):
out('Retrying waiting for response:')
if not cls.wait_for_response(send_packet):
out('Failure to get response. Reconnecting...')
cls.reconnect()
else:
cls.is_connected = False
out('Failure to send command. Reconnecting...')
cls.reconnect()
|
IndexError
|
dataset/ETHPy150Open f4ble/pyarc/ark/steam/steam_socket_core.py/SteamSocketCore.loop_communication
|
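The queue handling in loop_communication relies on collections.deque.popleft() raising IndexError once the queue is empty. A stand-alone sketch of that drain pattern, without the networking or the sleep:

from collections import deque

outgoing = deque(["cmd-1", "cmd-2"])

drained = []
while True:
    try:
        drained.append(outgoing.popleft())
    except IndexError:   # queue empty; the real loop sleeps and retries instead
        break

# drained == ['cmd-1', 'cmd-2']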
@classmethod
def socket_send(cls, packet):
"""
Send SteamPacket
Args:
packet: SteamPacket
Returns:
bytes_sent: False or 0 means failure.
err: String
"""
assert isinstance(packet, SteamPacket), 'packet argument not of object SteamPacket'
packet.timestamp = time.time()
try:
bytes_sent = cls._socket.send(packet.binary_string)
except __HOLE__ as err:
return False, err
cls.outgoing_packets[int(packet.decoded["id"])] = packet
return bytes_sent, None
|
OSError
|
dataset/ETHPy150Open f4ble/pyarc/ark/steam/steam_socket_core.py/SteamSocketCore.socket_send
|
@classmethod
def _socket_read(cls, wait=False):
"""
Read from socket. Does not fail on low timeout - only returns None.
Args:
wait: Bool. Blocking mode - wait until timeout.
Returns:
True, error_message: None
None, error_message: 'No data'
False, error_message: 'Socket error'
"""
if wait:
data = ""
while len(data) == 0:
try:
data = cls._socket.recv(4096)
except socket.timeout:
pass
except OSError as err:
return False, 'Failure to read from socket: {}'.format(err)
cls._parse_socket_data(data)
return True, None
try:
data = cls._socket.recv(4096)
cls._parse_socket_data(data)
return True, None
except socket.timeout:
return None, 'No data'
except __HOLE__ as err:
return False, 'Failure to read from socket: {}'.format(err)
|
OSError
|
dataset/ETHPy150Open f4ble/pyarc/ark/steam/steam_socket_core.py/SteamSocketCore._socket_read
|
def _get_project_file(win_id):
session_data = None
# Construct the base settings paths
auto_save_session_path = os.path.join(
sublime.packages_path(),
'..',
'Settings',
'Auto Save Session.sublime_session'
)
regular_session_path = os.path.join(
sublime.packages_path(),
'..',
'Settings',
'Session.sublime_session'
)
# Try loading the session data from one of the files
for session_path in (auto_save_session_path, regular_session_path):
try:
with open(session_path) as session_file:
session_data = json.load(session_file, strict=False)
break
except (__HOLE__, ValueError):
continue
if session_data is None:
return None
# Find the window data corresponding with the given ID
project = _find_project_in_data(session_data, win_id) or ''
# Throw out empty project names
if re.match('.*\\.sublime-project', project) or os.path.exists(project):
return project
return None
|
IOError
|
dataset/ETHPy150Open Varriount/NimLime/nimlime_core/utils/project.py/_get_project_file
|
def get_nim_project(window, view):
"""
Given a window and view, return the Nim project associated with it.
:type window: sublime.Window
:type view: sublime.View
:rtype: str
"""
st_project = _get_project_file(window.id())
result = view.file_name()
if st_project is not None:
with open(st_project) as project_file:
data = json.loads(project_file.read())
try:
path = data['settings']['nim-project']
# Get full path
directory = os.path.dirname(st_project)
path = path.replace('/', os.sep)
result = os.path.join(directory, path)
except __HOLE__:
pass
return result
|
IOError
|
dataset/ETHPy150Open Varriount/NimLime/nimlime_core/utils/project.py/get_nim_project
|
def __new__(cls, file):
unicodeFile = False
if PY3:
try:
file.write(b"")
except __HOLE__:
unicodeFile = True
if unicodeFile:
# On Python 3 native json module outputs unicode:
_dumps = pyjson.dumps
_linebreak = u"\n"
else:
_dumps = fast_json.dumps
_linebreak = b"\n"
return PClass.__new__(
cls, file=file, _dumps=_dumps, _linebreak=_linebreak)
|
TypeError
|
dataset/ETHPy150Open ClusterHQ/eliot/eliot/_output.py/FileDestination.__new__
|
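The probe in FileDestination.__new__ works because writing bytes to a text stream raises TypeError. A minimal illustration of the same check (accepts_bytes is a made-up helper name):

import io

def accepts_bytes(stream):
    """Return True if stream.write() accepts bytes, mirroring the probe above."""
    try:
        stream.write(b"")
        return True
    except TypeError:
        return False

# accepts_bytes(io.BytesIO())  -> True   (binary file: use the bytes JSON encoder)
# accepts_bytes(io.StringIO()) -> False  (text file: fall back to unicode JSON)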
def get_userdata(self, code):
"""Returns the relevant userdata from github.
This function must be called from the github oauth callback
and the auth code must be passed as argument.
"""
try:
session = self.get_auth_session(data={'code': code})
d = session.get('user').json()
email = self.get_verified_email(session)
if not email:
logger.error("No verified email found for this user {}".format(d['login']))
return
return dict(
name=d["name"],
email=email,
username=d["login"],
github=d["login"],
service="GitHub")
except __HOLE__, e:
logger.error("failed to get user data from github. Error: %s",
str(e))
|
KeyError
|
dataset/ETHPy150Open anandology/broadgauge/broadgauge/oauth.py/GitHub.get_userdata
|
def get_userdata(self, code):
"""Returns the relevant userdata from github.
This function must be called from githun oauth callback
and the auth code must be passed as argument.
"""
try:
session = self.get_auth_session(data={'code': code},
decoder=json.loads)
d = session.get('userinfo').json()
# suggest basename of the email as username
username = d['email'].split("@")[0]
return dict(
name=d['name'],
email=d['email'],
username=username,
service='Google')
except __HOLE__, e:
logger.error("failed to get user data from google. Error: %s",
str(e), exc_info=True)
|
KeyError
|
dataset/ETHPy150Open anandology/broadgauge/broadgauge/oauth.py/Google.get_userdata
|
def get_userdata(self, code):
"""Returns the relevant userdata from github.
This function must be called from githun oauth callback
and the auth code must be passed as argument.
"""
try:
session = self.get_auth_session(
data={'code': code, 'redirect_uri': self.redirect_uri})
d = session.get('me').json()
# suggest basename of the email as username
username = d['email'].split("@")[0]
return dict(
name=d['name'],
email=d['email'],
username=username,
service='Facebook')
except __HOLE__, e:
logger.error("failed to get user data from facebook. Error: %s",
str(e), exc_info=True)
|
KeyError
|
dataset/ETHPy150Open anandology/broadgauge/broadgauge/oauth.py/Facebook.get_userdata
|
def create_model(self, origin_resource, args=None, wait_time=3, retries=10):
"""Creates a model from an origin_resource.
Uses a remote resource to create a new model using the
arguments in `args`.
The allowed remote resources can be:
- dataset
- list of datasets
- cluster
In the case of using cluster id as origin_resource, a centroid must
also be provided in the args argument. The first centroid is used
otherwise.
"""
create_args = {}
if args is not None:
create_args.update(args)
if isinstance(origin_resource, list):
# mutidatasets
create_args = self._set_create_from_datasets_args(
origin_resource, args=create_args, wait_time=wait_time,
retries=retries)
else:
resource_type = get_resource_type(origin_resource)
# model from cluster and centroid
if resource_type == CLUSTER_PATH:
cluster_id = get_cluster_id(origin_resource)
cluster = check_resource(cluster_id,
query_string=TINY_RESOURCE,
wait_time=wait_time,
retries=retries,
raise_on_error=True, api=self)
if 'centroid' not in create_args:
try:
centroid = cluster['object'][
'cluster_models'].keys()[0]
create_args.update({'centroid': centroid})
except __HOLE__:
raise KeyError("Failed to generate the model. A "
"centroid id is needed in the args "
"argument to generate a model from "
"a cluster.")
create_args.update({'cluster': cluster_id})
elif resource_type == DATASET_PATH:
create_args = self._set_create_from_datasets_args(
origin_resource, args=create_args, wait_time=wait_time,
retries=retries)
else:
raise Exception("A dataset, list of dataset ids"
" or cluster id plus centroid id are needed"
" to create a"
" dataset. %s found." % resource_type)
body = json.dumps(create_args)
return self._create(self.model_url, body)
|
KeyError
|
dataset/ETHPy150Open bigmlcom/python/bigml/modelhandler.py/ModelHandler.create_model
|
def detach(self):
if self._parent:
try:
i = self._parent.index(self)
del self._parent[i]
except __HOLE__:
pass
self._parent = None
return self
|
ValueError
|
dataset/ETHPy150Open kdart/pycopia/net/pycopia/router.py/Impairment.detach
|
@staticmethod
def _associate_pd_user(email_address, pager):
try:
user = next(pager.users.list(query=email_address, limit=1))
return user
except __HOLE__:
return None
|
StopIteration
|
dataset/ETHPy150Open skoczen/will/will/plugins/devops/pagerduty.py/PagerDutyPlugin._associate_pd_user
|
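The StopIteration caught above is what next() raises on an exhausted iterator; giving next() a default expresses the same 'first match or None' lookup without the try/except. A sketch with a plain list standing in for the PagerDuty user pager:

users = [{'email': 'alice@example.com'}, {'email': 'bob@example.com'}]

def find_user(email, directory):
    # next() with a default returns None instead of raising StopIteration
    return next((u for u in directory if u['email'] == email), None)

# find_user('alice@example.com', users) -> {'email': 'alice@example.com'}
# find_user('carol@example.com', users) -> None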
def _get_user_email_from_mention_name(self, mention_name):
try:
u = self.get_user_by_nick(mention_name[1:])
email_address = self.get_hipchat_user(u['hipchat_id'])['email']
return email_address
except __HOLE__:
return None
|
TypeError
|
dataset/ETHPy150Open skoczen/will/will/plugins/devops/pagerduty.py/PagerDutyPlugin._get_user_email_from_mention_name
|
def pytest_configure():
from django.conf import settings
settings.configure(
DEBUG_PROPAGATE_EXCEPTIONS=True,
DATABASES={'default': {'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'}},
SITE_ID=1,
SECRET_KEY='not very secret in tests',
USE_I18N=True,
USE_L10N=True,
STATIC_URL='/static/',
ROOT_URLCONF='tests.urls',
TEMPLATE_LOADERS=(
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
),
MIDDLEWARE_CLASSES=(
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
),
INSTALLED_APPS=(
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'tests',
),
PASSWORD_HASHERS=(
'django.contrib.auth.hashers.MD5PasswordHasher',
),
)
# guardian is optional
try:
import guardian # NOQA
except __HOLE__:
pass
else:
settings.ANONYMOUS_USER_ID = -1
settings.AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'guardian.backends.ObjectPermissionBackend',
)
settings.INSTALLED_APPS += (
'guardian',
)
try:
import django
django.setup()
except AttributeError:
pass
|
ImportError
|
dataset/ETHPy150Open tomchristie/django-rest-framework/tests/conftest.py/pytest_configure
|
def test_create_node_invalid_disk_size(self):
image = NodeImage(
id=1, name='Ubuntu 8.10 (intrepid)', driver=self.driver)
size = NodeSize(
1, '256 slice', None, None, None, None, driver=self.driver)
location = NodeLocation(id=1, name='Europe', country='England',
driver=self.driver)
try:
self.driver.create_node(name='foo', image=image, size=size,
location=location)
except __HOLE__:
pass
else:
self.fail('Invalid disk size provided but an exception was not'
' thrown')
|
ValueError
|
dataset/ETHPy150Open apache/libcloud/libcloud/test/compute/test_voxel.py/VoxelTest.test_create_node_invalid_disk_size
|
def run(self, *args):
self.options += self.default_options()
cmd_ns = self.parser.parse_args(args)
logger.debug('Parsed command namespace: %s' % cmd_ns.__dict__)
kwargs = {}
for k, v in cmd_ns.__dict__.items():
if k in self.args:
kwargs[k] = v
try:
logger.debug('Running target:%s' % self.target)
return self.target(**kwargs)
except __HOLE__, e:
raise CommandError('Invalid command args: %s' % e)
|
TypeError
|
dataset/ETHPy150Open jmohr/compago/compago/command.py/Command.run
|
def get_values(in_file, out_file, keyword1):
"""
Created on 21 Oct 2014
Created for use with Wonderware Archestra, using the exported csv file from an object.
Let's say you exported a composite object to a csv file and you want all the short descriptions or all the tagnames; this function will get them for you.
*note that the input file encoding has to be UTF-16-LE.
USAGE:
%prog in_file.csv out_file.txt %keyword
@author: Roan Fourie
@mail: [email protected]
"""
return_data = 0
str_data = ' '
write_data = ''
kw_position = -1
next_row = 0
occurences = 0
start_position = 0
count1 = 0
count2 = 0
i = 0
try:
with open(in_file, 'rt', encoding='utf-16-le') as fi:
for line in fi: #For each line do the following
str_data = line
if next_row == 1: #Means that a valid keyword was found in the previous row
i = 0
count1 = 0
count2 = 0
while i < len(str_data): #Iterate until the amount of commas is reached, we know the amount of commas from the previous row
i = i+1
if str_data[i-1] == ",": #Looks for the commas and count them with their positions in the string
count1 = count1 + 1
count2 = count2 + 1
if count1 == occurences: #The keyword begin position is reached, get the position in the string
start_position = i-1
if count2 == (occurences + 1): #The keyword end position is reached, get the position in the string
end_position = i-1
i = len(str_data)
start_position = start_position + 1 #Else the first comma is also copied for output
if kw_position <= 2: #this part for when the keyword is at the beginning without a comma in front of it
start_position = start_position - 1
print('Value = ', str_data[start_position:end_position])
write_data = str_data[start_position:end_position] + '\n' #Build the data string with a new line appended to it
with open(out_file, 'at') as fo: #Write the data string to a file
fo.seek(0,2)
fo.write(write_data)
fo.close()
next_row = 0
kw_position = str_data.find(keyword1) #Check the keyword position in the line, -1 if not found
if kw_position > -1: #If the keyword is found in the line
next_row = 1 #set the next row to be processed for the keyword
occurences = str_data.count(',', 0, kw_position) #Check how many commas there are up to the keyword
except __HOLE__:
print("Error in reading/writing file.")
return_data = 2
else:
print('Operation completed successfully.')
return_data = 1
finally:
print("done")
return return_data
|
IOError
|
dataset/ETHPy150Open RoanFourie/ArchestrA-Tools/aaTools/aaCSV.py/get_values
|
def DestroyDatastore(self):
try:
data_store.DB.cache.Flush()
except __HOLE__:
pass
try:
if self.root_path:
shutil.rmtree(self.root_path)
except (OSError, IOError):
pass
|
AttributeError
|
dataset/ETHPy150Open google/grr/grr/lib/data_stores/sqlite_data_store_test.py/SqliteTestMixin.DestroyDatastore
|
def _render_cell(self, row, column, cell_format):
"""
Renders table cell with padding.
:param row: The row to render
:type: row: list
:param column: The column to render
:param cell_format: The cell format
:type cell_format: str
"""
try:
cell = row[column]
except __HOLE__:
cell = ''
width = self._column_widths[column]
if isinstance(cell, TableCell) and cell.colspan > 1:
# add the width of the following columns(numbers of colspan).
for next_column in range(column + 1, column + cell.colspan):
width += self._get_column_separator_width() + self._column_widths[next_column]
# Encoding fix
width += len(cell) - Helper.len(cell)
style = self.get_column_style(column)
if isinstance(cell, TableSeparator):
self._output.write(style.border_format % (style.horizontal_border_char * width))
else:
width += Helper.len(cell) - Helper.len_without_decoration(self._output.get_formatter(), cell)
content = style.cell_row_content_format % cell
self._output.write(cell_format % getattr(content, style.pad_type)(width, style.padding_char))
|
IndexError
|
dataset/ETHPy150Open sdispater/cleo/cleo/helpers/table.py/Table._render_cell
|
def _get_cell_width(self, row, column):
"""
Gets cell width.
:type row: list
:type column: int
:rtype: int
"""
try:
cell = row[column]
cell_width = Helper.len_without_decoration(self._output.get_formatter(), cell)
if isinstance(cell, TableCell) and cell.colspan > 1:
# we assume that cell value will be across more than one column.
cell_width = cell_width // cell.colspan
return cell_width
except __HOLE__:
return 0
|
IndexError
|
dataset/ETHPy150Open sdispater/cleo/cleo/helpers/table.py/Table._get_cell_width
|
def __delitem__(self, header):
try:
del self.headers[header]
except __HOLE__:
pass
|
KeyError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/http/__init__.py/HttpResponse.__delitem__
|
def check_py_syntax(src_path, dst_path):
"""Check the Python syntax."""
if not os.path.exists(src_path):
os.makedirs(dst_path)
good = []
bad = []
for file_name in glob.glob(os.path.join(src_path, '*.mochi')):
mod_name = os.path.splitext(os.path.basename(file_name))[0]
py_file_name = make_target_file_name(file_name, dst_path, 'py')
with TempDir(src_path):
try:
make_py_source_file(mochi_file_name=file_name,
python_file_name=py_file_name,
mochi_env=MONKEY_PATCH_ENV,
add_init=True, show_tokens=False)
except __HOLE__ as err:
print('#' * 30)
print('Error in module', mod_name)
print('#' * 30)
print(err)
bad.append(mod_name)
continue
with TempDir(dst_path):
try:
py_compile.compile('{}.py'.format(mod_name), doraise=True)
except Exception as err: # pylint: disable=broad-except
print('#' * 30)
print('Error in module', mod_name)
print('#' * 30)
print(err)
bad.append(mod_name)
continue
good.append(mod_name)
print('good', good)
print('bad', bad)
|
TypeError
|
dataset/ETHPy150Open i2y/mochi/tests/check_py_source.py/check_py_syntax
|
def __init__(self, client_cert=None, ca_cert=None, verify=None,
ssl_version=None, assert_hostname=None,
assert_fingerprint=None):
# Argument compatibility/mapping with
# https://docs.docker.com/engine/articles/https/
# This diverges from the Docker CLI in that users can specify 'tls'
# here, but also disable any public/default CA pool verification by
# leaving tls_verify=False
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
# TLS v1.0 seems to be the safest default; SSLv23 fails in mysterious
# ways: https://github.com/docker/docker-py/issues/963
self.ssl_version = ssl_version or ssl.PROTOCOL_TLSv1
# "tls" and "tls_verify" must have both or neither cert/key files
# In either case, Alert the user when both are expected, but any are
# missing.
if client_cert:
try:
tls_cert, tls_key = client_cert
except __HOLE__:
raise errors.TLSParameterError(
'client_config must be a tuple of'
' (client certificate, key file)'
)
if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or
not os.path.isfile(tls_key)):
raise errors.TLSParameterError(
'Path to a certificate and key files must be provided'
' through the client_config param'
)
self.cert = (tls_cert, tls_key)
# If verify is set, make sure the cert exists
self.verify = verify
self.ca_cert = ca_cert
if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert):
raise errors.TLSParameterError(
'Invalid CA certificate provided for `tls_ca_cert`.'
)
|
ValueError
|
dataset/ETHPy150Open docker/docker-py/docker/tls.py/TLSConfig.__init__
|
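The ValueError handled in TLSConfig.__init__ is an ordinary tuple-unpacking failure: client_cert must be exactly a (certificate, key) pair. A dependency-free sketch of that one validation step; plain ValueError stands in for docker-py's TLSParameterError, and the paths are made up:

def split_client_cert(client_cert):
    try:
        tls_cert, tls_key = client_cert   # must be exactly a 2-tuple
    except ValueError:
        raise ValueError('client_cert must be a tuple of '
                         '(client certificate, key file)')
    return tls_cert, tls_key

# split_client_cert(('/certs/cert.pem', '/certs/key.pem')) -> ('/certs/cert.pem', '/certs/key.pem')
# split_client_cert(('/certs/cert.pem',))                  -> raises ValueError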
@error_handler
def execute(request, design_id=None):
response = {'status': -1, 'message': ''}
if request.method != 'POST':
response['message'] = _('A POST request is required.')
app_name = get_app_name(request)
query_server = get_query_server_config(app_name)
query_type = beeswax.models.SavedQuery.TYPES_MAPPING[app_name]
design = safe_get_design(request, query_type, design_id)
try:
query_form = get_query_form(request)
if query_form.is_valid():
query_str = query_form.query.cleaned_data["query"]
explain = request.GET.get('explain', 'false').lower() == 'true'
design = save_design(request, query_form, query_type, design, False)
if query_form.query.cleaned_data['is_parameterized']:
# Parameterized query
parameterization_form_cls = make_parameterization_form(query_str)
if parameterization_form_cls:
parameterization_form = parameterization_form_cls(request.REQUEST, prefix="parameterization")
if parameterization_form.is_valid():
parameters = parameterization_form.cleaned_data
real_query = substitute_variables(query_str, parameters)
query = HQLdesign(query_form, query_type=query_type)
query._data_dict['query']['query'] = real_query
try:
if explain:
return explain_directly(request, query_server, query)
else:
return execute_directly(request, query, design, query_server, parameters=parameters)
except Exception, ex:
db = dbms.get(request.user, query_server)
error_message, log = expand_exception(ex, db)
response['message'] = error_message
return JsonResponse(response)
else:
response['errors'] = parameterization_form.errors
return JsonResponse(response)
# Non-parameterized query
query = HQLdesign(query_form, query_type=query_type)
if request.GET.get('explain', 'false').lower() == 'true':
return explain_directly(request, query_server, query)
else:
return execute_directly(request, query, design, query_server)
else:
response['message'] = _('There was an error with your query.')
response['errors'] = {
'query': [query_form.query.errors],
'settings': query_form.settings.errors,
'file_resources': query_form.file_resources.errors,
'functions': query_form.functions.errors,
}
except __HOLE__, e:
response['message']= str(e)
return JsonResponse(response)
|
RuntimeError
|
dataset/ETHPy150Open cloudera/hue/apps/beeswax/src/beeswax/api.py/execute
|
@error_handler
def save_query_design(request, design_id=None):
response = {'status': -1, 'message': ''}
if request.method != 'POST':
response['message'] = _('A POST request is required.')
app_name = get_app_name(request)
query_type = beeswax.models.SavedQuery.TYPES_MAPPING[app_name]
design = safe_get_design(request, query_type, design_id)
try:
query_form = get_query_form(request)
if query_form.is_valid():
design = save_design(request, query_form, query_type, design, True)
response['design_id'] = design.id
response['status'] = 0
else:
response['errors'] = {
'query': [query_form.query.errors],
'settings': query_form.settings.errors,
'file_resources': query_form.file_resources.errors,
'functions': query_form.functions.errors,
'saveform': query_form.saveform.errors,
}
except __HOLE__, e:
response['message'] = str(e)
return JsonResponse(response)
|
RuntimeError
|
dataset/ETHPy150Open cloudera/hue/apps/beeswax/src/beeswax/api.py/save_query_design
|