davidschulte/ESM_claudios__cubert_ETHPy150Open_swapped_operands_datasets
Columns: function (string, lengths 79 to 138k) | label (string, 20 classes) | info (string, lengths 42 to 261)
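Judging from the preview below, each `function` value is Python source in which one exception name has been masked as `__HOLE__`, `label` holds the masked name, and `info` points at the originating ETHPy150Open file and function. A minimal sketch of how one might load a row and substitute the label back in, using the Hugging Face `datasets` library; the `train` split name and the presence of these columns in the hosted files are assumptions, not confirmed by this page:

```python
from datasets import load_dataset

# Assumption: the hosted data exposes a "train" split with the columns
# shown in this preview (function, label, info).
ds = load_dataset(
    "davidschulte/ESM_claudios__cubert_ETHPy150Open_swapped_operands_datasets",
    split="train",
)

row = ds[0]
print(row["label"])  # e.g. "NotImplementedError"
print(row["info"])   # e.g. "dataset/ETHPy150Open etsy/skyline/src/horizon/worker.py/Worker.run"

# Reconstruct the unmasked source by substituting the label for the mask token.
restored = row["function"].replace("__HOLE__", row["label"])
print(restored[:200])
```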
def run(self):
"""
Called when the process intializes.
"""
logger.info('started worker')
FULL_NAMESPACE = settings.FULL_NAMESPACE
MINI_NAMESPACE = settings.MINI_NAMESPACE
MAX_RESOLUTION = settings.MAX_RESOLUTION
full_uniques = FULL_NAMESPACE + 'unique_metrics'
mini_uniques = MINI_NAMESPACE + 'unique_metrics'
pipe = self.redis_conn.pipeline()
while 1:
# Make sure Redis is up
try:
self.redis_conn.ping()
except:
logger.error('worker can\'t connect to redis at socket path %s' % settings.REDIS_SOCKET_PATH)
sleep(10)
self.redis_conn = StrictRedis(unix_socket_path = settings.REDIS_SOCKET_PATH)
pipe = self.redis_conn.pipeline()
continue
try:
# Get a chunk from the queue with a 15 second timeout
chunk = self.q.get(True, 15)
now = time()
for metric in chunk:
# Check if we should skip it
if self.in_skip_list(metric[0]):
continue
# Bad data coming in
if metric[1][0] < now - MAX_RESOLUTION:
continue
# Append to messagepack main namespace
key = ''.join((FULL_NAMESPACE, metric[0]))
pipe.append(key, packb(metric[1]))
pipe.sadd(full_uniques, key)
if not self.skip_mini:
# Append to mini namespace
mini_key = ''.join((MINI_NAMESPACE, metric[0]))
pipe.append(mini_key, packb(metric[1]))
pipe.sadd(mini_uniques, mini_key)
pipe.execute()
# Log progress
if self.canary:
logger.info('queue size at %d' % self.q.qsize())
self.send_graphite_metric('skyline.horizon.queue_size', self.q.qsize())
except Empty:
logger.info('worker queue is empty and timed out')
except WatchError:
logger.error(key)
except __HOLE__:
pass
except Exception as e:
logger.error("worker error: " + str(e)) | NotImplementedError | dataset/ETHPy150Open etsy/skyline/src/horizon/worker.py/Worker.run |
def create_entropies(vmx, m):
try:
default_signature = vmx.get_method_signature(m, predef_sign = DEFAULT_SIGNATURE).get_string()
l = [ default_signature,
entropy( vmx.get_method_signature(m, "L4", { "L4" : { "arguments" : ["Landroid"] } } ).get_string() ),
entropy( vmx.get_method_signature(m, "L4", { "L4" : { "arguments" : ["Ljava"] } } ).get_string() ),
entropy( vmx.get_method_signature(m, "hex" ).get_string() ),
entropy( vmx.get_method_signature(m, "L2" ).get_string() ),
]
return l
except __HOLE__:
return [ "", 0.0, 0.0, 0.0, 0.0 ] | KeyError | dataset/ETHPy150Open androguard/androguard/androguard/core/data/data.py/create_entropies |
def new_id(self, i, l):
try:
return l[i]
except __HOLE__:
l[i] = len(l)
return l[i]

label: KeyError | info: dataset/ETHPy150Open androguard/androguard/androguard/core/data/data.py/DexViewer.new_id

@property
def storage_path(self):
try:
conan_user_home = os.getenv("CONAN_USER_HOME")
if conan_user_home:
storage = self.storage["path"]
if storage[:2] == "~/":
storage = storage[2:]
result = os.path.join(conan_user_home, storage)
else:
result = os.path.expanduser(self.storage["path"])
except __HOLE__:
result = None
result = get_env('CONAN_STORAGE_PATH', result)
return result

label: KeyError | info: dataset/ETHPy150Open conan-io/conan/conans/client/conf/__init__.py/ConanClientConfigParser.storage_path

def run_tests(tests, test_input, timeout = 60, waittime = 0.1):
"""
Runs all the tests given by directly executing them. If they return
with a non-zero exit code, it adds them to the dictionary returned.
Keyword arguments:
tests -- a list of paths to executable test files
test_input -- dictionary of key value pairs to provide to the test
timeout -- how many seconds to let the tests run
waittime -- how many seconds to wait in between test polls
"""
failed_tests = {}
for t in tests:
log.debug("Executing test %s", t)
p = subprocess.Popen(t, stdin=subprocess.PIPE)
to_send = json.dumps(test_input)
log.debug("Sending test_input to test: %s", to_send)
# Using write here instead of communicate because communicate
# blocks on reading output
try:
p.stdin.write(to_send + os.linesep)
p.stdin.close()
countdown = int(timeout / waittime)
while countdown:
retcode = p.poll()
if retcode is not None:
break
time.sleep(waittime)
countdown = countdown - 1
else:
p.terminate()
log.warn("Test %s timed out", t)
p.wait()
retcode = 'TIMEOUT'
except __HOLE__ as e:
log.warn("Caught IOError from process: %s", e)
retcode = e
if retcode:
failed_tests[t] = retcode
log.error("Test %s failed with code %s", t, retcode)
return failed_tests

label: IOError | info: dataset/ETHPy150Open gosquadron/squadron/squadron/tests.py/run_tests

def git_version():
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(
cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except __HOLE__:
GIT_REVISION = "Unknown"
return GIT_REVISION
# This is a bit hackish: we are setting a global variable so that the main
# pyfunt __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet. While ugly, it's
# a lot more robust than what was previously being used.

label: OSError | info: dataset/ETHPy150Open dnlcrl/PyFunt/setup.py/git_version

def setup_package():
# Rewrite the version file every time
write_version_py()
cmdclass = {}
# Figure out whether to add ``*_requires = ['numpy']``.
# We don't want to do that unconditionally, because we risk updating
# an installed numpy which fails too often. Just if it's not installed, we
# may give it a try. See gh-3379.
build_requires = []
try:
import numpy
if (len(sys.argv) >= 2 and sys.argv[1] == 'bdist_wheel' and
sys.platform == 'darwin'):
# We're ony building wheels for platforms where we know there's
# also a Numpy wheel, so do this unconditionally. See gh-5184.
build_requires = ['numpy>=1.7.1']
except:
build_requires = ['numpy>=1.7.1']
metadata = dict(
name="pyfunt",
author="Daniele Ettore Ciriello",
author_email="[email protected]",
version="0.1",
license="MIT",
url="https://github.com/dnlcrl/PyFunt",
download_url="https://github.com/dnlcrl/PyFunt",
description="Pythonic Deep Learning Framework",
packages=['pyfunt', 'pyfunt/layers', 'pyfunt/utils'],
cmdclass=cmdclass, # {'build_ext': build_ext},
platforms=["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
setup_requires=build_requires,
install_requires=required,
# ext_modules=extensions,
keywords='pyfunt deep learning artificial neural network convolution',
)
if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands', 'egg_info', '--version',
'clean')):
# For these actions, NumPy is not required.
#
# They are required to succeed without Numpy for example when
# pip is used to install Scipy when Numpy is not yet present in
# the system.
try:
from setuptools import setup
except __HOLE__:
from distutils.core import setup
else:
if (len(sys.argv) >= 2 and sys.argv[1] in ('bdist_wheel', 'bdist_egg')) or (
'develop' in sys.argv):
# bdist_wheel/bdist_egg needs setuptools
import setuptools
from numpy.distutils.core import setup
cwd = os.path.abspath(os.path.dirname(__file__))
if not os.path.exists(os.path.join(cwd, 'PKG-INFO')):
# Generate Cython sources, unless building from source release
generate_cython()
metadata['configuration'] = configuration
print 'setup complete'
setup(**metadata)

label: ImportError | info: dataset/ETHPy150Open dnlcrl/PyFunt/setup.py/setup_package

def exclude(self, excludes):
"""Takes a list of node names and removes all nodes and their
successors from the graph.
:param excludes: list of node names
:type excludes: list of string
"""
if not excludes:
return
if not isinstance(excludes, (list, tuple)):
excludes = [excludes]
excludes = set(excludes)
# index the nodes by name
names2nodes = {}
for node in self.nodes():
if node._job.name is not None:
names2nodes[node._job.name] = node
def _recursive_remove(node, force=True):
parents = list(node.parents())
if force or len(parents) <= 1:
children = list(node.children())
map(lambda n: _recursive_remove(n, False),
children)
try:
log.info("Excluding node %s", node)
self.remove(node)
# check the children again, they might have becom invalid
for child in [c for c in children
if c._tool in self._nodes]:
try:
child._tool.validate()
except:
log.info("Forcing exclude of %s, "
"node became invalid",
child)
_recursive_remove(child)
except __HOLE__:
## ignore errors where the node was already removed
pass
for name in excludes:
if not name in names2nodes:
log.warn("Node marked for exclusing not found: %s", name)
else:
if isinstance(name, basestring) and not name in names2nodes:
node = names2nodes[name]
else:
node = name
_recursive_remove(names2nodes[name])
self._update_cleanup_nodes()

label: KeyError | info: dataset/ETHPy150Open thasso/pyjip/jip/pipelines.py/Pipeline.exclude

def download(uri, filename=None, headers=None, redirect_limit=5, **kwargs):
""" Get a remote resource and save the content to a local file. If a local
file already exists at the download destination, the modification time of
that file is sent in the request headers as the `If-Modified-Since` value
to perform a conditional GET. If the remote file has not been modified, a
304 response should then be returned.
:param uri:
:param filename:
:param headers:
:param redirect_limit:
:param kwargs:
:return:
"""
from datetime import datetime
from os import utime
from os.path import getmtime
from time import time
from .tardis import datetime_to_timestamp
try:
last_modified = datetime.fromtimestamp(getmtime(filename))
except __HOLE__:
last_modified = None
with get(uri, if_modified_since=last_modified, headers=headers, redirect_limit=redirect_limit, **kwargs) as source:
if source.status_code == 200:
if not filename:
filename = source.filename
with open(filename, "wb") as destination:
finished = False
while not finished:
data = source.read(8192)
if data:
destination.write(data)
else:
finished = True
utime(filename, (time(), datetime_to_timestamp(source.last_modified)))
return True
elif source.status_code == 304:
return False
else:
raise ValueError("Unexpected status code %s from download response" % source.status_code) | OSError | dataset/ETHPy150Open nigelsmall/py2neo/py2neo/packages/httpstream/__init__.py/download |
def ReadFile(filename, logger=None):
"""Read the contents of the file.
An optional logger can be specified to emit messages to your favorite logging
stream. If specified, then no exception is raised. This is external so that it
can be used by third-party applications.
Arguments:
filename: (unicode) The name of the file.
logger: (function) A function or lambda that takes a string and emits it.
Returns:
The contents of filename.
Raises:
IOError: raised if there was an error reading the file.
"""
try:
with open(filename, 'rb') as fd:
encoding = tokenize.detect_encoding(fd.readline)[0]
except __HOLE__ as err:
if logger:
logger(err)
raise
try:
with py3compat.open_with_encoding(filename,
mode='r',
encoding=encoding) as fd:
source = fd.read()
return source, encoding
except IOError as err:
if logger:
logger(err)
raise

label: IOError | info: dataset/ETHPy150Open google/yapf/yapf/yapflib/yapf_api.py/ReadFile

@view(path=r'^themes/?$')
def index(request):
order = request.GET.get('order', '')
if not order in ['downloads', 'date']:
order = 'downloads'
nameFilter = request.GET.get('filter', '')
try:
page = int(request.GET.get('page', '1'))
except __HOLE__:
page = 1
themes, page, pagesCount = get_page(order, nameFilter, page - 1)
return render(request, 'themes/index.html', {
'themes': themes,
'order': order,
'filter': nameFilter,
'page': page + 1,
'pages': range(1, pagesCount + 1),
'nextPage': page + 2 if page < pagesCount - 1 else 0,
'adPlace': themes[random.randint(0, len(themes) - 1)]['id'] if len(themes) > 0 else -1,
'emptyList': len(themes) == 0
})

label: ValueError | info: dataset/ETHPy150Open y-a-r-g/idea-color-themes/backend/views/themes.py/index

def payment_successful_handler(sender, **kwargs):
if sender.payment_status == "Completed":
try:
token = ShoppingToken.objects.get(value=sender.invoice)
token.payed = True
token.save()
except __HOLE__:
pass

label: ObjectDoesNotExist | info: dataset/ETHPy150Open y-a-r-g/idea-color-themes/backend/views/themes.py/payment_successful_handler

def test_numparser_strict():
parsenumber = numparser(strict=True)
assert parsenumber('1') == 1
assert parsenumber('1.0') == 1.0
assert parsenumber(str(maxint + 1)) == maxint + 1
assert parsenumber('3+4j') == 3 + 4j
try:
parsenumber('aaa')
except __HOLE__:
pass # expected
else:
assert False, 'expected exception'
try:
parsenumber(None)
except TypeError:
pass # expected
else:
assert False, 'expected exception'

label: ValueError | info: dataset/ETHPy150Open alimanfoo/petl/petl/test/util/test_parsers.py/test_numparser_strict

def test_laxparsers():
p1 = datetimeparser('%Y-%m-%dT%H:%M:%S')
try:
p1('2002-12-25 00:00:00')
except ValueError:
pass
else:
assert False, 'expected exception'
p2 = datetimeparser('%Y-%m-%dT%H:%M:%S', strict=False)
try:
v = p2('2002-12-25 00:00:00')
except __HOLE__:
assert False, 'did not expect exception'
else:
eq_('2002-12-25 00:00:00', v)

label: ValueError | info: dataset/ETHPy150Open alimanfoo/petl/petl/test/util/test_parsers.py/test_laxparsers

def logout(self, request):
try:
request.user.auth_token.delete()
except (__HOLE__, ObjectDoesNotExist):
pass
logout(request)
return Response({"success": _("Successfully logged out.")},
status=status.HTTP_200_OK)

label: AttributeError | info: dataset/ETHPy150Open Tivix/django-rest-auth/rest_auth/views.py/LogoutView.logout

def search_external_subtitles(path, directory=None):
"""Search for external subtitles from a video `path` and their associated language.
Unless `directory` is provided, search will be made in the same directory as the video file.
:param str path: path to the video.
:param str directory: directory to search for subtitles.
:return: found subtitles with their languages.
:rtype: dict
"""
dirpath, filename = os.path.split(path)
dirpath = dirpath or '.'
fileroot, fileext = os.path.splitext(filename)
subtitles = {}
for p in os.listdir(directory or dirpath):
# keep only valid subtitle filenames
if not p.startswith(fileroot) or not p.endswith(SUBTITLE_EXTENSIONS):
continue
# extract the potential language code
language_code = p[len(fileroot):-len(os.path.splitext(p)[1])].replace(fileext, '').replace('_', '-')[1:]
# default language is undefined
language = Language('und')
# attempt to parse
if language_code:
try:
language = Language.fromietf(language_code)
except __HOLE__:
logger.error('Cannot parse language code %r', language_code)
subtitles[p] = language
logger.debug('Found subtitles %r', subtitles)
return subtitles

label: ValueError | info: dataset/ETHPy150Open Diaoul/subliminal/subliminal/video.py/search_external_subtitles

def scan_videos(path, subtitles=True, embedded_subtitles=True, subtitles_dir=None):
"""Scan `path` for videos and their subtitles.
:param str path: existing directory path to scan.
:param bool subtitles: scan for subtitles with the same name.
:param bool embedded_subtitles: scan for embedded subtitles.
:param str subtitles_dir: directory to search for subtitles.
:return: the scanned videos.
:rtype: list of :class:`Video`
"""
# check for non-existing path
if not os.path.exists(path):
raise ValueError('Path does not exist')
# check for non-directory path
if not os.path.isdir(path):
raise ValueError('Path is not a directory')
# walk the path
videos = []
for dirpath, dirnames, filenames in os.walk(path):
logger.debug('Walking directory %s', dirpath)
# remove badly encoded and hidden dirnames
for dirname in list(dirnames):
if dirname.startswith('.'):
logger.debug('Skipping hidden dirname %r in %r', dirname, dirpath)
dirnames.remove(dirname)
# scan for videos
for filename in filenames:
# filter on videos
if not filename.endswith(VIDEO_EXTENSIONS):
continue
# skip hidden files
if filename.startswith('.'):
logger.debug('Skipping hidden filename %r in %r', filename, dirpath)
continue
# reconstruct the file path
filepath = os.path.join(dirpath, filename)
# skip links
if os.path.islink(filepath):
logger.debug('Skipping link %r in %r', filename, dirpath)
continue
# scan video
try:
video = scan_video(filepath, subtitles=subtitles, embedded_subtitles=embedded_subtitles,
subtitles_dir=subtitles_dir)
except __HOLE__: # pragma: no cover
logger.exception('Error scanning video')
continue
videos.append(video)
return videos

label: ValueError | info: dataset/ETHPy150Open Diaoul/subliminal/subliminal/video.py/scan_videos

def _ParseQueueYaml(self):
"""Loads the queue.yaml file and parses it.
Returns:
None if queue.yaml doesn't exist, otherwise a queueinfo.QueueEntry object
populated from the queue.yaml.
"""
if hasattr(self, 'queue_yaml_parser'):
return self.queue_yaml_parser(self._root_path)
if self._root_path is None:
return None
for queueyaml in ('queue.yaml', 'queue.yml'):
try:
path = os.path.join(self._root_path, queueyaml)
modified = os.stat(path).st_mtime
if self._yaml_last_modified and self._yaml_last_modified == modified:
return self._last_queue_info
fh = open(path, 'r')
except (IOError, __HOLE__):
continue
try:
queue_info = queueinfo.LoadSingleQueue(fh)
self._last_queue_info = queue_info
self._yaml_last_modified = modified
return queue_info
finally:
fh.close()
return None

label: OSError | info: dataset/ETHPy150Open AppScale/appscale/AppServer/google/appengine/api/taskqueue/taskqueue_stub.py/TaskQueueServiceStub._ParseQueueYaml

def add_image_info_cb(self, viewer, channel, image_info):
"""Almost the same as add_image_info(), except that the image
may not be loaded in memory.
"""
chname = channel.name
name = image_info.name
self.logger.debug("name=%s" % (name))
# Updates of any extant information
try:
image = channel.get_loaded_image(name)
except __HOLE__:
# images that are not yet loaded will show "N/A" for keywords
image = None
self.add_image_cb(viewer, chname, image, image_info)

label: KeyError | info: dataset/ETHPy150Open ejeschke/ginga/ginga/misc/plugins/Contents.py/Contents.add_image_info_cb

def asm(chrom, start, end, bamfilename, reffile, kmersize, tmpdir, mutid='null', debug=False):
bamfile = pysam.Samfile(bamfilename,'rb')
matefile = pysam.Samfile(bamfilename,'rb')
readpairs = {}
nreads = 0
ndisc = 0 # track discordant reads
rquals = []
mquals = []
for read in bamfile.fetch(chrom,start,end):
if not read.mate_is_unmapped and read.is_paired:
try:
mate = matefile.mate(read)
readpairs[read.qname] = ReadPair(read,mate)
nreads += 1
if not read.is_proper_pair:
ndisc += 1
if nreads % 1000 == 0:
print "INFO\t" + now() + "\t" + mutid + "\tfound mates for", nreads, "reads,", float(ndisc)/float(nreads), "discordant."
if read.is_read1:
if read.is_reverse:
rquals.append(read.qual[::-1])
mquals.append(mate.qual)
else:
rquals.append(read.qual)
mquals.append(mate.qual[::-1])
else:
if read.is_reverse:
rquals.append(mate.qual)
mquals.append(read.qual[::-1])
else:
rquals.append(mate.qual[::-1])
mquals.append(read.qual)
except __HOLE__:
sys.stderr.write("WARN\t" + now() + "\t" + mutid + "\tcannot find mate for read marked paired: " + read.qname + "\n")
sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\tfound " + str(nreads) + " reads in region.\n")
if nreads == 0:
return []
refseq = None
if reffile:
refseq = reffile.fetch(chrom,start,end)
region = "%s:%d-%d" % (chrom, start, end)
contigs = runVelvet(readpairs, region, refseq, kmersize, tmpdir, cov_cutoff=True, mutid=mutid, debug=debug)
newcontigs = None
for contig in contigs:
contig.rquals = rquals
contig.mquals = mquals
return contigs

label: ValueError | info: dataset/ETHPy150Open adamewing/bamsurgeon/bamsurgeon/asmregion.py/asm

def circuit_built(self, circuit):
"""ICircuitListener API"""
# older tor versions will have empty build_flags
if 'ONEHOP_TUNNEL' in circuit.build_flags:
return
if circuit.purpose == 'GENERAL':
if len(circuit.path) > 0:
if circuit.path[0] not in self.state.entry_guards.values():
print "WEIRD: first circuit hop not in entry guards:",
print circuit, circuit.path, circuit.purpose
return
self.built_circuits += 1
self.update_percent()
if len(circuit.path) != 3 and len(circuit.path) != 4:
print "WEIRD: circuit has odd pathlength:",
print circuit, circuit.path
try:
self.per_guard_built[circuit.path[0].unique_name] += 1.0
except __HOLE__:
self.per_guard_built[circuit.path[0].unique_name] = 1.0
self.per_guard_failed[circuit.path[0].unique_name] = 0.0

label: KeyError | info: dataset/ETHPy150Open meejah/txtorcon/examples/circuit_failure_rates.py/CircuitFailureWatcher.circuit_built

def circuit_failed(self, circuit, kw):
"""ICircuitListener API"""
if kw['REASON'] != 'MEASUREMENT_EXPIRED':
return
# older tor versions will have empty build_flags
if 'ONEHOP_TUNNEL' in circuit.build_flags:
return
if circuit.purpose == 'GENERAL':
if len(circuit.path) > 1:
if circuit.path[0] not in self.state.entry_guards.values():
# note that single-hop circuits are built for various
# internal reasons (and it seems they somtimes use
# GENERAL anyway)
print "WEIRD: first circuit hop not in entry guards:",
print circuit, circuit.path
return
self.failed_circuits += 1
print "failed", circuit.id
if circuit.id not in self.failed_circuit_ids:
self.failed_circuit_ids.append(circuit.id)
else:
print "WARNING: duplicate message for", circuit
if len(circuit.path) > 0:
try:
self.per_guard_failed[circuit.path[0].unique_name] += 1.0
except __HOLE__:
self.per_guard_failed[circuit.path[0].unique_name] = 1.0
self.per_guard_built[circuit.path[0].unique_name] = 0.0
self.update_percent()

label: KeyError | info: dataset/ETHPy150Open meejah/txtorcon/examples/circuit_failure_rates.py/CircuitFailureWatcher.circuit_failed

@nif_blueprint.route('/', methods=['POST', 'GET'])
def home():
try:
params = get_params(request)
algo = params.get("algorithm", None)
specific_params = current_app.senpy.parameters(algo)
logger.debug(
"Specific params: %s", json.dumps(specific_params, indent=4))
params.update(get_params(request, specific_params))
response = current_app.senpy.analyse(**params)
in_headers = params["inHeaders"] != "0"
return response.flask(in_headers=in_headers)
except __HOLE__ as ex:
return ex.message.flask()

label: ValueError | info: dataset/ETHPy150Open gsi-upm/senpy/senpy/blueprints.py/home

def automoderate(instance, user):
'''
Auto moderates given model instance on user. Returns moderation status:
0 - Rejected
1 - Approved
'''
try:
status = instance.moderated_object.automoderate(user)
except __HOLE__:
msg = "%s has been registered with Moderation." % instance.__class__
raise RegistrationError(msg)
return status

label: AttributeError | info: dataset/ETHPy150Open dominno/django-moderation/moderation/helpers.py/automoderate

def import_moderator(app):
'''
Import moderator module and register all models it contains with moderation
'''
from django.utils.importlib import import_module
import imp
try:
app_path = import_module(app).__path__
except __HOLE__:
return None
try:
imp.find_module('moderator', app_path)
except ImportError:
return None
module = import_module("%s.moderator" % app)
return module

label: AttributeError | info: dataset/ETHPy150Open dominno/django-moderation/moderation/helpers.py/import_moderator

def parse_request(self):
"""Parse a request (internal).
The request should be stored in self.raw_requestline; the results
are in self.command, self.path, self.request_version and
self.headers.
Return True for success, False for failure; on failure, an
error is sent back.
"""
self.command = None # set in case of error on the first line
self.request_version = version = self.default_request_version
self.close_connection = 1
requestline = str(self.raw_requestline, 'iso-8859-1')
requestline = requestline.rstrip('\r\n')
self.requestline = requestline
words = requestline.split()
if len(words) == 3:
command, path, version = words
if version[:5] != 'HTTP/':
self.send_error(400, "Bad request version (%r)" % version)
return False
try:
base_version_number = version.split('/', 1)[1]
version_number = base_version_number.split(".")
# RFC 2145 section 3.1 says there can be only one "." and
# - major and minor numbers MUST be treated as
# separate integers;
# - HTTP/2.4 is a lower version than HTTP/2.13, which in
# turn is lower than HTTP/12.3;
# - Leading zeros MUST be ignored by recipients.
if len(version_number) != 2:
raise ValueError
version_number = int(version_number[0]), int(version_number[1])
except (ValueError, __HOLE__):
self.send_error(400, "Bad request version (%r)" % version)
return False
if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1":
self.close_connection = 0
if version_number >= (2, 0):
self.send_error(505,
"Invalid HTTP Version (%s)" % base_version_number)
return False
elif len(words) == 2:
command, path = words
self.close_connection = 1
if command != 'GET':
self.send_error(400,
"Bad HTTP/0.9 request type (%r)" % command)
return False
elif not words:
return False
else:
self.send_error(400, "Bad request syntax (%r)" % requestline)
return False
self.command, self.path, self.request_version = command, path, version
# Examine the headers and look for a Connection directive.
try:
self.headers = http_client.parse_headers(self.rfile,
_class=self.MessageClass)
except http_client.LineTooLong:
self.send_error(400, "Line too long")
return False
conntype = self.headers.get('Connection', "")
if conntype.lower() == 'close':
self.close_connection = 1
elif (conntype.lower() == 'keep-alive' and
self.protocol_version >= "HTTP/1.1"):
self.close_connection = 0
# Examine the headers and look for an Expect directive
expect = self.headers.get('Expect', "")
if (expect.lower() == "100-continue" and
self.protocol_version >= "HTTP/1.1" and
self.request_version >= "HTTP/1.1"):
if not self.handle_expect_100():
return False
return True

label: IndexError | info: dataset/ETHPy150Open OpenCobolIDE/OpenCobolIDE/open_cobol_ide/extlibs/future/backports/http/server.py/BaseHTTPRequestHandler.parse_request

def send_error(self, code, message=None):
"""Send and log an error reply.
Arguments are the error code, and a detailed message.
The detailed message defaults to the short entry matching the
response code.
This sends an error response (so it must be called before any
output has been generated), logs the error, and finally sends
a piece of HTML explaining the error to the user.
"""
try:
shortmsg, longmsg = self.responses[code]
except __HOLE__:
shortmsg, longmsg = '???', '???'
if message is None:
message = shortmsg
explain = longmsg
self.log_error("code %d, message %s", code, message)
# using _quote_html to prevent Cross Site Scripting attacks (see bug #1100201)
content = (self.error_message_format %
{'code': code, 'message': _quote_html(message), 'explain': explain})
self.send_response(code, message)
self.send_header("Content-Type", self.error_content_type)
self.send_header('Connection', 'close')
self.end_headers()
if self.command != 'HEAD' and code >= 200 and code not in (204, 304):
self.wfile.write(content.encode('UTF-8', 'replace'))

label: KeyError | info: dataset/ETHPy150Open OpenCobolIDE/OpenCobolIDE/open_cobol_ide/extlibs/future/backports/http/server.py/BaseHTTPRequestHandler.send_error

def send_head(self):
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
"""
path = self.translate_path(self.path)
f = None
if os.path.isdir(path):
if not self.path.endswith('/'):
# redirect browser - doing basically what apache does
self.send_response(301)
self.send_header("Location", self.path + "/")
self.end_headers()
return None
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
try:
f = open(path, 'rb')
except __HOLE__:
self.send_error(404, "File not found")
return None
self.send_response(200)
self.send_header("Content-type", ctype)
fs = os.fstat(f.fileno())
self.send_header("Content-Length", str(fs[6]))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
self.end_headers()
return f

label: IOError | info: dataset/ETHPy150Open OpenCobolIDE/OpenCobolIDE/open_cobol_ide/extlibs/future/backports/http/server.py/SimpleHTTPRequestHandler.send_head

def nobody_uid():
"""Internal routine to get nobody's uid"""
global nobody
if nobody:
return nobody
try:
import pwd
except __HOLE__:
return -1
try:
nobody = pwd.getpwnam('nobody')[2]
except KeyError:
nobody = 1 + max(x[2] for x in pwd.getpwall())
return nobody

label: ImportError | info: dataset/ETHPy150Open OpenCobolIDE/OpenCobolIDE/open_cobol_ide/extlibs/future/backports/http/server.py/nobody_uid

def run_cgi(self):
"""Execute a CGI script."""
path = self.path
dir, rest = self.cgi_info
i = path.find('/', len(dir) + 1)
while i >= 0:
nextdir = path[:i]
nextrest = path[i+1:]
scriptdir = self.translate_path(nextdir)
if os.path.isdir(scriptdir):
dir, rest = nextdir, nextrest
i = path.find('/', len(dir) + 1)
else:
break
# find an explicit query string, if present.
i = rest.rfind('?')
if i >= 0:
rest, query = rest[:i], rest[i+1:]
else:
query = ''
# dissect the part after the directory name into a script name &
# a possible additional path, to be stored in PATH_INFO.
i = rest.find('/')
if i >= 0:
script, rest = rest[:i], rest[i:]
else:
script, rest = rest, ''
scriptname = dir + '/' + script
scriptfile = self.translate_path(scriptname)
if not os.path.exists(scriptfile):
self.send_error(404, "No such CGI script (%r)" % scriptname)
return
if not os.path.isfile(scriptfile):
self.send_error(403, "CGI script is not a plain file (%r)" %
scriptname)
return
ispy = self.is_python(scriptname)
if self.have_fork or not ispy:
if not self.is_executable(scriptfile):
self.send_error(403, "CGI script is not executable (%r)" %
scriptname)
return
# Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
# XXX Much of the following could be prepared ahead of time!
env = copy.deepcopy(os.environ)
env['SERVER_SOFTWARE'] = self.version_string()
env['SERVER_NAME'] = self.server.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PROTOCOL'] = self.protocol_version
env['SERVER_PORT'] = str(self.server.server_port)
env['REQUEST_METHOD'] = self.command
uqrest = urllib_parse.unquote(rest)
env['PATH_INFO'] = uqrest
env['PATH_TRANSLATED'] = self.translate_path(uqrest)
env['SCRIPT_NAME'] = scriptname
if query:
env['QUERY_STRING'] = query
env['REMOTE_ADDR'] = self.client_address[0]
authorization = self.headers.get("authorization")
if authorization:
authorization = authorization.split()
if len(authorization) == 2:
import base64, binascii
env['AUTH_TYPE'] = authorization[0]
if authorization[0].lower() == "basic":
try:
authorization = authorization[1].encode('ascii')
if utils.PY3:
# In Py3.3, was:
authorization = base64.decodebytes(authorization).\
decode('ascii')
else:
# Backport to Py2.7:
authorization = base64.decodestring(authorization).\
decode('ascii')
except (binascii.Error, UnicodeError):
pass
else:
authorization = authorization.split(':')
if len(authorization) == 2:
env['REMOTE_USER'] = authorization[0]
# XXX REMOTE_IDENT
if self.headers.get('content-type') is None:
env['CONTENT_TYPE'] = self.headers.get_content_type()
else:
env['CONTENT_TYPE'] = self.headers['content-type']
length = self.headers.get('content-length')
if length:
env['CONTENT_LENGTH'] = length
referer = self.headers.get('referer')
if referer:
env['HTTP_REFERER'] = referer
accept = []
for line in self.headers.getallmatchingheaders('accept'):
if line[:1] in "\t\n\r ":
accept.append(line.strip())
else:
accept = accept + line[7:].split(',')
env['HTTP_ACCEPT'] = ','.join(accept)
ua = self.headers.get('user-agent')
if ua:
env['HTTP_USER_AGENT'] = ua
co = filter(None, self.headers.get_all('cookie', []))
cookie_str = ', '.join(co)
if cookie_str:
env['HTTP_COOKIE'] = cookie_str
# XXX Other HTTP_* headers
# Since we're setting the env in the parent, provide empty
# values to override previously set values
for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
'HTTP_USER_AGENT', 'HTTP_COOKIE', 'HTTP_REFERER'):
env.setdefault(k, "")
self.send_response(200, "Script output follows")
self.flush_headers()
decoded_query = query.replace('+', ' ')
if self.have_fork:
# Unix -- fork as we should
args = [script]
if '=' not in decoded_query:
args.append(decoded_query)
nobody = nobody_uid()
self.wfile.flush() # Always flush before forking
pid = os.fork()
if pid != 0:
# Parent
pid, sts = os.waitpid(pid, 0)
# throw away additional data [see bug #427345]
while select.select([self.rfile], [], [], 0)[0]:
if not self.rfile.read(1):
break
if sts:
self.log_error("CGI script exit status %#x", sts)
return
# Child
try:
try:
os.setuid(nobody)
except os.error:
pass
os.dup2(self.rfile.fileno(), 0)
os.dup2(self.wfile.fileno(), 1)
os.execve(scriptfile, args, env)
except:
self.server.handle_error(self.request, self.client_address)
os._exit(127)
else:
# Non-Unix -- use subprocess
import subprocess
cmdline = [scriptfile]
if self.is_python(scriptfile):
interp = sys.executable
if interp.lower().endswith("w.exe"):
# On Windows, use python.exe, not pythonw.exe
interp = interp[:-5] + interp[-4:]
cmdline = [interp, '-u'] + cmdline
if '=' not in query:
cmdline.append(query)
self.log_message("command: %s", subprocess.list2cmdline(cmdline))
try:
nbytes = int(length)
except (TypeError, __HOLE__):
nbytes = 0
p = subprocess.Popen(cmdline,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env = env
)
if self.command.lower() == "post" and nbytes > 0:
data = self.rfile.read(nbytes)
else:
data = None
# throw away additional data [see bug #427345]
while select.select([self.rfile._sock], [], [], 0)[0]:
if not self.rfile._sock.recv(1):
break
stdout, stderr = p.communicate(data)
self.wfile.write(stdout)
if stderr:
self.log_error('%s', stderr)
p.stderr.close()
p.stdout.close()
status = p.returncode
if status:
self.log_error("CGI script exit status %#x", status)
else:
self.log_message("CGI script exited OK") | ValueError | dataset/ETHPy150Open OpenCobolIDE/OpenCobolIDE/open_cobol_ide/extlibs/future/backports/http/server.py/CGIHTTPRequestHandler.run_cgi |
def test(HandlerClass = BaseHTTPRequestHandler,
ServerClass = HTTPServer, protocol="HTTP/1.0", port=8000):
"""Test the HTTP request handler class.
This runs an HTTP server on port 8000 (or the first command line
argument).
"""
server_address = ('', port)
HandlerClass.protocol_version = protocol
httpd = ServerClass(server_address, HandlerClass)
sa = httpd.socket.getsockname()
print("Serving HTTP on", sa[0], "port", sa[1], "...")
try:
httpd.serve_forever()
except __HOLE__:
print("\nKeyboard interrupt received, exiting.")
httpd.server_close()
sys.exit(0)

label: KeyboardInterrupt | info: dataset/ETHPy150Open OpenCobolIDE/OpenCobolIDE/open_cobol_ide/extlibs/future/backports/http/server.py/test

def get_object_from_identifier(identifier, valid=None):
""" Helper function to resolve an item identifier
into a model instance.
Raises StoreException if the identifier is invalid
or the requested Model could not be found
Raises <Model>.DoesNotExist if the object lookup fails
Arguments (optional):
valid
One or more Django model classes to compare the
returned model instance to.
"""
try:
model_str, pk = identifier.split('__')
except __HOLE__:
raise StoreException('Invalid identifier string')
Model = get_model(*model_str.split('.'))
if Model is None:
raise StoreException('Model from identifier string "%s" not found' % model_str)
if valid is not None:
if not isinstance(valid, (list, tuple) ):
valid = (valid,)
if Model not in valid:
raise StoreException('Model type mismatch')
# This will raise Model.DoesNotExist if lookup fails
return Model._default_manager.get(pk=pk)

label: ValueError | info: dataset/ETHPy150Open klipstein/dojango/dojango/data/modelstore/utils.py/get_object_from_identifier

def get_fields_and_servicemethods(bases, attrs, include_bases=True):
""" This function was pilfered (and slightly modified) from django/forms/forms.py
See the original function for doc and comments.
"""
fields = [ (field_name, attrs.pop(field_name)) for \
field_name, obj in attrs.items() if isinstance(obj, StoreField)]
# Get the method name directly from the __servicemethod__ dict
# as set by the decorator
methods = [ (method.__servicemethod__['name'], method) for \
method in attrs.values() if hasattr(method, '__servicemethod__') ]
if include_bases:
for base in bases[::-1]:
# Grab the fields and servicemethods from the base classes
try:
fields = base.fields.items() + fields
except AttributeError:
pass
try:
methods = base.servicemethods.items() + methods
except __HOLE__:
pass
return SortedDict(fields), SortedDict(methods)

label: AttributeError | info: dataset/ETHPy150Open klipstein/dojango/dojango/data/modelstore/utils.py/get_fields_and_servicemethods

def GetCaParameters(settings, ca_id=0, omit_server_private_key=False):
"""Get ca/cert parameters for CA named ca_id.
Note, subtle: If no ca_id value is supplied, the default value from
settings.CA_ID is used for the ca_id. This value might make the chosen
parameters be NOT from the defaults (no prefix on the settings names).
However, if None value is supplied for ca_id then the CA_ID of default
(no specific CA_ID specified) is used. See the table below for examples.
ca_id argument settings settings
------------------ ------------ -------------------------------
ca_id (unspecified) CA_ID="FOO" uses: FOO_CA_PUBLIC_CERT_PEM
ca_id (unspecified) CA_ID=None uses: CA_PUBLIC_CERT_PEM
ca_id=None CA_ID=None uses: CA_PUBLIC_CERT_PEM
ca_id=None CA_ID="FOO" uses: CA_PUBLIC_CERT_PEM
ca_id="BAR" CA_ID=None uses: BAR_CA_PUBLIC_CERT_PEM
ca_id="BAR" CA_ID="FOO" uses: BAR_CA_PUBLIC_CERT_PEM
Args:
settings: object with attribute level access to settings parameters.
ca_id: str or None (default), identifies the CA/server cert/keys.
omit_server_private_key: bool, True to omit the server's private key, for
use when calling from clients. Default False, which includes the key.
Returns:
CaParameters instance.
Raises:
CaIdError: if any errors occur loading keys/certs for ca_id
"""
if ca_id is 0:
ca_id = GetCaId(settings)
if ca_id is not None and not CA_ID_RE.match(ca_id):
raise CaParametersError('invalid ca_id')
settings_params = [
L_CA_PUBLIC_CERT_PEM,
L_SERVER_PUBLIC_CERT_PEM,
L_REQUIRED_ISSUER,
]
optional_params = []
if not omit_server_private_key:
settings_params.append(L_SERVER_PRIVATE_KEY_PEM)
optional_params.append(L_SERVER_PRIVATE_KEY_PEM)
ca_params = CaParameters()
try:
for settings_name in settings_params:
if ca_id:
settings_k = '%s_%s' % (ca_id, settings_name)
else:
settings_k = settings_name
param_k = settings_name.lower()
try:
v = getattr(settings, settings_k)
except __HOLE__:
if settings_name in optional_params:
v = None
else:
raise
setattr(ca_params, param_k, v)
except (AttributeError, ValueError), e:
logging.critical(str(e))
logging.exception(str(e))
raise CaParametersError(str(e))
ca_params.ca_id = ca_id
return ca_params

label: AttributeError | info: dataset/ETHPy150Open google/simian/src/simian/auth/util.py/GetCaParameters

def setup_config(config_dir):
""" Setup configuration directory. """
config_dir = os.path.abspath(config_dir)
if not os.path.isdir(config_dir):
try:
os.mkdir(config_dir)
except OSError as err:
print('Polyglot could not create configuration directory.')
print(repr(err))
sys.exit(1)
if not os.path.isdir(os.path.join(config_dir, 'node_servers')):
try:
os.mkdir(os.path.join(config_dir, 'node_servers'))
except __HOLE__ as err:
print('Polyglot could not create user node server directory.')
print(repr(err))
sys.exit(1)
return config_dir

label: OSError | info: dataset/ETHPy150Open UniversalDevicesInc/Polyglot/polyglot/__main__.py/setup_config

def run_ginkgo():
parser = argparse.ArgumentParser(prog="ginkgo", add_help=False)
parser.add_argument("-v", "--version",
action="version", version="%(prog)s {}".format(ginkgo.__version__))
parser.add_argument("-h", "--help", action="store_true", help="""
show program's help text and exit
""".strip())
parser.add_argument("-d", "--daemonize", action="store_true", help="""
daemonize the service process
""".strip())
parser.add_argument("target", nargs='?', help="""
service class path to run (modulename.ServiceClass) or
configuration file path to use (/path/to/config.py)
""".strip())
args = parser.parse_args()
if args.help:
parser.print_help()
if args.target:
print# blank line
try:
app = setup_process(args.target)
app.config.print_help()
except __HOLE__, e:
parser.error(e)
else:
if args.target:
try:
ControlInterface().start(args.target, args.daemonize)
except RuntimeError, e:
parser.error(e)
else:
parser.print_usage()

label: RuntimeError | info: dataset/ETHPy150Open progrium/ginkgo/ginkgo/runner.py/run_ginkgo

def run_ginkgoctl():
parser = argparse.ArgumentParser(prog="ginkgoctl")
parser.add_argument("-v", "--version",
action="version", version="%(prog)s {}".format(ginkgo.__version__))
parser.add_argument("-p", "--pid", help="""
pid or pidfile to use instead of target
""".strip())
parser.add_argument("target", nargs='?', help="""
service class path to use (modulename.ServiceClass) or
configuration file path to use (/path/to/config.py)
""".strip())
parser.add_argument("action",
choices="start stop restart reload status log logtail".split())
args = parser.parse_args()
if args.pid and args.target:
parser.error("You cannot specify both a target and a pid")
try:
if args.action in "start restart log logtail".split():
if not args.target:
parser.error("You need to specify a target for {}".format(args.action))
getattr(ControlInterface(), args.action)(args.target)
else:
getattr(ControlInterface(), args.action)(resolve_pid(args.pid, args.target))
except __HOLE__, e:
parser.error(e)

label: RuntimeError | info: dataset/ETHPy150Open progrium/ginkgo/ginkgo/runner.py/run_ginkgoctl

def load_class(class_path):
if '.' not in class_path:
raise RuntimeError("Invalid class path")
module_name, class_name = class_path.rsplit('.', 1)
try:
try:
module = runpy.run_module(module_name)
except ImportError:
module = runpy.run_module(module_name + ".__init__")
except ImportError, e:
import traceback, pkgutil
tb_tups = traceback.extract_tb(sys.exc_info()[2])
if pkgutil.__file__.startswith(tb_tups[-1][0]):
# If the bottommost frame in our stack was in pkgutil,
# then we can safely say that this ImportError occurred
# because the top level class path was not found.
raise RuntimeError("Unable to load class path: {}:\n{}".format(
class_path, e))
else:
# If the ImportError occurred further down,
# raise original exception.
raise
try:
return module[class_name]
except __HOLE__, e:
raise RuntimeError("Unable to find class in module: {}".format(
class_path))

label: KeyError | info: dataset/ETHPy150Open progrium/ginkgo/ginkgo/runner.py/load_class

def resolve_target(target):
if target.endswith('.py'):
if os.path.exists(target):
config = ginkgo.settings.load_file(target)
try:
return config['service']
except __HOLE__:
raise RuntimeError(
"Configuration does not specify a service factory")
else:
raise RuntimeError(
'Configuration file %s does not exist' % target)
else:
return target

label: KeyError | info: dataset/ETHPy150Open progrium/ginkgo/ginkgo/runner.py/resolve_target

def start(self, target, daemonize=True):
print "Starting process with {}...".format(target)
app = setup_process(target, daemonize)
try:
app.serve_forever()
except __HOLE__:
pass
finally:
app.stop()

label: KeyboardInterrupt | info: dataset/ETHPy150Open progrium/ginkgo/ginkgo/runner.py/ControlInterface.start

def _validate(self, pid):
try:
os.kill(pid, 0)
return pid
except (__HOLE__, TypeError):
print "Process is NOT running." | OSError | dataset/ETHPy150Open progrium/ginkgo/ginkgo/runner.py/ControlInterface._validate |
def logtail(self, target):
try:
app = setup_process(target)
app.logger.tail_log()
except __HOLE__:
pass

label: KeyboardInterrupt | info: dataset/ETHPy150Open progrium/ginkgo/ginkgo/runner.py/ControlInterface.logtail

def do_reload(self):
try:
self.config.reload_file()
self.logger.load_config()
except __HOLE__, e:
logger.warn(e)

label: RuntimeError | info: dataset/ETHPy150Open progrium/ginkgo/ginkgo/runner.py/Process.do_reload

def make_all(self, profiler=None, input_storage=None,
output_storage=None, storage_map=None):
# can't import at toplevel because of circular import TODO:
# don't do this ugly hacky way of setting the
# filter_checks_isfinite
from theano.tensor import TensorType # to set filter_check_isfinite
fgraph = self.fgraph
input_storage_ = input_storage
output_storage_ = output_storage
# Compute a topological ordering that IGNORES the destroy_map
# of destructive Ops. This will be OK, because every thunk is
# evaluated on a copy of its input.
fgraph_equiv = fgraph.equivalence_tracker
order_outputs = copy.copy(fgraph_equiv.all_variables_ever)
del fgraph_equiv
order_outputs.reverse()
order = graph.io_toposort(fgraph.inputs, order_outputs)
# an ordering of just the active nodes
active_order = self.schedule(fgraph)
active_order_set = set(active_order)
# Disable no_recycling, in order to be able to use
# check_preallocated_output even on the output of the function.
# no_recycling in individual thunks does not really matter, since
# the function's outputs will always be freshly allocated.
no_recycling = []
input_storage, output_storage, storage_map = link.map_storage(
fgraph, order, input_storage_, output_storage_, storage_map)
thunks_py = [] # python thunks
thunks_c = [] # c thunks
for node in order:
compute_map = {}
for k in node.inputs:
compute_map[k] = [True]
for k in node.outputs:
compute_map[k] = [False]
# Some Ops define a make_thunk with the expectation that
# it will be called before the C code is compiled, because
# the compilation of some dependency is triggered there.
thunk_other = None
if (get_unbound_function(node.op.make_thunk) not in
default_make_thunk):
thunk = node.op.make_thunk(node,
storage_map,
compute_map,
no_recycling)
thunk.inputs = [storage_map[v] for v in node.inputs]
thunk.outputs = [storage_map[v] for v in node.outputs]
thunk_other = thunk
else:
new_node = node.op.prepare_node(node, storage_map, compute_map)
if new_node is not None:
node = new_node
debug = hasattr(node.op, 'debug_perform')
try:
if not self.maker.mode.check_c_code or debug:
raise utils.MethodNotDefined()
# Ops that do not inherit from gof.op.Op don't have certain
# methods defined that the CLinker expects (Scan is an
# example, ifelse is another of such classes that inherit
# directly from PureOp)
if not isinstance(node.op, gof.op.Op):
raise utils.MethodNotDefined()
thunk = node.op.make_c_thunk(node, storage_map, compute_map,
no_recycling)
thunks_c.append(thunk)
except (NotImplementedError, utils.MethodNotDefined):
thunks_c.append(None)
# Pure ops don't really have a perform ( or their perform just
# raises an not implemented exception), so in those cases we
# consider that we don't have a python implementation
if (((self.maker.mode.check_py_code or thunks_c[-1] is None) and
node.op.perform.__code__ != gof.op.PureOp.perform.__code__) or
debug):
thunk = node.op.make_py_thunk(node, storage_map, compute_map,
no_recycling, debug=debug)
thunks_py.append(thunk)
else:
thunks_py.append(None)
if not self.maker.mode.check_c_code and thunks_py[-1] is None:
_logger.warn("Op %s doesn't have a perform, "
"forcing check of the C code" % node.op)
thunk = node.op.make_c_thunk(node, storage_map, compute_map,
no_recycling)
thunks_c[-1] = thunk
# If the op defined its own make_thunk, use the generated thunk
if thunk_other is not None:
if thunks_py[-1] is None:
thunks_py[-1] = thunk_other
elif thunks_c[-1] is None:
thunks_c[-1] = thunk_other
else:
_logger.warn("We won't check the perform function "
"of node '%s' but we will check its "
"make_thunk function" % node)
thunks_py[-1] = thunk_other
# Use self.no_recycling (that was passed in accept()) to always
# use new memory storage when it is needed, in particular for the
# function's outputs. no_recycling_map will be used in f() below.
if self.no_recycling is True:
no_recycling_map = list(storage_map.values())
no_recycling_map = utils.difference(no_recycling_map,
input_storage)
else:
no_recycling_map = [storage_map[r] for r in self.no_recycling
if r not in fgraph.inputs]
# Precompute some things for storage pre-allocation
def_val = int(config.unittests.rseed)
#####
# This is the function that runs when you evaluate the graph
#####
def f():
####
# Note: `f` ignores the compute_map and evaluates the nodes in
# topological order. In some sense, this is ok, and can be used
# for now.
#####
_logger.debug("starting a DebugMode call")
_logger.debug("self.maker.mode.check_preallocated_output: %s",
self.maker.mode.check_preallocated_output)
for x in no_recycling_map:
x[0] = None
# nest all this in try-finally to put storage *back* into
# storage_map when an exception is raised
original_storage_map_keys = [r for r in storage_map
if r.owner is None]
try:
# r_vals are the true values associated with each
# variable in the graph they should not change during
# the evaluation of this function, even when the graph
# has destructive ops in it
#
# This dictionary is used to populate the storage_map
# as necessary
r_vals = {}
# dr_vals are the values taken by variables after
# being destroyed
dr_vals = {}
assert len(thunks_py) == len(order)
# transfer the initial values from the storage_map to
# the r_vals
_logger.debug("DEBUGMODE: transfer initial values")
# r_vals_initialized keeps track of the values that have
# actually been transferred from storage_map to r_vals
r_vals_initialized = []
for r in storage_map:
if (r.owner is None):
if not r.type.is_valid_value(storage_map[r][0]):
# None may be a valid input value (for instance,
# for a Generic object). We only want to raise
# an error if it is not valid.
if (storage_map[r][0] is None):
raise InvalidValueError(
r, storage_map[r][0],
hint=("Graph Input '%s' is missing" %
str(r)))
raise InvalidValueError(
r, storage_map[r][0],
hint=("Graph Input '%s' has invalid value "
"%s" % (r, storage_map[r][0])))
r_vals[r] = storage_map[r][0]
storage_map[r][0] = None
r_vals_initialized.append(r)
# store preallocated outputs in another map, and test
# the thunks on them as output storages.
init_outputs = {}
for r in storage_map:
if r in fgraph.outputs:
if storage_map[r][0] is not None:
init_outputs[r] = storage_map[r][0]
storage_map[r][0] = None
#####
# Precondition: the storage map is empty, transferred
# completely to r_vals
#####
for r, s in iteritems(storage_map):
if s[0] is not None:
print(r, s)
assert s[0] is None
# try:
# compute the value of all variables
for i, (thunk_py, thunk_c, node) in enumerate(zip(thunks_py,
thunks_c,
order)):
_logger.debug("%i - starting node %i %s", i, i, node)
# put a copy of each input into the storage_map
# also, check that inputs have valid values
for r in node.inputs:
assert isinstance(r, gof.Variable)
assert r in r_vals
storage_map[r][0] = _lessbroken_deepcopy(r_vals[r])
if not r.type.is_valid_value(storage_map[r][0]):
raise InvalidValueError(r, storage_map[r][0],
client_node=node)
# On the first call to thunk_py(), its output
# storage will be None
if thunk_py:
_logger.debug("%i - running thunk_py with None as "
"output storage", i)
try:
thunk_py()
except (utils.MethodNotDefined, __HOLE__):
# shouldn't have put it into the list in
# the first place
thunk_py = None
thunks_py[i] = None
except Exception as e:
# I think that only 1 optimization can
# insert a given apply node. If that is not True,
# we would need to loop over all node outputs,
# But this make the output uglier.
reason = fgraph.equivalence_tracker.reasons[
node.outputs[0]]
if not reason:
raise
opt = str(reason[0][0])
msg = (
"An optimization (probably %s) inserted an "
"apply node that raise an error." % opt +
"\nThe information we have about this "
"optimizations is:" + str(reason[0][1]) +
"\n" + reason[0][2] +
"\n\nThe original exception: \n" + str(e))
new_e = e.__class__(msg)
exc_type, exc_value, exc_trace = sys.exc_info()
exc_value = new_e
raise_with_op(node, thunk_c,
(exc_type, exc_value, exc_trace))
if thunk_py:
# check output values for type-correctness
for r in node.outputs:
if not r.type.is_valid_value(storage_map[r][0]):
hint2 = r.type.value_validity_msg(
storage_map[r][0])
raise InvalidValueError(r, storage_map[r][0],
hint='perform output',
specific_hint=hint2)
warn_inp = config.DebugMode.warn_input_not_reused
py_inplace_outs = _check_inputs(
node, storage_map, r_vals, dr_vals,
active_order_set,
clobber_dr_vals=True, perform='py',
warn_input_not_reused=warn_inp)
_check_viewmap(node, storage_map)
# Retrieve each output from the storage_map.
# The return values of this first run will be
# the reference ones
for r in node.outputs:
assert r not in r_vals
r_vals[r] = storage_map[r][0]
# clear the storage_map of outputs for the thunk_c
storage_map[r][0] = None
if self.maker.mode.check_preallocated_output:
prealloc_modes = \
self.maker.mode.check_preallocated_output
_logger.debug(
'%i - calling _check_preallocated_output '
'with thunk_py', i)
_check_preallocated_output(
node=node,
thunk=thunk_py,
prealloc_modes=prealloc_modes,
def_val=def_val,
storage_map=storage_map,
r_vals=r_vals,
dr_vals=dr_vals,
perform='py',
active_order_set=active_order_set,
inplace_outs=py_inplace_outs,
init_outputs=init_outputs)
sys.stdout.flush()
if thunk_c:
clobber = True
if thunk_py:
dmap = getattr(node.op, 'destroy_map', {})
vmap = getattr(node.op, 'view_map', {})
for i, r in enumerate(node.inputs):
# if thunk_py ran, and we still got
# this far, it means that the
# destroy_map of the Op (and view_map)
# are accurate so we can assume that
# inputs not marked as destroyed have
# in fact not been destroyed.
# Therefore... we only need to
# overwrite inputs that *have* been
# marked as destroyed. Inputs marked
# as viewd are unsafe too, because the
# corresponding output can be
# destroyed.
if any(i in v for v in chain(dmap.values(),
vmap.values())):
storage_map[r][0] = _lessbroken_deepcopy(
r_vals[r])
clobber = False
_logger.debug("%i - running thunk_c", i)
# First time, with None in output_storage
try:
thunk_c()
except Exception as e:
# I think that only 1 optimization can
# insert a given apply node. If that is not True,
# we would need to loop over all node outputs,
# But this make the output uglier.
reason = fgraph.equivalence_tracker.reasons[
node.outputs[0]]
if not reason:
raise
opt = str(reason[0][0])
msg = (
"An optimization (probably %s) inserted "
"an apply node that raise an error." % opt +
"\nThe information we have about this "
"optimizations is:" + str(reason[0][1]) +
"\n" + reason[0][2] +
"\n\nThe original exception: \n" + str(e))
new_e = e.__class__(msg)
exc_type, exc_value, exc_trace = sys.exc_info()
exc_value = new_e
raise_with_op(node, thunk_c,
(exc_type, exc_value, exc_trace))
for r in node.outputs:
# check output values for type-correctness
if not r.type.is_valid_value(storage_map[r][0]):
raise InvalidValueError(r, storage_map[r][0],
hint='c output')
if thunk_py:
# because we put it in during the
# thunk_py branch
assert r in r_vals
# check for stride correctness (may
# raise exception)
_check_strides_match(
r_vals[r], storage_map[r][0],
self.maker.mode.require_matching_strides,
node.op)
warn_inp = config.DebugMode.warn_input_not_reused
c_inplace_outs = _check_inputs(
node, storage_map, r_vals,
dr_vals, active_order_set,
clobber_dr_vals=clobber, perform='c',
warn_input_not_reused=warn_inp)
_check_viewmap(node, storage_map)
# Check with Python result
for r in node.outputs:
if r in r_vals:
# compares the version from thunk_py
# (in r_vals) to the version produced
# by thunk_c (in storage_map)
if not check_eq(r, r_vals[r],
storage_map[r][0]):
inputs_val = [storage_map[inp][0]
for inp in r.owner.inputs]
raise BadThunkOutput(
r, thunk1='perform', val1=r_vals[r],
thunk2='c_code',
val2=storage_map[r][0],
inputs_val=inputs_val)
else:
# retrieve each output from the storage_map
r_vals[r] = storage_map[r][0]
# clear the storage_map for the thunk_c
storage_map[r][0] = None
if self.maker.mode.check_preallocated_output:
prealloc_modes = \
self.maker.mode.check_preallocated_output
def thunk():
try:
thunk_c()
except Exception:
raise_with_op(node, thunk_c)
_logger.debug(
'%i - calling _check_preallocated_output '
'with thunk_c', i)
_check_preallocated_output(
node=node,
thunk=thunk,
prealloc_modes=prealloc_modes,
def_val=def_val,
storage_map=storage_map,
r_vals=r_vals,
dr_vals=dr_vals,
perform='c code',
active_order_set=active_order_set,
inplace_outs=c_inplace_outs,
init_outputs=init_outputs)
sys.stdout.flush()
# we're done with this thunk
# clear everything out of the storage_map
for r in node.inputs:
storage_map[r][0] = None
_logger.debug("%i - done with node", i)
for r in node.outputs:
if r not in r_vals:
idx = order.index(node)
assert thunks_py[idx] is None, node
assert thunks_c[idx] is None, node
raise Exception("No code run for %s" % node)
if False:
# This could be useful to help finding refcount problem.
# But it is very slow and it is not sure it will help.
gc.collect()
_find_bad_optimizations(order,
fgraph.equivalence_tracker.reasons,
r_vals)
#####
# Postcondition: the input and output variables are
# in the storage map, nothing more
#####
# Nothing should be in storage map after evaluating
# each the thunk (specifically the last one)
for r, s in iteritems(storage_map):
assert type(s) is list
assert s[0] is None
# store our output variables to their respective storage lists
for output, storage in zip(fgraph.outputs, output_storage):
storage[0] = r_vals[output]
# transfer all inputs back to their respective storage lists
for r in r_vals:
if r.owner is None:
if r in fgraph.inputs:
assert (storage_map[r] is
input_storage[fgraph.inputs.index(r)])
storage_map[r][0] = r_vals[r]
# if an input was destroyed, the destroyed value
# should be returned
for r in dr_vals:
assert dr_vals[r][0] is not None
if r.owner is None:
assert r in fgraph.inputs
# HACK TO LOOK LIKE A REAL DESTRUCTIVE ACTION
# TOOK PLACE
if ((type(dr_vals[r][0]) in
(numpy.ndarray, numpy.memmap)) and
(dr_vals[r][0].dtype ==
storage_map[r][0].dtype) and
(dr_vals[r][0].shape ==
storage_map[r][0].shape)):
if len(dr_vals[r][0].shape):
storage_map[r][0][:] = dr_vals[r][0]
else:
storage_map[r][0].itemset(dr_vals[r][0])
else:
storage_map[r][0] = dr_vals[r][0]
except Exception:
# Restore the initial state of storage_map
for r in storage_map:
if r in original_storage_map_keys:
# If r was transferred to r_vals, put it back
if r in r_vals_initialized:
storage_map[r][0] = r_vals[r]
else:
# clear out any partially-computed stuff
storage_map[r][0] = None
raise
for r in storage_map:
if (r.owner is None):
if not r.type.is_valid_value(None):
assert storage_map[r][0] is not None
###############
# Done debugmode function call 'f'
##############
def run_with_tensortype_filter_check(f):
def deco():
# WARNING: this is a global mechanism...
# so it will screw up if we are trying to use
# multiple modes at once.
old_filter_checks_isfinite = TensorType.filter_checks_isfinite
TensorType.filter_checks_isfinite = \
self.maker.mode.check_isfinite
try:
return f()
finally:
# put back the filter_checks_isfinite
TensorType.filter_checks_isfinite = \
old_filter_checks_isfinite
return deco
f = run_with_tensortype_filter_check(f)
f.storage_map = storage_map
f.allow_gc = True
assert len(fgraph.inputs) == len(input_storage)
assert len(fgraph.outputs) == len(output_storage)
return (f,
[link.Container(input, storage, readonly=False)
for input, storage in zip(fgraph.inputs, input_storage)],
[link.Container(output, storage, readonly=True)
for output, storage in zip(fgraph.outputs, output_storage)],
thunks_py, order) | NotImplementedError | dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/compile/debugmode.py/_Linker.make_all |
def GenerateOutput(target_list, target_dicts, data, params):
user_config = params.get('generator_flags', {}).get('config', None)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data,
params, user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
if params['parallel']:
try:
pool = multiprocessing.Pool(len(config_names))
arglists = []
for config_name in config_names:
arglists.append((target_list, target_dicts, data,
params, config_name))
pool.map(CallGenerateOutputForConfig, arglists)
except __HOLE__, e:
pool.terminate()
raise e
else:
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data,
params, config_name) | KeyboardInterrupt | dataset/ETHPy150Open francelabs/datafari/debian7/elk/kibana/node/lib/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/cmake.py/GenerateOutput |
@staticmethod
def load(path, name, cluster):
"""
Load a node from from the path on disk to the config files, the node name and the
cluster the node is part of.
"""
node_path = os.path.join(path, name)
filename = os.path.join(node_path, 'node.conf')
with open(filename, 'r') as f:
data = yaml.load(f)
try:
itf = data['interfaces']
initial_token = None
if 'initial_token' in data:
initial_token = data['initial_token']
remote_debug_port = 2000
if 'remote_debug_port' in data:
remote_debug_port = data['remote_debug_port']
binary_interface = None
if 'binary' in itf and itf['binary'] is not None:
binary_interface = tuple(itf['binary'])
node = cluster.create_node(data['name'], data['auto_bootstrap'], tuple(itf['thrift']), tuple(itf['storage']), data['jmx_port'], remote_debug_port, initial_token, save=False, binary_interface=binary_interface, byteman_port=data['byteman_port'])
node.status = data['status']
if 'pid' in data:
node.pid = int(data['pid'])
if 'install_dir' in data:
node.__install_dir = data['install_dir']
if 'config_options' in data:
node.__config_options = data['config_options']
if 'dse_config_options' in data:
node._dse_config_options = data['dse_config_options']
if 'data_center' in data:
node.data_center = data['data_center']
if 'workloads' in data:
node.workloads = data['workloads']
return node
except __HOLE__ as k:
raise common.LoadError("Error Loading " + filename + ", missing property: " + str(k)) | KeyError | dataset/ETHPy150Open pcmanus/ccm/ccmlib/node.py/Node.load |
def print_process_output(self, name, proc, verbose=False):
try:
stderr = proc.communicate()[1]
except __HOLE__:
stderr = ''
if len(stderr) > 1:
print_("[%s ERROR] %s" % (name, stderr.strip()))
# This will return when exprs are found or it timeouts | ValueError | dataset/ETHPy150Open pcmanus/ccm/ccmlib/node.py/Node.print_process_output |
def stress(self, stress_options=None, capture_output=False, whitelist=False,**kwargs):
if stress_options is None:
stress_options = []
else:
stress_options = stress_options[:]
stress = common.get_stress_bin(self.get_install_dir())
if self.cluster.cassandra_version() <= '2.1':
stress_options.append('-d')
stress_options.append(self.address())
else:
stress_options.append('-node')
if whitelist:
stress_options.append("whitelist")
stress_options.append(self.address())
# specify used jmx port if not already set
if not [opt for opt in stress_options if opt.startswith('jmx=')]:
stress_options.extend(['-port', 'jmx=' + self.jmx_port])
args = [stress] + stress_options
try:
if capture_output:
p = subprocess.Popen(args, cwd=common.parse_path(stress),
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
**kwargs)
stdout, stderr = p.communicate()
else:
p = subprocess.Popen(args, cwd=common.parse_path(stress),
**kwargs)
stdout, stderr = None, None
p.wait()
return stdout, stderr
except __HOLE__:
pass | KeyboardInterrupt | dataset/ETHPy150Open pcmanus/ccm/ccmlib/node.py/Node.stress |
def shuffle(self, cmd):
cdir = self.get_install_dir()
shuffle = common.join_bin(cdir, 'bin', 'cassandra-shuffle')
host = self.address()
args = [shuffle, '-h', host, '-p', str(self.jmx_port)] + [cmd]
try:
subprocess.call(args)
except __HOLE__:
pass | KeyboardInterrupt | dataset/ETHPy150Open pcmanus/ccm/ccmlib/node.py/Node.shuffle |
def __update_status(self):
if self.pid is None:
if self.status == Status.UP or self.status == Status.DECOMMISSIONED:
self.status = Status.DOWN
return
old_status = self.status
# os.kill on windows doesn't allow us to ping a process
if common.is_win():
self.__update_status_win()
else:
try:
os.kill(self.pid, 0)
except __HOLE__ as err:
if err.errno == errno.ESRCH:
# not running
if self.status == Status.UP or self.status == Status.DECOMMISSIONED:
self.status = Status.DOWN
elif err.errno == errno.EPERM:
# no permission to signal this process
if self.status == Status.UP or self.status == Status.DECOMMISSIONED:
self.status = Status.DOWN
else:
# some other error
raise err
else:
if self.status == Status.DOWN or self.status == Status.UNINITIALIZED:
self.status = Status.UP
if not old_status == self.status:
if old_status == Status.UP and self.status == Status.DOWN:
self.pid = None
self._update_config() | OSError | dataset/ETHPy150Open pcmanus/ccm/ccmlib/node.py/Node.__update_status |
def _find_pid_on_windows(self):
found = False
try:
import psutil
found = psutil.pid_exists(self.pid)
except __HOLE__:
print_("WARN: psutil not installed. Pid tracking functionality will suffer. See README for details.")
cmd = 'tasklist /fi "PID eq ' + str(self.pid) + '"'
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
for line in proc.stdout:
if re.match("Image", str(line)):
found = True
return found | ImportError | dataset/ETHPy150Open pcmanus/ccm/ccmlib/node.py/Node._find_pid_on_windows |
def _update_pid(self, process):
pidfile = os.path.join(self.get_path(), 'cassandra.pid')
start = time.time()
while not (os.path.isfile(pidfile) and os.stat(pidfile).st_size > 0):
if (time.time() - start > 30.0):
print_("Timed out waiting for pidfile to be filled (current time is %s)" % (datetime.now()))
break
else:
time.sleep(0.1)
try:
with open(pidfile, 'rb') as f:
if common.is_win() and self.get_base_cassandra_version() >= 2.1:
self.pid = int(f.readline().strip().decode('utf-16').strip())
else:
self.pid = int(f.readline().strip())
except __HOLE__ as e:
raise NodeError('Problem starting node %s due to %s' % (self.name, e), process)
self.__update_status() | IOError | dataset/ETHPy150Open pcmanus/ccm/ccmlib/node.py/Node._update_pid |
def pause(self):
try:
import psutil
p = psutil.Process(self.pid)
p.suspend()
except __HOLE__:
if common.is_win():
print_("WARN: psutil not installed. Pause functionality will not work properly on Windows.")
else:
os.kill(self.pid, signal.SIGSTOP) | ImportError | dataset/ETHPy150Open pcmanus/ccm/ccmlib/node.py/Node.pause |
def resume(self):
try:
import psutil
p = psutil.Process(self.pid)
p.resume()
except __HOLE__:
if common.is_win():
print_("WARN: psutil not installed. Resume functionality will not work properly on Windows.")
else:
os.kill(self.pid, signal.SIGCONT) | ImportError | dataset/ETHPy150Open pcmanus/ccm/ccmlib/node.py/Node.resume |
def _get_load_from_info_output(info):
load_lines = [s for s in info.split('\n')
if s.startswith('Load')]
if not len(load_lines) == 1:
msg = ('Expected output from `nodetool info` to contain exactly 1 '
'line starting with "Load". Found:\n') + info
raise RuntimeError(msg)
load_line = load_lines[0].split()
# Don't have access to C* version here, so we need to support both prefix styles
# See CASSANDRA-9692 on Apache JIRA
unit_multipliers = {'KiB': 1,
'KB': 1,
'MiB': 1024,
'MB': 1024,
'GiB': 1024 * 1024,
'GB': 1024 * 1024,
'TiB': 1024 * 1024 * 1024,
'TB': 1024 * 1024 * 1024}
load_num, load_units = load_line[2], load_line[3]
try:
load_mult = unit_multipliers[load_units]
except __HOLE__:
expected = ', '.join(list(unit_multipliers))
msg = ('Expected `nodetool info` to report load in one of the '
'following units:\n'
' {expected}\n'
'Found:\n'
' {found}').format(expected=expected, found=load_units)
raise RuntimeError(msg)
return float(load_num) * load_mult | KeyError | dataset/ETHPy150Open pcmanus/ccm/ccmlib/node.py/_get_load_from_info_output |
def _grep_log_for_errors(log):
matchings = []
it = iter(log.splitlines())
for line in it:
is_error_line = ('ERROR' in line
and 'DEBUG' not in line.split('ERROR')[0])
if is_error_line:
matchings.append([line])
try:
it, peeker = itertools.tee(it)
while 'INFO' not in next(peeker):
matchings[-1].append(next(it))
except __HOLE__:
break
return matchings | StopIteration | dataset/ETHPy150Open pcmanus/ccm/ccmlib/node.py/_grep_log_for_errors |
def _get__data_sources_names(self):
names = []
for name in self.data_sources:
try:
self.data_sources[name] + 1
names.append(name)
except __HOLE__:
pass
names.sort()
return names
# Dictionnary mapping the views | TypeError | dataset/ETHPy150Open enthought/mayavi/mayavi/tools/data_wizards/data_source_wizard.py/DataSourceWizard._get__data_sources_names |
def _findNode(self, nodeId):
try:
return self.getDispatchTree().nodes[int(nodeId)]
except __HOLE__:
raise NodeNotFoundError(nodeId) | KeyError | dataset/ETHPy150Open mikrosimage/OpenRenderManagement/src/octopus/dispatcher/webservice/nodes.py/NodesResource._findNode |
def iterOnCommands(self):
"""
Cancel each command in a node hierarchy (command is given by a generator on the node)
Each command might be blocked by network pb (or machine swapping) but asynchronous mecanism will
allow other request to be treated between each command cancelation.
"""
try:
# Get next command in generator
cmd = self.gen.next()
cmd.cancel()
tornado.ioloop.IOLoop.instance().add_callback(self.iterOnCommands)
except __HOLE__:
self.finish() | StopIteration | dataset/ETHPy150Open mikrosimage/OpenRenderManagement/src/octopus/dispatcher/webservice/nodes.py/NodeStatusResource.iterOnCommands |
def put(self, nodeId):
data = self.getBodyAsJSON()
try:
paused = data['paused']
except __HOLE__:
raise Http400('Missing entry: "paused".')
except TypeError:
raise Http400('Missing entry: "paused".')
else:
nodeId = int(nodeId)
node = self._findNode(nodeId)
node.setPaused(paused)
self.writeCallback("Paused flag changed.") | KeyError | dataset/ETHPy150Open mikrosimage/OpenRenderManagement/src/octopus/dispatcher/webservice/nodes.py/NodePausedResource.put |
def put(self, nodeId):
'''
| Put a new value for the maxAttempt attribute of a task node
| The new maxAttempt value is taken from request body, for instance : "{ maxAttempt : 10 }"
:param nodeId: id of the task node to update
'''
# Get task object
try:
nodeId = int(nodeId)
node = self._findNode(nodeId)
except NodeNotFoundError:
raise HTTPError(404, "Node not found: %d" % nodeId)
# Get maxAttemtp from request body
data = self.getBodyAsJSON()
try:
maxAttempt = int(data['maxAttempt'])
except __HOLE__, e:
raise HTTPError(400, 'Missing entry: "maxAttempt".')
except (TypeError, ValueError), e:
raise HTTPError(400, 'Invalid type for "maxAttempt", integer expected but %r received (error: %s)' % (data['maxAttempt'], e))
# Update selected node and associated task
if isinstance(node, TaskNode) or isinstance(node, FolderNode):
result = node.setMaxAttempt(maxAttempt)
if result is False:
raise HTTPError(404, "Impossible to set 'maxAttempt' on node %d" % nodeId)
else:
raise HTTPError(404, "Invalid element selected: %r" % type(node))
message = "Attribute maxAttempt of node %d has successfully been updated." % nodeId
self.writeCallback(message) | KeyError | dataset/ETHPy150Open mikrosimage/OpenRenderManagement/src/octopus/dispatcher/webservice/nodes.py/NodeMaxAttemptResource.put |
def __getattr__(self, key):
try:
return self[key]
except __HOLE__:
raise AttributeError(r"'Dict' object has no attribute '%s'" % key) | KeyError | dataset/ETHPy150Open michaelliao/transwarp/transwarp/utils.py/Dict.__getattr__ |
def test_options_without_type(self):
query = self.session.query(self.Character.name).filter(
match({self.Character.name: 0.5, self.Character.info['race']: 0.9},
'Trillian',
options={'boost': 10.0})
)
err = None
try:
str(query)
except __HOLE__ as e:
err = e
msg = "missing match_type. " + \
"It's not allowed to specify options without match_type"
self.assertEquals(str(err), msg) | ValueError | dataset/ETHPy150Open crate/crate-python/src/crate/client/sqlalchemy/tests/match_test.py/SqlAlchemyMatchTest.test_options_without_type |
def conv_TimeField(self, model, field, kwargs):
def time_only(obj):
try:
return obj.time()
except __HOLE__:
return obj
kwargs['filters'].append(time_only)
return f.DateTimeField(widget=form.DateTimePickerWidget(),
format='%H:%M:%S', **kwargs) | AttributeError | dataset/ETHPy150Open syrusakbary/Flask-SuperAdmin/flask_superadmin/model/backends/django/orm.py/AdminModelConverter.conv_TimeField |
def conv_DateTimeField(self, model, field, kwargs):
def time_only(obj):
try:
return obj.time()
except __HOLE__:
return obj
kwargs['filters'].append(time_only)
return f.DateTimeField(widget=form.DateTimePickerWidget(),
format='%H:%M:%S', **kwargs) | AttributeError | dataset/ETHPy150Open syrusakbary/Flask-SuperAdmin/flask_superadmin/model/backends/django/orm.py/AdminModelConverter.conv_DateTimeField |
def conv_DateField(self, model, field, kwargs):
def time_only(obj):
try:
return obj.date()
except __HOLE__:
return obj
kwargs['filters'].append(time_only)
return f.DateField(widget=form.DatePickerWidget(), **kwargs) | AttributeError | dataset/ETHPy150Open syrusakbary/Flask-SuperAdmin/flask_superadmin/model/backends/django/orm.py/AdminModelConverter.conv_DateField |
def conv_USStateField(self, model, field, kwargs):
try:
from django.contrib.localflavor.us.us_states import STATE_CHOICES
except __HOLE__:
STATE_CHOICES = []
return f.SelectField(choices=STATE_CHOICES, **kwargs) | ImportError | dataset/ETHPy150Open syrusakbary/Flask-SuperAdmin/flask_superadmin/model/backends/django/orm.py/AdminModelConverter.conv_USStateField |
def refresh(self):
"""
Refresh the screen.
"""
super(_CursesScreen, self).refresh()
try:
sys.stdout.flush()
except __HOLE__:
pass | IOError | dataset/ETHPy150Open peterbrittain/asciimatics/asciimatics/screen.py/_CursesScreen.refresh |
def _print_at(self, text, x, y):
"""
Print string at the required location.
:param text: The text string to print.
:param x: The x coordinate
:param y: The Y coordinate
"""
# Move the cursor if necessary
msg = ""
if x != self._x or y != self._y:
msg += curses.tparm(self._move_y_x, y, x).decode("utf-8")
msg += text
# Print the text at the required location and update the current
# position. Screen resize can throw IOErrors. These can be safely
# ignored as the screen will be shortly reset anyway.
try:
sys.stdout.write(msg)
except __HOLE__:
pass | IOError | dataset/ETHPy150Open peterbrittain/asciimatics/asciimatics/screen.py/_CursesScreen._print_at |
def refresh(self):
"""
Refresh the screen.
"""
# Flush screen buffer to get all updates after doing the common
# processing. Exact timing of the signal can interrupt the
# flush, raising an EINTR IOError, which we can safely ignore.
super(_BlessedScreen, self).refresh()
try:
sys.stdout.flush()
except __HOLE__:
pass | IOError | dataset/ETHPy150Open peterbrittain/asciimatics/asciimatics/screen.py/_BlessedScreen.refresh |
def get_order_for_user_or_404(user, number):
try:
return queryset_orders_for_user(user).get(number=number)
except __HOLE__:
raise Http404() | ObjectDoesNotExist | dataset/ETHPy150Open django-oscar/django-oscar/src/oscar/apps/dashboard/orders/views.py/get_order_for_user_or_404 |
def handle_line_action(self, request, order, action):
if action not in self.line_actions:
return self.reload_page(error=_("Invalid action"))
# Load requested lines
line_ids = request.POST.getlist('selected_line')
if len(line_ids) == 0:
return self.reload_page(error=_(
"You must select some lines to act on"))
lines = order.lines.filter(id__in=line_ids)
if len(line_ids) != len(lines):
return self.reload_page(error=_("Invalid lines requested"))
# Build list of line quantities
line_quantities = []
for line in lines:
qty = request.POST.get('selected_line_qty_%s' % line.id)
try:
qty = int(qty)
except __HOLE__:
qty = None
if qty is None or qty <= 0:
error_msg = _("The entered quantity for line #%s is not valid")
return self.reload_page(error=error_msg % line.id)
elif qty > line.quantity:
error_msg = _(
"The entered quantity for line #%(line_id)s "
"should not be higher than %(quantity)s")
kwargs = {'line_id': line.id, 'quantity': line.quantity}
return self.reload_page(error=error_msg % kwargs)
line_quantities.append(qty)
return getattr(self, action)(
request, order, lines, line_quantities) | ValueError | dataset/ETHPy150Open django-oscar/django-oscar/src/oscar/apps/dashboard/orders/views.py/OrderDetailView.handle_line_action |
def delete_note(self, request, order):
try:
note = order.notes.get(id=request.POST.get('note_id', None))
except __HOLE__:
messages.error(request, _("Note cannot be deleted"))
else:
messages.info(request, _("Note deleted"))
note.delete()
return self.reload_page() | ObjectDoesNotExist | dataset/ETHPy150Open django-oscar/django-oscar/src/oscar/apps/dashboard/orders/views.py/OrderDetailView.delete_note |
def _populate_port_ha_information(self, context, port, router_id, hags,
user_router_id, modified_interfaces):
subnet_id = port['fixed_ips'][0]['subnet_id']
try:
hag = hags[subnet_id]
except __HOLE__:
# Oops, the subnet_id was not found. Probably because the DB
# insertion of that HA group is still in progress by another
# process and has not been committed to the DB yet.
# Let's retry a few times to see if the DB entry turns up.
LOG.debug('No HA group info for router: %(r_id)s and subnet: '
'%(s_id)s was found when populating HA info for port: '
'%(p_id)s. Will now make additional lookup attempts.',
{'r_id': router_id, 's_id': subnet_id,
'p_id': port['id']})
try:
hag = self._get_ha_group_for_subnet_id(context, router_id,
subnet_id)
except exc.NoResultFound:
hag = None
if hag is None:
LOG.debug('Failed to fetch the HA group info for for router: '
'%(r_id)s and subnet: %(s_id)s. Giving up. No HA '
'info will be added to the router\'s port: %s.',
{'r_id': router_id, 's_id': subnet_id,
'p_id': port['id']})
# we leave it to the L3 config agent to handle this
return
else:
LOG.debug('Successfully fetched the HA group info for '
'router: %(r_id)s and subnet: %(s_id)s from DB',
{'r_id': router_id, 's_id': subnet_id})
hags[subnet_id] = hag
if router_id == user_router_id:
# If the router interface need no dedicated IP address we just
# set the HA (VIP) port to the port itself. The config agent
# driver will know how to handle this "signal".
p_id = hag.extra_port_id or port['id']
try:
interface_port = self._core_plugin.get_port(context, p_id)
except n_exc.PortNotFound:
LOG.debug('**** NO Port Info for '
'router: %(r_id)s : Port: %(p_id)s from DB',
{'r_id': router_id, 'p_id': port['id']})
return
LOG.debug('**** Fetched Port Info for '
'router: %(r_id)s : Port: %(p_id)s from DB',
{'r_id': router_id, 'p_id': port['id']})
self._populate_mtu_and_subnets_for_ports(context, [interface_port])
modified_interfaces.append(interface_port)
ha_port = port
else:
try:
ha_port = self._core_plugin.get_port(context, hag.ha_port_id)
except n_exc.PortNotFound:
LOG.debug('**** NO Port Info for '
'router(BAK): %(r_id)s : Port: %(p_id)s from DB',
{'r_id': router_id, 'p_id': hag.ha_port_id})
return
LOG.debug('**** Fetched Port Info for '
'router(BAK): %(r_id)s : Port: %(p_id)s from DB',
{'r_id': router_id, 'p_id': hag.ha_port_id})
self._populate_mtu_and_subnets_for_ports(context, [ha_port])
interface_port = port
interface_port[ha.HA_INFO] = {
ha.TYPE: hag.ha_type,
HA_GROUP: hag.group_identity,
'timers_config': hag.timers_config,
'tracking_config': hag.tracking_config,
'other_config': hag.other_config,
HA_PORT: ha_port}
return interface_port | KeyError | dataset/ETHPy150Open openstack/networking-cisco/networking_cisco/plugins/cisco/db/l3/ha_db.py/HA_db_mixin._populate_port_ha_information |
@gen_test
def test_websocket_handler_bad_token(self):
"""
A bad token should returns a 401 during a websocket connect
"""
token = 'A'*32
url = 'ws://127.0.0.1:{0}/hook/{1}'.format(self.get_http_port(), token)
request = HTTPRequest(url, headers={'Origin': 'http://example.com',
'Host': 'example.com'})
try:
ws = yield websocket_connect(request)
except __HOLE__ as error:
self.assertEqual(error.code, 401) | HTTPError | dataset/ETHPy150Open saltstack/salt/tests/unit/netapi/rest_tornado/test_handlers.py/TestWebsocketSaltAPIHandler.test_websocket_handler_bad_token |
@gen_test
def test_cors_origin_single(self):
self._app.mod_opts['cors_origin'] = 'http://example.com'
response = yield self.http_client.fetch(self.get_url('/login'),
method='POST',
body=urlencode(self.auth_creds),
headers={'Content-Type': self.content_type_map['form']})
token = json.loads(response.body)['return'][0]['token']
url = 'ws://127.0.0.1:{0}/hook/{1}'.format(self.get_http_port(), token)
# Example.com should works
request = HTTPRequest(url, headers={'Origin': 'http://example.com',
'Host': 'example.com'})
ws = yield websocket_connect(request)
ws.write_message('websocket client ready')
ws.close()
# But foo.bar not
request = HTTPRequest(url, headers={'Origin': 'http://foo.bar',
'Host': 'example.com'})
try:
ws = yield websocket_connect(request)
except __HOLE__ as error:
self.assertEqual(error.code, 403) | HTTPError | dataset/ETHPy150Open saltstack/salt/tests/unit/netapi/rest_tornado/test_handlers.py/TestWebsocketSaltAPIHandler.test_cors_origin_single |
@workflow
def operation_mapping3(ctx, value, **_):
def expect_error(func):
try:
func('test.operation', kwargs={
'value': value
}).get()
except __HOLE__, e:
assert 'Duplicate' in e.message
node1 = list(ctx.get_node('node1').instances)[0]
node2_rel = list(list(ctx.get_node('node2').instances)[0].relationships)[0]
node3_rel = list(list(ctx.get_node('node3').instances)[0].relationships)[0]
expect_error(node1.execute_operation)
expect_error(node2_rel.execute_source_operation)
expect_error(node3_rel.execute_target_operation) | RuntimeError | dataset/ETHPy150Open cloudify-cosmo/cloudify-manager/tests/mock_plugins/mock_workflows/workflows.py/operation_mapping3 |
def is_superuser_staff_or_in_translators_group(user):
if not getattr(settings, 'ROSETTA_REQUIRES_AUTH', True):
return True
try:
if not user.is_authenticated():
return False
elif user.is_superuser and user.is_staff:
return True
else:
return user.groups.filter(name='translators').exists()
except __HOLE__:
if not hasattr(user, 'is_authenticated') or not hasattr(user, 'is_superuser') or not hasattr(user, 'groups'):
raise ImproperlyConfigured('If you are using custom User Models you must implement a custom authentication method for Rosetta. See ROSETTA_ACCESS_CONTROL_FUNCTION here: https://django-rosetta.readthedocs.org/en/latest/settings.html')
raise | AttributeError | dataset/ETHPy150Open mbi/django-rosetta/rosetta/access.py/is_superuser_staff_or_in_translators_group |
def can_translate_language(user, langid):
try:
if not rosetta_settings.ROSETTA_LANGUAGE_GROUPS:
return can_translate(user)
elif not user.is_authenticated():
return False
elif user.is_superuser and user.is_staff:
return True
else:
return user.groups.filter(name='translators-%s' % langid).exists()
except __HOLE__:
if not hasattr(user, 'is_authenticated') or not hasattr(user, 'is_superuser') or not hasattr(user, 'groups'):
raise ImproperlyConfigured('If you are using custom User Models you must implement a custom authentication method for Rosetta. See ROSETTA_ACCESS_CONTROL_FUNCTION here: https://django-rosetta.readthedocs.org/en/latest/settings.html')
raise | AttributeError | dataset/ETHPy150Open mbi/django-rosetta/rosetta/access.py/can_translate_language |
def create_authn_response(self, identity, in_response_to, destination,
sp_entity_id, name_id_policy=None, userid=None,
name_id=None, authn=None, issuer=None,
sign_response=None, sign_assertion=None,
encrypt_cert=None, encrypt_assertion=None,
**kwargs):
""" Constructs an AuthenticationResponse
:param identity: Information about an user
:param in_response_to: The identifier of the authentication request
this response is an answer to.
:param destination: Where the response should be sent
:param sp_entity_id: The entity identifier of the Service Provider
:param name_id_policy: How the NameID should be constructed
:param userid: The subject identifier
:param authn: Dictionary with information about the authentication
context
:param issuer: Issuer of the response
:param sign_assertion: Whether the assertion should be signed or not.
:param sign_response: Whether the response should be signed or not.
:return: A response instance
"""
try:
policy = kwargs["release_policy"]
except KeyError:
policy = self.config.getattr("policy", "idp")
try:
best_effort = kwargs["best_effort"]
except KeyError:
best_effort = False
if sign_assertion is None:
sign_assertion = self.config.getattr("sign_assertion", "idp")
if sign_assertion is None:
sign_assertion = False
if sign_response is None:
sign_response = self.config.getattr("sign_response", "idp")
if sign_response is None:
sign_response = False
if encrypt_assertion is None:
encrypt_assertion = self.config.getattr("encrypt_assertion", "idp")
if encrypt_assertion is None:
encrypt_assertion = False
if encrypt_assertion:
if encrypt_cert is not None:
verify_encrypt_cert = self.config.getattr("verify_encrypt_cert", "idp")
if verify_encrypt_cert is not None:
if not verify_encrypt_cert(encrypt_cert):
raise CertificateError("Invalid certificate for encryption!")
else:
raise CertificateError("No SPCertEncType certificate for encryption contained in authentication "
"request.")
else:
encrypt_assertion = False
if not name_id:
try:
nid_formats = []
for _sp in self.metadata[sp_entity_id]["spsso_descriptor"]:
if "name_id_format" in _sp:
nid_formats.extend([n["text"] for n in
_sp["name_id_format"]])
try:
snq = name_id_policy.sp_name_qualifier
except AttributeError:
snq = sp_entity_id
if not snq:
snq = sp_entity_id
kwa = {"sp_name_qualifier": snq}
try:
kwa["format"] = name_id_policy.format
except __HOLE__:
pass
_nids = self.ident.find_nameid(userid, **kwa)
# either none or one
if _nids:
name_id = _nids[0]
else:
name_id = self.ident.construct_nameid(userid, policy,
sp_entity_id,
name_id_policy)
logger.debug("construct_nameid: %s => %s" % (userid,
name_id))
except IOError, exc:
response = self.create_error_response(in_response_to,
destination,
sp_entity_id,
exc, name_id)
return ("%s" % response).split("\n")
try:
_authn = authn
if (sign_assertion or sign_response) and self.sec.cert_handler.generate_cert():
with self.lock:
self.sec.cert_handler.update_cert(True)
return self._authn_response(in_response_to, # in_response_to
destination, # consumer_url
sp_entity_id, # sp_entity_id
identity, # identity as dictionary
name_id,
authn=_authn,
issuer=issuer,
policy=policy,
sign_assertion=sign_assertion,
sign_response=sign_response,
best_effort=best_effort,
encrypt_assertion=encrypt_assertion,
encrypt_cert=encrypt_cert)
return self._authn_response(in_response_to, # in_response_to
destination, # consumer_url
sp_entity_id, # sp_entity_id
identity, # identity as dictionary
name_id,
authn=_authn,
issuer=issuer,
policy=policy,
sign_assertion=sign_assertion,
sign_response=sign_response,
best_effort=best_effort,
encrypt_assertion=encrypt_assertion,
encrypt_cert=encrypt_cert)
except MissingValue, exc:
return self.create_error_response(in_response_to, destination,
sp_entity_id, exc, name_id) | AttributeError | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/pysaml2-2.4.0/src/saml2/server.py/Server.create_authn_response |
def create_assertion_id_request_response(self, assertion_id, sign=False,
**kwargs):
"""
:param assertion_id:
:param sign:
:return:
"""
try:
(assertion, to_sign) = self.session_db.get_assertion(assertion_id)
except __HOLE__:
raise Unknown
if to_sign:
if assertion.signature is None:
assertion.signature = pre_signature_part(assertion.id,
self.sec.my_cert, 1)
return signed_instance_factory(assertion, self.sec, to_sign)
else:
return assertion
#noinspection PyUnusedLocal | KeyError | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/pysaml2-2.4.0/src/saml2/server.py/Server.create_assertion_id_request_response |
def clean_out_user(self, name_id):
"""
Remove all authentication statements that belongs to a user identified
by a NameID instance
:param name_id: NameID instance
:return: The local identifier for the user
"""
lid = self.ident.find_local_id(name_id)
logger.info("Clean out %s" % lid)
# remove the authentications
try:
for _nid in [decode(x) for x in self.ident.db[lid].split(" ")]:
try:
self.session_db.remove_authn_statements(_nid)
except KeyError:
pass
except __HOLE__:
pass
return lid | KeyError | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/pysaml2-2.4.0/src/saml2/server.py/Server.clean_out_user |
def cpu_count():
'''
Returns the number of CPUs in the system
'''
if sys.platform == 'win32':
try:
num = int(os.environ['NUMBER_OF_PROCESSORS'])
except (ValueError, KeyError):
num = 0
elif 'bsd' in sys.platform or sys.platform == 'darwin':
comm = '/sbin/sysctl -n hw.ncpu'
if sys.platform == 'darwin':
comm = '/usr' + comm
try:
with os.popen(comm) as p:
num = int(p.read())
except ValueError:
num = 0
else:
try:
num = os.sysconf('SC_NPROCESSORS_ONLN')
except (__HOLE__, OSError, AttributeError):
num = 0
if num >= 1:
return num
else:
raise NotImplementedError('cannot determine number of cpus') | ValueError | dataset/ETHPy150Open benoitc/flower/flower/util.py/cpu_count |
def extract_text(escaped_html_str):
notes = xml.sax.saxutils.unescape(escaped_html_str)
try:
from PyQt4 import QtGui
except __HOLE__:
return str(notes)
else:
fragment = QtGui.QTextDocumentFragment.fromHtml(notes)
return str(fragment.toPlainText())
# The queries are old and are preserved for reference. Some code is
# quite old (references to vis_application, for example). | ImportError | dataset/ETHPy150Open VisTrails/VisTrails/vistrails/core/query/__init__.py/extract_text |
@defer.inlineCallbacks
def on_PUT(self, request, txn_id):
try:
defer.returnValue(
self.txns.get_client_transaction(request, txn_id)
)
except __HOLE__:
pass
response = yield self.on_POST(request)
self.txns.store_client_transaction(request, txn_id, response)
defer.returnValue(response) | KeyError | dataset/ETHPy150Open matrix-org/synapse/synapse/rest/client/v1/room.py/RoomCreateRestServlet.on_PUT |
@defer.inlineCallbacks
def on_PUT(self, request, room_id, event_type, txn_id):
try:
defer.returnValue(
self.txns.get_client_transaction(request, txn_id)
)
except __HOLE__:
pass
response = yield self.on_POST(request, room_id, event_type, txn_id)
self.txns.store_client_transaction(request, txn_id, response)
defer.returnValue(response)
# TODO: Needs unit testing for room ID + alias joins | KeyError | dataset/ETHPy150Open matrix-org/synapse/synapse/rest/client/v1/room.py/RoomSendEventRestServlet.on_PUT |
@defer.inlineCallbacks
def on_PUT(self, request, room_identifier, txn_id):
try:
defer.returnValue(
self.txns.get_client_transaction(request, txn_id)
)
except __HOLE__:
pass
response = yield self.on_POST(request, room_identifier, txn_id)
self.txns.store_client_transaction(request, txn_id, response)
defer.returnValue(response)
# TODO: Needs unit testing | KeyError | dataset/ETHPy150Open matrix-org/synapse/synapse/rest/client/v1/room.py/JoinRoomAliasServlet.on_PUT |
@defer.inlineCallbacks
def on_PUT(self, request, room_id, membership_action, txn_id):
try:
defer.returnValue(
self.txns.get_client_transaction(request, txn_id)
)
except __HOLE__:
pass
response = yield self.on_POST(
request, room_id, membership_action, txn_id
)
self.txns.store_client_transaction(request, txn_id, response)
defer.returnValue(response) | KeyError | dataset/ETHPy150Open matrix-org/synapse/synapse/rest/client/v1/room.py/RoomMembershipRestServlet.on_PUT |
@defer.inlineCallbacks
def on_PUT(self, request, room_id, event_id, txn_id):
try:
defer.returnValue(
self.txns.get_client_transaction(request, txn_id)
)
except __HOLE__:
pass
response = yield self.on_POST(request, room_id, event_id, txn_id)
self.txns.store_client_transaction(request, txn_id, response)
defer.returnValue(response) | KeyError | dataset/ETHPy150Open matrix-org/synapse/synapse/rest/client/v1/room.py/RoomRedactEventRestServlet.on_PUT |
def get_connection(self, address):
try:
return self.connections[address]
except __HOLE__:
return None | KeyError | dataset/ETHPy150Open hazelcast/hazelcast-python-client/hazelcast/connection.py/ConnectionManager.get_connection |
def get_or_connect(self, address, authenticator=None):
if address in self.connections:
return ImmediateFuture(self.connections[address])
else:
with self._new_connection_mutex:
if address in self._pending_connections:
return self._pending_connections[address]
else:
authenticator = authenticator or self._cluster_authenticator
connection = self._new_connection_func(address,
connection_closed_callback=self._connection_closed,
message_callback=self._client.invoker._handle_client_message)
def on_auth(f):
if f.is_success():
self.logger.info("Authenticated with %s", f.result())
with self._new_connection_mutex:
self.connections[connection.endpoint] = f.result()
self._pending_connections.pop(address)
for on_connection_opened, _ in self._connection_listeners:
if on_connection_opened:
on_connection_opened(f.resul())
return f.result()
else:
self.logger.debug("Error opening %s", connection)
with self._new_connection_mutex:
try:
self._pending_connections.pop(address)
except __HOLE__:
pass
raise f.exception(), None, f.traceback()
future = authenticator(connection).continue_with(on_auth)
if not future.done():
self._pending_connections[address] = future
return future | KeyError | dataset/ETHPy150Open hazelcast/hazelcast-python-client/hazelcast/connection.py/ConnectionManager.get_or_connect |
def close_connection(self, address, cause):
try:
connection = self.connections[address]
connection.close(cause)
except __HOLE__:
logging.warn("No connection with %s was found to close.", address)
return False | KeyError | dataset/ETHPy150Open hazelcast/hazelcast-python-client/hazelcast/connection.py/ConnectionManager.close_connection |
def _inner_acquire(self, blocking, timeout):
# make sure our election parent node exists
if not self.assured_path:
self._ensure_path()
node = None
if self.create_tried:
node = self._find_node()
else:
self.create_tried = True
if not node:
node = self.client.create(self.create_path, self.data,
ephemeral=True, sequence=True)
# strip off path to node
node = node[len(self.path) + 1:]
self.node = node
while True:
self.wake_event.clear()
# bail out with an exception if cancellation has been requested
if self.cancelled:
raise CancelledError()
children = self._get_sorted_children()
try:
our_index = children.index(node)
except __HOLE__: # pragma: nocover
# somehow we aren't in the children -- probably we are
# recovering from a session failure and our ephemeral
# node was removed
raise ForceRetryError()
if self.acquired_lock(children, our_index):
return True
if not blocking:
return False
# otherwise we are in the mix. watch predecessor and bide our time
predecessor = self.path + "/" + children[our_index - 1]
self.client.add_listener(self._watch_session)
try:
if self.client.exists(predecessor, self._watch_predecessor):
self.wake_event.wait(timeout)
if not self.wake_event.isSet():
raise LockTimeout("Failed to acquire lock on %s after %s "
"seconds" % (self.path, timeout))
finally:
self.client.remove_listener(self._watch_session) | ValueError | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/kazoo-2.0/kazoo/recipe/lock.py/Lock._inner_acquire |
def _ensure_path(self):
result = self.client.ensure_path(self.path)
self.assured_path = True
if result is True:
# node did already exist
data, _ = self.client.get(self.path)
try:
leases = int(data.decode('utf-8'))
except (ValueError, __HOLE__):
# ignore non-numeric data, maybe the node data is used
# for other purposes
pass
else:
if leases != self.max_leases:
raise ValueError(
"Inconsistent max leases: %s, expected: %s" %
(leases, self.max_leases)
)
else:
self.client.set(self.path, str(self.max_leases).encode('utf-8')) | TypeError | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/kazoo-2.0/kazoo/recipe/lock.py/Semaphore._ensure_path |
def get_partition_names(self, model):
current = model._partition_manager.current_partition_key
next = model._partition_manager.next_partition_key
current_only = self.options.get('current_only')
next_only = self.options.get('next_only')
if current_only and next_only:
raise CommandError(
u'You cannot specify current_only and next_only togethers')
try:
partition_names = [self.args[1]]
except __HOLE__:
partition_names = None
if current_only:
partition_names = [current()]
elif next_only:
partition_names = [next()]
elif not partition_names:
# No explicit partition names given, use current and next
partition_names = [current(), next()]
return partition_names | IndexError | dataset/ETHPy150Open danfairs/django-parting/parting/management/commands/ensure_partition.py/Command.get_partition_names |
def get_model(self):
try:
model = self.args[0]
except IndexError:
raise CommandError(u'Please supply at least one partitioned model')
try:
module_name, model_name = model.rsplit('.', 1)
except ValueError:
raise CommandError('Bad model name {}'.format(model))
# So - we can't use get_model, because this will be an abstract model.
# Try to grab the model directly from the module.
module = importlib.import_module(module_name)
try:
m = getattr(module, model_name)
except __HOLE__:
raise CommandError('Unknown model {}'.format(model))
return m | AttributeError | dataset/ETHPy150Open danfairs/django-parting/parting/management/commands/ensure_partition.py/Command.get_model |
@logging_level.setter
def logging_level(self, value):
if value is None:
value = self._default_logging_level
if type(value) is str:
try:
level = _levelNames[value.upper()]
except KeyError:
raise ValueError('Unrecognized logging level: %s' % value)
else:
try:
level = int(value)
except __HOLE__:
raise ValueError('Unrecognized logging level: %s' % value)
self.logger.setLevel(level)
return | ValueError | dataset/ETHPy150Open hvandenb/splunk-elasticsearch/search-elasticsearch/bin/splunklib/searchcommands/search_command.py/SearchCommand.logging_level |
@property
def search_results_info(self):
""" Returns the search results info for this command invocation or None.
The search results info object is created from the search results info
file associated with the command invocation. Splunk does not pass the
location of this file by default. You must request it by specifying
these configuration settings in commands.conf:
.. code-block:: python
enableheader=true
requires_srinfo=true
The :code:`enableheader` setting is :code:`true` by default. Hence, you
need not set it. The :code:`requires_srinfo` setting is false by
default. Hence, you must set it.
:return: :class:`SearchResultsInfo`, if :code:`enableheader` and
:code:`requires_srinfo` are both :code:`true`. Otherwise, if either
:code:`enableheader` or :code:`requires_srinfo` are :code:`false`,
a value of :code:`None` is returned.
"""
if self._search_results_info is not None:
return self._search_results_info
try:
info_path = self.input_header['infoPath']
except KeyError:
return None
def convert_field(field):
return (field[1:] if field[0] == '_' else field).replace('.', '_')
def convert_value(field, value):
if field == 'countMap':
split = value.split(';')
value = dict((key, int(value))
for key, value in zip(split[0::2], split[1::2]))
elif field == 'vix_families':
value = ElementTree.fromstring(value)
elif value == '':
value = None
else:
try:
value = float(value)
if value.is_integer():
value = int(value)
except __HOLE__:
pass
return value
with open(info_path, 'rb') as f:
from collections import namedtuple
import csv
reader = csv.reader(f, dialect='splunklib.searchcommands')
fields = [convert_field(x) for x in reader.next()]
values = [convert_value(f, v) for f, v in zip(fields, reader.next())]
search_results_info_type = namedtuple('SearchResultsInfo', fields)
self._search_results_info = search_results_info_type._make(values)
return self._search_results_info | ValueError | dataset/ETHPy150Open hvandenb/splunk-elasticsearch/search-elasticsearch/bin/splunklib/searchcommands/search_command.py/SearchCommand.search_results_info |
def process(self, args=argv, input_file=stdin, output_file=stdout):
""" Processes search results as specified by command arguments.
:param args: Sequence of command arguments
:param input_file: Pipeline input file
:param output_file: Pipeline output file
"""
self.logger.debug(u'%s arguments: %s', type(self).__name__, args)
self._configuration = None
self._output_file = output_file
try:
if len(args) >= 2 and args[1] == '__GETINFO__':
ConfigurationSettings, operation, args, reader = self._prepare(args, input_file=None)
self.parser.parse(args, self)
self._configuration = ConfigurationSettings(self)
writer = splunk_csv.DictWriter(output_file, self, self.configuration.keys(), mv_delimiter=',')
writer.writerow(self.configuration.items())
elif len(args) >= 2 and args[1] == '__EXECUTE__':
self.input_header.read(input_file)
ConfigurationSettings, operation, args, reader = self._prepare(args, input_file)
self.parser.parse(args, self)
self._configuration = ConfigurationSettings(self)
if self.show_configuration:
self.messages.append(
'info_message', '%s command configuration settings: %s'
% (self.name, self._configuration))
writer = splunk_csv.DictWriter(output_file, self)
self._execute(operation, reader, writer)
else:
file_name = path.basename(args[0])
message = (
u'Command {0} appears to be statically configured and static '
u'configuration is unsupported by splunklib.searchcommands. '
u'Please ensure that default/commands.conf contains this '
u'stanza:\n'
u'[{0}]\n'
u'filename = {1}\n'
u'supports_getinfo = true\n'
u'supports_rawargs = true\n'
u'outputheader = true'.format(type(self).name, file_name))
raise NotImplementedError(message)
except __HOLE__:
raise
except:
import traceback
import sys
error_type, error_message, error_traceback = sys.exc_info()
self.logger.error(traceback.format_exc(error_traceback))
origin = error_traceback
while origin.tb_next is not None:
origin = origin.tb_next
filename = origin.tb_frame.f_code.co_filename
lineno = origin.tb_lineno
self.write_error('%s at "%s", line %d : %s', error_type.__name__, filename, lineno, error_message)
exit(1)
return | SystemExit | dataset/ETHPy150Open hvandenb/splunk-elasticsearch/search-elasticsearch/bin/splunklib/searchcommands/search_command.py/SearchCommand.process |
def create_transformer_classes(transformer_spec, config_globals, increment_id):
"""Create an importer and exporter class from a transformer spec.
Args:
transformer_spec: A bulkloader_parser.TransformerEntry.
config_globals: Dict to use to reference globals for code in the config.
increment_id: Method IncrementId(key) which will increment the
auto-allocated ids in the datastore beyond the key.id(). Can be None.
Raises:
InvalidConfig: when the config is invalid.
Returns:
Tuple, (importer class, exporter class), each which is in turn a wrapper
for the GenericImporter/GenericExporter class using a DictConvertor object
configured as per the transformer_spec.
"""
if transformer_spec.connector in CONNECTOR_FACTORIES:
connector_factory = CONNECTOR_FACTORIES[transformer_spec.connector]
elif config_globals and '.' in transformer_spec.connector:
try:
connector_factory = eval(transformer_spec.connector, config_globals)
except (__HOLE__, AttributeError):
raise bulkloader_errors.InvalidConfiguration(
'Invalid connector specified for name=%s. Could not evaluate %s.' %
(transformer_spec.name, transformer_spec.connector))
else:
raise bulkloader_errors.InvalidConfiguration(
'Invalid connector specified for name=%s. Must be either a built in '
'connector ("%s") or a factory method in a module imported via '
'python_preamble.' %
(transformer_spec.name, '", "'.join(CONNECTOR_FACTORIES)))
options = {}
if transformer_spec.connector_options:
options = transformer_spec.connector_options.ToDict()
try:
connector_object = connector_factory(options, transformer_spec.name)
except TypeError:
raise bulkloader_errors.InvalidConfiguration(
'Invalid connector specified for name=%s. Could not initialize %s.' %
(transformer_spec.name, transformer_spec.connector))
dict_to_model_object = DictConvertor(transformer_spec)
class ImporterClass(GenericImporter):
"""Class to pass to the bulkloader, wraps the specificed configuration."""
def __init__(self):
super(self.__class__, self).__init__(
connector_object.generate_import_record,
dict_to_model_object.dict_to_entity,
transformer_spec.name,
increment_id)
importer_class = ImporterClass
class ExporterClass(GenericExporter):
"""Class to pass to the bulkloader, wraps the specificed configuration."""
def __init__(self):
super(self.__class__, self).__init__(
connector_object,
dict_to_model_object.entity_to_dict,
transformer_spec.kind,
transformer_spec.sort_key_from_entity)
exporter_class = ExporterClass
return importer_class, exporter_class | NameError | dataset/ETHPy150Open AppScale/appscale/AppServer/google/appengine/ext/bulkload/bulkloader_config.py/create_transformer_classes |
This is an unofficial HuggingFace upload of the CuBERT ETH Py150 Open benchmarks. This dataset was released along with the paper Learning and Evaluating Contextual Embedding of Source Code.
Here we describe the 6 Python benchmarks we created. All 6 benchmarks were derived from ETH Py150 Open. All examples are stored as sharded text files, and each text line corresponds to a separate example encoded as a JSON object. For each dataset, we release separate training/validation/testing splits along the same boundaries that ETH Py150 Open uses to split its files into the corresponding splits. The fine-tuned models are the checkpoints of each model with the highest validation accuracy.
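Since every example is one JSON object per text line, a shard can be read with just the standard library. The following is only an illustrative sketch; the shard filename is a hypothetical placeholder, and the field names follow the per-benchmark descriptions below.

```python
import json

# Hypothetical shard name; actual shard names depend on the benchmark and split.
shard_path = "train.jsontxt-00000-of-00100"

with open(shard_path, "r", encoding="utf-8") as f:
    for line in f:
        example = json.loads(line)  # one JSON-encoded example per text line
        # For the classification benchmarks, e.g.:
        # example["function"], example["label"], example["info"]
```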
1. Function-docstring classification. Each example has the following JSON fields:
   - `function`: string, the source code of a function as text.
   - `docstring`: string, the documentation string for that function. Note that the string is unquoted. To be able to properly tokenize it with the CuBERT tokenizers, you have to wrap it in quotes first. For example, in Python, use `string_to_tokenize = f'"""{docstring}"""'`.
   - `label`: string, one of ("Incorrect", "Correct"), the label of the example.
   - `info`: string, an unformatted description of how the example was constructed, including the source dataset (always "ETHPy150Open"), the repository and filepath, the function name and, for "Incorrect" examples, the function whose docstring was substituted.
2. Exception classification. Each example has the following JSON fields:
   - `function`: string, the source code of a function as text, in which one exception type has been replaced with the special token `__HOLE__`.
   - `label`: string, one of (`ValueError`, `KeyError`, `AttributeError`, `TypeError`, `OSError`, `IOError`, `ImportError`, `IndexError`, `DoesNotExist`, `KeyboardInterrupt`, `StopIteration`, `AssertionError`, `SystemExit`, `RuntimeError`, `HTTPError`, `UnicodeDecodeError`, `NotImplementedError`, `ValidationError`, `ObjectDoesNotExist`, `NameError`, `None`), the masked exception type. Note that `None` never occurs in the data and will be removed in a future release.
   - `info`: string, an unformatted description of how the example was constructed, including the source dataset (always "ETHPy150Open"), the repository and filepath, and the fully-qualified function name.
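As a hedged illustration (not part of the official tooling), the original function text of an exception-classification example can be approximately recovered by substituting the label back into the masked position, since `__HOLE__` appears once per function:

```python
import json

def unmask_exception(example: dict) -> str:
    """Substitute the masked exception type back into the function text.

    Assumes the record follows the field layout described above, with the
    special token __HOLE__ occurring once in example["function"].
    """
    return example["function"].replace("__HOLE__", example["label"], 1)

# Usage with one JSON-encoded line from a shard (`line` is a hypothetical variable):
# example = json.loads(line)
# print(unmask_exception(example))
```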
3. Variable-misuse classification. Each example has the following JSON fields:
   - `function`: string, the source code of a function as text.
   - `label`: string, one of ("Correct", "Variable misuse"), indicating if this is a buggy or bug-free example.
   - `info`: string, an unformatted description of how the example was constructed, including the source dataset (always "ETHPy150Open"), the repository and filepath, the function, and whether the example is bug-free (marked "original") or the variable substitution that has occurred (e.g., "correct_variable" → "incorrect_variable").
4. Swapped-operand classification. Each example has the following JSON fields:
   - `function`: string, the source code of a function as text.
   - `label`: string, one of ("Correct", "Swapped operands"), indicating if this is a buggy or bug-free example.
   - `info`: string, an unformatted description of how the example was constructed, including the source dataset (always "ETHPy150Open"), the repository and filepath, the function, and whether the example is bug-free (marked "original") or the operand swap that has occurred (e.g., "swapped operands of `not in`").
5. Wrong-binary-operator classification. Each example has the following JSON fields:
   - `function`: string, the source code of a function as text.
   - `label`: string, one of ("Correct", "Wrong binary operator"), indicating if this is a buggy or bug-free example.
   - `info`: string, an unformatted description of how the example was constructed, including the source dataset (always "ETHPy150Open"), the repository and filepath, the function, and whether the example is bug-free (marked "original") or the operator replacement that has occurred (e.g., `==` → `!=`).
6. Variable-misuse localization and repair. Each example has the following JSON fields:
   - `function`: a list of strings, the source code of a function, tokenized with the vocabulary from item b. Note that, unlike the other task datasets, this dataset gives a tokenized function rather than the code as a single string.
   - `target_mask`: a list of integers (0 or 1). If the integer at some position is 1, then the token at the corresponding position of the function token list is a correct repair for the introduced bug. If a variable has been split into multiple tokens, only the first subtoken is marked in this mask. If the example is bug-free, all integers are 0.
   - `error_location_mask`: a list of integers (0 or 1). If the integer at some position is 1, then there is a variable-misuse bug at the corresponding location of the tokenized function. In a bug-free example, the first integer is 1. There is exactly one integer set to 1 for all examples. If a variable has been split into multiple tokens, only the first subtoken is marked in this mask.
   - `candidate_mask`: a list of integers (0 or 1). If the integer at some position is 1, then the variable starting at that position in the tokenized function is a candidate to consider when repairing a bug. Candidates are all variables defined in the function parameters or via variable declarations in the function. If a variable has been split into multiple tokens, only the first subtoken is marked in this mask, for each candidate.
   - `provenance`: string, an unformatted description of how the example was constructed, including the source dataset (always "ETHPy150Open"), the repository and filepath, the function, and whether the example is bug-free (marked "original") or the buggy/repair token positions and variables (e.g., "16/18 `kwargs` → `self`", where 16 is the position of the introduced error and 18 is the location of the repair).
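As a hedged sketch of how these parallel mask lists might be consumed (field names as described above; nothing here is part of the official pipeline), the error location and the candidate/target repair tokens can be read off by index:

```python
def summarize_repair_example(example: dict) -> dict:
    """Extract the error location and the candidate/target repair tokens.

    Assumes `function`, `target_mask`, `error_location_mask` and
    `candidate_mask` are parallel lists of equal length, as described above.
    """
    tokens = example["function"]
    # Exactly one position is set to 1; in bug-free examples it is position 0.
    error_index = example["error_location_mask"].index(1)
    candidates = [t for t, m in zip(tokens, example["candidate_mask"]) if m == 1]
    repairs = [t for t, m in zip(tokens, example["target_mask"]) if m == 1]
    return {
        "error_index": error_index,
        "error_token": tokens[error_index],
        "candidate_variables": candidates,
        "correct_repairs": repairs,  # empty for bug-free examples
    }
```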
@inproceedings{cubert,
author = {Aditya Kanade and
Petros Maniatis and
Gogul Balakrishnan and
Kensen Shi},
title = {Learning and evaluating contextual embedding of source code},
booktitle = {Proceedings of the 37th International Conference on Machine Learning,
{ICML} 2020, 12-18 July 2020},
series = {Proceedings of Machine Learning Research},
publisher = {{PMLR}},
year = {2020},
}