Columns in this dump:
function: string (lengths 79 to 138k)
label: string (20 classes)
info: string (lengths 42 to 261)
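Each row that follows is a triple: a Python function with one exception type masked as __HOLE__, a label naming the exception class that fills the mask, and an info path identifying the source file and symbol. A minimal sketch of how a row might be put back together, assuming the three fields are available as plain strings (the variable and field names are illustrative, not part of the dataset):

def fill_hole(function_src, label):
    # The mask token used in every row below is literally "__HOLE__";
    # substituting the label restores the original except/raise clause.
    return function_src.replace("__HOLE__", label)

# Hypothetical usage with the first row of this dump:
# restored = fill_hole(row["function"], row["label"])  # here label == "StopIteration"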
def generate_tokens(readline): """ The generate_tokens() generator requires one argment, readline, which must be a callable object which provides the same interface as the readline() method of built-in file objects. Each call to the function should return one line of input as a string. Alternately, readline can be a callable function terminating with StopIteration: readline = open(myfile).next # Example of alternate readline The generator produces 5-tuples with these members: the token type; the token string; a 2-tuple (srow, scol) of ints specifying the row and column where the token begins in the source; a 2-tuple (erow, ecol) of ints specifying the row and column where the token ends in the source; and the line on which the token was found. The line passed is the logical line; continuation lines are included. """ lnum = parenlev = continued = 0 namechars, numchars = string.ascii_letters + '_', '0123456789' contstr, needcont = '', 0 contline = None indents = [0] while 1: # loop over lines in stream try: line = readline() except __HOLE__: line = '' lnum = lnum + 1 pos, max = 0, len(line) if contstr: # continued string if not line: raise TokenError, ("EOF in multi-line string", strstart) endmatch = endprog.match(line) if endmatch: pos = end = endmatch.end(0) yield (STRING, contstr + line[:end], strstart, (lnum, end), contline + line) contstr, needcont = '', 0 contline = None elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n': yield (ERRORTOKEN, contstr + line, strstart, (lnum, len(line)), contline) contstr = '' contline = None continue else: contstr = contstr + line contline = contline + line continue elif parenlev == 0 and not continued: # new statement if not line: break column = 0 while pos < max: # measure leading whitespace if line[pos] == ' ': column = column + 1 elif line[pos] == '\t': column = (column/tabsize + 1)*tabsize elif line[pos] == '\f': column = 0 else: break pos = pos + 1 if pos == max: break if line[pos] in '#\r\n': # skip comments or blank lines yield ((NL, COMMENT)[line[pos] == '#'], line[pos:], (lnum, pos), (lnum, len(line)), line) continue if column > indents[-1]: # count indents or dedents indents.append(column) yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line) while column < indents[-1]: if column not in indents: raise IndentationError( "unindent does not match any outer indentation level", ("<tokenize>", lnum, pos, line)) indents = indents[:-1] yield (DEDENT, '', (lnum, pos), (lnum, pos), line) else: # continued statement if not line: raise TokenError, ("EOF in multi-line statement", (lnum, 0)) continued = 0 while pos < max: pseudomatch = pseudoprog.match(line, pos) if pseudomatch: # scan for tokens start, end = pseudomatch.span(1) spos, epos, pos = (lnum, start), (lnum, end), end token, initial = line[start:end], line[start] if initial in numchars or \ (initial == '.' 
and token != '.'): # ordinary number yield (NUMBER, token, spos, epos, line) elif initial in '\r\n': yield (parenlev > 0 and NL or NEWLINE, token, spos, epos, line) elif initial == '#': yield (COMMENT, token, spos, epos, line) elif token in triple_quoted: endprog = endprogs[token] endmatch = endprog.match(line, pos) if endmatch: # all on one line pos = endmatch.end(0) token = line[start:pos] yield (STRING, token, spos, (lnum, pos), line) else: strstart = (lnum, start) # multiple lines contstr = line[start:] contline = line break elif initial in single_quoted or \ token[:2] in single_quoted or \ token[:3] in single_quoted: if token[-1] == '\n': # continued string strstart = (lnum, start) endprog = (endprogs[initial] or endprogs[token[1]] or endprogs[token[2]]) contstr, needcont = line[start:], 1 contline = line break else: # ordinary string yield (STRING, token, spos, epos, line) elif initial in namechars: # ordinary name yield (NAME, token, spos, epos, line) elif initial == '\\': # continued stmt continued = 1 else: if initial in '([{': parenlev = parenlev + 1 elif initial in ')]}': parenlev = parenlev - 1 yield (OP, token, spos, epos, line) else: yield (ERRORTOKEN, line[pos], (lnum, pos), (lnum, pos+1), line) pos = pos + 1 for indent in indents[1:]: # pop remaining indent levels yield (DEDENT, '', (lnum, 0), (lnum, 0), '') yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
StopIteration
dataset/ETHPy150Open babble/babble/include/jython/Lib/tokenize.py/generate_tokens
def mean_tl(tl, surfaces):
    try:
        tau_axis = tl.ndim - 1
    except __HOLE__:
        tau_axis = 0
    tau = 1.0 / (10.0**(tl/10.0))
    return 10.0 * np.log10(1.0 / np.average(tau, tau_axis, surfaces))
AttributeError
dataset/ETHPy150Open python-acoustics/python-acoustics/acoustics/utils.py/mean_tl
def _cusp_solver(M, parameters): cache_key = lambda t, p: (t, p['ksp_type'], p['pc_type'], p['ksp_rtol'], p['ksp_atol'], p['ksp_max_it'], p['ksp_gmres_restart'], p['ksp_monitor']) module = _cusp_cache.get(cache_key(M.ctype, parameters)) if module: return module import codepy.toolchain from cgen import FunctionBody, FunctionDeclaration from cgen import Block, Statement, Include, Value from codepy.bpl import BoostPythonModule from codepy.cuda import CudaModule gcc_toolchain = codepy.toolchain.guess_toolchain() nvcc_toolchain = codepy.toolchain.guess_nvcc_toolchain() if 'CUSP_HOME' in os.environ: nvcc_toolchain.add_library('cusp', [os.environ['CUSP_HOME']], [], []) host_mod = BoostPythonModule() nvcc_mod = CudaModule(host_mod) nvcc_includes = ['thrust/device_vector.h', 'thrust/fill.h', 'cusp/csr_matrix.h', 'cusp/krylov/cg.h', 'cusp/krylov/bicgstab.h', 'cusp/krylov/gmres.h', 'cusp/precond/diagonal.h', 'cusp/precond/smoothed_aggregation.h', 'cusp/precond/ainv.h', 'string'] nvcc_mod.add_to_preamble([Include(s) for s in nvcc_includes]) nvcc_mod.add_to_preamble([Statement('using namespace std')]) # We're translating PETSc preconditioner types to CUSP diag = Statement('cusp::precond::diagonal< ValueType, cusp::device_memory >M(A)') ainv = Statement( 'cusp::precond::scaled_bridson_ainv< ValueType, cusp::device_memory >M(A)') amg = Statement( 'cusp::precond::smoothed_aggregation< IndexType, ValueType, cusp::device_memory >M(A)') none = Statement( 'cusp::identity_operator< ValueType, cusp::device_memory >M(nrows, ncols)') preconditioners = { 'diagonal': diag, 'jacobi': diag, 'ainv': ainv, 'ainvcusp': ainv, 'amg': amg, 'hypre': amg, 'none': none, None: none } try: precond_call = preconditioners[parameters['pc_type']] except __HOLE__: raise RuntimeError("Cusp does not support preconditioner type %s" % parameters['pc_type']) solvers = { 'cg': Statement('cusp::krylov::cg(A, x, b, monitor, M)'), 'bicgstab': Statement('cusp::krylov::bicgstab(A, x, b, monitor, M)'), 'gmres': Statement('cusp::krylov::gmres(A, x, b, %(ksp_gmres_restart)d, monitor, M)' % parameters) } try: solve_call = solvers[parameters['ksp_type']] except KeyError: raise RuntimeError("Cusp does not support solver type %s" % parameters['ksp_type']) monitor = 'monitor(b, %(ksp_max_it)d, %(ksp_rtol)g, %(ksp_atol)g)' % parameters nvcc_function = FunctionBody( FunctionDeclaration(Value('void', '__cusp_solve'), [Value('CUdeviceptr', '_rowptr'), Value('CUdeviceptr', '_colidx'), Value('CUdeviceptr', '_csrdata'), Value('CUdeviceptr', '_b'), Value('CUdeviceptr', '_x'), Value('int', 'nrows'), Value('int', 'ncols'), Value('int', 'nnz')]), Block([ Statement('typedef int IndexType'), Statement('typedef %s ValueType' % M.ctype), Statement( 'typedef typename cusp::array1d_view< thrust::device_ptr<IndexType> > indices'), Statement( 'typedef typename cusp::array1d_view< thrust::device_ptr<ValueType> > values'), Statement( 'typedef cusp::csr_matrix_view< indices, indices, values, IndexType, ValueType, cusp::device_memory > matrix'), Statement('thrust::device_ptr< IndexType > rowptr((IndexType *)_rowptr)'), Statement('thrust::device_ptr< IndexType > colidx((IndexType *)_colidx)'), Statement('thrust::device_ptr< ValueType > csrdata((ValueType *)_csrdata)'), Statement('thrust::device_ptr< ValueType > d_b((ValueType *)_b)'), Statement('thrust::device_ptr< ValueType > d_x((ValueType *)_x)'), Statement('indices row_offsets(rowptr, rowptr + nrows + 1)'), Statement('indices column_indices(colidx, colidx + nnz)'), Statement('values matrix_values(csrdata, csrdata + 
nnz)'), Statement('values b(d_b, d_b + nrows)'), Statement('values x(d_x, d_x + ncols)'), Statement('thrust::fill(x.begin(), x.end(), (ValueType)0)'), Statement( 'matrix A(nrows, ncols, nnz, row_offsets, column_indices, matrix_values)'), Statement('cusp::%s_monitor< ValueType > %s' % ('verbose' if parameters['ksp_monitor'] else 'default', monitor)), precond_call, solve_call ])) host_mod.add_to_preamble([Include('boost/python/extract.hpp'), Include('string')]) host_mod.add_to_preamble([Statement('using namespace boost::python')]) host_mod.add_to_preamble([Statement('using namespace std')]) nvcc_mod.add_function(nvcc_function) host_mod.add_function( FunctionBody( FunctionDeclaration(Value('void', 'solve'), [Value('object', '_rowptr'), Value('object', '_colidx'), Value('object', '_csrdata'), Value('object', '_b'), Value('object', '_x'), Value('object', '_nrows'), Value('object', '_ncols'), Value('object', '_nnz')]), Block([ Statement( 'CUdeviceptr rowptr = extract<CUdeviceptr>(_rowptr.attr("gpudata"))'), Statement( 'CUdeviceptr colidx = extract<CUdeviceptr>(_colidx.attr("gpudata"))'), Statement( 'CUdeviceptr csrdata = extract<CUdeviceptr>(_csrdata.attr("gpudata"))'), Statement('CUdeviceptr b = extract<CUdeviceptr>(_b.attr("gpudata"))'), Statement('CUdeviceptr x = extract<CUdeviceptr>(_x.attr("gpudata"))'), Statement('int nrows = extract<int>(_nrows)'), Statement('int ncols = extract<int>(_ncols)'), Statement('int nnz = extract<int>(_nnz)'), Statement('__cusp_solve(rowptr, colidx, csrdata, b, x, nrows, ncols, nnz)') ]))) nvcc_toolchain.cflags.append('-arch') nvcc_toolchain.cflags.append('sm_20') nvcc_toolchain.cflags.append('-O3') module = nvcc_mod.compile(gcc_toolchain, nvcc_toolchain, debug=configuration["debug"]) _cusp_cache[cache_key(M.ctype, parameters)] = module return module # FIXME: inherit from base while device gives us the PETSc solver
KeyError
dataset/ETHPy150Open OP2/PyOP2/pyop2/cuda.py/_cusp_solver
def get(self, entity_id=None):
    """HTML GET handler.

    Check the query parameters for the ID of the monster to be displayed.
    If found, display that monster using the standard template."""

    template_values = self.build_template_values()

    if entity_id:
        try:
            monster = Monster.get_by_id_safe(int(entity_id), template_values[handlers.base.PROFILE_KEY])
        except __HOLE__:
            return self.not_found()
        if not monster:
            self.not_found()
            return
        template_values['monster'] = monster
    else:
        self.redirect("/view/"+str(Monster.all().order("-creation_time").get().key().id()))
        return

    if handlers.base.PROFILE_KEY in template_values:
        template_values['vote'] = Vote.all().filter("monster = ", template_values['monster']).filter("voter = ", template_values[handlers.base.PROFILE_KEY]).get()

    template_values['edit_url'] = self.uri_for('monster.edit', entity_id=r'%s')
    template_values['delete_url'] = self.uri_for('monster.delete', entity_id=entity_id)
    template_values['profile_url'] = self.uri_for('profile', profile_id=monster.creator.key().id())

    template = configuration.site.jinja_environment.get_template('monster/view.html')
    self.response.write(template.render(template_values))
ValueError
dataset/ETHPy150Open Sagelt/dungeon-world-codex/handlers/monster.py/ViewHandler.get
def DropPrivileges():
    """Attempt to drop privileges if required."""
    if config_lib.CONFIG["Server.username"]:
        try:
            os.setuid(pwd.getpwnam(config_lib.CONFIG["Server.username"]).pw_uid)
        except (KeyError, __HOLE__):
            logging.exception("Unable to switch to user %s", config_lib.CONFIG["Server.username"])
            raise
OSError
dataset/ETHPy150Open google/grr/grr/lib/startup.py/DropPrivileges
def resend_confirmation():
    """View for resending an email confirmation email. """
    form = ResendConfirmationForm(request.form)
    if request.method == 'POST':
        if form.validate():
            clean_email = form.email.data
            user = get_user(email=clean_email)
            if not user:
                return {'form': form}
            try:
                send_confirm_email(user, clean_email)
            except __HOLE__:  # already confirmed, redirect to dashboard
                status_message = 'Email has already been confirmed.'
                kind = 'warning'
            else:
                status_message = 'Resent email to {0}'.format(clean_email)
                kind = 'success'
            status.push_status_message(status_message, kind=kind, trust=False)
        else:
            forms.push_errors_to_status(form.errors)
    # Don't go anywhere
    return {'form': form}

# TODO: shrink me
KeyError
dataset/ETHPy150Open CenterForOpenScience/osf.io/framework/auth/views.py/resend_confirmation
@staticmethod def _new_ingress_rule(ip_protocol, from_port, to_port, group_id=None, cidr=None): values = {} if group_id: values['group_id'] = group_id # Open everything if an explicit port range or type/code are not # specified, but only if a source group was specified. ip_proto_upper = ip_protocol.upper() if ip_protocol else '' if (ip_proto_upper == 'ICMP' and from_port is None and to_port is None): from_port = -1 to_port = -1 elif (ip_proto_upper in ['TCP', 'UDP'] and from_port is None and to_port is None): from_port = 1 to_port = 65535 elif cidr: values['cidr'] = cidr if ip_protocol and from_port is not None and to_port is not None: ip_protocol = str(ip_protocol) try: # Verify integer conversions from_port = int(from_port) to_port = int(to_port) except __HOLE__: if ip_protocol.upper() == 'ICMP': raise exception.InvalidInput(reason=_("Type and" " Code must be integers for ICMP protocol type")) else: raise exception.InvalidInput(reason=_("To and From ports " "must be integers")) if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']: raise exception.InvalidIpProtocol(protocol=ip_protocol) # Verify that from_port must always be less than # or equal to to_port if (ip_protocol.upper() in ['TCP', 'UDP'] and (from_port > to_port)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="Former value cannot" " be greater than the later") # Verify valid TCP, UDP port ranges if (ip_protocol.upper() in ['TCP', 'UDP'] and (from_port < 1 or to_port > 65535)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="Valid %s ports should" " be between 1-65535" % ip_protocol.upper()) # Verify ICMP type and code if (ip_protocol.upper() == "ICMP" and (from_port < -1 or from_port > 255 or to_port < -1 or to_port > 255)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="For ICMP, the" " type:code must be valid") values['protocol'] = ip_protocol values['from_port'] = from_port values['to_port'] = to_port else: # If cidr based filtering, protocol and ports are mandatory if cidr: return None return values
ValueError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/network/security_group/security_group_base.py/SecurityGroupBase._new_ingress_rule
def load_tests(loader, tests, pattern):
    """Specify which test cases to run."""
    del pattern
    suite = unittest.TestSuite()
    # add all TestCase classes from this (current) module
    for attr in globals().values():
        try:
            if not issubclass(attr, unittest.TestCase):
                continue  # not subclass of TestCase
        except __HOLE__:
            continue  # not a class
        tests = loader.loadTestsFromTestCase(attr)
        suite.addTests(tests)
    # add doctests defined in DOCTEST_MODULES
    for doctest_module in DOCTEST_MODULES:
        suite.addTest(doctest.DocTestSuite(doctest_module))
    return suite
TypeError
dataset/ETHPy150Open django-ddp/django-ddp/tests/django_todos/tests.py/load_tests
def _run(nodes, get_command, env, progress_status): # nodes: dictionary of node names mapped to node objects # Node objects can be anything. They're just passed to the get_command function # get_command: function that takes a node object and returns a command to execute via Popen # env: name of the environment # progress_status: the status to broadcast to the websockets when the command is executing env_greenlets = greenlets.get(env) if env_greenlets is None: greenlets[env] = env_greenlets = { } for node in nodes: try: del env_greenlets[node] except KeyError: pass for hostname in nodes: node_object = nodes[hostname] p = subprocess.Popen(get_command(node_object), shell = False, stdout = subprocess.PIPE, stderr = subprocess.PIPE) p.chunks = [] # Chunks of stdout data fcntl.fcntl(p.stdout, fcntl.F_SETFL, os.O_NONBLOCK) # make the file nonblocking def read(host, process): broadcast(env, { 'host': host, 'status': progress_status }) while True: chunk = None try: chunk = process.stdout.read(4096) if not chunk: break except __HOLE__, e: chunk = None if e[0] != errno.EAGAIN: raise sys.exc_clear() if chunk: process.chunks.append(chunk) broadcast(env, { 'host': host, 'data': chunk, }) gevent.socket.wait_read(process.stdout.fileno()) process.stdout.close() process.wait() errors = process.stderr.read() process.chunks.append(errors) broadcast(env, { 'host': host, 'status': 'ready' if process.returncode == 0 else 'error', 'data': errors }) if len(processes(env, only_executing = True)) <= 1: broadcast(env, { 'status': 'ready' }) return process.returncode greenlet = gevent.spawn(read, host = hostname, process = p) greenlet.process = p env_greenlets[hostname] = greenlet broadcast(env, { 'status': progress_status }) return ujson.encode({ 'status': progress_status if len(nodes) > 0 else 'ready' })
IOError
dataset/ETHPy150Open sidebolt/chefdash/chefdash/__init__.py/_run
def memoize_lookuptable(func, *args, **kwargs):
    """A decorator that memoizes the results of a decorated function.
    Instead of checking if key is in keys, it attempts to access the
    property directly, like a typical lookup table."""
    funcname = func.__name__

    @wraps(func)
    def _inner(*args, **kwargs):
        key = (funcname, args)
        try:
            key = (funcname, args)
            return memoized[key]
        except __HOLE__:
            res = func(*args, **kwargs)
            memoized[key] = res
            return res
    return _inner
KeyError
dataset/ETHPy150Open christabor/MoAL/MOAL/maths/applied/optimization/memoization.py/memoize_lookuptable
def test_missing_stats_file(self):
    stats_file = settings.WEBPACK_LOADER[DEFAULT_CONFIG]['STATS_FILE']
    if os.path.exists(stats_file):
        os.remove(stats_file)
    try:
        get_loader(DEFAULT_CONFIG).get_assets()
    except __HOLE__ as e:
        expected = (
            'Error reading {0}. Are you sure webpack has generated the '
            'file and the path is correct?'
        ).format(stats_file)
        self.assertIn(expected, str(e))
IOError
dataset/ETHPy150Open owais/django-webpack-loader/tests/app/tests/test_webpack.py/LoaderTestCase.test_missing_stats_file
def check_rows(prediction_rows, test_rows):
    for row in prediction_rows:
        check_row = next(test_rows)
        assert len(check_row) == len(row)
        for index in range(len(row)):
            dot = row[index].find(".")
            if dot > 0:
                try:
                    decs = min(len(row[index]), len(check_row[index])) - dot - 1
                    row[index] = round(float(row[index]), decs)
                    check_row[index] = round(float(check_row[index]), decs)
                except __HOLE__:
                    pass
            assert check_row[index] == row[index], ("Got: %s/ Expected: %s" % (row, check_row))

#@step(r'the batch prediction file is like "(.*)"')
ValueError
dataset/ETHPy150Open bigmlcom/python/bigml/tests/create_batch_prediction_steps.py/check_rows
def module_has_submodule(package, module_name): """See if 'module' is in 'package'.""" name = ".".join([package.__name__, module_name]) try: # None indicates a cached miss; see mark_miss() in Python/import.c. return sys.modules[name] is not None except KeyError: pass try: package_path = package.__path__ # No __path__, then not a package. except AttributeError: # Since the remainder of this function assumes that we're dealing with # a package (module with a __path__), so if it's not, then bail here. return False for finder in sys.meta_path: if finder.find_module(name, package_path): return True for entry in package_path: try: # Try the cached finder. finder = sys.path_importer_cache[entry] if finder is None: # Implicit import machinery should be used. try: file_, _, _ = imp.find_module(module_name, [entry]) if file_: file_.close() return True except __HOLE__: continue # Else see if the finder knows of a loader. elif finder.find_module(name): return True else: continue except KeyError: # No cached finder, so try and make one. for hook in sys.path_hooks: try: finder = hook(entry) # XXX Could cache in sys.path_importer_cache if finder.find_module(name): return True else: # Once a finder is found, stop the search. break except ImportError: # Continue the search for a finder. continue else: # No finder found. # Try the implicit import machinery if searching a directory. if os.path.isdir(entry): try: file_, _, _ = imp.find_module(module_name, [entry]) if file_: file_.close() return True except ImportError: pass # XXX Could insert None or NullImporter else: # Exhausted the search, so the module cannot be found. return False
ImportError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/django/utils/module_loading.py/module_has_submodule
def git_version():
    cmd = ['git', 'describe', '--abbrev=4']
    try:
        proc = sp.Popen(cmd, stdout=sp.PIPE)
        stdout = proc.communicate()[0].rstrip('\n')
    except __HOLE__:
        sys.stderr.write('git not found: leaving __version__ alone\n')
        return __version__
    if proc.returncode != 0:
        sys.stderr.write('git describe failed: leaving __version__ alone\n')
        return __version__
    ver = stdout.lstrip('mkvtomp4-v')
    write_version(ver)
    try:
        proc = sp.Popen(['git', 'update-index', '-q', '--refresh'])
        proc.communicate()
    except OSError:
        return ver
    if proc.returncode != 0:
        return ver
    try:
        proc = sp.Popen(['git', 'diff-index', '--name-only', 'HEAD', '--'], stdout=sp.PIPE)
        stdout = proc.communicate()[0]
    except OSError:
        sys.stderr.write('git diff-index failed\n')
    if stdout.strip('\n'):
        ver = ver + '-dirty'
    write_version(ver)
    return ver
OSError
dataset/ETHPy150Open gavinbeatty/mkvtomp4/setup.py/git_version
def delete_digram(self):
    """Removes the digram from the hash table."""
    try:
        if digrams[self.digram()] == self:
            digrams.pop(self.digram())
    except __HOLE__:
        pass
KeyError
dataset/ETHPy150Open ResilientScience/wot/wot/sequitur.py/Symbol.delete_digram
def __init__(self, domain, xmlns, app_id=None):
    self.domain = domain
    self.xmlns = xmlns
    if app_id:
        self.app_id = app_id
    else:
        form = get_form_analytics_metadata(domain, app_id, xmlns)
        try:
            self.app_id = form['app']['id'] if form else None
        except __HOLE__:
            self.app_id = None
KeyError
dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/reports/display.py/_FormType.__init__
def getPostgreSQLRootPath():
    try:
        postgreSQLRootPath = os.environ['ops_postgresqlInstallDIR']
        return postgreSQLRootPath
    except __HOLE__, e:
        return None
KeyError
dataset/ETHPy150Open Esri/ops-server-config/SupportFiles/OpsServerConfig.py/getPostgreSQLRootPath
def __new__(meta, name, bases, dct):
    actions = dct.pop('ACTIONS', [])
    for on_action, actioned, disconnect in actions:
        def on_f(self, f, args=None, kw=None):
            args = args if args is not None else tuple()
            kw = kw if kw is not None else dict()
            cid = self.cid
            self._callbacks[on_action][cid] = (f, args, kw)
            self.cid += 1
            return cid
        on_f.__doc__ = on_f_docstring.format(actioned=actioned)
        on_f.__name__ = on_action
        dct[on_action] = on_f

        def fed(self):
            for f, args, kw in self._callbacks[on_action].values():
                f(*args, **kw)
        fed.__doc__ = fed_docstring.format(on_action=on_action)
        fed.__name__ = actioned
        dct[actioned] = fed

        def disf(self, cid):
            try:
                del self._callbacks[on_action][cid]
            except __HOLE__:
                pass
        disf.__doc__ = disf_docstring.format(on_action=on_action)
        disf.__name__ = disconnect
        dct[disconnect] = disf

    return super(ActionableMeta, meta).__new__(meta, name, bases, dct)
KeyError
dataset/ETHPy150Open mrterry/yoink/yoink/has_actions.py/ActionableMeta.__new__
def get_credential(section='default'):
    if os.environ.get('TRAVIS_TEST'):
        username = os.environ.get('TEST_USER_USERNAME')
        password = os.environ.get('TEST_USER_PASSWORD')
        if username is None or password is None:
            msg = 'No credentials environment variables found.'
            raise ConfigError(msg)
    elif CREDENTIALS is not None:
        CONFIG.read(CREDENTIALS)
        if CONFIG.has_section(section):
            items = dict(CONFIG.items(section))
            try:
                username = items['username']
                password = items['password']
            except __HOLE__ as e:
                msg = 'Key "%s" not found in credentials file.' % e.args[0]
                raise ConfigError(msg)
        else:
            msg = 'No section named "%s" found in credentials file.' % section
            raise ConfigError(msg)
    else:
        msg = 'No credentials file found.'
        raise ConfigError(msg)
    return {'username': username, 'password': password}
KeyError
dataset/ETHPy150Open shichao-an/115wangpan/u115/conf.py/get_credential
def test_import_lock_fork(self):
    import_started = threading.Event()
    fake_module_name = "fake test module"
    partial_module = "partial"
    complete_module = "complete"
    def importer():
        imp.acquire_lock()
        sys.modules[fake_module_name] = partial_module
        import_started.set()
        time.sleep(0.01)  # Give the other thread time to try and acquire.
        sys.modules[fake_module_name] = complete_module
        imp.release_lock()
    t = threading.Thread(target=importer)
    t.start()
    import_started.wait()
    pid = os.fork()
    try:
        if not pid:
            m = __import__(fake_module_name)
            if m == complete_module:
                os._exit(0)
            else:
                os._exit(1)
        else:
            t.join()
            # Exitcode 1 means the child got a partial module (bad.) No
            # exitcode (but a hang, which manifests as 'got pid 0')
            # means the child deadlocked (also bad.)
            self.wait_impl(pid)
    finally:
        try:
            os.kill(pid, signal.SIGKILL)
        except __HOLE__:
            pass
OSError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_fork1.py/ForkTest.test_import_lock_fork
def format_unencoded(self, tokensource, outfile): # TODO: add support for background colors t2n = self.ttype2name cp = self.commandprefix if self.full: realoutfile = outfile outfile = StringIO() outfile.write(r'\begin{Verbatim}[commandchars=\\\{\}') if self.linenos: start, step = self.linenostart, self.linenostep outfile.write(',numbers=left' + (start and ',firstnumber=%d' % start or '') + (step and ',stepnumber=%d' % step or '')) if self.mathescape or self.texcomments: outfile.write(r',codes={\catcode`\$=3\catcode`\^=7\catcode`\_=8}') if self.verboptions: outfile.write(',' + self.verboptions) outfile.write(']\n') for ttype, value in tokensource: if ttype in Token.Comment: if self.texcomments: # Try to guess comment starting lexeme and escape it ... start = value[0:1] for i in xrange(1, len(value)): if start[0] != value[i]: break start += value[i] value = value[len(start):] start = escape_tex(start, self.commandprefix) # ... but do not escape inside comment. value = start + value elif self.mathescape: # Only escape parts not inside a math environment. parts = value.split('$') in_math = False for i, part in enumerate(parts): if not in_math: parts[i] = escape_tex(part, self.commandprefix) in_math = not in_math value = '$'.join(parts) else: value = escape_tex(value, self.commandprefix) else: value = escape_tex(value, self.commandprefix) styles = [] while ttype is not Token: try: styles.append(t2n[ttype]) except __HOLE__: # not in current style styles.append(_get_ttype_name(ttype)) ttype = ttype.parent styleval = '+'.join(reversed(styles)) if styleval: spl = value.split('\n') for line in spl[:-1]: if line: outfile.write("\\%s{%s}{%s}" % (cp, styleval, line)) outfile.write('\n') if spl[-1]: outfile.write("\\%s{%s}{%s}" % (cp, styleval, spl[-1])) else: outfile.write(value) outfile.write('\\end{Verbatim}\n') if self.full: realoutfile.write(DOC_TEMPLATE % dict(docclass = self.docclass, preamble = self.preamble, title = self.title, encoding = self.encoding or 'latin1', styledefs = self.get_style_defs(), code = outfile.getvalue()))
KeyError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Pygments-1.3.1/pygments/formatters/latex.py/LatexFormatter.format_unencoded
def testCatchStringException(self):
    # str exceptions were removed since 2.6
    if sys.version_info >= (2, 6):
        return
    try:
        raise "test"
    except "test":
        return
    except __HOLE__, e:
        self.fail(e)
    self.fail('"test" was not caught or raised')
TypeError
dataset/ETHPy150Open pyjs/pyjs/examples/libtest/ExceptionTest.py/ExceptionTest.testCatchStringException
def testSyntax(self): try: pass except KeyError, e: pass except (TypeError, LookupError), e: pass except: pass finally: pass try: a = 1 except: a = 2 else: a = 3 finally: self.assertEqual(a, 3) a = 4 self.assertEqual(a, 4) try: a = 11 raise KeyError('test') except: a = 12 else: a = 13 finally: self.assertEqual(a, 12) a = 14 self.assertEqual(a, 14) try: a = 1 finally: a = 2 self.assertEqual(a, 2) try: a = 1 try: b = 1 except: b = 2 else: b = 3 finally: self.assertEqual(b, 3) b = 4 except: a = 2 else: a = 3 finally: self.assertEqual(a, 3) a = 4 self.assertEqual(a, 4) self.assertEqual(b, 4) sys.exc_clear() try: raise self.fail("No error raised on 'raise' after 'sys.exc_clear()'") except TypeError, e: # use message which works for both Python 2.5 and 2.6 self.assertTrue(e.args[0].startswith('exceptions must be'), e.args[0]) except: e = sys.exc_info() self.fail('TypeError expected, got %s' % e[0]) try: raise KeyError, 'test' self.fail('No error raised') except KeyError, e: self.assertEqual(e.args[0], 'test') except: e = sys.exc_info() self.fail('KeyError expected, got %s' % e[0]) e = e[1] try: raise except: err = sys.exc_info() self.assertEqual(e.args[0], err[1].args[0]) raise_errors = [KeyError('KeyError'), TypeError('TypeError'), AttributeError('AttributeError'), LookupError('LookupError')] raised_errors = [] for err in raise_errors: try: raise err self.fail("Failed to raise exception") except (KeyError, TypeError), e1: raised_errors.append(e1) if isinstance(e1, KeyError): self.assertEqual(e1.args[0], 'KeyError') elif isinstance(e1, TypeError): self.assertEqual(e1.args[0], 'TypeError') else: self.fail('neither KeyError nor TypeError in except (KeyError, TypeError)') except __HOLE__, e2: raised_errors.append(e2) self.assertEqual(e2.args[0], 'AttributeError') except: e3 = sys.exc_info()[1] raised_errors.append(e3) self.assertEqual(e3.args[0], 'LookupError') self.assertEqual(len(raised_errors), len(raise_errors)) try: try: raise TypeError('TypeError') except KeyError, e: self.fail("Got KeyError") self.fail("TypeError should not be ignored") except TypeError, e: self.assertEqual(e.args[0], 'TypeError')
AttributeError
dataset/ETHPy150Open pyjs/pyjs/examples/libtest/ExceptionTest.py/ExceptionTest.testSyntax
def testAssertionError(self):
    try:
        assert True
        self.assertTrue(True)
    except AssertionError, e:
        self.fail("Got an unexpected assertion error: %r" % e)
    try:
        assert False
        self.fail("AssertionError expected")
    except AssertionError, e:
        self.assertTrue(True)
    try:
        assert False, 'reason'
        self.fail("AssertionError expected")
    except __HOLE__, e:
        self.assertEqual(e.args[0], 'reason')
AssertionError
dataset/ETHPy150Open pyjs/pyjs/examples/libtest/ExceptionTest.py/ExceptionTest.testAssertionError
def ConfigureLogging(debug_level=logging.INFO, show_level=True, stderr=True, syslog=True, facility=None): """Sets up logging defaults for the root logger. LaunchDaemons should use syslog and disable stderr (or send it to /dev/null in the launchd plist). This ensures that multi-line logs (such as those from logging.exception) are not split and we don't get dups. Other programs should use both stderr and syslog (the default). Possible syslog facility names come from logging.handlers.SysLogHandler.facility_names.keys(): ['ftp', 'daemon', 'uucp', 'security', 'local7', 'local4', 'lpr', 'auth', 'local0', 'cron', 'syslog', 'user', 'mail', 'local5', 'kern', 'news', 'local6', 'local1', 'authpriv', 'local3', 'local2'] Args: debug_level: python logging level show_level: show the logging level in the message for stderr stderr: If true, log to stderr syslog: If true log to syslog facility: string, syslog facility to use Raises: LogConfigurationError: if no handers are set """ if not stderr and not syslog: raise LogConfigurationError('Neither syslog nor stdout handlers set.') if facility and not syslog: raise LogConfigurationError('facility can only be used with syslog.') logger = logging.getLogger() # Clear any existing handlers logger.handlers = [] logger.setLevel(debug_level) if syslog: # Get the default syslog facility facility_id = logging.handlers.SysLogHandler.LOG_USER if facility: try: facility_id = logging.handlers.SysLogHandler.facility_names[facility] except __HOLE__: logging.error('%s is an invalid facility, using default.', facility) try: syslog_handler = MultilineSysLogHandler(facility=facility_id) _ConfigureHandler(syslog_handler, logger, LOG_FORMAT_SYSLOG, debug_level) except socket.error: print >>sys.stderr, 'Warning: Could not configure syslog based logging.' stderr = True if stderr: stderr_handler = logging.StreamHandler() if show_level: _ConfigureHandler(stderr_handler, logger, LOG_FORMAT_STDERR_LEVEL, debug_level) else: _ConfigureHandler(stderr_handler, logger, LOG_FORMAT_STDERR, debug_level) logging.debug('Logging enabled at level %s', debug_level)
KeyError
dataset/ETHPy150Open google/macops/gmacpyutil/gmacpyutil/gmacpyutil.py/ConfigureLogging
def _RunProcess(cmd, stdinput=None, env=None, cwd=None, sudo=False, sudo_password=None, background=False, stream_output=False, timeout=0, waitfor=0): """Executes cmd using suprocess. Args: cmd: An array of strings as the command to run stdinput: An optional sting as stdin env: An optional dictionary as the environment cwd: An optional string as the current working directory sudo: An optional boolean on whether to do the command via sudo sudo_password: An optional string of the password to use for sudo background: Launch command in background mode stream_output: An optional boolean on whether to send output to the screen timeout: An optional int or float; if >0, Exec() will stop waiting for output after timeout seconds and kill the process it started. Return code might be undefined, or -SIGTERM, use waitfor to make sure to obtain it. values <1 will be crudely rounded off because of select() sleep time. waitfor: An optional int or float, if >0, Exec() will wait waitfor seconds before asking for the process exit status one more time. Returns: Tuple: two strings and an integer: (stdout, stderr, returncode); stdout/stderr may also be None. If the process is set to launch in background mode, a tuple of (<subprocess.Popen object>, None, None) is returned, in order to be able to read from its pipes *and* use poll() to check when it is finished Raises: GmacpyutilException: If both stdinput and sudo_password are specified GmacpyutilException: If both sudo and background are specified GmacpyutilException: If both timeout and background, stream_output, sudo, or sudo_password, or stdinput are specified GmacpyutilException: If timeout is less than 0 GmacpyutilException: If subprocess raises an OSError """ if timeout and (background or stream_output or sudo or sudo_password or stdinput): raise GmacpyutilException('timeout is not compatible with background, ' 'stream_output, sudo, sudo_password, or ' 'stdinput.') if waitfor and not timeout: raise GmacpyutilException('waitfor only valid with timeout.') if timeout < 0: raise GmacpyutilException('timeout must be greater than 0.') if stream_output: stdoutput = None stderror = None else: stdoutput = subprocess.PIPE stderror = subprocess.PIPE if sudo and not background: sudo_cmd = ['sudo'] if sudo_password and not stdinput: # Set sudo to get password from stdin sudo_cmd.extend(['-S']) stdinput = sudo_password + '\n' elif sudo_password and stdinput: raise GmacpyutilException('stdinput and sudo_password are mutually ' 'exclusive') else: sudo_cmd.extend(['-p', "%u's password is required for admin access: "]) sudo_cmd.extend(cmd) cmd = sudo_cmd elif sudo and background: raise GmacpyutilException('sudo is not compatible with background.') environment = os.environ.copy() if env is not None: environment.update(env) try: task = subprocess.Popen(cmd, stdout=stdoutput, stderr=stderror, stdin=subprocess.PIPE, env=environment, cwd=cwd) except __HOLE__, e: raise GmacpyutilException('Could not execute: %s' % e.strerror) if timeout == 0: # communicate() will wait until the process is finished, so if we are in # background mode, just send the input and take the pipe objects as output. 
if not background: (stdout, stderr) = task.communicate(input=stdinput) return (stdout, stderr, task.returncode) else: if stdinput: task.stdin.write(stdinput) return task else: # TODO(user): See if it's possible to pass stdinput when using a timeout inactive = 0 stdoutput = [] stderror = [] SetFileNonBlocking(task.stdout) SetFileNonBlocking(task.stderr) returncode = None while returncode is None: rlist, _, _ = select.select([task.stdout, task.stderr], [], [], 1.0) if not rlist: inactive += 1 if inactive >= timeout: logging.error('cmd has timed out: %s', cmd) logging.error('Sending SIGTERM to PID=%s', task.pid) os.kill(task.pid, signal.SIGTERM) break # note: this is a hard timeout, we don't read() again else: inactive = 0 for fd in rlist: if fd is task.stdout: stdoutput.append(fd.read()) elif fd is task.stderr: stderror.append(fd.read()) returncode = task.poll() # if the process was just killed, wait for waitfor seconds. if inactive >= timeout and waitfor > 0: time.sleep(waitfor) # attempt to obtain returncode one last chance returncode = task.poll() stdoutput = ''.join(stdoutput) stderror = ''.join(stderror) return (stdoutput, stderror, task.returncode)
OSError
dataset/ETHPy150Open google/macops/gmacpyutil/gmacpyutil/gmacpyutil.py/_RunProcess
def GetAirportInfo(include_nearby_networks=False): """Returns information about current AirPort connection. Args: include_nearby_networks: bool, if True a nearby_networks key will be in the returned dict with a list of detected SSIDs nearby. Returns: dict: key value pairs from CWInterface data. If an error occurs or there is no Wi-Fi interface: the dict will be empty. """ airport_info = {} try: objc.loadBundle('CoreWLAN', globals(), bundle_path='/System/Library/Frameworks/CoreWLAN.framework') except ImportError: logging.error('Could not load CoreWLAN framework.') return airport_info cw_interface_state = {0: u'Inactive', 1: u'Scanning', 2: u'Authenticating', 3: u'Associating', 4: u'Running'} cw_security = {-1: u'Unknown', 0: u'None', 1: u'WEP', 2: u'WPA Personal', 3: u'WPA Personal Mixed', 4: u'WPA2 Personal', 6: u'Dynamic WEP', 7: u'WPA Enterprise', 8: u'WPA Enterprise Mixed', 9: u'WPA2 Enterprise'} cw_phy_mode = {0: u'None', 1: u'802.11a', 2: u'802.11b', 3: u'802.11g', 4: u'802.11n'} cw_channel_band = {0: u'Unknown', 1: u'2 GHz', 2: u'5 GHz'} iface = CWInterface.interface() # pylint: disable=undefined-variable if not iface: return airport_info airport_info['name'] = iface.interfaceName() airport_info['hw_address'] = iface.hardwareAddress() airport_info['service_active'] = bool(iface.serviceActive()) airport_info['country_code'] = iface.countryCode() airport_info['power'] = bool(iface.powerOn()) airport_info['SSID'] = iface.ssid() airport_info['BSSID'] = iface.bssid() airport_info['noise_measurement'] = iface.noiseMeasurement() airport_info['phy_mode'] = iface.activePHYMode() airport_info['phy_mode_name'] = cw_phy_mode[iface.activePHYMode()] airport_info['rssi'] = iface.rssiValue() airport_info['state'] = iface.interfaceState() airport_info['state_name'] = cw_interface_state[iface.interfaceState()] airport_info['transmit_power'] = iface.transmitPower() airport_info['transmit_rate'] = iface.transmitRate() # Get channel information cw_channel = iface.wlanChannel() if cw_channel: airport_info['channel_number'] = cw_channel.channelNumber() airport_info['channel_band'] = cw_channel_band[cw_channel.channelBand()] # Get security information # If the security setting is unknown iface.security() returns NSIntegerMax # which is a very large number and annoying to test for in calling scripts. # Change any value larger than 100 (the enum currently ends at 10) to -1. security = iface.security() if security > 100: security = -1 airport_info['security'] = security airport_info['security_name'] = cw_security[security] # Get nearby network information, if requested if include_nearby_networks: nearby_networks = [] try: for nw in iface.scanForNetworksWithName_error_(None, None): ssid = nw.ssid() if ssid not in nearby_networks: nearby_networks.append(ssid) except __HOLE__: pass airport_info['nearby_networks'] = nearby_networks return airport_info
TypeError
dataset/ETHPy150Open google/macops/gmacpyutil/gmacpyutil/gmacpyutil.py/GetAirportInfo
def ReleasePowerAssertion(io_lib, assertion_id):
    """Releases a power assertion.

    Assertions are released with IOPMAssertionRelease, however if they are not,
    assertions are automatically released when the process exits, dies or
    crashes, i.e. a crashed process will not prevent idle sleep indefinitely.

    Args:
        io_lib: IOKit library from ConfigureIOKit()
        assertion_id: c_uint, assertion identification number from
            CreatePowerAssertion()

    Returns:
        0 if successful, stderr otherwise.
    """
    try:
        return io_lib.IOPMAssertionRelease(assertion_id)
    except __HOLE__:
        return 'IOKit library returned an error.'
AttributeError
dataset/ETHPy150Open google/macops/gmacpyutil/gmacpyutil/gmacpyutil.py/ReleasePowerAssertion
def IsTextConsole():
    """Checks if console is text only or GUI.

    Returns:
        True if the console is text-only, False if GUI is available
    """
    try:
        # see TN2083
        security_lib = ctypes.cdll.LoadLibrary(
            '/System/Library/Frameworks/Security.framework/Security')
        # Security.Framework/Headers/AuthSession.h
        session = -1
        session_id = ctypes.c_int(0)
        attributes = ctypes.c_int(0)
        ret = security_lib.SessionGetInfo(
            session, ctypes.byref(session_id), ctypes.byref(attributes))
        if ret != 0:
            return True
        return not attributes.value & SESSIONHASGRAPHICACCESS
    except __HOLE__:
        return True
OSError
dataset/ETHPy150Open google/macops/gmacpyutil/gmacpyutil/gmacpyutil.py/IsTextConsole
def luhn(candidate):
    """
    Checks a candidate number for validity according to the Luhn algorithm
    (used in validation of, for example, credit cards).
    Both numeric and string candidates are accepted.
    """
    if not isinstance(candidate, basestring):
        candidate = str(candidate)
    try:
        evens = sum([int(c) for c in candidate[-1::-2]])
        odds = sum([LUHN_ODD_LOOKUP[int(c)] for c in candidate[-2::-2]])
        return ((evens + odds) % 10 == 0)
    except __HOLE__:  # Raised if an int conversion fails
        return False
ValueError
dataset/ETHPy150Open AppScale/appscale/AppServer/google/appengine/_internal/django/utils/checksums.py/luhn
def parse(self): _config = self.read_config(self.config) for src, path, configsection, key, value in _config: if ':' in configsection: sectiongroupname, sectionname = configsection.split(':') else: sectiongroupname, sectionname = 'global', configsection if sectiongroupname == 'global' and sectionname == 'global' and key == 'extends': continue sectiongroup = self.setdefault(sectiongroupname, ConfigSection()) self.get_section(sectiongroupname, sectionname) if key is not None: if key == 'massagers': for spec in value.splitlines(): spec = spec.strip() if not spec: continue if '=' not in spec: log.error("Invalid massager spec '%s' in section '%s:%s'.", spec, sectiongroupname, sectionname) sys.exit(1) massager_key, massager = spec.split('=') massager_key = massager_key.strip() massager = massager.strip() if ':' in massager_key: parts = tuple(x.strip() for x in massager_key.split(':')) if len(parts) == 2: massager_sectiongroupname, massager_key = parts massager_sectionname = None elif len(parts) == 3: massager_sectiongroupname, massager_sectionname, massager_key = parts else: log.error("Invalid massager spec '%s' in section '%s:%s'.", spec, sectiongroupname, sectionname) sys.exit(1) if massager_sectiongroupname == '': massager_sectiongroupname = sectiongroupname if massager_sectiongroupname == '*': massager_sectiongroupname = None if massager_sectionname == '': massager_sectionname = sectionname else: massager_sectiongroupname = sectiongroupname massager_sectionname = sectionname try: massager = resolve_dotted_name(massager) except ImportError as e: log.error("Can't import massager from '%s'.\n%s", massager, unicode(e)) sys.exit(1) except __HOLE__ as e: log.error("Can't import massager from '%s'.\n%s", massager, unicode(e)) sys.exit(1) massager = massager(massager_sectiongroupname, massager_key) if massager_sectionname is None: self.add_massager(massager) else: massager_section = self.get_section( sectiongroupname, massager_sectionname) massager_section.add_massager(massager) else: sectiongroup[sectionname][key] = ConfigValue(path, value, src=src) if 'plugin' in self: # pragma: no cover warnings.warn("The 'plugin' section isn't used anymore.") del self['plugin'] seen = set() for sectiongroupname in self: sectiongroup = self[sectiongroupname] for sectionname in sectiongroup: section = sectiongroup[sectionname] if '<' in section: self._expand(sectiongroupname, sectionname, section, seen) return self
AttributeError
dataset/ETHPy150Open ployground/ploy/ploy/config.py/Config.parse
def testIterators_13_Large(self): n_iter = 300 n_node = 300 tml = random_node_list(122, n_node, 0.75) random.seed(0) p = makeTDInstance() p.update(dict((t, 1) for t in tml)) # set up a list of iterators def newIter(): recursive = random.random() < 0.7 brv = random.random() if 0 <= brv <= 0.33: branch_mode = "all" elif 0.33 < brv <= 0.66: branch_mode = "only" else: branch_mode = "none" irv = random.random() if 0 <= brv <= 0.33: return p.iterkeys(recursive, branch_mode) elif 0.33 < brv <= 0.66: return p.itervalues(recursive, branch_mode) else: return p.iteritems(recursive, branch_mode) iter_list = [newIter() for i in range(n_iter)] while len(iter_list) > 0: del_queue = [] for i, it in enumerate(iter_list): try: it.__next__() except __HOLE__: self.assert_(p._iteratorRefCount() <= len(iter_list)) del_queue.append(i) for i in sorted(del_queue, reverse=True): del iter_list[i] # Now make sure that everything is cool self.assert_(p._iteratorRefCount() == 0, p._iteratorRefCount()) for tm in tml: p.pop(tm)
StopIteration
dataset/ETHPy150Open hoytak/treedict/tests/test_iterators_lists.py/TestIteratorsLists.testIterators_13_Large
def lookup(twitter, user_ids):
    """Resolve an entire list of user ids to screen names."""
    users = {}
    api_limit = 100
    for i in range(0, len(user_ids), api_limit):
        fail = Fail()
        while True:
            try:
                portion = lookup_portion(twitter, user_ids[i:][:api_limit])
            except TwitterError as e:
                if e.e.code == 429:
                    err("Fail: %i API rate limit exceeded" % e.e.code)
                    rls = twitter.application.rate_limit_status()
                    reset = rls.rate_limit_reset
                    reset = time.asctime(time.localtime(reset))
                    delay = int(rls.rate_limit_reset - time.time()) + 5  # avoid race
                    err("Interval limit of %i requests reached, next reset on "
                        "%s: going to sleep for %i secs"
                        % (rls.rate_limit_limit, reset, delay))
                    fail.wait(delay)
                    continue
                elif e.e.code == 502:
                    err("Fail: %i Service currently unavailable, retrying..." % e.e.code)
                else:
                    err("Fail: %s\nRetrying..." % str(e)[:500])
                fail.wait(3)
            except urllib2.URLError as e:
                err("Fail: urllib2.URLError %s - Retrying..." % str(e))
                fail.wait(3)
            except httplib.error as e:
                err("Fail: httplib.error %s - Retrying..." % str(e))
                fail.wait(3)
            except __HOLE__ as e:
                err("Fail: KeyError %s - Retrying..." % str(e))
                fail.wait(3)
            else:
                users.update(portion)
                err("Resolving user ids to screen names: %i/%i" % (len(users), len(user_ids)))
                break
    return users
KeyError
dataset/ETHPy150Open sixohsix/twitter/twitter/follow.py/lookup
def follow(twitter, screen_name, followers=True): """Get the entire list of followers/following for a user.""" user_ids = [] cursor = -1 fail = Fail() while True: try: portion, cursor = follow_portion(twitter, screen_name, cursor, followers) except TwitterError as e: if e.e.code == 401: reason = ("follow%s of that user are protected" % ("ers" if followers else "ing")) err("Fail: %i Unauthorized (%s)" % (e.e.code, reason)) break elif e.e.code == 429: err("Fail: %i API rate limit exceeded" % e.e.code) rls = twitter.application.rate_limit_status() reset = rls.rate_limit_reset reset = time.asctime(time.localtime(reset)) delay = int(rls.rate_limit_reset - time.time()) + 5 # avoid race err("Interval limit of %i requests reached, next reset on %s: " "going to sleep for %i secs" % (rls.rate_limit_limit, reset, delay)) fail.wait(delay) continue elif e.e.code == 502: err("Fail: %i Service currently unavailable, retrying..." % e.e.code) else: err("Fail: %s\nRetrying..." % str(e)[:500]) fail.wait(3) except urllib2.URLError as e: err("Fail: urllib2.URLError %s - Retrying..." % str(e)) fail.wait(3) except httplib.error as e: err("Fail: httplib.error %s - Retrying..." % str(e)) fail.wait(3) except __HOLE__ as e: err("Fail: KeyError %s - Retrying..." % str(e)) fail.wait(3) else: new = -len(user_ids) user_ids = list(set(user_ids + portion)) new += len(user_ids) what = "follow%s" % ("ers" if followers else "ing") err("Browsing %s %s, new: %i" % (screen_name, what, new)) if cursor == 0: break fail = Fail() return user_ids
KeyError
dataset/ETHPy150Open sixohsix/twitter/twitter/follow.py/follow
def main(args=sys.argv[1:]): options = { 'oauth': False, 'followers': True, 'api-rate': False, 'show_id': False } try: parse_args(args, options) except GetoptError as e: err("I can't do that, %s." % e) raise SystemExit(1) # exit if no user or given, except if asking for API rate if not options['extra_args'] and not options['api-rate']: print(__doc__) raise SystemExit(1) # authenticate using OAuth, asking for token if necessary if options['oauth']: oauth_filename = (os.getenv("HOME", "") + os.sep + ".twitter-follow_oauth") if not os.path.exists(oauth_filename): oauth_dance("Twitter-Follow", CONSUMER_KEY, CONSUMER_SECRET, oauth_filename) oauth_token, oauth_token_secret = read_token_file(oauth_filename) auth = OAuth(oauth_token, oauth_token_secret, CONSUMER_KEY, CONSUMER_SECRET) else: auth = NoAuth() twitter = Twitter(auth=auth, api_version='1.1', domain='api.twitter.com') if options['api-rate']: rate_limit_status(twitter) return # obtain list of followers (or following) for every given user for user in options['extra_args']: user_ids, users = [], {} try: user_ids = follow(twitter, user, options['followers']) users = lookup(twitter, user_ids) except __HOLE__ as e: err() err("Interrupted.") raise SystemExit(1) for uid in user_ids: if options['show_id']: try: print(str(uid) + "\t" + users[uid].encode("utf-8")) except KeyError: pass else: try: print(users[uid].encode("utf-8")) except KeyError: pass # print total on stderr to separate from user list on stdout if options['followers']: err("Total followers for %s: %i" % (user, len(user_ids))) else: err("Total users %s is following: %i" % (user, len(user_ids)))
KeyboardInterrupt
dataset/ETHPy150Open sixohsix/twitter/twitter/follow.py/main
def targets(tgt, tgt_type='range', **kwargs):
    '''
    Return the targets from a range query
    '''
    r = seco.range.Range(__opts__['range_server'])
    log.debug('Range connection to \'{0}\' established'.format(__opts__['range_server']))

    hosts = []
    try:
        log.debug('Querying range for \'{0}\''.format(tgt))
        hosts = r.expand(tgt)
    except seco.range.RangeException as err:
        log.error('Range server exception: %s', err)
        return {}
    log.debug('Range responded with: \'{0}\''.format(hosts))

    # Currently we only support giving a raw range entry, no target filtering supported other than what range returns :S
    tgt_func = {
        'range': target_range,
        'glob': target_range,
        # 'glob': target_glob,
    }

    log.debug('Filtering using tgt_type: \'{0}\''.format(tgt_type))
    try:
        targeted_hosts = tgt_func[tgt_type](tgt, hosts)
    except __HOLE__:
        raise NotImplementedError
    log.debug('Targeting data for salt-ssh: \'{0}\''.format(targeted_hosts))

    return targeted_hosts
KeyError
dataset/ETHPy150Open saltstack/salt/salt/roster/range.py/targets
def _find_closest_centroids(self, x):
    try:
        ceil_key = self.C.ceiling_key(x)
    except KeyError:
        floor_key = self.C.floor_key(x)
        return [self.C[floor_key]]

    try:
        floor_key = self.C.floor_key(x)
    except __HOLE__:
        ceil_key = self.C.ceiling_key(x)
        return [self.C[ceil_key]]

    if abs(floor_key - x) < abs(ceil_key - x):
        return [self.C[floor_key]]
    elif abs(floor_key - x) == abs(ceil_key - x) and (ceil_key != floor_key):
        return [self.C[ceil_key], self.C[floor_key]]
    else:
        return [self.C[ceil_key]]
KeyError
dataset/ETHPy150Open CamDavidsonPilon/tdigest/tdigest/tdigest.py/TDigest._find_closest_centroids
def __arrayPlugGetItem( self, key ) :

    if getattr( self, "enableInputGeneratorCompatibility", False ) :
        try :
            return Gaffer.ArrayPlug.__originalGetItem( self, key )
        except __HOLE__ :
            if key == self.getName() :
                # Some nodes (I'm looking at you UnionFilter) used to
                # name their first child without a numeric suffix.
                return Gaffer.ArrayPlug.__originalGetItem( self, 0 )
            else :
                # Simulate access to the child of the first plug in an
                # old InputGenerator.
                return Gaffer.ArrayPlug.__originalGetItem( self, 0 )[key]

    return Gaffer.ArrayPlug.__originalGetItem( self, key )
KeyError
dataset/ETHPy150Open ImageEngine/gaffer/startup/Gaffer/inputGeneratorCompatibility.py/__arrayPlugGetItem
def replace_with_repr(plan):
    r = repr(plan)
    try:
        return eval(r)
    except (TypeError, __HOLE__, SyntaxError):
        print 'Error with repr {r} of plan {p}'.format(r=r, p=plan)
        raise
AttributeError
dataset/ETHPy150Open uwescience/raco/raco/replace_with_repr.py/replace_with_repr
def __call__(self, name, args):
    """ Send message to each listener.

    @param name method name
    @param args arguments for message instance
    @return None
    """
    results = []
    try:
        messageType = self.messageTypes[name]
        listeners = self.listeners[maybeName(messageType[0])]
    except (__HOLE__, ):
        return results
    message = messageType[0](**args)
    for listener in listeners:
        try:
            results.append(listener(message))
        except (Exception, ):
            errmsg = ("Exception in message dispatch. "
                      "Handler '%s' for '%s'")
            self.logger.exception(errmsg, maybeName(listener), name)
            results.append(None)
    return results
KeyError
dataset/ETHPy150Open CarterBain/Medici/ib/opt/dispatcher.py/Dispatcher.__call__
def unregister(self, listener, *types):
    """ Disassociate listener with message types created by this Dispatcher.

    @param listener callable to no longer receive messages
    @param *types zero or more message types to disassociate with listener
    @return True if disassociated with one or more handler; otherwise False
    """
    count = 0
    for messagetype in types:
        try:
            listeners = self.listeners[maybeName(messagetype)]
        except (__HOLE__, ):
            pass
        else:
            if listener in listeners:
                listeners.remove(listener)
                count += 1
    return count > 0
KeyError
dataset/ETHPy150Open CarterBain/Medici/ib/opt/dispatcher.py/Dispatcher.unregister
def pag_story_detail(request, year, month, day, slug, p_per_page=settings.PAGINATION['P_PER_PAGE'], orphans=settings.PAGINATION['ORPHANS'], p_object_name="story_content", template_object_name="story", template_name="stories/pag_story.html", extra_context={}): """ A detail view for stories that can paginates the story by paragraph. If a story is not found a 404 or a custom template can be rendered by setting `THROW_404` to `False`. By default, template `stories/pag_story.html` is used to render the story which expects the story to be paginated by paragraphs. If the paragraph paginator is not used, the template `stories/story_detail.html` is used to render the story. There is two main variables passed to the template `p_object_name`, which is the stories body field, and `template_object_name` which is the story instance it self. Argument List: * **year** - Four digets, `2012`, `1997`, `2004` * **month** - `jul` `jan` `aug` * **day** - Two digits, `01` `23`, `31` * **slug** - slugified string, `this-is-a-slug` * **p_per_page** - pagination setting, paragraphs per page * **orphans** - pagination setting, number of orphans * **p_object_name** - the story body variable name * **template_object_name** - the story variable name * **template_name** - the name of the template * **extra_context** - dictionary containing any extra context """ import datetime import time try: pub_date = datetime.date(*time.strptime(year + month + day, '%Y%b%d')[:3]) except ValueError: raise Http404 qs = Story.published.get if request.user.is_staff: qs = Story.objects.get try: story = qs(publish_date=pub_date, slug=slug) except Story.DoesNotExist: if not settings.THROW_404: return render_to_response('stories/story_removed.html', {}, context_instance=RequestContext(request)) else: raise Http404 if settings.PAGINATION['PAGINATE']: paginator = ParagraphPaginator(story.body, p_per_page, orphans=orphans) # Make sure page request is an int. If not, deliver first page. try: page = int(request.GET.get('page', '1')) except __HOLE__: page = 1 # If page request (9999) is out of range, deliver last page of results. try: story_content = paginator.page(page) except (EmptyPage, InvalidPage): story_content = paginator.page(paginator.num_pages) else: story_content = story.body # If the default template hasn't been changed, use the non-pagination # template if template_name == "stories/pag_story.html": template_name = "stories/story_detail.html" context = { p_object_name: story_content, template_object_name: story } if extra_context: context.update(extra_context) return render_to_response(template_name, context, context_instance=RequestContext(request))
ValueError
dataset/ETHPy150Open callowayproject/django-stories/stories/views.py/pag_story_detail
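The masked exception in the view above guards the int() conversion of the ?page= query parameter. A small sketch of that fallback, under the assumption that any non-numeric value should silently map to the first page:

    def parse_page_number(raw, default=1):
        # A non-numeric page value falls back to the default instead of erroring.
        try:
            return int(raw)
        except ValueError:
            return default

    assert parse_page_number("3") == 3
    assert parse_page_number("abc") == 1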
def unregister(self, path, watch_type=None, watcher=None, handler=None): """Removes an existing watch or handler. This unregisters an object's watch and handler callback functions. It doesn't actually prevent the watch or handler from triggering but it does remove all references from the object and prevent the functions from being called. This allows garbage collection of the object. Args: path: The znode being watched. watch_type: Type of watcher to unregister - must be in (core.WATCH_DATA, core.WATCH_CHILDREN) watcher: The watcher function that should be removed. handler: The handler function that should be removed. Returns: Nothing. """ if watch_type is core.WATCH_CHILDREN: watches = self._children_watches handlers = self._children_handlers else: watches = self._watches handlers = self._handlers if watcher: try: while True: watches.get(path, []).remove(watcher) except ValueError: pass if handler: try: while True: handlers.get(path, []).remove(handler) except __HOLE__: pass
ValueError
dataset/ETHPy150Open liquidgecka/twitcher/twitcher/zkwrapper.py/ZKWrapper.unregister
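Both except blocks above rely on list.remove() raising ValueError once no matching callback remains, which terminates the otherwise infinite loop. A standalone sketch of that drain-until-ValueError idiom:

    def remove_all(callbacks, target):
        # list.remove() raises ValueError when target is no longer present,
        # which serves as the loop's exit condition.
        try:
            while True:
                callbacks.remove(target)
        except ValueError:
            pass

    watchers = ["a", "b", "a", "c"]
    remove_all(watchers, "a")
    assert watchers == ["b", "c"]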
def align(args): """ %prog align database.fasta read1.fq read2.fq Wrapper for `gsnap` single-end or paired-end, depending on the number of args. """ from jcvi.formats.fastq import guessoffset p = OptionParser(align.__doc__) p.add_option("--rnaseq", default=False, action="store_true", help="Input is RNA-seq reads, turn splicing on") p.add_option("--native", default=False, action="store_true", help="Convert GSNAP output to NATIVE format") p.set_home("eddyyeh") p.set_outdir() p.set_cpus() opts, args = p.parse_args(args) if len(args) == 2: logging.debug("Single-end alignment") elif len(args) == 3: logging.debug("Paired-end alignment") else: sys.exit(not p.print_help()) dbfile, readfile = args[:2] outdir = opts.outdir assert op.exists(dbfile) and op.exists(readfile) prefix = get_prefix(readfile, dbfile) logfile = op.join(outdir, prefix + ".log") gsnapfile = op.join(outdir, prefix + ".gsnap") nativefile = gsnapfile.rsplit(".", 1)[0] + ".unique.native" if not need_update((dbfile, readfile), gsnapfile): logging.error("`{0}` exists. `gsnap` already run.".format(gsnapfile)) else: dbdir, dbname = check_index(dbfile) cmd = "gsnap -D {0} -d {1}".format(dbdir, dbname) cmd += " -B 5 -m 0.1 -i 2 -n 3" # memory, mismatch, indel penalty, nhits if opts.rnaseq: cmd += " -N 1" cmd += " -t {0}".format(opts.cpus) cmd += " --gmap-mode none --nofails" if readfile.endswith(".gz"): cmd += " --gunzip" try: offset = "sanger" if guessoffset([readfile]) == 33 else "illumina" cmd += " --quality-protocol {0}".format(offset) except __HOLE__: pass cmd += " " + " ".join(args[1:]) sh(cmd, outfile=gsnapfile, errfile=logfile) if opts.native: EYHOME = opts.eddyyeh_home if need_update(gsnapfile, nativefile): cmd = op.join(EYHOME, "convert2native.pl") cmd += " --gsnap {0} -o {1}".format(gsnapfile, nativefile) cmd += " -proc {0}".format(opts.cpus) sh(cmd) return gsnapfile, logfile
AssertionError
dataset/ETHPy150Open tanghaibao/jcvi/apps/gmap.py/align
def ingest ( self ): """Read the stack and ingest""" # for all specified resolutions for resolution in reversed(self.proj.datasetcfg.resolutions): print "Building DB for resolution ", resolution, " imagesize ", self.proj.datasetcfg.imagesz[resolution] zstart = self.proj.datasetcfg.slicerange[0] zend = self.proj.datasetcfg.slicerange[1] # slices per ingest group zslices = self.proj.datasetcfg.cubedim[resolution][2] # extract parameters for iteration numxtiles = self.proj.datasetcfg.imagesz[resolution][0]/self.tilesz numytiles = self.proj.datasetcfg.imagesz[resolution][1]/self.tilesz numzslabs = (zend-zstart+1)/zslices + 1 # Ingest in database aligned slabs in the z dimension for zslab in range(numzslabs): # over all tiles in that slice for ytile in range(numytiles): for xtile in range(numxtiles): # RBTODO need to generalize to other project types cuboid = np.zeros ( [zslices,self.tilesz,self.tilesz], dtype=np.uint8 ) # over each slice for zslice in range(zslices): #if we are at the end of the space, quit if zslab*zslices+zstart+zslice > zend: break filename = '{}/{}/{}/{}/{}.jpg'.format(self.prefix,resolution,zslab*zslices+zslice+zstart,ytile,xtile) print filename try: # add tile to stack tileimage = Image.open ( filename, 'r' ) cuboid [zslice,:,:] = np.asarray ( tileimage ) except __HOLE__, e: print "Failed to open file %s" % (e) raise continue # above line added Kunal # here we have continuous cuboid, let's upload it to the database corner = [ xtile*self.tilesz, ytile*self.tilesz, zslab*zslices ] self.db.writeImageCuboid ( corner, resolution, cuboid) self.db.commit()
IOError
dataset/ETHPy150Open neurodata/ndstore/ingest/catmaid/catmaid.py/CatmaidIngest.ingest
@register.tag def nickname(_parser, token): """Almost the same as nickname filter but the result is cached.""" try: _, email_address, never_me = token.split_contents() except __HOLE__: try: _, email_address = token.split_contents() never_me = '' except ValueError: raise django.template.TemplateSyntaxError( "%r requires exactly one or two arguments" % token.contents.split()[0]) return NicknameNode(email_address, never_me)
ValueError
dataset/ETHPy150Open rietveld-codereview/rietveld/codereview/library.py/nickname
@classmethod def match(cls, item): try: return item[cls.field] is None except __HOLE__: return True
KeyError
dataset/ETHPy150Open beetbox/beets/beets/dbcore/query.py/NoneQuery.match
def _convert(self, s): """Convert a string to a numeric type (float or int). Return None if `s` is empty. Raise an InvalidQueryError if the string cannot be converted. """ # This is really just a bit of fun premature optimization. if not s: return None try: return int(s) except __HOLE__: try: return float(s) except ValueError: raise InvalidQueryArgumentTypeError(s, u"an int or a float")
ValueError
dataset/ETHPy150Open beetbox/beets/beets/dbcore/query.py/NumericQuery._convert
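The conversion above cascades from int() to float(), and only raises once both fail. A self-contained sketch of the same cascade, using a plain ValueError in place of the project-specific InvalidQueryArgumentTypeError:

    def to_number(s):
        # Prefer an int, fall back to a float, and re-raise with a clearer message.
        if not s:
            return None
        try:
            return int(s)
        except ValueError:
            try:
                return float(s)
            except ValueError:
                raise ValueError("expected an int or a float, got %r" % s)

    assert to_number("7") == 7
    assert to_number("7.5") == 7.5
    assert to_number("") is None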
@classmethod def parse(cls, string): """Parse a date and return a `Period` object or `None` if the string is empty. """ if not string: return None ordinal = string.count('-') if ordinal >= len(cls.date_formats): # Too many components. return None date_format = cls.date_formats[ordinal] try: date = datetime.strptime(string, date_format) except __HOLE__: # Parsing failed. return None precision = cls.precisions[ordinal] return cls(date, precision)
ValueError
dataset/ETHPy150Open beetbox/beets/beets/dbcore/query.py/Period.parse
def _convert(self, s): """Convert a M:SS or numeric string to a float. Return None if `s` is empty. Raise an InvalidQueryError if the string cannot be converted. """ if not s: return None try: return util.raw_seconds_short(s) except ValueError: try: return float(s) except __HOLE__: raise InvalidQueryArgumentTypeError( s, u"a M:SS string or a float") # Sorting.
ValueError
dataset/ETHPy150Open beetbox/beets/beets/dbcore/query.py/DurationQuery._convert
def test_crack2(self): w = Grammar('root b\n' 'b{7} c\n' 'c 1 "1"\n' ' 1 "2"\n' ' 1 "3"\n' ' 4 "4"') r = GrammarCracker(w) for _ in range(10): g = w.generate() nw = r.crack(g) ref = {} for c in g: try: ref[c] += 1 except __HOLE__: ref[c] = 1 stats = {} for _ in range(100): for c in nw.generate(): try: stats[c] += 1 except KeyError: stats[c] = 1 for c, v in stats.items(): self.assertAlmostEqual(v / 100, ref[c], 0)
KeyError
dataset/ETHPy150Open blackberry/ALF/alf/fuzz/grammr2_test.py/GrammarTests.test_crack2
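The test above builds character histograms with the EAFP idiom: increment on hit, initialise on KeyError. A minimal sketch of that counting pattern:

    def count_chars(text):
        # First sighting of a character raises KeyError and seeds the count at 1.
        counts = {}
        for c in text:
            try:
                counts[c] += 1
            except KeyError:
                counts[c] = 1
        return counts

    assert count_chars("aab") == {"a": 2, "b": 1}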
def __getattr__(self, key): try: return getattr(self.comparator, key) except __HOLE__: raise AttributeError( 'Neither %r object nor %r object associated with %s ' 'has an attribute %r' % ( type(self).__name__, type(self.comparator).__name__, self, key) )
AttributeError
dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/sqlalchemy/orm/attributes.py/QueryableAttribute.__getattr__
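Both this row and the create_proxied_attribute row that follows delegate failed attribute lookups to a wrapped comparator and re-raise a more descriptive AttributeError. A reduced sketch of that delegation, with illustrative class names:

    class Delegating(object):
        # __getattr__ is only consulted after normal lookup fails, so delegating
        # to the wrapped comparator here cannot recurse on 'comparator' itself.
        def __init__(self, comparator):
            self.comparator = comparator

        def __getattr__(self, key):
            try:
                return getattr(self.comparator, key)
            except AttributeError:
                raise AttributeError(
                    "neither %r nor %r has an attribute %r"
                    % (type(self).__name__, type(self.comparator).__name__, key))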
def create_proxied_attribute(descriptor): """Create an QueryableAttribute / user descriptor hybrid. Returns a new QueryableAttribute type that delegates descriptor behavior and getattr() to the given descriptor. """ # TODO: can move this to descriptor_props if the need for this # function is removed from ext/hybrid.py class Proxy(QueryableAttribute): """Presents the :class:`.QueryableAttribute` interface as a proxy on top of a Python descriptor / :class:`.PropComparator` combination. """ def __init__(self, class_, key, descriptor, comparator, adapt_to_entity=None, doc=None, original_property=None): self.class_ = class_ self.key = key self.descriptor = descriptor self.original_property = original_property self._comparator = comparator self._adapt_to_entity = adapt_to_entity self.__doc__ = doc @property def property(self): return self.comparator.property @util.memoized_property def comparator(self): if util.callable(self._comparator): self._comparator = self._comparator() if self._adapt_to_entity: self._comparator = self._comparator.adapt_to_entity( self._adapt_to_entity) return self._comparator def adapt_to_entity(self, adapt_to_entity): return self.__class__(adapt_to_entity.entity, self.key, self.descriptor, self._comparator, adapt_to_entity) def __get__(self, instance, owner): if instance is None: return self else: return self.descriptor.__get__(instance, owner) def __str__(self): return "%s.%s" % (self.class_.__name__, self.key) def __getattr__(self, attribute): """Delegate __getattr__ to the original descriptor and/or comparator.""" try: return getattr(descriptor, attribute) except AttributeError: try: return getattr(self.comparator, attribute) except __HOLE__: raise AttributeError( 'Neither %r object nor %r object associated with %s ' 'has an attribute %r' % ( type(descriptor).__name__, type(self.comparator).__name__, self, attribute) ) Proxy.__name__ = type(descriptor).__name__ + 'Proxy' util.monkeypatch_proxied_specials(Proxy, type(descriptor), name='descriptor', from_instance=descriptor) return Proxy
AttributeError
dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/sqlalchemy/orm/attributes.py/create_proxied_attribute
def get(self, state, dict_, passive=PASSIVE_OFF): """Retrieve a value from the given object. If a callable is assembled on this object's attribute, and passive is False, the callable will be executed and the resulting value will be set as the new value for this attribute. """ if self.key in dict_: return dict_[self.key] else: # if history present, don't load key = self.key if key not in state.committed_state or \ state.committed_state[key] is NEVER_SET: if not passive & CALLABLES_OK: return PASSIVE_NO_RESULT if key in state.callables: callable_ = state.callables[key] value = callable_(state, passive) elif self.callable_: value = self.callable_(state, passive) else: value = ATTR_EMPTY if value is PASSIVE_NO_RESULT or value is NEVER_SET: return value elif value is ATTR_WAS_SET: try: return dict_[key] except __HOLE__: # TODO: no test coverage here. raise KeyError( "Deferred loader for attribute " "%r failed to populate " "correctly" % key) elif value is not ATTR_EMPTY: return self.set_committed_value(state, dict_, value) if not passive & INIT_OK: return NEVER_SET else: # Return a new, empty value return self.initialize(state, dict_)
KeyError
dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/sqlalchemy/orm/attributes.py/AttributeImpl.get
def pop(self, state, dict_, value, initiator, passive=PASSIVE_OFF): try: # TODO: better solution here would be to add # a "popper" role to collections.py to complement # "remover". self.remove(state, dict_, value, initiator, passive=passive) except (__HOLE__, KeyError, IndexError): pass
ValueError
dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/sqlalchemy/orm/attributes.py/CollectionAttributeImpl.pop
def update_parameters(): """Try to download a new version of the parameter file. """ global parameters if parameters is not None: return url = 'https://reprozip-stats.poly.edu/parameters/' env_var = os.environ.get('REPROZIP_PARAMETERS') if env_var not in (None, '', '1', 'on', 'enabled', 'yes', 'true') and ( env_var.startswith('http://') or env_var.startswith('https://')): # This is only used for testing # Note that this still expects the ReproZip CA url = env_var try: from reprounzip.main import __version__ as version filename = download_file( '%s%s' % (url, version), None, cachename='parameters.json', ssl_verify=get_reprozip_ca_certificate().path) except Exception: logging.info("Can't download parameters.json, using bundled " "parameters") else: try: with filename.open() as fp: parameters = json.load(fp) return except ValueError: logging.info("Downloaded parameters.json doesn't load, using " "bundled parameters") try: filename.remove() except __HOLE__: pass parameters = json.loads(bundled_parameters)
OSError
dataset/ETHPy150Open ViDA-NYU/reprozip/reprounzip/reprounzip/parameters.py/update_parameters
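The masked OSError above makes removal of a stale cache file best-effort. The same guard in isolation:

    import os

    def remove_quietly(path):
        # Deleting the cached file is best-effort; a missing file or a
        # permission problem is swallowed rather than propagated.
        try:
            os.remove(path)
        except OSError:
            pass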
def run(self): '''Run loop''' logger.info("result_worker starting...") while not self._quit: try: task, result = self.inqueue.get(timeout=1) self.on_result(task, result) except Queue.Empty as e: continue except KeyboardInterrupt: break except __HOLE__ as e: logger.error(e) continue except Exception as e: logger.exception(e) continue logger.info("result_worker exiting...")
AssertionError
dataset/ETHPy150Open binux/pyspider/pyspider/result/result_worker.py/ResultWorker.run
def raw_api_call(self, url, parameters={}, http_method="GET", max_timeout=4): """ Make an API Call to GitHub """ # limit to 1 call per 1.15 seconds if time.time() - self._fetched <= 1.15: time.sleep(1.15 - (time.time() - self._fetched)) self._fetched = time.time() sock = httplib2.Http(timeout=max_timeout) request_headers = { 'User-Agent': 'Python-httplib2' } parameters.update({ 'username': self.username, 'token': self.token }) if http_method == 'POST': post_data = urlencode(parameters) elif parameters: url += '?%s' % urlencode(parameters) try: if http_method == 'POST': headers, response = sock.request(url, "POST", post_data, headers=request_headers) else: headers, response = sock.request(url) except socket.timeout: raise ValueError('Socket timed out') status = int(headers.pop('status', 200)) if status != 200: raise ValueError('Returned status: %s' % (status)) try: processed_response = simplejson.loads(response) except __HOLE__, e: raise ValueError('Error in data from GitHub API: %s' % e.message) return processed_response
ValueError
dataset/ETHPy150Open frozenskys/django-github/github/libs/github.py/GithubAPI.raw_api_call
def save(self): # We go back to the object to read (in order to reapply) the # permissions which were set on this group, but which are not # accessible in the wagtail admin interface, as otherwise these would # be clobbered by this form. try: untouchable_permissions = self.instance.permissions.exclude(pk__in=self.registered_permissions) bool(untouchable_permissions) # force this to be evaluated, as it's about to change except __HOLE__: # this form is not bound; we're probably creating a new group untouchable_permissions = [] group = super(GroupForm, self).save() group.permissions.add(*untouchable_permissions) return group
ValueError
dataset/ETHPy150Open torchbox/wagtail/wagtail/wagtailusers/forms.py/GroupForm.save
def getNewApplication(self, request): # Creates a new application instance try: applicationClass = self.getApplicationClass() application = applicationClass() except __HOLE__: raise ServletException, "getNewApplication failed" return application
TypeError
dataset/ETHPy150Open rwl/muntjac/muntjac/terminal/gwt/server/application_servlet.py/ApplicationServlet.getNewApplication
def save_files_classification(self): """ Save solver, train_val and deploy files to disk """ network = cleanedUpClassificationNetwork(self.network, len(self.get_labels())) data_layers, train_val_layers, deploy_layers = filterLayersByState(network) ### Write train_val file train_val_network = caffe_pb2.NetParameter() # Data layers # TODO clean this up train_data_layer = None val_data_layer = None for layer in data_layers.layer: for rule in layer.include: if rule.phase == caffe_pb2.TRAIN: assert train_data_layer is None, 'cannot specify two train data layers' train_data_layer = layer elif rule.phase == caffe_pb2.TEST: assert val_data_layer is None, 'cannot specify two test data layers' val_data_layer = layer if train_data_layer is None: assert val_data_layer is None, 'cannot specify a test data layer without a train data layer' dataset_backend = self.dataset.train_db_task().backend has_val_set = self.dataset.val_db_task() is not None if train_data_layer is not None: if dataset_backend == 'lmdb': assert train_data_layer.type == 'Data', 'expecting a Data layer' elif dataset_backend == 'hdf5': assert train_data_layer.type == 'HDF5Data', 'expecting an HDF5Data layer' if dataset_backend == 'lmdb' and train_data_layer.HasField('data_param'): assert not train_data_layer.data_param.HasField('source'), "don't set the data_param.source" assert not train_data_layer.data_param.HasField('backend'), "don't set the data_param.backend" if dataset_backend == 'hdf5' and train_data_layer.HasField('hdf5_data_param'): assert not train_data_layer.hdf5_data_param.HasField('source'), "don't set the hdf5_data_param.source" max_crop_size = min(self.dataset.image_dims[0], self.dataset.image_dims[1]) if self.crop_size: assert dataset_backend != 'hdf5', 'HDF5Data layer does not support cropping' assert self.crop_size <= max_crop_size, 'crop_size is larger than the image size' train_data_layer.transform_param.crop_size = self.crop_size elif train_data_layer.transform_param.HasField('crop_size'): cs = train_data_layer.transform_param.crop_size if cs > max_crop_size: # don't throw an error here cs = max_crop_size train_data_layer.transform_param.crop_size = cs self.crop_size = cs train_val_network.layer.add().CopyFrom(train_data_layer) train_data_layer = train_val_network.layer[-1] if val_data_layer is not None and has_val_set: if dataset_backend == 'lmdb': assert val_data_layer.type == 'Data', 'expecting a Data layer' elif dataset_backend == 'hdf5': assert val_data_layer.type == 'HDF5Data', 'expecting an HDF5Data layer' if dataset_backend == 'lmdb' and val_data_layer.HasField('data_param'): assert not val_data_layer.data_param.HasField('source'), "don't set the data_param.source" assert not val_data_layer.data_param.HasField('backend'), "don't set the data_param.backend" if dataset_backend == 'hdf5' and val_data_layer.HasField('hdf5_data_param'): assert not val_data_layer.hdf5_data_param.HasField('source'), "don't set the hdf5_data_param.source" if self.crop_size: # use our error checking from the train layer val_data_layer.transform_param.crop_size = self.crop_size train_val_network.layer.add().CopyFrom(val_data_layer) val_data_layer = train_val_network.layer[-1] else: layer_type = 'Data' if dataset_backend == 'hdf5': layer_type = 'HDF5Data' train_data_layer = train_val_network.layer.add(type = layer_type, name = 'data') train_data_layer.top.append('data') train_data_layer.top.append('label') train_data_layer.include.add(phase = caffe_pb2.TRAIN) if dataset_backend == 'lmdb': train_data_layer.data_param.batch_size = 
constants.DEFAULT_BATCH_SIZE elif dataset_backend == 'hdf5': train_data_layer.hdf5_data_param.batch_size = constants.DEFAULT_BATCH_SIZE if self.crop_size: assert dataset_backend != 'hdf5', 'HDF5Data layer does not support cropping' train_data_layer.transform_param.crop_size = self.crop_size if has_val_set: val_data_layer = train_val_network.layer.add(type = layer_type, name = 'data') val_data_layer.top.append('data') val_data_layer.top.append('label') val_data_layer.include.add(phase = caffe_pb2.TEST) if dataset_backend == 'lmdb': val_data_layer.data_param.batch_size = constants.DEFAULT_BATCH_SIZE elif dataset_backend == 'hdf5': val_data_layer.hdf5_data_param.batch_size = constants.DEFAULT_BATCH_SIZE if self.crop_size: val_data_layer.transform_param.crop_size = self.crop_size if dataset_backend == 'lmdb': train_data_layer.data_param.source = self.dataset.path(self.dataset.train_db_task().db_name) train_data_layer.data_param.backend = caffe_pb2.DataParameter.LMDB if val_data_layer is not None and has_val_set: val_data_layer.data_param.source = self.dataset.path(self.dataset.val_db_task().db_name) val_data_layer.data_param.backend = caffe_pb2.DataParameter.LMDB elif dataset_backend == 'hdf5': train_data_layer.hdf5_data_param.source = self.dataset.path(self.dataset.train_db_task().textfile) if val_data_layer is not None and has_val_set: val_data_layer.hdf5_data_param.source = self.dataset.path(self.dataset.val_db_task().textfile) if self.use_mean == 'pixel': assert dataset_backend != 'hdf5', 'HDF5Data layer does not support mean subtraction' mean_pixel = self.get_mean_pixel(self.dataset.path(self.dataset.train_db_task().mean_file)) self.set_mean_value(train_data_layer, mean_pixel) if val_data_layer is not None and has_val_set: self.set_mean_value(val_data_layer, mean_pixel) elif self.use_mean == 'image': self.set_mean_file(train_data_layer, self.dataset.path(self.dataset.train_db_task().mean_file)) if val_data_layer is not None and has_val_set: self.set_mean_file(val_data_layer, self.dataset.path(self.dataset.train_db_task().mean_file)) if self.batch_size: if dataset_backend == 'lmdb': train_data_layer.data_param.batch_size = self.batch_size if val_data_layer is not None and has_val_set: val_data_layer.data_param.batch_size = self.batch_size elif dataset_backend == 'hdf5': train_data_layer.hdf5_data_param.batch_size = self.batch_size if val_data_layer is not None and has_val_set: val_data_layer.hdf5_data_param.batch_size = self.batch_size else: if dataset_backend == 'lmdb': if not train_data_layer.data_param.HasField('batch_size'): train_data_layer.data_param.batch_size = constants.DEFAULT_BATCH_SIZE if val_data_layer is not None and has_val_set and not val_data_layer.data_param.HasField('batch_size'): val_data_layer.data_param.batch_size = constants.DEFAULT_BATCH_SIZE elif dataset_backend == 'hdf5': if not train_data_layer.hdf5_data_param.HasField('batch_size'): train_data_layer.hdf5_data_param.batch_size = constants.DEFAULT_BATCH_SIZE if val_data_layer is not None and has_val_set and not val_data_layer.hdf5_data_param.HasField('batch_size'): val_data_layer.hdf5_data_param.batch_size = constants.DEFAULT_BATCH_SIZE # Non-data layers train_val_network.MergeFrom(train_val_layers) # Write to file with open(self.path(self.train_val_file), 'w') as outfile: text_format.PrintMessage(train_val_network, outfile) # network sanity checks self.logger.debug("Network sanity check - train") CaffeTrainTask.net_sanity_check(train_val_network, caffe_pb2.TRAIN) if has_val_set: self.logger.debug("Network sanity 
check - val") CaffeTrainTask.net_sanity_check(train_val_network, caffe_pb2.TEST) ### Write deploy file deploy_network = caffe_pb2.NetParameter() # Input deploy_network.input.append('data') shape = deploy_network.input_shape.add() shape.dim.append(1) shape.dim.append(self.dataset.image_dims[2]) if self.crop_size: shape.dim.append(self.crop_size) shape.dim.append(self.crop_size) else: shape.dim.append(self.dataset.image_dims[0]) shape.dim.append(self.dataset.image_dims[1]) # Layers deploy_network.MergeFrom(deploy_layers) # Write to file with open(self.path(self.deploy_file), 'w') as outfile: text_format.PrintMessage(deploy_network, outfile) # network sanity checks self.logger.debug("Network sanity check - deploy") CaffeTrainTask.net_sanity_check(deploy_network, caffe_pb2.TEST) found_softmax = False for layer in deploy_network.layer: if layer.type == 'Softmax': found_softmax = True break assert found_softmax, 'Your deploy network is missing a Softmax layer! Read the documentation for custom networks and/or look at the standard networks for examples.' ### Write solver file solver = caffe_pb2.SolverParameter() # get enum value for solver type solver.solver_type = getattr(solver, self.solver_type) solver.net = self.train_val_file # Set CPU/GPU mode if config_value('caffe_root')['cuda_enabled'] and \ bool(config_value('gpu_list')): solver.solver_mode = caffe_pb2.SolverParameter.GPU else: solver.solver_mode = caffe_pb2.SolverParameter.CPU solver.snapshot_prefix = self.snapshot_prefix # Epochs -> Iterations train_iter = int(math.ceil(float(self.dataset.train_db_task().entries_count) / train_data_layer.data_param.batch_size)) solver.max_iter = train_iter * self.train_epochs snapshot_interval = self.snapshot_interval * train_iter if 0 < snapshot_interval <= 1: solver.snapshot = 1 # don't round down elif 1 < snapshot_interval < solver.max_iter: solver.snapshot = int(snapshot_interval) else: solver.snapshot = 0 # only take one snapshot at the end if has_val_set and self.val_interval: solver.test_iter.append(int(math.ceil(float(self.dataset.val_db_task().entries_count) / val_data_layer.data_param.batch_size))) val_interval = self.val_interval * train_iter if 0 < val_interval <= 1: solver.test_interval = 1 # don't round down elif 1 < val_interval < solver.max_iter: solver.test_interval = int(val_interval) else: solver.test_interval = solver.max_iter # only test once at the end # Learning rate solver.base_lr = self.learning_rate solver.lr_policy = self.lr_policy['policy'] scale = float(solver.max_iter)/100.0 if solver.lr_policy == 'fixed': pass elif solver.lr_policy == 'step': # stepsize = stepsize * scale solver.stepsize = int(math.ceil(float(self.lr_policy['stepsize']) * scale)) solver.gamma = self.lr_policy['gamma'] elif solver.lr_policy == 'multistep': for value in self.lr_policy['stepvalue'].split(','): # stepvalue = stepvalue * scale solver.stepvalue.append(int(math.ceil(float(value) * scale))) solver.gamma = self.lr_policy['gamma'] elif solver.lr_policy == 'exp': # gamma = gamma^(1/scale) solver.gamma = math.pow(self.lr_policy['gamma'], 1.0/scale) elif solver.lr_policy == 'inv': # gamma = gamma / scale solver.gamma = self.lr_policy['gamma'] / scale solver.power = self.lr_policy['power'] elif solver.lr_policy == 'poly': solver.power = self.lr_policy['power'] elif solver.lr_policy == 'sigmoid': # gamma = -gamma / scale solver.gamma = -1.0 * self.lr_policy['gamma'] / scale # stepsize = stepsize * scale solver.stepsize = int(math.ceil(float(self.lr_policy['stepsize']) * scale)) else: raise 
Exception('Unknown lr_policy: "%s"' % solver.lr_policy) # These solver types don't support momentum unsupported = [solver.ADAGRAD] try: unsupported.append(solver.RMSPROP) except __HOLE__: pass # go with the suggested defaults if solver.solver_type not in unsupported: solver.momentum = 0.9 solver.weight_decay = 0.0005 # Display 8x per epoch, or once per 5000 images, whichever is more frequent solver.display = max(1, min( int(math.floor(float(solver.max_iter) / (self.train_epochs * 8))), int(math.ceil(5000.0 / train_data_layer.data_param.batch_size)) )) if self.random_seed is not None: solver.random_seed = self.random_seed with open(self.path(self.solver_file), 'w') as outfile: text_format.PrintMessage(solver, outfile) self.solver = solver # save for later return True
AttributeError
dataset/ETHPy150Open NVIDIA/DIGITS/digits/model/tasks/caffe_train.py/CaffeTrainTask.save_files_classification
def save_files_generic(self): """ Save solver, train_val and deploy files to disk """ train_image_db = None train_labels_db = None val_image_db = None val_labels_db = None for task in self.dataset.tasks: if task.purpose == 'Training Images': train_image_db = task if task.purpose == 'Training Labels': train_labels_db = task if task.purpose == 'Validation Images': val_image_db = task if task.purpose == 'Validation Labels': val_labels_db = task assert train_image_db is not None, 'Training images are required' ### Split up train_val and deploy layers network = cleanedUpGenericNetwork(self.network) data_layers, train_val_layers, deploy_layers = filterLayersByState(network) ### Write train_val file train_val_network = caffe_pb2.NetParameter() # Data layers # TODO clean this up train_image_data_layer = None train_label_data_layer = None val_image_data_layer = None val_label_data_layer = None for layer in data_layers.layer: for rule in layer.include: if rule.phase == caffe_pb2.TRAIN: if 'data' in layer.top: assert train_image_data_layer is None, 'cannot specify two train image data layers' train_image_data_layer = layer elif 'label' in layer.top: assert train_label_data_layer is None, 'cannot specify two train label data layers' train_label_data_layer = layer elif rule.phase == caffe_pb2.TEST: if 'data' in layer.top: assert val_image_data_layer is None, 'cannot specify two val image data layers' val_image_data_layer = layer elif 'label' in layer.top: assert val_label_data_layer is None, 'cannot specify two val label data layers' val_label_data_layer = layer train_image_data_layer = self.make_generic_data_layer(train_image_db, train_image_data_layer, 'data', 'data', caffe_pb2.TRAIN) if train_image_data_layer is not None: train_val_network.layer.add().CopyFrom(train_image_data_layer) train_label_data_layer = self.make_generic_data_layer(train_labels_db, train_label_data_layer, 'label', 'label', caffe_pb2.TRAIN) if train_label_data_layer is not None: train_val_network.layer.add().CopyFrom(train_label_data_layer) val_image_data_layer = self.make_generic_data_layer(val_image_db, val_image_data_layer, 'data', 'data', caffe_pb2.TEST) if val_image_data_layer is not None: train_val_network.layer.add().CopyFrom(val_image_data_layer) val_label_data_layer = self.make_generic_data_layer(val_labels_db, val_label_data_layer, 'label', 'label', caffe_pb2.TEST) if val_label_data_layer is not None: train_val_network.layer.add().CopyFrom(val_label_data_layer) # Non-data layers train_val_network.MergeFrom(train_val_layers) # Write to file with open(self.path(self.train_val_file), 'w') as outfile: text_format.PrintMessage(train_val_network, outfile) # network sanity checks self.logger.debug("Network sanity check - train") CaffeTrainTask.net_sanity_check(train_val_network, caffe_pb2.TRAIN) self.logger.debug("Network sanity check - val") CaffeTrainTask.net_sanity_check(train_val_network, caffe_pb2.TEST) ### Write deploy file deploy_network = caffe_pb2.NetParameter() # Input deploy_network.input.append('data') shape = deploy_network.input_shape.add() shape.dim.append(1) shape.dim.append(train_image_db.image_channels) if train_image_data_layer.transform_param.HasField('crop_size'): shape.dim.append( train_image_data_layer.transform_param.crop_size) shape.dim.append( train_image_data_layer.transform_param.crop_size) else: shape.dim.append(train_image_db.image_height) shape.dim.append(train_image_db.image_width) # Layers deploy_network.MergeFrom(deploy_layers) # Write to file with open(self.path(self.deploy_file), 'w') as 
outfile: text_format.PrintMessage(deploy_network, outfile) # network sanity checks self.logger.debug("Network sanity check - deploy") CaffeTrainTask.net_sanity_check(deploy_network, caffe_pb2.TEST) ### Write solver file solver = caffe_pb2.SolverParameter() # get enum value for solver type solver.solver_type = getattr(solver, self.solver_type) solver.net = self.train_val_file # Set CPU/GPU mode if config_value('caffe_root')['cuda_enabled'] and \ bool(config_value('gpu_list')): solver.solver_mode = caffe_pb2.SolverParameter.GPU else: solver.solver_mode = caffe_pb2.SolverParameter.CPU solver.snapshot_prefix = self.snapshot_prefix # Epochs -> Iterations train_iter = int(math.ceil(float(train_image_db.image_count) / train_image_data_layer.data_param.batch_size)) solver.max_iter = train_iter * self.train_epochs snapshot_interval = self.snapshot_interval * train_iter if 0 < snapshot_interval <= 1: solver.snapshot = 1 # don't round down elif 1 < snapshot_interval < solver.max_iter: solver.snapshot = int(snapshot_interval) else: solver.snapshot = 0 # only take one snapshot at the end if val_image_data_layer: solver.test_iter.append(int(math.ceil(float(val_image_db.image_count) / val_image_data_layer.data_param.batch_size))) val_interval = self.val_interval * train_iter if 0 < val_interval <= 1: solver.test_interval = 1 # don't round down elif 1 < val_interval < solver.max_iter: solver.test_interval = int(val_interval) else: solver.test_interval = solver.max_iter # only test once at the end # Learning rate solver.base_lr = self.learning_rate solver.lr_policy = self.lr_policy['policy'] scale = float(solver.max_iter)/100.0 if solver.lr_policy == 'fixed': pass elif solver.lr_policy == 'step': # stepsize = stepsize * scale solver.stepsize = int(math.ceil(float(self.lr_policy['stepsize']) * scale)) solver.gamma = self.lr_policy['gamma'] elif solver.lr_policy == 'multistep': for value in self.lr_policy['stepvalue'].split(','): # stepvalue = stepvalue * scale solver.stepvalue.append(int(math.ceil(float(value) * scale))) solver.gamma = self.lr_policy['gamma'] elif solver.lr_policy == 'exp': # gamma = gamma^(1/scale) solver.gamma = math.pow(self.lr_policy['gamma'], 1.0/scale) elif solver.lr_policy == 'inv': # gamma = gamma / scale solver.gamma = self.lr_policy['gamma'] / scale solver.power = self.lr_policy['power'] elif solver.lr_policy == 'poly': solver.power = self.lr_policy['power'] elif solver.lr_policy == 'sigmoid': # gamma = -gamma / scale solver.gamma = -1.0 * self.lr_policy['gamma'] / scale # stepsize = stepsize * scale solver.stepsize = int(math.ceil(float(self.lr_policy['stepsize']) * scale)) else: raise Exception('Unknown lr_policy: "%s"' % solver.lr_policy) # These solver types don't support momentum unsupported = [solver.ADAGRAD] try: unsupported.append(solver.RMSPROP) except __HOLE__: pass # go with the suggested defaults if solver.solver_type not in unsupported: solver.momentum = 0.9 solver.weight_decay = 0.0005 # Display 8x per epoch, or once per 5000 images, whichever is more frequent solver.display = max(1, min( int(math.floor(float(solver.max_iter) / (self.train_epochs * 8))), int(math.ceil(5000.0 / train_image_data_layer.data_param.batch_size)) )) if self.random_seed is not None: solver.random_seed = self.random_seed with open(self.path(self.solver_file), 'w') as outfile: text_format.PrintMessage(solver, outfile) self.solver = solver # save for later return True
AttributeError
dataset/ETHPy150Open NVIDIA/DIGITS/digits/model/tasks/caffe_train.py/CaffeTrainTask.save_files_generic
def get_net(self, epoch=None, gpu=-1): """ Returns an instance of caffe.Net Keyword Arguments: epoch -- which snapshot to load (default is -1 to load the most recently generated snapshot) """ if not self.has_model(): return False file_to_load = None if not epoch: epoch = self.snapshots[-1][1] file_to_load = self.snapshots[-1][0] else: for snapshot_file, snapshot_epoch in self.snapshots: if snapshot_epoch == epoch: file_to_load = snapshot_file break if file_to_load is None: raise Exception('snapshot not found for epoch "%s"' % epoch) # check if already loaded if self.loaded_snapshot_file and self.loaded_snapshot_file == file_to_load \ and hasattr(self, '_caffe_net') and self._caffe_net is not None: return self._caffe_net CaffeTrainTask.set_mode(gpu) # Add job_dir to PATH to pick up any python layers used by the model sys.path.append(self.job_dir) # Attempt to force a reload of the "digits_python_layers" module loaded_module = sys.modules.get('digits_python_layers', None) if loaded_module: try: reload(loaded_module) except __HOLE__: # Let Caffe throw the error if the file is missing pass # Load the model self._caffe_net = caffe.Net( self.path(self.deploy_file), file_to_load, caffe.TEST) # Remove job_dir from PATH sys.path.remove(self.job_dir) self.loaded_snapshot_epoch = epoch self.loaded_snapshot_file = file_to_load return self._caffe_net
ImportError
dataset/ETHPy150Open NVIDIA/DIGITS/digits/model/tasks/caffe_train.py/CaffeTrainTask.get_net
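The row above force-reloads a previously imported module and tolerates an ImportError, letting Caffe surface the real failure later. A sketch of that guard using importlib.reload (the Python 3 spelling of the reload builtin used in the row):

    import importlib
    import sys

    def force_reload(module_name):
        # Only reload modules that were already imported; a failed reload is
        # ignored here and left for the eventual importer to report.
        loaded = sys.modules.get(module_name)
        if loaded is not None:
            try:
                importlib.reload(loaded)
            except ImportError:
                pass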
def process_request(self, request): # Determine which theme the user has configured and store in local # thread storage so that it persists to the custom template loader try: _local.theme = request.COOKIES[get_theme_cookie_name()] except __HOLE__: _local.theme = get_default_theme()
KeyError
dataset/ETHPy150Open openstack/horizon/horizon/themes.py/ThemeMiddleware.process_request
def process_response(self, request, response): try: delattr(_local, 'theme') except __HOLE__: pass return response
AttributeError
dataset/ETHPy150Open openstack/horizon/horizon/themes.py/ThemeMiddleware.process_response
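Taken together, the two middleware hooks above pair a KeyError fallback (no theme cookie yet) with an AttributeError fallback (no theme stored on the thread-local). A compact sketch of both, with illustrative names:

    import threading

    _local = threading.local()

    def remember_theme(cookies, default="light"):
        # A missing cookie falls back to the default theme.
        try:
            _local.theme = cookies["theme"]
        except KeyError:
            _local.theme = default

    def forget_theme():
        # Cleanup tolerates the attribute never having been set on this thread.
        try:
            del _local.theme
        except AttributeError:
            pass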
def get_template_sources(self, template_name): # If the cookie doesn't exist, set it to the default theme default_theme = get_default_theme() theme = getattr(_local, 'theme', default_theme) this_theme = find_theme(theme) # If the theme is not valid, check the default theme ... if not this_theme: this_theme = find_theme(get_default_theme()) # If the theme is still not valid, then move along ... # these aren't the templates you are looking for if not this_theme: pass try: if not template_name.startswith('/'): try: yield safe_join( 'openstack_dashboard', this_theme[2], 'templates', template_name ) except SuspiciousFileOperation: yield os.path.join( this_theme[2], 'templates', template_name ) except __HOLE__: # The template dir name wasn't valid UTF-8. raise except ValueError: # The joined path was located outside of template_dir. pass
UnicodeDecodeError
dataset/ETHPy150Open openstack/horizon/horizon/themes.py/ThemeTemplateLoader.get_template_sources
def load_template_source(self, template_name, template_dirs=None): for path in self.get_template_sources(template_name): try: with io.open(path, encoding=settings.FILE_CHARSET) as file: return file.read(), path except __HOLE__: pass raise TemplateDoesNotExist(template_name)
IOError
dataset/ETHPy150Open openstack/horizon/horizon/themes.py/ThemeTemplateLoader.load_template_source
def _load(self, typ, version): """Return class for *typ* and *version*.""" classes = self._get_type_dict() try: lst = classes[typ] dist = lst[0] groups = lst[1] klass = dist.load_entry_point(groups[0], typ) if version is not None and dist.version != version: return None return klass except __HOLE__: if self._search_path is None: return None # try to look in the whole environment for group in self._groups: for proj in self.env: for dist in self.env[proj]: if version is not None and version != dist.version: continue ep = dist.get_entry_info(group, typ) if ep is not None: dist.activate() klass = ep.load(require=True, env=self.env) self._have_new_types = True return klass if version is None: # newest version didn't have entry point, so skip to next project break return None
KeyError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/pkg_res_factory.py/PkgResourcesFactory._load
def request(method, url, data=None, headers={}, timeout=None): host_port = url.split('/')[2] timeout_set = False try: connection = httplib.HTTPConnection(host_port, timeout = timeout) timeout_set = True except __HOLE__: connection = httplib.HTTPConnection(host_port) with closing(connection): if not timeout_set: connection.connect() connection.sock.settimeout(timeout) timeout_set = True connection.request(method, url, data, headers) response = connection.getresponse() return (response.status, response.read())
TypeError
dataset/ETHPy150Open kmike/yandex-maps/yandex_maps/http.py/request
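The masked TypeError above comes from passing a timeout keyword to an httplib version that does not accept it, after which the code retries without the argument. A generic sketch of that call-then-degrade pattern:

    def make_connection(factory, host, timeout=None):
        # Older factories may not accept a timeout keyword; the resulting
        # TypeError triggers a retry without it.
        try:
            return factory(host, timeout=timeout), True
        except TypeError:
            return factory(host), False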
def test_TestFunctionality(self): bucket = PropBucket() try: bucket.prop.value = bucket.prop.value + 0 except __HOLE__: pass else: assert False, "PropBucket is not working"
AssertionError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_evalorder.py/EvaluationOrder.test_TestFunctionality
def _GetDecrypter(self): """Retrieves a decrypter. Returns: A decrypter object (instance of encryptions.Decrypter). Raises: IOError: if the decrypter cannot be initialized. """ try: credentials = resolver.Resolver.key_chain.GetCredentials(self._path_spec) return encryption_manager.EncryptionManager.GetDecrypter( self._encryption_method, **credentials) except __HOLE__ as exception: raise IOError(exception)
ValueError
dataset/ETHPy150Open log2timeline/dfvfs/dfvfs/file_io/encrypted_stream_io.py/EncryptedStream._GetDecrypter
def setup_platform(hass, config, add_devices, discovery_info=None): """Setup the ISY994 platform.""" # pylint: disable=protected-access logger = logging.getLogger(__name__) devs = [] # Verify connection if ISY is None or not ISY.connected: logger.error('A connection has not been made to the ISY controller.') return False # Import weather if ISY.climate is not None: for prop in ISY.climate._id2name: if prop is not None: prefix = HIDDEN_STRING \ if prop in DEFAULT_HIDDEN_WEATHER else '' node = WeatherPseudoNode('ISY.weather.' + prop, prefix + prop, getattr(ISY.climate, prop), getattr(ISY.climate, prop + '_units')) devs.append(ISYSensorDevice(node)) # Import sensor nodes for (path, node) in ISY.nodes: if SENSOR_STRING in node.name: if HIDDEN_STRING in path: node.name += HIDDEN_STRING devs.append(ISYSensorDevice(node, [STATE_ON, STATE_OFF])) # Import sensor programs for (folder_name, states) in ( ('HA.locations', [STATE_HOME, STATE_NOT_HOME]), ('HA.sensors', [STATE_OPEN, STATE_CLOSED]), ('HA.states', [STATE_ON, STATE_OFF])): try: folder = ISY.programs['My Programs'][folder_name] except __HOLE__: # folder does not exist pass else: for _, _, node_id in folder.children: node = folder[node_id].leaf devs.append(ISYSensorDevice(node, states)) add_devices(devs)
KeyError
dataset/ETHPy150Open home-assistant/home-assistant/homeassistant/components/sensor/isy994.py/setup_platform
@Text('teardown') def teardown (self, reactor, service, command): try: descriptions,command = self.parser.extract_neighbors(command) _,code = command.split(' ',1) for key in reactor.peers: for description in descriptions: if reactor.match_neighbor(description,key): reactor.peers[key].teardown(int(code)) self.log_message('teardown scheduled for %s' % ' '.join(description)) reactor.answer(service,'done') return True except __HOLE__: reactor.answer(service,'error') return False except IndexError: reactor.answer(service,'error') return False
ValueError
dataset/ETHPy150Open Exa-Networks/exabgp/lib/exabgp/reactor/api/command/text.py/teardown
@Text('announce watchdog') def announce_watchdog (self, reactor, service, command): def callback (name): # XXX: move into Action for neighbor in reactor.configuration.neighbors: reactor.configuration.neighbors[neighbor].rib.outgoing.announce_watchdog(name) yield False reactor.route_update = True reactor.answer(service,'done') try: name = command.split(' ')[2] except __HOLE__: name = service reactor.plan(callback(name),'announce_watchdog') return True
IndexError
dataset/ETHPy150Open Exa-Networks/exabgp/lib/exabgp/reactor/api/command/text.py/announce_watchdog
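This handler (and withdraw_watchdog below) reads an optional third token from the command and falls back to the service name when indexing past the end of the split raises IndexError. In isolation:

    def watchdog_name(command, fallback):
        # command.split(' ')[2] raises IndexError when no name was supplied.
        try:
            return command.split(' ')[2]
        except IndexError:
            return fallback

    assert watchdog_name("announce watchdog primary", "svc") == "primary"
    assert watchdog_name("announce watchdog", "svc") == "svc"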
@Text('withdraw watchdog') def withdraw_watchdog (self, reactor, service, command): def callback (name): # XXX: move into Action for neighbor in reactor.configuration.neighbors: reactor.configuration.neighbors[neighbor].rib.outgoing.withdraw_watchdog(name) yield False reactor.route_update = True reactor.answer(service,'done') try: name = command.split(' ')[2] except __HOLE__: name = service reactor.plan(callback(name),'withdraw_watchdog') return True
IndexError
dataset/ETHPy150Open Exa-Networks/exabgp/lib/exabgp/reactor/api/command/text.py/withdraw_watchdog
@Text('flush route') def flush_route (self, reactor, service, command): def callback (self, peers): self.log_message("Flushing routes for %s" % ', '.join(peers if peers else []) if peers is not None else 'all peers') yield True reactor.route_update = True reactor.answer(service,'done') try: descriptions,command = self.parser.extract_neighbors(command) peers = reactor.match_neighbors(descriptions) if not peers: self.log_failure('no neighbor matching the command : %s' % command,'warning') reactor.answer(service,'error') return False reactor.plan(callback(self,peers),'flush_route') return True except __HOLE__: self.log_failure('issue parsing the command') reactor.answer(service,'error') return False except IndexError: self.log_failure('issue parsing the command') reactor.answer(service,'error') return False
ValueError
dataset/ETHPy150Open Exa-Networks/exabgp/lib/exabgp/reactor/api/command/text.py/flush_route
@Text('announce route') def announce_route (self, reactor, service, line): def callback (): try: descriptions,command = self.parser.extract_neighbors(line) peers = reactor.match_neighbors(descriptions) if not peers: self.log_failure('no neighbor matching the command : %s' % command,'warning') reactor.answer(service,'error') yield True return changes = self.parser.api_route(command,peers) if not changes: self.log_failure('command could not parse route in : %s' % command,'warning') reactor.answer(service,'error') yield True return for (peers,change) in changes: change.nlri.action = OUT.ANNOUNCE reactor.configuration.inject_change(peers,change) self.log_message('route added to %s : %s' % (', '.join(peers) if peers else 'all peers',change.extensive())) yield False reactor.route_update = True reactor.answer(service,'done') except ValueError: self.log_failure('issue parsing the route') reactor.answer(service,'error') yield True except __HOLE__: self.log_failure('issue parsing the route') reactor.answer(service,'error') yield True reactor.plan(callback(),'announce_route') return True
IndexError
dataset/ETHPy150Open Exa-Networks/exabgp/lib/exabgp/reactor/api/command/text.py/announce_route
@Text('withdraw route') def withdraw_route (self, reactor, service, line): def callback (): try: descriptions,command = self.parser.extract_neighbors(line) peers = reactor.match_neighbors(descriptions) if not peers: self.log_failure('no neighbor matching the command : %s' % command,'warning') reactor.answer(service,'error') yield True return changes = self.parser.api_route(command,peers) if not changes: self.log_failure('command could not parse route in : %s' % command,'warning') reactor.answer(service,'error') yield True return for (peers,change) in changes: change.nlri.action = OUT.WITHDRAW if reactor.configuration.inject_change(peers,change): self.log_message('route removed from %s : %s' % (', '.join(peers) if peers else 'all peers',change.extensive())) yield False else: self.log_failure('route not found on %s : %s' % (', '.join(peers) if peers else 'all peers',change.extensive())) yield False reactor.route_update = True reactor.answer(service,'done') except __HOLE__: self.log_failure('issue parsing the route') yield True except IndexError: self.log_failure('issue parsing the route') yield True reactor.plan(callback(),'withdraw_route') return True
ValueError
dataset/ETHPy150Open Exa-Networks/exabgp/lib/exabgp/reactor/api/command/text.py/withdraw_route
@Text('announce vpls') def announce_vpls (self, reactor, service, line): def callback (): try: descriptions,command = self.parser.extract_neighbors(line) peers = reactor.match_neighbors(descriptions) if not peers: self.log_failure('no neighbor matching the command : %s' % command,'warning') reactor.answer(service,'error') yield True return changes = self.parser.api_vpls(command,peers) if not changes: self.log_failure('command could not parse vpls in : %s' % command,'warning') reactor.answer(service,'error') yield True return for (peers,change) in changes: change.nlri.action = OUT.ANNOUNCE reactor.configuration.inject_change(peers,change) self.log_message('vpls added to %s : %s' % (', '.join(peers) if peers else 'all peers',change.extensive())) yield False reactor.route_update = True reactor.answer(service,'done') except __HOLE__: self.log_failure('issue parsing the vpls') reactor.answer(service,'error') yield True except IndexError: self.log_failure('issue parsing the vpls') reactor.answer(service,'error') yield True reactor.plan(callback(),'announce_vpls') return True
ValueError
dataset/ETHPy150Open Exa-Networks/exabgp/lib/exabgp/reactor/api/command/text.py/announce_vpls
@Text('withdraw vpls') def withdraw_vpls (self, reactor, service, line): def callback (): try: descriptions,command = self.parser.extract_neighbors(line) peers = reactor.match_neighbors(descriptions) if not peers: self.log_failure('no neighbor matching the command : %s' % command,'warning') reactor.answer(service,'error') yield True return changes = self.parser.api_vpls(command,peers) if not changes: self.log_failure('command could not parse vpls in : %s' % command,'warning') reactor.answer(service,'error') yield True return for (peers,change) in changes: change.nlri.action = OUT.WITHDRAW if reactor.configuration.inject_change(peers,change): self.log_message('vpls removed from %s : %s' % (', '.join(peers) if peers else 'all peers',change.extensive())) yield False else: self.log_failure('vpls not found on %s : %s' % (', '.join(peers) if peers else 'all peers',change.extensive())) yield False reactor.route_update = True reactor.answer(service,'done') except ValueError: self.log_failure('issue parsing the vpls') reactor.answer(service,'error') yield True except __HOLE__: self.log_failure('issue parsing the vpls') reactor.answer(service,'error') yield True reactor.plan(callback(),'withdraw_vpls') return True
IndexError
dataset/ETHPy150Open Exa-Networks/exabgp/lib/exabgp/reactor/api/command/text.py/withdraw_vpls
@Text('announce attributes') def announce_attributes (self, reactor, service, line): def callback (): try: descriptions,command = self.parser.extract_neighbors(line) peers = reactor.match_neighbors(descriptions) if not peers: self.log_failure('no neighbor matching the command : %s' % command,'warning') reactor.answer(service,'error') yield True return changes = self.parser.api_attributes(command,peers) if not changes: self.log_failure('command could not parse route in : %s' % command,'warning') reactor.answer(service,'error') yield True return for (peers,change) in changes: change.nlri.action = OUT.ANNOUNCE reactor.configuration.inject_change(peers,change) self.log_message('route added to %s : %s' % (', '.join(peers) if peers else 'all peers',change.extensive())) yield False reactor.route_update = True reactor.answer(service,'done') except __HOLE__: self.log_failure('issue parsing the route') reactor.answer(service,'error') yield True except IndexError: self.log_failure('issue parsing the route') reactor.answer(service,'error') yield True reactor.plan(callback(),'announce_attributes') return True
ValueError
dataset/ETHPy150Open Exa-Networks/exabgp/lib/exabgp/reactor/api/command/text.py/announce_attributes
@Text('withdraw attributes') def withdraw_attribute (self, reactor, service, line): def callback (): try: descriptions,command = self.parser.extract_neighbors(line) peers = reactor.match_neighbors(descriptions) if not peers: self.log_failure('no neighbor matching the command : %s' % command,'warning') reactor.answer(service,'error') yield True return changes = self.parser.api_attributes(command,peers) if not changes: self.log_failure('command could not parse route in : %s' % command,'warning') reactor.answer(service,'error') yield True return for (peers,change) in changes: change.nlri.action = OUT.WITHDRAW if reactor.configuration.inject_change(peers,change): self.log_message('route removed from %s : %s' % (', '.join(peers) if peers else 'all peers',change.extensive())) yield False else: self.log_failure('route not found on %s : %s' % (', '.join(peers) if peers else 'all peers',change.extensive())) yield False reactor.route_update = True reactor.answer(service,'done') except ValueError: self.log_failure('issue parsing the route') reactor.answer(service,'error') yield True except __HOLE__: self.log_failure('issue parsing the route') reactor.answer(service,'error') yield True reactor.plan(callback(),'withdraw_route') return True
IndexError
dataset/ETHPy150Open Exa-Networks/exabgp/lib/exabgp/reactor/api/command/text.py/withdraw_attribute
@Text('announce flow') def announce_flow (self, reactor, service, line): def callback (): try: descriptions,command = self.parser.extract_neighbors(line) peers = reactor.match_neighbors(descriptions) if not peers: self.log_failure('no neighbor matching the command : %s' % command,'warning') reactor.answer(service,'error') yield True return changes = self.parser.api_flow(command,peers) if not changes: self.log_failure('command could not parse flow in : %s' % command,'warning') reactor.answer(service,'error') yield True return for (peers,change) in changes: change.nlri.action = OUT.ANNOUNCE reactor.configuration.inject_change(peers,change) self.log_message('flow added to %s : %s' % (', '.join(peers) if peers else 'all peers',change.extensive())) yield False reactor.route_update = True reactor.answer(service,'done') except ValueError: self.log_failure('issue parsing the flow') reactor.answer(service,'error') yield True except __HOLE__: self.log_failure('issue parsing the flow') reactor.answer(service,'error') yield True reactor.plan(callback(),'announce_flow') return True
IndexError
dataset/ETHPy150Open Exa-Networks/exabgp/lib/exabgp/reactor/api/command/text.py/announce_flow
@Text('withdraw flow') def withdraw_flow (self, reactor, service, line): def callback (): try: descriptions,command = self.parser.extract_neighbors(line) peers = reactor.match_neighbors(descriptions) if not peers: self.log_failure('no neighbor matching the command : %s' % command,'warning') reactor.answer(service,'error') yield True return changes = self.parser.api_flow(command,peers) if not changes: self.log_failure('command could not parse flow in : %s' % command,'warning') reactor.answer(service,'error') yield True return for (peers,change) in changes: change.nlri.action = OUT.WITHDRAW if reactor.configuration.inject_change(peers,change): self.log_message('flow removed from %s : %s' % (', '.join(peers) if peers else 'all peers',change.extensive())) else: self.log_failure('flow not found on %s : %s' % (', '.join(peers) if peers else 'all peers',change.extensive())) yield False reactor.route_update = True reactor.answer(service,'done') except ValueError: self.log_failure('issue parsing the flow') reactor.answer(service,'error') yield True except __HOLE__: self.log_failure('issue parsing the flow') reactor.answer(service,'error') yield True reactor.plan(callback(),'withdraw_flow') return True
IndexError
dataset/ETHPy150Open Exa-Networks/exabgp/lib/exabgp/reactor/api/command/text.py/withdraw_flow
@Text('announce eor') def announce_eor (self, reactor, service, command): def callback (self, command, peers): family = self.parser.api_eor(command) if not family: self.log_failure("Command could not parse eor : %s" % command) reactor.answer(service,'error') yield True return reactor.configuration.inject_eor(peers,family) self.log_message("Sent to %s : %s" % (', '.join(peers if peers else []) if peers is not None else 'all peers',family.extensive())) yield False reactor.route_update = True reactor.answer(service,'done') try: descriptions,command = self.parser.extract_neighbors(command) peers = reactor.match_neighbors(descriptions) if not peers: self.log_failure('no neighbor matching the command : %s' % command,'warning') reactor.answer(service,'error') return False reactor.plan(callback(self,command,peers),'announce_eor') return True except ValueError: self.log_failure('issue parsing the command') reactor.answer(service,'error') return False except __HOLE__: self.log_failure('issue parsing the command') reactor.answer(service,'error') return False
IndexError
dataset/ETHPy150Open Exa-Networks/exabgp/lib/exabgp/reactor/api/command/text.py/announce_eor
@Text('announce route-refresh') def announce_refresh (self, reactor, service, command): def callback (self, command, peers): refresh = self.parser.api_refresh(command) if not refresh: self.log_failure("Command could not parse flow in : %s" % command) reactor.answer(service,'error') yield True return reactor.configuration.inject_refresh(peers,refresh) self.log_message("Sent to %s : %s" % (', '.join(peers if peers else []) if peers is not None else 'all peers',refresh.extensive())) yield False reactor.route_update = True reactor.answer(service,'done') try: descriptions,command = self.parser.extract_neighbors(command) peers = reactor.match_neighbors(descriptions) if not peers: self.log_failure('no neighbor matching the command : %s' % command,'warning') reactor.answer(service,'error') return False reactor.plan(callback(self,command,peers),'announce_refresh') return True except ValueError: self.log_failure('issue parsing the command') reactor.answer(service,'error') return False except __HOLE__: self.log_failure('issue parsing the command') reactor.answer(service,'error') return False
IndexError
dataset/ETHPy150Open Exa-Networks/exabgp/lib/exabgp/reactor/api/command/text.py/announce_refresh
@Text('announce operational') def announce_operational (self, reactor, service, command): def callback (self, command, peers): operational = self.parser.api_operational(command) if not operational: self.log_failure("Command could not parse operational command : %s" % command) reactor.answer(service,'error') yield True return reactor.configuration.inject_operational(peers,operational) self.log_message("operational message sent to %s : %s" % ( ', '.join(peers if peers else []) if peers is not None else 'all peers',operational.extensive() ) ) yield False reactor.route_update = True reactor.answer(service,'done') if (command.split() + ['be','safe'])[2].lower() not in ('asm','adm','rpcq','rpcp','apcq','apcp','lpcq','lpcp'): return False try: descriptions,command = self.parser.extract_neighbors(command) peers = reactor.match_neighbors(descriptions) if not peers: self.log_failure('no neighbor matching the command : %s' % command,'warning') reactor.answer(service,'error') return False reactor.plan(callback(self,command,peers),'announce_operational') return True except ValueError: self.log_failure('issue parsing the command') reactor.answer(service,'error') return False except __HOLE__: self.log_failure('issue parsing the command') reactor.answer(service,'error') return False
IndexError
dataset/ETHPy150Open Exa-Networks/exabgp/lib/exabgp/reactor/api/command/text.py/announce_operational
@classmethod
def get_current_request(cls):
    try:
        return cls._threadlocal.request
    except __HOLE__:
        return None
AttributeError
dataset/ETHPy150Open msiedlarek/wiring/example/guestbook/application.py/Application.get_current_request
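A minimal sketch (assuming `_threadlocal` is a `threading.local` instance, as the name suggests) of why the masked exception is AttributeError: reading an attribute that was never set on a thread-local object raises AttributeError on that thread.

import threading

_threadlocal = threading.local()

try:
    request = _threadlocal.request   # never assigned on this thread -> AttributeError
except AttributeError:
    request = None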
def updateWebmapService(self, webmapId, oldUrl, newUrl, folderID=None): try: params = urllib.urlencode({'token' : self.user.token, 'f' : 'json'}) print 'Getting Info for: ' + webmapId #Get the item data reqUrl = self.user.portalUrl + '/sharing/content/items/' + webmapId + '/data?' + params itemDataReq = urllib.urlopen(reqUrl).read() itemString = str(itemDataReq) #See if it needs to be updated if itemString.find(oldUrl) > -1: #Update the map newString = itemString.replace(oldUrl, newUrl) #Get the item's info for the addItem parameters itemInfoReq = urllib.urlopen(self.user.portalUrl + '/sharing/content/items/' + webmapId + '?' + params) itemInfo = json.loads(itemInfoReq.read(), object_hook=self.__decode_dict__) print 'Updating ' + itemInfo['title'] #Set up the addItem parameters outParamObj = { 'extent' : ', '.join([str(itemInfo['extent'][0][0]), str(itemInfo['extent'][0][1]), str(itemInfo['extent'][1][0]), str(itemInfo['extent'][1][1])]), 'type' : itemInfo['type'], 'item' : itemInfo['item'], 'title' : itemInfo['title'], 'overwrite' : 'true', 'tags' : ','.join(itemInfo['tags']), 'text' : newString } # Get the item folder. if itemInfo['ownerFolder']: folderID = itemInfo['ownerFolder'] else: folderID = '' #Post back the changes overwriting the old map modRequest = urllib.urlopen(self.user.portalUrl + '/sharing/content/users/' + self.user.username + '/' + folderID + '/addItem?' + params , urllib.urlencode(outParamObj)) #Evaluate the results to make sure it happened modResponse = json.loads(modRequest.read()) if modResponse.has_key('error'): raise AGOPostError(webmapId, modResponse['error']['message']) else: print "Successfully updated the urls" else: print 'Didn\'t find any services for ' + oldUrl except __HOLE__ as e: print 'Error - no web maps specified' except AGOPostError as e: print 'Error updating web map ' + e.webmap + ": " + e.msg
ValueError
dataset/ETHPy150Open Esri/ago-tools/agoTools/utilities.py/Utilities.updateWebmapService
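A minimal sketch of the ValueError case: `json.loads` raises ValueError on malformed input (Python 3 raises `json.JSONDecodeError`, a ValueError subclass), so an empty or broken portal response would land in the masked handler above.

import json

try:
    json.loads('')        # empty/invalid response body -> ValueError
except ValueError as e:
    print('Error - could not decode the response: %s' % e)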
def updateItemUrl(self, itemId, oldUrl, newUrl, folderID=None): ''' Use this to update the URL for items such as Map Services. The oldUrl parameter is required as a check to ensure you are not accidentally changing the wrong item or url. This can also replace part of a URL. The text of oldUrl is replaced with the text of newUrl. For example you could change just the host name of your URLs. ''' try: params = urllib.urlencode({'token' : self.user.token, 'f' : 'json'}) print 'Getting Info for: ' + itemId # Get the item data reqUrl = self.user.portalUrl + '/sharing/rest/content/items/' + itemId + '?' + params itemReq = urllib.urlopen(reqUrl).read() itemString = str(itemReq) itemInfo = json.loads(itemString) if not itemInfo.has_key('url'): print itemInfo['title'] + ' doesn\'t have a url property' return print 'Updating ' + itemInfo['title'] existingURL = itemInfo['url'] # Double check that the existing URL matches the provided URL if itemString.find(oldUrl) > -1: # Get the item folder. if itemInfo['ownerFolder']: folderID = itemInfo['ownerFolder'] else: folderID = '' # Update the item URL updatedURL = existingURL.replace(oldUrl, newUrl) updateParams = urllib.urlencode({'url' : updatedURL}) updateUrl = self.user.portalUrl + '/sharing/rest/content/users/' + self.user.username + '/' + folderID + '/items/' + itemId + '/update?' + params updateReq = urllib.urlopen(updateUrl, updateParams).read() modResponse = json.loads(updateReq) if modResponse.has_key('success'): print "Successfully updated the url." else: raise AGOPostError(itemId, modResponse['error']['message']) else: print 'Didn\'t find the specified old URL: ' + oldUrl except __HOLE__ as e: print e except AGOPostError as e: print 'Error updating item: ' + e.msg
ValueError
dataset/ETHPy150Open Esri/ago-tools/agoTools/utilities.py/Utilities.updateItemUrl
def updatewebmapversionAGX(self, webmapId, folderID=None): '''Update the web map version from 1.9x to 1.7x so that the new web maps can be opened in ArcGIS Explorer Online.''' try: params = urllib.urlencode({'token' : self.user.token, 'f' : 'json'}) print 'Getting Info for: ' + webmapId #Get the item data reqUrl = self.user.portalUrl + '/sharing/content/items/' + webmapId + '/data?' + params itemDataReq = urllib.urlopen(reqUrl).read() itemString = str(itemDataReq) itemString = itemString.replace('1.9', '1.7') itemInfoReq = urllib.urlopen(self.user.portalUrl + '/sharing/content/items/' + webmapId + '?' + params) itemInfo = json.loads(itemInfoReq.read(), object_hook=self.__decode_dict__) print 'Updating ' + itemInfo['title'] #Set up the addItem parameters outParamObj = { 'extent' : ', '.join([str(itemInfo['extent'][0][0]), str(itemInfo['extent'][0][1]), str(itemInfo['extent'][1][0]), str(itemInfo['extent'][1][1])]), 'type' : itemInfo['type'], 'item' : itemInfo['item'], 'title' : itemInfo['title'], 'overwrite' : 'true', 'tags' : ','.join(itemInfo['tags']), 'text' : itemString } # Get the item folder. if itemInfo['ownerFolder']: folderID = itemInfo['ownerFolder'] else: folderID = '' #Post back the changes overwriting the old map modRequest = urllib.urlopen(self.user.portalUrl + '/sharing/content/users/' + self.user.username + '/' + folderID + '/addItem?' + params , urllib.urlencode(outParamObj)) #Evaluate the results to make sure it happened modResponse = json.loads(modRequest.read()) if modResponse.has_key('error'): raise AGOPostError(webmapId, modResponse['error']['message']) else: print "Successfully updated the version" except __HOLE__ as e: print 'Error - no web maps specified' except AGOPostError as e: print 'Error updating web map ' + e.webmap + ": " + e.msg
ValueError
dataset/ETHPy150Open Esri/ago-tools/agoTools/utilities.py/Utilities.updatewebmapversionAGX
def __getattr__(self, key):
    if is_instrumented(self, key):
        return get_attribute(self, key)
    else:
        try:
            return self._goofy_dict[key]
        except __HOLE__:
            raise AttributeError(key)
KeyError
dataset/ETHPy150Open zzzeek/sqlalchemy/examples/custom_attributes/custom_management.py/MyClass.__getattr__
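A minimal standalone sketch of the same pattern (the class name is made up): a missing dictionary key raises KeyError, which `__getattr__` translates into the AttributeError that attribute access is expected to raise.

class Record(object):
    def __init__(self):
        self._data = {'name': 'example'}

    def __getattr__(self, key):
        try:
            return self._data[key]        # absent key -> KeyError
        except KeyError:
            raise AttributeError(key)     # callers expect AttributeError

r = Record()
print(r.name)                             # 'example'
# r.missing would raise AttributeError('missing')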
def extract_component(self, name, action, default_entry_point_creator=None):
    """Return the class + component info to use for doing the action w/the component."""
    try:
        # Use a copy instead of the original since we will be
        # modifying this dictionary which may not be wanted for future
        # usages of this dictionary (so keep the original clean)...
        component_info = copy.deepcopy(self._components[name])
    except KeyError:
        component_info = {}
    action_classes = component_info.pop('action_classes', {})
    if default_entry_point_creator is not None:
        default_action_classes = default_entry_point_creator(
            name, copy.deepcopy(component_info))
        if default_action_classes:
            for (an_action, entry_point) in six.iteritems(default_action_classes):
                if an_action not in action_classes:
                    action_classes[an_action] = entry_point
    try:
        entry_point = action_classes.pop(action)
    except __HOLE__:
        raise RuntimeError('No entrypoint configured/generated for'
                           ' %r %r for distribution %r'
                           % (action, name, self.name))
    else:
        return Component(entry_point, component_info, action_classes)
KeyError
dataset/ETHPy150Open openstack/anvil/anvil/distro.py/Distro.extract_component
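A minimal sketch (with made-up component names) of the masked case: `dict.pop(key)` without a default raises KeyError when the requested action has no registered entry point.

action_classes = {'install': 'anvil.components:Installer'}   # hypothetical mapping

try:
    entry_point = action_classes.pop('uninstall')   # no default -> KeyError
except KeyError:
    print('No entrypoint configured/generated for %r' % 'uninstall')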
def run(): """ We reorganized our categories: https://bugzilla.mozilla.org/show_bug.cgi?id=854499 Usage:: python -B manage.py runscript migrations.575-reorganize-cats """ all_cats = Category.objects.filter(type=amo.ADDON_WEBAPP) # (1) "Entertainment & Sports" becomes "Entertainment" and "Sports." try: entertainment = all_cats.filter(slug='entertainment-sports')[0] except IndexError: print 'Could not find Category with slug="entertainment-sports"' else: # (a) Change name of the category to "Entertainment." entertainment.name = 'Entertainment' entertainment.slug = 'entertainment' entertainment.save() print 'Renamed "Entertainment & Sports" to "Entertainment"' # (b) Create a new category called "Sports." Category.objects.create(type=amo.ADDON_WEBAPP, slug='sports', name='Sports') print 'Created "Sports"' # -- # (2) "Music & Audio" becomes "Music". try: music = all_cats.filter(slug='music')[0] except __HOLE__: print 'Could not find Category with slug="music"' else: music.name = 'Music' music.save() print 'Renamed "Music & Audio" to "Music"' # -- # (3) "Social & Communication" becomes "Social". try: social = all_cats.filter(slug='social')[0] except IndexError: print 'Could not find Category with slug="social"' else: social.name = 'Social' social.save() print 'Renamed "Social & Communication" to "Social"' # -- # (4) "Books & Reference" becomes "Books" and "Reference." try: books = all_cats.filter(slug='books-reference')[0] except IndexError: print 'Could not find Category with slug="books-reference"' else: # (a) Change name of the category to "Books."" books.name = 'Books' books.slug = 'books' books.save() print 'Renamed "Books & Reference" to "Books"' # (b) Create a new category called "Reference." Category.objects.create(type=amo.ADDON_WEBAPP, slug='reference', name='Reference') print 'Created "Reference"' # -- # (5) "Photos & Media" becomes "Photo & Video." try: photos = all_cats.filter(slug='photos-media')[0] except IndexError: print 'Could not find Category with slug="photos-media"' else: photos.name = 'Photo & Video' photos.slug = 'photo-video' photos.save() print 'Renamed "Photos & Media" to "Photo & Video"' # -- # (6) Add "Maps & Navigation." Category.objects.create(type=amo.ADDON_WEBAPP, slug='maps-navigation', name='Maps & Navigation') print 'Created "Maps & Navigation"'
IndexError
dataset/ETHPy150Open mozilla/addons-server/src/olympia/migrations/575-reorganize-cats.py/run
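A minimal sketch of the masked case: the migration takes the first element of each filtered queryset with `[0]`, and an empty result raises IndexError, which the script turns into a 'Could not find' message.

matches = []                      # e.g. a category filter that matched no rows
try:
    music = matches[0]            # empty sequence -> IndexError
except IndexError:
    print('Could not find Category with slug="music"')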
def on_message(self, data):
    try:
        data = json_decode(data)
    except __HOLE__:
        self._die(log_message='Unable to decode json')

    if type(data).__name__ != 'dict' or 'method' not in data:
        self._die(log_message='data is not a dict or no key "method" in data dict found. Data: %s' % data)

    # define available methods
    methods = {
        'register_myself_as_map_participant': self._register_myself_as_map_participant,
        'update_component_pos': self._update_component_pos,
        'update_component_title': self._update_component_title,
        'add_components_offset_except_one': self._add_components_offset_except_one,
        'add_component': self._add_component,
        'delete_component': self._delete_component,
    }

    # call method
    try:
        method = methods.get(data['method'], None)
        method(data)
    except TypeError:
        self._die('Unknown method "%s" called' % data['method'])
ValueError
dataset/ETHPy150Open ierror/BeautifulMind.io/beautifulmind/mindmaptornado/handlers.py/MindmapWebSocketHandler.on_message
def on_close(self):
    for map_pk in self._maps_participants.keys():
        if self in self._maps_participants.get(map_pk, []):
            self._lock.acquire()

            # remove client from map
            try:
                self._maps_participants[map_pk].remove(self)
            except __HOLE__:
                pass

            # remove map if no participants remains on map
            try:
                if not len(self._maps_participants[map_pk]):
                    del self._maps_participants[map_pk]
            except KeyError:
                pass

            self._lock.release()
            self._braodcast_map_participants_count(map_pk)
KeyError
dataset/ETHPy150Open ierror/BeautifulMind.io/beautifulmind/mindmaptornado/handlers.py/MindmapWebSocketHandler.on_close
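A minimal sketch of the masked case: both the remove-from-map branch and the cleanup branch index `self._maps_participants[map_pk]`, and a key that another handler already deleted raises KeyError, which is simply swallowed.

maps_participants = {}                          # the map entry may already be gone

try:
    maps_participants['42'].remove(object())    # missing key -> KeyError before remove runs
except KeyError:
    pass                                        # nothing left to clean up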
def _parse(self, is_source, lang_rules): """ Parses Qt file and exports all entries as GenericTranslations. """ def clj(s, w): return s[:w].replace("\n", " ").ljust(w) if lang_rules: nplural = len(lang_rules) else: nplural = self.language.get_pluralrules_numbers() try: doc = xml.dom.minidom.parseString( self.content.encode(self.format_encoding) ) except Exception, e: logger.warning("QT parsing: %s" % e.message, exc_info=True) raise LinguistParseError(_( "Your file doesn't seem to contain valid xml: %s!" % e.message )) if hasattr(doc, 'doctype') and hasattr(doc.doctype, 'name'): if doc.doctype.name != "TS": raise LinguistParseError(_("Incorrect doctype!")) else: raise LinguistParseError(_("Uploaded file has no Doctype!")) root = doc.documentElement if root.tagName != "TS": raise LinguistParseError(_("Root element is not 'TS'")) # This needed to be commented out due the 'is_source' parameter. # When is_source=True we return the value of the <source> node as the # translation for the given file, instead of the <translation> node(s). #stringset.target_language = language #language = get_attribute(root, "language", die = STRICT) i = 1 # There can be many <message> elements, they might have # 'encoding' or 'numerus' = 'yes' | 'no' attributes # if 'numerus' = 'yes' then 'translation' element contains 'numerusform' elements for context in root.getElementsByTagName("context"): context_name_element = _getElementByTagName(context, "name") if context_name_element.firstChild: if context_name_element.firstChild.nodeValue: context_name = escape_context( [context_name_element.firstChild.nodeValue]) else: context_name = [] else: context_name = [] for message in context.getElementsByTagName("message"): occurrences = [] # NB! There can be zero to many <location> elements, but all # of them must have 'filename' and 'line' attributes for location in message.getElementsByTagName("location"): if location.attributes.has_key("filename") and \ location.attributes.has_key("line"): occurrences.append("%s:%i" % ( location.attributes["filename"].value, int(location.attributes["line"].value))) elif STRICT: raise LinguistParseError(_("Malformed 'location' element")) pluralized = False if message.attributes.has_key("numerus") and \ message.attributes['numerus'].value=='yes': pluralized = True source = _getElementByTagName(message, "source") try: translation = _getElementByTagName(message, "translation") except LinguistParseError: translation = None try: ec_node = _getElementByTagName(message, "extracomment") extracomment = _getText(ec_node.childNodes) except LinguistParseError, e: extracomment = None # <commend> in ts files are also used to distinguish entries, # so we append it to the context to make the entry unique try: c_node = _getElementByTagName(message, "comment") comment_text = _getText(c_node.childNodes) if comment_text: comment = escape_context([comment_text]) else: comment = [] except LinguistParseError, e: comment = [] status = None if source.firstChild: sourceString = _getText(source.childNodes) else: sourceString = None # WTF? 
# Check whether the message is using logical id if message.attributes.has_key("id"): sourceStringText = sourceString sourceString = message.attributes['id'].value else: sourceStringText = None same_nplural = True obsolete, fuzzy = False, False messages = [] if is_source: if translation and translation.attributes.has_key("variants") and \ translation.attributes['variants'].value == 'yes': logger.error("Source file has unsupported" " variants.") raise LinguistParseError(_("Qt Linguist variants are" " not yet supported.")) # Skip obsolete strings. if translation and translation.attributes.has_key("type"): status = translation.attributes["type"].value.lower() if status == "obsolete": continue translation_text = None if translation: translation_text = _getText(translation.childNodes) messages = [(5, translation_text or sourceStringText or sourceString)] # remove unfinished/obsolete attrs from template if translation and translation.attributes.has_key("type"): status = translation.attributes["type"].value.lower() if status == "unfinished": del translation.attributes["type"] if pluralized: if translation: try: numerusforms = translation.getElementsByTagName('numerusform') messages = [] for n,f in enumerate(numerusforms): if numerusforms[n].attributes.has_key("variants") and \ numerusforms[n].attributes['variants'].value == 'yes': logger.error("Source file has unsupported" " variants.") raise LinguistParseError(_("Source file" " could not be imported: Qt Linguist" " variants are not supported.")) for n,f in enumerate(numerusforms): if numerusforms[n].attributes.has_key("variants") and \ numerusforms[n].attributes['variants'].value == 'yes': continue for n,f in enumerate(numerusforms): nf=numerusforms[n] messages.append((nplural[n], _getText(nf.childNodes) or sourceStringText or sourceString )) except LinguistParseError, e: pass else: plural_numbers = self.language.get_pluralrules_numbers() for p in plural_numbers: if p != 5: messages.append((p, sourceStringText or sourceString)) elif translation and translation.firstChild: # For messages with variants set to 'yes', we skip them # altogether. We can't support variants at the momment... if translation.attributes.has_key("variants") and \ translation.attributes['variants'].value == 'yes': continue # Skip obsolete strings. if translation.attributes.has_key("type"): status = translation.attributes["type"].value.lower() if status == "obsolete": continue if translation.attributes.has_key("type"): status = translation.attributes["type"].value.lower() if status == "unfinished" and\ not pluralized: suggestion = GenericTranslation(sourceString, _getText(translation.childNodes), context=context_name + comment, occurrences= ";".join(occurrences)) self.suggestions.strings.append(suggestion) else: logger.error("Element 'translation' attribute "\ "'type' is neither 'unfinished' nor 'obsolete'") continue if not pluralized: messages = [(5, _getText(translation.childNodes))] else: numerusforms = translation.getElementsByTagName('numerusform') try: for n,f in enumerate(numerusforms): if numerusforms[n].attributes.has_key("variants") and \ numerusforms[n].attributes['variants'].value == 'yes': raise StopIteration except __HOLE__: continue if nplural: nplural_file = len(numerusforms) if len(nplural) != nplural_file: logger.error("Passed plural rules has nplurals=%s" ", but '%s' file has nplurals=%s. String '%s'" "skipped." 
% (nplural, self.filename, nplural_file, sourceString)) same_nplural = False else: same_nplural = False if not same_nplural: # If we're missing plurals, skip them altogether continue for n,f in enumerate(numerusforms): nf=numerusforms[n] if nf.firstChild: messages.append((nplural[n], _getText(nf.childNodes))) # NB! If <translation> doesn't have type attribute, it means that string is finished if sourceString and messages: for msg in messages: self._add_translation_string( sourceString, msg[1], context = context_name + comment, rule=msg[0], occurrences = ";".join(occurrences), pluralized=pluralized, fuzzy=fuzzy, comment=extracomment, obsolete=obsolete) i += 1 if is_source: if sourceString is None: continue if message.attributes.has_key("numerus") and \ message.attributes['numerus'].value=='yes' and translation: numerusforms = translation.getElementsByTagName('numerusform') for n,f in enumerate(numerusforms): f.appendChild(doc.createTextNode( "%(hash)s_pl_%(key)s" % { 'hash': hash_tag(sourceString, context_name + comment), 'key': n } )) else: if not translation: translation = doc.createElement("translation") # Delete all child nodes. This is usefull for xml like # strings (eg html) where the translation text is split # in multiple nodes. translation.childNodes = [] translation.appendChild(doc.createTextNode( ("%(hash)s_tr" % {'hash': hash_tag( sourceString, context_name + comment)}) )) return doc
StopIteration
dataset/ETHPy150Open rvanlaar/easy-transifex/src/transifex/transifex/resources/formats/qt.py/LinguistHandler._parse
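A minimal sketch of the masked case: the parser raises StopIteration inside nested loops as a control-flow signal (a numerusform with unsupported variants), and the surrounding handler catches it to skip the message. Outside a generator this is ordinary exception handling; inside a generator body, Python 3.7+ would convert it to RuntimeError.

messages = [['ok', 'variants=yes'], ['ok', 'ok']]

for message in messages:
    try:
        for form in message:
            if form == 'variants=yes':
                raise StopIteration          # abort checking this message
    except StopIteration:
        continue                             # skip the unsupported message
    print('processing %r' % message)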
def test_max_recursion_error(self):
    """
    Overriding a method on a super class and then calling that method on
    the super class should not trigger infinite recursion. See #17011.
    """
    try:
        super(ClassDecoratedTestCase, self).test_max_recursion_error()
    except __HOLE__, e:
        self.fail()
RuntimeError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/tests/regressiontests/settings_tests/tests.py/ClassDecoratedTestCase.test_max_recursion_error
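A minimal sketch of what the test guards against: unbounded recursion raises RuntimeError ('maximum recursion depth exceeded') on Python 2; Python 3 raises RecursionError, a RuntimeError subclass, so the same handler still applies.

def recurse():
    return recurse()              # no base case

try:
    recurse()
except RuntimeError:              # RecursionError on Python 3 is caught here too
    print('maximum recursion depth exceeded')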
def main(tests=None, testdir=None, verbose=0, quiet=False, exclude=False, single=False, randomize=False, fromfile=None, findleaks=False, use_resources=None, trace=False, coverdir='coverage', runleaks=False, huntrleaks=False, verbose2=False, print_slow=False, expected=False, memo=None, junit_xml=None): """Execute a test suite. This also parses command-line options and modifies its behavior accordingly. tests -- a list of strings containing test names (optional) testdir -- the directory in which to look for tests (optional) Users other than the Python test suite will certainly want to specify testdir; if it's omitted, the directory containing the Python test suite is searched for. If the tests argument is omitted, the tests listed on the command-line will be used. If that's empty, too, then all *.py files beginning with test_ will be used. The other default arguments (verbose, quiet, exclude, single, randomize, findleaks, use_resources, trace, coverdir, and print_slow) allow programmers calling main() directly to set the values that would normally be set by flags on the command line. """ test_support.record_original_stdout(sys.stdout) try: opts, args = getopt.getopt(sys.argv[1:], 'hvqxsSrf:lu:t:TD:NLR:wM:em:j:', ['help', 'verbose', 'quiet', 'exclude', 'single', 'slow', 'random', 'fromfile', 'findleaks', 'use=', 'threshold=', 'trace', 'coverdir=', 'nocoverdir', 'runleaks', 'huntrleaks=', 'verbose2', 'memlimit=', 'expected', 'memo' ]) except getopt.error, msg: usage(2, msg) # Defaults allran = True if use_resources is None: use_resources = [] for o, a in opts: if o in ('-h', '--help'): usage(0) elif o in ('-v', '--verbose'): verbose += 1 elif o in ('-w', '--verbose2'): verbose2 = True elif o in ('-q', '--quiet'): quiet = True; verbose = 0 elif o in ('-x', '--exclude'): exclude = True allran = False elif o in ('-e', '--expected'): expected = True allran = False elif o in ('-s', '--single'): single = True elif o in ('-S', '--slow'): print_slow = True elif o in ('-r', '--randomize'): randomize = True elif o in ('-f', '--fromfile'): fromfile = a elif o in ('-l', '--findleaks'): findleaks = True elif o in ('-L', '--runleaks'): runleaks = True elif o in ('-m', '--memo'): memo = a elif o in ('-j', '--junit-xml'): junit_xml = a elif o in ('-t', '--threshold'): import gc gc.set_threshold(int(a)) elif o in ('-T', '--coverage'): trace = True elif o in ('-D', '--coverdir'): coverdir = os.path.join(os.getcwd(), a) elif o in ('-N', '--nocoverdir'): coverdir = None elif o in ('-R', '--huntrleaks'): huntrleaks = a.split(':') if len(huntrleaks) != 3: print a, huntrleaks usage(2, '-R takes three colon-separated arguments') if len(huntrleaks[0]) == 0: huntrleaks[0] = 5 else: huntrleaks[0] = int(huntrleaks[0]) if len(huntrleaks[1]) == 0: huntrleaks[1] = 4 else: huntrleaks[1] = int(huntrleaks[1]) if len(huntrleaks[2]) == 0: huntrleaks[2] = "reflog.txt" elif o in ('-M', '--memlimit'): test_support.set_memlimit(a) elif o in ('-u', '--use'): u = [x.lower() for x in a.split(',')] for r in u: if r == 'all': use_resources[:] = RESOURCE_NAMES continue remove = False if r[0] == '-': remove = True r = r[1:] if r not in RESOURCE_NAMES: usage(1, 'Invalid -u/--use option: ' + a) if remove: if r in use_resources: use_resources.remove(r) elif r not in use_resources: use_resources.append(r) else: print >>sys.stderr, ("No handler for option {0}. 
Please " "report this as a bug at http://bugs.python.org.").format(o) sys.exit(1) if single and fromfile: usage(2, "-s and -f don't go together!") good = [] bad = [] skipped = [] resource_denieds = [] if findleaks: try: if test_support.is_jython: raise ImportError() import gc except ImportError: print 'No GC available, disabling findleaks.' findleaks = False else: # Uncomment the line below to report garbage that is not # freeable by reference counting alone. By default only # garbage that is not collectable by the GC is reported. #gc.set_debug(gc.DEBUG_SAVEALL) found_garbage = [] if single: from tempfile import gettempdir filename = os.path.join(gettempdir(), 'pynexttest') try: fp = open(filename, 'r') next = fp.read().strip() tests = [next] fp.close() except IOError: pass if fromfile: tests = [] fp = open(fromfile) for line in fp: guts = line.split() # assuming no test has whitespace in its name if guts and not guts[0].startswith('#'): tests.extend(guts) fp.close() # Strip .py extensions. if args: args = map(removepy, args) allran = False if tests: tests = map(removepy, tests) stdtests = STDTESTS[:] nottests = NOTTESTS.copy() if exclude: for arg in args: if arg in stdtests: stdtests.remove(arg) nottests[:0] = args args = [] tests = tests or args or findtests(testdir, stdtests, nottests) if single: tests = tests[:1] if randomize: random.shuffle(tests) if trace: import trace tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix], trace=False, count=True) test_times = [] test_support.verbose = verbose # Tell tests to be moderately quiet test_support.use_resources = use_resources test_support.junit_xml_dir = junit_xml save_modules = sys.modules.keys() skips = _ExpectedSkips() failures = _ExpectedFailures() for test in tests: if expected and (test in skips or test in failures): continue if not quiet: print test sys.stdout.flush() if trace: # If we're tracing code coverage, then we don't exit with status # if on a false return value from main. tracer.runctx('runtest(test, verbose, quiet,' ' test_times, testdir)', globals=globals(), locals=vars()) else: try: ok = runtest(test, verbose, quiet, test_times, testdir, huntrleaks, junit_xml) except __HOLE__: # print a newline separate from the ^C print break except: raise if ok > 0: good.append(test) elif ok == 0: bad.append(test) else: skipped.append(test) if ok == -2: resource_denieds.append(test) if findleaks: gc.collect() if gc.garbage: print "Warning: test created", len(gc.garbage), print "uncollectable object(s)." # move the uncollectable objects somewhere so we don't see # them again found_garbage.extend(gc.garbage) del gc.garbage[:] # Unload the newly imported modules (best effort finalization) for module in sys.modules.keys(): if module not in save_modules and module.startswith("test."): test_support.unload(module) module = module[5:] if hasattr(_test, module): delattr(_test, module) if good and not quiet: if not bad and not skipped and len(good) > 1: print "All", print count(len(good), "test"), "OK." 
if print_slow: test_times.sort(reverse=True) print "10 slowest tests:" for time, test in test_times[:10]: print "%s: %.1fs" % (test, time) surprises = 0 if skipped and not quiet: print count(len(skipped), "test"), "skipped:" surprises += countsurprises(skips, skipped, 'skip', 'ran', allran, resource_denieds) if bad: print count(len(bad), "test"), "failed:" surprises += countsurprises(failures, bad, 'fail', 'passed', allran, resource_denieds) if verbose2 and bad: print "Re-running failed tests in verbose mode" for test in bad: print "Re-running test %r in verbose mode" % test sys.stdout.flush() try: test_support.verbose = True ok = runtest(test, True, quiet, test_times, testdir, huntrleaks) except KeyboardInterrupt: # print a newline separate from the ^C print break except: raise if single: alltests = findtests(testdir, stdtests, nottests) for i in range(len(alltests)): if tests[0] == alltests[i]: if i == len(alltests) - 1: os.unlink(filename) else: fp = open(filename, 'w') fp.write(alltests[i+1] + '\n') fp.close() break else: os.unlink(filename) if trace: r = tracer.results() r.write_results(show_missing=True, summary=True, coverdir=coverdir) if runleaks: os.system("leaks %d" % os.getpid()) if memo: savememo(memo,good,bad,skipped) sys.exit(surprises > 0)
KeyboardInterrupt
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/regrtest.py/main
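A minimal sketch (with a hypothetical `run_one` helper standing in for runtest) of the masked case: the per-test loop catches KeyboardInterrupt so Ctrl-C stops the run cleanly, prints a newline separate from the echoed ^C, and falls through to the summary instead of propagating.

def run_one(test):                # hypothetical stand-in for runtest(...)
    pass

for test in ['test_os', 'test_sys']:
    try:
        run_one(test)
    except KeyboardInterrupt:
        print('')                 # newline separate from the ^C
        break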
def runtest_inner(test, verbose, quiet, test_times, testdir=None, huntrleaks=False, junit_xml_dir=None): test_support.unload(test) if not testdir: testdir = findtestdir() if verbose: capture_stdout = None else: capture_stdout = cStringIO.StringIO() from test.junit_xml import Tee, write_direct_test try: save_stdout = sys.stdout indirect_test = None if junit_xml_dir: save_stderr = sys.stderr sys.stdout = stdout = Tee(sys.stdout) sys.stderr = stderr = Tee(sys.stderr) try: if capture_stdout: sys.stdout = capture_stdout if test.startswith('test.'): abstest = test else: # Always import it from the test package abstest = 'test.' + test start_time = time.time() the_package = __import__(abstest, globals(), locals(), []) the_module = getattr(the_package, test) # Old tests run to completion simply as a side-effect of # being imported. For tests based on unittest or doctest, # explicitly invoke their test_main() function (if it exists). indirect_test = getattr(the_module, "test_main", None) test_time = None if indirect_test is not None: indirect_test() elif junit_xml_dir: test_time = time.time() - start_time write_direct_test(junit_xml_dir, abstest, test_time, stdout=stdout.getvalue(), stderr=stderr.getvalue()) if huntrleaks: dash_R(the_module, test, indirect_test, huntrleaks) if test_time is None: test_time = time.time() - start_time test_times.append((test_time, test)) finally: sys.stdout = save_stdout if junit_xml_dir: sys.stderr = save_stderr test_time = time.time() - start_time except test_support.ResourceDenied, msg: if not quiet: print test, "skipped --", msg sys.stdout.flush() if junit_xml_dir: write_direct_test(junit_xml_dir, abstest, test_time, 'skipped', sys.exc_info(), stdout=stdout.getvalue(), stderr=stderr.getvalue()) return -2 except (__HOLE__, unittest.SkipTest), msg: if not quiet: print test, "skipped --", msg sys.stdout.flush() if junit_xml_dir: write_direct_test(junit_xml_dir, abstest, test_time, 'skipped', sys.exc_info(), stdout=stdout.getvalue(), stderr=stderr.getvalue()) return -1 except KeyboardInterrupt: raise except test_support.TestFailed, msg: print "test", test, "failed --", msg sys.stdout.flush() if junit_xml_dir and indirect_test is None: write_direct_test(junit_xml_dir, abstest, test_time, 'failure', sys.exc_info(), stdout=stdout.getvalue(), stderr=stderr.getvalue()) return 0 except: type, value = sys.exc_info()[:2] print "test", test, "crashed --", str(type) + ":", value sys.stdout.flush() if verbose: traceback.print_exc(file=sys.stdout) sys.stdout.flush() if junit_xml_dir and indirect_test is None: write_direct_test(junit_xml_dir, abstest, test_time, 'error', sys.exc_info(), stdout=stdout.getvalue(), stderr=stderr.getvalue()) return 0 else: # Except in verbose mode, tests should not print anything if verbose or huntrleaks: return 1 output = capture_stdout.getvalue() if not output: return 1 print "test", test, "produced unexpected output:" print "*" * 70 print output print "*" * 70 sys.stdout.flush() return 0
ImportError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/regrtest.py/runtest_inner
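A minimal sketch of the masked case: `__import__` of a test module that does not exist raises ImportError, which the runner reports as a skipped test rather than a failure.

try:
    the_package = __import__('test.no_such_test_module')
except ImportError as msg:
    print('test skipped -- %s' % msg)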