Dataset schema:
function: string (length 79 to 138k)
label: string (20 classes)
info: string (length 42 to 261)
def test_does_not_match_bad_arg_type_failure(self): try: assert_that('fred').does_not_match(123) fail('should have raised error') except __HOLE__ as ex: assert_that(str(ex)).is_equal_to('given pattern arg must be a string')
TypeError
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_string.py/TestString.test_does_not_match_bad_arg_type_failure
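For readability, here is the first record reconstructed as ordinary Python, with the TypeError label filled into the __HOLE__ slot. assert_that and fail are assumed to be the assertpy helpers named in the record's provenance path; the expected message is copied verbatim from the record.

from assertpy import assert_that, fail

class TestString(object):
    def test_does_not_match_bad_arg_type_failure(self):
        try:
            # a non-string pattern must be rejected before any matching happens
            assert_that('fred').does_not_match(123)
            fail('should have raised error')
        except TypeError as ex:
            assert_that(str(ex)).is_equal_to('given pattern arg must be a string')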
def test_does_not_match_bad_arg_empty_failure(self): try: assert_that('fred').does_not_match('') fail('should have raised error') except __HOLE__ as ex: assert_that(str(ex)).is_equal_to('given pattern arg must not be empty')
ValueError
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_string.py/TestString.test_does_not_match_bad_arg_empty_failure
def test_is_alpha_digit_failure(self): try: assert_that('foo123').is_alpha() fail('should have raised error') except __HOLE__ as ex: assert_that(str(ex)).is_equal_to('Expected <foo123> to contain only alphabetic chars, but did not.')
AssertionError
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_string.py/TestString.test_is_alpha_digit_failure
def test_is_alpha_space_failure(self): try: assert_that('foo bar').is_alpha() fail('should have raised error') except __HOLE__ as ex: assert_that(str(ex)).is_equal_to('Expected <foo bar> to contain only alphabetic chars, but did not.')
AssertionError
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_string.py/TestString.test_is_alpha_space_failure
def test_is_alpha_punctuation_failure(self): try: assert_that('foo,bar').is_alpha() fail('should have raised error') except __HOLE__ as ex: assert_that(str(ex)).is_equal_to('Expected <foo,bar> to contain only alphabetic chars, but did not.')
AssertionError
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_string.py/TestString.test_is_alpha_punctuation_failure
def test_is_alpha_bad_value_type_failure(self): try: assert_that(123).is_alpha() fail('should have raised error') except __HOLE__ as ex: assert_that(str(ex)).is_equal_to('val is not a string')
TypeError
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_string.py/TestString.test_is_alpha_bad_value_type_failure
def test_is_alpha_empty_value_failure(self): try: assert_that('').is_alpha() fail('should have raised error') except __HOLE__ as ex: assert_that(str(ex)).is_equal_to('val is empty')
ValueError
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_string.py/TestString.test_is_alpha_empty_value_failure
def test_is_digit_alpha_failure(self): try: assert_that('foo123').is_digit() fail('should have raised error') except __HOLE__ as ex: assert_that(str(ex)).is_equal_to('Expected <foo123> to contain only digits, but did not.')
AssertionError
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_string.py/TestString.test_is_digit_alpha_failure
def test_is_digit_space_failure(self): try: assert_that('1 000 000').is_digit() fail('should have raised error') except __HOLE__ as ex: assert_that(str(ex)).is_equal_to('Expected <1 000 000> to contain only digits, but did not.')
AssertionError
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_string.py/TestString.test_is_digit_space_failure
def test_is_digit_punctuation_failure(self): try: assert_that('-123').is_digit() fail('should have raised error') except __HOLE__ as ex: assert_that(str(ex)).is_equal_to('Expected <-123> to contain only digits, but did not.')
AssertionError
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_string.py/TestString.test_is_digit_punctuation_failure
def test_is_digit_bad_value_type_failure(self): try: assert_that(123).is_digit() fail('should have raised error') except __HOLE__ as ex: assert_that(str(ex)).is_equal_to('val is not a string')
TypeError
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_string.py/TestString.test_is_digit_bad_value_type_failure
def test_is_digit_empty_value_failure(self): try: assert_that('').is_digit() fail('should have raised error') except __HOLE__ as ex: assert_that(str(ex)).is_equal_to('val is empty')
ValueError
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_string.py/TestString.test_is_digit_empty_value_failure
def test_is_lower_failure(self): try: assert_that('FOO').is_lower() fail('should have raised error') except __HOLE__ as ex: assert_that(str(ex)).is_equal_to('Expected <FOO> to contain only lowercase chars, but did not.')
AssertionError
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_string.py/TestString.test_is_lower_failure
def test_is_lower_bad_value_type_failure(self): try: assert_that(123).is_lower() fail('should have raised error') except __HOLE__ as ex: assert_that(str(ex)).is_equal_to('val is not a string')
TypeError
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_string.py/TestString.test_is_lower_bad_value_type_failure
def test_is_lower_empty_value_failure(self): try: assert_that('').is_lower() fail('should have raised error') except __HOLE__ as ex: assert_that(str(ex)).is_equal_to('val is empty')
ValueError
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_string.py/TestString.test_is_lower_empty_value_failure
def test_is_upper_failure(self): try: assert_that('foo').is_upper() fail('should have raised error') except __HOLE__ as ex: assert_that(str(ex)).is_equal_to('Expected <foo> to contain only uppercase chars, but did not.')
AssertionError
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_string.py/TestString.test_is_upper_failure
def test_is_upper_bad_value_type_failure(self): try: assert_that(123).is_upper() fail('should have raised error') except __HOLE__ as ex: assert_that(str(ex)).is_equal_to('val is not a string')
TypeError
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_string.py/TestString.test_is_upper_bad_value_type_failure
def test_is_upper_empty_value_failure(self): try: assert_that('').is_upper() fail('should have raised error') except __HOLE__ as ex: assert_that(str(ex)).is_equal_to('val is empty')
ValueError
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_string.py/TestString.test_is_upper_empty_value_failure
def test_is_unicode_failure(self): try: assert_that(123).is_unicode() fail('should have raised error') except __HOLE__ as ex: assert_that(str(ex)).is_equal_to('Expected <123> to be unicode, but was <int>.')
AssertionError
dataset/ETHPy150Open ActivisionGameScience/assertpy/tests/test_string.py/TestString.test_is_unicode_failure
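All of the assertpy records above follow one failure-testing idiom: trigger the assertion, call fail() if no exception escapes, then verify the exception's message. A minimal stand-alone sketch of that idiom, written against the standard library only so it runs without assertpy; the int('foo') call is just a convenient stand-in for a failing operation.

def expect_failure(callable_, exc_type, expected_message):
    # run callable_ and insist it raises exc_type with exactly expected_message
    try:
        callable_()
    except exc_type as ex:
        assert str(ex) == expected_message, 'unexpected message: %s' % ex
    else:
        raise AssertionError('should have raised error')

# usage mirroring the records above
expect_failure(lambda: int('foo'), ValueError,
               "invalid literal for int() with base 10: 'foo'")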
def fromTuple(self, locationTuple): """ Read the coordinates from a tuple. :: >>> t = (('pop', 1), ('snap', -100)) >>> l = Location() >>> l.fromTuple(t) >>> print(l) <Location pop:1, snap:-100 > """ for key, value in locationTuple: try: self[key] = float(value) except __HOLE__: self[key] = tuple([float(v) for v in value])
TypeError
dataset/ETHPy150Open LettError/MutatorMath/Lib/mutatorMath/objects/location.py/Location.fromTuple
def asString(self, strict=False): """ Return the location as a string. :: >>> l = Location(pop=1, snap=(-100.0, -200)) >>> l.asString() 'pop:1, snap:(-100.000,-200.000)' """ if len(self.keys())==0: return "origin" v = [] n = [] try: for name, value in self.asTuple(): s = '' if value is None: s = "None" elif type(value) == tuple or type(value) == list: s = "(%.3f,%.3f)"%(value[0], value[1]) elif int(value) == value: s = "%d"%(int(value)) else: s = "%.3f"%(value) if s != '': n.append("%s:%s"%(name, s)) return ", ".join(n) except __HOLE__: import traceback print("Location value error:", name, value) for key, value in self.items(): print("\t\tkey:", key) print("\t\tvalue:", value) traceback.print_exc() return "error"
TypeError
dataset/ETHPy150Open LettError/MutatorMath/Lib/mutatorMath/objects/location.py/Location.asString
def isAmbivalent(self, dim=None): """ Return True if any of the factors are in fact tuples. If a dimension name is given only that dimension is tested. :: >>> l = Location(pop=1) >>> l.isAmbivalent() False >>> l = Location(pop=1, snap=(100, -100)) >>> l.isAmbivalent() True """ if dim is not None: try: return type(self[dim]) == TupleType except __HOLE__: # dimension is not present, it should be 0, so not ambivalent return False for dim, val in self.items(): if type(val) == TupleType: return True return False
KeyError
dataset/ETHPy150Open LettError/MutatorMath/Lib/mutatorMath/objects/location.py/Location.isAmbivalent
def __sub__(self, other): new = self.__class__() new.update(self) for key, value in other.items(): try: new[key] = -value except __HOLE__: new[key] = (-value[0], -value[1]) selfDim = set(self.keys()) otherDim = set(other.keys()) for key in selfDim & otherDim: ts = type(self[key])!=tuple to = type(other[key])!=tuple if ts: sx = sy = self[key] else: sx = self[key][0] sy = self[key][1] if to: ox = oy = other[key] else: ox = other[key][0] oy = other[key][1] x = sx-ox y = sy-oy if x==y: new[key] = x else: new[key] = x,y return new
TypeError
dataset/ETHPy150Open LettError/MutatorMath/Lib/mutatorMath/objects/location.py/Location.__sub__
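The four Location records above share one trick: treat a dimension value as a plain number first, and fall back to element-wise handling when the arithmetic raises TypeError because the value is really a 2-tuple. A small stand-alone sketch of that fallback; negate is a hypothetical helper, not part of MutatorMath.

def negate(value):
    # scalars negate directly; (x, y) tuples raise TypeError for unary minus
    # and are negated element by element, mirroring Location.__sub__ above
    try:
        return -value
    except TypeError:
        return tuple(-v for v in value)

assert negate(100) == -100
assert negate((100, -200)) == (-100, 200)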
def _column_builder(self, col): """Return a callable that builds a column or aggregate object""" if len(col.name) > 1: # Aggregate try: aclass = aggregate_functions[col.name[0]] except __HOLE__: raise KeyError("Unknown aggregate function %s" % col.name[0]) return lambda:aclass(col.name[1], col.alias if col.alias else '%s(%s)' % (col.name[0], col.name[1])) else: # Column return lambda:Column(col.name[0], col.alias)
KeyError
dataset/ETHPy150Open samuel/squawk/squawk/query.py/Query._column_builder
def check_constants(self): """Verify that all constant definitions evaluate to a value.""" for constant in self.constants: try: self.lookup_constant(constant) except __HOLE__ as e: fmt = "Constant '{name}' is undefined" self.append_error(fmt, name=e.args[0]) except: fmt = "Constant '{name}' has a circular reference" self.append_error(fmt, name=constant)
KeyError
dataset/ETHPy150Open EricssonResearch/calvin-base/calvin/csparser/checker.py/Checker.check_constants
def get_service(hass, config): """Get the instapush notification service.""" if not validate_config({DOMAIN: config}, {DOMAIN: [CONF_API_KEY, 'app_secret', 'event', 'tracker']}, _LOGGER): return None headers = {'x-instapush-appid': config[CONF_API_KEY], 'x-instapush-appsecret': config['app_secret']} try: response = requests.get(_RESOURCE + 'events/list', headers=headers).json() except __HOLE__: _LOGGER.error('Unexpected answer from Instapush API.') return None if 'error' in response: _LOGGER.error(response['msg']) return None if len([app for app in response if app['title'] == config['event']]) == 0: _LOGGER.error( "No app match your given value. " "Please create an app at https://instapush.im") return None return InstapushNotificationService( config[CONF_API_KEY], config['app_secret'], config['event'], config['tracker']) # pylint: disable=too-few-public-methods
ValueError
dataset/ETHPy150Open home-assistant/home-assistant/homeassistant/components/notify/instapush.py/get_service
def get_tasks(): """Get the imported task classes for each task that will be run""" task_classes = [] for task_path in TASKS: try: module, classname = task_path.rsplit('.', 1) except __HOLE__: raise ImproperlyConfigured('%s isn\'t a task module' % task_path) try: mod = import_module(module) except ImportError as e: raise ImproperlyConfigured('Error importing task %s: "%s"' % (module, e)) try: task_class = getattr(mod, classname) except AttributeError: raise ImproperlyConfigured('Task module "%s" does not define a ' '"%s" class' % (module, classname)) task_classes.append(task_class) return task_classes
ValueError
dataset/ETHPy150Open jazzband/django-discover-jenkins/discover_jenkins/runner.py/get_tasks
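The get_tasks record above resolves a dotted path like 'package.module.ClassName' and maps each possible failure (bad path, missing module, missing attribute) to a configuration error. A generic sketch of the same resolution using only importlib; it raises plain ImportError instead of Django's ImproperlyConfigured.

from importlib import import_module

def load_by_path(dotted_path):
    # 'pkg.mod.Class' -> the Class object; ValueError means there was no dot to split on
    try:
        module_path, class_name = dotted_path.rsplit('.', 1)
    except ValueError:
        raise ImportError('%r is not a dotted module path' % dotted_path)
    module = import_module(module_path)
    try:
        return getattr(module, class_name)
    except AttributeError:
        raise ImportError('module %r has no attribute %r' % (module_path, class_name))

# example
OrderedDict = load_by_path('collections.OrderedDict')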
def __getattr__(self, attr_name): try: return _wrap(self._d_[attr_name]) except __HOLE__: raise AttributeError( '%r object has no attribute %r' % (self.__class__.__name__, attr_name))
KeyError
dataset/ETHPy150Open elastic/elasticsearch-dsl-py/elasticsearch_dsl/utils.py/AttrDict.__getattr__
def __delattr__(self, attr_name): try: del self._d_[attr_name] except __HOLE__: raise AttributeError( '%r object has no attribute %r' % (self.__class__.__name__, attr_name))
KeyError
dataset/ETHPy150Open elastic/elasticsearch-dsl-py/elasticsearch_dsl/utils.py/AttrDict.__delattr__
@classmethod def get_dsl_type(cls, name): try: return cls._types[name] except __HOLE__: raise UnknownDslObject('DSL type %s does not exist.' % name)
KeyError
dataset/ETHPy150Open elastic/elasticsearch-dsl-py/elasticsearch_dsl/utils.py/DslMeta.get_dsl_type
@classmethod def get_dsl_class(cls, name): try: return cls._classes[name] except __HOLE__: raise UnknownDslObject('DSL class `%s` does not exist in %s.' % (name, cls._type_name))
KeyError
dataset/ETHPy150Open elastic/elasticsearch-dsl-py/elasticsearch_dsl/utils.py/DslBase.get_dsl_class
def __getattr__(self, name): if name.startswith('_'): raise AttributeError( '%r object has no attribute %r' % (self.__class__.__name__, name)) value = None try: value = self._params[name] except __HOLE__: # compound types should never throw AttributeError and return empty # container instead if name in self._param_defs: pinfo = self._param_defs[name] if pinfo.get('multi'): value = self._params.setdefault(name, []) elif pinfo.get('hash'): value = self._params.setdefault(name, {}) if value is None: raise AttributeError( '%r object has no attribute %r' % (self.__class__.__name__, name)) # wrap nested dicts in AttrDict for convenient access if isinstance(value, dict): return AttrDict(value) return value
KeyError
dataset/ETHPy150Open elastic/elasticsearch-dsl-py/elasticsearch_dsl/utils.py/DslBase.__getattr__
def __getattr__(self, name): try: return super(ObjectBase, self).__getattr__(name) except __HOLE__: if name in self._doc_type.mapping: f = self._doc_type.mapping[name] if hasattr(f, 'empty'): value = f.empty() if value not in SKIP_VALUES: setattr(self, name, value) value = getattr(self, name) return value raise
AttributeError
dataset/ETHPy150Open elastic/elasticsearch-dsl-py/elasticsearch_dsl/utils.py/ObjectBase.__getattr__
def to_dict(self): out = {} for k, v in iteritems(self._d_): try: f = self._doc_type.mapping[k] if f._coerce: v = f.serialize(v) except __HOLE__: pass # don't serialize empty values # careful not to include numeric zeros if v in ([], {}, None): continue out[k] = v return out
KeyError
dataset/ETHPy150Open elastic/elasticsearch-dsl-py/elasticsearch_dsl/utils.py/ObjectBase.to_dict
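Several of the elasticsearch-dsl records above translate a KeyError from the wrapped dict into an AttributeError so that attribute access behaves like a normal Python object. A minimal stand-alone sketch of that translation; DictProxy is illustrative only, not the library's actual class.

class DictProxy(object):
    def __init__(self, data):
        self._d_ = data

    def __getattr__(self, name):
        # __getattr__ only runs when normal lookup fails, so _d_ itself
        # never loops back through here
        try:
            return self._d_[name]
        except KeyError:
            # surface missing keys as AttributeError so hasattr() and
            # getattr() with a default behave the way callers expect
            raise AttributeError('%r object has no attribute %r'
                                 % (self.__class__.__name__, name))

p = DictProxy({'title': 'hello'})
assert p.title == 'hello'
assert not hasattr(p, 'missing')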
def _with_retries(self, pool, fn): """ Performs the passed function with retries against the given pool. :param pool: the connection pool to use :type pool: Pool :param fn: the function to pass a transport :type fn: function """ skip_nodes = [] def _skip_bad_nodes(transport): return transport._node not in skip_nodes retry_count = self.retries for retry in range(retry_count): try: with pool.transaction(_filter=_skip_bad_nodes) as transport: try: return fn(transport) except (__HOLE__, HTTPException) as e: if _is_retryable(e): transport._node.error_rate.incr(1) skip_nodes.append(transport._node) raise BadResource(e) else: raise except BadResource as e: if retry < (retry_count - 1): continue else: # Re-raise the inner exception raise e.args[0]
IOError
dataset/ETHPy150Open basho/riak-python-client/riak/client/transport.py/RiakClientTransport._with_retries
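The _with_retries record above retries an operation across pool nodes and remembers which nodes raised network errors so later attempts skip them. A stripped-down sketch of retry-with-exclusion, independent of riak's Pool and transport types; it only treats OSError as retryable and lets anything else propagate, as the original does.

def with_retries(nodes, fn, retries=3):
    # try fn(node) on successive healthy nodes, skipping ones that already failed
    bad = set()
    last_error = None
    for _ in range(retries):
        candidates = [n for n in nodes if n not in bad]
        if not candidates:
            break
        node = candidates[0]
        try:
            return fn(node)
        except OSError as e:
            bad.add(node)
            last_error = e
    if last_error is not None:
        raise last_error
    raise RuntimeError('no nodes available')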
def find( menu ) : s = scope( menu ) try : findDialogue = s.scriptWindow.__findDialogue except __HOLE__ : findDialogue = GafferUI.NodeFinderDialogue( s.parent ) s.scriptWindow.addChildWindow( findDialogue ) s.scriptWindow.__findDialogue = findDialogue findDialogue.setScope( s.parent ) findDialogue.setVisible( True ) ## A function suitable as the command for an Edit/Arrange menu item. It must # be invoked from a menu that has a ScriptWindow in its ancestry.
AttributeError
dataset/ETHPy150Open ImageEngine/gaffer/python/GafferUI/EditMenu.py/find
@api_endpoint(['POST']) def project_information(request): try: # TODO(marcua): Add checking for json.loads exceptions to all # endpoints. return get_project_information( json.loads(request.body.decode())['project_id']) except __HOLE__: raise BadRequest('project_id is required') except Project.DoesNotExist: raise BadRequest('No project for given id')
KeyError
dataset/ETHPy150Open unlimitedlabs/orchestra/orchestra/project_api/views.py/project_information
@api_endpoint(['POST']) def create_project(request): project_details = json.loads(request.body.decode()) try: if project_details['task_class'] == 'real': task_class = WorkerCertification.TaskClass.REAL else: task_class = WorkerCertification.TaskClass.TRAINING args = ( project_details['workflow_slug'], project_details['workflow_version_slug'], project_details['description'], project_details['priority'], task_class, project_details['project_data'], ) except __HOLE__: raise BadRequest('One of the parameters is missing') project = create_project_with_tasks(*args) return {'project_id': project.id}
KeyError
dataset/ETHPy150Open unlimitedlabs/orchestra/orchestra/project_api/views.py/create_project
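Both orchestra endpoints above reach into a decoded JSON payload and convert a missing key (KeyError) into a client-facing bad-request error. A generic sketch of that validation step; BadRequest here is a placeholder for whatever error type the web framework actually uses.

import json

class BadRequest(Exception):
    pass

def require_fields(body, *names):
    # decode the request body and insist the named keys are present
    payload = json.loads(body)
    try:
        return [payload[name] for name in names]
    except KeyError as missing:
        raise BadRequest('missing required field: %s' % missing)

# example
project_id, = require_fields('{"project_id": 7}', 'project_id')
assert project_id == 7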
def savepid (self): self._saved_pid = False if not self.pid: return True ownid = os.getpid() flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY mode = ((os.R_OK | os.W_OK) << 6) | (os.R_OK << 3) | os.R_OK try: fd = os.open(self.pid,flags,mode) except OSError: self.logger.daemon("PIDfile already exists, not updated %s" % self.pid) return False try: f = os.fdopen(fd,'w') line = "%d\n" % ownid f.write(line) f.close() self._saved_pid = True except __HOLE__: self.logger.daemon("Can not create PIDfile %s" % self.pid,'warning') return False self.logger.daemon("Created PIDfile %s with value %d" % (self.pid,ownid),'warning') return True
IOError
dataset/ETHPy150Open Exa-Networks/exabgp/lib/exabgp/reactor/daemon.py/Daemon.savepid
def removepid (self): if not self.pid or not self._saved_pid: return try: os.remove(self.pid) except __HOLE__,exc: if exc.errno == errno.ENOENT: pass else: self.logger.daemon("Can not remove PIDfile %s" % self.pid,'error') return self.logger.daemon("Removed PIDfile %s" % self.pid)
OSError
dataset/ETHPy150Open Exa-Networks/exabgp/lib/exabgp/reactor/daemon.py/Daemon.removepid
def drop_privileges (self): """return true if we are left with insecure privileges""" # os.name can be ['posix', 'nt', 'os2', 'ce', 'java', 'riscos'] if os.name not in ['posix',]: return True uid = os.getuid() gid = os.getgid() if uid and gid: return True try: user = pwd.getpwnam(self.user) nuid = int(user.pw_uid) ngid = int(user.pw_gid) except KeyError: return False # not sure you can change your gid if you do not have a pid of zero try: # we must change the GID first otherwise it may fail after change UID if not gid: os.setgid(ngid) if not uid: os.setuid(nuid) cuid = os.getuid() ceid = os.geteuid() cgid = os.getgid() if cuid < 0: cuid += (1 << 32) if cgid < 0: cgid += (1 << 32) if ceid < 0: ceid += (1 << 32) if nuid != cuid or nuid != ceid or ngid != cgid: return False except __HOLE__: return False return True
OSError
dataset/ETHPy150Open Exa-Networks/exabgp/lib/exabgp/reactor/daemon.py/Daemon.drop_privileges
def _is_socket (self, fd): try: s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_RAW) except __HOLE__: # The file descriptor is closed return False try: s.getsockopt(socket.SOL_SOCKET, socket.SO_TYPE) except socket.error,exc: # It is look like one but it is not a socket ... if exc.args[0] == errno.ENOTSOCK: return False return True
ValueError
dataset/ETHPy150Open Exa-Networks/exabgp/lib/exabgp/reactor/daemon.py/Daemon._is_socket
def daemonise (self): if not self.daemonize: return log = environment.settings().log if log.enable and log.destination.lower() in ('stdout','stderr'): self.logger.daemon('ExaBGP can not fork when logs are going to %s' % log.destination.lower(),'critical') return def fork_exit (): try: pid = os.fork() if pid > 0: os._exit(0) except __HOLE__,exc: self.logger.reactor('Can not fork, errno %d : %s' % (exc.errno,exc.strerror),'critical') # do not detach if we are already supervised or run by init like process if self._is_socket(sys.__stdin__.fileno()) or os.getppid() == 1: return fork_exit() os.setsid() fork_exit() self.silence()
OSError
dataset/ETHPy150Open Exa-Networks/exabgp/lib/exabgp/reactor/daemon.py/Daemon.daemonise
def silence (self): # closing more would close the log file too if open maxfd = 3 for fd in range(0, maxfd): try: os.close(fd) except __HOLE__: pass os.open("/dev/null", os.O_RDWR) os.dup2(0, 1) os.dup2(0, 2) # import resource # if 'linux' in sys.platform: # nofile = resource.RLIMIT_NOFILE # elif 'bsd' in sys.platform: # nofile = resource.RLIMIT_OFILE # else: # self.logger.daemon("For platform %s, can not close FDS before forking" % sys.platform) # nofile = None # if nofile: # maxfd = resource.getrlimit(nofile)[1] # if (maxfd == resource.RLIM_INFINITY): # maxfd = MAXFD # else: # maxfd = MAXFD
OSError
dataset/ETHPy150Open Exa-Networks/exabgp/lib/exabgp/reactor/daemon.py/Daemon.silence
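The Daemon records above cover the classic Unix daemon chores: exclusive PID-file creation, privilege dropping, double fork, and closing standard descriptors. The PID-file step is the most self-contained; a small sketch of it using os.O_CREAT | os.O_EXCL so two daemon instances cannot both claim the file. The path and permission bits are placeholders, not exabgp's values.

import os

def write_pidfile(path):
    # O_EXCL makes creation fail if the file already exists, so a second
    # instance cannot silently overwrite the first one's PID
    flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY
    try:
        fd = os.open(path, flags, 0o644)
    except OSError:
        return False  # already running, or a stale file was left behind
    try:
        with os.fdopen(fd, 'w') as f:
            f.write('%d\n' % os.getpid())
    except IOError:
        return False
    return True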
def find_retained_introns(gene): '''Given a bundle of transcripts, find intervals matching retained introns. A retained intron is defined as an interval from an exon/intron boundary to the next where both boundaries are in the same exon of another transcript''' intron_intervals = [GTF.toIntronIntervals(transcript) for transcript in gene] intron_intervals = list(set( itertools.chain.from_iterable(intron_intervals))) intron_intervals.sort() for transcript in gene: exons = iter(sorted(GTF.asRanges(transcript))) introns = iter(intron_intervals) retained_introns = [] try: intron = introns.next() exon = exons.next() while True: if exon[1] < intron[0]: exon = exons.next() continue if intron[0] >= exon[0] and intron[1] <= exon[1]: E.debug("exon %s of transcript %s contains intron %s" % (exon, transcript[0].transcript_id, intron)) retained_introns.append(intron) intron = introns.next() except __HOLE__: pass retained_introns = Intervals.combine(retained_introns) for intron in retained_introns: entry = GTF.Entry() entry = entry.copy(transcript[0]) entry.start = intron[0] entry.end = intron[1] yield entry
StopIteration
dataset/ETHPy150Open CGATOxford/cgat/scripts/gtf2gtf.py/find_retained_introns
def main(argv=None): if not argv: argv = sys.argv parser = E.OptionParser(version="%prog version: $Id$", usage=globals()["__doc__"]) parser.add_option("--merge-exons-distance", dest="merge_exons_distance", type="int", help="distance in nucleotides between " "exons to be merged [%default].") parser.add_option("--pattern-identifier", dest="pattern", type="string", help="pattern to use for renaming genes/transcripts. " "The pattern should contain a %i, for example " "--pattern-identifier=ENSG%010i [%default].") parser.add_option("--sort-order", dest="sort_order", type="choice", choices=("gene", "gene+transcript", "transcript", "position", "contig+gene", "position+gene", "gene+position", "gene+exon"), help="sort input data [%default].") parser.add_option("--mark-utr", dest="mark_utr", action="store_true", help="mark utr for method --merge-exons. " "[%default].") parser.add_option( "--without-utr", dest="with_utr", action="store_false", help="exclude UTR in methods --merge-exons, merge-transcripts " "and intersect-transripts. Setting this option will remove " "non-coding transcripts. " "[%default].") parser.add_option( "--filter-method", dest="filter_method", type="choice", choices=("gene", "transcript", "longest-gene", "longest-transcript", "representative-transcript", "proteincoding", "lincrna"), help="Filter method to apply. Available filters are: " "'gene': filter by gene_id given in ``--map-tsv-file``, " "'transcript': filter by transcript_id given in ``--map-tsv-file``, " "'longest-gene': output the longest gene for overlapping genes ," "'longest-transcript': output the longest transcript per gene," "'representative-transcript': output the representative transcript " "per gene. The representative transcript is the transcript " "that shares most exons with other transcripts in a gene. " "The input needs to be sorted by gene. " "'proteincoding': only output protein coding features. " "'lincrna': only output lincRNA features. " "[%default].") parser.add_option("-a", "--map-tsv-file", dest="filename_filter", type="string", metavar="tsv", help="filename of ids to map/filter [%default].") parser.add_option( "--gff-file", dest="filename_gff", type="string", metavar="GFF", help="second filename of features (see --remove-overlapping) " "[%default]") parser.add_option("--invert-filter", dest="invert_filter", action="store_true", help="when using --filter, invert selection " "(like grep -v). " "[%default].") parser.add_option("--sample-size", dest="sample_size", type="int", help="extract a random sample of size # if the option " "'--method=filter --filter-method' is set " "[%default].") parser.add_option( "--intron-min-length", dest="intron_min_length", type="int", help="minimum length for introns (for --exons-file2introns) " "[%default].") parser.add_option("--min-exons-length", dest="min_exons_length", type="int", help="minimum length for gene (sum of exons) " "(--sam-fileple-size) [%default].") parser.add_option( "--intron-border", dest="intron_border", type="int", help="number of residues to exclude at intron at either end " "(--exons-file2introns) [%default].") parser.add_option("--ignore-strand", dest="ignore_strand", action="store_true", help="remove strandedness of features (set to '.') when " "using ``transcripts2genes`` or ``filter``" "[%default].") parser.add_option("--permit-duplicates", dest="strict", action="store_false", help="permit duplicate genes. 
" "[%default]") parser.add_option( "--duplicate-feature", dest="duplicate_feature", type="choice", choices=("gene", "transcript", "both", "ucsc", "coordinates"), help="remove duplicates by gene/transcript. " "If ``ucsc`` is chosen, transcripts ending on _dup# are " "removed. This is necessary to remove duplicate entries " "that are next to each other in the sort order " "[%default]") parser.add_option("--use-gene-id", dest="use_geneid", action="store_true", help="when merging transcripts, exons or introns, use " "the parent gene_id as the transcript id.") parser.add_option("-m", "--method", dest="method", type="choice", action="append", choices=( "add-protein-id", "exons2introns", "filter", "find-retained-introns", "genes-to-unique-chunks", "intersect-transcripts", "join-exons", "merge-exons", "merge-transcripts", "merge-genes", "merge-introns", "remove-overlapping", "remove-duplicates", "rename-genes", "rename-transcripts", "rename-duplicates", "renumber-genes", "renumber-transcripts", "set-transcript-to-gene", "set-gene-to-transcript", "set-protein-to-transcript", "set-score-to-distance", "set-gene_biotype-to-source", "set-source-to-transcript_biotype", "sort", "transcript2genes", "unset-genes"), help="Method to apply [%default]." "Please only select one.") parser.set_defaults( sort_order="gene", filter_method="gene", pattern="%i", merge_exons_distance=0, filename_filter=None, intron_border=None, intron_min_length=None, sample_size=0, min_exons_length=0, ignore_strand=False, mark_utr=False, with_utr=True, invert_filter=False, duplicate_feature=None, strict=True, method=None, use_geneid=False, ) (options, args) = E.Start(parser, argv=argv) ninput, noutput, nfeatures, ndiscarded = 0, 0, 0, 0 if options.method is None: raise ValueError("please specify a --method") if len(options.method) > 1: raise ValueError("multiple --method arguements specified") else: options.method = options.method[0] if options.method == "set-transcript-to-gene": for gff in GTF.iterator(options.stdin): ninput += 1 gff.setAttribute("transcript_id", gff.gene_id) options.stdout.write("%s\n" % str(gff)) noutput += 1 nfeatures += 1 elif options.method == "set-gene_biotype-to-source": for gff in GTF.iterator(options.stdin): ninput += 1 if "gene_biotype" not in gff.attributes: gff.setAttribute("gene_biotype", gff.source) options.stdout.write("%s\n" % str(gff)) noutput += 1 nfeatures += 1 elif options.method == "set-source-to-transcript_biotype": for gff in GTF.iterator(options.stdin): ninput += 1 try: gff.source = gff.transcript_biotype except __HOLE__: pass options.stdout.write("%s\n" % str(gff)) noutput += 1 nfeatures += 1 elif options.method == "remove-duplicates": counts = collections.defaultdict(int) if options.duplicate_feature == "ucsc": store = [] remove = set() f = lambda x: x[0].transcript_id gffs = GTF.transcript_iterator( GTF.iterator(options.stdin), strict=False) outf = lambda x: "\n".join([str(y) for y in x]) for entry in gffs: ninput += 1 store.append(entry) id = f(entry) if "_dup" in id: remove.add(re.sub("_dup\d+", "", id)) remove.add(id) for entry in store: id = f(entry) if id not in remove: options.stdout.write(outf(entry) + "\n") noutput += 1 else: ndiscarded += 1 E.info("discarded duplicates for %s" % (id)) else: if options.duplicate_feature == "gene": gffs = GTF.gene_iterator( GTF.iterator(options.stdin), strict=False) f = lambda x: x[0][0].gene_id outf = lambda x: "\n".join( ["\n".join([str(y) for y in xx]) for xx in x]) elif options.duplicate_feature == "transcript": gffs = GTF.transcript_iterator( 
GTF.iterator(options.stdin), strict=False) f = lambda x: x[0].transcript_id outf = lambda x: "\n".join([str(y) for y in x]) elif options.duplicate_feature == "coordinates": gffs = GTF.chunk_iterator(GTF.iterator(options.stdin)) f = lambda x: x[0].contig + "_" + \ str(x[0].start) + "-" + str(x[0].end) outf = lambda x: "\n".join([str(y) for y in x]) store = [] for entry in gffs: ninput += 1 store.append(entry) id = f(entry) counts[id] += 1 # Assumes GTF file sorted by contig then start last_id = "" if options.duplicate_feature == "coordinates": for entry in store: id = f(entry) if id == last_id: ndiscarded += 1 E.info("discarded duplicates for %s: %i" % (id, counts[id])) else: options.stdout.write(outf(entry) + "\n") noutput += 1 last_id = id else: for entry in store: id = f(entry) if counts[id] == 1: options.stdout.write(outf(entry) + "\n") noutput += 1 else: ndiscarded += 1 E.info("discarded duplicates for %s: %i" % (id, counts[id])) elif "sort" == options.method: for gff in GTF.iterator_sorted(GTF.iterator(options.stdin), sort_order=options.sort_order): ninput += 1 options.stdout.write("%s\n" % str(gff)) noutput += 1 nfeatures += 1 elif "set-gene-to-transcript" == options.method: for gff in GTF.iterator(options.stdin): ninput += 1 gff.setAttribute("gene_id", gff.transcript_id) options.stdout.write("%s\n" % str(gff)) noutput += 1 nfeatures += 1 elif "set-protein-to-transcript" == options.method: for gff in GTF.iterator(options.stdin): ninput += 1 gff.setAttribute("protein_id", gff.transcript_id) options.stdout.write("%s\n" % str(gff)) noutput += 1 nfeatures += 1 elif "add-protein-id" == options.method: transcript2protein = IOTools.readMap( IOTools.openFile(options.filename_filter, "r")) missing = set() for gff in GTF.iterator(options.stdin): ninput += 1 if gff.transcript_id not in transcript2protein: if gff.transcript_id not in missing: E.debug( ("removing transcript '%s' due to " "missing protein id") % gff.transcript_id) missing.add(gff.transcript_id) ndiscarded += 1 continue gff.setAttribute( "protein_id", transcript2protein[gff.transcript_id]) options.stdout.write("%s\n" % str(gff)) noutput += 1 nfeatures += 1 E.info("transcripts removed due to missing protein ids: %i" % len(missing)) elif "join-exons" == options.method: for exons in GTF.transcript_iterator(GTF.iterator(options.stdin)): ninput += 1 strand = Genomics.convertStrand(exons[0].strand) contig = exons[0].contig transid = exons[0].transcript_id geneid = exons[0].gene_id biotype = exons[0].source all_start, all_end = min([x.start for x in exons]), max( [x.end for x in exons]) y = GTF.Entry() y.contig = contig y.source = biotype y.feature = "transcript" y.start = all_start y.end = all_end y.strand = strand y.transcript_id = transid y.gene_id = geneid options.stdout.write("%s\n" % str(y)) elif "merge-genes" == options.method: # merges overlapping genes # gffs = GTF.iterator_sorted_chunks( GTF.flat_gene_iterator(GTF.iterator(options.stdin)), sort_by="contig-strand-start") def iterate_chunks(gff_chunks): last = gff_chunks.next() to_join = [last] for gffs in gff_chunks: d = gffs[0].start - last[-1].end if gffs[0].contig == last[0].contig and \ gffs[0].strand == last[0].strand: assert gffs[0].start >= last[0].start, \ ("input file should be sorted by contig, strand " "and position: d=%i:\nlast=\n%s\nthis=\n%s\n") % \ (d, "\n".join([str(x) for x in last]), "\n".join([str(x) for x in gffs])) if gffs[0].contig != last[0].contig or \ gffs[0].strand != last[0].strand or \ d > 0: yield to_join to_join = [] last = gffs to_join.append(gffs) 
yield to_join raise StopIteration for chunks in iterate_chunks(gffs): ninput += 1 if len(chunks) > 1: gene_id = "merged_%s" % chunks[0][0].gene_id transcript_id = "merged_%s" % chunks[0][0].transcript_id info = ",".join([x[0].gene_id for x in chunks]) else: gene_id = chunks[0][0].gene_id transcript_id = chunks[0][0].transcript_id info = None intervals = [] for c in chunks: intervals += [(x.start, x.end) for x in c] intervals = Intervals.combine(intervals) # take single strand strand = chunks[0][0].strand for start, end in intervals: y = GTF.Entry() y.fromGTF(chunks[0][0], gene_id, transcript_id) y.start = start y.end = end y.strand = strand if info: y.addAttribute("merged", info) options.stdout.write("%s\n" % str(y)) nfeatures += 1 noutput += 1 elif options.method == "renumber-genes": map_old2new = {} for gtf in GTF.iterator(options.stdin): ninput += 1 if gtf.gene_id not in map_old2new: map_old2new[gtf.gene_id] = options.pattern % ( len(map_old2new) + 1) gtf.setAttribute("gene_id", map_old2new[gtf.gene_id]) options.stdout.write("%s\n" % str(gtf)) noutput += 1 elif options.method == "unset-genes": map_old2new = {} for gtf in GTF.iterator(options.stdin): ninput += 1 key = gtf.transcript_id if key not in map_old2new: map_old2new[key] = options.pattern % (len(map_old2new) + 1) gtf.setAttribute("gene_id", map_old2new[key]) options.stdout.write("%s\n" % str(gtf)) noutput += 1 elif options.method == "renumber-transcripts": map_old2new = {} for gtf in GTF.iterator(options.stdin): ninput += 1 key = (gtf.gene_id, gtf.transcript_id) if key not in map_old2new: map_old2new[key] = options.pattern % ( len(map_old2new) + 1) gtf.setAttribute("transcript_id", map_old2new[key]) options.stdout.write("%s\n" % str(gtf)) noutput += 1 elif options.method == "transcripts2genes": transcripts = set() genes = set() ignore_strand = options.ignore_strand for gtfs in GTF.iterator_transcripts2genes( GTF.iterator(options.stdin)): ninput += 1 for gtf in gtfs: if ignore_strand: gtf.strand = "." 
options.stdout.write("%s\n" % str(gtf)) transcripts.add(gtf.transcript_id) genes.add(gtf.gene_id) nfeatures += 1 noutput += 1 E.info("transcripts2genes: transcripts=%i, genes=%i" % (len(transcripts), len(genes))) elif options.method in ("rename-genes", "rename-transcripts"): map_old2new = IOTools.readMap( IOTools.openFile(options.filename_filter, "r")) if options.method == "rename-transcripts": is_gene_id = False elif options.method == "rename-genes": is_gene_id = True for gff in GTF.iterator(options.stdin): ninput += 1 if is_gene_id: if gff.gene_id in map_old2new: gff.setAttribute("gene_id", map_old2new[gff.gene_id]) else: E.debug("removing missing gene_id %s" % gff.gene_id) ndiscarded += 1 continue else: if gff.transcript_id in map_old2new: gff.setAttribute( "transcript_id", map_old2new[gff.transcript_id]) else: E.debug("removing missing transcript_id %s" % gff.transcript_id) ndiscarded += 1 continue noutput += 1 options.stdout.write("%s\n" % str(gff)) elif options.method == "filter": keep_genes = set() if options.filter_method == "longest-gene": iterator = GTF.flat_gene_iterator(GTF.iterator(options.stdin)) coords = [] gffs = [] for gff in iterator: gff.sort(key=lambda x: x.start) coords.append((gff[0].contig, min([x.start for x in gff]), max([x.end for x in gff]), gff[0].gene_id)) gffs.append(gff) coords.sort() last_contig = None max_end = 0 longest_gene_id = None longest_length = None for contig, start, end, gene_id in coords: ninput += 1 if contig != last_contig or start >= max_end: if longest_gene_id: keep_genes.add(longest_gene_id) longest_gene_id = gene_id longest_length = end - start max_end = end else: if end - start > longest_length: longest_length, longest_gene_id = end - start, gene_id last_contig = contig max_end = max(max_end, end) keep_genes.add(longest_gene_id) invert = options.invert_filter for gff in gffs: keep = gff[0].gene_id in keep_genes if (keep and not invert) or (not keep and invert): noutput += 1 for g in gff: nfeatures += 1 options.stdout.write("%s\n" % g) else: ndiscarded += 1 elif options.filter_method in ("longest-transcript", "representative-transcript"): iterator = GTF.gene_iterator(GTF.iterator(options.stdin)) def selectLongestTranscript(gene): r = [] for transcript in gene: transcript.sort(key=lambda x: x.start) length = transcript[-1].end - transcript[0].start r.append((length, transcript)) r.sort() return r[-1][1] def selectRepresentativeTranscript(gene): '''select a representative transcript. The representative transcript represent the largest number of exons over all transcripts. ''' all_exons = [] for transcript in gene: all_exons.extend([(x.start, x.end) for x in transcript if x.feature == "exon"]) exon_counts = {} for key, exons in itertools.groupby(all_exons): exon_counts[key] = len(list(exons)) transcript_counts = [] for transcript in gene: count = sum([exon_counts[(x.start, x.end)] for x in transcript if x.feature == "exon"]) # add transcript id to sort to provide a stable # segmentation. transcript_counts.append((count, transcript[0].transcript_id, transcript)) transcript_counts.sort() return transcript_counts[-1][-1] if options.filter_method == "longest-transcript": _select = selectLongestTranscript elif options.filter_method == "representative-transcript": _select = selectRepresentativeTranscript for gene in iterator: ninput += 1 # sort in order to make reproducible which # gene is chosen. 
transcript = _select(sorted(gene)) noutput += 1 for g in transcript: nfeatures += 1 options.stdout.write("%s\n" % g) elif options.filter_method in ("gene", "transcript"): if options.filename_filter: ids = IOTools.readList( IOTools.openFile(options.filename_filter, "r")) E.info("read %i ids" % len(ids)) ids = set(ids) by_gene = options.filter_method == "gene" by_transcript = options.filter_method == "transcript" invert = options.invert_filter ignore_strand = options.ignore_strand for gff in GTF.iterator(options.stdin): ninput += 1 keep = False if by_gene: keep = gff.gene_id in ids if by_transcript: keep = gff.transcript_id in ids if (invert and keep) or (not invert and not keep): continue if ignore_strand: gff.strand = "." options.stdout.write("%s\n" % str(gff)) nfeatures += 1 noutput += 1 elif options.sample_size: if options.filter_method == "gene": iterator = GTF.flat_gene_iterator( GTF.iterator(options.stdin)) elif options.filter_method == "transcript": iterator = GTF.transcript_iterator( GTF.iterator(options.stdin)) if options.min_exons_length: iterator = GTF.iterator_min_feature_length( iterator, min_length=options.min_exons_length, feature="exon") data = [x for x in iterator] ninput = len(data) if len(data) > options.sample_size: data = random.sample(data, options.sample_size) for d in data: noutput += 1 for dd in d: nfeatures += 1 options.stdout.write(str(dd) + "\n") else: assert False, "please supply either a filename " "with ids to filter with (--map-tsv-file) or a sample-size." elif options.filter_method in ("proteincoding", "lincrna", "processed-pseudogene"): # extract entries by transcript/gene biotype. # This filter uses a test on the source field (ENSEMBL pre v78) # a regular expression on the attributes (ENSEMBL >= v78). tag = {"proteincoding": "protein_coding", "processed-pseudogene": "processed_pseudogene", "lincrna": "lincRNA"}[options.filter_method] rx = re.compile('"%s"' % tag) if not options.invert_filter: f = lambda x: x.source == tag or rx.search(x.attributes) else: f = lambda x: x.source != tag and not rx.search(x.attributes) for gff in GTF.iterator(options.stdin): ninput += 1 if f(gff): options.stdout.write(str(gff) + "\n") noutput += 1 else: ndiscarded += 1 elif options.method == "exons2introns": for gffs in GTF.flat_gene_iterator(GTF.iterator(options.stdin)): ninput += 1 cds_ranges = GTF.asRanges(gffs, "CDS") exon_ranges = GTF.asRanges(gffs, "exon") input_ranges = Intervals.combine(cds_ranges + exon_ranges) if len(input_ranges) > 1: last = input_ranges[0][1] output_ranges = [] for start, end in input_ranges[1:]: output_ranges.append((last, start)) last = end if options.intron_border: b = options.intron_border output_ranges = [(x[0] + b, x[1] - b) for x in output_ranges] if options.intron_min_length: l = options.intron_min_length output_ranges = [ x for x in output_ranges if x[1] - x[0] > l] for start, end in output_ranges: entry = GTF.Entry() entry.copy(gffs[0]) entry.clearAttributes() entry.transcript_id = "merged" entry.feature = "intron" entry.start = start entry.end = end options.stdout.write("%s\n" % str(entry)) nfeatures += 1 noutput += 1 else: ndiscarded += 1 elif options.method == "set-score-to-distance": for gffs in GTF.transcript_iterator(GTF.iterator(options.stdin)): ninput += 1 strand = Genomics.convertStrand(gffs[0].strand) all_start, all_end = min([x.start for x in gffs]), max( [x.end for x in gffs]) if strand != ".": t = 0 if strand == "-": gffs.reverse() for gff in gffs: gff.score = t t += gff.end - gff.start if strand == "-": gffs.reverse() for 
gff in gffs: options.stdout.write("%s\n" % str(gff)) nfeatures += 1 noutput += 1 elif options.method == "remove-overlapping": index = GTF.readAndIndex( GTF.iterator(IOTools.openFile(options.filename_gff, "r"))) for gffs in GTF.transcript_iterator(GTF.iterator(options.stdin)): ninput += 1 found = False for e in gffs: if index.contains(e.contig, e.start, e.end): found = True break if found: ndiscarded += 1 else: noutput += 1 for e in gffs: nfeatures += 1 options.stdout.write("%s\n" % str(e)) elif options.method == "intersect-transcripts": for gffs in GTF.gene_iterator(GTF.iterator(options.stdin), strict=options.strict): ninput += 1 r = [] for g in gffs: if options.with_utr: ranges = GTF.asRanges(g, "exon") else: ranges = GTF.asRanges(g, "CDS") r.append(ranges) result = r[0] for x in r[1:]: result = Intervals.intersect(result, x) entry = GTF.Entry() entry.copy(gffs[0][0]) entry.clearAttributes() entry.transcript_id = "merged" entry.feature = "exon" for start, end in result: entry.start = start entry.end = end options.stdout.write("%s\n" % str(entry)) nfeatures += 1 noutput += 1 elif "rename-duplicates" == options.method: # note: this will only rename entries with "CDS" in feature column assert options.duplicate_feature in ["gene", "transcript", "both"],\ ("for renaming duplicates, --duplicate-feature must be set to one " "of 'gene', transcript' or 'both'") gene_ids = list() transcript_ids = list() gtfs = list() for gtf in GTF.iterator(options.stdin): gtfs.append(gtf) if gtf.feature == "CDS": gene_ids.append(gtf.gene_id) transcript_ids.append(gtf.transcript_id) dup_gene = [item for item in set(gene_ids) if gene_ids.count(item) > 1] dup_transcript = [item for item in set(transcript_ids) if transcript_ids.count(item) > 1] E.info("Number of duplicated gene_ids: %i" % len(dup_gene)) E.info("Number of duplicated transcript_ids: %i" % len(dup_transcript)) gene_dict = dict(zip(dup_gene, ([0] * len(dup_gene)))) transcript_dict = dict(zip(dup_transcript, ([0] * len(dup_transcript)))) for gtf in gtfs: if gtf.feature == "CDS": if options.duplicate_feature in ["both", "gene"]: if gtf.gene_id in dup_gene: gene_dict[gtf.gene_id] = gene_dict[gtf.gene_id] + 1 # TS. patch until pysam.ctabixproxies.pyx bugfixed gtf.attributes = gtf.attributes.strip() gtf.setAttribute('gene_id', gtf.gene_id + "." + str(gene_dict[gtf.gene_id])) if options.duplicate_feature in ["both", "transcript"]: if gtf.transcript_id in dup_transcript: transcript_dict[gtf.transcript_id] = \ transcript_dict[gtf.transcript_id] + 1 # TS. patch until pysam.ctabixproxies.pyx bugfixed gtf.attributes = gtf.attributes.strip() gtf.setAttribute( 'transcript_id', gtf.transcript_id + "." 
+ str(transcript_dict[gtf.transcript_id])) options.stdout.write("%s\n" % gtf) elif options.method in ("merge-exons", "merge-introns", "merge-transcripts"): for gffs in GTF.flat_gene_iterator( GTF.iterator(options.stdin), strict=options.strict): ninput += 1 cds_ranges = GTF.asRanges(gffs, "CDS") exon_ranges = GTF.asRanges(gffs, "exon") # sanity checks strands = set([x.strand for x in gffs]) contigs = set([x.contig for x in gffs]) if len(strands) > 1: raise ValueError( "can not merge gene '%s' on multiple strands: %s" % ( gffs[0].gene_id, str(strands))) if len(contigs) > 1: raise ValueError( "can not merge gene '%s' on multiple contigs: %s" % ( gffs[0].gene_id, str(contigs))) strand = Genomics.convertStrand(gffs[0].strand) utr_ranges = [] if cds_ranges and options.mark_utr: cds_start, cds_end = cds_ranges[0][0], cds_ranges[-1][1] midpoint = (cds_end - cds_start) / 2 + cds_start utr_ranges = [] for start, end in Intervals.truncate(exon_ranges, cds_ranges): if end - start > 3: if strand == ".": feature = "UTR" elif strand == "+": if start < midpoint: feature = "UTR5" else: feature = "UTR3" elif strand == "-": if start < midpoint: feature = "UTR3" else: feature = "UTR5" utr_ranges.append((feature, start, end)) try: biotypes = [x["gene_biotype"] for x in gffs] biotype = ":".join(set(biotypes)) except (KeyError, AttributeError): biotype = None def output_ranges(ranges, gffs, biotype=None, use_geneid=False): result = [] for feature, start, end in ranges: entry = GTF.Entry() entry.copy(gffs[0]) entry.clearAttributes() entry.feature = feature if use_geneid: entry.transcript_id = entry.gene_id else: entry.transcript_id = "merged" if biotype: entry.addAttribute("gene_biotype", biotype) entry.start = start entry.end = end result.append(entry) return result result = [] if options.method == "merge-exons": if options.with_utr: if options.mark_utr: result.extend(output_ranges(utr_ranges, gffs, biotype, options.use_geneid)) r = [("CDS", x, y) for x, y in Intervals.combineAtDistance( cds_ranges, options.merge_exons_distance)] else: r = [("exon", x, y) for x, y in Intervals.combineAtDistance( exon_ranges, options.merge_exons_distance)] else: r = [("CDS", x, y) for x, y in Intervals.combineAtDistance( cds_ranges, options.merge_exons_distance)] elif options.method == "merge-transcripts": if options.with_utr: r = [("exon", exon_ranges[0][0], exon_ranges[-1][1])] elif cds_ranges: r = [("exon", cds_ranges[0][0], cds_ranges[-1][1])] else: ndiscarded += 1 continue elif options.method == "merge-introns": if len(exon_ranges) >= 2: r = [("exon", exon_ranges[0][1], exon_ranges[-1][0])] else: ndiscarded += 1 continue result.extend(output_ranges(r, gffs, biotype, options.use_geneid)) result.sort(key=lambda x: x.start) for x in result: options.stdout.write("%s\n" % str(x)) nfeatures += 1 noutput += 1 elif options.method == "find-retained-introns": for gene in GTF.gene_iterator(GTF.iterator(options.stdin)): ninput += 1 found_any = False for intron in find_retained_introns(gene): found_any = True options.stdout.write("%s\n" % str(intron)) nfeatures += 1 if found_any: noutput += 1 elif options.method == "genes-to-unique-chunks": for gene in GTF.flat_gene_iterator(GTF.iterator(options.stdin)): ninput += 1 for exon in gene_to_blocks(gene): options.stdout.write("%s\n" % str(exon)) nfeatures += 1 noutput += 1 else: raise ValueError("unknown method '%s'" % options.method) E.info("ninput=%i, noutput=%i, nfeatures=%i, ndiscarded=%i" % (ninput, noutput, nfeatures, ndiscarded)) E.Stop()
AttributeError
dataset/ETHPy150Open CGATOxford/cgat/scripts/gtf2gtf.py/main
def testInterface(algo): """ Tests whether the algorithm is properly implementing the correct Blackbox-optimization interface.""" # without any arguments, initialization has to work emptyalgo = algo() try: # but not learning emptyalgo.learn(0) return "Failed to throw missing evaluator error?" except __HOLE__: pass emptyalgo.setEvaluator(sf, xa1) # not it can run emptyalgo.learn(0) # simple functions don't check for dimension mismatch algo(sf, xa1) algo(sf, xa100) # for these, either an initial point or a dimension parameter is required algo(sf, numParameters=2) try: algo(sf) return "Failed to throw unknown dimension error" except ValueError: pass # FitnessEvaluators do not require that algo(ife1) # parameter containers can be used too algo(ife2, pc2) return True
AssertionError
dataset/ETHPy150Open pybrain/pybrain/pybrain/tests/optimizationtest.py/testInterface
def testContinuousInterface(algo): """ Test the specifics for the interface for ContinuousOptimizers """ if not issubclass(algo, bbo.ContinuousOptimizer): return True # list starting points are internally converted to arrays x = algo(sf, xlist2) assert isinstance(x.bestEvaluable, ndarray), 'not converted to array' # check for dimension mismatch try: algo(ife1, xa2) return "Failed to throw dimension mismatch error" except __HOLE__: pass return True
ValueError
dataset/ETHPy150Open pybrain/pybrain/pybrain/tests/optimizationtest.py/testContinuousInterface
def testOnEvolvable(algo): if issubclass(algo, bbo.ContinuousOptimizer): return True if issubclass(algo, bbo.TopologyOptimizer): try: algo(evoEval, evo1).learn(1) return "Topology optimizers should not accept arbitrary Evolvables" except __HOLE__: return True else: algo(evoEval, evo1).learn(1) return True # the main test procedure # ------------------------
AttributeError
dataset/ETHPy150Open pybrain/pybrain/pybrain/tests/optimizationtest.py/testOnEvolvable
def find_library_file(self, dirs, lib, debug=0): shortlib = '%s.lib' % lib longlib = 'lib%s.lib' % lib # this form very rare # get EMX's default library directory search path try: emx_dirs = os.environ['LIBRARY_PATH'].split(';') except __HOLE__: emx_dirs = [] for dir in dirs + emx_dirs: shortlibp = os.path.join(dir, shortlib) longlibp = os.path.join(dir, longlib) if os.path.exists(shortlibp): return shortlibp elif os.path.exists(longlibp): return longlibp # Oops, didn't find it in *any* of 'dirs' return None # class EMXCCompiler # Because these compilers aren't configured in Python's pyconfig.h file by # default, we should at least warn the user if he is using a unmodified # version.
KeyError
dataset/ETHPy150Open babble/babble/include/jython/Lib/distutils/emxccompiler.py/EMXCCompiler.find_library_file
def check_config_h(): """Check if the current Python installation (specifically, pyconfig.h) appears amenable to building extensions with GCC. Returns a tuple (status, details), where 'status' is one of the following constants: CONFIG_H_OK all is well, go ahead and compile CONFIG_H_NOTOK doesn't look good CONFIG_H_UNCERTAIN not sure -- unable to read pyconfig.h 'details' is a human-readable string explaining the situation. Note there are two ways to conclude "OK": either 'sys.version' contains the string "GCC" (implying that this Python was built with GCC), or the installed "pyconfig.h" contains the string "__GNUC__". """ # XXX since this function also checks sys.version, it's not strictly a # "pyconfig.h" check -- should probably be renamed... from distutils import sysconfig import string # if sys.version contains GCC then python was compiled with # GCC, and the pyconfig.h file should be OK if string.find(sys.version,"GCC") >= 0: return (CONFIG_H_OK, "sys.version mentions 'GCC'") fn = sysconfig.get_config_h_filename() try: # It would probably better to read single lines to search. # But we do this only once, and it is fast enough f = open(fn) s = f.read() f.close() except __HOLE__, exc: # if we can't read this file, we cannot say it is wrong # the compiler will complain later about this file as missing return (CONFIG_H_UNCERTAIN, "couldn't read '%s': %s" % (fn, exc.strerror)) else: # "pyconfig.h" contains an "#ifdef __GNUC__" or something similar if string.find(s,"__GNUC__") >= 0: return (CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn) else: return (CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn)
IOError
dataset/ETHPy150Open babble/babble/include/jython/Lib/distutils/emxccompiler.py/check_config_h
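The find_library_file record above reads an environment variable and falls back to an empty list when it is unset. The try/except KeyError form and an os.environ.get lookup with a default are equivalent; a brief sketch of both, with LIBRARY_PATH simply being the variable that record happens to use.

import os

# try/except form, as in the record above
try:
    emx_dirs = os.environ['LIBRARY_PATH'].split(';')
except KeyError:
    emx_dirs = []

# equivalent lookup with a default, filtering out the empty string
emx_dirs = [d for d in os.environ.get('LIBRARY_PATH', '').split(';') if d]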
def dumps(self, msg, use_bin_type=False): ''' Run the correct dumps serialization format :param use_bin_type: Useful for Python 3 support. Tells msgpack to differentiate between 'str' and 'bytes' types by encoding them differently. Since this changes the wire protocol, this option should not be used outside of IPC. ''' try: if msgpack.version >= (0, 4, 0): # msgpack only supports 'use_bin_type' starting in 0.4.0. # Due to this, if we don't need it, don't pass it at all so # that under Python 2 we can still work with older versions # of msgpack. return msgpack.dumps(msg, use_bin_type=use_bin_type) else: return msgpack.dumps(msg) except (OverflowError, msgpack.exceptions.PackValueError): # msgpack can't handle the very long Python longs for jids # Convert any very long longs to strings # We borrow the technique used by TypeError below def verylong_encoder(obj): if isinstance(obj, dict): for key, value in six.iteritems(obj.copy()): obj[key] = verylong_encoder(value) return dict(obj) elif isinstance(obj, (list, tuple)): obj = list(obj) for idx, entry in enumerate(obj): obj[idx] = verylong_encoder(entry) return obj if six.PY2 and isinstance(obj, long) and long > pow(2, 64): return str(obj) elif six.PY3 and isinstance(obj, int) and int > pow(2, 64): return str(obj) else: return obj if msgpack.version >= (0, 4, 0): return msgpack.dumps(verylong_encoder(msg), use_bin_type=use_bin_type) else: return msgpack.dumps(verylong_encoder(msg)) except TypeError as e: # msgpack doesn't support datetime.datetime datatype # So here we have converted datetime.datetime to custom datatype # This is msgpack Extended types numbered 78 def default(obj): return msgpack.ExtType(78, obj) def dt_encode(obj): datetime_str = obj.strftime("%Y%m%dT%H:%M:%S.%f") if msgpack.version >= (0, 4, 0): return msgpack.packb(datetime_str, default=default, use_bin_type=use_bin_type) else: return msgpack.packb(datetime_str, default=default) def datetime_encoder(obj): if isinstance(obj, dict): for key, value in six.iteritems(obj.copy()): obj[key] = datetime_encoder(value) return dict(obj) elif isinstance(obj, (list, tuple)): obj = list(obj) for idx, entry in enumerate(obj): obj[idx] = datetime_encoder(entry) return obj if isinstance(obj, datetime.datetime): return dt_encode(obj) else: return obj if "datetime.datetime" in str(e): if msgpack.version >= (0, 4, 0): return msgpack.dumps(datetime_encoder(msg), use_bin_type=use_bin_type) else: return msgpack.dumps(datetime_encoder(msg)) if msgpack.version >= (0, 2, 0): # Should support OrderedDict serialization, so, let's # raise the exception raise # msgpack is < 0.2.0, let's make its life easier # Since OrderedDict is identified as a dictionary, we can't # make use of msgpack custom types, we will need to convert by # hand. # This means iterating through all elements of a dictionary or # list/tuple def odict_encoder(obj): if isinstance(obj, dict): for key, value in six.iteritems(obj.copy()): obj[key] = odict_encoder(value) return dict(obj) elif isinstance(obj, (list, tuple)): obj = list(obj) for idx, entry in enumerate(obj): obj[idx] = odict_encoder(entry) return obj return obj if msgpack.version >= (0, 4, 0): return msgpack.dumps(odict_encoder(msg), use_bin_type=use_bin_type) else: return msgpack.dumps(odict_encoder(msg)) except (SystemError, __HOLE__) as exc: # pylint: disable=W0705 log.critical('Unable to serialize message! Consider upgrading msgpack. ' 'Message which failed was {failed_message} ' 'with exception {exception_message}').format(msg, exc)
TypeError
dataset/ETHPy150Open saltstack/salt/salt/payload.py/Serial.dumps
def run(self, node, ip_address, platform, poller, mon_protocol, std_community, community, status): """ Create an node in an Orion monitoring platform. """ results = {} # Sort out which platform & poller to create the node on. if platform is None: try: platform = self.config['defaults']['platform'] except __HOLE__: self.send_user_error("No default Orion platform.") raise ValueError("No default Orion platform.") self.logger.info("Connecting to Orion platform: {}".format(platform)) self.connect(platform) results['platform'] = platform if self.node_exists(node, ip_address): self.logger.error( "Node ({}) or IP ({}) already in Orion platform: {}".format( node, ip_address, platform) ) self.send_user_error("Node and/or IP is already in Orion!") raise Exception("Node and/or IP already exists!") else: self.logger.info( "Checking node ({}) is not on Orion platform: {}".format( node, platform) ) # engineID if happens to be None, default to the primary. if poller is not None: engineID = self.get_engine_id(poller) else: engineID = 1 kargs = {'Caption': node, 'EngineID': engineID, 'IPAddress': ip_address } if mon_protocol == "snmpv2": kargs['ObjectSubType'] = "SNMP" kargs['SNMPVersion'] = 2 if community is not None: kargs['Community'] = community elif std_community is not None: kargs['Community'] = self.config['defaults']['snmp'][std_community] elif std_community is None: raise ValueError("Need one of community or std_community") self.logger.info("Creating Orion Node: {}".format(kargs)) orion_data = self.create('Orion.Nodes', **kargs) node_id = re.search('(\d+)$', orion_data).group(0) results['node_id'] = node_id self.logger.info("Created Orion Node: {}".format(results['node_id'])) pollers_to_add = { 'N.Details.SNMP.Generic': True, 'N.Uptime.SNMP.Generic': True, 'N.Cpu.SNMP.HrProcessorLoad': True, 'N.Memory.SNMP.NetSnmpReal': True, 'N.AssetInventory.Snmp.Generic': True, 'N.Topology_Layer3.SNMP.ipNetToMedia': True, 'N.Routing.SNMP.Ipv4CidrRoutingTable': False } if status == 'icmp': pollers_to_add['N.Status.ICMP.Native'] = True pollers_to_add['N.Status.SNMP.Native'] = False pollers_to_add['N.ResponseTime.ICMP.Native'] = True pollers_to_add['N.ResponseTime.SNMP.Native'] = False elif status == 'snmp': pollers_to_add['N.Status.ICMP.Native'] = False pollers_to_add['N.Status.SNMP.Native'] = True pollers_to_add['N.ResponseTime.ICMP.Native'] = False pollers_to_add['N.ResponseTime.SNMP.Native'] = True pollers = [] for p in pollers_to_add: pollers.append({ 'PollerType': p, 'NetObject': 'N:{}'.format(node_id), 'NetObjectType': 'N', 'NetObjectID': node_id, 'Enabled': pollers_to_add[p] }) for poller in pollers: response = self.create('Orion.Pollers', **poller) self.logger.info("Added {} ({}) poller: {}".format( poller['PollerType'], poller['Enabled'], response)) return results
IndexError
dataset/ETHPy150Open StackStorm/st2contrib/packs/orion/actions/node_create.py/NodeCreate.run
def admin(args): cfg = conf.ceph.load(args) conf_data = StringIO() cfg.write(conf_data) try: with file('%s.client.admin.keyring' % args.cluster, 'rb') as f: keyring = f.read() except: raise RuntimeError('%s.client.admin.keyring not found' % args.cluster) errors = 0 for hostname in args.client: LOG.debug('Pushing admin keys and conf to %s', hostname) try: distro = hosts.get(hostname, username=args.username) distro.conn.remote_module.write_conf( args.cluster, conf_data.getvalue(), args.overwrite_conf, ) distro.conn.remote_module.write_file( '/etc/ceph/%s.client.admin.keyring' % args.cluster, keyring, 0600, ) distro.conn.exit() except __HOLE__ as e: LOG.error(e) errors += 1 if errors: raise exc.GenericError('Failed to configure %d admin hosts' % errors)
RuntimeError
dataset/ETHPy150Open ceph/ceph-deploy/ceph_deploy/admin.py/admin
def viewhereaccessor(context): U = hereaccessor(context) i = 1 while True: try: params = context.locate(inevow.IViewParameters, depth=i) except __HOLE__: break for (cmd, args, kw) in iter(params): U = getattr(U, cmd)(*args, **kw) i += 1 return U
KeyError
dataset/ETHPy150Open twisted/nevow/nevow/url.py/viewhereaccessor
def url(self, obj): try: return self._objmap.get(obj, None) except __HOLE__: return None
TypeError
dataset/ETHPy150Open twisted/nevow/nevow/url.py/URLGenerator.url
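A small illustrative sketch of how a lookup like the one above can fail: weak-key mappings reject values that cannot be weakly referenced or hashed (the example value is hypothetical):

import weakref

registry = weakref.WeakKeyDictionary()
try:
    url = registry.get(['not', 'weakly', 'referenceable'])
except TypeError:
    url = None                                  # mirrors the None fallback above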
def do_GET(self): datadir = os.path.join(os.path.split(__file__)[0], "sample_provider_pages") # remove the first character because includes / submitted_url = self.path[1:len(self.path)+1] # separate everything after & because is id try: (url_part, arg_part) = submitted_url.split("?") except __HOLE__: url_part = submitted_url # don't try to serve up the favicon, just exit if url_part == "favicon.ico": return sample_provider_page_path = os.path.join(datadir, url_part) print sample_provider_page_path try: text = open(sample_provider_page_path).read() print text print "Found:", submitted_url self.send_response(200) self.end_headers() self.wfile.write(text) except IOError: print "Not Found:", submitted_url self.send_response(500, "Test Proxy: Unknown URL") print "done with do_GET"
ValueError
dataset/ETHPy150Open Impactstory/total-impact-core/extras/functional_tests/alt_providers_test_proxy.py/ProvidersTestProxy.do_GET
def login(request): if 'openid' in request.GET or request.method == 'POST': form = LoginForm( dict(list(request.GET.items()) + list(request.POST.items())) ) if form.is_valid(): client = _openid_consumer(request) try: auth_request = client.begin(form.cleaned_data['openid']) if QUERY_EMAIL: sreg = SRegRequest() for name in SRegFields: sreg.requestField(field_name=name, required=True) auth_request.addExtension(sreg) ax = FetchRequest() for name in AXAttributes: ax.add(AttrInfo(name, required=True)) auth_request.addExtension(ax) callback_url = reverse(callback) SocialLogin.stash_state(request) redirect_url = auth_request.redirectURL( request.build_absolute_uri('/'), request.build_absolute_uri(callback_url)) return HttpResponseRedirect(redirect_url) # UnicodeDecodeError: # see https://github.com/necaris/python3-openid/issues/1 except (__HOLE__, DiscoveryFailure) as e: if request.method == 'POST': form._errors["openid"] = form.error_class([e]) else: return render_authentication_error( request, OpenIDProvider.id, exception=e) else: form = LoginForm(initial={'next': request.GET.get('next'), 'process': request.GET.get('process')}) d = dict(form=form) return render(request, "openid/login.html", d)
UnicodeDecodeError
dataset/ETHPy150Open pennersr/django-allauth/allauth/socialaccount/providers/openid/views.py/login
def __getattr__(self, key): if key in self._registered_documents: document = self._registered_documents[key] try: return getattr(self[document.__database__][document.__collection__], key) except __HOLE__: raise AttributeError("%s: __collection__ attribute not found. " "You cannot specify the `__database__` attribute without " "the `__collection__` attribute" % key) else: if key not in self._databases: self._databases[key] = Database(self, key) return self._databases[key]
AttributeError
dataset/ETHPy150Open namlook/mongokit/mongokit/connection.py/MongoKitConnection.__getattr__
def authorize_client(client, auth_type=None, service=None, source=None,
                     scopes=None, oauth_type=None, consumer_key=None,
                     consumer_secret=None):
  """Uses command line arguments, or prompts user for token values."""
  if auth_type is None:
    auth_type = int(get_param(
        'auth_type', 'Please choose the authorization mechanism you want'
        ' to use.\n'
        '1. to use your email address and password (ClientLogin)\n'
        '2. to use a web browser to visit an auth web page (AuthSub)\n'
        '3. if you have registed to use OAuth\n'))
  # Get the scopes for the services we want to access.
  if auth_type == AUTHSUB or auth_type == OAUTH:
    if scopes is None:
      scopes = get_param(
          'scopes', 'Enter the URL prefixes (scopes) for the resources you '
          'would like to access.\nFor multiple scope URLs, place a comma '
          'between each URL.\n'
          'Example: http://www.google.com/calendar/feeds/,'
          'http://www.google.com/m8/feeds/\n').split(',')
    elif isinstance(scopes, (str, unicode)):
      scopes = scopes.split(',')
  if auth_type == CLIENT_LOGIN:
    email = get_param('email', 'Please enter your username')
    password = get_param('password', 'Password', True)
    if service is None:
      service = get_param(
          'service', 'What is the name of the service you wish to access?'
          '\n(See list:'
          ' http://code.google.com/apis/gdata/faq.html#clientlogin)')
    if source is None:
      source = get_param('source', ask=False)
    client.client_login(email, password, source=source, service=service)
  elif auth_type == AUTHSUB:
    auth_sub_token = get_param('auth_sub_token', ask=False)
    session_token = get_param('session_token', ask=False)
    private_key = None
    auth_url = None
    single_use_token = None
    rsa_private_key = get_param(
        'rsa_private_key',
        'If you want to use secure mode AuthSub, please provide the\n'
        ' location of your RSA private key which corresponds to the\n'
        ' certificate you have uploaded for your domain. If you do not\n'
        ' have an RSA key, simply press enter')
    if rsa_private_key:
      try:
        private_key_file = open(rsa_private_key, 'rb')
        private_key = private_key_file.read()
        private_key_file.close()
      except IOError:
        print 'Unable to read private key from file'
    if private_key is not None:
      if client.auth_token is None:
        if session_token:
          client.auth_token = gdata.gauth.SecureAuthSubToken(
              session_token, private_key, scopes)
          return
        elif auth_sub_token:
          client.auth_token = gdata.gauth.SecureAuthSubToken(
              auth_sub_token, private_key, scopes)
          client.upgrade_token()
          return
      auth_url = gdata.gauth.generate_auth_sub_url(
          'http://gauthmachine.appspot.com/authsub', scopes, True)
      print 'with a private key, get ready for this URL', auth_url
    else:
      if client.auth_token is None:
        if session_token:
          client.auth_token = gdata.gauth.AuthSubToken(session_token, scopes)
          return
        elif auth_sub_token:
          client.auth_token = gdata.gauth.AuthSubToken(auth_sub_token, scopes)
          client.upgrade_token()
          return
      auth_url = gdata.gauth.generate_auth_sub_url(
          'http://gauthmachine.appspot.com/authsub', scopes)
    print 'Visit the following URL in your browser to authorize this app:'
    print str(auth_url)
    print 'After agreeing to authorize the app, copy the token value from the'
    print ' URL. Example: "www.google.com/?token=ab12" token value is ab12'
    token_value = raw_input('Please enter the token value: ')
    if private_key is not None:
      single_use_token = gdata.gauth.SecureAuthSubToken(
          token_value, private_key, scopes)
    else:
      single_use_token = gdata.gauth.AuthSubToken(token_value, scopes)
    client.auth_token = single_use_token
    client.upgrade_token()
  elif auth_type == OAUTH:
    if oauth_type is None:
      oauth_type = int(get_param(
          'oauth_type', 'Please choose the authorization mechanism you want'
          ' to use.\n'
          '1. use an HMAC signature using your consumer key and secret\n'
          '2. use RSA with your private key to sign requests\n'))
    consumer_key = get_param(
        'consumer_key', 'Please enter your OAuth conumer key '
        'which identifies your app')
    if oauth_type == HMAC:
      consumer_secret = get_param(
          'consumer_secret', 'Please enter your OAuth conumer secret '
          'which you share with the OAuth provider', True)
      # Swap out this code once the client supports requesting an oauth token.
      # Get a request token.
      request_token = client.get_oauth_token(
          scopes, 'http://gauthmachine.appspot.com/oauth', consumer_key,
          consumer_secret=consumer_secret)
    elif oauth_type == RSA:
      rsa_private_key = get_param(
          'rsa_private_key',
          'Please provide the location of your RSA private key which\n'
          ' corresponds to the certificate you have uploaded for your domain.')
      try:
        private_key_file = open(rsa_private_key, 'rb')
        private_key = private_key_file.read()
        private_key_file.close()
      except __HOLE__:
        print 'Unable to read private key from file'
      request_token = client.get_oauth_token(
          scopes, 'http://gauthmachine.appspot.com/oauth', consumer_key,
          rsa_private_key=private_key)
    else:
      print 'Invalid OAuth signature type'
      return None
    # Authorize the request token in the browser.
    print 'Visit the following URL in your browser to authorize this app:'
    print str(request_token.generate_authorization_url())
    print 'After agreeing to authorize the app, copy URL from the browser\'s'
    print ' address bar.'
    url = raw_input('Please enter the url: ')
    gdata.gauth.authorize_request_token(request_token, url)
    # Exchange for an access token.
    client.auth_token = client.get_access_token(request_token)
  else:
    print 'Invalid authorization type.'
    return None

IOError
dataset/ETHPy150Open kuri65536/python-for-android/python-build/python-libs/gdata/build/lib/gdata/sample_util.py/authorize_client
def launch_gevent_wsgi_server(application, port, max_concurrent_requests, server_name='server', use_pywsgi=False, **kwargs): """Set up and launch a Gevent WSGI server in the local process. The server will run forever and shut down cleanly when receiving a SIGTERM. NOTE: Gevent monkey patching should occur prior to calling this method. Args: application - A callable that accepts two arguments, per the PEP-333 WSGI spec. port - Port that the server should run on (integer). max_concurrent_requests - The maximum number of concurrent requests to serve (integer). server_name - Optional server name to print to logs. use_pywsgi - If True, launch a gevent.pywsgi server; if False, launch a gevent.wsgi server. **kwargs - Additional keyword args are passed to the WSGIServer ctor. """ import signal import gevent from gevent import pool if use_pywsgi: from gevent import pywsgi server_class = pywsgi.WSGIServer else: from gevent import wsgi server_class = wsgi.WSGIServer wsgi_server = None def _shut_down_wsgi_server(): """Gracefully terminate the WSGI server when receiving a SIGTERM. """ print 'Stopping %s %s' % (server_class.__module__, server_name) if wsgi_server: wsgi_server.stop() gevent.signal(signal.SIGTERM, _shut_down_wsgi_server) print 'Starting %s %s' % (server_class.__module__, server_name) try: greenlet_pool = pool.Pool(max_concurrent_requests) wsgi_server = server_class( ('', port), application, spawn=greenlet_pool, log=None, **kwargs) wsgi_server.serve_forever() except __HOLE__: _shut_down_wsgi_server()
KeyboardInterrupt
dataset/ETHPy150Open tellapart/gevent_request_profiler/tellapart/frontend/util.py/launch_gevent_wsgi_server
def parse_amount(amount): to_find = [] try: if ',' in amount: # if a comma seperation (e.g. 10,11,12) is specified temp = amount.split(',') for i in temp: to_find.append(int(i)) return to_find elif '-' in amount: # if a range (e.g. 10-20) is specified temp = amount.split('-') for i in range(int(temp[0]),int(temp[1]) + 1): to_find.append(i) return to_find else: # if a single number (e.g. 123) is specified to_find.append(int(amount)) return to_find except __HOLE__: print(" ERROR: incorrect value given for update range.") exit()
ValueError
dataset/ETHPy150Open AdamGreenhill/VirusShare-Search/VirusShare-Search.py/parse_amount
def update(directory, amount, latest): try: l = int(latest) except __HOLE__: print(" ERROR: incorrect value given for latest hash release.") exit() if amount == "all": # Downloads all md5 files for i in range(0,l): downloader(directory,i) elif amount == "missing": # Finds all md5 files not in a directory to_find = find_missing(directory, l) for i in to_find: downloader(directory,i) else: # Parses amount... to_find = parse_amount(amount) for i in to_find: downloader(directory,i)
ValueError
dataset/ETHPy150Open AdamGreenhill/VirusShare-Search/VirusShare-Search.py/update
def find_yajl_cffi(ffi, required): ''' Finds and loads yajl shared object of the required major version (1, 2, ...) using cffi. ''' try: yajl = ffi.dlopen('yajl') except __HOLE__: raise YAJLImportError('Unable to load YAJL.') require_version(yajl.yajl_version(), required) return yajl
OSError
dataset/ETHPy150Open isagalaev/ijson/ijson/backends/__init__.py/find_yajl_cffi
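An illustrative sketch, assuming the cffi package is installed, of the load failure that the loader above converts into YAJLImportError when no yajl shared library is present:

from cffi import FFI

ffi = FFI()
try:
    yajl = ffi.dlopen('yajl')                   # raises OSError when the shared library is absent
except OSError:
    yajl = None                                 # caller may fall back to another backend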
def assertIsSuperAndSubsequence(self, super_seq, sub_seq, msg=None): super_seq = list(super_seq) sub_seq = list(sub_seq) current_tail = super_seq for sub_elem in sub_seq: try: super_index = current_tail.index(sub_elem) except __HOLE__: # element not found if msg is None: msg = ("%r is not subsequence of %r: " "element %r not found in tail %r" % (sub_seq, super_seq, sub_elem, current_tail)) self.fail(msg) else: current_tail = current_tail[super_index + 1:]
ValueError
dataset/ETHPy150Open openstack/taskflow/taskflow/test.py/TestCase.assertIsSuperAndSubsequence
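A minimal stand-alone illustration of the failure handled above: list.index raises this error when the element is absent from the remaining tail, which is how the subsequence check fails:

current_tail = ['a', 'b', 'c']
try:
    super_index = current_tail.index('z')       # 'z' is not in the tail
except ValueError:
    super_index = None                          # subsequence check fails at this element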
def testReadandValidateSchemaFromFile(self): infile = os.path.join(self.dirname, 'test_schema_file') f = open(infile, 'wt') f.write(test_util.GetCarsSchemaString()) f.close() read_schema = load_lib.ReadSchemaFile(infile) load_lib._ValidateExtendedSchema(read_schema) expected_schema = json.loads(test_util.GetCarsSchemaString()) self.assertEquals(expected_schema, read_schema) # append some non-json text and check failure. f = open(infile, 'at') f.write('bogus') f.close() try: load_lib.ReadSchemaFile(infile) self.fail() except __HOLE__: pass # success
ValueError
dataset/ETHPy150Open google/encrypted-bigquery-client/src/load_lib_test.py/LoadLibraryTest.testReadandValidateSchemaFromFile
def testReadandValidateNestedSchemaFromFile(self): infile = os.path.join(self.dirname, 'test_nested_schema_file') f = open(infile, 'wt') f.write(test_util.GetPlacesSchemaString()) f.close() read_schema = load_lib.ReadSchemaFile(infile) load_lib._ValidateExtendedSchema(read_schema) expected_schema = json.loads(test_util.GetPlacesSchemaString()) self.assertEquals(expected_schema, read_schema) # append some non-json text and check failure. f = open(infile, 'at') f.write('bogus') f.close() try: load_lib.ReadSchemaFile(infile) self.fail() except __HOLE__: pass # success
ValueError
dataset/ETHPy150Open google/encrypted-bigquery-client/src/load_lib_test.py/LoadLibraryTest.testReadandValidateNestedSchemaFromFile
def testReadandValidateMultipleNestedSchemaFromFile(self): infile = os.path.join(self.dirname, 'test_multiple_nested_schema_file') f = open(infile, 'wt') f.write(test_util.GetJobsSchemaString()) f.close() read_schema = load_lib.ReadSchemaFile(infile) load_lib._ValidateExtendedSchema(read_schema) expected_schema = json.loads(test_util.GetJobsSchemaString()) self.assertEquals(expected_schema, read_schema) # append some non-json text and check failure. f = open(infile, 'at') f.write('bogus') f.close() try: load_lib.ReadSchemaFile(infile) self.fail() except __HOLE__: pass # success
ValueError
dataset/ETHPy150Open google/encrypted-bigquery-client/src/load_lib_test.py/LoadLibraryTest.testReadandValidateMultipleNestedSchemaFromFile
@unittest.skip("doesn't work in docker") def test_out_of_disk_space_message(self): # this test can only be run as root becasue it needs to create # a tmpfs temporary partition to invoke the correct exception def executeTest(ramdiskdir): openFiles = Storage.OpenFiles(10) os.system('mount -t tmpfs -o size=1M tmpfs %s' % ramdiskdir) try: paths = [os.path.join(ramdiskdir, 'test-%s' % ix) for ix in range(10)] toWrite = [chr((ix + c) % 255) for c in range(1024) for ix in range(32)] * 32 for string in toWrite: path = random.choice(paths) openFiles.append(path, string) except __HOLE__ as e: self.assertEqual(e.errno, errno.ENOSPC) openFiles.shutdown() raise if os.geteuid() == 0: ramdiskdir = tempfile.mkdtemp() self.assertRaises(OSError, lambda : executeTest(ramdiskdir)) self.assertTrue(os.system('umount %s' % ramdiskdir) == 0) else: logging.warn("this test must be run as a superuser!")
OSError
dataset/ETHPy150Open ufora/ufora/ufora/distributed/SharedState/Storage/FileIO_test.py/FileIOTest.test_out_of_disk_space_message
def _add_trace_comments(engine): """Add trace comments. Augment statements with a trace of the immediate calling code for a given statement. """ import os import sys import traceback target_paths = set([ os.path.dirname(sys.modules['oslo_db'].__file__), os.path.dirname(sys.modules['sqlalchemy'].__file__) ]) try: skip_paths = set([ os.path.dirname(sys.modules['oslo_db.tests'].__file__), ]) except __HOLE__: skip_paths = set() @sqlalchemy.event.listens_for(engine, "before_cursor_execute", retval=True) def before_cursor_execute(conn, cursor, statement, parameters, context, executemany): # NOTE(zzzeek) - if different steps per DB dialect are desirable # here, switch out on engine.name for now. stack = traceback.extract_stack() our_line = None for idx, (filename, line, method, function) in enumerate(stack): for tgt in skip_paths: if filename.startswith(tgt): break else: for tgt in target_paths: if filename.startswith(tgt): our_line = idx break if our_line: break if our_line: trace = "; ".join( "File: %s (%s) %s" % ( line[0], line[1], line[2] ) # include three lines of context. for line in stack[our_line - 3:our_line] ) statement = "%s -- %s" % (statement, trace) return statement, parameters
KeyError
dataset/ETHPy150Open openstack/oslo.db/oslo_db/sqlalchemy/engines.py/_add_trace_comments
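A short self-contained sketch (assuming SQLAlchemy is installed; the comment text is illustrative) of the retval=True listener mechanism the row above relies on to rewrite statements before execution:

from sqlalchemy import create_engine, event, text

engine = create_engine('sqlite://')

@event.listens_for(engine, 'before_cursor_execute', retval=True)
def tag_statement(conn, cursor, statement, parameters, context, executemany):
    # With retval=True the listener may hand back a rewritten statement.
    return statement + ' -- tagged by listener', parameters

with engine.connect() as connection:
    connection.execute(text('SELECT 1'))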
def __setitem__(self, key, value): try: del self[key] except __HOLE__: pass self._items.append((key, value))
KeyError
dataset/ETHPy150Open benoitc/restkit/restkit/datastructures.py/MultiDict.__setitem__
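A toy stand-alone version of the same replace-on-assignment pattern; the class and names here are illustrative, not restkit's actual implementation:

class PairStore(object):
    """Minimal multi-valued mapping built on a list of (key, value) pairs."""

    def __init__(self):
        self._items = []

    def __delitem__(self, key):
        remaining = [(k, v) for k, v in self._items if k != key]
        if len(remaining) == len(self._items):
            raise KeyError(key)
        self._items = remaining

    def __setitem__(self, key, value):
        try:
            del self[key]                       # drop any earlier values for this key
        except KeyError:
            pass                                # first assignment for the key
        self._items.append((key, value))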
def fastq_stats(fastq, quiet=False): lengths = [] # how many reads are exactly this length? posquals = [] # accumulator of quality values for each position # (not all the values, but an accumulator for each value at each position) qualities = [] # quality accumulator for each position total = [] # how many reads are at least this length? # (used for dividing an accumulator later) total_reads = 0 line = 0 try: for read in fastq.fetch(quiet=quiet): # Note: all lengths are based on the FASTQ quality score, which # will be the correct length for base- and color-space files. The # sequence may have a prefix in color-space files line += 4 total_reads += 1 while len(total) <= len(read.qual): total.append(0) for x in xrange(len(read.qual)): total[x + 1] += 1 while len(lengths) <= len(read.qual): lengths.append(0) qualities.append(0) posquals.append([]) lengths[len(read.qual)] += 1 for idx, q in enumerate([ord(x) - 33 for x in read.qual]): if q < 0: q = 0 qualities[idx + 1] += q while len(posquals[idx + 1]) <= q: posquals[idx + 1].append(0) posquals[idx + 1][q] += 1 except __HOLE__: pass return FASTQStats._make([fastq, total_reads, total, lengths, qualities, posquals])
KeyboardInterrupt
dataset/ETHPy150Open ngsutils/ngsutils/ngsutils/fastq/stats.py/fastq_stats
def available_python_versions(self): """Get the executable names of available versions of Python on the system. """ for py in supported_pythons: try: check_call([py, '-c', 'import nose'], stdout=PIPE) yield py except (__HOLE__, CalledProcessError): self.unavailable_pythons.append(py)
OSError
dataset/ETHPy150Open networkx/networkx/tools/test_pr.py/TestRun.available_python_versions
def setup(self): """Prepare the repository and virtualenvs.""" try: os.mkdir(basedir) except __HOLE__ as e: if e.errno != errno.EEXIST: raise os.chdir(basedir) # Delete virtualenvs and recreate for venv in glob('venv-*'): shutil.rmtree(venv) for py in self.available_python_versions(): check_call(['virtualenv', '-p', py, '--system-site-packages', 'venv-%s' % py]) self.venvs.append((py, 'venv-%s' % py)) # Check out and update the repository if not os.path.exists('networkx'): try : check_call(['git', 'clone', nx_repository]) except CalledProcessError: check_call(['git', 'clone', nx_http_repository]) os.chdir(repodir) check_call(['git', 'checkout', 'master']) try : check_call(['git', 'pull', 'origin', 'master']) except CalledProcessError: check_call(['git', 'pull', nx_http_repository, 'master']) self.master_sha = check_output(['git', 'log', '-1', '--format=%h']).decode('ascii').strip() os.chdir(basedir)
OSError
dataset/ETHPy150Open networkx/networkx/tools/test_pr.py/TestRun.setup
def __init__(self, target, onDelete=None): """Return a weak-reference-like instance for a bound method target -- the instance-method target for the weak reference, must have im_self and im_func attributes and be reconstructable via: target.im_func.__get__( target.im_self ) which is true of built-in instance methods. onDelete -- optional callback which will be called when this weak reference ceases to be valid (i.e. either the object or the function is garbage collected). Should take a single argument, which will be passed a pointer to this object. """ def remove(weak, self=self): """Set self.isDead to true when method or instance is destroyed""" methods = self.deletionMethods[:] del self.deletionMethods[:] try: del self.__class__._allInstances[ self.key ] except __HOLE__: pass for function in methods: try: if callable( function ): function( self ) except Exception, e: try: traceback.print_exc() except AttributeError, err: print '''Exception during saferef %s cleanup function %s: %s'''%( self, function, e ) self.deletionMethods = [onDelete] self.key = self.calculateKey( target ) self.weakSelf = weakref.ref(target.im_self, remove) self.weakFunc = weakref.ref(target.im_func, remove) self.selfName = str(target.im_self) self.funcName = str(target.im_func.__name__)
KeyError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/django/dispatch/saferef.py/BoundMethodWeakref.__init__
def _description(self, test): if self._format: try: return self._format.format( method_name=str(test), short_description=test.shortDescription() or '') except __HOLE__: sys.exit(_( 'Bad format string: {format}\n' 'Replacement options are: {{short_description}} and ' '{{method_name}}').format(format=self._format)) return test.shortDescription() or str(test)
KeyError
dataset/ETHPy150Open mblayman/tappy/tap/plugins/_nose.py/TAP._description
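A minimal illustration of why the plugin above traps this error: an unknown replacement field in a user-supplied format string has no matching keyword argument (the field name here is hypothetical):

template = '{method_name} ({oops})'             # '{oops}' is not a supported field
try:
    template.format(method_name='test_ok', short_description='')
except KeyError as bad_field:
    message = 'Bad format string, unknown field: %s' % bad_field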
def children(self): """ List the children of this path object. @raise OSError: If an error occurs while listing the directory. If the error is 'serious', meaning that the operation failed due to an access violation, exhaustion of some kind of resource (file descriptors or memory), OSError or a platform-specific variant will be raised. @raise UnlistableError: If the inability to list the directory is due to this path not existing or not being a directory, the more specific OSError subclass L{UnlistableError} is raised instead. @return: an iterable of all currently-existing children of this object accessible with L{_PathHelper.child}. """ try: subnames = self.listdir() except WindowsError, winErrObj: # WindowsError is an OSError subclass, so if not for this clause # the OSError clause below would be handling these. Windows error # codes aren't the same as POSIX error codes, so we need to handle # them differently. # Under Python 2.5 on Windows, WindowsError has a winerror # attribute and an errno attribute. The winerror attribute is # bound to the Windows error code while the errno attribute is # bound to a translation of that code to a perhaps equivalent POSIX # error number. # Under Python 2.4 on Windows, WindowsError only has an errno # attribute. It is bound to the Windows error code. # For simplicity of code and to keep the number of paths through # this suite minimal, we grab the Windows error code under either # version. # Furthermore, attempting to use os.listdir on a non-existent path # in Python 2.4 will result in a Windows error code of # ERROR_PATH_NOT_FOUND. However, in Python 2.5, # ERROR_FILE_NOT_FOUND results instead. -exarkun winerror = getattr(winErrObj, 'winerror', winErrObj.errno) if winerror not in (ERROR_PATH_NOT_FOUND, ERROR_FILE_NOT_FOUND, ERROR_INVALID_NAME, ERROR_DIRECTORY): raise raise _WindowsUnlistableError(winErrObj) except __HOLE__, ose: if ose.errno not in (errno.ENOENT, errno.ENOTDIR): # Other possible errors here, according to linux manpages: # EACCES, EMIFLE, ENFILE, ENOMEM. None of these seem like the # sort of thing which should be handled normally. -glyph raise raise UnlistableError(ose) return map(self.child, subnames)
OSError
dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/python/filepath.py/AbstractFilePath.children
def restat(self, reraise=True): """ Re-calculate cached effects of 'stat'. To refresh information on this path after you know the filesystem may have changed, call this method. @param reraise: a boolean. If true, re-raise exceptions from L{os.stat}; otherwise, mark this path as not existing, and remove any cached stat information. @raise Exception: is C{reraise} is C{True} and an exception occurs while reloading metadata. """ try: self.statinfo = stat(self.path) except __HOLE__: self.statinfo = 0 if reraise: raise
OSError
dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/python/filepath.py/FilePath.restat
def touch(self): """ Updates the access and last modification times of the file at this file path to the current time. Also creates the file if it does not already exist. @raise Exception: if unable to create or modify the last modification time of the file. """ try: self.open('a').close() except __HOLE__: pass utime(self.path, None)
IOError
dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/python/filepath.py/FilePath.touch
def moveTo(self, destination, followLinks=True): """ Move self to destination - basically renaming self to whatever destination is named. If destination is an already-existing directory, moves all children to destination if destination is empty. If destination is a non-empty directory, or destination is a file, an OSError will be raised. If moving between filesystems, self needs to be copied, and everything that applies to copyTo applies to moveTo. @param destination: the destination (a FilePath) to which self should be copied @param followLinks: whether symlinks in self should be treated as links or as their targets (only applicable when moving between filesystems) """ try: os.rename(self.path, destination.path) except __HOLE__, ose: if ose.errno == errno.EXDEV: # man 2 rename, ubuntu linux 5.10 "breezy": # oldpath and newpath are not on the same mounted filesystem. # (Linux permits a filesystem to be mounted at multiple # points, but rename(2) does not work across different mount # points, even if the same filesystem is mounted on both.) # that means it's time to copy trees of directories! secsib = destination.temporarySibling() self.copyTo(secsib, followLinks) # slow secsib.moveTo(destination, followLinks) # visible # done creating new stuff. let's clean me up. mysecsib = self.temporarySibling() self.moveTo(mysecsib, followLinks) # visible mysecsib.remove() # slow else: raise else: self.changed() destination.changed()
OSError
dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/python/filepath.py/FilePath.moveTo
@classmethod def isDate(cls, value, country='nl'): """ Check if value is a valid date. @@@ Still add checking on leap year >>> v = Validator() >>> v.isDate('2003/04/04') True >>> v.isDate('04/04/2004') True >>> v.isDate('14/04/2004') True >>> v.isDate('32-04-2004') False >>> v.isDate('14-04-2004') True >>> v.isDate(' 14.04.2004') True >>> v.isDate('4-14-2004', 'usa') True >>> v.isDate('14-04-2004', 'usa') False >>> v.isDate('2000-33-44') False >>> v.isDate('2000-0202') False >>> v.isDate('2000-02-==02') False >>> v.isDate('aaa') False >>> v.isDate('2003/3') False >>> v.isDate('200-233-444') False >>> v.isDate('10-2000-12') False >>> v.isDate('10') False >>> v.isDate(10) False >>> v.isDate('') False >>> v.isDate(None) False """ monthdays = [0, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] try: split = re.findall(cls.DATEPATTERN, value) except __HOLE__: return False if not split: #print '[%s] Date has wrong format' % value return False d1 = int(split[0][0]) d2 = int(split[0][1]) d3 = int(split[0][2]) if d1 >= 1000: # d1 is year if country == 'nl': # d2 is month m = d2 d = d3 else: m = d3 d = d2 elif d3 >= 1000: # d3 is year if country == 'nl': # d2 is month m = d2 d = d1 else: m = d1 d = d2 else: # We don't support years < 1000 #print '[%s] Year is out of range' % value return False if not (1 <= m <= 12): #print '[%s] Month is out of range' % value return False if not (1 <= d <= monthdays[m]): #print '[%s] Day is out of range for this month' % value return False return True
TypeError
dataset/ETHPy150Open petrvanblokland/Xierpa3/xierpa3/toolbox/parsers/validator.py/Validator.isDate
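A tiny stand-alone example of the inputs that trigger this error in the validator above: re.findall rejects non-string values such as None or integers (the pattern below is illustrative, not the class's DATEPATTERN):

import re

pattern = r'(\d+)\D(\d+)\D(\d+)'                # illustrative date-like pattern
try:
    re.findall(pattern, None)                   # e.g. isDate(None) or isDate(10)
except TypeError:
    matched = False                             # treated as "not a valid date"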
@classmethod def isEmail(cls, s): """ Test if s is a valid email adres. Testing is done on: - pattern - Only one @ - No characters > 127 in user - No characters > 127 in domain - Valid user name pattern - Valid ip number pattern - Valid domain name pattern >>> v = Validator() >>> v.isEmail('"aaa"@bbb123.com') True >>> v.isEmail('[email protected]') True >>> v.isEmail('[email protected]') True >>> v.isEmail('[email protected]') True >>> v.isEmail('[email protected]') True >>> v.isEmail('[email protected]') True >>> v.isEmail('[email protected]') True >>> v.isEmail('[email protected]') True >>> v.isEmail('[email protected]') True >>> v.isEmail('[email protected]') True >>> v.isEmail('[email protected]') True >>> v.isEmail('<[email protected]>') False >>> v.isEmail('x###[email protected]') False >>> v.isEmail('[email protected]') False >>> v.isEmail('[email protected]') False >>> v.isEmail('[email protected]') False >>> v.isEmail('[email protected]') False >>> v.isEmail('xxx-sss@100,aaa,100,100') False >>> v.isEmail('') False >>> v.isEmail(None) False """ if not s: # Some extra safety return False try: split = re.findall(cls.EMAILPATTERN, s)[0] except __HOLE__: #print '[%s] Wrong email pattern' % s # Totally wrong pattern return False if len(split) != 2: #print '[%s] Wrong @ pattern' % s return False user, domain = split for c in user: # Test user name characters on > 127 if ord(c) > 127: #print '[%s] User character > 127' % user return False for c in domain: if ord(c) > 127: # Test domain name characters on > 127 #print '[%s] Domain character > 127' % user return False u1 = re.findall(cls.QUOTEDUSERPATTERN, user) u2 = re.findall(cls.USERPATTERN, user) if not (u1 or u2): #print '[%s] Wrong user pattern' % user return False ip = re.findall(cls.IPPATTERN, domain) # Test on ip number and domain name if ip: # Test if values in ip address are valid for t in ip[0]: v = int(t) if not (0 <= v <= 255): #print '[%s] Not within 0-255 range' % domain # Values not within 0-255 range return False d = re.findall(cls.DOMAINPATTERN, domain) if not (ip or d): #print '[%s] Wrong domain pattern' % user return False return True
IndexError
dataset/ETHPy150Open petrvanblokland/Xierpa3/xierpa3/toolbox/parsers/validator.py/Validator.isEmail
def build_extension(self, ext): """Wrap `build_extension` with `BuildFailed`.""" try: # Uncomment to test compile failures: # raise errors.CCompilerError("OOPS") build_ext.build_extension(self, ext) except ext_errors: raise BuildFailed() except __HOLE__: # this can happen on Windows 64 bit, see Python issue 7511 if "'path'" in str(sys.exc_info()[1]): # works with both py 2/3 raise BuildFailed() raise # There are a few reasons we might not be able to compile the C extension. # Figure out if we should attempt the C extension or not.
ValueError
dataset/ETHPy150Open psd-tools/psd-tools/setup.py/ve_build_ext.build_extension
def it_can_validate_a_hex_RGB_string(self, valid_fixture): str_value, exception = valid_fixture if exception is None: try: ST_HexColorRGB.validate(str_value) except __HOLE__: raise AssertionError( "string '%s' did not validate" % str_value ) else: with pytest.raises(exception): ST_HexColorRGB.validate(str_value)
ValueError
dataset/ETHPy150Open scanny/python-pptx/tests/oxml/test_simpletypes.py/DescribeST_HexColorRGB.it_can_validate_a_hex_RGB_string
def emit(self, record): """ Emit a record. Output the record to the file, catering for rollover as described in doRollover(). """ try: if self.shouldRollover(record): self.doRollover() logging.FileHandler.emit(self, record) except (__HOLE__, SystemExit): raise except: self.handleError(record)
KeyboardInterrupt
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/logging/handlers.py/BaseRotatingHandler.emit
def emit(self, record): """ Emit a record. Pickles the record and writes it to the socket in binary format. If there is an error with the socket, silently drop the packet. If there was a problem with the socket, re-establishes the socket. """ try: s = self.makePickle(record) self.send(s) except (KeyboardInterrupt, __HOLE__): raise except: self.handleError(record)
SystemExit
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/logging/handlers.py/SocketHandler.emit
def emit(self, record): """ Emit a record. The record is formatted, and then sent to the syslog server. If exception information is present, it is NOT sent to the server. """ msg = self.format(record) + '\000' """ We need to convert record level to lowercase, maybe this will change in the future. """ prio = '<%d>' % self.encodePriority(self.facility, self.mapPriority(record.levelname)) # Message is a string. Convert to bytes as required by RFC 5424 if type(msg) is unicode: msg = msg.encode('utf-8') if codecs: msg = codecs.BOM_UTF8 + msg msg = prio + msg try: if self.unixsocket: try: self.socket.send(msg) except socket.error: self._connect_unixsocket(self.address) self.socket.send(msg) elif self.socktype == socket.SOCK_DGRAM: self.socket.sendto(msg, self.address) else: self.socket.sendall(msg) except (KeyboardInterrupt, __HOLE__): raise except: self.handleError(record)
SystemExit
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/logging/handlers.py/SysLogHandler.emit
def emit(self, record): """ Emit a record. Format the record and send it to the specified addressees. """ try: import smtplib from email.utils import formatdate port = self.mailport if not port: port = smtplib.SMTP_PORT smtp = smtplib.SMTP(self.mailhost, port) msg = self.format(record) msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % ( self.fromaddr, ",".join(self.toaddrs), self.getSubject(record), formatdate(), msg) if self.username: if self.secure is not None: smtp.ehlo() smtp.starttls(*self.secure) smtp.ehlo() smtp.login(self.username, self.password) smtp.sendmail(self.fromaddr, self.toaddrs, msg) smtp.quit() except (KeyboardInterrupt, __HOLE__): raise except: self.handleError(record)
SystemExit
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/logging/handlers.py/SMTPHandler.emit
def __init__(self, appname, dllname=None, logtype="Application"): logging.Handler.__init__(self) try: import win32evtlogutil, win32evtlog self.appname = appname self._welu = win32evtlogutil if not dllname: dllname = os.path.split(self._welu.__file__) dllname = os.path.split(dllname[0]) dllname = os.path.join(dllname[0], r'win32service.pyd') self.dllname = dllname self.logtype = logtype self._welu.AddSourceToRegistry(appname, dllname, logtype) self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE self.typemap = { logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE, logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE, logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE, logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE, logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE, } except __HOLE__: print("The Python Win32 extensions for NT (service, event "\ "logging) appear not to be available.") self._welu = None
ImportError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/logging/handlers.py/NTEventLogHandler.__init__
def emit(self, record): """ Emit a record. Determine the message ID, event category and event type. Then log the message in the NT event log. """ if self._welu: try: id = self.getMessageID(record) cat = self.getEventCategory(record) type = self.getEventType(record) msg = self.format(record) self._welu.ReportEvent(self.appname, id, cat, type, [msg]) except (__HOLE__, SystemExit): raise except: self.handleError(record)
KeyboardInterrupt
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/logging/handlers.py/NTEventLogHandler.emit
def emit(self, record): """ Emit a record. Send the record to the Web server as a percent-encoded dictionary """ try: import httplib, urllib host = self.host h = httplib.HTTP(host) url = self.url data = urllib.urlencode(self.mapLogRecord(record)) if self.method == "GET": if (url.find('?') >= 0): sep = '&' else: sep = '?' url = url + "%c%s" % (sep, data) h.putrequest(self.method, url) # support multiple hosts on one IP address... # need to strip optional :port from host, if present i = host.find(":") if i >= 0: host = host[:i] h.putheader("Host", host) if self.method == "POST": h.putheader("Content-type", "application/x-www-form-urlencoded") h.putheader("Content-length", str(len(data))) h.endheaders(data if self.method == "POST" else None) h.getreply() #can't do anything with the result except (KeyboardInterrupt, __HOLE__): raise except: self.handleError(record)
SystemExit
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/logging/handlers.py/HTTPHandler.emit
def make_string_packer(padding=" ", nullterminated=False): def pack_string(space, packer, width): try: w_s = packer.args_w[packer.args_index] except __HOLE__: raise space.error(space.w_ArgumentError, "too few arguments") string = space.str_w(space.convert_type(w_s, space.w_string, "to_str")) if nullterminated: packer.result += string packer.result.append("\0") else: assert width >= 0 string = string[:width] packer.result += string packer.result.extend([padding] * (width - len(string))) packer.args_index += 1 return pack_string
IndexError
dataset/ETHPy150Open topazproject/topaz/topaz/utils/packing/stringpacking.py/make_string_packer
def annotate_stacktrace(self, stacktrace): out = '' for ln in stacktrace.splitlines(): out += ln + '\n' match = re.search(r'/static/min/(.+)(\.[0-9a-f]+)\.js:(\d+):(\d+)', ln) if match: # Get the appropriate source map for the minified file. minified_src = match.groups()[0] + '.js' index = self._index_for(minified_src) gen_line, gen_col = list(map(int, match.groups()[2:4])) # The sourcemap lib is 0-based, so subtract 1 from line and col. try: result = index.lookup(line=gen_line-1, column=gen_col-1) out += (' = %s line %d column %d\n' % (result.src, result.src_line+1, result.src_col+1)) except __HOLE__: out += ' [Unable to look up in source map]\n' if ln.startswith(' at'): out += '\n' return out
IndexError
dataset/ETHPy150Open zulip/zulip/zerver/lib/unminify.py/SourceMap.annotate_stacktrace
def is_supported(): """ Return True if Lua scripting is supported """ global _supported if _supported is not None: return _supported try: import lupa except __HOLE__: log.msg("WARNING: Lua scripting is not available because 'lupa' Python package is not installed") _supported = False return False try: lua = lupa.LuaRuntime() except lupa.LuaError as e: log.msg("WARNING: Lua scripting is not available: %r" % e) _supported = False return False _supported = True return True
ImportError
dataset/ETHPy150Open scrapinghub/splash/splash/lua.py/is_supported
def decode(bytes_sequence, encodings): """Return the first successfully decoded string""" for encoding in encodings: try: decoded = bytes_sequence.decode(encoding) return decoded except __HOLE__: # Try the next in the list. pass raise DecodeError
UnicodeDecodeError
dataset/ETHPy150Open pjdietz/rester-sublime-http-client/rester/http.py/decode
def run(self): """Method to run when the thread is started.""" if not self._validate_request(): return # Determine the class to use for the connection. if self.request.protocol == "https": try: connection_class = HTTPSConnection except __HOLE__: message = "Your Python interpreter does not have SSL. " \ "If you have cURL installed, set the http_client " \ "setting to \"curl\"." sublime.error_message(message) self.message = "Unable to make HTTPS requests." self.success = False return else: connection_class = HTTPConnection # Create the connection. conn = connection_class(self.request.host, port=self.request.port, timeout=self._timeout) try: # Body: encode and add Content-length header body_bytes = None if self.request.body: body_bytes = self.request.body.encode(self._encoding) if not self.request.get_header("Content-length"): self.request.headers.append(("Content-length", len(body_bytes))) # Insert a host header, if needed. if not self.request.get_header("host"): self.request.headers.append(("Host", self.request.host)) # Method and Path conn.putrequest(self.request.method, self.request.full_path, True, True) # Headers for key, value in self.request.headers: conn.putheader(key, value) conn.endheaders() # Body if body_bytes: conn.send(body_bytes) except socket.gaierror: self.message = "Unable to make request. " \ "Make sure the hostname is valid." self.success = False conn.close() return except ConnectionRefusedError: self.message = "Connection refused." self.success = False conn.close() return # Read the response. #noinspection PyBroadException try: time_start = time.time() resp = conn.getresponse() except socket.timeout: self.message = "Request timed out." self.success = False conn.close() return except: self.message = "Unexpected error making request." self.success = False conn.close() return # Read the response self._read_response(resp) time_end = time.time() self.elapsed = time_end - time_start conn.close() self.success = True
NameError
dataset/ETHPy150Open pjdietz/rester-sublime-http-client/rester/http.py/HttpClientRequestThread.run
def _read_response(self, curl_output): # Build a new response. self.response = Response() # Read the metadata appended to the end of the request. meta = curl_output[curl_output.rfind(b"\n\n"):] meta = meta.decode("ascii") meta = json.loads(meta) size_header = meta["size_header"] size_download = meta["size_download"] # Extract the headers and body headers = curl_output[0:size_header] body = curl_output[size_header:size_header + size_download] # Parse the headers as ASCII headers = headers.decode("ascii") headers = headers.split("\r\n") # Consume blank lines and CONTINUE status lines from headers for i in range(len(headers)): header = headers[i].upper() if header and "100 CONTINUE" not in header: headers = headers[i:] break # Read the first line as the status line. status_line = headers[0] try: (protocol, status, reason) = status_line.split(" ", 2) except __HOLE__: print(curl_output) self.message = "Unable to read response. " \ "Response may have times out." self.success = False return self.response.protocol = protocol self.response.status = int(status) self.response.reason = reason # Add each header for header in headers[1:]: if ":" in header: (key, value) = header.split(":", 1) self.response.headers.append((key.strip(), value.strip())) # Read the body self.response.body = self._read_body(body) self.success = True
ValueError
dataset/ETHPy150Open pjdietz/rester-sublime-http-client/rester/http.py/CurlRequestThread._read_response
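A minimal illustration of the parse failure handled above: a status line without a reason phrase cannot be unpacked into three fields:

status_line = 'HTTP/1.1 200'                    # reason phrase missing
try:
    protocol, status, reason = status_line.split(' ', 2)
except ValueError:
    protocol, status, reason = None, None, None # response treated as unreadable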
def test_listrecursion(self): x = [] x.append(x) try: json.dumps(x) except ValueError: pass else: self.fail("didn't raise ValueError on list recursion") x = [] y = [x] x.append(y) try: json.dumps(x) except __HOLE__: pass else: self.fail("didn't raise ValueError on alternating list recursion") y = [] x = [y, y] # ensure that the marker is cleared json.dumps(x)
ValueError
dataset/ETHPy150Open CollabQ/CollabQ/vendor/simplejson/tests/test_recursion.py/TestRecursion.test_listrecursion
def test_dictrecursion(self): x = {} x["test"] = x try: json.dumps(x) except __HOLE__: pass else: self.fail("didn't raise ValueError on dict recursion") x = {} y = {"a": x, "b": x} # ensure that the marker is cleared json.dumps(x)
ValueError
dataset/ETHPy150Open CollabQ/CollabQ/vendor/simplejson/tests/test_recursion.py/TestRecursion.test_dictrecursion
def test_defaultrecursion(self): enc = RecursiveJSONEncoder() self.assertEquals(enc.encode(JSONTestObject), '"JSONTestObject"') enc.recurse = True try: enc.encode(JSONTestObject) except __HOLE__: pass else: self.fail("didn't raise ValueError on default recursion")
ValueError
dataset/ETHPy150Open CollabQ/CollabQ/vendor/simplejson/tests/test_recursion.py/TestRecursion.test_defaultrecursion