Dataset columns:
function — string, lengths 79 to 138k (a Python function with one exception name masked as __HOLE__)
label — string, 20 classes (the exception class that fills the hole)
info — string, lengths 42 to 261 (source path: dataset/corpus/repo/.../file.py/function)
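Each record pairs a Python function whose exception name has been masked as __HOLE__ (the function column) with the exception class that belongs at that site (the label column) and the originating repository path (the info column). As a minimal sketch — assuming a hypothetical JSON-lines export with exactly these three fields, not the dataset's actual packaging — the rows could be consumed like this to rebuild the source and score a predictor:

import json

def restore_function(record):
    # Substitute the masked exception name back into the source text.
    return record['function'].replace('__HOLE__', record['label'])

def exact_match_accuracy(path, predict):
    # `predict` is any callable mapping a masked function body to a
    # predicted exception-class name; returns simple exact-match accuracy.
    total = 0
    correct = 0
    with open(path) as fh:  # hypothetical JSON-lines export of the rows
        for line in fh:
            record = json.loads(line)
            total += 1
            if predict(record['function']) == record['label']:
                correct += 1
    return correct / total if total else 0.0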
def _getRedo(self, channel):
    try:
        return self.redos[channel].pop()
    except (KeyError, __HOLE__):
        return None
IndexError
dataset/ETHPy150Open ProgVal/Limnoria/plugins/Topic/plugin.py/Topic._getRedo
def _formatTopics(self, irc, channel, topics, fit=False):
    topics = [s for s in topics if s and not s.isspace()]
    self.lastTopics[channel] = topics
    newTopic = self._joinTopic(channel, topics)
    try:
        maxLen = irc.state.supported['topiclen']
        if fit:
            while len(newTopic) > maxLen:
                topics.pop(0)
                self.lastTopics[channel] = topics
                newTopic = self._joinTopic(channel, topics)
        elif len(newTopic) > maxLen:
            if self.registryValue('recognizeTopiclen', channel):
                irc.error(format(_('That topic is too long for this '
                                   'server (maximum length: %i; this topic: '
                                   '%i).'), maxLen, len(newTopic)),
                          Raise=True)
    except __HOLE__:
        pass
    return newTopic
KeyError
dataset/ETHPy150Open ProgVal/Limnoria/plugins/Topic/plugin.py/Topic._formatTopics
def do315(self, irc, msg):
    # Try to restore the topic when not set yet.
    channel = msg.args[1]
    c = irc.state.channels.get(channel)
    if c is None or not self.registryValue('setOnJoin', channel):
        return
    if irc.nick not in c.ops and 't' in c.modes:
        self.log.debug('Not trying to restore topic in %s. I\'m not opped '
                       'and %s is +t.', channel, channel)
        return
    try:
        topics = self.lastTopics[channel]
    except __HOLE__:
        self.log.debug('No topic to auto-restore in %s.', channel)
    else:
        newTopic = self._formatTopics(irc, channel, topics)
        if c.topic == '' or (c.topic != newTopic and
                             self.registryValue('alwaysSetOnJoin', channel)):
            self._sendTopics(irc, channel, newTopic)
KeyError
dataset/ETHPy150Open ProgVal/Limnoria/plugins/Topic/plugin.py/Topic.do315
def restore(self, irc, msg, args, channel):
    """[<channel>]

    Restores the topic to the last topic set by the bot. <channel> is only
    necessary if the message isn't sent in the channel itself.
    """
    self._checkManageCapabilities(irc, msg, channel)
    try:
        topics = self.lastTopics[channel]
        if not topics:
            raise KeyError
    except __HOLE__:
        irc.error(format(_('I haven\'t yet set the topic in %s.'),
                         channel))
        return
    self._sendTopics(irc, channel, topics)
KeyError
dataset/ETHPy150Open ProgVal/Limnoria/plugins/Topic/plugin.py/Topic.restore
def refresh(self, irc, msg, args, channel):
    """[<channel>]

    Refreshes current topic set by anyone. Restores topic if empty.
    <channel> is only necessary if the message isn't sent in the channel
    itself.
    """
    self._checkManageCapabilities(irc, msg, channel)
    topic = irc.state.channels[channel].topic
    if topic:
        self._sendTopics(irc, channel, topic)
        return
    try:
        topics = self.lastTopics[channel]
        if not topics:
            raise KeyError
    except __HOLE__:
        irc.error(format(_('I haven\'t yet set the topic in %s.'),
                         channel))
        return
    self._sendTopics(irc, channel, topics)
KeyError
dataset/ETHPy150Open ProgVal/Limnoria/plugins/Topic/plugin.py/Topic.refresh
def cufftCheckStatus(status):
    """Raise an exception if the specified CUBLAS status is an error."""

    if status != 0:
        try:
            raise cufftExceptions[status]
        except __HOLE__:
            raise cufftError

# Data transformation types:
KeyError
dataset/ETHPy150Open lebedov/scikit-cuda/skcuda/cufft.py/cufftCheckStatus
def parse_metadata_line(self):
    if isinstance(self.metadata_line, dict):
        return self.metadata_line

    source = self.source
    if source is None:
        return None

    with open(source, 'r') as f:
        first_line = f.readline().strip()
        try:
            parsed_json = loads(first_line)
            if isinstance(parsed_json, dict):
                self.metadata_line = parsed_json
        except __HOLE__:
            if(constants.debug is True):
                print 'Could not parse JSON from first line: %s' % first_line
            pass
ValueError
dataset/ETHPy150Open jmathai/elodie/elodie/media/text.py/Text.parse_metadata_line
def __getitem__(self, index):
    if not isinstance(index, int):
        return self.first[index]
    try:
        return super(ElementList, self).__getitem__(index)
    except __HOLE__:
        raise ElementDoesNotExist(
            u'no elements could be found with {0} "{1}"'.format(
                self.find_by, self.query))
IndexError
dataset/ETHPy150Open cobrateam/splinter/splinter/element_list.py/ElementList.__getitem__
def __getattr__(self, name):
    try:
        return getattr(self.first, name)
    except (ElementDoesNotExist, __HOLE__):
        raise AttributeError(u"'{0}' object has no attribute '{1}'".format(
            self.__class__.__name__, name))
AttributeError
dataset/ETHPy150Open cobrateam/splinter/splinter/element_list.py/ElementList.__getattr__
def fetch(self, url, body=None, headers=None):
    if body is None:
        if url in self.get_responses:
            return self.get_responses[url]
    else:
        try:
            body.index('openid.mode=associate')
        except __HOLE__:
            pass # fall through
        else:
            assert body.find('DH-SHA1') != -1
            response = associate(
                body, self.assoc_secret, self.assoc_handle)
            self.num_assocs += 1
            return self.response(url, 200, response)

    return self.response(url, 404, 'Not found')
ValueError
dataset/ETHPy150Open CollabQ/CollabQ/openid/test/test_consumer.py/TestFetcher.fetch
def test_notAList(self):
    # XXX: should be a Message object test, not a consumer test

    # Value should be a single string.  If it's a list, it should generate
    # an exception.
    query = {'openid.mode': ['cancel']}
    try:
        r = Message.fromPostArgs(query)
    except __HOLE__, err:
        self.failUnless(str(err).find('values') != -1, err)
    else:
        self.fail("expected TypeError, got this instead: %s" % (r,))
TypeError
dataset/ETHPy150Open CollabQ/CollabQ/openid/test/test_consumer.py/TestQueryFormat.test_notAList
def iter_over_requirement(tokens):
    """Yield a single requirement 'block' (i.e. a sequence of tokens
    between comma).

    Parameters
    ----------
    tokens: iterator
        Iterator of tokens
    """
    while True:
        block = []
        token = six.advance_iterator(tokens)
        try:
            while not isinstance(token, CommaToken):
                block.append(token)
                token = six.advance_iterator(tokens)
            yield block
        except __HOLE__ as e:
            yield block
            raise e
StopIteration
dataset/ETHPy150Open enthought/depsolver/depsolver/requirement_parser.py/iter_over_requirement
def get_sq_ext(filename):
    """ Gets the squadron extension from filename.

    Keyword arguments:
        filename -- the file to get the extension from
    """
    try:
        return filename[filename.rindex('~')+1:]
    except __HOLE__:
        return ''
ValueError
dataset/ETHPy150Open gosquadron/squadron/squadron/template.py/get_sq_ext
def load_locs_json(domain, selected_loc_id=None, include_archived=False, user=None, only_administrative=False): """initialize a json location tree for drill-down controls on the client. tree is only partially initialized and branches will be filled in on the client via ajax. what is initialized: * all top level locs * if a 'selected' loc is provided, that loc and its complete ancestry only_administrative - if False get all locations if True get only administrative locations """ from .permissions import user_can_edit_location, user_can_view_location def loc_to_json(loc, project): ret = { 'name': loc.name, 'location_type': loc.location_type.name, # todo: remove when types aren't optional 'uuid': loc.location_id, 'is_archived': loc.is_archived, 'can_edit': True } if user: ret['can_edit'] = user_can_edit_location(user, loc, project) return ret project = Domain.get_by_name(domain) locations = SQLLocation.root_locations( domain, include_archive_ancestors=include_archived ) if only_administrative: locations = locations.filter(location_type__administrative=True) loc_json = [ loc_to_json(loc, project) for loc in locations if user is None or user_can_view_location(user, loc, project) ] # if a location is selected, we need to pre-populate its location hierarchy # so that the data is available client-side to pre-populate the drop-downs if selected_loc_id: selected = SQLLocation.objects.get( domain=domain, location_id=selected_loc_id ) lineage = selected.get_ancestors() parent = {'children': loc_json} for loc in lineage: children = loc.child_locations(include_archive_ancestors=include_archived) if only_administrative: children = children.filter(location_type__administrative=True) # find existing entry in the json tree that corresponds to this loc try: this_loc = [k for k in parent['children'] if k['uuid'] == loc.location_id][0] except __HOLE__: # if we couldn't find this location the view just break out of the loop. # there are some instances in viewing archived locations where we don't actually # support drilling all the way down. pass this_loc['children'] = [ loc_to_json(loc, project) for loc in children if user is None or user_can_view_location(user, loc, project) ] parent = this_loc return loc_json
IndexError
dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/locations/util.py/load_locs_json
def build_compilers(): # noinspection PyShadowingNames compilers = {} for compiler_path in settings.COMPILERS: compiler_options = {} if isinstance(compiler_path, (tuple, list)): if len(compiler_path) != 2: raise django.core.exceptions.ImproperlyConfigured( 'Compiler must be specified in the format ("path.to.CompilerClass", {{compiler options...}}),' ' got {0}'.format(compiler_path) ) compiler_path, compiler_options = compiler_path if not isinstance(compiler_options, dict): raise django.core.exceptions.ImproperlyConfigured( 'Compiler options must be a dict, got {0}'.format(compiler_options) ) try: compiler_module, compiler_classname = compiler_path.rsplit('.', 1) except __HOLE__: raise django.core.exceptions.ImproperlyConfigured('{0} isn\'t a compiler module'.format(compiler_path)) try: mod = import_module(compiler_module) except ImportError as e: raise django.core.exceptions.ImproperlyConfigured( 'Error importing compiler {0}: "{1}"'.format(compiler_module, e) ) try: compiler_class = getattr(mod, compiler_classname) except AttributeError: raise django.core.exceptions.ImproperlyConfigured( 'Compiler module "{0}" does not define a "{1}" class'.format(compiler_module, compiler_classname) ) compiler_to_add = compiler_class(**compiler_options) compiler = compilers.setdefault(compiler_class.name, compiler_to_add) if compiler_to_add != compiler: warnings.warn("Both compilers {0} and {1} have the same name.".format(compiler_to_add, compiler)) return compilers
ValueError
dataset/ETHPy150Open andreyfedoseev/django-static-precompiler/static_precompiler/utils.py/build_compilers
def get_compiler_by_name(name):
    try:
        return get_compilers()[name]
    except __HOLE__:
        raise exceptions.CompilerNotFound("There is no compiler with name '{0}'.".format(name))
KeyError
dataset/ETHPy150Open andreyfedoseev/django-static-precompiler/static_precompiler/utils.py/get_compiler_by_name
def o_string(self, s, toenc, fromenc='latin_1'): """ Converts a String or Unicode String to a byte string of specified encoding. @param toenc: Encoding which we wish to convert to. This can be either ID3V2_FIELD_ENC_* or the actual python encoding type @param fromenc: converting from encoding specified """ # sanitise input - convert to string repr try: if type(encodings[toenc]) == types.StringType: toenc = encodings[toenc] except KeyError: toenc = 'latin_1' outstring = '' # make sure string is of a type we understand if type(s) not in [types.StringType, types.UnicodeType]: s = unicode(s) if type(s) == types.StringType: if toenc == fromenc: # don't need any conversion here outstring = s else: try: outstring = s.decode(fromenc).encode(toenc) except (UnicodeEncodeError, __HOLE__): warn("o_string: frame conversion failed. leaving as is.") outstring = s elif type(s) == types.UnicodeType: try: outstring = s.encode(toenc) except UnicodeEncodeError, err: warn("o_string: frame conversion failed - leaving empty. %s" %\ err) outstring = '' return outstring
UnicodeDecodeError
dataset/ETHPy150Open Ciantic/pytagger/tagger/id3v2frame.py/ID3v2BaseFrame.o_string
def x_text(self):
    """
    Extract Text Fields

    @todo: handle multiple strings seperated by \x00

    sets: encoding, strings
    """
    data = self.rawdata
    self.encoding = encodings[ord(data[0])]
    rawtext = data[1:]

    if normalize_encoding(self.encoding) == 'latin_1':
        text = rawtext
        self.strings = text.split('\x00')
    else:
        text = rawtext.decode(self.encoding)
        if is_double_byte(self.encoding):
            self.strings = text.split('\x00\x00')
        else:
            self.strings = text.split('\x00')

    try:
        dummy = text.encode('utf_8')
        debug('Read Field: %s Len: %d Enc: %s Text: %s' %
              (self.fid, self.length, self.encoding, str([text])))
    except __HOLE__:
        debug('Read Field: %s Len: %d Enc: %s Text: %s (Err)' %
              (self.fid, self.length, self.encoding, str([text])))
UnicodeDecodeError
dataset/ETHPy150Open Ciantic/pytagger/tagger/id3v2frame.py/ID3v2BaseFrame.x_text
def load_tests(loader, tests, ignore):
    try:
        import sklearn
    except __HOLE__:
        pass
    else:
        tests.addTests(doctest.DocTestSuite())
    return tests
ImportError
dataset/ETHPy150Open Twangist/log_calls/log_calls/tests/test_with_sklearn/test_decorate_sklearn_KMeans_functions.py/load_tests
def clean(self): # if it's inactive, then do no validation if self.is_active is False: return # check against TWITTER_DEFAULT_USERNAME if self.user.username == settings.TWITTER_DEFAULT_USERNAME: raise ValidationError('''Streamsample is also configured to authenticate as \'%s\'. Please select a different user or mark this filter as inactive.''' % self.user.username) # check against other active TwitterFilters' user.usernames conflicting_tfs = \ TwitterFilter.objects.exclude(id=self.id).\ filter(is_active=True, user__username=self.user.username) if conflicting_tfs: raise ValidationError('''Filter %d is active and is configured to authenticate as \'%s\'. Please select a different user or mark this filter as inactive.''' % (conflicting_tfs[0].id, self.user.username)) # update line-breaks in words with spaces if self.words != '': wrd = self.words wrd = re.sub('\n|\r', ' ', wrd) self.words = wrd # replace line-breaks with space if self.people != '': ppl = self.people ppl = re.sub('\n|\r', ' ', ppl) ppl = ppl.lstrip().rstrip().lstrip(',').rstrip(',') ppl = re.sub('\s*,\s*', ',', ppl) if ' ' in ''.join(ppl): raise ValidationError("Please use commas to separate \ the list of twitter usernames") else: ppl = re.sub(',', ', ', ppl) self.people = ppl # check against values and length if self.locations != '': self.locations = re.sub('\n|\r|\s', '', self.locations) # Ensure it's a list of 4-tuples loclist = self.locations.split(',') if len(loclist) % 4 != 0: raise ValidationError("Locations must include valid, \ comma-separated bounding boxes.") # Each lat/long value must be numeric and between -180 and 180 try: for loc in loclist: if not(-180 <= float(loc) < 180): raise ValidationError("Invalid bounding box; each \ long/lat value must be between -180 and 180") except __HOLE__: raise ValidationError("Invalid bounding box; each \ long/lat value must be between -180 and 180")
ValueError
dataset/ETHPy150Open gwu-libraries/social-feed-manager/sfm/ui/models.py/TwitterFilter.clean
@property
def apps(self):

    INSTALLED_APPS = []

    try:
        import whoosh
    except Exception as e:
        try:
            import haystack
        except __HOLE__ as e:
            warnings.warn(
                'Haystack search engine is disabled because: {}'.format(e))
        except ImproperlyConfigured as e:
            warnings.warn(
                'Haystack search engine is disabled because: {}'.format(e))
        else:
            INSTALLED_APPS += ['haystack']
    else:
        INSTALLED_APPS += ['whoosh', 'haystack']

    return INSTALLED_APPS + ['leonardo.module.search']
ImportError
dataset/ETHPy150Open django-leonardo/django-leonardo/leonardo/module/search/__init__.py/Default.apps
def __call__(self, environ, start_response): # Request for this state, modified by replace_start_response() # and used when an error is being reported. state = {} def replacement_start_response(status, headers, exc_info=None): """Overrides the default response to make errors parsable.""" try: status_code = int(status.split(' ')[0]) state['status_code'] = status_code except (__HOLE__, TypeError): # pragma: nocover raise Exception(_( 'ErrorDocumentMiddleware received an invalid ' 'status %s') % status) else: if (state['status_code'] // 100) not in (2, 3): # Remove some headers so we can replace them later # when we have the full error message and can # compute the length. headers = [(h, v) for (h, v) in headers if h not in ('Content-Length', 'Content-Type') ] # Save the headers in case we need to modify them. state['headers'] = headers return start_response(status, headers, exc_info) app_iter = self.app(environ, replacement_start_response) if (state['status_code'] // 100) not in (2, 3): req = webob.Request(environ) if (req.accept.best_match(['application/json', 'application/xml']) == 'application/xml'): try: # simple check xml is valid body = [et.ElementTree.tostring( et.ElementTree.fromstring('<error_message>' + '\n'.join(app_iter) + '</error_message>'))] except et.ElementTree.ParseError as err: LOG.error(_LE('Error parsing HTTP response: %s'), err) body = ['<error_message>%s' % state['status_code'] + '</error_message>'] state['headers'].append(('Content-Type', 'application/xml')) else: if six.PY3: app_iter = [i.decode('utf-8') for i in app_iter] body = [json.dumps({'error_message': '\n'.join(app_iter)})] if six.PY3: body = [item.encode('utf-8') for item in body] state['headers'].append(('Content-Type', 'application/json')) state['headers'].append(('Content-Length', str(len(body[0])))) else: body = app_iter return body
ValueError
dataset/ETHPy150Open openstack/ironic/ironic/api/middleware/parsable_error.py/ParsableErrorMiddleware.__call__
def test_config_with_non_package_relative_import(self):
    from pecan import configuration
    with tempfile.NamedTemporaryFile('wb', suffix='.py') as f:
        f.write(b_('\n'.join(['from . import variables'])))
        f.flush()
        configuration.Config({})

        try:
            configuration.conf_from_file(f.name)
        except (__HOLE__, SystemError) as e:
            assert 'relative import' in str(e)
        else:
            raise AssertionError(
                "A relative import-related error should have been raised"
            )
ValueError
dataset/ETHPy150Open pecan/pecan/pecan/tests/test_conf.py/TestConf.test_config_with_non_package_relative_import
def _get_interface_name_from_hosting_port(self, port):
    """
    Extract the underlying subinterface name for a port
    e.g. Port-channel10.200 or GigabitEthernet0/0/0.500
    """
    try:
        vlan = port['hosting_info']['segmentation_id']
        int_prefix = port['hosting_info']['physical_interface']
        return '%s.%s' % (int_prefix, vlan)
    except __HOLE__ as e:
        params = {'key': e}
        raise cfg_exc.DriverExpectedKeyNotSetException(**params)
KeyError
dataset/ETHPy150Open openstack/networking-cisco/networking_cisco/plugins/cisco/cfg_agent/device_drivers/asr1k/asr1k_routing_driver.py/ASR1kRoutingDriver._get_interface_name_from_hosting_port
def clear_cached_posts(sender, instance, **kwargs):
    """when a model is saved clear the cache on the query and
    the cache on it's rendered page

    - archive_dates
    - posts_published
    - posts_idea
    - posts_draft
    - posts_hidden
    - tags_and_counts
    - view post
    - archive month
    - all tags for post
    """
    try:
        saved_instance = sender.objects.get(pk=instance.pk)
    except sender.DoesNotExist:
        saved_instance = None
    #
    # TODO: only kill the cache if items have changed .
    # e.g if the description, title, slug has changed nuke the
    # index
    # archive
    #
    kill_list = []
    # dates
    if getattr(saved_instance, 'created', None) != instance.created:
        kill_list.append(get_key('archive_dates'))
        kill_list.append(
            get_key('month_%s_%s' % (
                instance.created.year,
                instance.created.month)))
        try:
            kill_list.append(
                get_key('month_%s_%s' % (
                    saved_instance.created.year,
                    saved_instance.created.month)))
        except AttributeError:
            pass
    # Just nuke all of the below for now
    kill_list.append(get_key('posts_published'))
    kill_list.append(get_key('posts_idea'))
    kill_list.append(get_key('posts_draft'))
    kill_list.append(get_key('posts_hidden'))
    kill_list.append(get_key('tags_and_counts'))
    kill_list.append(get_key('index_'))
    # always nuke the post
    kill_list.append(get_key('post_%s' % instance.slug))
    # nuke tags
    try:
        for tag in instance.tags.all():
            kill_list.append(get_key('tag_%s' % tag))
    except __HOLE__:
        pass
    cache.delete_many(kill_list)
ValueError
dataset/ETHPy150Open jamiecurle/django-omblog/omblog/listeners.py/clear_cached_posts
def post_create_slug(sender, instance, **kwargs):
    """Create a slug on freshly created blog posts"""
    if instance.pk and instance.slug is not None:
        return
    slug = slugify(instance.title)
    try:
        sender.objects.exclude(pk__in=[instance.pk])\
            .get(slug=slug)
        # append random characters and be done with it
        random = generate_random()
        slug = '%s-%s' % (slug, random)
    except (__HOLE__, sender.DoesNotExist):
        pass
    instance.slug = slug
AttributeError
dataset/ETHPy150Open jamiecurle/django-omblog/omblog/listeners.py/post_create_slug
def Get(self, db, key, *args, **kwargs):
    """ Handles GET message command.
    Executes a Get operation over the leveldb backend.

    db      =>      LevelDB object
    *args   =>      (key) to fetch
    """
    try:
        return success(db.Get(key))
    except __HOLE__:
        error_msg = "Key %r does not exist" % key
        errors_logger.exception(error_msg)
        return failure(KEY_ERROR, error_msg)
KeyError
dataset/ETHPy150Open oleiade/Elevator/debian/elevator/usr/lib/python2.6/dist-packages/elevator/api.py/Handler.Get
def MGet(self, db, keys, *args, **kwargs):
    def get_or_none(key, context):
        try:
            res = db.Get(key)
        except __HOLE__:
            warning_msg = "Key {0} does not exist".format(key)
            context['status'] = WARNING_STATUS
            errors_logger.warning(warning_msg)
            res = None
        return res

    context = {'status': SUCCESS_STATUS}
    value = [get_or_none(key, context) for key in keys]
    status = context['status']

    return status, value
KeyError
dataset/ETHPy150Open oleiade/Elevator/debian/elevator/usr/lib/python2.6/dist-packages/elevator/api.py/Handler.MGet
def Put(self, db, key, value, *args, **kwargs):
    """ Handles Put message command.
    Executes a Put operation over the leveldb backend.

    db      =>      LevelDB object
    *args   =>      (key, value) to update
    """
    try:
        return success(db.Put(key, value))
    except __HOLE__:
        error_msg = "Unsupported value type : %s" % type(value)
        errors_logger.exception(error_msg)
        return failure(TYPE_ERROR, error_msg)
TypeError
dataset/ETHPy150Open oleiade/Elevator/debian/elevator/usr/lib/python2.6/dist-packages/elevator/api.py/Handler.Put
def Slice(self, db, key_from, offset, *args, **kwargs):
    """Returns a slice of the db. `offset` keys,
    starting a `key_from`"""
    # Operates over a snapshot in order to return
    # a consistent state of the db
    db_snapshot = db.CreateSnapshot()
    it = db_snapshot.RangeIter(key_from)
    value = []
    pos = 0

    while pos < offset:
        try:
            value.append(it.next())
        except __HOLE__:
            break
        pos += 1

    return success(value)
StopIteration
dataset/ETHPy150Open oleiade/Elevator/debian/elevator/usr/lib/python2.6/dist-packages/elevator/api.py/Handler.Slice
def Batch(self, db, collection, *args, **kwargs):
    batch = leveldb.WriteBatch()
    batch_actions = {
        SIGNAL_BATCH_PUT: batch.Put,
        SIGNAL_BATCH_DELETE: batch.Delete,
    }

    try:
        for command in collection:
            signal, args = destructurate(command)
            batch_actions[signal](*args)
    except __HOLE__:  # Unrecognized signal
        return (FAILURE_STATUS,
                [SIGNAL_ERROR, "Unrecognized signal received : %r" % signal])
    except ValueError:
        return (FAILURE_STATUS,
                [VALUE_ERROR, "Batch only accepts sequences (list, tuples,...)"])
    except TypeError:
        return (FAILURE_STATUS,
                [TYPE_ERROR, "Invalid type supplied"])
    db.Write(batch)

    return success()
KeyError
dataset/ETHPy150Open oleiade/Elevator/debian/elevator/usr/lib/python2.6/dist-packages/elevator/api.py/Handler.Batch
def test_mro(self):
    # in Python 2.3, this raises TypeError: MRO conflict among bases classes,
    # in Python 2.2 it works.
    #
    # But in early versions of _ctypes.c, the result of tp_new
    # wasn't checked, and it even crashed Python.
    # Found by Greg Chapman.

    try:
        class X(object, Array):
            _length_ = 5
            _type_ = "i"
    except __HOLE__:
        pass

    from _ctypes import _Pointer
    try:
        class X(object, _Pointer):
            pass
    except TypeError:
        pass

    from _ctypes import _SimpleCData
    try:
        class X(object, _SimpleCData):
            _type_ = "i"
    except TypeError:
        pass

    try:
        class X(object, Structure):
            _fields_ = []
    except TypeError:
        pass
TypeError
dataset/ETHPy150Open francelabs/datafari/windows/python/Lib/ctypes/test/test_functions.py/FunctionTestCase.test_mro
def post_comment(request): """ Post a comment Redirects to the `comments.comments.comment_was_posted` view upon success. Templates: `comment_preview` Context: comment the comment being posted comment_form the comment form options comment options target comment target hash security hash (must be included in a posted form to succesfully post a comment). rating_options comment ratings options ratings_optional are ratings optional? ratings_required are ratings required? rating_range range of ratings rating_choices choice of ratings """ if not request.POST: raise Http404, _("Only POSTs are allowed") try: options, target, security_hash = request.POST['options'], request.POST['target'], request.POST['gonzo'] except KeyError: raise Http404, _("One or more of the required fields wasn't submitted") photo_options = request.POST.get('photo_options', '') rating_options = normalize_newlines(request.POST.get('rating_options', '')) if Comment.objects.get_security_hash(options, photo_options, rating_options, target) != security_hash: raise Http404, _("Somebody tampered with the comment form (security violation)") # Now we can be assured the data is valid. if rating_options: rating_range, rating_choices = Comment.objects.get_rating_options(base64.decodestring(rating_options)) else: rating_range, rating_choices = [], [] content_type_id, object_id = target.split(':') # target is something like '52:5157' try: obj = ContentType.objects.get(pk=content_type_id).get_object_for_this_type(pk=object_id) except __HOLE__: raise Http404, _("The comment form had an invalid 'target' parameter -- the object ID was invalid") option_list = options.split(',') # options is something like 'pa,ra' new_data = request.POST.copy() new_data['content_type_id'] = content_type_id new_data['object_id'] = object_id new_data['ip_address'] = request.META.get('REMOTE_ADDR') new_data['is_public'] = IS_PUBLIC in option_list manipulator = PublicCommentManipulator(request.user, ratings_required=RATINGS_REQUIRED in option_list, ratings_range=rating_range, num_rating_choices=len(rating_choices)) errors = manipulator.get_validation_errors(new_data) # If user gave correct username/password and wasn't already logged in, log them in # so they don't have to enter a username/password again. 
if manipulator.get_user() and not manipulator.get_user().is_authenticated() and new_data.has_key('password') and manipulator.get_user().check_password(new_data['password']): from django.contrib.auth import login login(request, manipulator.get_user()) if errors or request.POST.has_key('preview'): class CommentFormWrapper(oldforms.FormWrapper): def __init__(self, manipulator, new_data, errors, rating_choices): oldforms.FormWrapper.__init__(self, manipulator, new_data, errors) self.rating_choices = rating_choices def ratings(self): field_list = [self['rating%d' % (i+1)] for i in range(len(rating_choices))] for i, f in enumerate(field_list): f.choice = rating_choices[i] return field_list comment = errors and '' or manipulator.get_comment(new_data) comment_form = CommentFormWrapper(manipulator, new_data, errors, rating_choices) return render_to_response('comments/preview.html', { 'comment': comment, 'comment_form': comment_form, 'options': options, 'target': target, 'hash': security_hash, 'rating_options': rating_options, 'ratings_optional': RATINGS_OPTIONAL in option_list, 'ratings_required': RATINGS_REQUIRED in option_list, 'rating_range': rating_range, 'rating_choices': rating_choices, }, context_instance=RequestContext(request)) elif request.POST.has_key('post'): # If the IP is banned, mail the admins, do NOT save the comment, and # serve up the "Thanks for posting" page as if the comment WAS posted. if request.META['REMOTE_ADDR'] in settings.BANNED_IPS: mail_admins("Banned IP attempted to post comment", str(request.POST) + "\n\n" + str(request.META)) else: manipulator.do_html2python(new_data) comment = manipulator.save(new_data) return HttpResponseRedirect("../posted/?c=%s:%s" % (content_type_id, object_id)) else: raise Http404, _("The comment form didn't provide either 'preview' or 'post'")
ObjectDoesNotExist
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/contrib/comments/views/comments.py/post_comment
def post_free_comment(request): """ Post a free comment (not requiring a log in) Redirects to `comments.comments.comment_was_posted` view on success. Templates: `comment_free_preview` Context: comment comment being posted comment_form comment form object options comment options target comment target hash security hash (must be included in a posted form to succesfully post a comment). """ if not request.POST: raise Http404, _("Only POSTs are allowed") try: options, target, security_hash = request.POST['options'], request.POST['target'], request.POST['gonzo'] except __HOLE__: raise Http404, _("One or more of the required fields wasn't submitted") if Comment.objects.get_security_hash(options, '', '', target) != security_hash: raise Http404, _("Somebody tampered with the comment form (security violation)") content_type_id, object_id = target.split(':') # target is something like '52:5157' content_type = ContentType.objects.get(pk=content_type_id) try: obj = content_type.get_object_for_this_type(pk=object_id) except ObjectDoesNotExist: raise Http404, _("The comment form had an invalid 'target' parameter -- the object ID was invalid") option_list = options.split(',') new_data = request.POST.copy() new_data['content_type_id'] = content_type_id new_data['object_id'] = object_id new_data['ip_address'] = request.META['REMOTE_ADDR'] new_data['is_public'] = IS_PUBLIC in option_list manipulator = PublicFreeCommentManipulator() errors = manipulator.get_validation_errors(new_data) if errors or request.POST.has_key('preview'): comment = errors and '' or manipulator.get_comment(new_data) return render_to_response('comments/free_preview.html', { 'comment': comment, 'comment_form': oldforms.FormWrapper(manipulator, new_data, errors), 'options': options, 'target': target, 'hash': security_hash, }, context_instance=RequestContext(request)) elif request.POST.has_key('post'): # If the IP is banned, mail the admins, do NOT save the comment, and # serve up the "Thanks for posting" page as if the comment WAS posted. if request.META['REMOTE_ADDR'] in settings.BANNED_IPS: from django.core.mail import mail_admins mail_admins("Practical joker", str(request.POST) + "\n\n" + str(request.META)) else: manipulator.do_html2python(new_data) comment = manipulator.save(new_data) return HttpResponseRedirect("../posted/?c=%s:%s" % (content_type_id, object_id)) else: raise Http404, _("The comment form didn't provide either 'preview' or 'post'")
KeyError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/contrib/comments/views/comments.py/post_free_comment
def comment_was_posted(request):
    """
    Display "comment was posted" success page

    Templates: `comment_posted`
    Context:
        object
            The object the comment was posted on
    """
    obj = None
    if request.GET.has_key('c'):
        content_type_id, object_id = request.GET['c'].split(':')
        try:
            content_type = ContentType.objects.get(pk=content_type_id)
            obj = content_type.get_object_for_this_type(pk=object_id)
        except __HOLE__:
            pass
    return render_to_response('comments/posted.html', {'object': obj},
        context_instance=RequestContext(request))
ObjectDoesNotExist
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/contrib/comments/views/comments.py/comment_was_posted
def main(): ''' Main method, takes care of loading data, running it through the various analyses and reporting the results ''' # Handle command-line arguments parser = optparse.OptionParser() parser.add_option('--alexa-file', default='data/alexa_100k.csv', help='Alexa file to pull from. Default: %default') (options, arguments) = parser.parse_args() print options, arguments try: # Pokemon exception handling # This is the Alexa 1M domain list. print 'Loading alexa dataframe...' alexa_dataframe = pd.read_csv(options.alexa_file, names=['rank','uri'], header=None, encoding='utf-8') print alexa_dataframe.info() print alexa_dataframe.head() # Compute the 2LD of the domain given by Alexa alexa_dataframe['domain'] = [ domain_extract(uri) for uri in alexa_dataframe['uri']] del alexa_dataframe['rank'] del alexa_dataframe['uri'] alexa_dataframe = alexa_dataframe.dropna() alexa_dataframe = alexa_dataframe.drop_duplicates() print alexa_dataframe.head() # Set the class alexa_dataframe['class'] = 'legit' # Shuffle the data (important for training/testing) alexa_dataframe = alexa_dataframe.reindex(np.random.permutation(alexa_dataframe.index)) alexa_total = alexa_dataframe.shape[0] print 'Total Alexa domains %d' % alexa_total # Read in the DGA domains dga_dataframe = pd.read_csv('data/dga_domains.txt', names=['raw_domain'], header=None, encoding='utf-8') # We noticed that the blacklist values just differ by captilization or .com/.org/.info dga_dataframe['domain'] = dga_dataframe.applymap(lambda x: x.split('.')[0].strip().lower()) del dga_dataframe['raw_domain'] # It's possible we have NaNs from blanklines or whatever dga_dataframe = dga_dataframe.dropna() dga_dataframe = dga_dataframe.drop_duplicates() dga_total = dga_dataframe.shape[0] print 'Total DGA domains %d' % dga_total # Set the class dga_dataframe['class'] = 'dga' print 'Number of DGA domains: %d' % dga_dataframe.shape[0] print dga_dataframe.head() # Concatenate the domains in a big pile! all_domains = pd.concat([alexa_dataframe, dga_dataframe], ignore_index=True) # Add a length field for the domain all_domains['length'] = [len(x) for x in all_domains['domain']] # Okay since we're trying to detect dynamically generated domains and short # domains (length <=6) are crazy random even for 'legit' domains we're going # to punt on short domains (perhaps just white/black list for short domains?) all_domains = all_domains[all_domains['length'] > 6] # Add a entropy field for the domain all_domains['entropy'] = [entropy(x) for x in all_domains['domain']] print all_domains.head() # Now we compute NGrams for every Alexa domain and see if we can use the # NGrams to help us better differentiate and mark DGA domains... # Scikit learn has a nice NGram generator that can generate either char NGrams or word NGrams (we're using char). # Parameters: # - ngram_range=(3,5) # Give me all ngrams of length 3, 4, and 5 # - min_df=1e-4 # Minimumum document frequency. At 1e-4 we're saying give us NGrams that # # happen in at least .1% of the domains (so for 100k... at least 100 domains) alexa_vc = sklearn.feature_extraction.text.CountVectorizer(analyzer='char', ngram_range=(3,5), min_df=1e-4, max_df=1.0) # I'm SURE there's a better way to store all the counts but not sure... 
# At least the min_df parameters has already done some thresholding counts_matrix = alexa_vc.fit_transform(alexa_dataframe['domain']) alexa_counts = np.log10(counts_matrix.sum(axis=0).getA1()) ngrams_list = alexa_vc.get_feature_names() # For fun sort it and show it import operator _sorted_ngrams = sorted(zip(ngrams_list, alexa_counts), key=operator.itemgetter(1), reverse=True) print 'Alexa NGrams: %d' % len(_sorted_ngrams) for ngram, count in _sorted_ngrams[:10]: print ngram, count # We're also going to throw in a bunch of dictionary words word_dataframe = pd.read_csv('data/words.txt', names=['word'], header=None, dtype={'word': np.str}, encoding='utf-8') # Cleanup words from dictionary word_dataframe = word_dataframe[word_dataframe['word'].map(lambda x: str(x).isalpha())] word_dataframe = word_dataframe.applymap(lambda x: str(x).strip().lower()) word_dataframe = word_dataframe.dropna() word_dataframe = word_dataframe.drop_duplicates() print word_dataframe.head(10) # Now compute NGrams on the dictionary words # Same logic as above... dict_vc = sklearn.feature_extraction.text.CountVectorizer(analyzer='char', ngram_range=(3,5), min_df=1e-5, max_df=1.0) counts_matrix = dict_vc.fit_transform(word_dataframe['word']) dict_counts = np.log10(counts_matrix.sum(axis=0).getA1()) ngrams_list = dict_vc.get_feature_names() # For fun sort it and show it import operator _sorted_ngrams = sorted(zip(ngrams_list, dict_counts), key=operator.itemgetter(1), reverse=True) print 'Word NGrams: %d' % len(_sorted_ngrams) for ngram, count in _sorted_ngrams[:10]: print ngram, count # We use the transform method of the CountVectorizer to form a vector # of ngrams contained in the domain, that vector is than multiplied # by the counts vector (which is a column sum of the count matrix). def ngram_count(domain): alexa_match = alexa_counts * alexa_vc.transform([domain]).T # Woot vector multiply and transpose Woo Hoo! dict_match = dict_counts * dict_vc.transform([domain]).T print '%s Alexa match:%d Dict match: %d' % (domain, alexa_match, dict_match) # Examples: ngram_count('google') ngram_count('facebook') ngram_count('1cb8a5f36f') ngram_count('pterodactylfarts') ngram_count('ptes9dro-dwacty2lfa5rrts') ngram_count('beyonce') ngram_count('bey666on4ce') # Compute NGram matches for all the domains and add to our dataframe all_domains['alexa_grams']= alexa_counts * alexa_vc.transform(all_domains['domain']).T all_domains['word_grams']= dict_counts * dict_vc.transform(all_domains['domain']).T print all_domains.head() # Use the vectorized operations of the dataframe to investigate differences # between the alexa and word grams all_domains['diff'] = all_domains['alexa_grams'] - all_domains['word_grams'] # The table below shows those domain names that are more 'dictionary' and less 'web' print all_domains.sort(['diff'], ascending=True).head(10) # The table below shows those domain names that are more 'web' and less 'dictionary' # Good O' web.... print all_domains.sort(['diff'], ascending=False).head(50) # Lets look at which Legit domains are scoring low on both alexa and word gram count weird_cond = (all_domains['class']=='legit') & (all_domains['word_grams']<3) & (all_domains['alexa_grams']<2) weird = all_domains[weird_cond] print weird.shape[0] print weird.head(10) # Epiphany... Alexa really may not be the best 'exemplar' set... # (probably a no-shit moment for everyone else :) # # Discussion: If you're using these as exemplars of NOT DGA, then your probably # making things very hard on your machine learning algorithm. 
# Perhaps we should have two categories of Alexa domains, 'legit' # and a 'weird'. based on some definition of weird. # Looking at the entries above... we have approx 80 domains # that we're going to mark as 'weird'. # all_domains.loc[weird_cond, 'class'] = 'weird' print all_domains['class'].value_counts() all_domains[all_domains['class'] == 'weird'].head() # Perhaps we will just exclude the weird class from our ML training not_weird = all_domains[all_domains['class'] != 'weird'] X = not_weird.as_matrix(['length', 'entropy', 'alexa_grams', 'word_grams']) # Labels (scikit learn uses 'y' for classification labels) y = np.array(not_weird['class'].tolist()) # Random Forest is a popular ensemble machine learning classifier. # http://scikit-learn.org/dev/modules/generated/sklearn.ensemble.RandomForestClassifier.html clf = sklearn.ensemble.RandomForestClassifier(n_estimators=20, compute_importances=True) # Trees in the forest # Train on a 80/20 split from sklearn.cross_validation import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) clf.fit(X_train, y_train) y_pred = clf.predict(X_test) # Now plot the results of the holdout set in a confusion matrix labels = ['legit', 'dga'] cm = sklearn.metrics.confusion_matrix(y_test, y_pred, labels) show_cm(cm, labels) # We can also look at what features the learning algorithm thought were the most important importances = zip(['length', 'entropy', 'alexa_grams', 'word_grams'], clf.feature_importances_) print importances # Now train on the whole thing before doing tests and saving models to disk clf.fit(X, y) # test_it shows how to do evaluation, also fun for manual testing below :) def test_it(domain): _alexa_match = alexa_counts * alexa_vc.transform([domain]).T # Woot matrix multiply and transpose Woo Hoo! _dict_match = dict_counts * dict_vc.transform([domain]).T _X = [len(domain), entropy(domain), _alexa_match, _dict_match] print '%s : %s' % (domain, clf.predict(_X)[0]) # Examples (feel free to change these and see the results!) test_it('google') test_it('google88') test_it('facebook') test_it('1cb8a5f36f') test_it('pterodactylfarts') test_it('ptes9dro-dwacty2lfa5rrts') test_it('beyonce') test_it('bey666on4ce') test_it('supersexy') test_it('yourmomissohotinthesummertime') test_it('35-sdf-09jq43r') test_it('clicksecurity') # Serialize model to disk save_model_to_disk('dga_model_random_forest', clf) save_model_to_disk('dga_model_alexa_vectorizor', alexa_vc) save_model_to_disk('dga_model_alexa_counts', alexa_counts) save_model_to_disk('dga_model_dict_vectorizor', dict_vc) save_model_to_disk('dga_model_dict_counts', dict_counts) except __HOLE__: print 'Goodbye Cruel World...' sys.exit(0) except Exception, error: traceback.print_exc() print '(Exception):, %s' % (str(error)) sys.exit(1)
KeyboardInterrupt
dataset/ETHPy150Open ClickSecurity/data_hacking/dga_detection/dga_model_gen.py/main
def store(self, section, key, data, dtype='json'):
    assert dtype in ('json',)

    if not self.enabled:
        return

    fn = self._get_cache_fn(section, key, dtype)
    try:
        try:
            os.makedirs(os.path.dirname(fn))
        except __HOLE__ as ose:
            if ose.errno != errno.EEXIST:
                raise
        write_json_file(data, fn)
    except Exception:
        tb = traceback.format_exc()
        self._ydl.report_warning(
            'Writing cache to %r failed: %s' % (fn, tb))
OSError
dataset/ETHPy150Open yasoob/youtube-dl-GUI/youtube_dl/cache.py/Cache.store
def load(self, section, key, dtype='json', default=None):
    assert dtype in ('json',)

    if not self.enabled:
        return default

    cache_fn = self._get_cache_fn(section, key, dtype)
    try:
        try:
            with io.open(cache_fn, 'r', encoding='utf-8') as cachef:
                return json.load(cachef)
        except ValueError:
            try:
                file_size = os.path.getsize(cache_fn)
            except (OSError, __HOLE__) as oe:
                file_size = str(oe)
            self._ydl.report_warning(
                'Cache retrieval from %s failed (%s)' % (cache_fn, file_size))
    except IOError:
        pass  # No cache available

    return default
IOError
dataset/ETHPy150Open yasoob/youtube-dl-GUI/youtube_dl/cache.py/Cache.load
def standardize_patterns(column_names, patterns):
    """
    Given patterns in any of the permitted input forms, return a dict whose keys
    are column indices and whose values are functions which return a boolean value whether the value passes.

    If patterns is a dictionary and any of its keys are values in column_names, the returned dictionary will
    have those keys replaced with the integer position of that value in column_names
    """
    try:
        # Dictionary of patterns
        patterns = dict((k, pattern_as_function(v)) for k, v in patterns.items() if v)
        if not column_names:
            return patterns
        p2 = {}
        for k in patterns:
            if k in column_names:
                idx = column_names.index(k)
                if idx in patterns:
                    raise ColumnIdentifierError("Column %s has index %i which already has a pattern." % (k, idx))
                p2[idx] = patterns[k]
            else:
                p2[k] = patterns[k]
        return p2
    except __HOLE__:
        # Sequence of patterns
        return dict((i, pattern_as_function(x)) for i, x in enumerate(patterns))
AttributeError
dataset/ETHPy150Open wireservice/csvkit/csvkit/grep.py/standardize_patterns
def stop(self, **params):
    try:
        if self.redis_subscriber:
            self.redis_subscriber.unsubscribe()
            self.state = "stopped"
            self.status = str(datetime.datetime.now())
    except __HOLE__:
        return "not subscribed"
AttributeError
dataset/ETHPy150Open emccode/heliosburn/heliosburn/proxy/modules/traffic_recorder.py/TrafficRecorder.stop
def start(kapp): global app_ref if use_null_logger: logger = log.NullLogger() else: logger = logging.getLogger("ipg") logger.setLevel(logging.INFO) fmt = logging.Formatter(STD_FORMAT) stderrHdlr = logging.StreamHandler() stderrHdlr.setFormatter(fmt) logger.addHandler(stderrHdlr) fileHdlr = logging.FileHandler("ipg.log") fileHdlr.setFormatter(fmt) logger.addHandler(fileHdlr) kapp.logger = logger # Get settings (preferences) basedir = paths.ginga_home if not os.path.exists(basedir): try: os.mkdir(basedir) except __HOLE__ as e: logger.warning("Couldn't create ginga settings area (%s): %s" % ( basedir, str(e))) logger.warning("Preferences will not be able to be saved") # Set up preferences prefs = Settings.Preferences(basefolder=basedir, logger=logger) settings = prefs.createCategory('general') settings.load(onError='silent') settings.setDefaults(useMatplotlibColormaps=False) bindprefs = prefs.createCategory('bindings') bindprefs.load(onError='silent') # So we can find our plugins sys.path.insert(0, basedir) moduleHome = os.path.split(sys.modules['ginga.version'].__file__)[0] childDir = os.path.join(moduleHome, 'misc', 'plugins') sys.path.insert(0, childDir) childDir = os.path.join(basedir, 'plugins') sys.path.insert(0, childDir) # User configuration (custom star catalogs, etc.) try: import ipg_config ipg_config.pre_gui_config(kapp) except Exception as e: try: (type, value, tb) = sys.exc_info() tb_str = "\n".join(traceback.format_tb(tb)) except Exception: tb_str = "Traceback information unavailable." logger.error("Error importing Ginga config file: %s" % ( str(e))) logger.error("Traceback:\n%s" % (tb_str)) # create Qt app # Note: workaround for pyside bug where QApplication is not deleted app = QtGui.QApplication.instance() if not app: app = QtGui.QApplication([]) app.connect(app, QtCore.SIGNAL('lastWindowClosed()'), app, QtCore.SLOT('quit()')) # here is our little launcher w = StartMenu(logger, app, kapp, prefs) app_ref = w w.show() app.setActiveWindow(w) #app.exec_() # Very important, IPython-specific step: this gets GUI event loop # integration going, and it replaces calling app.exec_() kapp.start() return w # Some boilderplate to display matplotlib plots in notebook # If QT GUI could interact nicely with --pylab=inline we wouldn't need this
OSError
dataset/ETHPy150Open ejeschke/ginga/ginga/qtw/ipg.py/start
def get_notification_language(user):
    """
    Returns site-specific notification language for this user. Raises
    LanguageStoreNotAvailable if this site does not use translated
    notifications.
    """
    if getattr(settings, 'NOTIFICATION_LANGUAGE_MODULE', False):
        try:
            app_label, model_name = settings.NOTIFICATION_LANGUAGE_MODULE.split('.')
            model = models.get_model(app_label, model_name)
            language_model = model._default_manager.get(user__id__exact=user.id)
            if hasattr(language_model, 'language'):
                return language_model.language
        except (__HOLE__, ImproperlyConfigured, model.DoesNotExist):
            raise LanguageStoreNotAvailable
    raise LanguageStoreNotAvailable
ImportError
dataset/ETHPy150Open davemerwin/blue-channel/external_apps/notification/models.py/get_notification_language
def time_varying_coefficients(d, timelines, constant=False, independent=0, randgen=random.exponential): """ Time vary coefficients d: the dimension of the dataset timelines: the observational times constant: True for constant coefficients independent: the number of coffients to set to 0 (covariate is ind of survival), or a list of covariates to make indepent. randgen: how scalar coefficients (betas) are sampled. returns a matrix (t,d+1) of coefficients """ t = timelines.shape[0] try: a = np.arange(d) random.shuffle(a) independent = a[:independent] except __HOLE__: pass n_funcs = len(FUNCS) coefficients = np.zeros((t, d)) data_generators = [] for i in range(d): f = FUNCS[random.randint(0, n_funcs)] if not constant else constant_ if i in independent: beta = 0 else: beta = randgen((1 - constant) * 0.5 / d) coefficients[:, i] = f(timelines, alpha=randgen(2000.0 / t), beta=beta) data_generators.append(f.__doc__) df_coefficients = pd.DataFrame(coefficients, columns=data_generators, index=timelines) return df_coefficients
IndexError
dataset/ETHPy150Open CamDavidsonPilon/lifelines/lifelines/generate_datasets.py/time_varying_coefficients
@property
def most_specific(self):
    """The most specific (smallest) subdivision available.

    If there are no :py:class:`Subdivision` objects for the response,
    this returns an empty :py:class:`Subdivision`.

    :type: :py:class:`Subdivision`
    """
    try:
        return self[-1]
    except __HOLE__:
        return Subdivision(self._locales)
IndexError
dataset/ETHPy150Open maxmind/GeoIP2-python/geoip2/records.py/Subdivisions.most_specific
def _unpack_message_set(self, tp, messages):
    try:
        for offset, size, msg in messages:
            if self.config['check_crcs'] and not msg.validate_crc():
                raise Errors.InvalidMessageError(msg)
            elif msg.is_compressed():
                for record in self._unpack_message_set(tp, msg.decompress()):
                    yield record
            else:
                key, value = self._deserialize(msg)
                yield ConsumerRecord(tp.topic, tp.partition, offset, key, value)

    # If unpacking raises StopIteration, it is erroneously
    # caught by the generator. We want all exceptions to be raised
    # back to the user. See Issue 545
    except __HOLE__ as e:
        log.exception('StopIteration raised unpacking messageset: %s', e)
        raise Exception('StopIteration raised unpacking messageset')
StopIteration
dataset/ETHPy150Open dpkp/kafka-python/kafka/consumer/fetcher.py/Fetcher._unpack_message_set
def __next__(self):
    if not self._iterator:
        self._iterator = self._message_generator()
    try:
        return next(self._iterator)
    except __HOLE__:
        self._iterator = None
        raise
StopIteration
dataset/ETHPy150Open dpkp/kafka-python/kafka/consumer/fetcher.py/Fetcher.__next__
def _maybeClass(classnamep):
    try:
        object
    except __HOLE__:
        isObject = 0
    else:
        isObject = isinstance(classnamep, type)
    if isinstance(classnamep, ClassType) or isObject:
        return qual(classnamep)
    return classnamep
NameError
dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/spread/jelly.py/_maybeClass
def _resolve_spec(self, spec):
    package, subpath = self._split_spec(spec)

    try:
        pkgpath = self.resolver.resolve(package + ':').abspath()
    except __HOLE__ as e:
        raise BundleError(e)
    else:
        return path.join(pkgpath, subpath)
ImportError
dataset/ETHPy150Open sontek/pyramid_webassets/pyramid_webassets/__init__.py/PyramidResolver._resolve_spec
def resolve_source_to_url(self, ctx, filepath, item): request = get_current_request() # Use the filepath to reconstruct the item without globs package, _ = self._split_spec(item) if package is not None: pkgdir = self._resolve_spec(package + ':') if filepath.startswith(pkgdir): item = '{}:{}'.format(package, filepath[len(pkgdir):]) # Attempt to resolve the filepath as passed (but after versioning). # If this fails, it may be because the static route was registered # with an asset spec. In this case, the original item may also be # an asset spec contained therein, so try to resolve that. if request is not None: for attempt in (filepath, item): try: return request.static_url(attempt) except __HOLE__: pass if USING_WEBASSETS_CONTEXT: return super(PyramidResolver, self).resolve_source_to_url( ctx, filepath, item ) else: # pragma: no cover return super(PyramidResolver, self).resolve_source_to_url( filepath, item )
ValueError
dataset/ETHPy150Open sontek/pyramid_webassets/pyramid_webassets/__init__.py/PyramidResolver.resolve_source_to_url
def resolve_output_to_url(self, ctx, item): request = get_current_request() if not path.isabs(item): if ':' not in item: if 'asset_base' in ctx.config: if ctx.config['asset_base'].endswith(':'): item = ctx.config['asset_base'] + item else: item = path.join(ctx.config['asset_base'], item) else: item = path.join(ctx.directory, item) if ':' in item: filepath = self._resolve_spec(item) else: filepath = item if request is not None: for attempt in (filepath, item): try: return request.static_url(item) except __HOLE__: pass if USING_WEBASSETS_CONTEXT: return super(PyramidResolver, self).resolve_output_to_url( ctx, filepath ) else: # pragma: no cover return super(PyramidResolver, self).resolve_output_to_url(filepath)
ValueError
dataset/ETHPy150Open sontek/pyramid_webassets/pyramid_webassets/__init__.py/PyramidResolver.resolve_output_to_url
def get_webassets_env_from_settings(settings, prefix='webassets'): """This function will take all webassets.* parameters, and call the ``Environment()`` constructor with kwargs passed in. The only two parameters that are not passed as keywords are: * base_dir * base_url which are passed in positionally. Read the ``WebAssets`` docs for ``Environment`` for more details. """ # Make a dictionary of the webassets.* elements... kwargs = {} # assets settings cut_prefix = len(prefix) + 1 for k in settings: if k.startswith(prefix): val = settings[k] if isinstance(val, six.string_types): if val.lower() in auto_booly: val = asbool(val) elif val.lower().startswith('json:') and k[cut_prefix:] != 'manifest': val = json.loads(val[5:]) kwargs[k[cut_prefix:]] = val if 'base_dir' not in kwargs: raise Exception("You need to provide webassets.base_dir in your configuration") if 'base_url' not in kwargs: raise Exception("You need to provide webassets.base_url in your configuration") asset_dir = kwargs.pop('base_dir') asset_url = kwargs.pop('base_url') if ':' in asset_dir: try: resolved_dir = AssetResolver(None).resolve(asset_dir).abspath() except __HOLE__: pass else: # Store the original asset spec to use later kwargs['asset_base'] = asset_dir asset_dir = resolved_dir if not asset_url.startswith('/'): if six.moves.urllib.parse.urlparse(asset_url).scheme == '': asset_url = '/' + asset_url if 'debug' in kwargs: kwargs['debug'] = maybebool(kwargs['debug']) if 'cache' in kwargs: cache = kwargs['cache'] = maybebool(kwargs['cache']) if cache and isinstance(cache, six.string_types) and not path.isdir(cache): makedirs(cache) # 'updater' is just passed in... if 'auto_build' in kwargs: kwargs['auto_build'] = maybebool(kwargs['auto_build']) if 'jst_compiler' in kwargs: kwargs['JST_COMPILER'] = kwargs.pop('jst_compiler') if 'jst_namespace' in kwargs: kwargs['JST_NAMESPACE'] = kwargs.pop('jst_namespace') if 'manifest' in kwargs: kwargs['manifest'] = maybebool(kwargs['manifest']) if 'url_expire' in kwargs: kwargs['url_expire'] = maybebool(kwargs['url_expire']) if 'static_view' in kwargs: kwargs['static_view'] = asbool(kwargs['static_view']) else: kwargs['static_view'] = False if 'cache_max_age' in kwargs: kwargs['cache_max_age'] = int(kwargs.pop('cache_max_age')) else: kwargs['cache_max_age'] = None if 'load_path' in kwargs: # force load_path to be an array and split on whitespace if not isinstance(kwargs['load_path'], list): kwargs['load_path'] = kwargs['load_path'].split() paths = kwargs.pop('paths', None) if 'bundles' in kwargs: if isinstance(kwargs['bundles'], six.string_types): kwargs['bundles'] = kwargs['bundles'].split() bundles = kwargs.pop('bundles', None) assets_env = Environment(asset_dir, asset_url, **kwargs) if paths is not None: for map_path, map_url in json.loads(paths).items(): assets_env.append_path(map_path, map_url) def yaml_stream(fname, mode): if path.exists(fname): return open(fname, mode) else: return assets_env.resolver.resolver.resolve(fname).stream() if isinstance(bundles, list): fnames = reversed(bundles) fin = fileinput.input(fnames, openhook=yaml_stream) with closing(fin): lines = [text(line).rstrip() for line in fin] yamlin = six.StringIO('\n'.join(lines)) loader = YAMLLoader(yamlin) result = loader.load_bundles() assets_env.register(result) elif isinstance(bundles, dict): assets_env.register(bundles) return assets_env
ImportError
dataset/ETHPy150Open sontek/pyramid_webassets/pyramid_webassets/__init__.py/get_webassets_env_from_settings
def assets(request, *args, **kwargs):
    env = get_webassets_env_from_request(request)

    result = []

    for f in args:
        try:
            result.append(env[f])
        except __HOLE__:
            result.append(f)

    bundle = Bundle(*result, **kwargs)
    if USING_WEBASSETS_CONTEXT:
        with bundle.bind(env):
            urls = bundle.urls()
    else:  # pragma: no cover
        urls = bundle.urls(env=env)

    return urls
KeyError
dataset/ETHPy150Open sontek/pyramid_webassets/pyramid_webassets/__init__.py/assets
def diff_map(self, inwrappers): """Generate SQL to transform existing data wrappers :param input_map: a YAML map defining the new data wrappers :return: list of SQL statements Compares the existing data wrapper definitions, as fetched from the catalogs, to the input map and generates SQL statements to transform the data wrappers accordingly. """ stmts = [] # check input data wrappers for fdw in inwrappers: infdw = inwrappers[fdw] # does it exist in the database? if fdw in self: stmts.append(self[fdw].diff_map(infdw)) else: # check for possible RENAME if hasattr(infdw, 'oldname'): oldname = infdw.oldname try: stmts.append(self[oldname].rename(infdw.name)) del self[oldname] except __HOLE__ as exc: exc.args = ("Previous name '%s' for data wrapper " "'%s' not found" % (oldname, infdw.name), ) raise else: # create new data wrapper stmts.append(infdw.create()) # check database data wrappers for fdw in self: # if missing, drop it if fdw not in inwrappers: self[fdw].dropped = True return stmts
KeyError
dataset/ETHPy150Open perseas/Pyrseas/pyrseas/dbobject/foreign.py/ForeignDataWrapperDict.diff_map
def diff_map(self, inservers): """Generate SQL to transform existing foreign servers :param inservers: a YAML map defining the new foreign servers :return: list of SQL statements Compares the existing server definitions, as fetched from the catalogs, to the input map and generates SQL statements to transform the foreign servers accordingly. """ stmts = [] # check input foreign servers for (fdw, srv) in inservers: insrv = inservers[(fdw, srv)] # does it exist in the database? if (fdw, srv) in self: stmts.append(self[(fdw, srv)].diff_map(insrv)) else: # check for possible RENAME if hasattr(insrv, 'oldname'): oldname = insrv.oldname try: stmts.append(self[(fdw, oldname)].rename(insrv.name)) del self[oldname] except __HOLE__ as exc: exc.args = ("Previous name '%s' for dictionary '%s' " "not found" % (oldname, insrv.name), ) raise else: # create new dictionary stmts.append(insrv.create()) # check database foreign servers for srv in self: # if missing, drop it if srv not in inservers: self[srv].dropped = True return stmts
KeyError
dataset/ETHPy150Open perseas/Pyrseas/pyrseas/dbobject/foreign.py/ForeignServerDict.diff_map
def diff_map(self, inusermaps): """Generate SQL to transform existing user mappings :param input_map: a YAML map defining the new user mappings :return: list of SQL statements Compares the existing user mapping definitions, as fetched from the catalogs, to the input map and generates SQL statements to transform the user mappings accordingly. """ stmts = [] # check input user mappings for (fdw, srv, usr) in inusermaps: inump = inusermaps[(fdw, srv, usr)] # does it exist in the database? if (fdw, srv, usr) in self: stmts.append(self[(fdw, srv, usr)].diff_map(inump)) else: # check for possible RENAME if hasattr(inump, 'oldname'): oldname = inump.oldname try: stmts.append(self[(fdw, srv, oldname)].rename( inump.name)) del self[(fdw, srv, oldname)] except __HOLE__ as exc: exc.args = ("Previous name '%s' for user mapping '%s' " "not found" % (oldname, inump.name), ) raise else: # create new user mapping stmts.append(inump.create()) # check database user mappings for (fdw, srv, usr) in self: # if missing, drop it if (fdw, srv, usr) not in inusermaps: stmts.append(self[(fdw, srv, usr)].drop()) return stmts
KeyError
dataset/ETHPy150Open perseas/Pyrseas/pyrseas/dbobject/foreign.py/UserMappingDict.diff_map
def from_map(self, schema, inobjs, newdb): """Initalize the dictionary of tables by converting the input map :param schema: schema owning the tables :param inobjs: YAML map defining the schema objects :param newdb: collection of dictionaries defining the database """ for key in inobjs: if not key.startswith('foreign table '): raise KeyError("Unrecognized object type: %s" % key) ftb = key[14:] self[(schema.name, ftb)] = ftable = ForeignTable( schema=schema.name, name=ftb) inftable = inobjs[key] if not inftable: raise ValueError("Foreign table '%s' has no specification" % ftb) try: newdb.columns.from_map(ftable, inftable['columns']) except __HOLE__ as exc: exc.args = ("Foreign table '%s' has no columns" % ftb, ) raise for attr in ['server', 'options', 'owner', 'description']: if attr in inftable: setattr(ftable, attr, inftable[attr]) if 'privileges' in inftable: ftable.privileges = privileges_from_map( inftable['privileges'], ftable.allprivs, ftable.owner)
KeyError
dataset/ETHPy150Open perseas/Pyrseas/pyrseas/dbobject/foreign.py/ForeignTableDict.from_map
def diff_map(self, intables): """Generate SQL to transform existing foreign tables :param intables: a YAML map defining the new foreign tables :return: list of SQL statements Compares the existing foreign table definitions, as fetched from the catalogs, to the input map and generates SQL statements to transform the foreign tables accordingly. """ stmts = [] # check input tables for (sch, tbl) in intables: intbl = intables[(sch, tbl)] # does it exist in the database? if (sch, tbl) not in self: # check for possible RENAME if hasattr(intbl, 'oldname'): oldname = intbl.oldname try: stmts.append(self[(sch, oldname)].rename(intbl.name)) del self[(sch, oldname)] except __HOLE__ as exc: exc.args = ("Previous name '%s' for foreign table " "'%s' not found" % (oldname, intbl.name), ) raise else: # create new table stmts.append(intbl.create()) # check database tables for (sch, tbl) in self: table = self[(sch, tbl)] # if missing, drop it if (sch, tbl) not in intables: stmts.append(table.drop()) else: # compare table objects stmts.append(table.diff_map(intables[(sch, tbl)])) return stmts
KeyError
dataset/ETHPy150Open perseas/Pyrseas/pyrseas/dbobject/foreign.py/ForeignTableDict.diff_map
def stepAxis(self, op, p, sourceSequence): targetSequence = [] for node in sourceSequence: if not isinstance(node,(ModelObject, etree._ElementTree, ModelAttribute)): raise XPathException(self.progHeader, 'err:XPTY0020', _('Axis step {0} context item is not a node: {1}').format(op, node)) targetNodes = [] if isinstance(p,QNameDef): ns = p.namespaceURI; localname = p.localName; axis = p.axis if p.isAttribute: if isinstance(node,ModelObject): attrTag = p.localName if p.unprefixed else p.clarkNotation modelAttribute = None try: modelAttribute = node.xAttributes[attrTag] except (AttributeError, TypeError, IndexError, KeyError): # may be lax or deferred validated try: xmlValidate(node.modelXbrl, node, p) modelAttribute = node.xAttributes[attrTag] except (AttributeError, __HOLE__, IndexError, KeyError): pass if modelAttribute is None: value = node.get(attrTag) if value is not None: targetNodes.append(ModelAttribute(node,p.clarkNotation,UNKNOWN,value,value,value)) elif modelAttribute.xValid >= VALID: targetNodes.append(modelAttribute) elif op == '/' or op is None: if axis is None or axis == "child": if isinstance(node,(ModelObject, etree._ElementTree)): targetNodes = XmlUtil.children(node, ns, localname) elif axis == "parent": if isinstance(node,ModelAttribute): parentNode = [ node.modelElement ] else: parentNode = [ XmlUtil.parent(node) ] if (isinstance(node,ModelObject) and (not ns or ns == parentNode.namespaceURI or ns == "*") and (localname == parentNode.localName or localname == "*")): targetNodes = [ parentNode ] elif axis == "self": if (isinstance(node,ModelObject) and (not ns or ns == node.namespaceURI or ns == "*") and (localname == node.localName or localname == "*")): targetNodes = [ node ] elif axis.startswith("descendant"): if isinstance(node,(ModelObject, etree._ElementTree)): targetNodes = XmlUtil.descendants(node, ns, localname) if (axis.endswith("-or-self") and isinstance(node,ModelObject) and (not ns or ns == node.namespaceURI or ns == "*") and (localname == node.localName or localname == "*")): targetNodes.append(node) elif axis.startswith("ancestor"): if isinstance(node,ModelObject): targetNodes = [ancestor for ancestor in XmlUtil.ancestors(node) if ((not ns or ns == ancestor.namespaceURI or ns == "*") and (localname == ancestor.localName or localname == "*"))] if (axis.endswith("-or-self") and isinstance(node,ModelObject) and (not ns or ns == node.namespaceURI or ns == "*") and (localname == node.localName or localname == "*")): targetNodes.insert(0, node) elif axis.endswith("-sibling"): if isinstance(node,ModelObject): targetNodes = [sibling for sibling in node.itersiblings(preceding=axis.startswith("preceding")) if ((not ns or ns == sibling.namespaceURI or ns == "*") and (localname == sibling.localName or localname == "*"))] elif axis == "preceding": if isinstance(node,ModelObject): for preceding in node.getroottree().iter(): if preceding == node: break elif ((not ns or ns == preceding.namespaceURI or ns == "*") and (localname == preceding.localName or localname == "*")): targetNodes.append(preceding) elif axis == "following": if isinstance(node,ModelObject): foundNode = False for following in node.getroottree().iter(): if following == node: foundNode = True elif (foundNode and (not ns or ns == following.namespaceURI or ns == "*") and (localname == following.localName or localname == "*")): targetNodes.append(following) elif op == '//': if isinstance(node,(ModelObject, etree._ElementTree)): targetNodes = XmlUtil.descendants(node, ns, localname) elif op == '..': if isinstance(node,ModelAttribute): targetNodes = [ node.modelElement ] else: targetNodes = [ XmlUtil.parent(node) ] elif isinstance(p, OperationDef) and isinstance(p.name,QNameDef): if isinstance(node,ModelObject): if p.name.localName == "text": # note this is not string value, just child text targetNodes = [node.textValue] # todo: add element, attribute, node, etc... elif p == '*': # wildcard if op == '/' or op is None: if isinstance(node,(ModelObject, etree._ElementTree)): targetNodes = XmlUtil.children(node, '*', '*') elif op == '//': if isinstance(node,(ModelObject, etree._ElementTree)): targetNodes = XmlUtil.descendants(node, '*', '*') targetSequence.extend(targetNodes) return targetSequence
TypeError
dataset/ETHPy150Open Arelle/Arelle/arelle/XPathContext.py/XPathContext.stepAxis
def atomize(self, p, x): # sequence if isinstance(x, SEQUENCE_TYPES): sequence = [] for item in self.flattenSequence(x): atomizedItem = self.atomize(p, item) if atomizedItem != []: sequence.append(atomizedItem) return sequence # individual items if isinstance(x, _RANGE): return x baseXsdType = None e = None if isinstance(x, ModelFact): if x.isTuple: raise XPathException(p, 'err:FOTY0012', _('Atomizing tuple {0} that does not have a typed value').format(x)) if x.isNil: return [] baseXsdType = x.concept.baseXsdType v = x.value # resolves default value e = x elif isinstance(x, ModelAttribute): # ModelAttribute is a tuple (below), check this first! return x.xValue else: if isinstance(x, ModelObject): e = x if e is not None: if getattr(e, "xValid", 0) == VALID_NO_CONTENT: raise XPathException(p, 'err:FOTY0012', _('Atomizing element {0} that does not have a typed value').format(x)) if e.get("{http://www.w3.org/2001/XMLSchema-instance}nil") == "true": return [] try: if e.xValid >= VALID: return e.xValue except AttributeError: pass modelXbrl = x.modelXbrl modelConcept = modelXbrl.qnameConcepts.get(qname(x)) if modelConcept is not None: baseXsdType = modelConcept.baseXsdType v = x.stringValue if baseXsdType in ("float", "double"): try: x = float(v) except ValueError: raise XPathException(p, 'err:FORG0001', _('Atomizing {0} to a {1} does not have a proper value').format(x,baseXsdType)) elif baseXsdType == "decimal": try: x = Decimal(v) except InvalidOperation: raise XPathException(p, 'err:FORG0001', _('Atomizing {0} to decimal does not have a proper value')) elif baseXsdType in ("integer", "nonPositiveInteger","negativeInteger","nonNegativeInteger","positiveInteger", "long","unsignedLong", "int","unsignedInt", "short","unsignedShort", "byte","unsignedByte"): try: x = _INT(v) except __HOLE__: raise XPathException(p, 'err:FORG0001', _('Atomizing {0} to an integer does not have a proper value').format(x)) elif baseXsdType == "boolean": x = (v == "true" or v == "1") elif baseXsdType == "QName" and e is not None: x = qname(e, v) elif baseXsdType == "anyURI": x = anyURI(v.strip()) elif baseXsdType in ("normalizedString","token","language","NMTOKEN","Name","NCName","ID","IDREF","ENTITY"): x = v.strip() elif baseXsdType == "XBRLI_DATEUNION": x = dateTime(v, type=DATEUNION) elif baseXsdType == "date": x = dateTime(v, type=DATE) elif baseXsdType == "dateTime": x = dateTime(v, type=DATETIME) elif baseXsdType == "noContent": x = None # can't be atomized elif baseXsdType: x = str(v) return x
ValueError
dataset/ETHPy150Open Arelle/Arelle/arelle/XPathContext.py/XPathContext.atomize
def __init__(self, filename): super(JsonStore, self).__init__() self.filename = filename self.data = {} if exists(filename): try: with io.open(filename, encoding='utf-8') as fd: self.data = json.load(fd) except __HOLE__: print("Unable to read the state.db, content will be replaced.")
ValueError
dataset/ETHPy150Open kivy/kivy-ios/toolchain.py/JsonStore.__init__
def build_recipes(names, ctx): # gather all the dependencies print("Want to build {}".format(names)) graph = Graph() recipe_to_load = names recipe_loaded = [] while names: name = recipe_to_load.pop(0) if name in recipe_loaded: continue try: recipe = Recipe.get_recipe(name, ctx) except __HOLE__: print("ERROR: No recipe named {}".format(name)) sys.exit(1) graph.add(name, name) print("Loaded recipe {} (depends of {}, optional are {})".format(name, recipe.depends, recipe.optional_depends)) for depend in recipe.depends: graph.add(name, depend) recipe_to_load += recipe.depends for depend in recipe.optional_depends: # in case of compilation after the initial one, take in account # of the already compiled recipes key = "{}.build_all".format(depend) if key in ctx.state: recipe_to_load.append(name) graph.add(name, depend) else: graph.add_optional(name, depend) recipe_loaded.append(name) build_order = list(graph.find_order()) print("Build order is {}".format(build_order)) recipes = [Recipe.get_recipe(name, ctx) for name in build_order] for recipe in recipes: recipe.init_with_ctx(ctx) for recipe in recipes: recipe.execute()
ImportError
dataset/ETHPy150Open kivy/kivy-ios/toolchain.py/build_recipes
def GetUserAppAndServe(self, script, env, start_response): """Dispatch a WSGI request to <script>.""" try: app, mod_file = self.GetUserApp(script) except __HOLE__: logging.exception('Failed to import %s', script) start_response('500 Internal Server Error', [], sys.exc_info()) return ['<h1>500 Internal Server Error</h1>\n'] except ValueError: logging.exception('Invalid runtime.') start_response('500 Internal Server Error', [], sys.exc_info()) return ['<h1>500 Internal Server Error</h1>\n'] return self.ServeApp(app, mod_file, env, start_response)
ImportError
dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/ext/vmruntime/meta_app.py/MetaWSGIApp.GetUserAppAndServe
def ServeApp(self, app, mod_file, env, start_response): """(Further) wrap the provided WSGI app and dispatch a request to it.""" for key in ENV_KEYS: if key in os.environ: del os.environ[key] for key in ENV_KEYS: assert key not in os.environ assert os.getenv(key) is None for key in self.user_env_variables: if key not in os.environ: os.environ[key] = self.user_env_variables[key] os.environ['AUTH_DOMAIN'] = 'gmail.com' os.environ['USER_IS_ADMIN'] = '0' for key in ENV_KEYS: value = env.get('HTTP_X_APPENGINE_' + key) if value: os.environ[key] = value elif key not in os.environ: os.environ[key] = '' user_ip = os.environ.get(WSGI_REMOTE_ADDR_ENV_KEY) if not user_ip: user_ip = env.get(X_APPENGINE_USER_IP_ENV_KEY) if user_ip: env[WSGI_REMOTE_ADDR_ENV_KEY] = user_ip os.environ[WSGI_REMOTE_ADDR_ENV_KEY] = user_ip os.environ['PATH_TRANSLATED'] = mod_file try: import appengine_config add_middleware = appengine_config.webapp_add_wsgi_middleware except (ImportError, __HOLE__): pass else: try: app = add_middleware(app) except Exception: logging.exception('Failure adding WSGI middleware') return app(env, start_response)
AttributeError
dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/ext/vmruntime/meta_app.py/MetaWSGIApp.ServeApp
def ServeStaticFile(self, matcher, appinfo_external, unused_env, start_response): """Serve a static file.""" static_files = appinfo_external.static_files static_dir = appinfo_external.static_dir if static_files: filename = matcher.expand(static_files) elif static_dir: x = matcher.end() path = matcher.string static_dir = static_dir.lstrip('/') filename = static_dir + path[x:] filename = os.path.abspath(filename) pwd = os.getcwd() if not filename.startswith(os.path.join(pwd, '')): logging.warn('Requested bad filename %r', filename) start_response('404 Not Found', []) return try: fp = open(filename, 'rb') except __HOLE__: logging.warn('Requested non-existent filename %r', filename) start_response('404 Not Found', []) return try: encoding = None mime_type = appinfo_external.mime_type if not mime_type: mime_type, encoding = mimetypes.guess_type(filename) headers = [] if mime_type: headers.append(('Content-Type', mime_type)) if encoding: headers.append(('Content-Encoding', encoding)) start_response('200 OK', headers) while True: data = fp.read(8192) if not data: break yield data finally: fp.close()
IOError
dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/ext/vmruntime/meta_app.py/MetaWSGIApp.ServeStaticFile
@classmethod def execute(cls, fst_model, input, is_file=False, nbest=None): logger = logging.getLogger(__name__) cmd = ['phonetisaurus-g2p', '--model=%s' % fst_model, '--input=%s' % input, '--words'] if is_file: cmd.append('--isfile') if nbest is not None: cmd.extend(['--nbest=%d' % nbest]) cmd = [str(x) for x in cmd] try: # FIXME: We can't just use subprocess.call and redirect stdout # and stderr, because it looks like Phonetisaurus can't open # an already opened file descriptor a second time. This is why # we have to use this somehow hacky subprocess.Popen approach. proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdoutdata, stderrdata = proc.communicate() except __HOLE__: logger.error("Error occured while executing command '%s'", ' '.join(cmd), exc_info=True) raise if stderrdata: for line in stderrdata.splitlines(): message = line.strip() if message: logger.debug(message) if proc.returncode != 0: logger.error("Command '%s' return with exit status %d", ' '.join(cmd), proc.returncode) raise OSError("Command execution failed") result = {} if stdoutdata is not None: for word, precision, pronounc in cls.PATTERN.findall(stdoutdata): if word not in result: result[word] = [] result[word].append(pronounc) return result
OSError
dataset/ETHPy150Open jasperproject/jasper-client/client/g2p.py/PhonetisaurusG2P.execute
def _http_request(self, method, uri, headers=None, body_parts=None): """Makes an HTTP request using httplib. Args: method: str example: 'GET', 'POST', 'PUT', 'DELETE', etc. uri: str or atom.http_core.Uri headers: dict of strings mapping to strings which will be sent as HTTP headers in the request. body_parts: list of strings, objects with a read method, or objects which can be converted to strings using str. Each of these will be sent in order as the body of the HTTP request. """ if isinstance(uri, (str, unicode)): uri = Uri.parse_uri(uri) connection = self._get_connection(uri, headers=headers) if self.debug: connection.debuglevel = 1 if connection.host != uri.host: connection.putrequest(method, str(uri)) else: connection.putrequest(method, uri._get_relative_path()) # Overcome a bug in Python 2.4 and 2.5 # httplib.HTTPConnection.putrequest adding # HTTP request header 'Host: www.google.com:443' instead of # 'Host: www.google.com', and thus resulting the error message # 'Token invalid - AuthSub token has wrong scope' in the HTTP response. if (uri.scheme == 'https' and int(uri.port or 443) == 443 and hasattr(connection, '_buffer') and isinstance(connection._buffer, list)): header_line = 'Host: %s:443' % uri.host replacement_header_line = 'Host: %s' % uri.host try: connection._buffer[connection._buffer.index(header_line)] = ( replacement_header_line) except __HOLE__: # header_line missing from connection._buffer pass # Send the HTTP headers. for header_name, value in headers.iteritems(): connection.putheader(header_name, value) connection.endheaders() # If there is data, send it in the request. if body_parts: for part in body_parts: _send_data_part(part, connection) # Return the HTTP Response from the server. return connection.getresponse()
ValueError
dataset/ETHPy150Open CollabQ/CollabQ/vendor/atom/http_core.py/HttpClient._http_request
def parseXML(cls, xmlStr, storeXML=False): """Convert an XML string into a nice instance tree of XMLNodes. xmlStr -- the XML to parse storeXML -- if True, stores the XML string in the root XMLNode.xml """ def __parseXMLElement(element, thisNode): """Recursive call to process this XMLNode.""" thisNode.elementName = element.nodeName #print element.nodeName # add element attributes as attributes to this node for i in range(element.attributes.length): an = element.attributes.item(i) thisNode[an.name] = an.nodeValue for a in element.childNodes: if a.nodeType == xml.dom.Node.ELEMENT_NODE: child = XMLNode() try: list = getattr(thisNode, a.nodeName) except __HOLE__: setattr(thisNode, a.nodeName, []) # add the child node as an attrib to this node list = getattr(thisNode, a.nodeName); #print "appending child: %s to %s" % (a.nodeName, thisNode.elementName) list.append(child); __parseXMLElement(a, child) elif a.nodeType == xml.dom.Node.TEXT_NODE: thisNode.elementText += a.nodeValue return thisNode dom = xml.dom.minidom.parseString(xmlStr) # get the root rootNode = XMLNode() if storeXML: rootNode.xml = xmlStr return __parseXMLElement(dom.firstChild, rootNode)
AttributeError
dataset/ETHPy150Open sightmachine/SimpleCV/SimpleCV/MachineLearning/query_imgs/flickrapi2.py/XMLNode.parseXML
def __getCachedToken(self): """Read and return a cached token, or None if not found. The token is read from the cached token file, which is basically the entire RSP response containing the auth element. """ try: f = file(self.__getCachedTokenFilename(), "r") data = f.read() f.close() rsp = XMLNode.parseXML(data) return rsp.auth[0].token[0].elementText except __HOLE__: return None #-----------------------------------------------------------------------
IOError
dataset/ETHPy150Open sightmachine/SimpleCV/SimpleCV/MachineLearning/query_imgs/flickrapi2.py/FlickrAPI.__getCachedToken
@register.tag def forum_time(parser, token): try: tag, time = token.split_contents() except __HOLE__: raise template.TemplateSyntaxError('forum_time requires single argument') else: return ForumTimeNode(time)
ValueError
dataset/ETHPy150Open slav0nic/djangobb/djangobb_forum/templatetags/forum_extras.py/forum_time
def was_modified_since(header=None, mtime=0, size=0): """ Was something modified since the user last downloaded it? header This is the value of the If-Modified-Since header. If this is None, I'll just return True. mtime This is the modification time of the item we're talking about. size This is the size of the item we're talking about. """ try: if header is None: raise ValueError matches = re.match(r"^([^;]+)(; length=([0-9]+))?$", header, re.IGNORECASE) header_mtime = parse_http_date(matches.group(1)) header_len = matches.group(3) if header_len and int(header_len) != size: raise ValueError if int(mtime) > header_mtime: raise ValueError except (__HOLE__, ValueError, OverflowError): return True return False
AttributeError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/views/static.py/was_modified_since
def get_rdata_class(rdclass, rdtype): def import_module(name): mod = __import__(name) components = name.split('.') for comp in components[1:]: mod = getattr(mod, comp) return mod mod = _rdata_modules.get((rdclass, rdtype)) rdclass_text = dns.rdataclass.to_text(rdclass) rdtype_text = dns.rdatatype.to_text(rdtype) rdtype_text = rdtype_text.replace('-', '_') if not mod: mod = _rdata_modules.get((dns.rdatatype.ANY, rdtype)) if not mod: try: mod = import_module('.'.join([_module_prefix, rdclass_text, rdtype_text])) _rdata_modules[(rdclass, rdtype)] = mod except ImportError: try: mod = import_module('.'.join([_module_prefix, 'ANY', rdtype_text])) _rdata_modules[(dns.rdataclass.ANY, rdtype)] = mod except __HOLE__: mod = None if mod: cls = getattr(mod, rdtype_text) else: cls = GenericRdata return cls
ImportError
dataset/ETHPy150Open catap/namebench/nb_third_party/dns/rdata.py/get_rdata_class
def try_printout(data, out, opts): ''' Safely get the string to print out, try the configured outputter, then fall back to nested and then to raw ''' try: return get_printout(out, opts)(data).rstrip() except (KeyError, __HOLE__): log.debug(traceback.format_exc()) try: return get_printout('nested', opts)(data).rstrip() except (KeyError, AttributeError): log.error('Nested output failed: ', exc_info=True) return get_printout('raw', opts)(data).rstrip()
AttributeError
dataset/ETHPy150Open saltstack/salt/salt/output/__init__.py/try_printout
def update_progress(opts, progress, progress_iter, out): ''' Update the progress iterator for the given outputter ''' # Look up the outputter try: progress_outputter = salt.loader.outputters(opts)[out] except __HOLE__: # Outputter is not loaded log.warning('Progress outputter not available.') return False progress_outputter(progress, progress_iter)
KeyError
dataset/ETHPy150Open saltstack/salt/salt/output/__init__.py/update_progress
def display_output(data, out=None, opts=None): ''' Print the passed data using the desired output ''' if opts is None: opts = {} display_data = try_printout(data, out, opts) output_filename = opts.get('output_file', None) log.trace('data = {0}'.format(data)) try: # output filename can be either '' or None if output_filename: with salt.utils.fopen(output_filename, 'a') as ofh: fdata = display_data if isinstance(fdata, six.text_type): try: fdata = fdata.encode('utf-8') except (__HOLE__, UnicodeEncodeError): # try to let the stream write # even if we didn't encode it pass ofh.write(fdata) ofh.write('\n') return if display_data: print_cli(display_data) except IOError as exc: # Only raise if it's NOT a broken pipe if exc.errno != errno.EPIPE: raise exc
UnicodeDecodeError
dataset/ETHPy150Open saltstack/salt/salt/output/__init__.py/display_output
def get_printout(out, opts=None, **kwargs): ''' Return a printer function ''' if opts is None: opts = {} if 'output' in opts: # new --out option out = opts['output'] if out == 'text': out = 'txt' elif out is None or out == '': out = 'nested' if opts.get('progress', False): out = 'progress' opts.update(kwargs) if 'color' not in opts: def is_pipe(): ''' Check if sys.stdout is a pipe or not ''' try: fileno = sys.stdout.fileno() except __HOLE__: fileno = -1 # sys.stdout is StringIO or fake return not os.isatty(fileno) if opts.get('force_color', False): opts['color'] = True elif opts.get('no_color', False) or is_pipe() or salt.utils.is_windows(): opts['color'] = False else: opts['color'] = True outputters = salt.loader.outputters(opts) if out not in outputters: # Since the grains outputter was removed we don't need to fire this # error when old minions are asking for it if out != 'grains': log.error('Invalid outputter {0} specified, fall back to nested'.format(out)) return outputters['nested'] return outputters[out]
AttributeError
dataset/ETHPy150Open saltstack/salt/salt/output/__init__.py/get_printout
def __call__(self, result=None): """ Wrapper around default __call__ method to perform common Django test set up. This means that user-defined Test Cases aren't required to include a call to super().setUp(). """ testMethod = getattr(self, self._testMethodName) skipped = (getattr(self.__class__, "__unittest_skip__", False) or getattr(testMethod, "__unittest_skip__", False)) if not skipped: try: self._pre_setup() except (KeyboardInterrupt, __HOLE__): raise except Exception: result.addError(self, sys.exc_info()) return super(SimpleTestCase, self).__call__(result) if not skipped: try: self._post_teardown() except (KeyboardInterrupt, SystemExit): raise except Exception: result.addError(self, sys.exc_info()) return
SystemExit
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/test/testcases.py/SimpleTestCase.__call__
def assertJSONEqual(self, raw, expected_data, msg=None): try: data = json.loads(raw) except ValueError: self.fail("First argument is not valid JSON: %r" % raw) if isinstance(expected_data, six.string_types): try: expected_data = json.loads(expected_data) except __HOLE__: self.fail("Second argument is not valid JSON: %r" % expected_data) self.assertEqual(data, expected_data, msg=msg)
ValueError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/test/testcases.py/SimpleTestCase.assertJSONEqual
def UpdateProjectList(self): new_project_paths = [] for project in self.manifest.projects.values(): if project.relpath: new_project_paths.append(project.relpath) file_name = 'project.list' file_path = os.path.join(self.manifest.repodir, file_name) old_project_paths = [] if os.path.exists(file_path): fd = open(file_path, 'r') try: old_project_paths = fd.read().split('\n') finally: fd.close() for path in old_project_paths: if not path: continue if path not in new_project_paths: """If the path has already been deleted, we don't need to do it """ if os.path.exists(self.manifest.topdir + '/' + path): project = Project( manifest = self.manifest, name = path, remote = RemoteSpec('origin'), gitdir = os.path.join(self.manifest.topdir, path, '.git'), worktree = os.path.join(self.manifest.topdir, path), relpath = path, revisionExpr = 'HEAD', revisionId = None) if project.IsDirty(): print >>sys.stderr, 'error: Cannot remove project "%s": \ uncommitted changes are present' % project.relpath print >>sys.stderr, ' commit changes, then run sync again' return -1 else: print >>sys.stderr, 'Deleting obsolete path %s' % project.worktree shutil.rmtree(project.worktree) # Try deleting parent subdirs if they are empty dir = os.path.dirname(project.worktree) while dir != self.manifest.topdir: try: os.rmdir(dir) except __HOLE__: break dir = os.path.dirname(dir) new_project_paths.sort() fd = open(file_path, 'w') try: fd.write('\n'.join(new_project_paths)) fd.write('\n') finally: fd.close() return 0
OSError
dataset/ETHPy150Open android/tools_repo/subcmds/sync.py/Sync.UpdateProjectList
def Execute(self, opt, args): if opt.jobs: self.jobs = opt.jobs if opt.network_only and opt.detach_head: print >>sys.stderr, 'error: cannot combine -n and -d' sys.exit(1) if opt.network_only and opt.local_only: print >>sys.stderr, 'error: cannot combine -n and -l' sys.exit(1) if opt.smart_sync: if not self.manifest.manifest_server: print >>sys.stderr, \ 'error: cannot smart sync: no manifest server defined in manifest' sys.exit(1) try: server = xmlrpclib.Server(self.manifest.manifest_server) p = self.manifest.manifestProject b = p.GetBranch(p.CurrentBranch) branch = b.merge if branch.startswith(R_HEADS): branch = branch[len(R_HEADS):] env = os.environ.copy() if (env.has_key('TARGET_PRODUCT') and env.has_key('TARGET_BUILD_VARIANT')): target = '%s-%s' % (env['TARGET_PRODUCT'], env['TARGET_BUILD_VARIANT']) [success, manifest_str] = server.GetApprovedManifest(branch, target) else: [success, manifest_str] = server.GetApprovedManifest(branch) if success: manifest_name = "smart_sync_override.xml" manifest_path = os.path.join(self.manifest.manifestProject.worktree, manifest_name) try: f = open(manifest_path, 'w') try: f.write(manifest_str) finally: f.close() except __HOLE__: print >>sys.stderr, 'error: cannot write manifest to %s' % \ manifest_path sys.exit(1) self.manifest.Override(manifest_name) else: print >>sys.stderr, 'error: %s' % manifest_str sys.exit(1) except socket.error: print >>sys.stderr, 'error: cannot connect to manifest server %s' % ( self.manifest.manifest_server) sys.exit(1) rp = self.manifest.repoProject rp.PreSync() mp = self.manifest.manifestProject mp.PreSync() if opt.repo_upgraded: _PostRepoUpgrade(self.manifest) if not opt.local_only: mp.Sync_NetworkHalf(quiet=opt.quiet) if mp.HasChanges: syncbuf = SyncBuffer(mp.config) mp.Sync_LocalHalf(syncbuf) if not syncbuf.Finish(): sys.exit(1) self.manifest._Unload() all = self.GetProjects(args, missing_ok=True) if not opt.local_only: to_fetch = [] now = time.time() if (24 * 60 * 60) <= (now - rp.LastFetch): to_fetch.append(rp) to_fetch.extend(all) fetched = self._Fetch(to_fetch, opt) _PostRepoFetch(rp, opt.no_repo_verify) if opt.network_only: # bail out now; the rest touches the working tree return if mp.HasChanges: syncbuf = SyncBuffer(mp.config) mp.Sync_LocalHalf(syncbuf) if not syncbuf.Finish(): sys.exit(1) _ReloadManifest(self) mp = self.manifest.manifestProject all = self.GetProjects(args, missing_ok=True) missing = [] for project in all: if project.gitdir not in fetched: missing.append(project) self._Fetch(missing, opt) if self.manifest.IsMirror: # bail out now, we have no working tree return if self.UpdateProjectList(): sys.exit(1) syncbuf = SyncBuffer(mp.config, detach_head = opt.detach_head) pm = Progress('Syncing work tree', len(all)) for project in all: pm.update() if project.worktree: project.Sync_LocalHalf(syncbuf) pm.end() print >>sys.stderr if not syncbuf.Finish(): sys.exit(1)
IOError
dataset/ETHPy150Open android/tools_repo/subcmds/sync.py/Sync.Execute
def __init__(self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False): # we are called internally, so short-circuit if fastpath: # data is an ndarray, index is defined if not isinstance(data, SingleBlockManager): data = SingleBlockManager(data, index, fastpath=True) if copy: data = data.copy() if index is None: index = data.index else: if index is not None: index = _ensure_index(index) if data is None: data = {} if dtype is not None: dtype = self._validate_dtype(dtype) if isinstance(data, MultiIndex): raise NotImplementedError("initializing a Series from a " "MultiIndex is not supported") elif isinstance(data, Index): # need to copy to avoid aliasing issues if name is None: name = data.name data = data._to_embed(keep_tz=True) copy = True elif isinstance(data, np.ndarray): pass elif isinstance(data, Series): if name is None: name = data.name if index is None: index = data.index else: data = data.reindex(index, copy=copy) data = data._data elif isinstance(data, dict): if index is None: if isinstance(data, OrderedDict): index = Index(data) else: index = Index(_try_sort(data)) try: if isinstance(index, DatetimeIndex): if len(data): # coerce back to datetime objects for lookup data = _dict_compat(data) data = lib.fast_multiget(data, index.astype('O'), default=np.nan) else: data = np.nan # GH #12169 elif isinstance(index, (PeriodIndex, TimedeltaIndex)): data = ([data.get(i, nan) for i in index] if data else np.nan) else: data = lib.fast_multiget(data, index.values, default=np.nan) except __HOLE__: data = ([data.get(i, nan) for i in index] if data else np.nan) elif isinstance(data, SingleBlockManager): if index is None: index = data.index else: data = data.reindex(index, copy=copy) elif isinstance(data, Categorical): # GH12574: Allow dtype=category only, otherwise error if ((dtype is not None) and not is_categorical_dtype(dtype)): raise ValueError("cannot specify a dtype with a " "Categorical unless " "dtype='category'") elif (isinstance(data, types.GeneratorType) or (compat.PY3 and isinstance(data, map))): data = list(data) elif isinstance(data, (set, frozenset)): raise TypeError("{0!r} type is unordered" "".format(data.__class__.__name__)) else: # handle sparse passed here (and force conversion) if isinstance(data, ABCSparseArray): data = data.to_dense() if index is None: if not is_list_like(data): data = [data] index = _default_index(len(data)) # create/copy the manager if isinstance(data, SingleBlockManager): if dtype is not None: data = data.astype(dtype=dtype, raise_on_error=False) elif copy: data = data.copy() else: data = _sanitize_array(data, index, dtype, copy, raise_cast_failure=True) data = SingleBlockManager(data, index, fastpath=True) generic.NDFrame.__init__(self, data, fastpath=True) self.name = name self._set_axis(0, index, fastpath=True)
TypeError
dataset/ETHPy150Open pydata/pandas/pandas/core/series.py/Series.__init__
def _ixs(self, i, axis=0): """ Return the i-th value or values in the Series by location Parameters ---------- i : int, slice, or sequence of integers Returns ------- value : scalar (int) or Series (slice, sequence) """ try: # dispatch to the values if we need values = self._values if isinstance(values, np.ndarray): return _index.get_value_at(values, i) else: return values[i] except __HOLE__: raise except: if isinstance(i, slice): indexer = self.index._convert_slice_indexer(i, kind='iloc') return self._get_values(indexer) else: label = self.index[i] if isinstance(label, Index): return self.take(i, axis=axis, convert=True) else: return _index.get_value_at(self, i)
IndexError
dataset/ETHPy150Open pydata/pandas/pandas/core/series.py/Series._ixs
def __getitem__(self, key): try: result = self.index.get_value(self, key) if not lib.isscalar(result): if is_list_like(result) and not isinstance(result, Series): # we need to box if we have a non-unique index here # otherwise have inline ndarray/lists if not self.index.is_unique: result = self._constructor( result, index=[key] * len(result), dtype=self.dtype).__finalize__(self) return result except InvalidIndexError: pass except (__HOLE__, ValueError): if isinstance(key, tuple) and isinstance(self.index, MultiIndex): # kludge pass elif key is Ellipsis: return self elif is_bool_indexer(key): pass else: # we can try to coerce the indexer (or this will raise) new_key = self.index._convert_scalar_indexer(key, kind='getitem') if type(new_key) != type(key): return self.__getitem__(new_key) raise except Exception: raise if com.is_iterator(key): key = list(key) if is_bool_indexer(key): key = check_bool_indexer(self.index, key) return self._get_with(key)
KeyError
dataset/ETHPy150Open pydata/pandas/pandas/core/series.py/Series.__getitem__
def __setitem__(self, key, value): def setitem(key, value): try: self._set_with_engine(key, value) return except (SettingWithCopyError): raise except (__HOLE__, ValueError): values = self._values if (com.is_integer(key) and not self.index.inferred_type == 'integer'): values[key] = value return elif key is Ellipsis: self[:] = value return elif is_bool_indexer(key): pass elif com.is_timedelta64_dtype(self.dtype): # reassign a null value to iNaT if isnull(value): value = tslib.iNaT try: self.index._engine.set_value(self._values, key, value) return except TypeError: pass self.loc[key] = value return except TypeError as e: if (isinstance(key, tuple) and not isinstance(self.index, MultiIndex)): raise ValueError("Can only tuple-index with a MultiIndex") # python 3 type errors should be raised if 'unorderable' in str(e): # pragma: no cover raise IndexError(key) if is_bool_indexer(key): key = check_bool_indexer(self.index, key) try: self.where(~key, value, inplace=True) return except InvalidIndexError: pass self._set_with(key, value) # do the setitem cacher_needs_updating = self._check_is_chained_assignment_possible() setitem(key, value) if cacher_needs_updating: self._maybe_update_cacher()
KeyError
dataset/ETHPy150Open pydata/pandas/pandas/core/series.py/Series.__setitem__
def _set_with_engine(self, key, value): values = self._values try: self.index._engine.set_value(values, key, value) return except __HOLE__: values[self.index.get_loc(key)] = value return
KeyError
dataset/ETHPy150Open pydata/pandas/pandas/core/series.py/Series._set_with_engine
def set_value(self, label, value, takeable=False): """ Quickly set single value at passed label. If label is not contained, a new object is created with the label placed at the end of the result index Parameters ---------- label : object Partial indexing with MultiIndex not allowed value : object Scalar value takeable : interpret the index as indexers, default False Returns ------- series : Series If label is contained, will be reference to calling Series, otherwise a new object """ try: if takeable: self._values[label] = value else: self.index._engine.set_value(self._values, label, value) return self except __HOLE__: # set using a non-recursive method self.loc[label] = value return self
KeyError
dataset/ETHPy150Open pydata/pandas/pandas/core/series.py/Series.set_value
def to_string(self, buf=None, na_rep='NaN', float_format=None, header=True, index=True, length=False, dtype=False, name=False, max_rows=None): """ Render a string representation of the Series Parameters ---------- buf : StringIO-like, optional buffer to write to na_rep : string, optional string representation of NAN to use, default 'NaN' float_format : one-parameter function, optional formatter function to apply to columns' elements if they are floats default None header: boolean, default True Add the Series header (index name) index : bool, optional Add index (row) labels, default True length : boolean, default False Add the Series length dtype : boolean, default False Add the Series dtype name : boolean, default False Add the Series name if not None max_rows : int, optional Maximum number of rows to show before truncating. If None, show all. Returns ------- formatted : string (if not buffer passed) """ the_repr = self._get_repr(float_format=float_format, na_rep=na_rep, header=header, index=index, length=length, dtype=dtype, name=name, max_rows=max_rows) # catch contract violations if not isinstance(the_repr, compat.text_type): raise AssertionError("result must be of type unicode, type" " of result is {0!r}" "".format(the_repr.__class__.__name__)) if buf is None: return the_repr else: try: buf.write(the_repr) except __HOLE__: with open(buf, 'w') as f: f.write(the_repr)
AttributeError
dataset/ETHPy150Open pydata/pandas/pandas/core/series.py/Series.to_string
@Appender(generic._shared_docs['sort_values'] % _shared_doc_kwargs) def sort_values(self, axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last'): axis = self._get_axis_number(axis) # GH 5856/5853 if inplace and self._is_cached: raise ValueError("This Series is a view of some other array, to " "sort in-place you must create a copy") def _try_kind_sort(arr): # easier to ask forgiveness than permission try: # if kind==mergesort, it can fail for object dtype return arr.argsort(kind=kind) except __HOLE__: # stable sort not available for object dtype # uses the argsort default quicksort return arr.argsort(kind='quicksort') arr = self._values sortedIdx = np.empty(len(self), dtype=np.int32) bad = isnull(arr) good = ~bad idx = _default_index(len(self)) argsorted = _try_kind_sort(arr[good]) if not ascending: argsorted = argsorted[::-1] if na_position == 'last': n = good.sum() sortedIdx[:n] = idx[good][argsorted] sortedIdx[n:] = idx[bad] elif na_position == 'first': n = bad.sum() sortedIdx[n:] = idx[good][argsorted] sortedIdx[:n] = idx[bad] else: raise ValueError('invalid na_position: {!r}'.format(na_position)) result = self._constructor(arr[sortedIdx], index=self.index[sortedIdx]) if inplace: self._update_inplace(result) else: return result.__finalize__(self)
TypeError
dataset/ETHPy150Open pydata/pandas/pandas/core/series.py/Series.sort_values
def _dir_additions(self): rv = set() for accessor in self._accessors: try: getattr(self, accessor) rv.add(accessor) except __HOLE__: pass return rv
AttributeError
dataset/ETHPy150Open pydata/pandas/pandas/core/series.py/Series._dir_additions
def _sanitize_array(data, index, dtype=None, copy=False, raise_cast_failure=False): """ sanitize input data to an ndarray, copy if specified, coerce to the dtype if specified """ if dtype is not None: dtype = _coerce_to_dtype(dtype) if isinstance(data, ma.MaskedArray): mask = ma.getmaskarray(data) if mask.any(): data, fill_value = _maybe_upcast(data, copy=True) data[mask] = fill_value else: data = data.copy() def _try_cast(arr, take_fast_path): # perf shortcut as this is the most common case if take_fast_path: if _possibly_castable(arr) and not copy and dtype is None: return arr try: subarr = _possibly_cast_to_datetime(arr, dtype) if not is_extension_type(subarr): subarr = np.array(subarr, dtype=dtype, copy=copy) except (__HOLE__, TypeError): if is_categorical_dtype(dtype): subarr = Categorical(arr) elif dtype is not None and raise_cast_failure: raise else: subarr = np.array(arr, dtype=object, copy=copy) return subarr # GH #846 if isinstance(data, (np.ndarray, Index, Series)): if dtype is not None: subarr = np.array(data, copy=False) # possibility of nan -> garbage if com.is_float_dtype(data.dtype) and com.is_integer_dtype(dtype): if not isnull(data).any(): subarr = _try_cast(data, True) elif copy: subarr = data.copy() else: subarr = _try_cast(data, True) elif isinstance(data, Index): # don't coerce Index types # e.g. indexes can have different conversions (so don't fast path # them) # GH 6140 subarr = _sanitize_index(data, index, copy=True) else: subarr = _try_cast(data, True) if copy: subarr = data.copy() elif isinstance(data, Categorical): subarr = data if copy: subarr = data.copy() return subarr elif isinstance(data, list) and len(data) > 0: if dtype is not None: try: subarr = _try_cast(data, False) except Exception: if raise_cast_failure: # pragma: no cover raise subarr = np.array(data, dtype=object, copy=copy) subarr = lib.maybe_convert_objects(subarr) else: subarr = _possibly_convert_platform(data) subarr = _possibly_cast_to_datetime(subarr, dtype) else: subarr = _try_cast(data, False) def create_from_value(value, index, dtype): # return a new empty value suitable for the dtype if is_datetimetz(dtype): subarr = DatetimeIndex([value] * len(index), dtype=dtype) elif is_categorical_dtype(dtype): subarr = Categorical([value] * len(index)) else: if not isinstance(dtype, (np.dtype, type(np.dtype))): dtype = dtype.dtype subarr = np.empty(len(index), dtype=dtype) subarr.fill(value) return subarr # scalar like if subarr.ndim == 0: if isinstance(data, list): # pragma: no cover subarr = np.array(data, dtype=object) elif index is not None: value = data # figure out the dtype from the value (upcast if necessary) if dtype is None: dtype, value = _infer_dtype_from_scalar(value) else: # need to possibly convert the value here value = _possibly_cast_to_datetime(value, dtype) subarr = create_from_value(value, index, dtype) else: return subarr.item() # the result that we want elif subarr.ndim == 1: if index is not None: # a 1-element ndarray if len(subarr) != len(index) and len(subarr) == 1: subarr = create_from_value(subarr[0], index, subarr.dtype) elif subarr.ndim > 1: if isinstance(data, np.ndarray): raise Exception('Data must be 1-dimensional') else: subarr = _asarray_tuplesafe(data, dtype=dtype) # This is to prevent mixed-type Series getting all casted to # NumPy string type, e.g. NaN --> '-1#IND'. if issubclass(subarr.dtype.type, compat.string_types): subarr = np.array(data, dtype=object, copy=copy) return subarr # backwards compatiblity
ValueError
dataset/ETHPy150Open pydata/pandas/pandas/core/series.py/_sanitize_array
@util.cached_property def time_sent(self): try: timestamp_text = _timestamp_xpb.one_(self._message_element) except __HOLE__: timestamp_text = _em_timestamp_xpb.one_(self._message_element) return helpers.parse_date_updated(timestamp_text)
IndexError
dataset/ETHPy150Open IvanMalison/okcupyd/okcupyd/messaging.py/Message.time_sent
@util.cached_property def correspondent_id(self): """ :returns: The id assigned to the correspondent of this message. """ try: return int(self._thread_element.attrib['data-personid']) except (__HOLE__, KeyError): try: return int(self.correspondent_profile.id) except: pass
ValueError
dataset/ETHPy150Open IvanMalison/okcupyd/okcupyd/messaging.py/MessageThread.correspondent_id
@util.cached_property def correspondent(self): """ :returns: The username of the user with whom the logged in user is conversing in this :class:`~.MessageThread`. """ try: return self._correspondent_xpb.one_(self._thread_element).strip() except __HOLE__: raise errors.NoCorrespondentError()
IndexError
dataset/ETHPy150Open IvanMalison/okcupyd/okcupyd/messaging.py/MessageThread.correspondent
@property def initiator(self): """ :returns: A :class:`~okcupyd.profile.Profile` instance belonging to the initiator of this :class:`~.MessageThread`. """ try: return self.messages[0].sender except __HOLE__: pass
IndexError
dataset/ETHPy150Open IvanMalison/okcupyd/okcupyd/messaging.py/MessageThread.initiator
@property def respondent(self): """ :returns: A :class:`~okcupyd.profile.Profile` instance belonging to the respondent of this :class:`~.MessageThread`. """ try: return self.messages[0].recipient except __HOLE__: pass
IndexError
dataset/ETHPy150Open IvanMalison/okcupyd/okcupyd/messaging.py/MessageThread.respondent
def load_config(self, file): """Attempts to load a JSON config file for Cardinal. Takes a file path, attempts to decode its contents from JSON, then validate known config options to see if they can safely be loaded in. their place. The final merged dictionary object is saved to the If they can't, the default value from the config spec is used in the instance and returned. Keyword arguments: file -- Path to a JSON config file. Returns: dict -- Dictionary object of the entire config. """ # Attempt to load and parse the config file try: f = open(file, 'r') json_config = self._utf8_json(json.load(f)) f.close() # File did not exist or we can't open it for another reason except IOError: self.logger.warning( "Can't open %s (using defaults / command-line values)" % file ) # Thrown by json.load() when the content isn't valid JSON except ValueError: self.logger.warning( "Invalid JSON in %s, (using defaults / command-line values)" % file ) else: # For every option, for option in self.spec.options: try: # If the option wasn't defined in the config, default if option not in json_config: json_config[option] = None self.config[option] = self.spec.return_value_or_default( option, json_config[option]) except __HOLE__: self.logger.warning("Option %s not in spec -- ignored" % option) # If we didn't load the config earlier, or there was nothing in it... if self.config == {} and self.spec.options != {}: for option in self.spec.options: # Grab the default self.config[option] = self.spec.options[option][1] return self.config
KeyError
dataset/ETHPy150Open JohnMaguire/Cardinal/cardinal/config.py/ConfigParser.load_config
def merge_argparse_args_into_config(self, args): """Merges the args returned by argparse.ArgumentParser into the config. Keyword arguments: args -- The args object returned by argsparse.parse_args(). Returns: dict -- Dictionary object of the entire config. """ for option in self.spec.options: try: # If the value exists in args and is set, then update the # config's value value = getattr(args, option) if value is not None: self.config[option] = value except __HOLE__: self.logger.debug( "Option %s not in CLI arguments -- not updated" % option ) return self.config
AttributeError
dataset/ETHPy150Open JohnMaguire/Cardinal/cardinal/config.py/ConfigParser.merge_argparse_args_into_config
def validate_remote(ctx, param, value): if value: try: remote, branch = value.split('/') return (remote, branch) except __HOLE__: raise click.BadParameter('remote need to be in format <remote>/<branch>')
ValueError
dataset/ETHPy150Open marcwebbie/passpie/passpie/validators.py/validate_remote
def validate_cols(ctx, param, value): if value: try: validated = {c: index for index, c in enumerate(value.split(',')) if c} for col in ('name', 'login', 'password'): assert col in validated return validated except (AttributeError, ValueError): raise click.BadParameter('cols need to be in format col1,col2,col3') except __HOLE__ as e: raise click.BadParameter('missing mandatory column: {}'.format(e))
AssertionError
dataset/ETHPy150Open marcwebbie/passpie/passpie/validators.py/validate_cols
def read_headers(rfile, hdict=None): """Read headers from the given stream into the given header dict. If hdict is None, a new header dict is created. Returns the populated header dict. Headers which are repeated are folded together using a comma if their specification so dictates. This function raises ValueError when the read bytes violate the HTTP spec. You should probably return "400 Bad Request" if this happens. """ if hdict is None: hdict = {} while True: line = rfile.readline() if not line: # No more data--illegal end of headers raise ValueError("Illegal end of headers.") if line == CRLF: # Normal end of headers break if not line.endswith(CRLF): raise ValueError("HTTP requires CRLF terminators") if line[0] in (SPACE, TAB): # It's a continuation line. v = line.strip() else: try: k, v = line.split(COLON, 1) except __HOLE__: raise ValueError("Illegal header line.") # TODO: what about TE and WWW-Authenticate? k = k.strip().title() v = v.strip() hname = k if k in comma_separated_headers: existing = hdict.get(hname) if existing: v = b", ".join((existing, v)) hdict[hname] = v return hdict
ValueError
dataset/ETHPy150Open timonwong/OmniMarkupPreviewer/OmniMarkupLib/libs/cherrypy/wsgiserver/wsgiserver3.py/read_headers
def _fetch(self): if self.closed: return line = self.rfile.readline() self.bytes_read += len(line) if self.maxlen and self.bytes_read > self.maxlen: raise MaxSizeExceeded("Request Entity Too Large", self.maxlen) line = line.strip().split(SEMICOLON, 1) try: chunk_size = line.pop(0) chunk_size = int(chunk_size, 16) except __HOLE__: raise ValueError("Bad chunked transfer size: " + repr(chunk_size)) if chunk_size <= 0: self.closed = True return ## if line: chunk_extension = line[0] if self.maxlen and self.bytes_read + chunk_size > self.maxlen: raise IOError("Request Entity Too Large") chunk = self.rfile.read(chunk_size) self.bytes_read += len(chunk) self.buffer += chunk crlf = self.rfile.read(2) if crlf != CRLF: raise ValueError( "Bad chunked transfer coding (expected '\\r\\n', " "got " + repr(crlf) + ")")
ValueError
dataset/ETHPy150Open timonwong/OmniMarkupPreviewer/OmniMarkupLib/libs/cherrypy/wsgiserver/wsgiserver3.py/ChunkedRFile._fetch