function: string (lengths 79 to 138k)
label: string (20 classes)
info: string (lengths 42 to 261)
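Each record below carries these three fields: a Python function with one exception class masked as __HOLE__, a label naming the class that fills the hole, and an info path pointing at the originating source file. A minimal sketch of iterating records of this shape, assuming they are stored as JSON Lines — the file name examples.jsonl is a hypothetical placeholder; only the field names come from the schema above:

import json

# Hypothetical file name; the three field names match the schema above.
with open("examples.jsonl") as f:
    for line in f:
        record = json.loads(line)
        masked_code = record["function"]  # Python source containing __HOLE__
        answer = record["label"]          # exception class that fills the hole
        origin = record["info"]           # path of the original source file
        assert "__HOLE__" in masked_code
        print(origin, "->", answer)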
def _cull(self):
    if int(self._num_entries) < self._max_entries:
        return
    try:
        filelist = sorted(os.listdir(self._dir))
    except (__HOLE__, OSError):
        return
    if self._cull_frequency == 0:
        doomed = filelist
    else:
        doomed = [os.path.join(self._dir, k) for (i, k) in enumerate(filelist)
                  if i % self._cull_frequency == 0]
    for topdir in doomed:
        try:
            for root, _, files in os.walk(topdir):
                for f in files:
                    self._delete(os.path.join(root, f))
        except (IOError, OSError):
            pass
IOError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Django-1.6.10/django/core/cache/backends/filebased.py/FileBasedCache._cull
def _createdir(self):
    try:
        os.makedirs(self._dir)
    except __HOLE__:
        raise EnvironmentError(
            "Cache directory '%s' does not exist and could not be created'" % self._dir)
OSError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Django-1.6.10/django/core/cache/backends/filebased.py/FileBasedCache._createdir
def clear(self):
    try:
        shutil.rmtree(self._dir)
    except (__HOLE__, OSError):
        pass  # For backwards compatibility
IOError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Django-1.6.10/django/core/cache/backends/filebased.py/FileBasedCache.clear
def update(self, req, id, body):
    """Bulk delete floating IPs."""
    context = req.environ['nova.context']
    authorize(context)

    if id != "delete":
        msg = _("Unknown action")
        raise webob.exc.HTTPNotFound(explanation=msg)
    try:
        ip_range = body['ip_range']
    except (__HOLE__, KeyError):
        raise webob.exc.HTTPUnprocessableEntity()
    try:
        ips = (objects.FloatingIPList.make_ip_info(address, None, None)
               for address in self._address_to_hosts(ip_range))
    except exception.InvalidInput as exc:
        raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
    objects.FloatingIPList.destroy(context, ips)

    return {"floating_ips_bulk_delete": ip_range}
TypeError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/api/openstack/compute/legacy_v2/contrib/floating_ips_bulk.py/FloatingIPBulkController.update
def _address_to_hosts(self, addresses):
    """Iterate over hosts within an address range.

    If an explicit range specifier is missing, the parameter is
    interpreted as a specific individual address.
    """
    try:
        return [netaddr.IPAddress(addresses)]
    except __HOLE__:
        net = netaddr.IPNetwork(addresses)
        if net.size < 4:
            reason = _("/%s should be specified as single address(es) "
                       "not in cidr format") % net.prefixlen
            raise exception.InvalidInput(reason=reason)
        else:
            return net.iter_hosts()
    except netaddr.AddrFormatError as exc:
        raise exception.InvalidInput(reason=six.text_type(exc))
ValueError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/api/openstack/compute/legacy_v2/contrib/floating_ips_bulk.py/FloatingIPBulkController._address_to_hosts
def __init__(self, loop, url, **kwargs):
    loop = loop
    self._lock = RLock()

    # initialize the heartbeart. It will PING the lookupd server to say
    # it's alive
    try:
        self.heartbeat_timeout = kwargs.pop('heartbeat')
    except __HOLE__:
        self.heartbeat_timeout = 15.0
    self._heartbeat = pyuv.Timer(loop)

    # define status
    self.active = False
    self.closed = False

    # dict to maintain list of sent messages to handle replies from the
    # lookupd server
    self.messages = dict()

    super(LookupClient, self).__init__(loop, url, **kwargs)
KeyError
dataset/ETHPy150Open benoitc/gaffer/gaffer/gafferd/lookup.py/LookupClient.__init__
def on_message(self, message):
    try:
        result = json.loads(message)
    except ValueError as e:
        LOGGER.error('invalid json: %r' % str(e))
        return

    msgid = result.get('msgid')
    if not msgid:
        LOGGER.error('invalid message: %r' % str(e))
        return

    try:
        msg = self.messages.pop(msgid)
    except __HOLE__:
        return

    msg.reply(result)
KeyError
dataset/ETHPy150Open benoitc/gaffer/gaffer/gafferd/lookup.py/LookupClient.on_message
def test_optimizeSpherearray_nolowhigh(self):
    self.top.add('comp', SphereFunctionArray())
    self.top.driver.workflow.add('comp')
    self.top.driver.add_objective("comp.total")
    try:
        self.top.driver.add_parameter('comp.x[0]')
    except __HOLE__ as err:
        self.assertEqual(str(err),
                         "driver: Trying to add parameter 'comp.x[0]', "
                         "but no lower limit was found and no 'low' "
                         "argument was given. One or the other must be "
                         "specified.")
    else:
        self.fail('TypeError expected')
ValueError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.lib/src/openmdao/lib/drivers/test/test_opt_genetic.py/TestCase.test_optimizeSpherearray_nolowhigh
def test_list_remove_clear_params(self):
    self.top.add('comp', SphereFunction())
    self.top.driver.workflow.add('comp')

    self.top.driver.add_parameter('comp.x')
    self.top.driver.add_parameter('comp.y')

    params = self.top.driver.list_param_targets()
    self.assertEqual(set(params), set(['comp.x', 'comp.y']))
    self.assertEqual(len(params), 2)

    self.top.driver.remove_parameter('comp.x')
    params = self.top.driver.list_param_targets()
    self.assertEqual(params, ['comp.y'])

    try:
        self.top.driver.remove_parameter('xyz')
    except __HOLE__ as err:
        self.assertEqual(str(err),
                         "driver: Trying to remove parameter 'xyz' that is "
                         "not in this driver.")
    else:
        self.fail('RuntimeError Expected')

    self.top.driver.add_parameter('comp.x')
    self.top.driver.clear_parameters()
    params = self.top.driver.list_param_targets()
    self.assertEqual(params, [])

    self.top.driver.add_parameter('comp.y')
    try:
        self.top.driver.add_parameter('comp.y')
    except ValueError as err:
        self.assertEqual(str(err),
                         "driver: ['comp.y'] are already Parameter targets")
    else:
        self.fail('RuntimeError expected')
AttributeError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.lib/src/openmdao/lib/drivers/test/test_opt_genetic.py/TestCase.test_list_remove_clear_params
def test_improper_parameter_type(self):
    class SomeComp(Component):
        """Arbitrary component with a few variables, but which does not
        really do any calculations"""
        z = Str("test", iotype="in")

    class Simulation(Assembly):
        """Top Level Assembly used for simulation"""
        def configure(self):
            """Adds the Genetic driver to the assembly"""
            self.add('driver', Genetic())
            self.add('comp', SomeComp())
            self.driver.workflow.add('comp')
            self.driver.add_parameter('comp.z')

    try:
        s = set_as_top(Simulation())
    except __HOLE__ as err:
        self.assertEqual(str(err),
                         "driver: The value of parameter 'comp.z' must be a real or "
                         "integral type, but its type is 'str'.")
    else:
        self.fail("ValueError expected")
ValueError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.lib/src/openmdao/lib/drivers/test/test_opt_genetic.py/TestCase.test_improper_parameter_type
def __update_data(self, data):
    """
    Automatically updates the contents of the 'data' object with any
    fields that are set in the object but weren't specified in the data.
    This makes method calls simpler. This method will also verify the
    data in the dictionary.

    :param data: A map of Slack API fields to desired values.
    :type data: dict
    :returns: A copy of the `data` dictionary, but with extra values if
              they were specified in the object constructor.
    """
    # Duplicate the data to make this method non-destructive.
    return_data = dict(data)
    # Iterate over each of the supported fields.
    for field in supported_fields:
        # Let's see if we have a value defined for that attribute.
        # Note that this requires the object's attributes to have the same
        # name as the Slack API fields.
        try:
            value = getattr(self, field)
        except __HOLE__:
            # Didn't have it, but let's not throw an error. Just continue.
            continue
        # If the field isn't already in the data, add it.
        # This ensure that overriding calls are not overridden themselves.
        if value is not None and not field in return_data:
            return_data[field] = value
    # Ensure the dictionary is good-to-go.
    self.__verify_data(data)
    return return_data
AttributeError
dataset/ETHPy150Open univ-of-utah-marriott-library-apple/management_tools/management_tools/slack.py/IncomingWebhooksSender.__update_data
def _parse_int(num):
    try:
        return num and int(num)
    except __HOLE__:
        pass
ValueError
dataset/ETHPy150Open kobotoolbox/kobocat/onadata/apps/logger/views.py/_parse_int
@require_POST
@csrf_exempt
def bulksubmission(request, username):
    # puts it in a temp directory.
    # runs "import_tools(temp_directory)"
    # deletes
    posting_user = get_object_or_404(User, username__iexact=username)

    # request.FILES is a django.utils.datastructures.MultiValueDict
    # for each key we have a list of values
    try:
        temp_postfile = request.FILES.pop("zip_submission_file", [])
    except IOError:
        return HttpResponseBadRequest(_(u"There was a problem receiving your "
                                        u"ODK submission. [Error: IO Error "
                                        u"reading data]"))
    if len(temp_postfile) != 1:
        return HttpResponseBadRequest(_(u"There was a problem receiving your"
                                        u" ODK submission. [Error: multiple "
                                        u"submission files (?)]"))

    postfile = temp_postfile[0]
    tempdir = tempfile.gettempdir()
    our_tfpath = os.path.join(tempdir, postfile.name)

    with open(our_tfpath, 'wb') as f:
        f.write(postfile.read())

    with open(our_tfpath, 'rb') as f:
        total_count, success_count, errors = import_instances_from_zip(
            f, posting_user)
    # chose the try approach as suggested by the link below
    # http://stackoverflow.com/questions/82831
    try:
        os.remove(our_tfpath)
    except __HOLE__:
        # TODO: log this Exception somewhere
        pass
    json_msg = {
        'message': _(u"Submission complete. Out of %(total)d "
                     u"survey instances, %(success)d were imported, "
                     u"(%(rejected)d were rejected as duplicates, "
                     u"missing forms, etc.)") %
        {'total': total_count, 'success': success_count,
         'rejected': total_count - success_count},
        'errors': u"%d %s" % (len(errors), errors)
    }
    audit = {
        "bulk_submission_log": json_msg
    }
    audit_log(Actions.USER_BULK_SUBMISSION, request.user, posting_user,
              _("Made bulk submissions."), audit, request)
    response = HttpResponse(json.dumps(json_msg))
    response.status_code = 200
    response['Location'] = request.build_absolute_uri(request.path)
    return response
IOError
dataset/ETHPy150Open kobotoolbox/kobocat/onadata/apps/logger/views.py/bulksubmission
@require_http_methods(["HEAD", "POST"])
@csrf_exempt
def submission(request, username=None):
    if username:
        formlist_user = get_object_or_404(User, username__iexact=username)
        profile, created = UserProfile.objects.get_or_create(
            user=formlist_user)

        if profile.require_auth:
            authenticator = HttpDigestAuthenticator()
            if not authenticator.authenticate(request):
                return authenticator.build_challenge_response()

    if request.method == 'HEAD':
        response = OpenRosaResponse(status=204)
        if username:
            response['Location'] = request.build_absolute_uri().replace(
                request.get_full_path(), '/%s/submission' % username)
        else:
            response['Location'] = request.build_absolute_uri().replace(
                request.get_full_path(), '/submission')
        return response

    xml_file_list = []
    media_files = []

    # request.FILES is a django.utils.datastructures.MultiValueDict
    # for each key we have a list of values
    try:
        xml_file_list = request.FILES.pop("xml_submission_file", [])
        if len(xml_file_list) != 1:
            return OpenRosaResponseBadRequest(
                _(u"There should be a single XML submission file.")
            )
        # save this XML file and media files as attachments
        media_files = request.FILES.values()

        # get uuid from post request
        uuid = request.POST.get('uuid')

        error, instance = safe_create_instance(
            username, xml_file_list[0], media_files, uuid, request)

        if error:
            return error
        elif instance is None:
            return OpenRosaResponseBadRequest(
                _(u"Unable to create submission."))

        audit = {
            "xform": instance.xform.id_string
        }
        audit_log(
            Actions.SUBMISSION_CREATED, request.user, instance.xform.user,
            _("Created submission on form %(id_string)s.") %
            {
                "id_string": instance.xform.id_string
            }, audit, request)

        # response as html if posting with a UUID
        if not username and uuid:
            response = _html_submission_response(request, instance)
        else:
            response = _submission_response(request, instance)

        # ODK needs two things for a form to be considered successful
        # 1) the status code needs to be 201 (created)
        # 2) The location header needs to be set to the host it posted to
        response.status_code = 201
        response['Location'] = request.build_absolute_uri(request.path)
        return response
    except __HOLE__ as e:
        if _bad_request(e):
            return OpenRosaResponseBadRequest(
                _(u"File transfer interruption."))
        else:
            raise
    finally:
        if len(xml_file_list):
            [_file.close() for _file in xml_file_list]
        if len(media_files):
            [_file.close() for _file in media_files]
IOError
dataset/ETHPy150Open kobotoolbox/kobocat/onadata/apps/logger/views.py/submission
@csrf_exempt
def ziggy_submissions(request, username):
    """
    Accepts ziggy JSON submissions.

    - stored in mongo, ziggy_instances
    - ZiggyInstance Django Model
    Copy form_instance - to create actual Instances for a specific form?
    """
    data = {'message': _(u"Invalid request!")}
    status = 400
    form_user = get_object_or_404(User, username__iexact=username)
    if request.method == 'POST':
        json_post = request.body
        if json_post:
            # save submission
            # i.e pick entity_id, instance_id, server_version, client_version?
            # reporter_id
            records = ZiggyInstance.create_ziggy_instances(
                form_user, json_post)
            data = {'status': 'success',
                    'message': _(u"Successfully processed %(records)s records"
                                 % {'records': records})}
            status = 201
    else:
        # get clientVersion and reportId
        reporter_id = request.GET.get('reporter-id', None)
        client_version = request.GET.get('timestamp', 0)
        if reporter_id is not None and client_version is not None:
            try:
                cursor = ZiggyInstance.get_current_list(
                    reporter_id, client_version)
            except __HOLE__ as e:
                status = 400
                data = {'message': '%s' % e}
            else:
                status = 200
                data = [record for record in cursor]
    return HttpResponse(json.dumps(data), status=status)
ValueError
dataset/ETHPy150Open kobotoolbox/kobocat/onadata/apps/logger/views.py/ziggy_submissions
def __getitem__(self, item):
    try:
        return dict.__getitem__(self, item)
    except __HOLE__:
        value = self[item] = type(self)()
        return value
KeyError
dataset/ETHPy150Open yvesalexandre/bandicoot/bandicoot/helper/tools.py/AutoVivification.__getitem__
def __init__(self, name, level, pathname, lineno,
             msg, args, exc_info, func=None):
    """
    Initialize a logging record with interesting information.
    """
    ct = time.time()
    self.name = name
    self.msg = msg
    #
    # The following statement allows passing of a dictionary as a sole
    # argument, so that you can do something like
    #  logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
    # Suggested by Stefan Behnel.
    # Note that without the test for args[0], we get a problem because
    # during formatting, we test to see if the arg is present using
    # 'if self.args:'. If the event being logged is e.g. 'Value is %d'
    # and if the passed arg fails 'if self.args:' then no formatting
    # is done. For example, logger.warn('Value is %d', 0) would log
    # 'Value is %d' instead of 'Value is 0'.
    # For the use case of passing a dictionary, this should not be a
    # problem.
    if args and len(args) == 1 and (
            type(args[0]) == types.DictType) and args[0]:
        args = args[0]
    self.args = args
    self.levelname = getLevelName(level)
    self.levelno = level
    self.pathname = pathname
    try:
        self.filename = os.path.basename(pathname)
        self.module = os.path.splitext(self.filename)[0]
    except (TypeError, __HOLE__, AttributeError):
        self.filename = pathname
        self.module = "Unknown module"
    self.exc_info = exc_info
    self.exc_text = None  # used to cache the traceback text
    self.lineno = lineno
    self.funcName = func
    self.created = ct
    self.msecs = (ct - long(ct)) * 1000
    self.relativeCreated = (self.created - _startTime) * 1000
    if logThreads and thread:
        self.thread = thread.get_ident()
        self.threadName = threading.current_thread().name
    else:
        self.thread = None
        self.threadName = None
    if logProcesses and hasattr(os, 'getpid'):
        self.process = os.getpid()
    else:
        self.process = None
ValueError
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/logging/__init__.py/LogRecord.__init__
def emit(self, record):
    """
    Emit a record.

    If a formatter is specified, it is used to format the record.
    The record is then written to the stream with a trailing newline.
    If exception information is present, it is formatted using
    traceback.print_exception and appended to the stream. If the stream
    has an 'encoding' attribute, it is used to encode the message before
    output to the stream.
    """
    try:
        msg = self.format(record)
        fs = "%s\n"
        if not hasattr(types, "UnicodeType"):  # if no unicode support...
            self.stream.write(fs % msg)
        else:
            try:
                if getattr(self.stream, 'encoding', None) is not None:
                    self.stream.write(fs % msg.encode(self.stream.encoding))
                else:
                    self.stream.write(fs % msg)
            except UnicodeError:
                self.stream.write(fs % msg.encode("UTF-8"))
        self.flush()
    except (__HOLE__, SystemExit):
        raise
    except:
        self.handleError(record)
KeyboardInterrupt
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/logging/__init__.py/StreamHandler.emit
def make_msgid(idstring=None):
    """Returns a string suitable for RFC 2822 compliant Message-ID, e.g:

    <[email protected]>

    Optional idstring if given is a string used to strengthen the
    uniqueness of the message id.
    """
    timeval = time.time()
    utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(timeval))
    try:
        pid = os.getpid()
    except __HOLE__:
        # No getpid() in Jython, for example.
        pid = 1
    randint = random.randrange(100000)
    if idstring is None:
        idstring = ''
    else:
        idstring = '.' + idstring
    idhost = DNS_NAME
    msgid = '<%s.%s.%s%s@%s>' % (utcdate, pid, randint, idstring, idhost)
    return msgid


# Header names that contain structured address data (RFC #5322)
AttributeError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/core/mail/message.py/make_msgid
def __init__(self, *args, **kwargs):
    """Instantiate the settings."""

    # update this dict from default settings (but only for ALL_CAPS
    # settings)
    for setting in dir(default_settings):
        if setting == setting.upper():
            setattr(self, setting, getattr(default_settings, setting))

    # use user-provided settings
    try:
        settings_module_path = os.environ[ENVIRONMENT_VARIABLE]
        try:
            settings_module = importlib.import_module(settings_module_path)
            for setting in dir(settings_module):
                if setting == setting.upper():
                    setting_value = getattr(settings_module, setting)
                    setattr(self, setting, setting_value)
        except __HOLE__ as e:
            error = ImportError(
                'Could not import settings "%s" (Is it on sys.path?): %s' %
                (settings_module_path, e))
            logger.warn(error)
    except KeyError:
        # NOTE: This is arguably an EnvironmentError, but that causes
        # problems with Python's interactive help.
        logger.warn(
            ('Environment variable %s is undefined. '
             'Use default settings for now.') % ENVIRONMENT_VARIABLE)


# Instantiate the settings globally.
ImportError
dataset/ETHPy150Open Nextdoor/ndscheduler/ndscheduler/__init__.py/Settings.__init__
def byte_to_human(size):
    """Transform the size in bytes into the appropriate unit of measurement.

    :param int size: Size in bytes.
    :return: Actual size in B/KB/MB/GB.
    :rtype: str
    """
    try:
        size = int(size)
    except __HOLE__:
        return (0, "B")
    um = "B"
    if size < pow(2, 20):
        size = size / pow(2, 10)
        um = "KB"
    elif size < pow(2, 30):
        size = size / pow(2, 20)
        um = "MB"
    else:
        size = size / pow(2, 30)
        um = "GB"
    return (size, um)
ValueError
dataset/ETHPy150Open owtf/owtf/install/space_checker_utils.py/byte_to_human
def get_wget_download_size(command):
    """Get the whole download size in bytes.

    :param str command: Wget command.
    :return: The actual download size in bytes.
    :rtype: int
    """
    # Wget supports HTTP, HTTPS and FTP protocols.
    if not command.startswith('wget '):
        return 0
    # Check the url(s) is/are valid.
    urls = re.findall(r'((?:https|http|ftp)://[^\s]+)', command)
    if not urls:
        return 0
    size = 0
    for url in urls:
        try:
            response = urllib2.urlopen(url)
            size += int(response.info().getheader('Content-Length'))
        except (urllib2.HTTPError, urllib2.URLError, ValueError, __HOLE__):
            pass
    return size
TypeError
dataset/ETHPy150Open owtf/owtf/install/space_checker_utils.py/get_wget_download_size
def get_fs_free_space():
    """Get the available space of the current filesystem.

    :return: The available size in KB.
    :rtype: int
    """
    try:
        stat = os.statvfs('.')
        return (stat.f_bavail * stat.f_frsize) / 1024
    except __HOLE__:
        print("[!] Failed to get the filesystem disk space usage")
        return 0
OSError
dataset/ETHPy150Open owtf/owtf/install/space_checker_utils.py/get_fs_free_space
def send_next_work(sock, works):
    try:
        work = works.next()
        sock.send_json(work)
    except __HOLE__:
        # If no more work is available, we still have to reply something.
        sock.send_json({})
StopIteration
dataset/ETHPy150Open mdup/easycluster/master.py/send_next_work
def dns_record_match(dns_conn, server, zone, name, record_type, data):
    urec = data_to_dns_record(record_type, data)

    select_flags = dnsserver.DNS_RPC_VIEW_AUTHORITY_DATA

    try:
        buflen, res = dns_conn.DnssrvEnumRecords2(
            dnsserver.DNS_CLIENT_VERSION_LONGHORN, 0, server, zone, name,
            None, record_type, select_flags, None, None)
    except __HOLE__, e:
        return None

    if not res or res.count == 0:
        return None

    rec_match = None
    for rec in res.rec[0].records:
        if rec.wType != record_type:
            continue

        found = False
        if record_type == dnsp.DNS_TYPE_A:
            if rec.data == urec.data:
                found = True
        elif record_type == dnsp.DNS_TYPE_AAAA:
            if rec.data == urec.data:
                found = True
        elif record_type == dnsp.DNS_TYPE_PTR:
            if dns_name_equal(rec.data, urec.data):
                found = True
        elif record_type == dnsp.DNS_TYPE_CNAME:
            if dns_name_equal(rec.data, urec.data):
                found = True
        elif record_type == dnsp.DNS_TYPE_NS:
            if dns_name_equal(rec.data, urec.data):
                found = True
        elif record_type == dnsp.DNS_TYPE_MX:
            if dns_name_equal(rec.data.nameExchange, urec.data.nameExchange) and \
               rec.data.wPreference == urec.data.wPreference:
                found = True
        elif record_type == dnsp.DNS_TYPE_SRV:
            if rec.data.wPriority == urec.data.wPriority and \
               rec.data.wWeight == urec.data.wWeight and \
               rec.data.wPort == urec.data.wPort and \
               dns_name_equal(rec.data.nameTarget, urec.data.nameTarget):
                found = True
        elif record_type == dnsp.DNS_TYPE_SOA:
            if rec.data.dwSerialNo == urec.data.dwSerialNo and \
               rec.data.dwRefresh == urec.data.dwRefresh and \
               rec.data.dwRetry == urec.data.dwRetry and \
               rec.data.dwExpire == urec.data.dwExpire and \
               rec.data.dwMinimumTtl == urec.data.dwMinimumTtl and \
               dns_name_equal(rec.data.NamePrimaryServer,
                              urec.data.NamePrimaryServer) and \
               dns_name_equal(rec.data.ZoneAdministratorEmail,
                              urec.data.ZoneAdministratorEmail):
                found = True
        elif record_type == dnsp.DNS_TYPE_TXT:
            if rec.data.count == urec.data.count:
                found = True
                for i in xrange(rec.data.count):
                    found = found and \
                        (rec.data.str[i].str == urec.data.str[i].str)

        if found:
            rec_match = rec
            break

    return rec_match
RuntimeError
dataset/ETHPy150Open byt3bl33d3r/pth-toolkit/lib/python2.7/site-packages/samba/netcmd/dns.py/dns_record_match
def _get_from_members_items_or_properties(obj, key):
    try:
        if hasattr(obj, key):
            return obj.id
        if hasattr(obj, 'properties') and key in obj.properties:
            return obj.properties[key]
    except (KeyError, TypeError, __HOLE__):
        pass
    try:
        if key in obj:
            return obj[key]
        elif 'properties' in obj and key in obj['properties']:
            return obj['properties'][key]
    except (KeyError, TypeError):
        pass
    return None


## TODO: what does this do on an unsaved Synapse Entity object?
AttributeError
dataset/ETHPy150Open Sage-Bionetworks/synapsePythonClient/synapseclient/utils.py/_get_from_members_items_or_properties
def itersubclasses(cls, _seen=None):
    """
    http://code.activestate.com/recipes/576949/ (r3)

    itersubclasses(cls)

    Generator over all subclasses of a given class, in depth first order.

    >>> list(itersubclasses(int)) == [bool]
    True
    >>> class A(object): pass
    >>> class B(A): pass
    >>> class C(A): pass
    >>> class D(B,C): pass
    >>> class E(D): pass
    >>>
    >>> for cls in itersubclasses(A):
    ...     print(cls.__name__)
    B
    D
    E
    C
    >>> # get ALL (new-style) classes currently defined
    >>> [cls.__name__ for cls in itersubclasses(object)] #doctest: +ELLIPSIS
    ['type', ...'tuple', ...]
    """
    if not isinstance(cls, type):
        raise TypeError('itersubclasses must be called with '
                        'new-style classes, not %.100r' % cls)
    if _seen is None:
        _seen = set()
    try:
        subs = cls.__subclasses__()
    except __HOLE__:  # fails only when cls is type
        subs = cls.__subclasses__(cls)
    for sub in subs:
        if sub not in _seen:
            _seen.add(sub)
            yield sub
            for sub in itersubclasses(sub, _seen):
                yield sub
TypeError
dataset/ETHPy150Open Sage-Bionetworks/synapsePythonClient/synapseclient/utils.py/itersubclasses
def touch(path, times=None):
    """
    Make sure a file exists. Update its access and modified times.
    """
    basedir = os.path.dirname(path)
    if not os.path.exists(basedir):
        try:
            os.makedirs(basedir)
        except __HOLE__ as err:
            ## alternate processes might be creating these at the same time
            if err.errno != errno.EEXIST:
                raise

    with open(path, 'a'):
        os.utime(path, times)

    return path
OSError
dataset/ETHPy150Open Sage-Bionetworks/synapsePythonClient/synapseclient/utils.py/touch
def makeService(config):
    f = ftp.FTPFactory()

    r = ftp.FTPRealm(config['root'])
    p = portal.Portal(r, config.get('credCheckers', []))

    f.tld = config['root']
    f.userAnonymous = config['userAnonymous']
    f.portal = p
    f.protocol = ftp.FTP

    try:
        portno = int(config['port'])
    except __HOLE__:
        portno = 2121
    return internet.TCPServer(portno, f)
KeyError
dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/tap/ftp.py/makeService
@errors.callback
def filter(self, result):
    log.debug("Converting date using format '%s'", self.arguments)

    try:
        return str(time.mktime(time.strptime(result, self.arguments)))
    except __HOLE__:
        if self.default is not None:
            return self.default
        else:
            raise errors.TestCritical(
                "Failed to parse date with format '%s'" % self.arguments)
ValueError
dataset/ETHPy150Open marineam/nagcat/python/nagcat/plugins/filter_time.py/DateFilter.filter
def afterTest(self, test):
    """Clear capture buffer."""
    if self.nocaptureall:
        if not self.hadError:
            logAsInfo("\tpassed in %s", time.time() - self.testStartTime)
        else:
            logAsInfo("\tfailed in %s seconds. See logs in %s",
                      time.time() - self.testStartTime, self.fname)

    if self.stdoutFD is None:
        return

    setLoggingLevel(logging.ERROR)

    sys.stdout.flush()
    sys.stderr.flush()

    os.dup2(self.stdoutFD, 1)
    os.close(self.stdoutFD)

    os.dup2(self.stderrFD, 2)
    os.close(self.stderrFD)

    self.stdoutFD = None
    self.stderrFD = None

    self.outfile.flush()
    self.outfile.close()
    self.outfile = None

    if not self.hadError:
        try:
            os.remove(self.fname)
        except __HOLE__:
            pass
        logAsInfo("\tpassed in %s", time.time() - self.testStartTime)
    else:
        # the test failed. Report the failure
        logAsInfo("\tfailed in %s seconds. See logs in %s",
                  time.time() - self.testStartTime, self.fname)
OSError
dataset/ETHPy150Open ufora/ufora/ufora/test/OutputCaptureNosePlugin.py/OutputCaptureNosePlugin.afterTest
@defer.inlineCallbacks
def write(self, data):
    """Write data to the file. There is no return value.

    `data` can be either a string of bytes or a file-like object
    (implementing :meth:`read`). Due to buffering, the data may not
    actually be written to the database until the :meth:`close` method is
    called. Raises :class:`ValueError` if this file is already closed.
    Raises :class:`TypeError` if `data` is not an instance of
    :class:`str` or a file-like object.

    :Parameters:
      - `data`: string of bytes or file-like object to be written
        to the file
    """
    if self._closed:
        raise ValueError("TxMongo: cannot write to a closed file.")

    try:
        # file-like
        read = data.read
    except AttributeError:
        # string
        if not isinstance(data, (bytes, unicode)):
            raise TypeError("TxMongo: can only write strings or file-like objects.")
        if isinstance(data, unicode):
            try:
                data = data.encode(self.encoding)
            except __HOLE__:
                raise TypeError("TxMongo: must specify an encoding for file in "
                                "order to write {0}".format(data))
        read = StringIO(data).read

    if self._buffer.tell() > 0:
        # Make sure to flush only when _buffer is complete
        space = self.chunk_size - self._buffer.tell()
        if space:
            to_write = read(space)
            self._buffer.write(to_write)
            if len(to_write) < space:
                return  # EOF or incomplete
        yield self.__flush_buffer()
    to_write = read(self.chunk_size)
    while to_write and len(to_write) == self.chunk_size:
        yield self.__flush_data(to_write)
        to_write = read(self.chunk_size)
    self._buffer.write(to_write)
AttributeError
dataset/ETHPy150Open twisted/txmongo/txmongo/_gridfs/grid_file.py/GridIn.write
def test_do_requires_workflow_dict(self):
    try:
        self.base_step.do()
    except __HOLE__:
        exception = True
    self.assertTrue(exception)
TypeError
dataset/ETHPy150Open globocom/database-as-a-service/dbaas/workflow/steps/tests/test_base_step.py/BaseStepTestCase.test_do_requires_workflow_dict
def test_undo_requires_workflow_dict(self):
    try:
        self.base_step.undo()
    except __HOLE__:
        exception = True
    self.assertTrue(exception)
TypeError
dataset/ETHPy150Open globocom/database-as-a-service/dbaas/workflow/steps/tests/test_base_step.py/BaseStepTestCase.test_undo_requires_workflow_dict
def _dataTable(self,
               list_fields,
               sort_by = [[1, "asc"]],
               represent={},
               ajax_item_id=None,
               dt_bulk_select=[],
               ):
    """
    Method to get the data for the dataTable
    This can be either a raw html representation or
    and ajax call update
    Additional data will be cached to limit calls back to the server

    @param list_fields: list of field names
    @param sort_by: list of sort by columns
    @param represent: a dict of field callback functions used
                      to change how the data will be displayed
                      keyed on the field identifier

    @return: a dict()
       In html representations this will be a table of the data
       plus the sortby instructions
       In ajax this will be a json response

       In addition the following values will be made available:
       recordsTotal         Number of records in the filtered data set
       recordsFiltered      Number of records to display
       start                Start point in the ordered data set
       limit                Number of records in the ordered set
       NOTE: limit - recordsFiltered = total cached
    """

    from s3data import S3DataTable
    request = self.request
    resource = self.resource
    s3 = current.response.s3

    # Controller Filter
    if s3.filter is not None:
        self.resource.add_filter(s3.filter)

    representation = request.representation

    # Datatable Filter
    totalrows = None
    if representation == "aadata":
        searchq, orderby, left = resource.datatable_filter(list_fields,
                                                           request.get_vars)
        if searchq is not None:
            totalrows = resource.count()
            resource.add_filter(searchq)
    else:
        orderby, left = None, None

    # Start/Limit
    if representation == "aadata":
        get_vars = request.get_vars
        start = get_vars.get("displayStart", None)
        limit = get_vars.get("pageLength", None)
        draw = int(get_vars.draw or 0)
    else:  # catch all
        start = 0
        limit = s3.ROWSPERPAGE
    if limit is not None:
        try:
            start = int(start)
            limit = int(limit)
        except __HOLE__:
            start = None
            limit = None  # use default
    else:
        start = None  # use default

    if not orderby:
        orderby = ~resource.table.error

    data = resource.select(list_fields,
                           start=start,
                           limit=limit,
                           count=True,
                           orderby=orderby,
                           left=left)
    rows = data["rows"]

    displayrows = data["numrows"]
    if totalrows is None:
        totalrows = displayrows

    # Represent the data
    _represent = represent.items()
    for row in rows:
        record_id = row["s3_import_item.id"]
        for column, method in _represent:
            if column in row:
                row[column] = method(record_id, row[column])

    # Build the datatable
    rfields = resource.resolve_selectors(list_fields)[0]
    dt = S3DataTable(rfields, rows, orderby=orderby)
    datatable_id = "s3import_1"
    if representation == "aadata":
        # Ajax callback
        output = dt.json(totalrows,
                         displayrows,
                         datatable_id,
                         draw,
                         dt_bulk_actions = [current.T("Import")])
    else:
        # Initial HTML response
        url = "/%s/%s/%s/import.aadata?job=%s" % (request.application,
                                                  request.controller,
                                                  request.function,
                                                  ajax_item_id)
        items = dt.html(totalrows,
                        displayrows,
                        datatable_id,
                        dt_ajax_url=url,
                        dt_bulk_actions = [current.T("Import")],
                        dt_bulk_selected = dt_bulk_select)
        output = {"items": items}

        current.response.s3.dataTableID = [datatable_id]
    return output

# -------------------------------------------------------------------------
ValueError
dataset/ETHPy150Open sahana/eden/modules/s3/s3import.py/S3Importer._dataTable
def execute_import_task(self, task):
    """ Execute each import job, in order """

    start = datetime.now()
    if task[0] == 1:
        s3db = current.s3db
        response = current.response
        errorString = "prepopulate error: file %s missing"
        # Store the view
        view = response.view

        _debug("Running job %s %s (filename=%s transform=%s)",
               task[1],
               task[2],
               task[3],
               task[4],
               )

        prefix = task[1]
        name = task[2]
        tablename = "%s_%s" % (prefix, name)
        if tablename in self.alternateTables:
            details = self.alternateTables[tablename]
            if "tablename" in details:
                tablename = details["tablename"]
            s3db.table(tablename)
            if "loader" in details:
                loader = details["loader"]
                if loader is not None:
                    loader()
            if "prefix" in details:
                prefix = details["prefix"]
            if "name" in details:
                name = details["name"]

        try:
            resource = s3db.resource(tablename)
        except __HOLE__:
            # Table cannot be loaded
            self.errorList.append("WARNING: Unable to find table %s import job skipped" % tablename)
            return

        # Check if the source file is accessible
        filename = task[3]
        if filename[:7] == "http://":
            req = urllib2.Request(url=filename)
            try:
                f = urllib2.urlopen(req)
            except urllib2.HTTPError, e:
                self.errorList.append("Could not access %s: %s" % (filename, e.read()))
                return
            except:
                self.errorList.append(errorString % filename)
                return
            else:
                csv = f
        else:
            try:
                csv = open(filename, "r")
            except IOError:
                self.errorList.append(errorString % filename)
                return

        # Check if the stylesheet is accessible
        try:
            S = open(task[4], "r")
        except IOError:
            self.errorList.append(errorString % task[4])
            return
        else:
            S.close()

        # Customise the resource
        customise = current.deployment_settings.customise_resource(tablename)
        if customise:
            request = S3Request(prefix, name, current.request)
            customise(request, tablename)

        extra_data = None
        if task[5]:
            try:
                extradata = self.unescape(task[5], {"'": '"'})
                extradata = json.loads(extradata)
                extra_data = extradata
            except:
                self.errorList.append("WARNING:5th parameter invalid, parameter %s ignored" % task[5])

        auth = current.auth
        auth.rollback = True
        try:
            # @todo: add extra_data and file attachments
            resource.import_xml(csv,
                                format="csv",
                                stylesheet=task[4],
                                extra_data=extra_data)
        except SyntaxError, e:
            self.errorList.append("WARNING: import error - %s (file: %s, stylesheet: %s)" %
                                  (e, filename, task[4]))
            auth.rollback = False
            return

        if not resource.error:
            current.db.commit()
        else:
            # Must roll back if there was an error!
            error = resource.error
            self.errorList.append("%s - %s: %s" % (
                                  task[3], resource.tablename, error))
            errors = current.xml.collect_errors(resource)
            if errors:
                self.errorList.extend(errors)
            current.db.rollback()

        auth.rollback = False

        # Restore the view
        response.view = view
        end = datetime.now()
        duration = end - start
        csvName = task[3][task[3].rfind("/") + 1:]
        try:
            # Python 2.7
            duration = '{:.2f}'.format(duration.total_seconds() / 60)
            msg = "%s import job completed in %s mins" % (csvName, duration)
        except AttributeError:
            # older Python
            msg = "%s import job completed in %s" % (csvName, duration)
        self.resultList.append(msg)
        current.log.debug(msg)

# -------------------------------------------------------------------------
AttributeError
dataset/ETHPy150Open sahana/eden/modules/s3/s3import.py/S3BulkImporter.execute_import_task
def execute_special_task(self, task):
    """ Execute import tasks which require a custom function,
        such as import_role
    """

    start = datetime.now()
    s3 = current.response.s3
    if task[0] == 2:
        fun = task[1]
        filepath = task[2]
        extraArgs = task[3]
        if filepath is None:
            if extraArgs is None:
                error = s3[fun]()
            else:
                error = s3[fun](*extraArgs)
        elif extraArgs is None:
            error = s3[fun](filepath)
        else:
            error = s3[fun](filepath, *extraArgs)
        if error:
            self.errorList.append(error)
        end = datetime.now()
        duration = end - start
        try:
            # Python 2.7
            duration = '{:.2f}'.format(duration.total_seconds()/60)
            msg = "%s import job completed in %s mins" % (fun, duration)
        except __HOLE__:
            # older Python
            msg = "%s import job completed in %s" % (fun, duration)
        self.resultList.append(msg)
        current.log.debug(msg)

# -------------------------------------------------------------------------
AttributeError
dataset/ETHPy150Open sahana/eden/modules/s3/s3import.py/S3BulkImporter.execute_special_task
def import_role(self, filename):
    """ Import Roles from CSV """

    # Check if the source file is accessible
    try:
        openFile = open(filename, "r")
    except __HOLE__:
        return "Unable to open file %s" % filename

    auth = current.auth
    acl = auth.permission
    create_role = auth.s3_create_role

    def parseACL(_acl):
        permissions = _acl.split("|")
        aclValue = 0
        for permission in permissions:
            if permission == "READ":
                aclValue = aclValue | acl.READ
            if permission == "CREATE":
                aclValue = aclValue | acl.CREATE
            if permission == "UPDATE":
                aclValue = aclValue | acl.UPDATE
            if permission == "DELETE":
                aclValue = aclValue | acl.DELETE
            if permission == "REVIEW":
                aclValue = aclValue | acl.REVIEW
            if permission == "APPROVE":
                aclValue = aclValue | acl.APPROVE
            if permission == "PUBLISH":
                aclValue = aclValue | acl.PUBLISH
            if permission == "ALL":
                aclValue = aclValue | acl.ALL
        return aclValue

    reader = self.csv.DictReader(openFile)
    roles = {}
    acls = {}
    args = {}
    for row in reader:
        if row != None:
            role = row["role"]
            if "description" in row:
                desc = row["description"]
            else:
                desc = ""
            rules = {}
            extra_param = {}
            if "controller" in row and row["controller"]:
                rules["c"] = row["controller"]
            if "function" in row and row["function"]:
                rules["f"] = row["function"]
            if "table" in row and row["table"]:
                rules["t"] = row["table"]
            if row["oacl"]:
                rules["oacl"] = parseACL(row["oacl"])
            if row["uacl"]:
                rules["uacl"] = parseACL(row["uacl"])
            #if "org" in row and row["org"]:
            #    rules["organisation"] = row["org"]
            #if "facility" in row and row["facility"]:
            #    rules["facility"] = row["facility"]
            if "entity" in row and row["entity"]:
                rules["entity"] = row["entity"]
            if "hidden" in row and row["hidden"]:
                extra_param["hidden"] = row["hidden"]
            if "system" in row and row["system"]:
                extra_param["system"] = row["system"]
            if "protected" in row and row["protected"]:
                extra_param["protected"] = row["protected"]
            if "uid" in row and row["uid"]:
                extra_param["uid"] = row["uid"]
            if role in roles:
                acls[role].append(rules)
            else:
                roles[role] = [role, desc]
                acls[role] = [rules]
            if len(extra_param) > 0 and role not in args:
                args[role] = extra_param

    for rulelist in roles.values():
        if rulelist[0] in args:
            create_role(rulelist[0],
                        rulelist[1],
                        *acls[rulelist[0]],
                        **args[rulelist[0]])
        else:
            create_role(rulelist[0],
                        rulelist[1],
                        *acls[rulelist[0]])

# -------------------------------------------------------------------------
IOError
dataset/ETHPy150Open sahana/eden/modules/s3/s3import.py/S3BulkImporter.import_role
def import_image(self, filename,
                 tablename,
                 idfield,
                 imagefield):
    """ Import images, such as a logo or person image

        filename     a CSV list of records and filenames
        tablename    the name of the table
        idfield      the field used to identify the record
        imagefield   the field to where the image will be added

        Example:
        bi.import_image ("org_logos.csv", "org_organisation", "name", "logo")
        and the file org_logos.csv may look as follows
        id                            file
        Sahana Software Foundation    sahanalogo.jpg
        American Red Cross            icrc.gif
    """

    # Check if the source file is accessible
    try:
        openFile = open(filename, "r")
    except IOError:
        return "Unable to open file %s" % filename

    prefix, name = tablename.split("_", 1)

    reader = self.csv.DictReader(openFile)

    db = current.db
    s3db = current.s3db
    audit = current.audit
    table = s3db[tablename]
    idfield = table[idfield]
    base_query = (table.deleted != True)

    # Get callbacks
    get_config = s3db.get_config
    onvalidation = get_config(tablename, "update_onvalidation") or \
                   get_config(tablename, "onvalidation")
    onaccept = get_config(tablename, "update_onaccept") or \
               get_config(tablename, "onaccept")
    update_realm = get_config(tablename, "update_realm")
    if update_realm:
        set_realm_entity = current.auth.set_realm_entity
    update_super = s3db.update_super

    for row in reader:
        if row != None:
            # Open the file
            image = row["file"]
            try:
                # Extract the path to the CSV file, image should be in
                # this directory, or relative to it
                path = os.path.split(filename)[0]
                imagepath = os.path.join(path, image)
                openFile = open(imagepath, "rb")
            except __HOLE__:
                current.log.error("Unable to open image file %s" % image)
                continue
            image_source = StringIO(openFile.read())

            # Get the id of the resource
            query = base_query & (idfield == row["id"])
            record = db(query).select(limitby=(0, 1)
                                      ).first()
            try:
                record_id = record.id
            except:
                current.log.error("Unable to get record %s of the resource %s to attach the image file to" % (row["id"], tablename))
                continue

            # Create and accept the form
            form = SQLFORM(table, record, fields=["id", imagefield])
            form_vars = Storage()
            form_vars._formname = "%s/%s" % (tablename, record_id)
            form_vars.id = record_id
            source = Storage()
            source.filename = imagepath
            source.file = image_source
            form_vars[imagefield] = source
            if form.accepts(form_vars, onvalidation=onvalidation):
                # Audit
                audit("update", prefix, name, form=form,
                      record=record_id, representation="csv")

                # Update super entity links
                update_super(table, form_vars)

                # Update realm
                if update_realm:
                    set_realm_entity(table, form_vars, force_update=True)

                # Execute onaccept
                callback(onaccept, form, tablename=tablename)
            else:
                for (key, error) in form.errors.items():
                    current.log.error("error importing logo %s: %s %s" % (image, key, error))

# -------------------------------------------------------------------------
IOError
dataset/ETHPy150Open sahana/eden/modules/s3/s3import.py/S3BulkImporter.import_image
def import_remote_csv(self, url, prefix, resource, stylesheet):
    """ Import CSV files from remote servers """

    extension = url.split(".")[-1]
    if extension not in ("csv", "zip"):
        current.log.error("error importing remote file %s: invalid extension" % (url))
        return

    # Copy the current working directory to revert back to later
    cwd = os.getcwd()

    # Create the working directory
    TEMP = os.path.join(cwd, "temp")
    if not os.path.exists(TEMP):  # use web2py/temp/remote_csv as a cache
        import tempfile
        TEMP = tempfile.gettempdir()
    tempPath = os.path.join(TEMP, "remote_csv")
    if not os.path.exists(tempPath):
        try:
            os.mkdir(tempPath)
        except __HOLE__:
            current.log.error("Unable to create temp folder %s!" % tempPath)
            return

    filename = url.split("/")[-1]
    if extension == "zip":
        filename = filename.replace(".zip", ".csv")
    if os.path.exists(os.path.join(tempPath, filename)):
        current.log.warning("Using cached copy of %s" % filename)
    else:
        # Download if we have no cached copy
        # Set the current working directory
        os.chdir(tempPath)
        try:
            _file = fetch(url)
        except urllib2.URLError, exception:
            current.log.error(exception)
            # Revert back to the working directory as before.
            os.chdir(cwd)
            return

        if extension == "zip":
            # Need to unzip
            import zipfile
            try:
                myfile = zipfile.ZipFile(StringIO(_file))
            except zipfile.BadZipfile, exception:
                # e.g. trying to download through a captive portal
                current.log.error(exception)
                # Revert back to the working directory as before.
                os.chdir(cwd)
                return
            files = myfile.infolist()
            for f in files:
                filename = f.filename
                extension = filename.split(".")[-1]
                if extension == "csv":
                    _file = myfile.read(filename)
                    _f = open(filename, "w")
                    _f.write(_file)
                    _f.close()
                    break
            myfile.close()
        else:
            f = open(filename, "w")
            f.write(_file)
            f.close()

        # Revert back to the working directory as before.
        os.chdir(cwd)

    task = [1, prefix, resource,
            os.path.join(tempPath, filename),
            os.path.join(current.request.folder,
                         "static",
                         "formats",
                         "s3csv",
                         prefix,
                         stylesheet
                         ),
            None
            ]
    self.execute_import_task(task)

# -------------------------------------------------------------------------
OSError
dataset/ETHPy150Open sahana/eden/modules/s3/s3import.py/S3BulkImporter.import_remote_csv
def import_xml(self, filepath, prefix, resourcename, format):
    """
    Import XML data using an XSLT: static/formats/<format>/import.xsl
    """

    # Remove any spaces and enclosing double quote
    prefix = prefix.strip('" ')
    resourcename = resourcename.strip('" ')

    errorString = "prepopulate error: file %s missing"
    try:
        File = open(filepath, "r")
    except IOError:
        self.errorList.append(errorString % filepath)
        return

    stylesheet = os.path.join(current.request.folder,
                              "static",
                              "formats",
                              format,
                              "import.xsl")
    try:
        S = open(stylesheet, "r")
    except __HOLE__:
        self.errorList.append(errorString % stylesheet)
        return
    else:
        S.close()

    tablename = "%s_%s" % (prefix, resourcename)
    resource = current.s3db.resource(tablename)

    # Customise the resource
    customise = current.deployment_settings.customise_resource(tablename)
    if customise:
        request = S3Request(prefix, resourcename, current.request)
        customise(request, tablename)

    auth = current.auth
    auth.rollback = True
    try:
        resource.import_xml(File, stylesheet=stylesheet)
    except SyntaxError, e:
        self.errorList.append("WARNING: import error - %s (file: %s, stylesheet: %s/import.xsl)" %
                              (e, filepath, format))
        auth.rollback = False
        return

    if not resource.error:
        current.db.commit()
    else:
        # Must roll back if there was an error!
        error = resource.error
        self.errorList.append("%s - %s: %s" % (
                              filepath, tablename, error))
        errors = current.xml.collect_errors(resource)
        if errors:
            self.errorList.extend(errors)
        current.db.rollback()

    auth.rollback = False

# -------------------------------------------------------------------------
IOError
dataset/ETHPy150Open sahana/eden/modules/s3/s3import.py/S3BulkImporter.import_xml
@transaction.commit_on_success
def _create(self, request, project_slug, data):
    # Check for unavailable fields
    try:
        self._check_fields(data.iterkeys(), self.allowed_fields)
    except __HOLE__, e:
        msg = "Field '%s' is not allowed." % e
        logger.warning(msg)
        raise BadRequestError(msg)
    # Check for obligatory fields
    for field in ('name', 'slug', 'i18n_type', ):
        if field not in data:
            msg = "Field '%s' must be specified." % field
            logger.warning(msg)
            raise BadRequestError(msg)

    try:
        project = Project.objects.get(slug=project_slug)
    except Project.DoesNotExist, e:
        logger.warning(unicode(e), exc_info=True)
        raise NotFoundError(unicode(e))

    # In multipart/form-encode request variables have lists
    # as values. So we use __getitem__ isntead of pop, which returns
    # the last value
    method = data['i18n_type']; del data['i18n_type']
    if not registry.is_supported(method):
        msg = "i18n_type %s is not supported." % method
        logger.warning(msg)
        raise BadRequestError(msg)
    try:
        slug = data['slug']; del data['slug']
        name = data['name']; del data['name']
        # TODO for all fields
    except KeyError, e:
        msg = "Required field is missing: %s" % e
        logger.warning(msg)
        raise BadRequestError(msg)
    if len(slug) > 50:
        raise BadRequestError(
            "The value for slug is too long. It should be less than "
            "50 characters."
        )

    try:
        content = self._get_content(request, data)
        filename = self._get_filename(request, data)
    except NoContentError, e:
        raise BadRequestError(unicode(e))
    try:
        rb = ResourceBackend()
        rb_create = rb.create(
            project, slug, name, method, project.source_language,
            content, extra_data={'filename': filename}
        )
        post_resource_save.send(sender=None, instance=Resource.objects.get(
            slug=slug, project=project), created=True, user=request.user)
        return rb_create
    except ResourceBackendError, e:
        raise BadRequestError(unicode(e))
AttributeError
dataset/ETHPy150Open rvanlaar/easy-transifex/src/transifex/transifex/resources/api.py/ResourceHandler._create
def _update(self, request, project_slug, resource_slug):
    data = getattr(request, 'data', None)
    if not data:            # Check for {} as well
        return BAD_REQUEST("Empty request")
    try:
        self._check_fields(data.iterkeys(), self.written_fields)
    except __HOLE__, e:
        return BAD_REQUEST("Field '%s' is not allowed." % e)

    try:
        project = Project.objects.get(slug=project_slug)
    except Project.DoesNotExist:
        return rc.NOT_FOUND

    i18n_type = data.pop('i18n_type', None)

    try:
        resource = Resource.objects.get(slug=resource_slug)
    except Resource.DoesNotExist:
        return BAD_REQUEST("Resource %s does not exist" % resource_slug)
    try:
        for key, value in data.iteritems():
            setattr(resource, key, value)
        # if i18n_type is not None:
        #     resource.i18n_method = i18n_type
        resource.save()
    except:
        return rc.BAD_REQUEST
    return rc.ALL_OK
AttributeError
dataset/ETHPy150Open rvanlaar/easy-transifex/src/transifex/transifex/resources/api.py/ResourceHandler._update
def _get_content(self, request, data):
    """Get the content from the request.

    If it is file-based, return the contents of the file.

    Args:
        request: The django request object.
    Returns:
        The content of the string/file.
    """
    if 'application/json' in request.content_type:
        try:
            return data['content']
        except __HOLE__, e:
            msg = "No content provided"
            logger.warning(msg)
            raise NoContentError(msg)
    elif 'multipart/form-data' in request.content_type:
        if not request.FILES:
            msg = "No file has been uploaded."
            logger.warning(msg)
            raise NoContentError(msg)
        return content_from_uploaded_file(request.FILES)
    else:
        msg = "No content or file found"
        logger.warning(msg)
        raise NoContentError(msg)
KeyError
dataset/ETHPy150Open rvanlaar/easy-transifex/src/transifex/transifex/resources/api.py/ResourceHandler._get_content
def _update(self, request, project_slug, resource_slug, lang_code=None):
    # Permissions handling
    try:
        resource = Resource.objects.get(
            slug=resource_slug, project__slug=project_slug
        )
    except Resource.DoesNotExist:
        return rc.NOT_FOUND

    if lang_code == "source":
        language = resource.source_language
    else:
        try:
            language = Language.objects.by_code_or_alias(lang_code)
        except Language.DoesNotExist:
            logger.error("Weird! Selected language code (%s) does "
                         "not match with any language in the database."
                         % lang_code)
            return BAD_REQUEST(
                "Selected language code (%s) does not match with any"
                "language in the database." % lang_code
            )

    team = Team.objects.get_or_none(resource.project, lang_code)
    check = ProjectPermission(request.user)
    if (not check.submit_translations(team or resource.project) or\
        not resource.accept_translations) and not\
            check.maintain(resource.project):
        return rc.FORBIDDEN

    try:
        t = Translation.get_object("create", request, resource, language)
        res = t.create()
    except BadRequestError, e:
        return BAD_REQUEST(unicode(e))
    except NoContentError, e:
        return BAD_REQUEST(unicode(e))
    except __HOLE__, e:
        return BAD_REQUEST("The content type of the request is not valid.")
    return t.__class__.to_http_for_create(t, res)
AttributeError
dataset/ETHPy150Open rvanlaar/easy-transifex/src/transifex/transifex/resources/api.py/TranslationHandler._update
def _entity_descriptor(entity, key):
    """Return a class attribute given an entity and string name.

    May return :class:`.InstrumentedAttribute` or user-defined
    attribute.

    """
    insp = inspection.inspect(entity)
    if insp.is_selectable:
        description = entity
        entity = insp.c
    elif insp.is_aliased_class:
        entity = insp.entity
        description = entity
    elif hasattr(insp, "mapper"):
        description = entity = insp.mapper.class_
    else:
        description = entity

    try:
        return getattr(entity, key)
    except __HOLE__:
        raise sa_exc.InvalidRequestError(
            "Entity '%s' has no property '%s'" %
            (description, key)
        )
AttributeError
dataset/ETHPy150Open goFrendiAsgard/kokoropy/kokoropy/packages/sqlalchemy/orm/base.py/_entity_descriptor
def _check_conf(cfg, req_keys, cat=""):
    """Generic checking for required keys."""
    for k in req_keys:
        try:
            cfg[k]
        except __HOLE__:
            raise KeyError(("Required key is missing in the "
                            "{} configuration file: {}").format(cat, k))
KeyError
dataset/ETHPy150Open BenoitDamota/mempamal/mempamal/configuration.py/_check_conf
def test_start_last_line_match():
    target = 'classBackgroundLayer('
    should_match = 'class BackgroundLayer('
    filling = '\n'.join([
        'this is a line',
        'this is another'
        ])
    text = filling + '\n' + should_match
    lines = text.split('\n')
    it = enumerate(lines)
    start_line = get_start_line(it, target)
    assert start_line == 2

    # here next should raise StoptIteration
    StopIteration_raised = False
    try:
        six.next(it)
    except __HOLE__:
        StopIteration_raised = True
    assert StopIteration_raised
StopIteration
dataset/ETHPy150Open los-cocos/cocos/utest/test_uniform_snippet.py/test_start_last_line_match
def test_start_no_match():
    target = 'classBackgroundLayer('
    filling = '\n'.join([
        'this is a line',
        'this is another'
        ])
    text = filling
    lines = text.split('\n')
    it = enumerate(lines)
    start_line = get_start_line(it, target)
    assert start_line is None

    # here next should raise StoptIteration
    StopIteration_raised = False
    try:
        six.next(it)
    except __HOLE__:
        StopIteration_raised = True
    assert StopIteration_raised

# start tests for get_endplus_line
# better name can be : seek_end_of_indented_block
StopIteration
dataset/ETHPy150Open los-cocos/cocos/utest/test_uniform_snippet.py/test_start_no_match
def _delete_namespace(self, namespace):
    try:
        namespace.delete()
    except __HOLE__ as e:
        # If the namespace didn't exist when delete was attempted, mission
        # accomplished. Otherwise, re-raise the exception
        if 'No such file or directory' not in str(e):
            raise e
RuntimeError
dataset/ETHPy150Open openstack/neutron/neutron/tests/functional/agent/l3/test_namespace_manager.py/NamespaceManagerTestFramework._delete_namespace
@app.route('/inventory', defaults={'env': app.config['DEFAULT_ENVIRONMENT']})
@app.route('/<env>/inventory')
def inventory(env):
    """Fetch all (active) nodes from PuppetDB and stream a table displaying
    those nodes along with a set of facts about them.

    Downside of the streaming aproach is that since we've already sent
    our headers we can't abort the request if we detect an error. Because
    of this we'll end up with an empty table instead because of how
    yield_or_stop works. Once pagination is in place we can change this
    but we'll need to provide a search feature instead.

    :param env: Search for facts in this environment
    :type env: :obj:`string`
    """
    envs = environments()
    check_env(env, envs)

    fact_desc = []    # a list of fact descriptions to go
                      # in the table header
    fact_names = []   # a list of inventory fact names
    factvalues = {}   # values of the facts for all the nodes
                      # indexed by node name and fact name
    nodedata = {}     # a dictionary containing list of inventoried
                      # facts indexed by node name
    nodelist = set()  # a set of node names

    # load the list of items/facts we want in our inventory
    try:
        inv_facts = app.config['INVENTORY_FACTS']
    except __HOLE__:
        inv_facts = [('Hostname',       'fqdn'),
                     ('IP Address',     'ipaddress'),
                     ('OS',             'lsbdistdescription'),
                     ('Architecture',   'hardwaremodel'),
                     ('Kernel Version', 'kernelrelease')]

    # generate a list of descriptions and a list of fact names
    # from the list of tuples inv_facts.
    for description, name in inv_facts:
        fact_desc.append(description)
        fact_names.append(name)

    if env == '*':
        query = '["or", {0}]]'.format(
            ', '.join('["=", "name", "{0}"]'.format(name)
                      for name in fact_names))
    else:
        query = '["and", ["=", "environment", "{0}"], ["or", {1}]]'.format(
            env,
            ', '.join('["=", "name", "{0}"]'.format(name)
                      for name in fact_names))

    # get all the facts from PuppetDB
    facts = puppetdb.facts(query=query)

    # convert the json in easy to access data structure
    for fact in facts:
        factvalues[fact.node, fact.name] = fact.value
        nodelist.add(fact.node)

    # generate the per-host data
    for node in nodelist:
        nodedata[node] = []
        for fact_name in fact_names:
            try:
                nodedata[node].append(factvalues[node, fact_name])
            except KeyError:
                nodedata[node].append("undef")

    return Response(stream_with_context(
        stream_template('inventory.html',
                        nodedata=nodedata,
                        fact_desc=fact_desc,
                        envs=envs,
                        current_env=env)))
KeyError
dataset/ETHPy150Open voxpupuli/puppetboard/puppetboard/app.py/inventory
@app.route('/node/<node_name>',
           defaults={'env': app.config['DEFAULT_ENVIRONMENT']})
@app.route('/<env>/node/<node_name>')
def node(env, node_name):
    """Display a dashboard for a node showing as much data as we have on
    that node. This includes facts and reports but not Resources as that
    is too heavy to do within a single request.

    :param env: Ensure that the node, facts and reports are in this
        environment
    :type env: :obj:`string`
    """
    envs = environments()
    check_env(env, envs)

    if env == '*':
        query = '["=", "certname", "{0}"]]'.format(node_name)
    else:
        query = '["and", ["=", "environment", "{0}"],' \
                '["=", "certname", "{1}"]]'.format(env, node_name),

    node = get_or_abort(puppetdb.node, node_name)
    facts = node.facts()
    reports = get_or_abort(puppetdb.reports,
                           query=query,
                           limit=app.config['REPORTS_COUNT'],
                           order_by='[{"field": "start_time", "order": "desc"}]')
    reports, reports_events = tee(reports)
    report_event_counts = {}

    for report in reports_events:
        counts = get_or_abort(
            puppetdb.event_counts,
            query='["and", ["=", "environment", "{0}"],'
                  '["=", "certname", "{1}"], ["=", "report", "{2}"]]'.format(
                      env, node_name, report.hash_),
            summarize_by="certname")
        try:
            report_event_counts[report.hash_] = counts[0]
        except __HOLE__:
            report_event_counts[report.hash_] = {}
    return render_template(
        'node.html',
        node=node,
        facts=yield_or_stop(facts),
        reports=yield_or_stop(reports),
        reports_count=app.config['REPORTS_COUNT'],
        report_event_counts=report_event_counts,
        envs=envs,
        current_env=env)
IndexError
dataset/ETHPy150Open voxpupuli/puppetboard/puppetboard/app.py/node
@app.route('/reports/',
           defaults={'env': app.config['DEFAULT_ENVIRONMENT'], 'page': 1})
@app.route('/<env>/reports/', defaults={'page': 1})
@app.route('/<env>/reports/page/<int:page>')
def reports(env, page):
    """Displays a list of reports and status from all nodes, retreived
    using the reports endpoint, sorted by start_time.

    :param env: Search for all reports in this environment
    :type env: :obj:`string`
    :param page: Calculates the offset of the query based on the report
        count and this value
    :type page: :obj:`int`
    """
    envs = environments()
    check_env(env, envs)

    if env == '*':
        reports_query = None
        total_query = '["extract", [["function", "count"]], ["~", "certname", ""]]'
    else:
        reports_query = '["=", "environment", "{0}"]'.format(env)
        total_query = '["extract", [["function", "count"]],'\
            '["and", ["=", "environment", "{0}"]]]'.format(env)

    reports = get_or_abort(puppetdb.reports,
                           query=reports_query,
                           limit=app.config['REPORTS_COUNT'],
                           offset=(page-1) * app.config['REPORTS_COUNT'],
                           order_by='[{"field": "start_time", "order": "desc"}]')
    total = get_or_abort(puppetdb._query,
                         'reports',
                         query=total_query)
    total = total[0]['count']
    reports, reports_events = tee(reports)
    report_event_counts = {}

    if total == 0 and page != 1:
        abort(404)

    for report in reports_events:
        if env == '*':
            event_count_query = '["and",' \
                '["=", "certname", "{0}"],' \
                '["=", "report", "{1}"]]'.format(report.node, report.hash_)
        else:
            event_count_query = '["and",' \
                '["=", "environment", "{0}"],' \
                '["=", "certname", "{1}"],' \
                '["=", "report", "{2}"]]'.format(env, report.node, report.hash_)
        counts = get_or_abort(puppetdb.event_counts,
                              query=event_count_query,
                              summarize_by="certname")
        try:
            report_event_counts[report.hash_] = counts[0]
        except __HOLE__:
            report_event_counts[report.hash_] = {}
    return Response(stream_with_context(stream_template(
        'reports.html',
        reports=yield_or_stop(reports),
        reports_count=app.config['REPORTS_COUNT'],
        report_event_counts=report_event_counts,
        pagination=Pagination(page, app.config['REPORTS_COUNT'], total),
        envs=envs,
        current_env=env)))
IndexError
dataset/ETHPy150Open voxpupuli/puppetboard/puppetboard/app.py/reports
@app.route('/reports/<node_name>/',
           defaults={'env': app.config['DEFAULT_ENVIRONMENT'], 'page': 1})
@app.route('/<env>/reports/<node_name>', defaults={'page': 1})
@app.route('/<env>/reports/<node_name>/page/<int:page>')
def reports_node(env, node_name, page):
    """Fetches all reports for a node and processes them, eventually
    rendering a table displaying those reports.

    :param env: Search for reports in this environment
    :type env: :obj:`string`
    :param node_name: Find the reports whose certname match this value
    :type node_name: :obj:`string`
    :param page: Calculates the offset of the query based on the report count
        and this value
    :type page: :obj:`int`
    """
    envs = environments()
    check_env(env, envs)

    if env == '*':
        query = '["=", "certname", "{0}"]'.format(node_name)
        total_query = '["extract", [["function", "count"]],' \
            '["=", "certname", "{0}"]]'.format(node_name)
    else:
        query = '["and",' \
            '["=", "environment", "{0}"],' \
            '["=", "certname", "{1}"]]'.format(env, node_name)
        total_query = '["extract", [["function", "count"]],' \
            '["and", ["=", "environment", "{0}"], ["=", "certname", "{1}"]]]'.format(
                env, node_name)

    reports = get_or_abort(puppetdb.reports,
                           query=query,
                           limit=app.config['REPORTS_COUNT'],
                           offset=(page - 1) * app.config['REPORTS_COUNT'],
                           order_by='[{"field": "start_time", "order": "desc"}]')
    total = get_or_abort(puppetdb._query,
                         'reports',
                         query=total_query)
    total = total[0]['count']
    reports, reports_events = tee(reports)
    report_event_counts = {}

    if total == 0 and page != 1:
        abort(404)

    for report in reports_events:
        if env == '*':
            event_count_query = '["and",' \
                '["=", "certname", "{0}"],' \
                '["=", "report", "{1}"]]'.format(
                    report.node, report.hash_)
        else:
            event_count_query = '["and",' \
                '["=", "environment", "{0}"],' \
                '["=", "certname", "{1}"],' \
                '["=", "report", "{2}"]]'.format(
                    env, report.node, report.hash_)

        counts = get_or_abort(puppetdb.event_counts,
                              query=event_count_query,
                              summarize_by="certname")

        try:
            report_event_counts[report.hash_] = counts[0]
        except __HOLE__:
            report_event_counts[report.hash_] = {}
    return render_template(
        'reports.html',
        reports=reports,
        reports_count=app.config['REPORTS_COUNT'],
        report_event_counts=report_event_counts,
        pagination=Pagination(page, app.config['REPORTS_COUNT'], total),
        envs=envs,
        current_env=env)
IndexError
dataset/ETHPy150Open voxpupuli/puppetboard/puppetboard/app.py/reports_node
@app.route('/report/latest/<node_name>',
           defaults={'env': app.config['DEFAULT_ENVIRONMENT']})
@app.route('/<env>/report/latest/<node_name>')
def report_latest(env, node_name):
    """Redirect to the latest report of a given node.

    :param env: Search for reports in this environment
    :type env: :obj:`string`
    :param node_name: Find the reports whose certname match this value
    :type node_name: :obj:`string`
    """
    envs = environments()
    check_env(env, envs)

    if env == '*':
        node_query = '["=", "certname", "{0}"]'.format(node_name)
    else:
        node_query = '["and",' \
            '["=", "report_environment", "{0}"],' \
            '["=", "certname", "{1}"]]'.format(env, node_name)

    try:
        node = next(get_or_abort(puppetdb.nodes,
                                 query=node_query,
                                 with_status=True))
    except StopIteration:
        abort(404)

    if node.latest_report_hash is not None:
        hash_ = node.latest_report_hash
    else:
        if env == '*':
            query = '["and",' \
                '["=", "certname", "{0}"],' \
                '["=", "latest_report?", true]]'.format(node.name)
        else:
            query = '["and",' \
                '["=", "environment", "{0}"],' \
                '["=", "certname", "{1}"],' \
                '["=", "latest_report?", true]]'.format(
                    env, node.name)
        reports = get_or_abort(puppetdb.reports, query=query)
        try:
            report = next(reports)
            hash_ = report.hash_
        except __HOLE__:
            abort(404)

    return redirect(
        url_for('report', env=env, node_name=node_name, report_id=hash_))
StopIteration
dataset/ETHPy150Open voxpupuli/puppetboard/puppetboard/app.py/report_latest
@app.route('/report/<node_name>/<report_id>', defaults={'env': app.config['DEFAULT_ENVIRONMENT']}) @app.route('/<env>/report/<node_name>/<report_id>') def report(env, node_name, report_id): """Displays a single report including all the events associated with that report and their status. The report_id may be the puppetdb's report hash or the configuration_version. This allows for better integration into puppet-hipchat. :param env: Search for reports in this environment :type env: :obj:`string` :param node_name: Find the reports whose certname match this value :type node_name: :obj:`string` :param report_id: The hash or the configuration_version of the desired report :type report_id: :obj:`string` """ envs = environments() check_env(env, envs) if env == '*': query = '["and", ["=", "certname", "{0}"],' \ '["or", ["=", "hash", "{1}"], ["=", "configuration_version", "{1}"]]]'.format( node_name, report_id) else: query = '["and", ["=", "environment", "{0}"], ["=", "certname", "{1}"],' \ '["or", ["=", "hash", "{2}"], ["=", "configuration_version", "{2}"]]]'.format( env, node_name, report_id) reports = puppetdb.reports(query=query) try: report = next(reports) except __HOLE__: abort(404) return render_template( 'report.html', report=report, events=yield_or_stop(report.events()), logs=report.logs, metrics=report.metrics, envs=envs, current_env=env)
StopIteration
dataset/ETHPy150Open voxpupuli/puppetboard/puppetboard/app.py/report
def from_url(self, url): """extract host and port from an URL string""" parts = urlparse.urlsplit(url) if parts.scheme != "socket": raise SerialException( 'expected a string in the form ' '"socket://<host>:<port>[?logging={debug|info|warning|error}]": ' 'not starting with socket:// ({!r})'.format(parts.scheme)) try: # process options now, directly altering self for option, values in urlparse.parse_qs(parts.query, True).items(): if option == 'logging': logging.basicConfig() # XXX is that good to call it here? self.logger = logging.getLogger('pySerial.socket') self.logger.setLevel(LOGGER_LEVELS[values[0]]) self.logger.debug('enabled logging') else: raise ValueError('unknown option: {!r}'.format(option)) if not 0 <= parts.port < 65536: raise ValueError("port not in range 0...65535") except __HOLE__ as e: raise SerialException( 'expected a string in the form ' '"socket://<host>:<port>[?logging={debug|info|warning|error}]": {}'.format(e)) return (parts.hostname, parts.port) # - - - - - - - - - - - - - - - - - - - - - - - -
ValueError
dataset/ETHPy150Open pyserial/pyserial/serial/urlhandler/protocol_socket.py/Serial.from_url
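A small, self-contained illustration of the URL handling in from_url above: parsing a socket:// URL and its query options with the standard library. Shown with Python 3's urllib.parse naming; the Python 2 code above uses the equivalent urlparse module.

    from urllib.parse import urlsplit, parse_qs

    parts = urlsplit("socket://localhost:7000?logging=debug")
    assert parts.scheme == "socket"
    assert parts.hostname == "localhost"
    assert parts.port == 7000
    # keep_blank_values=True mirrors urlparse.parse_qs(parts.query, True)
    options = parse_qs(parts.query, keep_blank_values=True)
    assert options == {"logging": ["debug"]}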
def read(self, size=1):
    """\
    Read size bytes from the serial port. If a timeout is set it may
    return fewer characters than requested. With no timeout it will
    block until the requested number of bytes is read.
    """
    if not self.is_open:
        raise portNotOpenError
    read = bytearray()
    timeout = self._timeout
    while len(read) < size:
        try:
            start_time = time.time()
            ready, _, _ = select.select([self._socket], [], [], timeout)
            # If select was used with a timeout, and the timeout occurs, it
            # returns with empty lists -> thus abort read operation.
            # For timeout == 0 (non-blocking operation) also abort when
            # there is nothing to read.
            if not ready:
                break   # timeout
            buf = self._socket.recv(size - len(read))
            # read should always return some data as select reported it was
            # ready to read when we get to this point, unless it is EOF
            if not buf:
                raise SerialException('socket disconnected')
            read.extend(buf)
            if timeout is not None:
                timeout -= time.time() - start_time
                if timeout <= 0:
                    break
        except socket.timeout:
            # timeout is used for write support, just go reading again
            pass
        except socket.error as e:
            # connection fails -> terminate loop
            raise SerialException('connection failed ({})'.format(e))
        except __HOLE__ as e:
            # this is for Python 3.x, where select.error is a subclass of
            # OSError; ignore EAGAIN errors, all other errors are shown
            if e.errno != errno.EAGAIN:
                raise SerialException('read failed: {}'.format(e))
        except select.error as e:
            # this is for Python 2.x;
            # ignore EAGAIN errors, all other errors are shown
            # see also http://www.python.org/dev/peps/pep-3151/#select
            if e[0] != errno.EAGAIN:
                raise SerialException('read failed: {}'.format(e))
    return bytes(read)
OSError
dataset/ETHPy150Open pyserial/pyserial/serial/urlhandler/protocol_socket.py/Serial.read
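The read loop above combines select() with a shrinking timeout so one overall deadline holds across multiple recv() calls. A minimal sketch of that deadline pattern on a plain socket pair, standard library only:

    import select
    import socket
    import time

    def recv_exactly(sock, size, timeout):
        """Read exactly `size` bytes or stop when `timeout` seconds elapse."""
        data = bytearray()
        deadline = time.time() + timeout
        while len(data) < size:
            remaining = deadline - time.time()
            if remaining <= 0:
                break
            ready, _, _ = select.select([sock], [], [], remaining)
            if not ready:
                break  # timed out with nothing to read
            chunk = sock.recv(size - len(data))
            if not chunk:
                break  # peer closed the connection
            data.extend(chunk)
        return bytes(data)

    a, b = socket.socketpair()
    b.sendall(b"hello")
    assert recv_exactly(a, 5, timeout=1.0) == b"hello"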
def get_x_y_estimated_beta(self): """ Reference: --------- http://spams-devel.gforge.inria.fr/doc-python/html/doc_spams006.html#toc23 """ shape = (4, 4, 1) num_samples = 10 coefficient = 0.05 num_ft = shape[0] * shape[1] * shape[2] X = np.random.random((num_samples, num_ft)) beta = np.random.random((num_ft, 1)) # y = dot(X, beta) + noise y = np.dot(X, beta) + np.random.random((num_samples, 1)) * 0.0001 try: import spams # Normalization for X X = np.asfortranarray(X) X = np.asfortranarray(X - np.tile( np.mean(X, 0), (X.shape[0], 1))) X = spams.normalize(X) # Normalization for y y = np.asfortranarray(y) y = np.asfortranarray(y - np.tile( np.mean(y, 0), (y.shape[0], 1))) y = spams.normalize(y) weight0 = np.zeros((X.shape[1], y.shape[1]), dtype=np.float64, order="FORTRAN") param = {'numThreads': 1, 'verbose': True, 'lambda1': coefficient, 'it0': 10, 'max_it': 200, 'L0': 0.1, 'tol': 1e-3, 'intercept': False, 'pos': False} param['compute_gram'] = True param['loss'] = 'square' param['regul'] = 'l2' (weight_ridge, optim_info) = spams.fistaFlat(y, X, weight0, True, **param) param['regul'] = 'l1' (weight_l1, optim_info) = spams.fistaFlat(y, X, weight0, True, **param) # print "X = ", repr(X) # print "y = ", repr(y) # print "weight_ridge =", repr(weight_ridge) # print "weight_l1 =", repr(weight_l1) except __HOLE__: # TODO: Don't use print directly. print "Cannot import spams. Default values will be used." X = np.asarray([ [ 0.26856766, 0.30620391, 0.26995615, 0.3806023 , 0.41311465, -0.24685479, 0.34108499, -0.22786788, -0.2267594 , 0.30325884, -0.00382229, 0.3503643 , 0.21786749, -0.15275043, -0.24074157, -0.25639825], [-0.14305316, -0.19553497, 0.45250255, -0.17317269, -0.00304901, 0.43838073, 0.01606735, 0.09267714, 0.47763275, 0.23234948, 0.38694597, 0.72591941, 0.21028899, 0.42317021, 0.276003 , 0.42198486], [-0.08738645, 0.10795947, 0.45813373, -0.34232048, 0.43621128, -0.36984753, 0.16555311, 0.55188325, -0.48169657, -0.52844883, 0.15140672, 0.06074575, -0.36873621, 0.23679974, 0.47195386, -0.09728514], [ 0.16461237, 0.30299873, -0.32108348, -0.53918274, 0.02287831, 0.01105383, -0.11124968, 0.18629018, 0.30017151, -0.04217922, -0.46066699, -0.33612491, -0.52611772, -0.25397362, -0.27198468, -0.42883518], [ 0.4710195 , 0.35047152, -0.07990029, 0.34911632, 0.07206932, -0.20270895, -0.0684226 , -0.18958745, -0.08433092, 0.14453963, 0.28095469, -0.35894296, 0.11680455, -0.37598039, -0.28331446, -0.00825299], [-0.420528 , -0.74469306, 0.22732681, 0.34362884, 0.16006124, -0.29691759, 0.27029047, -0.31077084, -0.048071 , 0.36495065, 0.49364453, -0.16903801, 0.07577839, -0.36492748, 0.09448284, -0.37055486], [ 0.4232946 , -0.26373387, -0.01430445, -0.2353587 , -0.5005603 , -0.35899458, 0.32702596, -0.38311949, 0.31862621, -0.31931012, -0.41836583, -0.02855145, -0.50315227, -0.34807958, -0.05252361, 0.11551424], [-0.28443208, 0.07677476, -0.23720305, 0.11056299, -0.48742565, 0.36772457, -0.56074202, 0.3145033 , -0.22811763, 0.36482173, -0.01786535, -0.02929555, 0.35635411, 0.45838473, 0.45853286, 0.00159594], [-0.45779277, 0.10020579, -0.30873257, 0.28114072, 0.18120182, 0.33333004, 0.17928387, 0.31572323, 0.32902088, -0.10396976, -0.33296829, 0.05277326, 0.27139148, 0.18653329, 0.06068255, -0.01942451], [ 0.06569833, -0.04065228, -0.44669538, -0.17501657, -0.29450165, 0.32483427, -0.55889145, -0.34973144, -0.35647584, -0.41601239, -0.07926316, -0.26784983, 0.14952119, 0.19082353, -0.51309079, 0.6416559 ]]) y = np.asarray([ [ 0.15809895], [ 0.69496971], [ 0.01214928], [-0.39826324], [-0.01682498], 
[-0.03372654], [-0.45148804], [ 0.21735376], [ 0.08795349], [-0.27022239]]) weight_ridge = np.asarray([ [ 0.038558 ], [ 0.12605106], [ 0.19115798], [ 0.07187217], [ 0.09472713], [ 0.14943554], [-0.01968095], [ 0.11695959], [ 0.15049031], [ 0.18930644], [ 0.26086626], [ 0.23243305], [ 0.17425178], [ 0.13200238], [ 0.11710994], [ 0.11272092]]) weight_l1 = np.asarray([ [ 0. ], [ 0.02664519], [ 0. ], [ 0. ], [ 0. ], [ 0.10357106], [ 0. ], [ 0.2103012 ], [ 0.00399881], [ 0.10815184], [ 0.32221254], [ 0.49350083], [ 0.21351531], [ 0. ], [ 0. ], [ 0. ]]) ret_data = {} ret_data['X'] = X ret_data['y'] = y ret_data['weight_ridge'] = weight_ridge ret_data['weight_l1'] = weight_l1 ret_data['coefficient'] = coefficient ret_data['shape'] = shape ret_data['num_samples'] = num_samples ret_data['num_ft'] = num_ft return ret_data
ImportError
dataset/ETHPy150Open neurospin/pylearn-parsimony/tests/spamsdata.py/SpamsGenerator.get_x_y_estimated_beta
def make_batches(data, batch_size=100):
    """
    Split the data into minibatches of size batch_size

    This procedure generates subsample ids for batches by only considering
    features that are active in the minibatch

    Args:
        data - the data to be split into minibatches (must be rank 2)
        batch_size - the size of the minibatches

    Returns:
        batches - a list: [(batch, subsample_ids) for batch in minibatches]
    """
    n = data.shape[0]
    perm = random.permutation(range(n))
    i = 0
    batches = []
    while i < n:
        batch = perm[i:i+batch_size]
        i += batch_size
        batches.append(data[batch])
    try:
        ids = [find((b.sum(0) != 0).A.flatten()) for b in batches]
    except __HOLE__:
        ids = [find((b.sum(0) != 0).flatten()) for b in batches]
    batches = [(b[:,i].toarray(), i) for b,i in zip(batches, ids)]
    return batches
AttributeError
dataset/ETHPy150Open theusual/kaggle-seeclickfix-ensemble/Miroslaw/sparserbm.py/make_batches
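A usage sketch for the column-subsampling step in make_batches, assuming scipy is available. The `.A` branch handles scipy sparse matrices (whose sum() returns a numpy matrix), while the AttributeError fallback covers plain ndarrays; numpy.flatnonzero does the job of pylab's find.

    import numpy as np
    from scipy import sparse

    rng = np.random.RandomState(0)
    dense = (rng.rand(10, 6) < 0.3).astype(np.float64)
    data = sparse.csr_matrix(dense)

    # Mirrors one iteration: take a batch of rows, keep only active columns.
    batch = data[:4]
    active = np.flatnonzero(np.asarray(batch.sum(axis=0)).ravel() != 0)
    subsample = batch[:, active].toarray()
    assert subsample.shape == (4, len(active))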
def _clean_column_name(name): try: return _clean_name_cache[name] except __HOLE__: clean = NON_ALPHA_REGEX.sub("_", START_BADCHAR_REGEX.sub("", END_BADCHAR_REGEX.sub("", name))) _clean_name_cache[name] = clean return clean
KeyError
dataset/ETHPy150Open datastax/python-driver/cassandra/query.py/_clean_column_name
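The dict-based memoization in _clean_column_name is a common pattern: try the cache, and on a miss compute and store. A self-contained sketch with a hypothetical regex standing in for the real NON_ALPHA_REGEX and friends, which live elsewhere in cassandra/query.py:

    import re

    _cache = {}
    _non_alnum = re.compile(r"\W")  # hypothetical stand-in for NON_ALPHA_REGEX

    def clean_name(name):
        try:
            return _cache[name]
        except KeyError:
            cleaned = _non_alnum.sub("_", name)
            _cache[name] = cleaned
            return cleaned

    assert clean_name("first name") == "first_name"
    assert clean_name("first name") is clean_name("first name")  # cached object reused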
@classmethod
def from_message(cls, query_id, column_metadata, pk_indexes, cluster_metadata,
                 query, prepared_keyspace, protocol_version):
    if not column_metadata:
        return PreparedStatement(column_metadata, query_id, None,
                                 query, prepared_keyspace, protocol_version)

    if pk_indexes:
        routing_key_indexes = pk_indexes
    else:
        routing_key_indexes = None
        first_col = column_metadata[0]
        ks_meta = cluster_metadata.keyspaces.get(first_col.keyspace_name)
        if ks_meta:
            table_meta = ks_meta.tables.get(first_col.table_name)
            if table_meta:
                partition_key_columns = table_meta.partition_key

                # make a map of {column_name: index} for each column in the statement
                statement_indexes = dict((c.name, i) for i, c in enumerate(column_metadata))

                # a list of which indexes in the statement correspond to partition key items
                try:
                    routing_key_indexes = [statement_indexes[c.name]
                                           for c in partition_key_columns]
                except __HOLE__:
                    # We're missing a partition key component in the prepared
                    # statement; just leave routing_key_indexes as None.
                    pass

    return PreparedStatement(column_metadata, query_id, routing_key_indexes,
                             query, prepared_keyspace, protocol_version)
KeyError
dataset/ETHPy150Open datastax/python-driver/cassandra/query.py/PreparedStatement.from_message
def bind(self, values): """ Binds a sequence of values for the prepared statement parameters and returns this instance. Note that `values` *must* be: * a sequence, even if you are only binding one value, or * a dict that relates 1-to-1 between dict keys and columns .. versionchanged:: 2.6.0 :data:`~.UNSET_VALUE` was introduced. These can be bound as positional parameters in a sequence, or by name in a dict. Additionally, when using protocol v4+: * short sequences will be extended to match bind parameters with UNSET_VALUE * names may be omitted from a dict with UNSET_VALUE implied. .. versionchanged:: 3.0.0 method will not throw if extra keys are present in bound dict (PYTHON-178) """ if values is None: values = () proto_version = self.prepared_statement.protocol_version col_meta = self.prepared_statement.column_metadata # special case for binding dicts if isinstance(values, dict): values_dict = values values = [] # sort values accordingly for col in col_meta: try: values.append(values_dict[col.name]) except __HOLE__: if proto_version >= 4: values.append(UNSET_VALUE) else: raise KeyError( 'Column name `%s` not found in bound dict.' % (col.name)) value_len = len(values) col_meta_len = len(col_meta) if value_len > col_meta_len: raise ValueError( "Too many arguments provided to bind() (got %d, expected %d)" % (len(values), len(col_meta))) # this is fail-fast for clarity pre-v4. When v4 can be assumed, # the error will be better reported when UNSET_VALUE is implicitly added. if proto_version < 4 and self.prepared_statement.routing_key_indexes and \ value_len < len(self.prepared_statement.routing_key_indexes): raise ValueError( "Too few arguments provided to bind() (got %d, required %d for routing key)" % (value_len, len(self.prepared_statement.routing_key_indexes))) self.raw_values = values self.values = [] for value, col_spec in zip(values, col_meta): if value is None: self.values.append(None) elif value is UNSET_VALUE: if proto_version >= 4: self._append_unset_value() else: raise ValueError("Attempt to bind UNSET_VALUE while using unsuitable protocol version (%d < 4)" % proto_version) else: try: self.values.append(col_spec.type.serialize(value, proto_version)) except (TypeError, struct.error) as exc: actual_type = type(value) message = ('Received an argument of invalid type for column "%s". ' 'Expected: %s, Got: %s; (%s)' % (col_spec.name, col_spec.type, actual_type, exc)) raise TypeError(message) if proto_version >= 4: diff = col_meta_len - len(self.values) if diff: for _ in range(diff): self._append_unset_value() return self
KeyError
dataset/ETHPy150Open datastax/python-driver/cassandra/query.py/BoundStatement.bind
def label_for_value(self, value): key = self.rel.get_related_field().name try: obj = self.rel.model._default_manager.using(self.db).get(**{key: value}) except (__HOLE__, self.rel.model.DoesNotExist): return '' label = '&nbsp;<strong>{}</strong>' text = Truncator(obj).words(14, truncate='...') try: change_url = reverse( '%s:%s_%s_change' % ( self.admin_site.name, obj._meta.app_label, obj._meta.object_name.lower(), ), args=(obj.pk,) ) except NoReverseMatch: pass # Admin not registered for target model. else: text = format_html('<a href="{}">{}</a>', change_url, text) return format_html(label, text)
ValueError
dataset/ETHPy150Open django/django/django/contrib/admin/widgets.py/ForeignKeyRawIdWidget.label_for_value
def setup(): global app_paths pymodules = [] for app in frappe.get_all_apps(True): try: pymodules.append(frappe.get_module(app)) except __HOLE__: pass app_paths = [os.path.dirname(pymodule.__file__) for pymodule in pymodules]
ImportError
dataset/ETHPy150Open frappe/frappe/frappe/build.py/setup
def get_build_maps(): """get all build.jsons with absolute paths""" # framework js and css files build_maps = {} for app_path in app_paths: path = os.path.join(app_path, 'public', 'build.json') if os.path.exists(path): with open(path) as f: try: for target, sources in json.loads(f.read()).iteritems(): # update app path source_paths = [] for source in sources: if isinstance(source, list): s = frappe.get_pymodule_path(source[0], *source[1].split("/")) else: s = os.path.join(app_path, source) source_paths.append(s) build_maps[target] = source_paths except __HOLE__, e: print path print 'JSON syntax error {0}'.format(str(e)) return build_maps
ValueError
dataset/ETHPy150Open frappe/frappe/frappe/build.py/get_build_maps
@checker('.py') def check_style_and_encoding(fn, lines): encoding = 'ascii' for lno, line in enumerate(lines): if len(line) > 90: yield lno+1, "line too long" m = not_ix_re.search(line) if m: yield lno+1, '"' + m.group() + '"' if is_const_re.search(line): yield lno+1, 'using == None/True/False' if lno < 2: co = coding_re.search(line) if co: encoding = co.group(1) try: line.decode(encoding) except __HOLE__, err: yield lno+1, "not decodable: %s\n Line: %r" % (err, line) except LookupError, err: yield 0, "unknown encoding: %s" % encoding encoding = 'latin1'
UnicodeDecodeError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Pygments-1.3.1/scripts/check_sources.py/check_style_and_encoding
def main(argv): try: gopts, args = getopt.getopt(argv[1:], "vi:") except getopt.GetoptError: print "Usage: %s [-v] [-i ignorepath]* [path]" % argv[0] return 2 opts = {} for opt, val in gopts: if opt == '-i': val = abspath(val) opts.setdefault(opt, []).append(val) if len(args) == 0: path = '.' elif len(args) == 1: path = args[0] else: print "Usage: %s [-v] [-i ignorepath]* [path]" % argv[0] return 2 verbose = '-v' in opts num = 0 out = cStringIO.StringIO() # TODO: replace os.walk run with iteration over output of # `svn list -R`. for root, dirs, files in os.walk(path): if '.svn' in dirs: dirs.remove('.svn') if '-i' in opts and abspath(root) in opts['-i']: del dirs[:] continue # XXX: awkward: for the Makefile call: don't check non-package # files for file headers in_pocoo_pkg = root.startswith('./pygments') for fn in files: fn = join(root, fn) if fn[:2] == './': fn = fn[2:] if '-i' in opts and abspath(fn) in opts['-i']: continue ext = splitext(fn)[1] checkerlist = checkers.get(ext, None) if not checkerlist: continue if verbose: print "Checking %s..." % fn try: f = open(fn, 'r') lines = list(f) except (IOError, __HOLE__), err: print "%s: cannot open: %s" % (fn, err) num += 1 continue for checker in checkerlist: if not in_pocoo_pkg and checker.only_pkg: continue for lno, msg in checker(fn, lines): print >>out, "%s:%d: %s" % (fn, lno, msg) num += 1 if verbose: print if num == 0: print "No errors found." else: print out.getvalue().rstrip('\n') print "%d error%s found." % (num, num > 1 and "s" or "") return int(num > 0)
OSError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Pygments-1.3.1/scripts/check_sources.py/main
@property
def host(self):
    """
    Rackspace uses a separate host for API calls which is only provided
    after an initial authentication request. If we haven't made that
    request yet, do it here. Otherwise, just return the management host.
    """
    if not self.__host:
        # Initial connection used for authentication
        conn = self.conn_classes[self.secure](
            self.auth_host, self.port[self.secure])
        conn.request(
            method='GET',
            url='/%s' % (AUTH_API_VERSION),
            headers={
                'X-Auth-User': self.user_id,
                'X-Auth-Key': self.key
            }
        )
        resp = conn.getresponse()

        if resp.status != httplib.NO_CONTENT:
            raise InvalidCredsError()

        headers = dict(resp.getheaders())

        try:
            self.server_url = headers['x-server-management-url']
            self.storage_url = headers['x-storage-url']
            self.cdn_management_url = headers['x-cdn-management-url']
            self.auth_token = headers['x-auth-token']
        except __HOLE__:
            raise InvalidCredsError()

        scheme, server, self.request_path, param, query, fragment = (
            urlparse.urlparse(getattr(self, self._url_key))
        )

        if scheme == "https" and self.secure is not True:
            raise InvalidCredsError()

        # Set host to the server we want to make further requests to
        self.__host = server
        conn.close()

    return self.__host
KeyError
dataset/ETHPy150Open cloudkick/libcloud/libcloud/common/rackspace.py/RackspaceBaseConnection.host
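Stripped of the Rackspace specifics, the host property above is a lazy-initialization cache: an expensive step runs on first access and its result is memoized on the instance. A generic sketch of that pattern, with the authenticate step as a hypothetical placeholder:

    class Connection(object):
        def __init__(self):
            self.__host = None

        def _authenticate(self):
            # Placeholder for the real auth round-trip that yields a host.
            return "servers.api.example.com"

        @property
        def host(self):
            if self.__host is None:
                self.__host = self._authenticate()  # runs only once
            return self.__host

    conn = Connection()
    assert conn.host == conn.host == "servers.api.example.com"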
def __init__(self, element, spec): self.name = element.attrib['name'] self.require = [] for required in chain.from_iterable(element.findall('require')): if required.tag == 'type': continue data = {'enum': spec.enums, 'command': spec.commands}[required.tag] try: self.require.append(data[required.attrib['name']]) except __HOLE__: pass # TODO
KeyError
dataset/ETHPy150Open Dav1dde/glad/glad/parse.py/Extension.__init__
def __init__(self, element, spec): Extension.__init__(self, element, spec) self.spec = spec # not every spec has a ._remove member, but there shouldn't be a remove # tag without that member, if there is, blame me! for removed in chain.from_iterable(element.findall('remove')): if removed.tag == 'type': continue data = {'enum': spec.enums, 'command': spec.commands}[removed.tag] try: spec._remove.add(data[removed.attrib['name']]) except __HOLE__: pass # TODO self.number = tuple(map(int, element.attrib['number'].split('.'))) self.api = element.attrib['api']
KeyError
dataset/ETHPy150Open Dav1dde/glad/glad/parse.py/Feature.__init__
def check_key(request): """ Check to see if we already have an access_key stored, if we do then we have already gone through OAuth. If not then we haven't and we probably need to. """ try: access_key = request.session.get('oauth_token', None) if not access_key: return False except __HOLE__: return False return True
KeyError
dataset/ETHPy150Open marcelcaraciolo/foursquare/examples/django/example/djfoursquare/views.py/check_key
def get_constant_value(lib, name): try: return lib.__dict__[name] except __HOLE__: return None
KeyError
dataset/ETHPy150Open cgarrard/osgeopy-code/ospybook/ospybook/__init__.py/get_constant_value
def validate(self, arguments): try: # Base class will check if argument is present or ensure it has its default value super(IntegerParameter, self).validate(arguments) if arguments[self.name]: newVal = int(arguments[self.name]) # Validate range if defined if self.minValue is not None and newVal < self.minValue: raise RangeError("Argument \"%s\"=%d is less than minimum: %d" % ( self.name, newVal, self.minValue)) if self.maxValue is not None and self.maxValue < newVal: raise RangeError("Argument \"%s\"=%d is more than maximum: %d" % ( self.name, self.maxValue, newVal)) arguments[self.name] = newVal except RangeError, e: raise e except __HOLE__, e: raise e
ValidationError
dataset/ETHPy150Open mikrosimage/OpenRenderManagement/src/puliclient/jobs.py/IntegerParameter.validate
def _load(name, motherClass): try: moduleName, cls = name.rsplit(".", 1) except __HOLE__: raise JobTypeImportError("Invalid job type name '%s'. It should be like 'some.module.JobClassName'." % name) try: module = __import__(moduleName, fromlist=[cls]) except ImportError, error: traceback.print_exc() raise JobTypeImportError("No module '%s' on PYTHONPATH:\n%s. (%s)" % (moduleName, "\n".join(sys.path), error)) try: jobtype = getattr(module, cls) except AttributeError: raise JobTypeImportError("No such job type '%s' defined in module '%s'." % (cls, name)) if not issubclass(jobtype, motherClass): motherClassName = "%s.%s" % (motherClass.__module__, motherClass.__name__) raise JobTypeImportError("%s (loaded as '%s') is not a valid %s." % (jobtype, name, motherClassName)) return jobtype
ValueError
dataset/ETHPy150Open mikrosimage/OpenRenderManagement/src/puliclient/jobs.py/_load
def get_package_value(key, default_value=None): """ Get value from default/user package configuration settings. """ try: config = sublime.load_settings(S.FILE_PACKAGE_SETTINGS) if config and config.has(key): return config.get(key) except __HOLE__: sublime.set_timeout(lambda: load_package_values(), 0) if S.CONFIG_PACKAGE: if key in S.CONFIG_PACKAGE: return S.CONFIG_PACKAGE[key] return default_value
RuntimeError
dataset/ETHPy150Open martomo/SublimeTextXdebug/xdebug/config.py/get_package_value
def get_project_value(key, default_value=None): """ Get value from project configuration settings. """ # Load project coniguration settings try: load_project_values() except __HOLE__: sublime.set_timeout(lambda: load_project_values(), 0) # Find value in project configuration if S.CONFIG_PROJECT: if key in S.CONFIG_PROJECT: return S.CONFIG_PROJECT[key] # Otherwise use default value return default_value
RuntimeError
dataset/ETHPy150Open martomo/SublimeTextXdebug/xdebug/config.py/get_project_value
def test_2(): try: from msmbuilder.msm import MarkovStateModel except __HOLE__ as e: raise SkipTest(e) X = [np.random.randint(2, size=10), np.random.randint(2, size=11)] out = fit_and_score_estimator( MarkovStateModel(), {'verbose': False}, cv=2, X=X, y=None, verbose=0) np.testing.assert_array_equal(out['n_train_samples'], [11, 10]) np.testing.assert_array_equal(out['n_test_samples'], [10, 11])
ImportError
dataset/ETHPy150Open msmbuilder/osprey/osprey/tests/test_fit_estimator.py/test_2
def minCut_dp(self, s):
    """
    dp

    a b a b b b a b b a b a
    i                     k

    if s[i:k+1] is palindrome, #cut is 0; otherwise cut s[i:k+1] into
    palindromes, the #cut:
      cut the s[i:k+1] to two parts
      cut the left part into palindromes, #cut is dp[i, j]
      cut the right part into palindromes, #cut is dp[j+1, k+1]
      find the minimum for above

    dp[i, k+1] = min(dp[i, j]+dp[j, k+1]+1)

    when drawing the matrix, you will find it difficult to construct it at
    one shot (especially, the vertical line)

    To avoid TLE, use 1-d dp instead of 2-d dp:
    D[i] represents #cut for s[i:]
    if s[i:j] is palindrome and the #cut for s[j:] is D[j], then for minimum:
    D[i] = min(D[j]+1) over all such j

    To avoid TLE, also use dp for the palindrome determination.
    Determine whether s[i:k+1] is palindrome:
    P[i, k+1] = P[i+1, k] && s[i]==s[k]

    * another algorithm is dfs with global_min
    * telling whether s[i:k+1] is palindrome can be optimized by dp

    :param s: str
    :return: int
    """
    if not s:
        return 0

    length = len(s)
    # palindrome dp
    P = [[False for _ in xrange(length+1)] for _ in xrange(length+1)]
    for i in xrange(length+1):
        try:
            P[i][i] = True
            P[i][i+1] = True
        except IndexError:
            pass
    for i in xrange(length, -1, -1):
        for j in xrange(i+2, length+1):
            try:
                P[i][j] = P[i+1][j-1] and s[i] == s[j-1]
            except __HOLE__:
                P[i][j] = True

    # min cut dp
    D = [length-i-1 for i in xrange(length)]  # max is cut everywhere
    for i in xrange(length-1, -1, -1):
        if P[i][length]:
            D[i] = 0
        else:
            for j in xrange(i+1, length):
                if P[i][j]:
                    D[i] = min(D[i], D[j]+1)

    return D[0]
IndexError
dataset/ETHPy150Open algorhythms/LeetCode/132 Palindrome Partitioning II.py/Solution.minCut_dp
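A compact, runnable restatement of the O(n^2) idea in minCut_dp above (precompute the palindrome table P, then fill D right to left), with spot checks: "aab" needs one cut, a palindrome needs none.

    def min_cut(s):
        n = len(s)
        if n == 0:
            return 0
        # P[i][j] is True iff s[i:j] is a palindrome.
        P = [[False] * (n + 1) for _ in range(n + 1)]
        for i in range(n + 1):
            P[i][i] = True
            if i < n:
                P[i][i + 1] = True
        for i in range(n - 1, -1, -1):
            for j in range(i + 2, n + 1):
                P[i][j] = P[i + 1][j - 1] and s[i] == s[j - 1]
        # D[i] is the minimum number of cuts for s[i:].
        D = [n - i - 1 for i in range(n)]
        for i in range(n - 1, -1, -1):
            if P[i][n]:
                D[i] = 0
            else:
                for j in range(i + 1, n):
                    if P[i][j]:
                        D[i] = min(D[i], D[j] + 1)
        return D[0]

    assert min_cut("aab") == 1   # "aa" | "b"
    assert min_cut("abba") == 0  # already a palindrome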
def minCut_TLE(self, s):
    """
    dp

    a b a b b b a b b a b a
    i                     k

    if s[i:k+1] is palindrome, #cut is 0; otherwise cut s[i:k+1] into
    palindromes, the #cut:
      cut the s[i:k+1] to two parts
      cut the left part into palindromes, #cut is dp[i, j]
      cut the right part into palindromes, #cut is dp[j+1, k+1]
      find the minimum for above

    dp[i, k+1] = min(dp[i, j]+dp[j, k+1]+1)

    when drawing the matrix, you will find it difficult to construct it at
    one shot (especially, the vertical line)

    * another algorithm is dfs with global_min
    * telling whether s[i:k+1] is palindrome can be optimized by dp

    :param s: str
    :return: int
    """
    if not s:
        return 0

    length = len(s)
    # (1 << 32) - 1 serves as the "infinity" sentinel
    dp = [[(1 << 32) - 1 for _ in xrange(length+1)] for _ in xrange(length+1)]
    for i in xrange(length+1):
        try:
            dp[i][i] = 0
            dp[i][i+1] = 0
        except __HOLE__:
            pass

    for i in xrange(length, -1, -1):
        for k in xrange(i, length+1):
            if self.is_palindrome(s[i:k]):
                dp[i][k] = 0
            else:
                dp[i][k] = min(1+dp[i][j]+dp[j][k] for j in xrange(i+1, k))

    return dp[0][length]
IndexError
dataset/ETHPy150Open algorhythms/LeetCode/132 Palindrome Partitioning II.py/Solution.minCut_TLE
def minCut_TLE2(self, s):
    """
    dp

    a b a b b b a b b a b a
    i                     k

    if s[i:k+1] is palindrome, #cut is 0; otherwise cut s[i:k+1] into
    palindromes, the #cut:
      cut the s[i:k+1] to two parts
      cut the left part into palindromes, #cut is dp[i, j]
      cut the right part into palindromes, #cut is dp[j+1, k+1]
      find the minimum for above

    dp[i, k+1] = min(dp[i, j]+dp[j, k+1]+1)

    when drawing the matrix, you will find it difficult to construct it at
    one shot (especially, the vertical line)

    Determine whether s[i:k+1] is palindrome:
    dp2[i, k+1] = dp2[i+1, k] && s[i]==s[k]

    * another algorithm is dfs with global_min
    * telling whether s[i:k+1] is palindrome can be optimized by dp

    :param s: str
    :return: int
    """
    if not s:
        return 0

    length = len(s)
    # palindrome dp
    dp2 = [[False for _ in xrange(length+1)] for _ in xrange(length+1)]
    for i in xrange(length+1):
        try:
            dp2[i][i] = True
            dp2[i][i+1] = True
        except IndexError:
            pass
    for i in xrange(length, -1, -1):
        for j in xrange(i+2, length+1):
            try:
                dp2[i][j] = dp2[i+1][j-1] and s[i] == s[j-1]
            except IndexError:
                dp2[i][j] = True

    # min cut dp; (1 << 32) - 1 serves as the "infinity" sentinel
    dp = [[(1 << 32) - 1 for _ in xrange(length+1)] for _ in xrange(length+1)]
    for i in xrange(length+1):
        try:
            dp[i][i] = 0
            dp[i][i+1] = 0
        except __HOLE__:
            pass

    for i in xrange(length, -1, -1):
        for k in xrange(i, length+1):
            if dp2[i][k]:
                dp[i][k] = 0
            else:
                dp[i][k] = min(1+dp[i][j]+dp[j][k] for j in xrange(i+1, k))

    return dp[0][length]
IndexError
dataset/ETHPy150Open algorhythms/LeetCode/132 Palindrome Partitioning II.py/Solution.minCut_TLE2
def get_lines_from_file(filename, lineno, context_lines,
                        loader=None, module_name=None):
    """
    Returns context_lines before and after lineno from file.
    Returns (pre_context, context_line, post_context).
    """
    source = None
    if loader is not None and hasattr(loader, "get_source"):
        try:
            source = loader.get_source(module_name)
        except ImportError:
            # Traceback (most recent call last):
            #   File "/Users/dcramer/Development/django-sentry/sentry/client/handlers.py", line 31, in emit
            #     get_client().create_from_record(record, request=request)
            #   File "/Users/dcramer/Development/django-sentry/sentry/client/base.py", line 325, in create_from_record
            #     data['__sentry__']['frames'] = varmap(shorten, get_stack_info(stack))
            #   File "/Users/dcramer/Development/django-sentry/sentry/utils/stacks.py", line 112, in get_stack_info
            #     pre_context_lineno, pre_context, context_line, post_context = get_lines_from_file(filename, lineno, 7, loader, module_name)
            #   File "/Users/dcramer/Development/django-sentry/sentry/utils/stacks.py", line 24, in get_lines_from_file
            #     source = loader.get_source(module_name)
            #   File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/pkgutil.py", line 287, in get_source
            #     fullname = self._fix_name(fullname)
            #   File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/pkgutil.py", line 262, in _fix_name
            #     "module %s" % (self.fullname, fullname))
            # ImportError: Loader for module cProfile cannot handle module __main__
            source = None
        if source is not None:
            source = source.splitlines()

    if source is None:
        try:
            source = linecache.getlines(filename)
        except (__HOLE__, IOError):
            return None, None, None

    if not source:
        return None, None, None

    lower_bound = max(0, lineno - context_lines)
    upper_bound = min(lineno + 1 + context_lines, len(source))

    try:
        pre_context = [line.strip('\r\n') for line in source[lower_bound:lineno]]
        context_line = source[lineno].strip('\r\n')
        post_context = [line.strip('\r\n') for line in source[(lineno + 1):upper_bound]]
    except IndexError:
        # the file may have changed since it was loaded into memory
        return None, None, None

    return pre_context, context_line, post_context
OSError
dataset/ETHPy150Open getsentry/raven-python/raven/utils/stacks.py/get_lines_from_file
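The core of get_lines_from_file is a bounded slice around the failing line. A small sketch of the same slicing with linecache, writing a temp file so the example is self-contained:

    import linecache
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
        f.write("\n".join("line %d" % i for i in range(1, 11)))
        path = f.name

    source = linecache.getlines(path)
    lineno = 4                      # 0-based index of the line of interest
    context = 2
    lower = max(0, lineno - context)
    upper = min(lineno + 1 + context, len(source))
    pre = [l.rstrip("\n") for l in source[lower:lineno]]
    line = source[lineno].rstrip("\n")
    post = [l.rstrip("\n") for l in source[lineno + 1:upper]]
    assert line == "line 5"
    assert pre == ["line 3", "line 4"] and post == ["line 6", "line 7"]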
def _SequentialApply(self, func, args_iterator, exception_handler, caller_id, arg_checker, should_return_results, fail_on_error): """Performs all function calls sequentially in the current thread. No other threads or processes will be spawned. This degraded functionality is used when the multiprocessing module is not available or the user requests only one thread and one process. """ # Create a WorkerThread to handle all of the logic needed to actually call # the function. Note that this thread will never be started, and all work # is done in the current thread. worker_thread = WorkerThread(None, False) args_iterator = iter(args_iterator) # Count of sequential calls that have been made. Used for producing # suggestion to use gsutil -m. sequential_call_count = 0 while True: # Try to get the next argument, handling any exceptions that arise. try: args = args_iterator.next() except __HOLE__, e: break except Exception, e: # pylint: disable=broad-except _IncrementFailureCount() if fail_on_error: raise else: try: exception_handler(self, e) except Exception, _: # pylint: disable=broad-except self.logger.debug( 'Caught exception while handling exception for %s:\n%s', func, traceback.format_exc()) continue sequential_call_count += 1 if sequential_call_count == OFFER_GSUTIL_M_SUGGESTION_THRESHOLD: # Output suggestion near beginning of run, so user sees it early and can # ^C and try gsutil -m. self._MaybeSuggestGsutilDashM() if arg_checker(self, args): # Now that we actually have the next argument, perform the task. task = Task(func, args, caller_id, exception_handler, should_return_results, arg_checker, fail_on_error) worker_thread.PerformTask(task, self) if sequential_call_count >= gslib.util.GetTermLines(): # Output suggestion at end of long run, in case user missed it at the # start and it scrolled off-screen. self._MaybeSuggestGsutilDashM() # If the final iterated argument results in an exception, and that # exception modifies shared_attrs, we need to publish the results. worker_thread.shared_vars_updater.Update(caller_id, self) # pylint: disable=g-doc-args
StopIteration
dataset/ETHPy150Open GoogleCloudPlatform/gsutil/gslib/command.py/Command._SequentialApply
def run(self): num_tasks = 0 cur_task = None last_task = None try: args_iterator = iter(self.args_iterator) while True: try: args = args_iterator.next() except __HOLE__, e: break except Exception, e: # pylint: disable=broad-except _IncrementFailureCount() if self.fail_on_error: self.iterator_exception = e raise else: try: self.exception_handler(self.cls, e) except Exception, _: # pylint: disable=broad-except self.cls.logger.debug( 'Caught exception while handling exception for %s:\n%s', self.func, traceback.format_exc()) self.shared_variables_updater.Update(self.caller_id, self.cls) continue if self.arg_checker(self.cls, args): num_tasks += 1 last_task = cur_task cur_task = Task(self.func, args, self.caller_id, self.exception_handler, self.should_return_results, self.arg_checker, self.fail_on_error) if last_task: self.task_queue.put(last_task) except Exception, e: # pylint: disable=broad-except # This will also catch any exception raised due to an error in the # iterator when fail_on_error is set, so check that we failed for some # other reason before claiming that we had an unknown exception. if not self.iterator_exception: self.unknown_exception = e finally: # We need to make sure to update total_tasks[caller_id] before we enqueue # the last task. Otherwise, a worker can retrieve the last task and # complete it, then check total_tasks and determine that we're not done # producing all before we update total_tasks. This approach forces workers # to wait on the last task until after we've updated total_tasks. total_tasks[self.caller_id] = num_tasks if not cur_task: # This happens if there were zero arguments to be put in the queue. cur_task = Task(None, ZERO_TASKS_TO_DO_ARGUMENT, self.caller_id, None, None, None, None) self.task_queue.put(cur_task) # It's possible that the workers finished before we updated total_tasks, # so we need to check here as well. _NotifyIfDone(self.caller_id, caller_id_finished_count.get(self.caller_id))
StopIteration
dataset/ETHPy150Open GoogleCloudPlatform/gsutil/gslib/command.py/ProducerThread.run
def GetFailureCount(): """Returns the number of failures processed during calls to Apply().""" try: if isinstance(failure_count, int): return failure_count else: # It's a multiprocessing.Value() of type 'i'. return failure_count.value except __HOLE__: # If it wasn't initialized, Apply() wasn't called. return 0
NameError
dataset/ETHPy150Open GoogleCloudPlatform/gsutil/gslib/command.py/GetFailureCount
def ResetFailureCount(): """Resets the failure_count variable to 0 - useful if error is expected.""" try: global failure_count if isinstance(failure_count, int): failure_count = 0 else: # It's a multiprocessing.Value() of type 'i'. failure_count = multiprocessing.Value('i', 0) except __HOLE__: # If it wasn't initialized, Apply() wasn't called. pass
NameError
dataset/ETHPy150Open GoogleCloudPlatform/gsutil/gslib/command.py/ResetFailureCount
def get_column_letter(idx,): """Convert a column index into a column letter (3 -> 'C') """ try: return _STRING_COL_CACHE[idx] except __HOLE__: raise ValueError("Invalid column index {0}".format(idx))
KeyError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/openpyxl-2.3.0-b2/openpyxl/utils/__init__.py/get_column_letter
def column_index_from_string(str_col): """Convert a column name into a numerical index ('A' -> 1) """ # we use a function argument to get indexed name lookup try: return _COL_STRING_CACHE[str_col.upper()] except __HOLE__: raise ValueError("{0} is not a valid column name".format(str_col))
KeyError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/openpyxl-2.3.0-b2/openpyxl/utils/__init__.py/column_index_from_string
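The two lookups above are inverses backed by precomputed caches. A self-contained sketch of how such caches can be built for a bounded range; spreadsheet columns are bijective base 26 with no zero digit, so 'A' is 1 and 'AA' is 27:

    import string

    def column_letter(idx):
        """1 -> 'A', 26 -> 'Z', 27 -> 'AA' (bijective base 26)."""
        letters = []
        while idx > 0:
            idx, rem = divmod(idx - 1, 26)
            letters.append(string.ascii_uppercase[rem])
        return "".join(reversed(letters))

    # Precompute both directions once; lookups are then plain dict access.
    _STRING_COL = dict((i, column_letter(i)) for i in range(1, 703))
    _COL_STRING = dict((v, k) for k, v in _STRING_COL.items())

    assert _STRING_COL[27] == "AA"
    assert _COL_STRING["ZZ"] == 702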
def attach_upload(self, request, resource_name, pk, **kwargs): """Attaches uploaded files to the resource""" try: obj = self.cached_obj_get( request=request, pk=pk, **self.remove_api_resource_names(kwargs)) except __HOLE__: return http.HttpNotFound() except MultipleObjectsReturned: return http.HttpMultipleChoices( "More than one resource is found at this URI.") for field_name in getattr(self._meta, "uploads", []): uploaded_file = request.FILES.get(field_name, None) if uploaded_file is not None: setattr(obj, field_name, uploaded_file) obj.save() bundle = self.build_bundle(obj=obj, request=request) bundle = self.full_dehydrate(bundle) bundle = self.alter_detail_data_to_serialize(request, bundle) return self.create_response(request, bundle, http.HttpAccepted)
ObjectDoesNotExist
dataset/ETHPy150Open mozilla/inventory/vendor-local/src/django-tastytools/tastytools/resources.py/ModelResource.attach_upload
def create_test_resource(self, force=False, *args, **kwargs): force = force or {} try: return self._meta.testdata.create_test_resource(force=force, *args, **kwargs) except __HOLE__ as e: msg = "%s: Did you forget to define a testdata class for %s?" msg %= (e, self.__class__.__name__) raise Exception(msg)
AttributeError
dataset/ETHPy150Open mozilla/inventory/vendor-local/src/django-tastytools/tastytools/resources.py/ModelResource.create_test_resource
def get_testdata_data_view(self, request, api_name=None, resource_name=None): if self._meta.testdata is not None: output = { 'POST': self._meta.testdata.post, 'GET': self._meta.testdata.get } requested_type = request.GET.get('type', 'False') try: output = output[requested_type.upper()] except __HOLE__: pass response_class = HttpResponse else: output = { 'error': 'missing api' } response_class = http.HttpBadRequest return self.create_response(request, output, response_class=response_class)
KeyError
dataset/ETHPy150Open mozilla/inventory/vendor-local/src/django-tastytools/tastytools/resources.py/ModelResource.get_testdata_data_view
def root(self, request, url): """ Handles main URL routing for the databrowse app. `url` is the remainder of the URL -- e.g. 'objects/3'. """ # Delegate to the appropriate method, based on the URL. if url is None: return self.main_view(request) try: plugin_name, rest_of_url = url.split('/', 1) except ValueError: # need more than 1 value to unpack plugin_name, rest_of_url = url, None try: plugin = self.plugins[plugin_name] except __HOLE__: raise http.Http404('A plugin with the requested name ' 'does not exist.') return plugin.model_view(request, self, rest_of_url)
KeyError
dataset/ETHPy150Open Alir3z4/django-databrowse/django_databrowse/sites.py/ModelDatabrowse.root
def model_page(self, request, app_label, model_name, rest_of_url=None):
    """
    Handles the model-specific functionality of the databrowse site,
    delegating to the appropriate ModelDatabrowse class.
    """
    try:
        model = get_model(app_label, model_name)
    except LookupError:
        model = None
    if model is None:
        raise http.Http404("App %r, model %r, not found." %
                           (app_label, model_name))
    try:
        databrowse_class = self.registry[model]
    except __HOLE__:
        raise http.Http404("This model exists but has not been registered "
                           "with databrowse.")
    return databrowse_class(model, self).root(request, rest_of_url)
KeyError
dataset/ETHPy150Open Alir3z4/django-databrowse/django_databrowse/sites.py/DatabrowseSite.model_page
def __CastDate(self, values): """Cast DATE values (year/month/day) from input (to datetime.datetime). Casts DATE input values formulated as ISO string or time tuple inputs. Args: values: either a single string with ISO time representation or 3 integer valued date tuple (year, month, day). Returns: datetime.datetime value parsed from the input values. """ if len(values) == 1: value = self.__EncodeIfNeeded(values[0]) if isinstance(value, str): try: time_tuple = time.strptime(value, '%Y-%m-%d')[0:6] except ValueError, err: self.__CastError('DATE', values, err) else: self.__CastError('DATE', values, 'Single input value not a string') elif len(values) == 3: time_tuple = (values[0], values[1], values[2], 0, 0, 0) else: self.__CastError('DATE', values, 'function takes 1 string or 3 integer values') try: return datetime.datetime(*time_tuple) except __HOLE__, err: self.__CastError('DATE', values, err)
ValueError
dataset/ETHPy150Open CollabQ/CollabQ/.google_appengine/google/appengine/ext/gql/__init__.py/GQL.__CastDate
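The ISO branch of __CastDate relies on time.strptime producing a struct_time whose first six fields feed datetime.datetime directly. A two-line illustration of that conversion:

    import datetime
    import time

    time_tuple = time.strptime("2015-04-01", "%Y-%m-%d")[0:6]
    assert datetime.datetime(*time_tuple) == datetime.datetime(2015, 4, 1, 0, 0, 0)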
def __CastTime(self, values): """Cast TIME values (hour/min/sec) from input (to datetime.datetime). Casts TIME input values formulated as ISO string or time tuple inputs. Args: values: either a single string with ISO time representation or 1-4 integer valued time tuple (hour), (hour, minute), (hour, minute, second), (hour, minute, second, microsec). Returns: datetime.datetime value parsed from the input values. """ if len(values) == 1: value = self.__EncodeIfNeeded(values[0]) if isinstance(value, str): try: time_tuple = time.strptime(value, '%H:%M:%S') except __HOLE__, err: self.__CastError('TIME', values, err) time_tuple = (1970, 1, 1) + time_tuple[3:] time_tuple = time_tuple[0:6] elif isinstance(value, int): time_tuple = (1970, 1, 1, value) else: self.__CastError('TIME', values, 'Single input value not a string or integer hour') elif len(values) <= 4: time_tuple = (1970, 1, 1) + tuple(values) else: self.__CastError('TIME', values, 'function takes 1 to 4 integers or 1 string') try: return datetime.datetime(*time_tuple) except ValueError, err: self.__CastError('TIME', values, err)
ValueError
dataset/ETHPy150Open CollabQ/CollabQ/.google_appengine/google/appengine/ext/gql/__init__.py/GQL.__CastTime
def __CastDatetime(self, values): """Cast DATETIME values (string or tuple) from input (to datetime.datetime). Casts DATETIME input values formulated as ISO string or datetime tuple inputs. Args: values: either a single string with ISO representation or 3-7 integer valued time tuple (year, month, day, ...). Returns: datetime.datetime value parsed from the input values. """ if len(values) == 1: value = self.__EncodeIfNeeded(values[0]) if isinstance(value, str): try: time_tuple = time.strptime(str(value), '%Y-%m-%d %H:%M:%S')[0:6] except __HOLE__, err: self.__CastError('DATETIME', values, err) else: self.__CastError('DATETIME', values, 'Single input value not a string') else: time_tuple = values try: return datetime.datetime(*time_tuple) except ValueError, err: self.__CastError('DATETIME', values, err)
ValueError
dataset/ETHPy150Open CollabQ/CollabQ/.google_appengine/google/appengine/ext/gql/__init__.py/GQL.__CastDatetime
def Run(self, *args, **keyword_args): """Runs this query. Similar to datastore.Query.Run. Assumes that limit == -1 or > 0 Args: args: arguments used to bind to references in the compiled query object. keyword_args: dictionary-based arguments (for named parameters). Returns: A list of results if a query count limit was passed. A result iterator if no limit was given. """ bind_results = self.Bind(args, keyword_args) offset = 0 if self.__offset != -1: offset = self.__offset if self.__limit == -1: it = bind_results.Run() try: for i in xrange(offset): it.next() except __HOLE__: pass return it else: res = bind_results.Get(self.__limit, offset) return res
StopIteration
dataset/ETHPy150Open CollabQ/CollabQ/.google_appengine/google/appengine/ext/gql/__init__.py/GQL.Run
def __Literal(self): """Parse literals from our token list. Returns: The parsed literal from the input string (currently either a string, integer, or floating point value). """ logging.log(LOG_LEVEL, 'Try Literal') literal = None try: literal = int(self.__symbols[self.__next_symbol]) except ValueError: pass else: self.__next_symbol += 1 if literal is None: try: literal = float(self.__symbols[self.__next_symbol]) except __HOLE__: pass else: self.__next_symbol += 1 if literal is None: literal = self.__AcceptRegex(self.__quoted_string_regex) if literal: literal = literal[1:-1].replace("''", "'") if literal is None: if self.__Accept('TRUE'): literal = True elif self.__Accept('FALSE'): literal = False if literal is not None: return Literal(literal) if self.__Accept('NULL'): return Literal(None) else: return None
ValueError
dataset/ETHPy150Open CollabQ/CollabQ/.google_appengine/google/appengine/ext/gql/__init__.py/GQL.__Literal
@idiokit.stream def _collect_set(): result_set = set() while True: try: value = yield idiokit.next() except __HOLE__: break result_set.add(value) idiokit.stop(result_set)
StopIteration
dataset/ETHPy150Open abusesa/abusehelper/abusehelper/core/transformation.py/_collect_set
def load_data(self): try: data = self.request.get_signed_cookie(self.prefix) except __HOLE__: data = None except BadSignature: raise SuspiciousOperation('WizardView cookie manipulated') if data is None: return None return json.loads(data, cls=json.JSONDecoder)
KeyError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/django/contrib/formtools/wizard/storage/cookie.py/CookieStorage.load_data
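CookieStorage pairs Django's signed cookies with a JSON payload. A minimal sketch of the same sign-then-serialize round trip using django.core.signing directly; it assumes a configured SECRET_KEY, supplied here with a throwaway settings.configure call:

    from django.conf import settings
    settings.configure(SECRET_KEY="not-a-real-secret")

    from django.core import signing

    payload = {"step": "1", "step_data": {}}
    token = signing.dumps(payload, salt="wizard")   # JSON-serialize and sign
    assert signing.loads(token, salt="wizard") == payload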