Dataset columns:
    function: string (length 79 to 138k) -- Python source with one exception class masked as __HOLE__
    label: string (20 classes) -- the masked exception class
    info: string (length 42 to 261) -- source path of the record
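Each record below is a function/label/info triple: a Python function with one except-clause class masked as __HOLE__, the exception class that fills the hole, and the record's origin path. As a minimal sketch of how such records might be consumed -- assuming the dump is exported as JSON Lines with those three field names (the file name here is hypothetical) -- one could iterate over them like this:

    import json

    def iter_records(path='exceptions.jsonl'):  # hypothetical file name
        """Yield (function, label, info) triples from a JSONL export."""
        with open(path) as f:
            for line in f:
                record = json.loads(line)
                # 'function' holds the source with the __HOLE__ placeholder,
                # 'label' the masked exception class, 'info' the origin path.
                yield record['function'], record['label'], record['info']

    for function, label, info in iter_records():
        print(label, info)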
def link_or_copy(src, dst):
    """link_or_copy(src:str, dst:str) -> None
    Tries to create a hard link to a file. If it is not possible, it will
    copy file src to dst
    """
    # Links if possible, but we're across devices, we need to copy.
    try:
        os.link(src, dst)
    except __HOLE__, e:
        if e.errno == 18:
            # Across-device linking is not possible. Let's copy.
            shutil.copyfile(src, dst)
        else:
            raise e
OSError
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/core/system/linux.py/link_or_copy
def test3(self):
    """ Test if origin of link_or_copy'ed file is deleteable. """
    import tempfile
    import os
    (fd1, name1) = tempfile.mkstemp()
    os.close(fd1)
    (fd2, name2) = tempfile.mkstemp()
    os.close(fd2)
    os.unlink(name2)
    link_or_copy(name1, name2)
    try:
        os.unlink(name1)
    except __HOLE__:
        self.fail("Should not throw")
    os.unlink(name2)
OSError
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/core/system/linux.py/TestLinux.test3
def cache_data(line):
    args = shlex.split(line)
    reqid = args[0]
    cached = reqid in Request.cache._cached_reqs
    if reqid in Request.cache._last_used:
        last_used = Request.cache._last_used[reqid]
    else:
        last_used = 'NOT IN _last_used'
    in_all = reqid in Request.cache.all_ids
    in_unmangled = reqid in Request.cache.unmangled_ids
    try:
        ordered_ids_pos = Request.cache.ordered_ids.index(reqid)
    except __HOLE__:
        ordered_ids_pos = 'Not in ordered_ids'
    in_inmem = reqid in Request.cache.inmem_reqs
    print ''
    print 'Cache data about request %s ----------' % reqid
    print 'Cahced: %s' % cached
    print 'Last used: %s' % last_used
    print 'In all_ids: %s' % in_all
    print 'In unmangled: %s' % in_unmangled
    print 'Ordered id pos: %s' % ordered_ids_pos
    print 'Is inmem: %s' % in_inmem
    print ''
ValueError
dataset/ETHPy150Open roglew/pappy-proxy/pappyproxy/plugins/debug.py/cache_data
def pluralize(singular):
    """Return plural form of given lowercase singular word (English only).
    Based on ActiveState recipe http://code.activestate.com/recipes/413172/

    >>> pluralize('')
    ''
    >>> pluralize('goose')
    'geese'
    >>> pluralize('dolly')
    'dollies'
    >>> pluralize('genius')
    'genii'
    >>> pluralize('jones')
    'joneses'
    >>> pluralize('pass')
    'passes'
    >>> pluralize('zero')
    'zeros'
    >>> pluralize('casino')
    'casinos'
    >>> pluralize('hero')
    'heroes'
    >>> pluralize('church')
    'churches'
    >>> pluralize('x')
    'xs'
    >>> pluralize('car')
    'cars'
    """
    if not singular:
        return ''
    plural = ABERRANT_PLURAL_MAP.get(singular)
    if plural:
        return plural
    root = singular
    try:
        if singular[-1] == 'y' and singular[-2] not in VOWELS:
            root = singular[:-1]
            suffix = 'ies'
        elif singular[-1] == 's':
            if singular[-2] in VOWELS:
                if singular[-3:] == 'ius':
                    root = singular[:-2]
                    suffix = 'i'
                else:
                    root = singular[:-1]
                    suffix = 'ses'
            else:
                suffix = 'es'
        elif singular[-2:] in ('ch', 'sh'):
            suffix = 'es'
        else:
            suffix = 's'
    except __HOLE__:
        suffix = 's'
    plural = root + suffix
    return plural
IndexError
dataset/ETHPy150Open haystack/eyebrowse-server/common/npl/pluralize.py/pluralize
def get_response(self):
    try:
        msg = smart_str(self.msg.decode())
    except (__HOLE__,):
        msg = smart_str(self.msg)
    error = {
        'success': False,
        'data': {
            'code': self.code,
            'message': msg
        }
    }
    error.update(self.extra)
    response = self.RESPONSES[self.code]()
    response.content = json.dumps(error, separators=(',', ':'))
    return response
AttributeError
dataset/ETHPy150Open joestump/django-ajax/ajax/exceptions.py/AJAXError.get_response
def parse_workflow_config(rawactions):
    """Given a list of options from [ticket-workflow]"""
    required_attrs = {
        'oldstates': [],
        'newstate': '',
        'name': '',
        'label': '',
        'default': 0,
        'operations': [],
        'permissions': [],
    }
    optional_attrs = {
        'set_owner': [],
        'set_resolution': [],
    }
    known_attrs = required_attrs.copy()
    known_attrs.update(optional_attrs)

    actions = defaultdict(dict)
    for option, value in rawactions:
        parts = option.split('.')
        name = parts[0]
        if len(parts) == 1:
            try:
                # Base name, of the syntax: old,states,here -> newstate
                oldstates, newstate = [x.strip() for x in value.split('->')]
            except __HOLE__:
                continue  # Syntax error, a warning will be logged later
            actions[name]['oldstates'] = to_list(oldstates)
            actions[name]['newstate'] = newstate
        else:
            attribute = parts[1]
            if attribute not in known_attrs.keys() or \
                    isinstance(known_attrs[attribute], str):
                actions[name][attribute] = value
            elif isinstance(known_attrs[attribute], int):
                actions[name][attribute] = int(value)
            elif isinstance(known_attrs[attribute], list):
                actions[name][attribute] = to_list(value)

    for action, attributes in actions.items():
        if 'label' not in attributes:
            if 'name' in attributes:  # backwards-compatibility, #11828
                attributes['label'] = attributes['name']
            else:
                attributes['label'] = action.replace("_", " ").strip()
        for key, val in required_attrs.items():
            attributes.setdefault(key, val)
    return actions
ValueError
dataset/ETHPy150Open edgewall/trac/trac/ticket/default_workflow.py/parse_workflow_config
def current_user_person(self):
    """https://familysearch.org/developers/docs/api/tree/Current_Tree_Person_resource"""
    try:
        url = self.collections["FSFT"]["response"]["collections"][0][
            "links"]["current-user-person"]["href"]
    except __HOLE__:
        self.update_collection("FSFT")
        url = self.collections["FSFT"]["response"]["collections"][0][
            "links"]["current-user-person"]["href"]
    return url
KeyError
dataset/ETHPy150Open AmEv7Fam/familysearch-python-sdk-opensource/familysearch/user.py/User.current_user_person
def current_user_history(self):
    """https://familysearch.org/developers/docs/api/users/Current_User_History_resource"""
    try:
        url = self.collections["FSFT"]["response"]["collections"][0][
            "links"]["current-user-history"]["href"]
    except __HOLE__:
        self.update_collection("FSFT")
        url = self.collections["FSFT"]["response"]["collections"][0][
            "links"]["current-user-history"]["href"]
    return url
KeyError
dataset/ETHPy150Open AmEv7Fam/familysearch-python-sdk-opensource/familysearch/user.py/User.current_user_history
def validate_port(confvar):
    """
    Validate that the value of confvar is between [0, 65535].
    Returns [(confvar, error_msg)] or []
    """
    port_val = confvar.get()
    error_res = [(confvar, 'Port should be an integer between 0 and 65535 (inclusive).')]
    try:
        port = int(port_val)
        if port < 0 or port > 65535:
            return error_res
    except __HOLE__:
        return error_res
    return []
ValueError
dataset/ETHPy150Open cloudera/hue/desktop/core/src/desktop/lib/conf.py/validate_port
def _is_empty(self):
    if self.is_folder:
        try:
            dirs, files = default_storage.listdir(self.path)
        except __HOLE__:
            from mezzanine.core.exceptions import FileSystemEncodingChanged
            raise FileSystemEncodingChanged()
        if not dirs and not files:
            return True
    return False
UnicodeDecodeError
dataset/ETHPy150Open stephenmcd/filebrowser-safe/filebrowser_safe/base.py/FileObject._is_empty
def _applicable_fixture(self, fixture, user_id):
    """Determine if this fixture is applicable for given user id."""
    is_public = fixture["is_public"]
    try:
        uid = fixture["properties"]["user_id"]
    except __HOLE__:
        uid = None
    return uid == user_id or is_public
KeyError
dataset/ETHPy150Open nii-cloud/dodai-compute/nova/tests/api/openstack/test_images.py/ImageControllerWithGlanceServiceTest._applicable_fixture
def secure_project(self, secureops="secureops"):
    """Calling this does two things: It calls useradd to create a new
    Linux user, and it changes permissions on settings.py so only that
    user can access it. This is a necessary step before calling
    configure_apache()

    Pass in the path to the secureops binary, otherwise PATH is searched
    """
    # Touch certian files and directories so they can be secured before
    # they're filled with sensitive information
    self._pre_secure()

    # Attempt to create a linux user, and change user permissions
    # of the settings.py and the sqlite database
    # Name the user after opus and the project name
    newname = "opus"+self.projectname
    command = [secureops,
            "-c",
            newname,
            ]
    # Set sensitive files appropriately
    settingsfile = os.path.join(self.projectdir, "settings.py")
    command.append(settingsfile)
    # Also secure log directory
    command.append(os.path.join(self.projectdir, "log"))
    # And the opus settings
    command.append(os.path.join(self.projectdir, "opussettings.json"))
    # And sqlite dir and file
    command.append(os.path.join(self.projectdir, "sqlite"))
    command.append(os.path.join(self.projectdir, "sqlite", "database.sqlite"))
    command.append(os.path.join(self.projectdir, "ssl.crt"))
    command.append(os.path.join(self.projectdir, "ssl.key"))
    command.append(os.path.join(self.projectdir, "run"))
    command.append(os.path.join(self.projectdir, "opus_secure_uploads"))
    # Set writable several directories under the requirements env
    command.append(os.path.join(self.projectdir, "env"))
    for d in (['bin'], ['include'], ['lib','python*','site-packages']):
        p = glob(os.path.join(self.projectdir, "env", *d))[0]
        command.append(p)

    log.info("Calling secure operation with arguments {0!r}".format(command))
    log.debug("cwd: {0}".format(os.getcwd()))
    proc = subprocess.Popen(command,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            )
    output = proc.communicate()[0]
    ret = proc.wait()
    log.debug("Secure ops finished. Ret: {1}, Output: {0!r}".format(output, ret))
    if ret:
        raise DeploymentException("Could not create user and/or change file permissions. {0}. Ret: {1}".format(output, ret))

    # Also an important step: delete settings.pyc if it exists, which could
    # have sensitive information in it (although not likely, the usual
    # setup is to store settings in opussettings.json
    settingspyc = os.path.join(self.projectdir, "settings.pyc")
    if os.path.exists(settingspyc):
        try:
            os.unlink(settingspyc)
        except __HOLE__, e:
            raise DeploymentException("Couldn't delete settings.pyc! {0}".format(e))

    # Generate a new secret key for the settings. One may have been set at
    # create time, but it should be considered public knowledge since the
    # permissions hadn't been set yet.
    self.config["SECRET_KEY"] = ''.join([random.choice(
        'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for _ in range(50)])
    self.config.save()
IOError
dataset/ETHPy150Open bmbouter/Opus/opus/lib/deployer/__init__.py/ProjectDeployer.secure_project
def configure_apache(self, apache_conf_dir, httpport, sslport, servername_suffix,
        pythonpath="", secureops="secureops",
        ssl_crt=None, ssl_key=None, ssl_chain=None):
    """Configures apache to serve this Django project.
    apache_conf_dir should be apache's conf.d directory where a .conf file
    can be dropped

    httpport and sslport are used in the port part of the <VirtualHost>
    directive in the apache config file. These can be None to omit serving
    on that port/protocol.

    servername_suffix is a string that will be appended to the end of the
    project name for the apache ServerName directive

    ssl_crt and ssl_key, if specified, will be used in lieu of a self
    signed certificate.
    """
    # Check if our dest file exists, so as not to overwrite it
    config_path = os.path.join(apache_conf_dir, "opus"+self.projectname+".conf")

    # Write out a wsgi config to the project dir
    wsgi_dir = os.path.join(self.projectdir, "wsgi")
    try:
        os.mkdir(wsgi_dir)
    except __HOLE__, e:
        import errno
        if e.errno != errno.EEXIST:
            raise
        # Directory already exists, no big deal

    if pythonpath:
        ppdirective = "sys.path.append({0!r})\n".format(pythonpath)
    else:
        ppdirective = ""

    envpath = glob(os.path.join(self.projectdir, "env", "lib", "python*", "site-packages"))[0]

    with open(os.path.join(wsgi_dir, "django.wsgi"), 'w') as wsgi:
        wsgi.write("""
import os
import sys
import site

os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
os.environ['OPUS_SETTINGS_FILE'] = {settingspath!r}
os.environ['CELERY_LOADER'] = 'django'
{additionalpaths}
# Any dependencies for installed applications go in here
site.addsitedir({envpath!r})

# Needed so that apps can import their own things without having to know the
# project name.
sys.path.append({projectpath!r})

#import django.core.handlers.wsgi
#application = django.core.handlers.wsgi.WSGIHandler()
import opus.lib.profile
application = opus.lib.profile.OpusWSGIHandler()
""".format(projectname = self.projectname,
           projectpath = self.projectdir,
           additionalpaths = ppdirective,
           settingspath = os.path.join(self.projectdir, "opussettings.json"),
           envpath = envpath
           ))

    # Discover the group under which to run the daemon proceses. Should be
    # an unpriviliged group, try to discover what that is.
    for group in ('nogroup', 'nobody', ):
        try:
            groupinfo = grp.getgrnam(group)
        except KeyError:
            pass
        else:
            break
    else:
        raise DeploymentException("Couldn't guess the unprivileged group to use. Bailing")

    # Write out apache config
    # I know the following lines are confusing. Perhaps a TODO later would
    # be to push most of the templates out of the code
    with open(config_path, 'w') as config:
        config.write("""WSGIDaemonProcess {name} threads=4 processes=2 home={projectpath} maximum-requests=1000 user={user} group={group} display-name={projectname}
""".format(
            name="opus"+self.projectname,
            user="opus"+self.projectname,
            group=group,
            projectname=self.projectname,
            projectpath=self.projectdir,
            ))
        for port in (httpport, sslport):
            if not port:
                continue
            if port == sslport:
                if not (ssl_crt and ssl_key):
                    ssl_crt = os.path.join(self.projectdir, "ssl.crt")
                    ssl_key = os.path.join(self.projectdir, "ssl.key")
                    ssl_chain = ""
                else:
                    ssl_chain = "SSLCertificateChainFile " + ssl_chain
                ssllines = """
    SSLEngine On
    SSLCertificateFile {0}
    SSLCertificateKeyFile {1}
    {2}
""".format(ssl_crt, ssl_key, ssl_chain)
            else:
                ssllines = ""
            config.write("""
<VirtualHost {namevirtualhost}>
    {ssllines}
    ServerName {projectname}{servername_suffix}
    Alias /adminmedia {adminmedia}
    Alias /media {mediadir}
    WSGIProcessGroup {name}
    WSGIApplicationGroup %{{GLOBAL}}
    WSGIScriptAlias / {wsgifile}
    <Directory {wsgidir}>
        Order allow,deny
        Allow from all
    </Directory>
    <Directory {mediadir}>
        Order allow,deny
        Allow from all
    </Directory>
</VirtualHost>
\n""".format(
                port=port,
                ssllines=ssllines,
                projectname=self.projectname,
                servername_suffix=servername_suffix,
                mediadir=os.path.join(self.projectdir, "media"),
                name="opus"+self.projectname,
                namevirtualhost="*:{0}".format(port),
                wsgidir=wsgi_dir,
                wsgifile=os.path.join(wsgi_dir,"django.wsgi"),
                adminmedia=os.path.join(__import__("django").__path__[0],
                    'contrib','admin','media'),
                ))

    # Restart apache gracefully
    proc = subprocess.Popen([secureops,"-r"],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT)
    output = proc.communicate()[0]
    ret = proc.wait()
    if ret:
        raise DeploymentException("Could not restart apache. {0}".format(output))
OSError
dataset/ETHPy150Open bmbouter/Opus/opus/lib/deployer/__init__.py/ProjectDeployer.configure_apache
def start_supervisord(self, secureops="secureops"):
    env = dict(os.environ)
    try:
        del env['DJANGO_SETTINGS_MODULE']
    except __HOLE__:
        pass
    username = "opus"+self.projectname
    # Start it up
    log.info("Starting up supervisord for the project")
    proc = subprocess.Popen([secureops, '-s', username, self.projectdir, '-S'],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            env=env,
            close_fds=True,
            )
    output = proc.communicate()[0]
    ret = proc.wait()
    if ret:
        raise DeploymentException("Failed to start supervisord. Ret:{0}. Output:{1}".format(ret, output))
    log.debug("Secure ops finished. Ret: {0}".format(ret))
KeyError
dataset/ETHPy150Open bmbouter/Opus/opus/lib/deployer/__init__.py/ProjectDeployer.start_supervisord
def _build_installer_in_docker(self, cluster, online_installer=None, unique=False):
    if online_installer is None:
        paTestOnlineInstaller = os.environ.get('PA_TEST_ONLINE_INSTALLER')
        online_installer = paTestOnlineInstaller is not None

    container_name = 'installer'
    installer_container = DockerCluster(
        container_name, [], DEFAULT_LOCAL_MOUNT_POINT,
        DEFAULT_DOCKER_MOUNT_POINT)
    try:
        installer_container.create_image(
            BASE_TD_DOCKERFILE_DIR,
            BASE_TD_IMAGE_NAME,
            BASE_IMAGE_NAME
        )
        installer_container.start_containers(
            BASE_TD_IMAGE_NAME
        )
    except DockerClusterException as e:
        installer_container.tear_down()
        self.testcase.fail(e.msg)

    try:
        shutil.copytree(
            prestoadmin.main_dir,
            os.path.join(
                installer_container.get_local_mount_dir(container_name),
                'presto-admin'),
            ignore=shutil.ignore_patterns('tmp', '.git', 'presto*.rpm')
        )

        # Pin pip to 7.1.2 because 8.0.0 removed support for distutils
        # installed projects, of which the system setuptools is one on our
        # Docker image. pip 8.0.1 or 8.0.2 replaced the error with a
        # deprecation warning, and also warns that Python 2.6 is
        # deprecated. While we still need to support Python 2.6, we'll pin
        # pip to a 7.x version, but we should revisit this once we no
        # longer need to support 2.6:
        # https://github.com/pypa/pip/issues/3384
        installer_container.run_script_on_host(
            'set -e\n'
            'pip install --upgrade pip==7.1.2\n'
            'pip install --upgrade wheel==0.23.0\n'
            'pip install --upgrade setuptools==20.1.1\n'
            'mv %s/presto-admin ~/\n'
            'cd ~/presto-admin\n'
            'make %s\n'
            'cp dist/prestoadmin-*.tar.bz2 %s'
            % (installer_container.mount_dir,
               'dist' if online_installer else 'dist-offline',
               installer_container.mount_dir),
            container_name)

        try:
            os.makedirs(cluster.get_dist_dir(unique))
        except __HOLE__, e:
            if e.errno != errno.EEXIST:
                raise
        local_container_dist_dir = os.path.join(
            prestoadmin.main_dir,
            installer_container.get_local_mount_dir(container_name)
        )
        installer_file = fnmatch.filter(
            os.listdir(local_container_dist_dir),
            'prestoadmin-*.tar.bz2')[0]
        shutil.copy(
            os.path.join(local_container_dist_dir, installer_file),
            cluster.get_dist_dir(unique))
    finally:
        installer_container.tear_down()
OSError
dataset/ETHPy150Open prestodb/presto-admin/tests/product/prestoadmin_installer.py/PrestoadminInstaller._build_installer_in_docker
def setup_module(module):
    from nose import SkipTest
    try:
        tagger = Senna('/usr/share/senna-v2.0', ['pos', 'chk', 'ner'])
    except __HOLE__:
        raise SkipTest("Senna executable not found")
OSError
dataset/ETHPy150Open nltk/nltk/nltk/tag/senna.py/setup_module
def tearDown(self):
    super(TestCreate, self).setUp()
    try:
        self.service.event_types.delete(self.event_type_name)
    except __HOLE__:
        pass
KeyError
dataset/ETHPy150Open splunk/splunk-sdk-python/tests/test_event_type.py/TestCreate.tearDown
def tearDown(self):
    super(TestEventType, self).setUp()
    try:
        self.service.event_types.delete(self.event_type_name)
    except __HOLE__:
        pass
KeyError
dataset/ETHPy150Open splunk/splunk-sdk-python/tests/test_event_type.py/TestEventType.tearDown
def unzip_snap_mp4(abspath, quiet=False):
    zipped_snap = ZipFile(abspath)
    # unzip /path/to/zipfile.mp4 to /path/to/zipfile
    unzip_dir = os.path.splitext(abspath)[0]
    zipped_snap.extractall(unzip_dir)
    # move /path/to/zipfile.mp4 to /path/to/zipfile.zip
    os.rename(abspath, unzip_dir + '.zip')
    for f in os.listdir(unzip_dir):
        # mv /path/to/zipfile/media~* /path/to/zipfile.mp4
        if f.split('~')[0] == 'media':
            os.rename(os.path.join(unzip_dir, f), unzip_dir + '.mp4')
        # mv /path/to/zipfile/overlay~* /path/to/zipfile_overlay.png
        elif f.split('~')[0] == 'overlay':
            os.rename(os.path.join(unzip_dir, f), unzip_dir + '_overlay.png')
    try:
        os.rmdir(unzip_dir)
    except __HOLE__:
        print('Something other than a video or overlay was in {0}. \
Cannot remove directory, not empty.'
              .format(unzip_dir + '.zip'))
    if not quiet:
        print('Unzipped {0}'.format(abspath))
OSError
dataset/ETHPy150Open rxw/snapy/snapy/utils.py/unzip_snap_mp4
def system_methodHelp(self, method_name):
    """system.methodHelp('add') => "Adds two integers together"

    Returns a string containing documentation for the specified method."""

    method = None
    if method_name in self.funcs:
        method = self.funcs[method_name]
    elif self.instance is not None:
        # Instance can implement _methodHelp to return help for a method
        if hasattr(self.instance, '_methodHelp'):
            return self.instance._methodHelp(method_name)
        # if the instance has a _dispatch method then we
        # don't have enough information to provide help
        elif not hasattr(self.instance, '_dispatch'):
            try:
                method = resolve_dotted_attribute(
                            self.instance,
                            method_name,
                            self.allow_dotted_names
                            )
            except __HOLE__:
                pass

    # Note that we aren't checking that the method actually
    # be a callable object of some kind
    if method is None:
        return ""
    else:
        import pydoc
        return pydoc.getdoc(method)
AttributeError
dataset/ETHPy150Open kleientertainment/ds_mod_tools/pkg/win32/Python27/Lib/SimpleXMLRPCServer.py/SimpleXMLRPCDispatcher.system_methodHelp
def _dispatch(self, method, params):
    """Dispatches the XML-RPC method.

    XML-RPC calls are forwarded to a registered function that
    matches the called XML-RPC method name. If no such function
    exists then the call is forwarded to the registered instance,
    if available.

    If the registered instance has a _dispatch method then that
    method will be called with the name of the XML-RPC method and
    its parameters as a tuple
    e.g. instance._dispatch('add',(2,3))

    If the registered instance does not have a _dispatch method
    then the instance will be searched to find a matching method
    and, if found, will be called.

    Methods beginning with an '_' are considered private and will
    not be called.
    """

    func = None
    try:
        # check to see if a matching function has been registered
        func = self.funcs[method]
    except __HOLE__:
        if self.instance is not None:
            # check for a _dispatch method
            if hasattr(self.instance, '_dispatch'):
                return self.instance._dispatch(method, params)
            else:
                # call instance method directly
                try:
                    func = resolve_dotted_attribute(
                        self.instance,
                        method,
                        self.allow_dotted_names
                        )
                except AttributeError:
                    pass

    if func is not None:
        return func(*params)
    else:
        raise Exception('method "%s" is not supported' % method)
KeyError
dataset/ETHPy150Open kleientertainment/ds_mod_tools/pkg/win32/Python27/Lib/SimpleXMLRPCServer.py/SimpleXMLRPCDispatcher._dispatch
def do_POST(self):
    """Handles the HTTP POST request.

    Attempts to interpret all HTTP POST requests as XML-RPC calls,
    which are forwarded to the server's _dispatch method for handling.
    """

    # Check that the path is legal
    if not self.is_rpc_path_valid():
        self.report_404()
        return

    try:
        # Get arguments by reading body of request.
        # We read this in chunks to avoid straining
        # socket.read(); around the 10 or 15Mb mark, some platforms
        # begin to have problems (bug #792570).
        max_chunk_size = 10*1024*1024
        size_remaining = int(self.headers["content-length"])
        L = []
        while size_remaining:
            chunk_size = min(size_remaining, max_chunk_size)
            chunk = self.rfile.read(chunk_size)
            if not chunk:
                break
            L.append(chunk)
            size_remaining -= len(L[-1])
        data = ''.join(L)

        data = self.decode_request_content(data)
        if data is None:
            return #response has been sent

        # In previous versions of SimpleXMLRPCServer, _dispatch
        # could be overridden in this class, instead of in
        # SimpleXMLRPCDispatcher. To maintain backwards compatibility,
        # check to see if a subclass implements _dispatch and dispatch
        # using that method if present.
        response = self.server._marshaled_dispatch(
                data, getattr(self, '_dispatch', None), self.path
            )
    except Exception, e:
        # This should only happen if the module is buggy
        # internal error, report as HTTP server error
        self.send_response(500)

        # Send information about the exception if requested
        if hasattr(self.server, '_send_traceback_header') and \
                self.server._send_traceback_header:
            self.send_header("X-exception", str(e))
            self.send_header("X-traceback", traceback.format_exc())

        self.send_header("Content-length", "0")
        self.end_headers()
    else:
        # got a valid XML RPC response
        self.send_response(200)
        self.send_header("Content-type", "text/xml")
        if self.encode_threshold is not None:
            if len(response) > self.encode_threshold:
                q = self.accept_encodings().get("gzip", 0)
                if q:
                    try:
                        response = xmlrpclib.gzip_encode(response)
                        self.send_header("Content-Encoding", "gzip")
                    except __HOLE__:
                        pass
        self.send_header("Content-length", str(len(response)))
        self.end_headers()
        self.wfile.write(response)
NotImplementedError
dataset/ETHPy150Open kleientertainment/ds_mod_tools/pkg/win32/Python27/Lib/SimpleXMLRPCServer.py/SimpleXMLRPCRequestHandler.do_POST
def decode_request_content(self, data):
    #support gzip encoding of request
    encoding = self.headers.get("content-encoding", "identity").lower()
    if encoding == "identity":
        return data
    if encoding == "gzip":
        try:
            return xmlrpclib.gzip_decode(data)
        except NotImplementedError:
            self.send_response(501, "encoding %r not supported" % encoding)
        except __HOLE__:
            self.send_response(400, "error decoding gzip content")
    else:
        self.send_response(501, "encoding %r not supported" % encoding)
    self.send_header("Content-length", "0")
    self.end_headers()
ValueError
dataset/ETHPy150Open kleientertainment/ds_mod_tools/pkg/win32/Python27/Lib/SimpleXMLRPCServer.py/SimpleXMLRPCRequestHandler.decode_request_content
def handle_request(self, request_text = None):
    """Handle a single XML-RPC request passed through a CGI post method.

    If no XML data is given then it is read from stdin. The resulting
    XML-RPC response is printed to stdout along with the correct HTTP
    headers.
    """

    if request_text is None and \
        os.environ.get('REQUEST_METHOD', None) == 'GET':
        self.handle_get()
    else:
        # POST data is normally available through stdin
        try:
            length = int(os.environ.get('CONTENT_LENGTH', None))
        except (__HOLE__, ValueError):
            length = -1
        if request_text is None:
            request_text = sys.stdin.read(length)

        self.handle_xmlrpc(request_text)
TypeError
dataset/ETHPy150Open kleientertainment/ds_mod_tools/pkg/win32/Python27/Lib/SimpleXMLRPCServer.py/CGIXMLRPCRequestHandler.handle_request
@register.tag
def get_object_models(parser, token):
    """
    USAGE:
    {% load get_objects %}
    {% get_object_models "APP_NAME" "MODEL_NAME" "SORT" "NUMBER OF ITEMS" "VARIABLE_NAME"
    Then iterate through

    EXAMPLE:
    {% load get_objects %}
    {% get_object_models "django_yaba" "Story" "-created" "3" "blog_posts" %}
    <ul>
    {% if blog_posts %}
        {% for post in blog_posts %}
            <li><a href="{{ post.get_absolute_url }}">{{ post.title }}</a></li>
        {% endfor %}
    {% endif %}
    """
    try:
        tag_name, app_name, model_name, sort, count, var_name = token.split_contents()
    except __HOLE__:
        raise template.TemplateSyntaxError("Object Tag requires 5 variables")

    return ModelObjectNode(app_name[1:-1], model_name[1:-1], sort[1:-1],
        count[1:-1], var_name[1:-1])
ValueError
dataset/ETHPy150Open f4nt/djtracker/djtracker/templatetags/get_objects.py/get_object_models
def get_image(self, image_id):
    """
    Shortcut method to retrieve a specific image (AMI).

    :type image_id: string
    :param image_id: the ID of the Image to retrieve

    :rtype: :class:`boto.ec2.image.Image`
    :return: The EC2 Image specified or None if the image is not found
    """
    try:
        return self.get_all_images(image_ids=[image_id])[0]
    except __HOLE__:
        # None of those images available
        return None
IndexError
dataset/ETHPy150Open darcyliu/storyboard/boto/ec2/connection.py/EC2Connection.get_image
def __init__(self, nodelist):
    try:
        app_id = settings.FACEBOOK_APPLICATION_ID
    except __HOLE__:
        raise template.TemplateSyntaxError, "%r tag requires FACEBOOK_APP_ID to be configured." \
            % token.contents.split()[0]
    self.app_id = app_id
    self.nodelist = nodelist
AttributeError
dataset/ETHPy150Open jgorset/fandjango/fandjango/templatetags/facebook.py/FacebookNode.__init__
def _decode_subelements(self):
    """Decode the stanza subelements."""
    for child in self._element:
        if child.tag == self._show_tag:
            self._show = child.text
        elif child.tag == self._status_tag:
            self._status = child.text
        elif child.tag == self._priority_tag:
            try:
                self._priority = int(child.text.strip())
                if self._priority < -128 or self._priority > 127:
                    raise ValueError
            except __HOLE__:
                raise BadRequestProtocolError(
                    "Presence priority not an integer")
ValueError
dataset/ETHPy150Open kuri65536/python-for-android/python3-alpha/python-libs/pyxmpp2/presence.py/Presence._decode_subelements
def tearDown(self):
    for recorder in self.top.recorders:
        recorder.close()
    os.chdir(self.startdir)
    if not os.environ.get('OPENMDAO_KEEPDIRS', False):
        try:
            shutil.rmtree(self.tempdir)
        except __HOLE__:
            pass
OSError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.lib/src/openmdao/lib/casehandlers/test/test_csvcase.py/TestCase.tearDown
def tearDown(self):
    for recorder in self.top.recorders:
        recorder.close()
    os.chdir(self.startdir)
    if not os.environ.get('OPENMDAO_KEEPDIRS', False):
        try:
            shutil.rmtree(self.tempdir)
        except __HOLE__:
            pass
OSError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.lib/src/openmdao/lib/casehandlers/test/test_csvcase.py/CSVCaseRecorderTestCase.tearDown
def extract_params(raw):
    """Extract parameters and return them as a list of 2-tuples.

    Will successfully extract parameters from urlencoded query strings,
    dicts, or lists of 2-tuples. Empty strings/dicts/lists will return an
    empty list of parameters. Any other input will result in a return
    value of None.
    """
    if isinstance(raw, basestring):
        try:
            params = urldecode(raw)
        except ValueError:
            params = None
    elif hasattr(raw, '__iter__'):
        try:
            dict(raw)
        except ValueError:
            params = None
        except __HOLE__:
            params = None
        else:
            params = list(raw.items() if isinstance(raw, dict) else raw)
            params = decode_params_utf8(params)
    else:
        params = None

    return params
TypeError
dataset/ETHPy150Open hzlf/openbroadcast/services/bcmon/requests/packages/oauthlib/common.py/extract_params
def open(self, port='', canonical=True):
    """
    Opens fd on terminal console in non blocking mode.

    port is the serial port device path name
    or if '' then use os.ctermid() which returns path name of console
    usually '/dev/tty'

    canonical sets the mode for the port. Canonical means no characters
    available until a newline

    os.O_NONBLOCK makes non blocking io
    os.O_RDWR allows both read and write.
    os.O_NOCTTY don't make this the controlling terminal of the process
    O_NOCTTY is only for cross platform portability BSD never makes it the
    controlling terminal

    Don't use print at same time since it will mess up non blocking reads.

    Default is canonical mode so no characters available until newline
    need to add code to enable non canonical mode

    It appears that canonical mode only applies to the console. For other
    serial ports the characters are available immediately
    """
    if not port:
        port = os.ctermid()  # default to console

    try:
        self.fd = os.open(port, os.O_NONBLOCK | os.O_RDWR | os.O_NOCTTY)
    except __HOLE__ as ex:
        console.terse("os.error = {0}\n".format(ex))
        return False
    return True
OSError
dataset/ETHPy150Open ioflo/ioflo/ioflo/aio/serial/serialing.py/ConsoleNb.open
def getLine(self, bs=80):
    """Gets nonblocking line from console up to bs characters including newline.

    Returns empty string if no characters available else returns line.
    In canonical mode no chars available until newline is entered.
    """
    line = ''
    try:
        line = os.read(self.fd, bs)
    except OSError as ex1:  #if no chars available generates exception
        try:  #need to catch correct exception
            errno = ex1.args[0]  #if args not sequence get TypeError
            if errno == 35:
                pass  #No characters available
            else:
                raise  #re raise exception ex1
        except __HOLE__ as ex2:  #catch args[0] mismatch above
            raise ex1  #ignore TypeError, re-raise exception ex1

    return line
TypeError
dataset/ETHPy150Open ioflo/ioflo/ioflo/aio/serial/serialing.py/ConsoleNb.getLine
def receive(self):
    """
    Reads nonblocking characters from serial device up to bs characters
    Returns empty bytes if no characters available else returns all available.
    In canonical mode no chars are available until newline is entered.
    """
    data = b''
    try:
        data = os.read(self.fd, self.bs)  #if no chars available generates exception
    except __HOLE__ as ex1:  # ex1 is the target instance of the exception
        if ex1.errno == errno.EAGAIN:  #BSD 35, Linux 11
            pass  #No characters available
        else:
            raise  #re raise exception ex1

    return data
OSError
dataset/ETHPy150Open ioflo/ioflo/ioflo/aio/serial/serialing.py/DeviceNb.receive
def send(self, data=b'\n'):
    """
    Writes data bytes to serial device port.
    Returns number of bytes sent
    """
    try:
        count = os.write(self.fd, data)
    except __HOLE__ as ex1:  # ex1 is the target instance of the exception
        if ex1.errno == errno.EAGAIN:  #BSD 35, Linux 11
            count = 0  # buffer full can't write
        else:
            raise  #re raise exception ex1

    return count
OSError
dataset/ETHPy150Open ioflo/ioflo/ioflo/aio/serial/serialing.py/DeviceNb.send
def receive(self):
    """
    Reads nonblocking characters from serial device up to bs characters
    Returns empty bytes if no characters available else returns all available.
    In canonical mode no chars are available until newline is entered.
    """
    data = b''
    try:
        data = self.serial.read(self.bs)  #if no chars available generates exception
    except __HOLE__ as ex1:  # ex1 is the target instance of the exception
        if ex1.errno == errno.EAGAIN:  #BSD 35, Linux 11
            pass  #No characters available
        else:
            raise  #re raise exception ex1

    return data
OSError
dataset/ETHPy150Open ioflo/ioflo/ioflo/aio/serial/serialing.py/SerialNb.receive
def send(self, data=b'\n'):
    """
    Writes data bytes to serial device port.
    Returns number of bytes sent
    """
    try:
        count = self.serial.write(data)
    except __HOLE__ as ex1:  # ex1 is the target instance of the exception
        if ex1.errno == errno.EAGAIN:  #BSD 35, Linux 11
            count = 0  # buffer full can't write
        else:
            raise  #re raise exception ex1

    return count
OSError
dataset/ETHPy150Open ioflo/ioflo/ioflo/aio/serial/serialing.py/SerialNb.send
def __init__(self, name=u'', uid=0, port=None, speed=9600, bs=1024, server=None):
    """
    Initialization method for instance.

    Parameters:
        name = user friendly name for driver
        uid = unique identifier for driver
        port = serial device port path string
        speed = serial port speed in bps
        canonical = canonical mode True or False
        bs = buffer size for reads
        server = serial port device server if any

    Attributes:
        name = user friendly name for driver
        uid = unique identifier for driver
        server = serial device server nonblocking
        txes = deque of data bytes to send
        rxbs = bytearray of data bytes received
    """
    self.name = name
    self.uid = uid

    if not server:
        try:
            import serial
            self.server = SerialNb(port=port, speed=speed, bs=bs)
        except __HOLE__ as ex:
            console.terse("Error: importing pyserial\n{0}\n".format(ex))
            self.server = DeviceNb(port=port, speed=speed, bs=bs)
    else:
        self.server = server

    self.txes = deque()  # deque of data to send
    self.rxbs = bytearray()  # byte array of data received
ImportError
dataset/ETHPy150Open ioflo/ioflo/ioflo/aio/serial/serialing.py/Driver.__init__
def update(self, obj, set_fields=None, unset_fields=None, update_obj=True):
    collection = self.get_collection_for_cls(obj.__class__)
    if obj.pk == None:
        raise obj.DoesNotExist("update() called on document without primary key!")

    def serialize_fields(fields):
        if isinstance(fields, (list,tuple)):
            update_dict = {}
            for key in fields:
                try:
                    update_dict[key] = get_value(obj,key)
                except __HOLE__:
                    pass
        elif isinstance(fields,dict):
            update_dict = fields.copy()
        else:
            raise TypeError("fields must be a list/tuple!")
        return update_dict

    if set_fields:
        set_attributes = serialize_fields(set_fields)
    else:
        set_attributes = {}
    if unset_fields:
        unset_attributes = list(unset_fields)
    else:
        unset_attributes = []

    self.call_hook('before_update',obj,set_attributes,unset_attributes)

    set_attributes = {key : self.serialize(value) for key,value in set_attributes.items()}

    if update_obj:
        for key,value in set_attributes.items():
            set_value(obj,key,value)
        for key in unset_attributes:
            delete_value(obj,key)

    update_dict = {}
    if set_attributes:
        update_dict['$set'] = set_attributes
    if unset_attributes:
        update_dict['$unset'] = {key : '' for key in unset_attributes}

    if not update_dict:
        return #nothing to do...

    if self.autocommit:
        self.db[collection].update({'_id': obj.pk}, update_dict)
    else:
        if obj.pk in self._delete_cache[collection]:
            raise obj.DoesNotExist("update() on document that is marked for deletion!")
        if obj.pk in self._update_cache[collection]:
            update_cache = self._update_cache[collection][obj.pk]
            if set_attributes:
                if '$set' not in update_cache:
                    update_cache['$set'] = {}
                for key, value in set_attributes.items():
                    if '$unset' in update_cache and key in update_cache['$unset']:
                        del update_cache['$unset'][key]
                    update_cache['$set'][key] = value
            if unset_attributes:
                if '$unset' not in update_cache:
                    update_cache['$unset'] = {}
                for key in unset_attributes:
                    if '$set' in update_cache and key in update_cache['$set']:
                        del update_cache['$set'][key]
                    update_cache['$unset'][key] = ''
        else:
            self._update_cache[collection][obj.pk] = update_dict
KeyError
dataset/ETHPy150Open adewes/blitzdb/blitzdb/backends/mongo/backend.py/Backend.update
def set_key(self, key, value):
    """
    Set the given ``key`` to the given ``value``. Handles nested
    keys, e.g.::

        d = AttrDict()
        d.set_key('foo.bar', 1)
        d.foo.bar == 1  # True

    """
    if '.' in key:
        key, remainder = key.split('.', 1)
        try:
            self[key].set_key(remainder, value)
        except KeyError:
            self[key] = AttrDict()
            self[key].set_key(remainder, value)
        except __HOLE__:
            if self[key] is None:  # If the value is None, we replace it
                self[key] = AttrDict()
                self[key].set_key(remainder, value)
            # Else there is probably something there, and we don't just
            # want to overwrite so stop and warn the user
            else:
                raise KeyError('Cannot set nested key on non-dict key.')
    else:
        self[key] = value
AttributeError
dataset/ETHPy150Open calliope-project/calliope/calliope/utils.py/AttrDict.set_key
def get_key(self, key, default=_MISSING):
    """
    Looks up the given ``key``. Like set_key(), deals with nested
    keys.

    If default is anything but ``_MISSING``, the given default is
    returned if the key does not exist.

    """
    if '.' in key:
        # Nested key of form "foo.bar"
        key, remainder = key.split('.', 1)
        if default != _MISSING:
            try:
                value = self[key].get_key(remainder, default)
            except KeyError:
                # subdict exists, but doesn't contain key
                return default
            except __HOLE__:
                # key points to non-dict thing, so no get_key attribute
                return default
        else:
            value = self[key].get_key(remainder)
    else:
        # Single, non-nested key of form "foo"
        if default != _MISSING:
            return self.get(key, default)
        else:
            return self[key]
    return value
AttributeError
dataset/ETHPy150Open calliope-project/calliope/calliope/utils.py/AttrDict.get_key
def del_key(self, key):
    """Delete the given key. Properly deals with nested keys."""
    if '.' in key:
        key, remainder = key.split('.', 1)
        try:
            del self[key][remainder]
        except __HOLE__:
            self[key].del_key(remainder)
    else:
        del self[key]
KeyError
dataset/ETHPy150Open calliope-project/calliope/calliope/utils.py/AttrDict.del_key
def __call__(self, *args, **kw):
    obj = args[0]
    try:
        cache = obj.__cache
    except AttributeError:
        cache = obj.__cache = {}
    key = (self.func, args[1:], frozenset(list(kw.items())))
    try:
        res = cache[key]
    except __HOLE__:
        res = cache[key] = self.func(*args, **kw)
    return res
KeyError
dataset/ETHPy150Open calliope-project/calliope/calliope/utils.py/memoize_instancemethod.__call__
def option_getter(config_model, data):
    """Returns a get_option() function using the given config_model and data"""
    o = config_model
    d = data

    def get_option(option, x=None, default=None, ignore_inheritance=False):

        def _get_option(opt, fail=False):
            try:
                result = o.get_key('techs.' + opt)
            except KeyError:
                if ignore_inheritance:
                    return _get_option(default, fail)
                # 'ccgt.constraints.s_time' -> 'ccgt', 'constraints.s_time'
                tech, remainder = opt.split('.', 1)
                if ':' in tech:
                    parent = tech.split(':')[0]
                else:
                    # parent = e.g. 'defaults'
                    parent = o.get_key('techs.' + tech + '.parent')
                try:
                    result = _get_option(parent + '.' + remainder, fail)
                except KeyError:
                    e = exceptions.OptionNotSetError
                    if fail:
                        raise e('Failed to read option `{}` '
                                'with given default '
                                '`{}`'.format(option, default))
                    elif default:
                        result = _get_option(default, fail=True)
                    elif tech == 'defaults':
                        raise e('Reached top of inheritance chain '
                                'and no default defined for: '
                                '`{}`'.format(option))
                    else:
                        raise e('Can not get parent for `{}` '
                                'and no default defined '
                                '({}).'.format(tech, option))
            return result

        def _get_location_option(key, location):

            def getter(key, location):
                # NB: KeyErrors raised here are always caught within _get_option
                # so need no further information or handling
                # Raises KeyError if the specific _override column does not exist
                result = d.locations.at[location, '_override.' + key]
                # Also raise KeyError if the result is NaN, i.e. if no
                # location-specific override has been defined
                try:
                    if np.isnan(result):
                        raise KeyError
                # Have to catch this because np.isnan not implemented for strings
                except TypeError:
                    pass
                return result

            while True:
                try:
                    return getter(key, location)
                except __HOLE__:
                    parent_location = d.locations.at[location, '_within']
                    if parent_location:  # Will be None if no parent
                        return getter(key, parent_location)
                    else:
                        # Once top of "location inheritance" chain reached,
                        # raise KeyError, which will cause the calling function
                        # to fall back to non-location specific settings
                        raise

        if x:
            try:
                result = _get_location_option(option, x)
            # If can't find a location-specific option, fall back to model-wide
            except KeyError:
                result = _get_option(option)
        else:
            result = _get_option(option)
        # Deal with 'inf' settings
        if result == 'inf':
            result = float('inf')
        return result

    return get_option
KeyError
dataset/ETHPy150Open calliope-project/calliope/calliope/utils.py/option_getter
def _check_if_pyc(fname):
    """Return True if the extension is .pyc, False if .py
    and None if otherwise"""
    from imp import find_module
    from os.path import realpath, dirname, basename, splitext

    # Normalize the file-path for the find_module()
    filepath = realpath(fname)
    dirpath = dirname(filepath)
    module_name = splitext(basename(filepath))[0]

    # Validate and fetch
    try:
        fileobj, fullpath, (_, _, pytype) = find_module(module_name, [dirpath])
    except __HOLE__:
        raise IOError("Cannot find config file. "
                      "Path maybe incorrect! : {0}".format(filepath))
    return pytype, fileobj, fullpath
ImportError
dataset/ETHPy150Open benoitc/gunicorn/gunicorn/_compat.py/_check_if_pyc
def wrap_error(func, *args, **kw):
    """
    Wrap socket.error, IOError, OSError, select.error to raise new specialized
    exceptions of Python 3.3 like InterruptedError (PEP 3151).
    """
    try:
        return func(*args, **kw)
    except (socket.error, __HOLE__, OSError) as exc:
        if hasattr(exc, 'winerror'):
            _wrap_error(exc, _MAP_ERRNO, exc.winerror)
            # _MAP_ERRNO does not contain all Windows errors.
            # For some errors like "file not found", exc.errno should
            # be used (ex: ENOENT).
        _wrap_error(exc, _MAP_ERRNO, exc.errno)
        raise
    except select.error as exc:
        if exc.args:
            _wrap_error(exc, _MAP_ERRNO, exc.args[0])
        raise
IOError
dataset/ETHPy150Open benoitc/gunicorn/gunicorn/_compat.py/wrap_error
def get_version(self, paths=None, default="unknown"):
    """Get version number of installed module, 'None', or 'default'

    Search 'paths' for module.  If not found, return 'None'.  If found,
    return the extracted version attribute, or 'default' if no version
    attribute was specified, or the value cannot be determined without
    importing the module.  The version is formatted according to the
    requirement's version format (if any), unless it is 'None' or the
    supplied 'default'.
    """

    if self.attribute is None:
        try:
            f,p,i = find_module(self.module,paths)
            if f: f.close()
            return default
        except __HOLE__:
            return None

    v = get_module_constant(self.module,self.attribute,default,paths)

    if v is not None and v is not default and self.format is not None:
        return self.format(v)

    return v
ImportError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/setuptools-0.6c11/setuptools/depends.py/Require.get_version
def get_module_constant(module, symbol, default=-1, paths=None):
    """Find 'module' by searching 'paths', and extract 'symbol'

    Return 'None' if 'module' does not exist on 'paths', or it does not define
    'symbol'.  If the module defines 'symbol' as a constant, return the
    constant.  Otherwise, return 'default'."""

    try:
        f, path, (suffix,mode,kind) = find_module(module,paths)
    except __HOLE__:
        # Module doesn't exist
        return None

    try:
        if kind==PY_COMPILED:
            f.read(8)   # skip magic & date
            code = marshal.load(f)
        elif kind==PY_FROZEN:
            code = imp.get_frozen_object(module)
        elif kind==PY_SOURCE:
            code = compile(f.read(), path, 'exec')
        else:
            # Not something we can parse; we'll have to import it.  :(
            if module not in sys.modules:
                imp.load_module(module,f,path,(suffix,mode,kind))
            return getattr(sys.modules[module],symbol,None)
    finally:
        if f:
            f.close()

    return extract_constant(code,symbol,default)
ImportError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/setuptools-0.6c11/setuptools/depends.py/get_module_constant
def _init_aliases(ctx):
    for alias, value in ctx._aliases.items():
        try:
            setattr(ctx, alias, getattr(ctx, value))
        except __HOLE__:
            pass
AttributeError
dataset/ETHPy150Open fredrik-johansson/mpmath/mpmath/ctx_base.py/StandardBaseContext._init_aliases
def chop(ctx, x, tol=None):
    """
    Chops off small real or imaginary parts, or converts
    numbers close to zero to exact zeros. The input can be a
    single number or an iterable::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = False
        >>> chop(5+1e-10j, tol=1e-9)
        mpf('5.0')
        >>> nprint(chop([1.0, 1e-20, 3+1e-18j, -4, 2]))
        [1.0, 0.0, 3.0, -4.0, 2.0]

    The tolerance defaults to ``100*eps``.
    """
    if tol is None:
        tol = 100*ctx.eps
    try:
        x = ctx.convert(x)
        absx = abs(x)
        if abs(x) < tol:
            return ctx.zero
        if ctx._is_complex_type(x):
            #part_tol = min(tol, absx*tol)
            part_tol = max(tol, absx*tol)
            if abs(x.imag) < part_tol:
                return x.real
            if abs(x.real) < part_tol:
                return ctx.mpc(0, x.imag)
    except __HOLE__:
        if isinstance(x, ctx.matrix):
            return x.apply(lambda a: ctx.chop(a, tol))
        if hasattr(x, "__iter__"):
            return [ctx.chop(a, tol) for a in x]
    return x
TypeError
dataset/ETHPy150Open fredrik-johansson/mpmath/mpmath/ctx_base.py/StandardBaseContext.chop
def __init__(self, *args, **kwargs):
    super(NoArgsCommand, self).__init__(*args, **kwargs)
    self.copied_files = []
    self.symlinked_files = []
    self.unmodified_files = []
    self.post_processed_files = []
    self.storage = storage.staticfiles_storage
    try:
        self.storage.path('')
    except __HOLE__:
        self.local = False
    else:
        self.local = True
    # Use ints for file times (ticket #14665), if supported
    if hasattr(os, 'stat_float_times'):
        os.stat_float_times(False)
NotImplementedError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/django/contrib/staticfiles/management/commands/collectstatic.py/Command.__init__
def delete_file(self, path, prefixed_path, source_storage):
    """
    Checks if the target file should be deleted if it already exists
    """
    if self.storage.exists(prefixed_path):
        try:
            # When was the target file modified last time?
            target_last_modified = \
                self.storage.modified_time(prefixed_path)
        except (__HOLE__, NotImplementedError, AttributeError):
            # The storage doesn't support ``modified_time`` or failed
            pass
        else:
            try:
                # When was the source file modified last time?
                source_last_modified = source_storage.modified_time(path)
            except (OSError, NotImplementedError, AttributeError):
                pass
            else:
                # The full path of the target file
                if self.local:
                    full_path = self.storage.path(prefixed_path)
                else:
                    full_path = None
                # Skip the file if the source file is younger
                if target_last_modified >= source_last_modified:
                    if not ((self.symlink and full_path
                             and not os.path.islink(full_path)) or
                            (not self.symlink and full_path
                             and os.path.islink(full_path))):
                        if prefixed_path not in self.unmodified_files:
                            self.unmodified_files.append(prefixed_path)
                        self.log(u"Skipping '%s' (not modified)" % path)
                        return False
        # Then delete the existing file if really needed
        if self.dry_run:
            self.log(u"Pretending to delete '%s'" % path)
        else:
            self.log(u"Deleting '%s'" % path)
            self.storage.delete(prefixed_path)
    return True
OSError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/django/contrib/staticfiles/management/commands/collectstatic.py/Command.delete_file
def link_file(self, path, prefixed_path, source_storage):
    """
    Attempt to link ``path``
    """
    # Skip this file if it was already copied earlier
    if prefixed_path in self.symlinked_files:
        return self.log(u"Skipping '%s' (already linked earlier)" % path)
    # Delete the target file if needed or break
    if not self.delete_file(path, prefixed_path, source_storage):
        return
    # The full path of the source file
    source_path = source_storage.path(path)
    # Finally link the file
    if self.dry_run:
        self.log(u"Pretending to link '%s'" % source_path, level=1)
    else:
        self.log(u"Linking '%s'" % source_path, level=1)
        full_path = self.storage.path(prefixed_path)
        try:
            os.makedirs(os.path.dirname(full_path))
        except __HOLE__:
            pass
        os.symlink(source_path, full_path)
    if prefixed_path not in self.symlinked_files:
        self.symlinked_files.append(prefixed_path)
OSError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/django/contrib/staticfiles/management/commands/collectstatic.py/Command.link_file
def copy_file(self, path, prefixed_path, source_storage):
    """
    Attempt to copy ``path`` with storage
    """
    # Skip this file if it was already copied earlier
    if prefixed_path in self.copied_files:
        return self.log(u"Skipping '%s' (already copied earlier)" % path)
    # Delete the target file if needed or break
    if not self.delete_file(path, prefixed_path, source_storage):
        return
    # The full path of the source file
    source_path = source_storage.path(path)
    # Finally start copying
    if self.dry_run:
        self.log(u"Pretending to copy '%s'" % source_path, level=1)
    else:
        self.log(u"Copying '%s'" % source_path, level=1)
        if self.local:
            full_path = self.storage.path(prefixed_path)
            try:
                os.makedirs(os.path.dirname(full_path))
            except __HOLE__:
                pass
        with source_storage.open(path) as source_file:
            self.storage.save(prefixed_path, source_file)
    if not prefixed_path in self.copied_files:
        self.copied_files.append(prefixed_path)
OSError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/django/contrib/staticfiles/management/commands/collectstatic.py/Command.copy_file
def query_api(self, data=None, endpoint='SMS'):
    url = self.api_url % endpoint
    if data:
        conn = Request(url, b(urlencode(data)))
    else:
        conn = Request(url)
    auth = b('Basic ') + b64encode(b(self.username + ':' + self.password))
    conn.add_header('Authorization', auth)

    try:
        response = urlopen(conn)
    except __HOLE__ as err:
        print(err)
        print("\nSending didn't succeed :(")
        exit(-2)
    return response.read()
HTTPError
dataset/ETHPy150Open 46elks/elkme/elkme/elks.py/Elks.query_api
def pprint_task(task, keys, label_size=60):
    """Return a nicely formatted string for a task.

    Parameters
    ----------
    task:
        Value within dask graph to render as text
    keys: iterable
        List of keys within dask graph
    label_size: int (optional)
        Maximum size of output label, defaults to 60

    Examples
    --------
    >>> from operator import add, mul
    >>> dsk = {'a': 1,
    ...        'b': 2,
    ...        'c': (add, 'a', 'b'),
    ...        'd': (add, (mul, 'a', 'b'), 'c'),
    ...        'e': (sum, ['a', 'b', 5]),
    ...        'f': (add,),
    ...        'g': []}

    >>> pprint_task(dsk['c'], dsk)
    'add(_, _)'
    >>> pprint_task(dsk['d'], dsk)
    'add(mul(_, _), _)'
    >>> pprint_task(dsk['e'], dsk)
    'sum([_, _, *])'
    >>> pprint_task(dsk['f'], dsk)
    'add()'
    >>> pprint_task(dsk['g'], dsk)
    '[]'
    """
    if istask(task):
        func = task[0]
        if func is apply:
            head = funcname(task[1])
            tail = ')'
            args = unquote(task[2]) if len(task) > 2 else ()
            kwargs = unquote(task[3]) if len(task) > 3 else {}
        else:
            if hasattr(func, 'funcs'):
                head = '('.join(funcname(f) for f in func.funcs)
                tail = ')'*len(func.funcs)
            else:
                head = funcname(task[0])
                tail = ')'
            args = task[1:]
            kwargs = {}
        if args or kwargs:
            label_size2 = int((label_size - len(head) - len(tail)) //
                              (len(args) + len(kwargs)))
            pprint = lambda t: pprint_task(t, keys, label_size2)
        if args:
            if label_size2 > 5:
                args = ', '.join(pprint(t) for t in args)
            else:
                args = '...'
        else:
            args = ''
        if kwargs:
            if label_size2 > 5:
                kwargs = ', ' + ', '.join('{0}={1}'.format(k, pprint(v))
                                          for k, v in sorted(kwargs.items()))
            else:
                kwargs = ', ...'
        else:
            kwargs = ''
        return '{0}({1}{2}{3}'.format(head, args, kwargs, tail)
    elif isinstance(task, list):
        if not task:
            return '[]'
        elif len(task) > 3:
            result = pprint_task(task[:3], keys, label_size)
            return result[:-1] + ', ...]'
        else:
            label_size2 = int((label_size - 2 - 2*len(task)) // len(task))
            args = ', '.join(pprint_task(t, keys, label_size2) for t in task)
            return '[{0}]'.format(args)
    else:
        try:
            if task in keys:
                return '_'
            else:
                return '*'
        except __HOLE__:
            return '*'
TypeError
dataset/ETHPy150Open dask/dask/dask/diagnostics/profile_visualize.py/pprint_task
def send_to_able(self, method, args={}, to=None, **kwargs):
    actor = None
    try:
        actor = self.lookup(to)
    except __HOLE__:
        raise self.NoRouteError(to)

    if actor:
        return self.send(method, args, to=actor, **kwargs)

    r = self.scatter(method, args, propagate=True, **kwargs)
    if r:
        return first_or_raise(r, self.NoRouteError(to))
KeyError
dataset/ETHPy150Open celery/cell/cell/presence.py/AwareActorMixin.send_to_able
def call_center_location_owner(user, ancestor_level):
    if user.location_id is None:
        return ""
    if ancestor_level == 0:
        owner_id = user.location_id
    else:
        location = SQLLocation.objects.get(location_id=user.location_id)
        ancestors = location.get_ancestors(ascending=True, include_self=True).only("location_id")
        try:
            owner_id = ancestors[ancestor_level].location_id
        except __HOLE__:
            owner_id = ancestors.last().location_id
    return owner_id
IndexError
dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/callcenter/utils.py/call_center_location_owner
def start():
    '''
    Start the saltnado!
    '''
    try:
        from . import saltnado
    except __HOLE__ as err:
        logger.error('ImportError! {0}'.format(str(err)))
        return None

    mod_opts = __opts__.get(__virtualname__, {})

    if 'num_processes' not in mod_opts:
        mod_opts['num_processes'] = 1

    if mod_opts['num_processes'] > 1 and mod_opts.get('debug', False) is True:
        raise Exception((
            'Tornado\'s debug implementation is not compatible with multiprocess. '
            'Either disable debug, or set num_processes to 1.'
        ))

    paths = [
        (r"/", saltnado.SaltAPIHandler),
        (r"/login", saltnado.SaltAuthHandler),
        (r"/minions/(.*)", saltnado.MinionSaltAPIHandler),
        (r"/minions", saltnado.MinionSaltAPIHandler),
        (r"/jobs/(.*)", saltnado.JobsSaltAPIHandler),
        (r"/jobs", saltnado.JobsSaltAPIHandler),
        (r"/run", saltnado.RunSaltAPIHandler),
        (r"/events", saltnado.EventsSaltAPIHandler),
        (r"/hook(/.*)?", saltnado.WebhookSaltAPIHandler),
    ]

    # if you have enabled websockets, add them!
    if mod_opts.get('websockets', False):
        from . import saltnado_websockets

        token_pattern = r"([0-9A-Fa-f]{{{0}}})".format(
            len(getattr(hashlib, __opts__.get('hash_type', 'md5'))().hexdigest()))
        all_events_pattern = r"/all_events/{0}".format(token_pattern)
        formatted_events_pattern = r"/formatted_events/{0}".format(token_pattern)
        logger.debug("All events URL pattern is {0}".format(all_events_pattern))
        paths += [
            # Matches /all_events/[0-9A-Fa-f]{n}
            # Where n is the length of hexdigest
            # for the current hashing algorithm.
            # This algorithm is specified in the
            # salt master config file.
            (all_events_pattern, saltnado_websockets.AllEventsHandler),
            (formatted_events_pattern, saltnado_websockets.FormattedEventsHandler),
        ]

    application = tornado.web.Application(paths, debug=mod_opts.get('debug', False))

    application.opts = __opts__
    application.mod_opts = mod_opts
    application.auth = salt.auth.LoadAuth(__opts__)

    # the kwargs for the HTTPServer
    kwargs = {}
    if not mod_opts.get('disable_ssl', False):
        if 'ssl_crt' not in mod_opts:
            logger.error("Not starting '%s'. Options 'ssl_crt' and "
                         "'ssl_key' are required if SSL is not disabled.",
                         __name__)
            return None
        # cert is required, key may be optional
        # https://docs.python.org/2/library/ssl.html#ssl.wrap_socket
        ssl_opts = {'certfile': mod_opts['ssl_crt']}
        if mod_opts.get('ssl_key', False):
            ssl_opts.update({'keyfile': mod_opts['ssl_key']})
        kwargs['ssl_options'] = ssl_opts

    http_server = tornado.httpserver.HTTPServer(application, **kwargs)
    try:
        http_server.bind(mod_opts['port'],
                         address=mod_opts.get('address'),
                         backlog=mod_opts.get('backlog', 128),
                         )
        http_server.start(mod_opts['num_processes'])
    except:
        logger.error('Rest_tornado unable to bind to port {0}'.format(mod_opts['port']),
                     exc_info=True)
        raise SystemExit(1)

    try:
        tornado.ioloop.IOLoop.instance().start()
    except KeyboardInterrupt:
        raise SystemExit(0)
ImportError
dataset/ETHPy150Open saltstack/salt/salt/netapi/rest_tornado/__init__.py/start
@classmethod
def MemcacheWrappedGet(
        cls, key_name, prop_name=None, memcache_secs=MEMCACHE_SECS, retry=False):
    """Fetches an entity by key name from model wrapped by Memcache.

    Args:
        key_name: str key name of the entity to fetch.
        prop_name: optional property name to return the value for instead of
            returning the entire entity.
        memcache_secs: int seconds to store in memcache; default MEMCACHE_SECS.
        retry: bool, default False, if this is a retry (2nd attempt) to
            MemcacheWrappedGet the entity.
    Returns:
        If an entity for key_name exists,
        if prop_name == None returns the db.Model entity,
        otherwise only returns the prop_name property value on entity.
        If an entity for key_name does not exist, returns None.
    """
    output = None

    if prop_name:
        memcache_key = 'mwgpn_%s_%s_%s' % (cls.kind(), key_name, prop_name)
    else:
        memcache_key = 'mwg_%s_%s' % (cls.kind(), key_name)

    cached = memcache.get(memcache_key)

    if cached is None:
        entity = cls.get_by_key_name(key_name)
        if not entity:
            return

        if prop_name:
            try:
                output = getattr(entity, prop_name)
            except AttributeError:
                logging.error(
                    'Retrieving missing property %s on %s',
                    prop_name, entity.__class__.__name__)
                return
            to_cache = output
        else:
            output = entity
            to_cache = db.model_to_protobuf(entity).SerializeToString()

        try:
            memcache.set(memcache_key, to_cache, memcache_secs)
        except __HOLE__, e:
            logging.warning(
                'MemcacheWrappedGet: failure to memcache.set(%s, ...): %s',
                memcache_key, str(e))
    else:
        if prop_name:
            output = cached
        else:
            try:
                output = db.model_from_protobuf(cached)
            except Exception, e:  # pylint: disable=broad-except
                # NOTE(user): I copied this exception trap style from
                # google.appengine.datastore.datatstore_query. The notes indicate
                # that trapping this exception by the class itself is problematic
                # due to differences between the Python and SWIG'd exception
                # classes.
                output = None
                memcache.delete(memcache_key)
                if e.__class__.__name__ == 'ProtocolBufferDecodeError':
                    logging.warning('Invalid protobuf at key %s', key_name)
                elif retry:
                    logging.exception('Unexpected exception in MemcacheWrappedGet')

                if not retry:
                    return cls.MemcacheWrappedGet(
                        key_name, prop_name=prop_name, memcache_secs=memcache_secs,
                        retry=True)
                else:
                    return cls.get_by_key_name(key_name)

    return output
ValueError
dataset/ETHPy150Open google/simian/src/simian/mac/models/base.py/BaseModel.MemcacheWrappedGet
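Stripped of the App Engine and protobuf specifics, MemcacheWrappedGet is a fetch-through cache with exactly one retry when a cached value fails to decode, after which it bypasses the cache entirely. A minimal dict-backed sketch of that control flow (fetch and cache are stand-ins, not GAE APIs; a None entry stands in for a value that failed to deserialize):

import logging

def cached_fetch(key, fetch, cache, retry=False):
    # Miss: load from the source of truth and memoize.
    if key not in cache:
        cache[key] = fetch(key)
        return cache[key]
    value = cache[key]
    if value is None:  # "corrupt" entry: purge it and retry once
        del cache[key]
        if retry:
            logging.warning('cache for %r corrupt twice; bypassing it', key)
            return fetch(key)
        return cached_fetch(key, fetch, cache, retry=True)
    return value

cache = {'b': None}
print(cached_fetch('a', lambda k: k.upper(), cache))  # 'A' (miss, then memoized)
print(cached_fetch('b', lambda k: k.upper(), cache))  # 'B' (corrupt entry purged)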
def _SetForceInstallAfterDateStr(self, str_dt): """Sets the force_install_after_date property from a string.""" try: dt = datetime.datetime.strptime(str_dt, '%Y-%m-%d %H:%M') except ValueError: try: dt = datetime.datetime.strptime('%s 13:00' % (str_dt), '%Y-%m-%d %H:%M') except __HOLE__: raise self.force_install_after_date = dt
ValueError
dataset/ETHPy150Open google/simian/src/simian/mac/models/base.py/AppleSUSProduct._SetForceInstallAfterDateStr
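The nested strptime calls implement a "strict format first, then append a default time" fallback; only if both formats fail does the ValueError propagate. The same chain in isolation:

import datetime

def parse_when(text):
    # Accept 'YYYY-MM-DD HH:MM', or a bare 'YYYY-MM-DD' which defaults
    # to 13:00, mirroring the fallback chain above.
    try:
        return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M')
    except ValueError:
        return datetime.datetime.strptime('%s 13:00' % (text), '%Y-%m-%d %H:%M')

print(parse_when('2013-06-16 07:30'))  # 2013-06-16 07:30:00
print(parse_when('2013-06-16'))        # 2013-06-16 13:00:00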
def __contains__(self, key): with self._lock: try: self._load_key(key) except __HOLE__: pass return key in self._local
KeyError
dataset/ETHPy150Open pallets/werkzeug/examples/cupoftee/db.py/Database.__contains__
def setdefault(self, key, factory): with self._lock: try: rv = self._load_key(key) except __HOLE__: self._local[key] = rv = factory() return rv
KeyError
dataset/ETHPy150Open pallets/werkzeug/examples/cupoftee/db.py/Database.setdefault
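Both Database methods above share one shape: take the lock, try to promote a key from the slow layer into the local dict, and treat KeyError as "absent". A self-contained sketch with a plain dict standing in for the on-disk layer:

import threading

class TinyDB(object):
    def __init__(self, persisted=None):
        self._lock = threading.Lock()
        self._local = {}
        self._persisted = dict(persisted or {})

    def _load_key(self, key):
        # Promote from the persisted layer; KeyError means truly absent.
        if key not in self._local:
            self._local[key] = self._persisted[key]
        return self._local[key]

    def setdefault(self, key, factory):
        with self._lock:
            try:
                return self._load_key(key)
            except KeyError:
                self._local[key] = rv = factory()
                return rv

db = TinyDB({'a': 1})
print(db.setdefault('a', list))  # 1, promoted from the persisted layer
print(db.setdefault('b', list))  # [], built by the factory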
def list(self): try: files = os.listdir(self.folder) except __HOLE__: files = [] return files
IOError
dataset/ETHPy150Open dokipen/whoosh/src/whoosh/filedb/filestore.py/FileStorage.list
def run(self): mtimes = {} while 1: for filename in chain(_iter_module_files(), self.extra_files): try: mtime = os.stat(filename).st_mtime except __HOLE__: continue old_time = mtimes.get(filename) if old_time is None: mtimes[filename] = mtime continue elif mtime > old_time: self.trigger_reload(filename) self._sleep(self.interval)
OSError
dataset/ETHPy150Open GoogleCloudPlatform/appengine-flask-skeleton/lib/werkzeug/_reloader.py/StatReloaderLoop.run
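Underneath the reloader class, the stat strategy is just an mtime-diff poll; files that vanish mid-scan are skipped via the OSError guard, and a file's first sighting seeds the baseline. A standalone sketch (the original triggers a process reload on change; this version invokes a callback and keeps polling):

import os
import time

def watch(paths, on_change, interval=1.0, rounds=3):
    mtimes = {}
    for _ in range(rounds):
        for filename in paths:
            try:
                mtime = os.stat(filename).st_mtime
            except OSError:
                continue  # vanished or unreadable; try again next round
            old_time = mtimes.get(filename)
            if old_time is None:
                mtimes[filename] = mtime  # first sighting seeds the baseline
            elif mtime > old_time:
                mtimes[filename] = mtime
                on_change(filename)
        time.sleep(interval)

# watch(['app.py'], lambda f: print('%s changed' % f))  # example invocation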
def run(self): watches = {} observer = self.observer_class() observer.start() while not self.should_reload: to_delete = set(watches) paths = _find_observable_paths(self.extra_files) for path in paths: if path not in watches: try: watches[path] = observer.schedule( self.event_handler, path, recursive=True) except __HOLE__ as e: message = str(e) if message != "Path is not a directory": # Log the exception _log('error', message) # Clear this path from list of watches We don't want # the same error message showing again in the next # iteration. watches[path] = None to_delete.discard(path) for path in to_delete: watch = watches.pop(path, None) if watch is not None: observer.unschedule(watch) self.observable_paths = paths self._sleep(self.interval) sys.exit(3)
OSError
dataset/ETHPy150Open GoogleCloudPlatform/appengine-flask-skeleton/lib/werkzeug/_reloader.py/WatchdogReloaderLoop.run
def run_with_reloader(main_func, extra_files=None, interval=1, reloader_type='auto'): """Run the given function in an independent python interpreter.""" import signal reloader = reloader_loops[reloader_type](extra_files, interval) signal.signal(signal.SIGTERM, lambda *args: sys.exit(0)) try: if os.environ.get('WERKZEUG_RUN_MAIN') == 'true': t = threading.Thread(target=main_func, args=()) t.setDaemon(True) t.start() reloader.run() else: sys.exit(reloader.restart_with_reloader()) except __HOLE__: pass
KeyboardInterrupt
dataset/ETHPy150Open GoogleCloudPlatform/appengine-flask-skeleton/lib/werkzeug/_reloader.py/run_with_reloader
def get_status(name): """Get the status of an instance. Args: name: Instance name Returns: A tuple (state, extra_info). extra_info is a string only used when the instance is broken somehow. This won't ever report HADOOP_READY; that's known when we start a Hadoop daemon ourselves. """ # Do getinstance first. Trying to poke the agent on a STAGING box just times # out, so such stages are never observed otherwise try: data = api.get_instance(name) if data.status == 'RUNNING': # Now try talking to their agent address = name_to_ip(name, data=data) if cfg.ip_via_api else name response = talk_to_agent(address, '/status') if response is not None: state = response.get('state', '') if state == 'READY': return (InstanceState.SNITCH_READY, None) elif state != 'STARTING': msg = 'snitch reported {0}'.format(response['state']) logging.warn('%s: %s', name, msg) return (InstanceState.BROKEN, msg) return (InstanceState.RUNNING, None) elif data.status == 'PROVISIONING': return (InstanceState.PROVISIONING, None) elif data.status == 'STAGING': return (InstanceState.STAGING, None) else: msg = 'instance is {0}'.format(data.status) logging.warn('%s: %s', name, data.status) return (InstanceState.BROKEN, msg) except __HOLE__: return (InstanceState.NON_EXISTENT, None) # Communication
ValueError
dataset/ETHPy150Open GoogleCloudPlatform/compute-hadoop-java-python/util.py/get_status
def name_to_ip(name, data=None): """Do a DNS lookup using the Compute API. Args: name: instance name data: the result from calling getinstance, if the caller already has it. Returns: An IP address, unless some error is raised. """ if name in ip_cache: return ip_cache[name] else: if data is None: try: data = api.get_instance(name) except __HOLE__: # This instance does not exist return None ip = data.networkInterfaces[0].accessConfigs[0].natIP ip_cache[name] = ip return ip
ValueError
dataset/ETHPy150Open GoogleCloudPlatform/compute-hadoop-java-python/util.py/name_to_ip
def talk_to_agent(address, method, data=None): """Make a REST call. These are described in docs/API. Args: address: IP address from name_to_ip() or a hostname (if called from an instance) method: the HTTP call to make, should include the leading / data: a Python dictionary; caller must JSONify things themselves. Returns: The reply, which will be a de-JSONified dictionary. """ try: url = 'https://{0}:{1}{2}'.format(address, cfg.port, method) # The coordinator's certificate is self-signed, so we cannot verify we are # talking to the "correct" coordinator. Eavesdropping is not a problem, but # man-in-the-middle attacks could be. http = httplib2.Http(disable_ssl_certificate_validation=True, timeout=5) if data is None: # GET return json.loads(http.request(url, 'GET')[1]) else: # POST return json.loads(http.request(url, 'POST', urllib.urlencode(data))[1]) except (httplib2.HttpLib2Error, socket.error, __HOLE__): return None
ValueError
dataset/ETHPy150Open GoogleCloudPlatform/compute-hadoop-java-python/util.py/talk_to_agent
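talk_to_agent collapses transport errors, socket timeouts, and JSON decode failures into a single None return, so callers make one check instead of handling several exception types. The same shape using only the standard library (URL and timeout here are illustrative):

import json
import socket
try:
    from urllib.request import urlopen          # Python 3
    from urllib.error import URLError
except ImportError:
    from urllib2 import urlopen, URLError       # Python 2

def fetch_json(url, timeout=5):
    # Any failure mode -- connection, timeout, bad JSON -- reads as None.
    try:
        return json.loads(urlopen(url, timeout=timeout).read().decode('utf-8'))
    except (URLError, socket.error, ValueError):
        return None

print(fetch_json('http://localhost:1/status'))  # None: nothing listening there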
def typifyFields(fields) : primary_fields = [] data_model = [] for definition in fields : try : field_type = definition['type'] except __HOLE__ : raise TypeError("Incorrect field specification: field " "specifications are dictionaries that must " "include a type definition, ex. " "{'field' : 'Phone', type: 'String'}") except KeyError : raise KeyError("Missing field type: fields " "specifications are dictionaries that must " "include a type definition, ex. " "{'field' : 'Phone', type: 'String'}") if field_type == 'Interaction' : continue if field_type == 'FuzzyCategorical' and 'other fields' not in definition : definition['other fields'] = [d['field'] for d in fields if 'field' in d and d['field'] != definition['field']] try : field_class = FIELD_CLASSES[field_type] except KeyError : raise KeyError("Field type %s not valid. Valid types include %s" % (definition['type'], ', '.join(FIELD_CLASSES))) field_object = field_class(definition) primary_fields.append(field_object) if hasattr(field_object, 'higher_vars') : data_model.extend(field_object.higher_vars) else : data_model.append(field_object) return primary_fields, data_model
TypeError
dataset/ETHPy150Open datamade/dedupe/dedupe/datamodel.py/typifyFields
def __call__(self, container): np_key = container.getNonPersistentKey() try: old_container = self.common.non_persistant_pointer_lookup[np_key] except KeyError: old_container = None if old_container is not None: try: del self.common.cache_lookup[old_container.getCacheKey()].cache[old_container.getObjectKey()] except __HOLE__: pass self.common.non_persistant_pointer_lookup[np_key] = container # This class holds the runtime environment for the pnodes
KeyError
dataset/ETHPy150Open hoytak/lazyrunner/lazyrunner/pnstructures.py/_PNodeNonPersistentDeleter.__call__
def decreaseResultReference(self): assert self.result_reference_count >= 1 self.result_reference_count -= 1 assert self.module_reference_count <= self.result_reference_count if self.result_reference_count == 0: try: del self.results_container except __HOLE__: pass self.dropUnneededReferences()
AttributeError
dataset/ETHPy150Open hoytak/lazyrunner/lazyrunner/pnstructures.py/PNode.decreaseResultReference
def _reportResults(self, results):

        if not self.results_reported:

            try:
                self.p_class.reportResults(self.parameters, self.parameters[self.name], results)
            except TypeError, te:
                rrf = self.p_class.reportResults

                def raiseTypeError():
                    raise TypeError(("reportResults method in '%s' must be @classmethod "
                                    "and take global parameter tree, local parameter tree, "
                                    "and result tree as arguments.") % self.name)

                # See if it was due to an incompatible signature
                from robust_inspect import getcallargs

                try:
                    getcallargs(rrf, self.parameters, self.parameters[self.name], results)
                except __HOLE__:
                    raiseTypeError()

                # Well, that wasn't the issue, so it's something internal; re-raise
                raise

        self.results_reported = True
TypeError
dataset/ETHPy150Open hoytak/lazyrunner/lazyrunner/pnstructures.py/PNode._reportResults
def getStream(self): from PIL import Image, ImageDraw # PIL dependency # Create an image and draw something on it. image = Image.new("RGB", (270, 270)) drawable = ImageDraw.Draw(image) drawable.rectangle([0, 0, 270, 270], fill=str(Color.BLUE)) drawable.rectangle([1, 1, 268, 268], fill=str(self._bgColor)) drawable.ellipse([25, 25, 245, 245], fill=str(self._fgColor)) drawable.text((50, 100), 'r=' + str(self._fgColor.getRed()) + ',g=' + str(self._fgColor.getGreen()) + ',b=' + str(self._fgColor.getBlue()), fill=str(Color.BLACK)) drawable.text((5, 15), 'r=' + str(self._bgColor.getRed()) + ',g=' + str(self._bgColor.getGreen()) + ',b=' + str(self._bgColor.getBlue()), fill=str(Color.BLACK)) del drawable try: # Write the image to a buffer. self._imagebuffer = StringIO() image.save(self._imagebuffer, 'PNG') return self._imagebuffer except __HOLE__: return None
IOError
dataset/ETHPy150Open rwl/muntjac/muntjac/addon/colorpicker/color_picker_application.py/MyImageSource.getStream
def __init__(self, command): self.args = self._split_command_line(command) self.command = self.args[0] self.exitStatus = -1 try: self.pid, self.child_fd = pty.fork() except __HOLE__, e: raise Exception("Unable to fork") if self.pid == 0: self.child_fd = sys.stdout.fileno() os.execvp(self.command, self.args)
OSError
dataset/ETHPy150Open joehewitt/devon/devon/spawn.py/SpawnPty.__init__
def read(self): r, w, e = select.select([self.child_fd], [], [], 30) if not r: return "" if self.child_fd in r: try: txt = os.read(self.child_fd, 1000) except __HOLE__: # XXXblake Not sure why this happens on Unix txt = "" return txt
OSError
dataset/ETHPy150Open joehewitt/devon/devon/spawn.py/SpawnPty.read
@feature('download') def feature_download(tgen): ''' Download a file. ''' work_dir = tgen.make_node(tgen.worch.download_dir) target_filename = tgen.worch.download_target if not target_filename: target_filename = os.path.basename(tgen.worch.download_url) target_node = work_dir.make_node(target_filename) tgen.step('download_seturl', rule = "echo '%s' > %s" % (tgen.worch.download_url, tgen.worch.download_urlfile), update_outputs = True, target = tgen.worch.download_urlfile) def dl_task(task): src = task.inputs[0] tgt = task.outputs[0] url = src.read().strip() try: web = urlopen(url) tgt.write(web.read(),'wb') except Exception: import traceback traceback.print_exc() msg.error(tgen.worch.format("error downloading {download_url}")) raise checksum = tgen.worch.download_checksum if not checksum: return hasher_name, ref = checksum.split(":") import hashlib, os # FIXME: check the hasher method exists. check for typos. hasher = getattr(hashlib, hasher_name)() hasher.update(tgt.read('rb')) data= hasher.hexdigest() if data != ref: msg.error(tgen.worch.format("invalid checksum:\nref: %s\nnew: %s" %\ (ref, data))) try: os.remove(tgt.abspath()) except __HOLE__: pass return 1 return tgen.step('download', rule = dl_task, source = tgen.worch.download_urlfile, target = target_node, cwd = work_dir.abspath()) return
IOError
dataset/ETHPy150Open hwaf/hwaf/py-hwaftools/orch/features/feature_download.py/feature_download
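The checksum branch hands off to hashlib via getattr, so the config string 'algo:hexdigest' selects the algorithm by name. That verification step in isolation (the empty-input sha1 digest below is the well-known constant):

import hashlib

def verify_checksum(data, checksum):
    # checksum has the form 'algo:hexdigest', e.g. 'sha1:da39...'.
    # An unknown algorithm name raises AttributeError from getattr.
    hasher_name, ref = checksum.split(':')
    hasher = getattr(hashlib, hasher_name)()
    hasher.update(data)
    return hasher.hexdigest() == ref

print(verify_checksum(b'', 'sha1:da39a3ee5e6b4b0d3255bfef95601890afd80709'))  # True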
def _unwrap(self, ret_fun, err_fun):
    """Iterate over the options in the choice type, and try to perform some
       action on them. If the action fails (returns None or raises either
       CoercionError or ValueError), then it goes on to the next type.

       Args:
         ret_fun: a function that takes a wrapped option value, and either returns a successful
           return value or fails.
         err_fun: a function that takes the unwrapped value of this choice, and generates
           an appropriate error.

       Returns: the return value from a successful invocation of ret_fun on one of the
       type options. If every invocation fails, then returns the value of invoking err_fun.
    """
    for opt in self.CHOICES:
      if isinstance(self._value, opt):
        return ret_fun(self._value)
      else:
        try:
          o = opt(self._value)
          ret = ret_fun(o)
          if ret:
            return ret
        except (self.CoercionError, __HOLE__):
          pass
    return err_fun(self._value)
ValueError
dataset/ETHPy150Open wickman/pystachio/pystachio/choice.py/ChoiceContainer._unwrap
def _make_client(self, parsed_url, options): # Creates a kazoo client, # See: https://github.com/python-zk/kazoo/blob/2.2.1/kazoo/client.py # for what options a client takes... maybe_hosts = [parsed_url.netloc] + list(options.get('hosts', [])) hosts = list(compat_filter(None, maybe_hosts)) if not hosts: hosts = ['localhost:2181'] randomize_hosts = options.get('randomize_hosts', True) client_kwargs = { 'hosts': ",".join(hosts), 'timeout': float(options.get('timeout', self.timeout)), 'connection_retry': options.get('connection_retry'), 'command_retry': options.get('command_retry'), 'randomize_hosts': strutils.bool_from_string(randomize_hosts), } handler_kind = options.get('handler') if handler_kind: try: handler_cls = self.HANDLERS[handler_kind] except __HOLE__: raise ValueError("Unknown handler '%s' requested" " valid handlers are %s" % (handler_kind, sorted(self.HANDLERS.keys()))) client_kwargs['handler'] = handler_cls() return client.KazooClient(**client_kwargs)
KeyError
dataset/ETHPy150Open openstack/tooz/tooz/drivers/zookeeper.py/KazooDriver._make_client
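Re-raising the registry KeyError as a ValueError that enumerates the valid names turns a cryptic lookup failure into a self-documenting one. The lookup in isolation (the handler classes here are placeholders):

HANDLERS = {
    'threading': object,  # placeholder handler classes
    'eventlet': object,
}

def resolve_handler(kind):
    try:
        return HANDLERS[kind]
    except KeyError:
        raise ValueError("Unknown handler '%s' requested,"
                         " valid handlers are %s"
                         % (kind, sorted(HANDLERS.keys())))

print(resolve_handler('threading'))
# resolve_handler('gevent')  # ValueError listing ['eventlet', 'threading']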
def handle_connection(self, conn): """ Handle an individual connection. """ input = conn.makefile("r") output = conn.makefile("w") environ = self.read_env(input) environ['wsgi.input'] = input environ['wsgi.errors'] = sys.stderr environ['wsgi.version'] = (1, 0) environ['wsgi.multithread'] = False environ['wsgi.multiprocess'] = True environ['wsgi.run_once'] = False # dunno how SCGI does HTTPS signalling; can't test it myself... @CTB if environ.get('HTTPS','off') in ('on','1'): environ['wsgi.url_scheme'] = 'https' else: environ['wsgi.url_scheme'] = 'http' ## SCGI does some weird environ manglement. We need to set ## SCRIPT_NAME from 'prefix' and then set PATH_INFO from ## REQUEST_URI. prefix = self.prefix path = environ['REQUEST_URI'][len(prefix):].split('?', 1)[0] environ['SCRIPT_NAME'] = prefix environ['PATH_INFO'] = path headers_set = [] headers_sent = [] chunks = [] def write(data): chunks.append(data) def start_response(status, response_headers, exc_info=None): if exc_info: try: if headers_sent: # Re-raise original exception if headers sent six.reraise(exc_info[0], exc_info[1], exc_info[2]) finally: exc_info = None # avoid dangling circular ref elif headers_set: raise AssertionError("Headers already set!") headers_set[:] = [status, response_headers] return write ### result = self.app_obj(environ, start_response) try: for data in result: chunks.append(data) # Before the first output, send the stored headers if not headers_set: # Error -- the app never called start_response status = '500 Server Error' response_headers = [('Content-type', 'text/html')] chunks = ["XXX start_response never called"] else: status, response_headers = headers_sent[:] = headers_set output.write('Status: %s\r\n' % status) for header in response_headers: output.write('%s: %s\r\n' % header) output.write('\r\n') for data in chunks: output.write(data) finally: if hasattr(result,'close'): result.close() # SCGI backends use connection closing to signal 'fini'. try: input.close() output.close() conn.close() except __HOLE__ as err: debug("IOError while closing connection ignored: %s" % err)
IOError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Paste-2.0.1/paste/util/scgiserver.py/SWAP.handle_connection
def read_sock(sock): buffers = [] while True: try: buffer = sock.recv(BUFFER_SIZE) except __HOLE__ as err: if err.errno != errno.EINTR: raise continue else: if not buffer: break buffers.append(buffer) return b''.join(buffers)
IOError
dataset/ETHPy150Open serverdensity/sd-agent-plugins/Uwsgi/Uwsgi.py/read_sock
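The errno.EINTR check re-issues a read that a signal interrupted; note that since PEP 475 (Python 3.5) the interpreter retries these calls itself, so the idiom matters mainly on older runtimes. The retry wrapper in isolation:

import errno

def retry_on_eintr(call):
    # Re-issue the call when a signal interrupts it; re-raise anything else.
    while True:
        try:
            return call()
        except IOError as err:
            if err.errno != errno.EINTR:
                raise

print(retry_on_eintr(lambda: b'chunk'))  # succeeds on the first attempt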
def get_studies_by_regions(dataset, masks, threshold=0.08,
        remove_overlap=True, studies=None,
        features=None, regularization="scale"):
    """ Set up data for a classification task given a set of masks
    
        Given a set of masks, this function retrieves studies associated with each
        mask at the specified threshold, optionally removes overlap and filters by studies
        and features, and returns a studies by features matrix (X) and class labels (y)

        Args:
            dataset: a Neurosynth dataset
            masks: a list of paths to Nifti masks
            threshold: percentage of voxels active within the mask for study to be included
            remove_overlap: A boolean indicating if studies that appear in more than one
                mask should be excluded
            studies: An optional list of study names used to constrain the set used in
                classification. If None, will use all features in the dataset.
            features: An optional list of feature names used to constrain the set used in
                classification. If None, will use all features in the dataset.
            regularization: Optional name of the regularization method applied to X;
                if falsy, X is left unregularized

        Returns:
            A tuple (X, y) of np arrays.
            X is a studies by features matrix and y is a vector of class labels
    """

    import nibabel as nib
    import os

    # Load masks using NiBabel

    try:
        loaded_masks = [nib.load(os.path.relpath(m)) for m in masks]
    except __HOLE__:
        print('Error loading masks. Check the path')

    # Get a list of studies that activate for each mask file--i.e., a list of
    # lists

    grouped_ids = [dataset.get_studies(mask=m, activation_threshold=threshold)
                   for m in loaded_masks]

    # Flattened ids

    flat_ids = reduce(lambda a, b: a + b, grouped_ids)

    # Remove duplicates

    if remove_overlap:
        import collections
        flat_ids = [id for (id, count) in
                    collections.Counter(flat_ids).items() if count == 1]
        grouped_ids = [[x for x in m if x in flat_ids] for m in
                       grouped_ids]  # Remove

    # Create class label(y)

    y = [[idx] * len(ids) for (idx, ids) in enumerate(grouped_ids)]
    y = reduce(lambda a, b: a + b, y)  # Flatten
    y = np.array(y)

    # Extract feature set for each class separately

    X = [dataset.get_feature_data(ids=group_ids, features=features)
         for group_ids in grouped_ids]

    X = np.vstack(tuple(X))

    if regularization:
        X = regularize(X, method=regularization)

    return (X, y)
OSError
dataset/ETHPy150Open neurosynth/neurosynth/neurosynth/analysis/classify.py/get_studies_by_regions
def set_class_weight(self, class_weight='auto', y=None): """ Sets the class_weight of the classifier to match y """ if class_weight == None: cw = None try: self.clf.set_params(class_weight = cw) except __HOLE__: pass elif class_weight == 'auto': c = np.bincount(y) ii = np.nonzero(c)[0] c = c / float(c.sum()) cw = dict(zip(ii[::-1],c[ii])) try: self.clf.set_params(class_weight = cw) except ValueError: import warnings warnings.warn("Tried to set class_weight, but failed. The classifier probably doesn't support it")
ValueError
dataset/ETHPy150Open neurosynth/neurosynth/neurosynth/analysis/classify.py/Classifier.set_class_weight
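The 'auto' branch derives class weights from label frequencies: np.bincount counts each class, the counts are normalized, and the reversal pairs each class with the other class's share, so the rarer class gets the larger weight (a trick that is only meaningful for the binary case). The arithmetic in isolation:

import numpy as np

def auto_class_weights(y):
    c = np.bincount(y)
    ii = np.nonzero(c)[0]
    c = c / float(c.sum())
    # Pair classes with the reversed frequencies: rare class, big weight.
    return dict(zip(ii[::-1], c[ii]))

print(auto_class_weights(np.array([0, 0, 0, 1])))  # {1: 0.75, 0: 0.25}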
def getOpenIDStore(filestore_path, table_prefix): """ Returns an OpenID association store object based on the database engine chosen for this Django application. * If no database engine is chosen, a filesystem-based store will be used whose path is filestore_path. * If a database engine is chosen, a store object for that database type will be returned. * If the chosen engine is not supported by the OpenID library, raise ImproperlyConfigured. * If a database store is used, this will create the tables necessary to use it. The table names will be prefixed with table_prefix. DO NOT use the same table prefix for both an OpenID consumer and an OpenID server in the same database. The result of this function should be passed to the Consumer constructor as the store parameter. """ if not settings.DATABASE_ENGINE: return FileOpenIDStore(filestore_path) # Possible side-effect: create a database connection if one isn't # already open. connection.cursor() # Create table names to specify for SQL-backed stores. tablenames = { 'associations_table': table_prefix + 'openid_associations', 'nonces_table': table_prefix + 'openid_nonces', } types = { 'postgresql': sqlstore.PostgreSQLStore, 'mysql': sqlstore.MySQLStore, 'sqlite3': sqlstore.SQLiteStore, } try: s = types[settings.DATABASE_ENGINE](connection.connection, **tablenames) except KeyError: raise ImproperlyConfigured, \ "Database engine %s not supported by OpenID library" % \ (settings.DATABASE_ENGINE,) try: s.createTables() except (SystemExit, __HOLE__, MemoryError), e: raise except: # XXX This is not the Right Way to do this, but because the # underlying database implementation might differ in behavior # at this point, we can't reliably catch the right # exception(s) here. Ideally, the SQL store in the OpenID # library would catch exceptions that it expects and fail # silently, but that could be bad, too. More ideally, the SQL # store would not attempt to create tables it knows already # exists. pass return s
KeyboardInterrupt
dataset/ETHPy150Open adieu/python-openid/examples/djopenid/util.py/getOpenIDStore
def handle(self, *args, **options): start_date = date(2013,6,16) end_date = date(2013,6,18) one_day = timedelta(days=1) this_date = start_date while (this_date < end_date): datestring = this_date.strftime("%Y%m%d") entry_time = datetime(this_date.year, this_date.month, this_date.day, 7,0) print "datestring %s" % (datestring) this_date += one_day filing_info = None try: filing_info = filerange[datestring] except __HOLE__: print "Missing data for %s" % datestring continue #print filing_info thisfilerange=range(int(filing_info['first']), 1+int(filing_info['last'])) #thisfilerange=['868338'] for filenum in thisfilerange: # see if the file is downloaded, and if it isn't just ignore it. Some numbers are skipped; our assumption here is that we're entering files that have come from a zipfile. local_file_location = FILECACHE_DIRECTORY + "/" + str(filenum) + ".fec" if path.isfile(local_file_location): print "Processing %s" % (filenum) process_filing_header(filenum, fp=fp, filing_time=entry_time, filing_time_is_exact=False) else: print "!! missing file %s" % filenum
KeyError
dataset/ETHPy150Open sunlightlabs/read_FEC/fecreader/formdata/management/commands/enter_headers_from_archive.py/Command.handle
def make_model(name): cfgs = config.model_configs(name) try: model_class = getattr(model_types, cfgs['model']) except __HOLE__: raise AttributeError('Unable to find model \ %s in model_types.py' % cfgs['model']) logger.info('Creating model %s' % name) model = model_class(**cfgs['args']) return model
AttributeError
dataset/ETHPy150Open theusual/kaggle-seeclickfix-ensemble/Miroslaw/models.py/make_model
def train_model(name): try: model = utils.load_from_cache(name) logger.info('Loading model %s from cache' % name) except __HOLE__: cfgs = config.model_configs(name) model = make_model(name) data = get_model_data(name) logger.info('Training model %s' % name) if "target" in cfgs: (train_data, target), test_data = data model.fit(train_data, target) else: model.fit(data) utils.save_to_cache(model, name) return model
IOError
dataset/ETHPy150Open theusual/kaggle-seeclickfix-ensemble/Miroslaw/models.py/train_model
def predict_model(name, data, model=None): if model is None: model = train_model(name) try: pred = model.predict(data) except __HOLE__: raise AttributeError("Model %s does not implement a predict function" % name) cfgs = config.model_configs(name) if 'postprocess_pred' in cfgs: postprocess_pred = getattr(predict, cfgs['postprocess_pred']['name']) pred = postprocess_pred(pred, **cfgs['postprocess_pred'].get('args', {})) return pred
AttributeError
dataset/ETHPy150Open theusual/kaggle-seeclickfix-ensemble/Miroslaw/models.py/predict_model
def get_validation_errors(outfile, app=None): """ Validates all models that are part of the specified app. If no app name is provided, validates all models of all installed apps. Writes errors, if any, to outfile. Returns number of errors. """ from django.db import models, connection from django.db.models.loading import get_app_errors from django.db.models.fields.related import RelatedObject from django.db.models.deletion import SET_NULL, SET_DEFAULT e = ModelErrorCollection(outfile) for (app_name, error) in get_app_errors().items(): e.add(app_name, error) for cls in models.get_models(app, include_swapped=True): opts = cls._meta # Check swappable attribute. if opts.swapped: try: app_label, model_name = opts.swapped.split('.') except ValueError: e.add(opts, "%s is not of the form 'app_label.app_name'." % opts.swappable) continue if not models.get_model(app_label, model_name): e.add(opts, "Model has been swapped out for '%s' which has not been installed or is abstract." % opts.swapped) # No need to perform any other validation checks on a swapped model. continue # If this is the current User model, check known validation problems with User models if settings.AUTH_USER_MODEL == '%s.%s' % (opts.app_label, opts.object_name): # Check that the USERNAME FIELD isn't included in REQUIRED_FIELDS. if cls.USERNAME_FIELD in cls.REQUIRED_FIELDS: e.add(opts, 'The field named as the USERNAME_FIELD should not be included in REQUIRED_FIELDS on a swappable User model.') # Check that the username field is unique if not opts.get_field(cls.USERNAME_FIELD).unique: e.add(opts, 'The USERNAME_FIELD must be unique. Add unique=True to the field parameters.') # Model isn't swapped; do field-specific validation. for f in opts.local_fields: if f.name == 'id' and not f.primary_key and opts.pk.name == 'id': e.add(opts, '"%s": You can\'t use "id" as a field name, because each model automatically gets an "id" field if none of the fields have primary_key=True. You need to either remove/rename your "id" field or add primary_key=True to a field.' % f.name) if f.name.endswith('_'): e.add(opts, '"%s": Field names cannot end with underscores, because this would lead to ambiguous queryset filters.' % f.name) if (f.primary_key and f.null and not connection.features.interprets_empty_strings_as_nulls): # We cannot reliably check this for backends like Oracle which # consider NULL and '' to be equal (and thus set up # character-based fields a little differently). e.add(opts, '"%s": Primary key fields cannot have null=True.' % f.name) if isinstance(f, models.CharField): try: max_length = int(f.max_length) if max_length <= 0: e.add(opts, '"%s": CharFields require a "max_length" attribute that is a positive integer.' % f.name) except (ValueError, TypeError): e.add(opts, '"%s": CharFields require a "max_length" attribute that is a positive integer.' % f.name) if isinstance(f, models.DecimalField): decimalp_ok, mdigits_ok = False, False decimalp_msg = '"%s": DecimalFields require a "decimal_places" attribute that is a non-negative integer.' try: decimal_places = int(f.decimal_places) if decimal_places < 0: e.add(opts, decimalp_msg % f.name) else: decimalp_ok = True except (ValueError, TypeError): e.add(opts, decimalp_msg % f.name) mdigits_msg = '"%s": DecimalFields require a "max_digits" attribute that is a positive integer.' 
                try:
                    max_digits = int(f.max_digits)
                    if max_digits <= 0:
                        e.add(opts, mdigits_msg % f.name)
                    else:
                        mdigits_ok = True
                except (ValueError, __HOLE__):
                    e.add(opts, mdigits_msg % f.name)
                invalid_values_msg = '"%s": DecimalFields require a "max_digits" attribute value that is greater than or equal to the value of the "decimal_places" attribute.'
                if decimalp_ok and mdigits_ok:
                    if decimal_places > max_digits:
                        e.add(opts, invalid_values_msg % f.name)
            if isinstance(f, models.FileField) and not f.upload_to:
                e.add(opts, '"%s": FileFields require an "upload_to" attribute.' % f.name)
            if isinstance(f, models.ImageField):
                # Try to import PIL in either of the two ways it can end up installed.
                try:
                    from PIL import Image
                except ImportError:
                    try:
                        import Image
                    except ImportError:
                        e.add(opts, '"%s": To use ImageFields, you need to install the Python Imaging Library. Get it at http://www.pythonware.com/products/pil/ .' % f.name)
            if isinstance(f, models.BooleanField) and getattr(f, 'null', False):
                e.add(opts, '"%s": BooleanFields do not accept null values. Use a NullBooleanField instead.' % f.name)
            if isinstance(f, models.FilePathField) and not (f.allow_files or f.allow_folders):
                e.add(opts, '"%s": FilePathFields must have either allow_files or allow_folders set to True.' % f.name)
            if f.choices:
                if isinstance(f.choices, six.string_types) or not is_iterable(f.choices):
                    e.add(opts, '"%s": "choices" should be iterable (e.g., a tuple or list).' % f.name)
                else:
                    for c in f.choices:
                        if not isinstance(c, (list, tuple)) or len(c) != 2:
                            e.add(opts, '"%s": "choices" should be a sequence of two-tuples.' % f.name)
            if f.db_index not in (None, True, False):
                e.add(opts, '"%s": "db_index" should be either None, True or False.' % f.name)

            # Perform any backend-specific field validation.
            connection.validation.validate_field(e, opts, f)

            # Check if the on_delete behavior is sane
            if f.rel and hasattr(f.rel, 'on_delete'):
                if f.rel.on_delete == SET_NULL and not f.null:
                    e.add(opts, "'%s' specifies on_delete=SET_NULL, but cannot be null." % f.name)
                elif f.rel.on_delete == SET_DEFAULT and not f.has_default():
                    e.add(opts, "'%s' specifies on_delete=SET_DEFAULT, but has no default value." % f.name)

            # Check to see if the related field will clash with any existing
            # fields, m2m fields, m2m related objects or related objects
            if f.rel:
                if f.rel.to not in models.get_models():
                    # If the related model is swapped, provide a hint;
                    # otherwise, the model just hasn't been installed.
                    if not isinstance(f.rel.to, six.string_types) and f.rel.to._meta.swapped:
                        e.add(opts, "'%s' defines a relation with the model '%s.%s', which has been swapped out. Update the relation to point at settings.%s." % (f.name, f.rel.to._meta.app_label, f.rel.to._meta.object_name, f.rel.to._meta.swappable))
                    else:
                        e.add(opts, "'%s' has a relation with model %s, which has either not been installed or is abstract." % (f.name, f.rel.to))
                # it is a string and we could not find the model it refers to
                # so skip the next section
                if isinstance(f.rel.to, six.string_types):
                    continue

                # Make sure the related field specified by a ForeignKey is unique
                if not f.rel.to._meta.get_field(f.rel.field_name).unique:
                    e.add(opts, "Field '%s' under model '%s' must have a unique=True constraint." % (f.rel.field_name, f.rel.to.__name__))

                rel_opts = f.rel.to._meta
                rel_name = RelatedObject(f.rel.to, cls, f).get_accessor_name()
                rel_query_name = f.related_query_name()
                if not f.rel.is_hidden():
                    for r in rel_opts.fields:
                        if r.name == rel_name:
                            e.add(opts, "Accessor for field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
                        if r.name == rel_query_name:
                            e.add(opts, "Reverse query name for field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
                    for r in rel_opts.local_many_to_many:
                        if r.name == rel_name:
                            e.add(opts, "Accessor for field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
                        if r.name == rel_query_name:
                            e.add(opts, "Reverse query name for field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
                    for r in rel_opts.get_all_related_many_to_many_objects():
                        if r.get_accessor_name() == rel_name:
                            e.add(opts, "Accessor for field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
                        if r.get_accessor_name() == rel_query_name:
                            e.add(opts, "Reverse query name for field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
                    for r in rel_opts.get_all_related_objects():
                        if r.field is not f:
                            if r.get_accessor_name() == rel_name:
                                e.add(opts, "Accessor for field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
                            if r.get_accessor_name() == rel_query_name:
                                e.add(opts, "Reverse query name for field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))

        seen_intermediary_signatures = []
        for i, f in enumerate(opts.local_many_to_many):
            # Check to see if the related m2m field will clash with any
            # existing fields, m2m fields, m2m related objects or related
            # objects
            if f.rel.to not in models.get_models():
                # If the related model is swapped, provide a hint;
                # otherwise, the model just hasn't been installed.
                if not isinstance(f.rel.to, six.string_types) and f.rel.to._meta.swapped:
                    e.add(opts, "'%s' defines a relation with the model '%s.%s', which has been swapped out. Update the relation to point at settings.%s." % (f.name, f.rel.to._meta.app_label, f.rel.to._meta.object_name, f.rel.to._meta.swappable))
                else:
                    e.add(opts, "'%s' has an m2m relation with model %s, which has either not been installed or is abstract." % (f.name, f.rel.to))
            # it is a string and we could not find the model it refers to
            # so skip the next section
            if isinstance(f.rel.to, six.string_types):
                continue

            # Check that the field is not set to unique. ManyToManyFields do not support unique.
            if f.unique:
                e.add(opts, "ManyToManyFields cannot be unique. Remove the unique argument on '%s'."
% f.name) if f.rel.through is not None and not isinstance(f.rel.through, six.string_types): from_model, to_model = cls, f.rel.to if from_model == to_model and f.rel.symmetrical and not f.rel.through._meta.auto_created: e.add(opts, "Many-to-many fields with intermediate tables cannot be symmetrical.") seen_from, seen_to, seen_self = False, False, 0 for inter_field in f.rel.through._meta.fields: rel_to = getattr(inter_field.rel, 'to', None) if from_model == to_model: # relation to self if rel_to == from_model: seen_self += 1 if seen_self > 2: e.add(opts, "Intermediary model %s has more than " "two foreign keys to %s, which is ambiguous " "and is not permitted." % ( f.rel.through._meta.object_name, from_model._meta.object_name ) ) else: if rel_to == from_model: if seen_from: e.add(opts, "Intermediary model %s has more " "than one foreign key to %s, which is " "ambiguous and is not permitted." % ( f.rel.through._meta.object_name, from_model._meta.object_name ) ) else: seen_from = True elif rel_to == to_model: if seen_to: e.add(opts, "Intermediary model %s has more " "than one foreign key to %s, which is " "ambiguous and is not permitted." % ( f.rel.through._meta.object_name, rel_to._meta.object_name ) ) else: seen_to = True if f.rel.through not in models.get_models(include_auto_created=True): e.add(opts, "'%s' specifies an m2m relation through model " "%s, which has not been installed." % (f.name, f.rel.through) ) signature = (f.rel.to, cls, f.rel.through) if signature in seen_intermediary_signatures: e.add(opts, "The model %s has two manually-defined m2m " "relations through the model %s, which is not " "permitted. Please consider using an extra field on " "your intermediary model instead." % ( cls._meta.object_name, f.rel.through._meta.object_name ) ) else: seen_intermediary_signatures.append(signature) if not f.rel.through._meta.auto_created: seen_related_fk, seen_this_fk = False, False for field in f.rel.through._meta.fields: if field.rel: if not seen_related_fk and field.rel.to == f.rel.to: seen_related_fk = True elif field.rel.to == cls: seen_this_fk = True if not seen_related_fk or not seen_this_fk: e.add(opts, "'%s' is a manually-defined m2m relation " "through model %s, which does not have foreign keys " "to %s and %s" % (f.name, f.rel.through._meta.object_name, f.rel.to._meta.object_name, cls._meta.object_name) ) elif isinstance(f.rel.through, six.string_types): e.add(opts, "'%s' specifies an m2m relation through model %s, " "which has not been installed" % (f.name, f.rel.through) ) rel_opts = f.rel.to._meta rel_name = RelatedObject(f.rel.to, cls, f).get_accessor_name() rel_query_name = f.related_query_name() # If rel_name is none, there is no reverse accessor (this only # occurs for symmetrical m2m relations to self). If this is the # case, there are no clashes to check for this field, as there are # no reverse descriptors for this field. if rel_name is not None: for r in rel_opts.fields: if r.name == rel_name: e.add(opts, "Accessor for m2m field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name)) if r.name == rel_query_name: e.add(opts, "Reverse query name for m2m field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name)) for r in rel_opts.local_many_to_many: if r.name == rel_name: e.add(opts, "Accessor for m2m field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." 
                            % (f.name, rel_opts.object_name, r.name, f.name))
                        if r.name == rel_query_name:
                            e.add(opts, "Reverse query name for m2m field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
                    for r in rel_opts.get_all_related_many_to_many_objects():
                        if r.field is not f:
                            if r.get_accessor_name() == rel_name:
                                e.add(opts, "Accessor for m2m field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
                            if r.get_accessor_name() == rel_query_name:
                                e.add(opts, "Reverse query name for m2m field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
                    for r in rel_opts.get_all_related_objects():
                        if r.get_accessor_name() == rel_name:
                            e.add(opts, "Accessor for m2m field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
                        if r.get_accessor_name() == rel_query_name:
                            e.add(opts, "Reverse query name for m2m field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))

        # Check ordering attribute.
        if opts.ordering:
            for field_name in opts.ordering:
                if field_name == '?':
                    continue
                if field_name.startswith('-'):
                    field_name = field_name[1:]
                if opts.order_with_respect_to and field_name == '_order':
                    continue
                # Skip ordering in the format field1__field2 (FIXME: checking
                # this format would be nice, but it's a little fiddly).
                if '__' in field_name:
                    continue
                # Skip ordering on pk. This is always a valid order_by field
                # but is an alias and therefore won't be found by opts.get_field.
                if field_name == 'pk':
                    continue
                try:
                    opts.get_field(field_name, many_to_many=False)
                except models.FieldDoesNotExist:
                    e.add(opts, '"ordering" refers to "%s", a field that doesn\'t exist.' % field_name)

        # Check unique_together.
        for ut in opts.unique_together:
            validate_local_fields(e, opts, "unique_together", ut)
        if not isinstance(opts.index_together, collections.Sequence):
            e.add(opts, '"index_together" must be a sequence')
        else:
            for it in opts.index_together:
                validate_local_fields(e, opts, "index_together", it)

    return len(e.errors)
TypeError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/core/management/validation.py/get_validation_errors
def custom_message_validator(validator, error_message=None): def _validator(raw_value): try: return validator(raw_value) except __HOLE__ as e: if error_message: e.args = (error_message,) raise return _validator
ValueError
dataset/ETHPy150Open sodastsai/taskr/taskr/contrib/validators.py/custom_message_validator
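Usage: mutating e.args swaps the message while re-raising the original exception object, so the type and traceback survive. For example (the definition is repeated from the entry above so the snippet runs standalone):

def custom_message_validator(validator, error_message=None):
    def _validator(raw_value):
        try:
            return validator(raw_value)
        except ValueError as e:
            if error_message:
                e.args = (error_message,)
            raise
    return _validator

port = custom_message_validator(int, 'port must be an integer')
print(port('8080'))  # 8080
try:
    port('eighty')
except ValueError as e:
    print(e)         # port must be an integer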
@classmethod def create(cls, entry): """ Factory that creates an app config from an entry in INSTALLED_APPS. """ try: # If import_module succeeds, entry is a path to an app module, # which may specify an app config class with default_app_config. # Otherwise, entry is a path to an app config class or an error. module = import_module(entry) except ImportError: # Track that importing as an app module failed. If importing as an # app config class fails too, we'll trigger the ImportError again. module = None mod_path, _, cls_name = entry.rpartition('.') # Raise the original exception when entry cannot be a path to an # app config class. if not mod_path: raise else: try: # If this works, the app module specifies an app config class. entry = module.default_app_config except AttributeError: # Otherwise, it simply uses the default app config class. return cls(entry, module) else: mod_path, _, cls_name = entry.rpartition('.') # If we're reaching this point, we must attempt to load the app config # class located at <mod_path>.<cls_name> mod = import_module(mod_path) try: cls = getattr(mod, cls_name) except AttributeError: if module is None: # If importing as an app module failed, that error probably # contains the most informative traceback. Trigger it again. import_module(entry) else: raise # Check for obvious errors. (This check prevents duck typing, but # it could be removed if it became a problem in practice.) if not issubclass(cls, AppConfig): raise ImproperlyConfigured( "'%s' isn't a subclass of AppConfig." % entry) # Obtain app name here rather than in AppClass.__init__ to keep # all error checking for entries in INSTALLED_APPS in one place. try: app_name = cls.name except __HOLE__: raise ImproperlyConfigured( "'%s' must supply a name attribute." % entry) # Ensure app_name points to a valid module. app_module = import_module(app_name) # Entry is a path to an app config class. return cls(app_name, app_module)
AttributeError
dataset/ETHPy150Open django/django/django/apps/config.py/AppConfig.create
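The method's two-phase resolution boils down to importlib.import_module for the module path plus getattr for the trailing attribute. A compact sketch of that dotted-path loader:

from importlib import import_module

def load_object(dotted_path):
    # 'json.JSONDecoder' -> the class; a dotless path is a plain module,
    # mirroring the dual interpretation of INSTALLED_APPS entries above.
    mod_path, _, attr = dotted_path.rpartition('.')
    if not mod_path:
        return import_module(dotted_path)
    module = import_module(mod_path)
    try:
        return getattr(module, attr)
    except AttributeError:
        raise ImportError('%s has no attribute %s' % (mod_path, attr))

print(load_object('json.JSONDecoder'))  # <class 'json.decoder.JSONDecoder'>
print(load_object('json'))              # <module 'json' ...>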
def get_model(self, model_name): """ Returns the model with the given case-insensitive model_name. Raises LookupError if no model exists with this name. """ self.check_models_ready() try: return self.models[model_name.lower()] except __HOLE__: raise LookupError( "App '%s' doesn't have a '%s' model." % (self.label, model_name))
KeyError
dataset/ETHPy150Open django/django/django/apps/config.py/AppConfig.get_model
def __init__(self, colorscheme_config, colors_config): '''Initialize a colorscheme.''' self.colors = {} self.gradients = {} self.groups = colorscheme_config['groups'] self.translations = colorscheme_config.get('mode_translations', {}) # Create a dict of color tuples with both a cterm and hex value for color_name, color in colors_config['colors'].items(): try: self.colors[color_name] = (color[0], int(color[1], 16)) except __HOLE__: self.colors[color_name] = (color, cterm_to_hex[color]) # Create a dict of gradient names with two lists: for cterm and hex # values. Two lists in place of one list of pairs were chosen because # true colors allow more precise gradients. for gradient_name, gradient in colors_config['gradients'].items(): if len(gradient) == 2: self.gradients[gradient_name] = ( (gradient[0], [int(color, 16) for color in gradient[1]])) else: self.gradients[gradient_name] = ( (gradient[0], [cterm_to_hex[color] for color in gradient[0]]))
TypeError
dataset/ETHPy150Open powerline/powerline/powerline/colorscheme.py/Colorscheme.__init__
def get_group_props(self, mode, trans, group, translate_colors=True): if isinstance(group, (str, unicode)): try: group_props = trans['groups'][group] except KeyError: try: group_props = self.groups[group] except KeyError: return None else: return self.get_group_props(mode, trans, group_props, True) else: return self.get_group_props(mode, trans, group_props, False) else: if translate_colors: group_props = copy(group) try: ctrans = trans['colors'] except __HOLE__: pass else: for key in ('fg', 'bg'): try: group_props[key] = ctrans[group_props[key]] except KeyError: pass return group_props else: return group
KeyError
dataset/ETHPy150Open powerline/powerline/powerline/colorscheme.py/Colorscheme.get_group_props
def truncatechars(value, arg): """ Truncates a string after a certain number of characters, but respects word boundaries. Argument: Number of characters to truncate after. """ try: length = int(arg) except __HOLE__: # If the argument is not a valid integer. return value # Fail silently. return truncate_chars(value, length)
ValueError
dataset/ETHPy150Open jumoconnect/openjumo/jumodjango/etc/templatetags/tags.py/truncatechars
@register.filter
def partition(my_list, n):
    '''
    Partitions a list into sublists, each with n (or fewer) elements.

    my_list = [1,2,3,4,5]
    partition(my_list, 2) => [[1,2],[3,4],[5]]
    '''
    try:
        n = int(n)
        my_list = list(my_list)
    except __HOLE__:
        return [my_list]
    return [my_list[i:i+n] for i in range(0, len(my_list), n)]
ValueError
dataset/ETHPy150Open jumoconnect/openjumo/jumodjango/etc/templatetags/tags.py/partition
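Outside the template layer the filter is plain slicing arithmetic; a bad n fails soft by returning the whole input as a single chunk. For example (the body is repeated without the decorator so it runs standalone):

def partition(my_list, n):
    try:
        n = int(n)
        my_list = list(my_list)
    except ValueError:
        return [my_list]
    return [my_list[i:i+n] for i in range(0, len(my_list), n)]

print(partition([1, 2, 3, 4, 5], 2))  # [[1, 2], [3, 4], [5]]
print(partition([1, 2, 3], 'x'))      # [[1, 2, 3]]: bad n, one chunk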
def get_hexdigest(algorithm, salt, raw_password): """ Returns a string of the hexdigest of the given plaintext password and salt using the given algorithm ('md5', 'sha1' or 'crypt'). """ raw_password, salt = smart_str(raw_password), smart_str(salt) if algorithm == 'crypt': try: import crypt except __HOLE__: raise ValueError('"crypt" password algorithm not supported in this environment') return crypt.crypt(raw_password, salt) if algorithm == 'md5': return md5_constructor(salt + raw_password).hexdigest() elif algorithm == 'sha1': return sha_constructor(salt + raw_password).hexdigest() raise ValueError("Got unknown password algorithm type in password.")
ImportError
dataset/ETHPy150Open adieu/django-nonrel/django/contrib/auth/models.py/get_hexdigest
def create_user(self, username, email, password=None): """ Creates and saves a User with the given username, e-mail and password. """ now = datetime.datetime.now() # Normalize the address by lowercasing the domain part of the email # address. try: email_name, domain_part = email.strip().split('@', 1) except __HOLE__: pass else: email = '@'.join([email_name, domain_part.lower()]) user = self.model(username=username, email=email, is_staff=False, is_active=True, is_superuser=False, last_login=now, date_joined=now) user.set_password(password) user.save(using=self._db) return user
ValueError
dataset/ETHPy150Open adieu/django-nonrel/django/contrib/auth/models.py/UserManager.create_user
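Only the domain half of an address is case-insensitive (the local part may legitimately be case-sensitive), which is why create_user lowercases just the part after the '@'. The normalization step in isolation:

def normalize_email(email):
    try:
        email_name, domain_part = email.strip().split('@', 1)
    except ValueError:
        return email  # no '@': leave the malformed value untouched
    return '@'.join([email_name, domain_part.lower()])

print(normalize_email('Bob.Smith@EXAMPLE.COM'))  # Bob.Smith@example.com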
def get_profile(self): """ Returns site-specific profile for this user. Raises SiteProfileNotAvailable if this site does not allow profiles. """ if not hasattr(self, '_profile_cache'): from django.conf import settings if not getattr(settings, 'AUTH_PROFILE_MODULE', False): raise SiteProfileNotAvailable('You need to set AUTH_PROFILE_MO' 'DULE in your project settings') try: app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.') except ValueError: raise SiteProfileNotAvailable('app_label and model_name should' ' be separated by a dot in the AUTH_PROFILE_MODULE set' 'ting') try: model = models.get_model(app_label, model_name) if model is None: raise SiteProfileNotAvailable('Unable to load the profile ' 'model, check AUTH_PROFILE_MODULE in your project sett' 'ings') self._profile_cache = model._default_manager.using(self._state.db).get(user__id__exact=self.id) self._profile_cache.user = self except (__HOLE__, ImproperlyConfigured): raise SiteProfileNotAvailable return self._profile_cache
ImportError
dataset/ETHPy150Open adieu/django-nonrel/django/contrib/auth/models.py/User.get_profile