Dataset columns: function (string, length 79 to 138k), label (string, one of 20 exception classes), info (string, length 42 to 261)
def identify(self): """Update client metadata on the server and negotiate features. :returns: nsqd response data if there was feature negotiation, otherwise `None` """ self.send(nsq.identify({ # nsqd <0.2.28 'short_id': self.client_id, 'long_id': self.hostname, # nsqd 0.2.28+ 'client_id': self.client_id, 'hostname': self.hostname, # nsqd 0.2.19+ 'feature_negotiation': True, 'heartbeat_interval': self.heartbeat_interval, # nsqd 0.2.21+ 'output_buffer_size': self.output_buffer_size, 'output_buffer_timeout': self.output_buffer_timeout, # nsqd 0.2.22+ 'tls_v1': self.tls_v1, # nsqd 0.2.23+ 'snappy': self.snappy, 'deflate': self.deflate, 'deflate_level': self.deflate_level, # nsqd 0.2.25+ 'sample_rate': self.sample_rate, 'user_agent': self.user_agent, })) frame, data = self.read_response() if frame == nsq.FRAME_TYPE_ERROR: raise data if data == nsq.OK: return try: data = json.loads(data.decode('utf-8')) except __HOLE__: self.close_stream() msg = 'failed to parse IDENTIFY response JSON from nsqd: %r' raise errors.NSQException(msg % data) self.max_ready_count = data.get('max_rdy_count', self.max_ready_count) if self.tls_v1 and data.get('tls_v1'): self.upgrade_to_tls() if self.snappy and data.get('snappy'): self.upgrade_to_snappy() elif self.deflate and data.get('deflate'): self.deflate_level = data.get('deflate_level', self.deflate_level) self.upgrade_to_deflate() if self.auth_secret and data.get('auth_required'): self.auth() return data
ValueError
dataset/ETHPy150Open wtolson/gnsq/gnsq/nsqd.py/Nsqd.identify
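The masked guard above is a common pattern: JSON decoding failures surface as ValueError (json.JSONDecodeError subclasses it on Python 3.5+). A minimal, self-contained sketch of that guard, with parse_response as a hypothetical helper standing in for the client method:

import json

def parse_response(data):
    # Decoding failures raise ValueError, the class masked above.
    try:
        return json.loads(data.decode('utf-8'))
    except ValueError:
        raise RuntimeError('failed to parse response JSON: %r' % (data,))

parse_response(b'{"max_rdy_count": 2500}')  # {'max_rdy_count': 2500}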
def auth(self): """Send authorization secret to nsqd.""" self.send(nsq.auth(self.auth_secret)) frame, data = self.read_response() if frame == nsq.FRAME_TYPE_ERROR: raise data try: response = json.loads(data.decode('utf-8')) except __HOLE__: self.close_stream() msg = 'failed to parse AUTH response JSON from nsqd: %r' raise errors.NSQException(msg % data) self.on_auth.send(self, response=response) return response
ValueError
dataset/ETHPy150Open wtolson/gnsq/gnsq/nsqd.py/Nsqd.auth
def runfastcgi(argset=[], **kwargs): options = FASTCGI_OPTIONS.copy() options.update(kwargs) for x in argset: if "=" in x: k, v = x.split('=', 1) else: k, v = x, True options[k.lower()] = v if "help" in options: return fastcgi_help() try: import flup except __HOLE__, e: print >> sys.stderr, "ERROR: %s" % e print >> sys.stderr, " Unable to load the flup package. In order to run django" print >> sys.stderr, " as a FastCGI application, you will need to get flup from" print >> sys.stderr, " http://www.saddi.com/software/flup/ If you've already" print >> sys.stderr, " installed flup, then make sure you have it in your PYTHONPATH." return False if options['method'] in ('prefork', 'fork'): from flup.server.fcgi_fork import WSGIServer wsgi_opts = { 'maxSpare': int(options["maxspare"]), 'minSpare': int(options["minspare"]), 'maxChildren': int(options["maxchildren"]), 'maxRequests': int(options["maxrequests"]), } elif options['method'] in ('thread', 'threaded'): from flup.server.fcgi import WSGIServer wsgi_opts = { 'maxSpare': int(options["maxspare"]), 'minSpare': int(options["minspare"]), 'maxThreads': int(options["maxchildren"]), } else: return fastcgi_help("ERROR: Implementation must be one of prefork or thread.") wsgi_opts['debug'] = False # Turn off flup tracebacks # Prep up and go from django.core.handlers.wsgi import WSGIHandler if options["host"] and options["port"] and not options["socket"]: wsgi_opts['bindAddress'] = (options["host"], int(options["port"])) elif options["socket"] and not options["host"] and not options["port"]: wsgi_opts['bindAddress'] = options["socket"] elif not options["socket"] and not options["host"] and not options["port"]: wsgi_opts['bindAddress'] = None else: return fastcgi_help("Invalid combination of host, port, socket.") if options["daemonize"] is None: # Default to daemonizing if we're running on a socket/named pipe. daemonize = (wsgi_opts['bindAddress'] is not None) else: if options["daemonize"].lower() in ('true', 'yes', 't'): daemonize = True elif options["daemonize"].lower() in ('false', 'no', 'f'): daemonize = False else: return fastcgi_help("ERROR: Invalid option for daemonize parameter.") if daemonize: from django.utils.daemonize import become_daemon become_daemon(our_home_dir=options["workdir"]) if options["pidfile"]: fp = open(options["pidfile"], "w") fp.write("%d\n" % os.getpid()) fp.close() WSGIServer(WSGIHandler(), **wsgi_opts).run()
ImportError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/core/servers/fastcgi.py/runfastcgi
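The option handling at the top of runfastcgi is worth isolating: "k=v" tokens become string values and bare tokens become True flags. A minimal sketch of that parsing, with parse_argset as a hypothetical helper name:

def parse_argset(argset):
    # "key=value" tokens split once; bare tokens are boolean flags.
    options = {}
    for x in argset:
        if '=' in x:
            k, v = x.split('=', 1)
        else:
            k, v = x, True
        options[k.lower()] = v
    return options

parse_argset(['method=prefork', 'daemonize'])
# {'method': 'prefork', 'daemonize': True}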
def _handle_password(self, password): """ Handles getting the password""" if password is None: password = keyring.get_password('yagmail', self.user) if password is None: import getpass password = getpass.getpass( 'Password for <{}>: '.format(self.user)) answer = '' # Python 2 fix while answer != 'y' and answer != 'n': prompt_string = 'Save username and password in keyring? [y/n]: ' # pylint: disable=undefined-variable try: answer = raw_input(prompt_string).strip() except __HOLE__: answer = input(prompt_string).strip() if answer == 'y': register(self.user, password) return password
NameError
dataset/ETHPy150Open kootenpv/yagmail/yagmail/yagmail.py/SMTP._handle_password
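The NameError dance above is the classic Python 2/3 input shim: raw_input exists only on Python 2, so referencing it and catching NameError selects the right builtin. A standalone sketch of the pattern, with read_input as a hypothetical alias:

try:
    read_input = raw_input  # Python 2
except NameError:           # Python 3: raw_input is undefined
    read_input = input

answer = read_input('Save username and password in keyring? [y/n]: ').strip()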
def process_view(self, request, view_func, view_args, view_kwargs): try: path = view_kwargs['path'] except __HOLE__: request.node = None else: request.node = SimpleLazyObject(lambda: get_node(path))
KeyError
dataset/ETHPy150Open ithinksw/philo/philo/middleware.py/RequestNodeMiddleware.process_view
def hashed_name(self, name, content=None): parsed_name = urlsplit(unquote(name)) clean_name = parsed_name.path.strip() opened = False if content is None: if not self.exists(clean_name): raise ValueError("The file '%s' could not be found with %r." % (clean_name, self)) try: content = self.open(clean_name) except __HOLE__: # Handle directory paths and fragments return name opened = True try: file_hash = self.file_hash(clean_name, content) finally: if opened: content.close() path, filename = os.path.split(clean_name) root, ext = os.path.splitext(filename) if file_hash is not None: file_hash = ".%s" % file_hash hashed_name = os.path.join(path, "%s%s%s" % (root, file_hash, ext)) unparsed_name = list(parsed_name) unparsed_name[2] = hashed_name # Special casing for a @font-face hack, like url(myfont.eot?#iefix") # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax if '?#' in name and not unparsed_name[3]: unparsed_name[2] += '?' return urlunsplit(unparsed_name)
IOError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/contrib/staticfiles/storage.py/CachedFilesMixin.hashed_name
def _get_request_object(request, attrs=None, klass=None, key=None): """Resolves the appropriate object for use from the request. This applies only to DataView or DataContext objects. """ # Attempt to derive the `attrs` from the request if attrs is None: if request.method == 'POST': attrs = request.data.get(key) elif request.method == 'GET': attrs = request.GET.get(key) # If the `attrs` still could not be resolved, try to get the view or # context from the query data if it exists within the request. if attrs is None: request_data = None # Try to read the query data from the request if request.method == 'POST': request_data = request.data.get('query') elif request.method == 'GET': request_data = request.GET.get('query') # If query data was found in the request, then attempt to create a # DataQuery object from it. if request_data: query = get_request_query(request, attrs=request_data.get('query')) # Now that the DataQuery object is built, read the appropriate # attribute from it, returning None if the attribute wasn't found. # Since `context` and `view` are the keys used in get_request_view # and get_request_context respectively, we can use the key directly # to access the context and view properties of the DataQuery model. key_object = getattr(query, key, None) # If the property exists and is not None, then read the json from # the object as both DataContext and DataView objects will have a # json property. This json will be used as the attributes to # construct or look up the klass object going forward. Otherwise, # `attrs` will still be None and we are no worse off than we were # before attempting to create and read the query. if key_object: attrs = key_object.json # Use attrs that were supplied or derived from the request. # This provides support for one-off queries via POST or GET. if isinstance(attrs, (list, dict)): return klass(json=attrs) kwargs = {} # If an authenticated user made the request, filter by the user or # fall back to an active session key. if getattr(request, 'user', None) and request.user.is_authenticated(): kwargs['user'] = request.user else: # If no session has been created, this is a cookie-less user agent # which is most likely a bot or a non-browser client (e.g. cURL). if request.session.session_key is None: return klass() kwargs['session_key'] = request.session.session_key # Assume it is a primary key and fall back to the session try: kwargs['pk'] = int(attrs) except (__HOLE__, TypeError): kwargs['session'] = True try: # Check that multiple DataViews or DataContexts are not returned # If there are more than one, return the most recent return klass.objects.filter(**kwargs).latest('modified') except klass.DoesNotExist: pass # Fall back to an instance based off the default template if one exists instance = klass() default = klass.objects.get_default_template() if default: instance.json = default.json return instance # Partially applied functions for DataView and DataContext. These functions # only require the request object and an optional `attrs` dict
ValueError
dataset/ETHPy150Open chop-dbhi/serrano/serrano/resources/base.py/_get_request_object
def get_request_query(request, attrs=None): """ Resolves the appropriate DataQuery object for use from the request. """ # Attempt to derive the `attrs` from the request if attrs is None: if request.method == 'POST': attrs = request.data.get('query') elif request.method == 'GET': attrs = request.GET.get('query') # If the `attrs` could not be derived from the request (meaning no query # was explicitly defined), try to construct the query by deriving a context # and view from the request. if attrs is None: json = {} context = get_request_context(request) if context: json['context'] = context.json view = get_request_view(request) if view: json['view'] = view.json return DataQuery(json) # If `attrs` were derived or supplied then validate them and return a # DataQuery based off the `attrs`. if isinstance(attrs, dict): # We cannot simply validate and create a DataQuery based off the # `attrs` as they are now because the context and/or view might not # contain json but might instead be a pk or some other value. Use the # internal helper methods to construct the context and view objects # and build the query from the json of those objects' json. json = {} context = get_request_context(request, attrs=attrs) if context: json['context'] = context.json view = get_request_view(request, attrs=attrs) if view: json['view'] = view.json DataQuery.validate(json) return DataQuery(json) kwargs = {} # If an authenticated user made the request, filter by the user or # fall back to an active session key. if getattr(request, 'user', None) and request.user.is_authenticated(): kwargs['user'] = request.user else: # If no session has been created, this is a cookie-less user agent # which is most likely a bot or a non-browser client (e.g. cURL). if request.session.session_key is None: return DataQuery() kwargs['session_key'] = request.session.session_key # Assume it is a primary key and fall back to the session try: kwargs['pk'] = int(attrs) except (__HOLE__, TypeError): kwargs['session'] = True try: return DataQuery.objects.get(**kwargs) except DataQuery.DoesNotExist: pass # Fall back to an instance based off the default template if one exists instance = DataQuery() default = DataQuery.objects.get_default_template() if default: instance.json = default.json return instance
ValueError
dataset/ETHPy150Open chop-dbhi/serrano/serrano/resources/base.py/get_request_query
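Both serrano functions above share the same fallback: a value that cannot be coerced to int (including None) switches the lookup from primary key to session. A minimal sketch, with pk_or_session as a hypothetical helper name:

def pk_or_session(attrs):
    # int() raises ValueError for non-numeric strings and TypeError
    # for None, so both fall through to the session lookup.
    kwargs = {}
    try:
        kwargs['pk'] = int(attrs)
    except (ValueError, TypeError):
        kwargs['session'] = True
    return kwargs

pk_or_session('42')  # {'pk': 42}
pk_or_session(None)  # {'session': True}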
def create_app(applet_id, applet_name, src_dir, publish=False, set_default=False, billTo=None, try_versions=None, try_update=True, confirm=True): """ Creates a new app object from the specified applet. """ app_spec = _get_app_spec(src_dir) logger.info("Will create app with spec: %s" % (app_spec,)) app_spec["applet"] = applet_id app_spec["name"] = applet_name # Inline Readme.md and Readme.developer.md _inline_documentation_files(app_spec, src_dir) if billTo: app_spec["billTo"] = billTo if not try_versions: try_versions = [app_spec["version"]] for version in try_versions: logger.debug("Attempting to create version %s..." % (version,)) app_spec['version'] = version app_describe = None try: # 404, which is rather likely in this app_describe request # (the purpose of the request is to find out whether the # version of interest exists), would ordinarily cause this # request to be retried multiple times, introducing a # substantial delay. So we disable retrying here for this # request. app_describe = dxpy.api.app_describe("app-" + app_spec["name"], alias=version, always_retry=False) except dxpy.exceptions.DXAPIError as e: if e.name == 'ResourceNotFound': pass else: raise e # Now app_describe is None if the app didn't exist, OR it contains the # app describe content. # The describe check does not eliminate race conditions since an app # may always have been created, or published, since we last looked at # it. So the describe that happens here is just to save time and avoid # unnecessary API calls, but we always have to be prepared to recover # from API errors. if app_describe is None: logger.debug('App %s/%s does not yet exist' % (app_spec["name"], version)) app_id = _create_or_update_version(app_spec['name'], app_spec['version'], app_spec, try_update=try_update) if app_id is None: continue logger.info("Created app " + app_id) # Success! break elif app_describe.get("published", 0) == 0: logger.debug('App %s/%s already exists and has not been published' % (app_spec["name"], version)) app_id = _update_version(app_spec['name'], app_spec['version'], app_spec, try_update=try_update) if app_id is None: continue logger.info("Updated existing app " + app_id) # Success! break else: logger.debug('App %s/%s already exists and has been published' % (app_spec["name"], version)) # App has already been published. Give up on this version. continue else: # All versions requested failed if len(try_versions) != 1: tried_versions = 'any of the requested versions: ' + ', '.join(try_versions) else: tried_versions = 'the requested version: ' + try_versions[0] raise AppBuilderException('Could not create %s' % (tried_versions,)) # Set categories appropriately. categories_to_set = app_spec.get("categories", []) existing_categories = dxpy.api.app_list_categories(app_id)['categories'] categories_to_add = set(categories_to_set).difference(set(existing_categories)) categories_to_remove = set(existing_categories).difference(set(categories_to_set)) if categories_to_add: dxpy.api.app_add_categories(app_id, input_params={'categories': list(categories_to_add)}) if categories_to_remove: dxpy.api.app_remove_categories(app_id, input_params={'categories': list(categories_to_remove)}) # Set developers list appropriately, but only if provided. developers_to_set = app_spec.get("developers") if developers_to_set is not None: existing_developers = dxpy.api.app_list_developers(app_id)['developers'] developers_to_add = set(developers_to_set) - set(existing_developers) developers_to_remove = set(existing_developers) - set(developers_to_set) skip_updating_developers = False if developers_to_add or developers_to_remove: parts = [] if developers_to_add: parts.append('the following developers will be added: ' + ', '.join(sorted(developers_to_add))) if developers_to_remove: parts.append('the following developers will be removed: ' + ', '.join(sorted(developers_to_remove))) developer_change_message = '; and '.join(parts) if confirm: if INTERACTIVE_CLI: try: print('***') print(fill('WARNING: ' + developer_change_message)) print('***') value = input('Confirm updating developers list [y/N]: ') except __HOLE__: value = 'n' if not value.lower().startswith('y'): skip_updating_developers = True else: # Default to NOT updating developers if operating # without a TTY. logger.warn('skipping requested change to the developer list. Rerun "dx build" interactively or pass --yes to confirm this change.') skip_updating_developers = True else: logger.warn(developer_change_message) if not skip_updating_developers: if developers_to_add: dxpy.api.app_add_developers(app_id, input_params={'developers': list(developers_to_add)}) if developers_to_remove: dxpy.api.app_remove_developers(app_id, input_params={'developers': list(developers_to_remove)}) # Set authorizedUsers list appropriately, but only if provided. authorized_users_to_set = app_spec.get("authorizedUsers") existing_authorized_users = dxpy.api.app_list_authorized_users(app_id)['authorizedUsers'] if authorized_users_to_set is not None: authorized_users_to_add = set(authorized_users_to_set) - set(existing_authorized_users) authorized_users_to_remove = set(existing_authorized_users) - set(authorized_users_to_set) skip_adding_public = False if 'PUBLIC' in authorized_users_to_add: acl_change_message = 'app-%s will be made public. Anyone will be able to view and run all published versions of this app.' % (app_spec['name'],) if confirm: if INTERACTIVE_CLI: try: print('***') print(fill('WARNING: ' + acl_change_message)) print('***') value = input('Confirm making this app public [y/N]: ') except KeyboardInterrupt: value = 'n' if not value.lower().startswith('y'): skip_adding_public = True else: # Default to NOT adding PUBLIC if operating # without a TTY. logger.warn('skipping requested change to add PUBLIC to the authorized users list. Rerun "dx build" interactively or pass --yes to confirm this change.') skip_adding_public = True else: logger.warn(acl_change_message) if skip_adding_public: authorized_users_to_add -= {'PUBLIC'} if authorized_users_to_add: dxpy.api.app_add_authorized_users(app_id, input_params={'authorizedUsers': list(authorized_users_to_add)}) if skip_adding_public: logger.warn('the app was NOT made public as requested in the dxapp.json. To make it so, run "dx add users app-%s PUBLIC".' % (app_spec["name"],)) if authorized_users_to_remove: dxpy.api.app_remove_authorized_users(app_id, input_params={'authorizedUsers': list(authorized_users_to_remove)}) elif not len(existing_authorized_users): # Apps that had authorized users added by any other means will # not have this message printed. logger.warn('authorizedUsers is missing from the dxapp.json. No one will be able to view or run the app except the app\'s developers.') if publish: dxpy.api.app_publish(app_id, input_params={'makeDefault': set_default}) else: # If no versions of this app have ever been published, then # we'll set the "default" tag to point to the latest # (unpublished) version. no_published_versions = len(list(dxpy.find_apps(name=applet_name, published=True, limit=1))) == 0 if no_published_versions: dxpy.api.app_add_tags(app_id, input_params={'tags': ['default']}) return app_id
KeyboardInterrupt
dataset/ETHPy150Open dnanexus/dx-toolkit/src/python/dxpy/app_builder.py/create_app
def consume_incoming(self, *args, **kwargs): consumed = 0 for m in self.spider_log_consumer.get_messages(timeout=1.0, count=self.consumer_batch_size): try: msg = self._decoder.decode(m) except (KeyError, TypeError), e: logger.error("Decoding error: %s", e) continue else: type = msg[0] if type == 'add_seeds': _, seeds = msg logger.info('Adding %i seeds', len(seeds)) for seed in seeds: logger.debug('URL: %s', seed.url) self._backend.add_seeds(seeds) if type == 'page_crawled': _, response, links = msg logger.debug("Page crawled %s", response.url) if response.meta['jid'] != self.job_id: continue self._backend.page_crawled(response, links) if type == 'request_error': _, request, error = msg if request.meta['jid'] != self.job_id: continue logger.info("Request error %s", request.url) self._backend.request_error(request, error) if type == 'offset': _, partition_id, offset = msg try: producer_offset = self.spider_feed_producer.get_offset(partition_id) except __HOLE__: continue else: lag = producer_offset - offset if lag < 0: # non-sense in general, happens when SW is restarted and not synced yet with Spiders. continue if lag < self.max_next_requests or offset == 0: self.spider_feed.mark_ready(partition_id) else: self.spider_feed.mark_busy(partition_id) finally: consumed += 1 """ # TODO: Think how it should be implemented in DB-worker only mode. if not self.strategy_enabled and self._backend.finished(): logger.info("Crawling is finished.") reactor.stop() """ logger.info("Consumed %d items.", consumed) self.stats['last_consumed'] = consumed self.stats['last_consumption_run'] = asctime() self.slot.schedule() return consumed
KeyError
dataset/ETHPy150Open scrapinghub/frontera/frontera/worker/db.py/DBWorker.consume_incoming
def consume_scoring(self, *args, **kwargs): consumed = 0 seen = set() batch = [] for m in self.scoring_log_consumer.get_messages(count=self.consumer_batch_size): try: msg = self._decoder.decode(m) except (__HOLE__, TypeError), e: logger.error("Decoding error: %s", e) continue else: if msg[0] == 'update_score': _, fprint, score, url, schedule = msg if fprint not in seen: batch.append((fprint, score, Request(url), schedule)) seen.add(fprint) if msg[0] == 'new_job_id': self.job_id = msg[1] finally: consumed += 1 self.queue.schedule(batch) logger.info("Consumed %d items during scoring consumption.", consumed) self.stats['last_consumed_scoring'] = consumed self.stats['last_consumption_run_scoring'] = asctime() self.slot.schedule()
KeyError
dataset/ETHPy150Open scrapinghub/frontera/frontera/worker/db.py/DBWorker.consume_scoring
def has_module(module_name): try: import imp imp.find_module(module_name) del imp return True except __HOLE__: return False
ImportError
dataset/ETHPy150Open tony/tmuxp/bootstrap_env.py/has_module
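The imp module used above is deprecated and removed in Python 3.12; on modern Python the same check is usually written with importlib. A sketch under that assumption; note that find_spec can itself raise ModuleNotFoundError for dotted names whose parent package is missing:

import importlib.util

def has_module(module_name):
    # find_spec returns None when the module cannot be located.
    return importlib.util.find_spec(module_name) is not None

has_module('os')  # True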
def pca(x, subtract_mean=False, normalize=False, sort_components=True, reducedim=None, algorithm=pca_eig): """ Calculate principal component analysis (PCA) Parameters ---------- x : array-like, shape = [n_trials, n_channels, n_samples] or [n_channels, n_samples] EEG data set subtract_mean : bool, optional Subtract sample mean from x. normalize : bool, optional Normalize variances to 1 before applying PCA. sort_components : bool, optional Sort principal components in order of decreasing eigenvalues. reducedim : {float, int}, optional A number less than 1 is interpreted as the fraction of variance that should remain in the data. All components that describe in total less than `1-reducedim` of the variance are removed by the PCA step. An integer number of 1 or greater is interpreted as the number of components to keep after applying the PCA. algorithm : func, optional Specify function to use for eigenvalue decomposition (:func:`pca_eig` or :func:`pca_svd`) Returns ------- w : array, shape = [n_channels, n_components] PCA transformation matrix v : array, shape = [n_components, n_channels] PCA backtransformation matrix """ x = np.asarray(x) if x.ndim > 2: x = cat_trials(x) if reducedim: sort_components = True if subtract_mean: x = x - np.mean(x, axis=1, keepdims=True) k, l = None, None if normalize: l = np.std(x, axis=1, ddof=1) k = np.diag(1.0 / l) l = np.diag(l) x = np.dot(k, x) w, latent = algorithm(x) #print(w.shape, k.shape) #v = np.linalg.inv(w) # PCA is just a rotation, so the inverse equals the transpose... v = w.T if normalize: w = np.dot(k, w) v = np.dot(v, l) latent /= sum(latent) if sort_components: order = np.argsort(latent)[::-1] w = w[:, order] v = v[order, :] latent = latent[order] if reducedim and reducedim < 1: selected = np.nonzero(np.cumsum(latent) < reducedim)[0] try: selected = np.concatenate([selected, [selected[-1] + 1]]) except __HOLE__: selected = [0] if selected[-1] >= w.shape[1]: selected = selected[0:-1] w = w[:, selected] v = v[selected, :] if reducedim and reducedim >= 1: w = w[:, np.arange(reducedim)] v = v[np.arange(reducedim), :] return w, v
IndexError
dataset/ETHPy150Open scot-dev/scot/scot/pca.py/pca
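A short usage sketch for pca above, assuming the scot package is importable and that, per the documented shapes, components are extracted with w.T and back-projected with v.T (both are assumptions about the intended conventions, not confirmed by the source):

import numpy as np
from scot.pca import pca  # assumed import path, per the info line above

rng = np.random.RandomState(0)
x = rng.randn(3, 1000)              # 3 channels, 1000 samples
w, v = pca(x, subtract_mean=True, reducedim=0.95)
components = np.dot(w.T, x)         # [n_components, n_samples]
restored = np.dot(v.T, components)  # approximate reconstruction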
def draw_networkx_nodes(G, pos, nodelist=None, node_size=300, node_color='r', node_shape='o', alpha=1.0, cmap=None, vmin=None, vmax=None, ax=None, linewidth=None, zorder=None, **kwds): """Draw the nodes of the graph G. This draws only the nodes of the graph G. Parameters ---------- G : graph A networkx graph pos : dictionary A dictionary with nodes as keys and positions as values. If not specified a spring layout positioning will be computed. See networkx.layout for functions that compute node positions. ax : Matplotlib Axes object, optional Draw the graph in the specified Matplotlib axes. nodelist: list, optional Draw only specified nodes (default G.nodes()) edgelist: list Draw only specified edges (default=G.edges()) node_size: scalar or array Size of nodes (default=300). If an array is specified it must be the same length as nodelist. node_color: color string, or array of floats Node color. Can be a single color format string (default='r'), or a sequence of colors with the same length as nodelist. If numeric values are specified they will be mapped to colors using the cmap and vmin,vmax parameters. See matplotlib.scatter for more details. node_shape: string The shape of the node. Specification is as matplotlib.scatter marker, one of 'so^>v<dph8' (default='o'). alpha: float The node transparency (default=1.0) cmap: Matplotlib colormap Colormap for mapping intensities of nodes (default=None) vmin,vmax: floats Minimum and maximum for node colormap scaling (default=None) width: float Line width of edges (default=1.0) Notes ----- Any keywords not listed above are passed through to Matplotlib's scatter function. Examples -------- >>> G=nx.dodecahedral_graph() >>> nodes=nx.draw_networkx_nodes(G,pos=nx.spring_layout(G)) Also see the NetworkX drawing examples at http://networkx.lanl.gov/gallery.html See Also -------- draw() draw_networkx() draw_networkx_edges() draw_networkx_labels() draw_networkx_edge_labels() """ try: import matplotlib.pylab as pylab import numpy except __HOLE__: raise ImportError("Matplotlib required for draw()") except RuntimeError: print("Matplotlib unable to open display") raise if zorder is None: zorder = 2 if ax is None: ax=pylab.gca() if nodelist is None: nodelist=G.nodes() if not nodelist or len(nodelist)==0: # empty nodelist, no drawing return None try: xy=numpy.asarray([pos[v] for v in nodelist]) except KeyError as e: raise nx.NetworkXError('Node %s has no position.'%e) except ValueError: raise nx.NetworkXError('Bad value in node positions.') syms = { # a dict from symbol to (numsides, angle) 's' : (4,math.pi/4.0,0), # square 'o' : (0,0,3), # circle '^' : (3,0,0), # triangle up '>' : (3,math.pi/2.0,0), # triangle right 'v' : (3,math.pi,0), # triangle down '<' : (3,3*math.pi/2.0,0), # triangle left 'd' : (4,0,0), # diamond 'p' : (5,0,0), # pentagram 'h' : (6,0,0), # hexagon '8' : (8,0,0), # octagon '+' : (4,0,0), # plus 'x' : (4,math.pi/4.0,0) # cross } temp_x = [p[0] for p in list(pos.values())] temp_y = [p[1] for p in list(pos.values())] minx = np.amin(temp_x) maxx = np.amax(temp_x) miny = np.amin(temp_y) maxy = np.amax(temp_y) w = max(maxx-minx,1.0) h = max(maxy-miny,1.0) #for scaling area2radius = lambda a: math.sqrt((a*w*h)/(ax.figure.get_figheight()*ax.figure.get_figwidth()*ax.figure.dpi*ax.figure.dpi*math.pi*.75*.75)) if cb.iterable(node_size): try: vals = list(node_size.values()) except: vals = node_size node_size = dict(list(zip(nodelist,list(map(area2radius,vals))))) else: node_size = {}.fromkeys(nodelist,area2radius(node_size)) for n in node_size: if node_size[n] == 0.0: node_size[n] = .00001 if cmap is None: cmap = cm.get_cmap(mpl.rcParams['image.cmap']) n_colors = get_color_dict(node_color,nodelist,vmin,vmax,cmap) sym = syms[node_shape] numsides,rotation,symstyle=syms[node_shape] node_patches = {} for n in nodelist: if symstyle==0: node_patches[n] = patches.RegularPolygon(pos[n], numsides, orientation=rotation, radius=node_size[n], facecolor=n_colors[n], edgecolor='k', alpha=alpha, linewidth=linewidth, transform=ax.transData, zorder=zorder) elif symstyle==3: node_patches[n] = patches.Circle(pos[n], radius=node_size[n], facecolor=n_colors[n], edgecolor='k', alpha=alpha, linewidth=linewidth, transform=ax.transData, zorder=zorder) ax.add_patch(node_patches[n]) # the pad is a little hack to deal with the fact that we don't # want to transform all the symbols whose scales are in points # to data coords to get the exact bounding box for efficiency # reasons. It can be done right if this is deemed important temp_x = xy[:,0] temp_y = xy[:,1] minx = np.amin(temp_x) maxx = np.amax(temp_x) miny = np.amin(temp_y) maxy = np.amax(temp_y) w = maxx-minx h = maxy-miny padx, pady = 0.05*w, 0.05*h corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady) ax.update_datalim(corners) # ax.autoscale() ax.autoscale_view() ax.set_aspect('equal') # pylab.axes(ax) #pylab.sci(node_collection) #node_collection.set_zorder(2) return node_patches
ImportError
dataset/ETHPy150Open calliope-project/calliope/calliope/lib/nx_pylab.py/draw_networkx_nodes
def draw_networkx_edges(G, pos, node_patches=None, edgelist=None, width=None, edge_color=None, style='solid', alpha=None, edge_cmap=None, edge_vmin=None, edge_vmax=None, ax=None, arrows=True, arrow_style=None, connection_style='arc3', color_weights=False, width_weights=False, **kwds): """Draw the edges of the graph G This draws only the edges of the graph G. Parameters ---------- G : graph A networkx graph pos : dictionary A dictionary with nodes as keys and positions as values. If not specified a spring layout positioning will be computed. See networkx.layout for functions that compute node positions. ax : Matplotlib Axes object, optional Draw the graph in the specified Matplotlib axes. alpha: float The edge transparency (default=1.0) width: float Line width of edges (default=1.0) edge_color: color string, or array of floats Edge color. Can be a single color format string (default='r'), or a sequence of colors with the same length as edgelist. If numeric values are specified they will be mapped to colors using the edge_cmap and edge_vmin,edge_vmax parameters. edge_cmap: Matplotlib colormap Colormap for mapping intensities of edges (default=None) edge_vmin,edge_vmax: floats Minimum and maximum for edge colormap scaling (default=None) style: string Edge line style (default='solid') (solid|dashed|dotted,dashdot) arrow: Bool Whether to draw arrows or not for directed graphs arrow_style: string Arrow style used by matplotlib see FancyArrowPatch connection_style: string Connection style used by matplotlib, see FancyArrowPatch color_weights: Bool Whether to color the edges of a graph by their weight if the graph has any. width_weights: Bool Whether to vary the thickness of an edge by their weight, if the graph has any. Examples -------- >>> G=nx.dodecahedral_graph() >>> edges=nx.draw_networkx_edges(G,pos=nx.spring_layout(G)) Also see the NetworkX drawing examples at http://networkx.lanl.gov/gallery.html See Also -------- draw() draw_networkx() draw_networkx_nodes() draw_networkx_labels() draw_networkx_edge_labels() """ try: import matplotlib import matplotlib.pylab as pylab import matplotlib.cbook as cb from matplotlib.colors import colorConverter,Colormap from matplotlib.collections import LineCollection import numpy except ImportError: raise ImportError("Matplotlib required for draw()") except __HOLE__: print("Matplotlib unable to open display") raise if ax is None: ax=pylab.gca() if edgelist is None: edgelist=G.edges() if not edgelist or len(edgelist)==0: # no edges! return None # set edge positions edge_pos=numpy.asarray([(pos[e[0]],pos[e[1]]) for e in edgelist]) if width is None and width_weights and is_weighted(G): lw = edge_width_weight(G,edgelist) if alpha is None: alpha = 0.75 elif width is None: lw = {}.fromkeys(edgelist,1.0) elif cb.iterable(width): try: lwvals = list(width.values()) except: lwvals = width lw = dict(list(zip(edgelist,lwvals))) elif cb.is_scalar(width): lw = {}.fromkeys(edgelist,width) else: raise nx.NetworkXError("Must provide a single scalar value or a list \ of values for line width or None") if edge_cmap is None: edge_cmap = cm.get_cmap(mpl.rcParams['image.cmap']) if edge_color is None and color_weights and is_weighted(G): edge_color = edge_color_weight(G,edgelist) if alpha is None: alpha = 0.75 elif edge_color is None: edge_color = 'k' e_colors = get_color_dict(edge_color,edgelist,edge_vmin,edge_vmax,edge_cmap) edge_patches = {} if arrow_style is None: if G.is_directed(): arrow_style = '-|>' else: arrow_style = '-' if node_patches is None: node_patches = {}.fromkeys(G.nodes(),None) for (u,v) in edgelist: edge_patches[(u,v)] = patches.FancyArrowPatch(posA=pos[u], posB=pos[v], arrowstyle=arrow_style, connectionstyle=connection_style, patchA=node_patches[u], patchB=node_patches[v], shrinkA=0.0, shrinkB=0.0, mutation_scale=20.0, alpha=alpha, color=e_colors[(u,v)], lw = lw[(u,v)], linestyle=style, zorder=1) ax.add_patch(edge_patches[(u,v)]) # update view minx = numpy.amin(numpy.ravel(edge_pos[:,:,0])) maxx = numpy.amax(numpy.ravel(edge_pos[:,:,0])) miny = numpy.amin(numpy.ravel(edge_pos[:,:,1])) maxy = numpy.amax(numpy.ravel(edge_pos[:,:,1])) w = maxx-minx h = maxy-miny padx, pady = 0.05*w, 0.05*h corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady) ax.update_datalim( corners) ax.autoscale_view() return edge_patches
RuntimeError
dataset/ETHPy150Open calliope-project/calliope/calliope/lib/nx_pylab.py/draw_networkx_edges
def main(): init_app(set_backends=True, routes=False) # drop all styles CitationStyle.remove() total = 0 for style_file in get_style_files(settings.CITATION_STYLES_PATH): with open(style_file, 'r') as f: try: root = etree.parse(f).getroot() except etree.XMLSyntaxError: continue total += 1 namespace = root.nsmap.get(None) selector = '{{{ns}}}info/{{{ns}}}'.format(ns=namespace) # Required fields = { '_id': os.path.splitext(os.path.basename(style_file))[0], 'title': root.find(selector + 'title').text, } # Optional try: fields['short_title'] = root.find(selector + "title-short").text except __HOLE__: pass try: fields['summary'] = root.find(selector + 'summary').text except AttributeError: pass style = CitationStyle(**fields) style.save() return total
AttributeError
dataset/ETHPy150Open CenterForOpenScience/osf.io/scripts/parse_citation_styles.py/main
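The two try/except blocks above exploit the fact that Element.find() returns None for a missing element, so accessing .text raises AttributeError. A compact, self-contained sketch of that optional-field pattern, with optional_text as a hypothetical helper:

from lxml import etree

def optional_text(root, selector, name):
    # find() yields None when absent; None.text raises AttributeError.
    try:
        return root.find(selector + name).text
    except AttributeError:
        return None

root = etree.fromstring('<info><title>Example</title></info>')
optional_text(root, '', 'title')        # 'Example'
optional_text(root, '', 'title-short')  # None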
def validate(self, attrs): ''' 1. Disallow starts-with, which complicates validation, and is unnecessary for file uploading 2. Require that required_conditions are present 3. Make sure conditions are in required_conditions or optional_conditions 4. Invoke super, which checks for duplicate keys and invokes the validate_condition_<element_name> methods ''' conditions = attrs.get('conditions', []) errors = {} missing_conditions = set(self.required_conditions) - set([item.element_name for item in conditions]) for element_name in missing_conditions: message = _('Required condition is missing') errors['conditions.' + element_name] = [message] for item in conditions: field_name = 'conditions.' + item.element_name if item.operator and item.operator != 'eq': message = _("starts-with and operators other than 'eq' are not allowed") errors[field_name] = errors.get(field_name, []) + [message] elif item.element_name not in self.required_conditions + self.optional_conditions: message = _('Invalid element name') errors[field_name] = errors.get(field_name, []) + [message] try: super(DefaultPolicySerializer, self).validate(attrs) except __HOLE__ as err: # Merge with our errors for field_name, error_messages in err.message_dict.items(): errors[field_name] = errors.get(field_name, []) + list(error_messages) if len(errors): raise ValidationError(errors) else: return attrs
ValidationError
dataset/ETHPy150Open bodylabs/drf-to-s3/drf_to_s3/serializers.py/DefaultPolicySerializer.validate
def testError(self): failed = False try: bam_innerdist(testbam3, testbam4) except __HOLE__: failed = True self.assertTrue(failed)
ValueError
dataset/ETHPy150Open ngsutils/ngsutils/ngsutils/bam/t/test_innerdist.py/InnerDistTest.testError
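The flag-based test above predates wide use of unittest's context manager; the same assertion is usually written more directly. A sketch that relies on the same fixtures (testbam3, testbam4) defined in that test module:

def testError(self):
    # assertRaises fails the test unless ValueError is raised.
    with self.assertRaises(ValueError):
        bam_innerdist(testbam3, testbam4)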
def add_db(source_socket, destination_socket, source_db, destination_db): src = redis_conn(source_socket, source_db) dest = redis_conn(destination_socket, destination_db) keys = src.keys() for key in keys: key_type = src.type(key) if key_type == "string": value = src.get(key) if dest.exists(key): try: float_val = float(value) incr_by_float(dest, key, float_val) except ValueError: raise NotANumber(key, value) else: dest.set(key, value) elif key_type == "set": value = src.smembers(key) for sval in value: dest.sadd(key, sval) elif key_type == "hash": value = src.hgetall(key) if dest.exists(key): for hkey, hval in value.iteritems(): if dest.hexists(key, hkey): try: float_val = float(hval) hincr_by_float(dest, key, hkey, float_val) except __HOLE__: raise NotANumber("->".join([key, hkey]), hval) else: dest.hset(key, hkey, hval) else: for hkey, hval in value.iteritems(): dest.hset(key, hkey, hval) else: raise UnsupportedKeyType(key_type, key)
ValueError
dataset/ETHPy150Open practo/r5d4/scripts/add_keys.py/add_db
def check(): try: check_call(['initctl', '--version'], stdout=PIPE) return True except __HOLE__ as err: if err.errno == 2: return False raise
OSError
dataset/ETHPy150Open Anaconda-Platform/chalmers/chalmers/service/upstart_service.py/check
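The errno == 2 check above is ENOENT from the failed exec; on Python 3 the probe is often done up front with shutil.which instead. A minimal alternative sketch, not the package's actual implementation:

import shutil
from subprocess import PIPE, check_call

def check():
    # Bail out early if initctl is not on PATH (the errno 2 case).
    if shutil.which('initctl') is None:
        return False
    check_call(['initctl', '--version'], stdout=PIPE)
    return True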
def _run_command(function, args, kwargs, exit_msg): success = False try: log.debug("Running %s command...", getattr(function, '__name__', 'a')) success = function(*args, **kwargs) except __HOLE__: log.debug("Command canceled") exit_msg = "" except RuntimeError as exc: exit_msg = str(exc) + exit_msg else: exit_msg = "" if success: log.debug("Command succeeded") else: log.debug("Command failed") sys.exit(exit_msg or 1)
KeyboardInterrupt
dataset/ETHPy150Open jacebrowning/gitman/gitman/cli.py/_run_command
def machine_setup(target, use_chroot): """Prepare the machine and get SSH parameters from ``vagrant ssh``. """ try: out = subprocess.check_output(['vagrant', 'ssh-config'], cwd=target.path, stderr=subprocess.PIPE) except subprocess.CalledProcessError: # Makes sure the VM is running logging.info("Calling 'vagrant up'...") try: retcode = subprocess.check_call(['vagrant', 'up'], cwd=target.path) except __HOLE__: logging.critical("vagrant executable not found") sys.exit(1) else: if retcode != 0: logging.critical("vagrant up failed with code %d", retcode) sys.exit(1) # Try again out = subprocess.check_output(['vagrant', 'ssh-config'], cwd=target.path) vagrant_info = {} for line in out.split(b'\n'): line = line.strip().split(b' ', 1) if len(line) != 2: continue value = line[1].decode('utf-8') if len(value) >= 2 and value[0] == '"' and value[-1] == '"': # Vagrant should really be escaping special characters here, but # it's not -- https://github.com/mitchellh/vagrant/issues/6428 value = value[1:-1] vagrant_info[line[0].decode('utf-8').lower()] = value if 'identityfile' in vagrant_info: key_file = vagrant_info['identityfile'] else: key_file = Path('~/.vagrant.d/insecure_private_key').expand_user() info = dict(hostname=vagrant_info.get('hostname', '127.0.0.1'), port=int(vagrant_info.get('port', 2222)), username=vagrant_info.get('user', 'vagrant'), key_filename=key_file) logging.debug("SSH parameters from Vagrant: %s@%s:%s, key=%s", info['username'], info['hostname'], info['port'], info['key_filename']) if use_chroot: # Mount directories ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(IgnoreMissingKey()) ssh.connect(**info) chan = ssh.get_transport().open_session() chan.exec_command( '/usr/bin/sudo /bin/sh -c %s' % shell_escape( 'for i in dev proc; do ' 'if ! grep "^/experimentroot/$i$" /proc/mounts; then ' 'mount -o rbind /$i /experimentroot/$i; ' 'fi; ' 'done')) if chan.recv_exit_status() != 0: logging.critical("Couldn't mount directories in chroot") sys.exit(1) ssh.close() return info
OSError
dataset/ETHPy150Open ViDA-NYU/reprozip/reprounzip-vagrant/reprounzip/unpackers/vagrant/__init__.py/machine_setup
def vagrant_setup_create(args): """Sets up the experiment to be run in a Vagrant-built virtual machine. This can either build a chroot or not. If building a chroot, we do just like without Vagrant: we copy all the files and only get what's missing from the host. But we do install automatically the packages whose files are required. If not building a chroot, we install all the packages, and only unpack files that don't come from packages. In short: files from packages with packfiles=True will only be used if building a chroot. """ if not args.pack: logging.critical("setup/create needs the pack filename") sys.exit(1) pack = Path(args.pack[0]) target = Path(args.target[0]) if target.exists(): logging.critical("Target directory exists") sys.exit(1) use_chroot = args.use_chroot mount_bind = args.bind_magic_dirs record_usage(use_chroot=use_chroot, mount_bind=mount_bind) signals.pre_setup(target=target, pack=pack) # Unpacks configuration file rpz_pack = RPZPack(pack) rpz_pack.extract_config(target / 'config.yml') # Loads config runs, packages, other_files = config = load_config(target / 'config.yml', True) if not args.memory: memory = None else: try: memory = int(args.memory[-1]) except ValueError: logging.critical("Invalid value for memory size: %r", args.memory) sys.exit(1) if args.base_image and args.base_image[0]: record_usage(vagrant_explicit_image=True) box = args.base_image[0] if args.distribution: target_distribution = args.distribution[0] else: target_distribution = None else: target_distribution, box = select_box(runs) logging.info("Using box %s", box) logging.debug("Distribution: %s", target_distribution or "unknown") # If using chroot, we might still need to install packages to get missing # (not packed) files if use_chroot: packages = [pkg for pkg in packages if not pkg.packfiles] if packages: record_usage(vagrant_install_pkgs=True) logging.info("Some packages were not packed, so we'll install and " "copy their files\n" "Packages that are missing:\n%s", ' '.join(pkg.name for pkg in packages)) if packages: try: installer = select_installer(pack, runs, target_distribution) except CantFindInstaller as e: logging.error("Need to install %d packages but couldn't select a " "package installer: %s", len(packages), e) target.mkdir(parents=True) try: # Writes setup script logging.info("Writing setup script %s...", target / 'setup.sh') with (target / 'setup.sh').open('w', encoding='utf-8', newline='\n') as fp: fp.write('#!/bin/sh\n\nset -e\n\n') if packages: # Updates package sources fp.write(installer.update_script()) fp.write('\n') # Installs necessary packages fp.write(installer.install_script(packages)) fp.write('\n') # TODO : Compare package versions (painful because of sh) # Untar if use_chroot: fp.write('\n' 'mkdir /experimentroot; cd /experimentroot\n') fp.write('tar zpxf /vagrant/data.tgz --numeric-owner ' '--strip=1 %s\n' % rpz_pack.data_prefix) if mount_bind: fp.write('\n' 'mkdir -p /experimentroot/dev\n' 'mkdir -p /experimentroot/proc\n') for pkg in packages: fp.write('\n# Copies files from package %s\n' % pkg.name) for f in pkg.files: f = f.path dest = join_root(PosixPath('/experimentroot'), f) fp.write('mkdir -p %s\n' % shell_escape(unicode_(f.parent))) fp.write('cp -L %s %s\n' % ( shell_escape(unicode_(f)), shell_escape(unicode_(dest)))) else: fp.write('\ncd /\n') paths = set() pathlist = [] # Adds intermediate directories, and checks for existence in # the tar for f in other_files: path = PosixPath('/') for c in rpz_pack.remove_data_prefix(f.path).components: path = path / c if path in paths: continue paths.add(path) try: rpz_pack.get_data(path) except __HOLE__: logging.info("Missing file %s", path) else: pathlist.append(path) # FIXME : for some reason we need reversed() here, I'm not sure # why. Need to read more of tar's docs. # TAR bug: --no-overwrite-dir removes --keep-old-files # TAR bug: there is no way to make --keep-old-files not report # an error if an existing file is encountered. --skip-old-files # was introduced too recently. Instead, we just ignore the exit # status with (target / 'rpz-files.list').open('wb') as lfp: for p in reversed(pathlist): lfp.write(join_root(rpz_pack.data_prefix, p).path) lfp.write(b'\0') fp.write('tar zpxf /vagrant/data.tgz --keep-old-files ' '--numeric-owner --strip=1 ' '--null -T /vagrant/rpz-files.list || /bin/true\n') # Copies busybox if use_chroot: arch = runs[0]['architecture'] download_file(busybox_url(arch), target / 'busybox', 'busybox-%s' % arch) fp.write(r''' cp /vagrant/busybox /experimentroot/busybox chmod +x /experimentroot/busybox mkdir -p /experimentroot/bin [ -e /experimentroot/bin/sh ] || \ ln -s /busybox /experimentroot/bin/sh ''') # Copies pack logging.info("Copying pack file...") rpz_pack.copy_data_tar(target / 'data.tgz') rpz_pack.close() # Writes Vagrant file logging.info("Writing %s...", target / 'Vagrantfile') with (target / 'Vagrantfile').open('w', encoding='utf-8', newline='\n') as fp: # Vagrant header and version fp.write( '# -*- mode: ruby -*-\n' '# vi: set ft=ruby\n\n' 'VAGRANTFILE_API_VERSION = "2"\n\n' 'Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|\n') # Selects which box to install fp.write(' config.vm.box = "%s"\n' % box) # Run the setup script on the virtual machine fp.write(' config.vm.provision "shell", path: "setup.sh"\n') # Memory size if memory is not None: fp.write(' config.vm.provider "virtualbox" do |v|\n' ' v.memory = %d\n' ' end\n' % memory) fp.write('end\n') # Meta-data for reprounzip write_dict(target, metadata_initial_iofiles(config, {'use_chroot': use_chroot})) signals.post_setup(target=target, pack=pack) except Exception: target.rmtree(ignore_errors=True) raise
KeyError
dataset/ETHPy150Open ViDA-NYU/reprozip/reprounzip-vagrant/reprounzip/unpackers/vagrant/__init__.py/vagrant_setup_create
def upload_file(self, local_path, input_path): if self.use_chroot: remote_path = join_root(PosixPath('/experimentroot'), input_path) else: remote_path = input_path temp = make_unique_name(b'reprozip_input_') ltemp = self.target / temp rtemp = PosixPath('/vagrant') / temp # Copy file to shared folder logging.info("Copying file to shared folder...") local_path.copyfile(ltemp) # Move it logging.info("Moving file into place...") chan = self.ssh.get_transport().open_session() chown_cmd = '/bin/chown --reference=%s %s' % ( shell_escape(remote_path.path), shell_escape(rtemp.path)) chmod_cmd = '/bin/chmod --reference=%s %s' % ( shell_escape(remote_path.path), shell_escape(rtemp.path)) mv_cmd = '/bin/mv %s %s' % ( shell_escape(rtemp.path), shell_escape(remote_path.path)) chan.exec_command('/usr/bin/sudo /bin/sh -c %s' % shell_escape( ' && '.join((chown_cmd, chmod_cmd, mv_cmd)))) if chan.recv_exit_status() != 0: logging.critical("Couldn't move file in virtual machine") try: ltemp.remove() except __HOLE__: pass sys.exit(1) chan.close()
OSError
dataset/ETHPy150Open ViDA-NYU/reprozip/reprounzip-vagrant/reprounzip/unpackers/vagrant/__init__.py/SSHUploader.upload_file
def download(self, remote_path, local_path): if self.use_chroot: remote_path = join_root(PosixPath('/experimentroot'), remote_path) temp = make_unique_name(b'reprozip_output_') rtemp = PosixPath('/vagrant') / temp ltemp = self.target / temp # Copy file to shared folder logging.info("Copying file to shared folder...") chan = self.ssh.get_transport().open_session() cp_cmd = '/bin/cp %s %s' % ( shell_escape(remote_path.path), shell_escape(rtemp.path)) chown_cmd = '/bin/chown vagrant %s' % shell_escape(rtemp.path) chmod_cmd = '/bin/chmod 644 %s' % shell_escape(rtemp.path) chan.exec_command('/usr/bin/sudo /bin/sh -c %s' % shell_escape( ' && '.join((cp_cmd, chown_cmd, chmod_cmd)))) if chan.recv_exit_status() != 0: logging.critical("Couldn't copy file in virtual machine") try: ltemp.remove() except OSError: pass return False # Move file to final destination try: ltemp.rename(local_path) except __HOLE__ as e: logging.critical("Couldn't download output file: %s\n%s", remote_path, str(e)) ltemp.remove() return False return True
OSError
dataset/ETHPy150Open ViDA-NYU/reprozip/reprounzip-vagrant/reprounzip/unpackers/vagrant/__init__.py/SSHDownloader.download
def check_vagrant_version(): try: out = subprocess.check_output(['vagrant', '--version']) except (subprocess.CalledProcessError, __HOLE__): logging.error("Couldn't run vagrant") sys.exit(1) out = out.decode('ascii').strip().lower().split() if out[0] == 'vagrant': if LooseVersion(out[1]) < LooseVersion('1.1'): logging.error("Vagrant >=1.1 is required; detected version: %s", out[1]) sys.exit(1) else: logging.error("Vagrant >=1.1 is required") sys.exit(1)
OSError
dataset/ETHPy150Open ViDA-NYU/reprozip/reprounzip-vagrant/reprounzip/unpackers/vagrant/__init__.py/check_vagrant_version
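For reference, the guard above reduces to a LooseVersion comparison; distutils is deprecated since Python 3.10, where packaging.version.Version is the usual substitute. A short sketch of both:

from distutils.version import LooseVersion

assert LooseVersion('1.7.4') >= LooseVersion('1.1')

# Modern equivalent (requires the third-party 'packaging' package):
# from packaging.version import Version
# assert Version('1.7.4') >= Version('1.1')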
@classmethod def _size_selector(cls, possible_sizes): """Select the maximum size, utility function for adding borders. The number two is used so that the edges of a column have spaces around them (instead of being right next to a column separator). :param possible_sizes: possible sizes available :returns: maximum size :rtype: number """ try: return max(x + 2 for x in possible_sizes) except __HOLE__: return 0
ValueError
dataset/ETHPy150Open openstack/taskflow/taskflow/types/table.py/PleasantTable._size_selector
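Since Python 3.4, max() accepts a default for empty iterables, which removes the ValueError handling above entirely. A modernized sketch, with size_selector as a hypothetical standalone name:

def size_selector(possible_sizes):
    # default=0 covers the empty-iterable case without try/except.
    return max((x + 2 for x in possible_sizes), default=0)

size_selector([3, 7])  # 9
size_selector([])      # 0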
def __exit__(self, exc_type, exc_val, exc_tb): """ If an exception occurred, we leave the file for inspection. """ sys.path.remove(self.tempdir) if exc_type is None: # No exception occurred os.remove(self.tempdir + 'test_imports_future_stdlib.py') try: os.remove(self.tempdir + 'test_imports_future_stdlib.pyc') except __HOLE__: pass
OSError
dataset/ETHPy150Open PythonCharmers/python-future/tests/test_future/test_requests.py/write_module.__exit__
def test_remove_hooks_then_requests(self): code = """ from future import standard_library standard_library.install_hooks() import builtins import http.client import html.parser """ with write_module(code, self.tempdir): import test_imports_future_stdlib standard_library.remove_hooks() try: import requests except __HOLE__: print("Requests doesn't seem to be available. Skipping requests test ...") else: r = requests.get('http://google.com') self.assertTrue(r) self.assertTrue(True)
ImportError
dataset/ETHPy150Open PythonCharmers/python-future/tests/test_future/test_requests.py/TestRequests.test_remove_hooks_then_requests
def test_requests_cm(self): """ Tests whether requests can be used importing standard_library modules previously with the hooks context manager """ code = """ from future import standard_library with standard_library.hooks(): import builtins import html.parser import http.client """ with write_module(code, self.tempdir): import test_imports_future_stdlib try: import requests except __HOLE__: print("Requests doesn't seem to be available. Skipping requests test ...") else: r = requests.get('http://google.com') self.assertTrue(r) self.assertTrue(True)
ImportError
dataset/ETHPy150Open PythonCharmers/python-future/tests/test_future/test_requests.py/TestRequests.test_requests_cm
def get_os_by_image_meta(self, os_release): LOG.debug('--- Getting operating system data by image metadata ---') if os_release: LOG.debug('Looks like {0} is going to be provisioned'. format(os_release)) try: OS = getattr(objects, os_release['name']) os = OS(repos=None, packages=None, major=os_release['major'], minor=os_release['minor']) return os except (__HOLE__, KeyError): LOG.warning('Cannot guess operating system release ' 'from image metadata')
AttributeError
dataset/ETHPy150Open openstack/bareon/bareon/drivers/data/nailgun.py/Nailgun.get_os_by_image_meta
def bind_links(self, links, sources=None, position=None): """ Associate links with a model or a view inside this menu """ try: for source in sources: self._map_links_to_source( links=links, position=position, source=source ) except __HOLE__: # Unsourced links display always self._map_links_to_source( links=links, position=position, source=sources )
TypeError
dataset/ETHPy150Open mayan-edms/mayan-edms/mayan/apps/navigation/classes.py/Menu.bind_links
def resolve(self, context, source=None): try: request = Variable('request').resolve(context) except VariableDoesNotExist: # There is no request variable, most probably a 500 in a test view # Don't return any resolved links then. logger.warning('No request variable, aborting menu resolution') return () current_path = request.META['PATH_INFO'] # Get sources: view name, view objects current_view = resolve(current_path).view_name resolved_navigation_object_list = [] result = [] if source: resolved_navigation_object_list = [source] else: navigation_object_list = context.get( 'navigation_object_list', ('object',) ) # Multiple objects for navigation_object in navigation_object_list: try: resolved_navigation_object_list.append( Variable(navigation_object).resolve(context) ) except VariableDoesNotExist: pass for resolved_navigation_object in resolved_navigation_object_list: resolved_links = [] for bound_source, links in self.bound_links.iteritems(): try: if inspect.isclass(bound_source) and type(resolved_navigation_object) == bound_source: for link in links: resolved_link = link.resolve( context=context, resolved_object=resolved_navigation_object ) if resolved_link: resolved_links.append(resolved_link) # No need for further content object match testing break except __HOLE__: # When source is a dictionary pass if resolved_links: result.append(resolved_links) resolved_links = [] # View links for link in self.bound_links.get(current_view, []): resolved_link = link.resolve(context) if resolved_link: resolved_links.append(resolved_link) if resolved_links: result.append(resolved_links) resolved_links = [] # Main menu links for link in self.bound_links.get(None, []): resolved_link = link.resolve(context) if resolved_link: resolved_links.append(resolved_link) if resolved_links: result.append(resolved_links) if result: unbound_links = [] unbound_links.extend(self.unbound_links.get(source, ())) unbound_links.extend(self.unbound_links.get(current_view, ())) for resolved_link in result[0]: if resolved_link.link in unbound_links: result[0].remove(resolved_link) # Sort links by position value passed during bind result[0] = sorted( result[0], key=lambda item: self.link_positions.get(item.link) ) return result
TypeError
dataset/ETHPy150Open mayan-edms/mayan-edms/mayan/apps/navigation/classes.py/Menu.resolve
def unbind_links(self, links, sources=None): """ Allow unbinding links from sources, used to allow 3rd party apps to change the link binding of core apps """ try: for source in sources: self._map_links_to_source( links=links, source=source, map_variable='unbound_links' ) except __HOLE__: # Unsourced links display always self._map_links_to_source( links=links, source=sources, map_variable='unbound_links' )
TypeError
dataset/ETHPy150Open mayan-edms/mayan-edms/mayan/apps/navigation/classes.py/Menu.unbind_links
def resolve(self, context, resolved_object=None): request = Variable('request').resolve(context) current_path = request.META['PATH_INFO'] current_view = resolve(current_path).view_name # ACL is tested against the resolved_object or just {{ object }} if not if not resolved_object: try: resolved_object = Variable('object').resolve(context=context) except VariableDoesNotExist: pass # If this link has a required permission, check that the user has it # too if self.permissions: try: Permission.check_permissions(request.user, self.permissions) except PermissionDenied: # If the user doesn't have the permission, and we are passed # an instance, check to see if the user has at least ACL # access to the instance. if resolved_object: try: AccessControlList.objects.check_access( self.permissions, request.user, resolved_object ) except PermissionDenied: return None else: return None # Check to see if link has conditional display function and only # display it if the result of the conditional display function is # True if self.condition: if not self.condition(context): return None resolved_link = ResolvedLink(current_view=current_view, link=self) view_name = Variable('"{}"'.format(self.view)) if isinstance(self.args, list) or isinstance(self.args, tuple): # TODO: Don't check for instance check for iterable in try/except # block. This update required changing all 'args' argument in # links.py files to be iterables and not just strings. args = [Variable(arg) for arg in self.args] else: args = [Variable(self.args)] # If we were passed an instance of the view context object we are # resolving, inject it into the context. This helps resolve links for # object lists. if resolved_object: context['resolved_object'] = resolved_object try: kwargs = self.kwargs(context) except TypeError: # Is not a callable kwargs = self.kwargs kwargs = {key: Variable(value) for key, value in kwargs.iteritems()} # Use Django's exact {% url %} code to resolve the link node = URLNode( view_name=view_name, args=args, kwargs=kwargs, asvar=None ) try: resolved_link.url = node.render(context) except Exception as exception: logger.error( 'Error resolving link "%s" URL; %s', self.text, exception ) # This is for links that should be displayed but that are not clickable if self.conditional_disable: resolved_link.disabled = self.conditional_disable(context) else: resolved_link.disabled = False # Lets a new link keep the same URL query string of the current URL if self.keep_query: # Sometimes we are required to remove a key from the URL QS previous_path = smart_unicode( urllib.unquote_plus( smart_str( request.get_full_path() ) or smart_str( request.META.get( 'HTTP_REFERER', reverse(settings.LOGIN_REDIRECT_URL) ) ) ) ) query_string = urlparse.urlparse(previous_path).query parsed_query_string = urlparse.parse_qs(query_string) for key in self.remove_from_query: try: del parsed_query_string[key] except __HOLE__: pass resolved_link.url = '%s?%s' % ( urlquote(resolved_link.url), urlencode(parsed_query_string, doseq=True) ) return resolved_link
KeyError
dataset/ETHPy150Open mayan-edms/mayan-edms/mayan/apps/navigation/classes.py/Link.resolve
@classmethod
def get_for_source(cls, source):
    try:
        return cls._registry[source]
    except KeyError:
        try:
            return cls._registry[source.model]
        except AttributeError:
            try:
                return cls._registry[source.__class__]
            except __HOLE__:
                return ()
    except TypeError:
        # unhashable type: list
        return ()
KeyError
dataset/ETHPy150Open mayan-edms/mayan-edms/mayan/apps/navigation/classes.py/SourceColumn.get_for_source
def check_is_int(s):
    try:
        int(s)
    except __HOLE__:
        return False
    return True
ValueError
dataset/ETHPy150Open cloudify-cosmo/cloudify-manager/rest-service/manager_rest/deployment_update/utils.py/check_is_int
def _ifconfig_getnode():
    """Get the hardware address on Unix by running ifconfig."""
    import os
    for dir in ['', '/sbin/', '/usr/sbin']:
        try:
            pipe = os.popen(os.path.join(dir, 'ifconfig'))
        except __HOLE__:
            continue
        for line in pipe:
            words = line.lower().split()
            for i in range(len(words)):
                if words[i] in ['hwaddr', 'ether']:
                    return int(words[i + 1].replace(':', ''), 16)
IOError
dataset/ETHPy150Open scalyr/scalyr-agent-2/scalyr_agent/third_party/uuid/uuid.py/_ifconfig_getnode
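For reference, the public entry point for this machinery in the standard library is uuid.getnode(), which tries helpers like the one above and falls back to a random node id. A minimal sketch of how a caller would use it (the printed value is illustrative):

import uuid

# getnode() returns the 48-bit hardware (MAC) address as an integer; if no
# interface can be queried it falls back to a random 48-bit number with the
# multicast bit set.
node = uuid.getnode()
print(format(node, '012x'))    # e.g. '001a2b3c4d5e'

# uuid1() embeds the node id in the generated UUID.
print(uuid.uuid1(node=node))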
def _ipconfig_getnode():
    """Get the hardware address on Windows by running ipconfig.exe."""
    import os, re
    dirs = ['', r'c:\windows\system32', r'c:\winnt\system32']
    try:
        import ctypes
        buffer = ctypes.create_string_buffer(300)
        ctypes.windll.kernel32.GetSystemDirectoryA(buffer, 300)
        dirs.insert(0, buffer.value.decode('mbcs'))
    except:
        pass
    for dir in dirs:
        try:
            pipe = os.popen(os.path.join(dir, 'ipconfig') + ' /all')
        except __HOLE__:
            continue
        for line in pipe:
            value = line.split(':')[-1].strip().lower()
            if re.match('([0-9a-f][0-9a-f]-){5}[0-9a-f][0-9a-f]', value):
                return int(value.replace('-', ''), 16)
IOError
dataset/ETHPy150Open scalyr/scalyr-agent-2/scalyr_agent/third_party/uuid/uuid.py/_ipconfig_getnode
def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean',
                fill_value=None, margins=False, dropna=True,
                margins_name='All'):
    """
    Create a spreadsheet-style pivot table as a DataFrame. The levels in
    the pivot table will be stored in MultiIndex objects (hierarchical
    indexes) on the index and columns of the result DataFrame

    Parameters
    ----------
    data : DataFrame
    values : column to aggregate, optional
    index : column, Grouper, array, or list of the previous
        If an array is passed, it must be the same length as the data.
        The list can contain any of the other types (except list).
        Keys to group by on the pivot table index. If an array is passed,
        it is being used as the same manner as column values.
    columns : column, Grouper, array, or list of the previous
        If an array is passed, it must be the same length as the data.
        The list can contain any of the other types (except list).
        Keys to group by on the pivot table column. If an array is passed,
        it is being used as the same manner as column values.
    aggfunc : function or list of functions, default numpy.mean
        If list of functions passed, the resulting pivot table will have
        hierarchical columns whose top level are the function names
        (inferred from the function objects themselves)
    fill_value : scalar, default None
        Value to replace missing values with
    margins : boolean, default False
        Add all row / columns (e.g. for subtotal / grand totals)
    dropna : boolean, default True
        Do not include columns whose entries are all NaN
    margins_name : string, default 'All'
        Name of the row / column that will contain the totals
        when margins is True.

    Examples
    --------
    >>> df
         A    B      C  D
    0  foo  one  small  1
    1  foo  one  large  2
    2  foo  one  large  2
    3  foo  two  small  3
    4  foo  two  small  3
    5  bar  one  large  4
    6  bar  one  small  5
    7  bar  two  small  6
    8  bar  two  large  7

    >>> table = pivot_table(df, values='D', index=['A', 'B'],
    ...                     columns=['C'], aggfunc=np.sum)
    >>> table
              small  large
    foo  one  1      4
         two  6      NaN
    bar  one  5      4
         two  6      7

    Returns
    -------
    table : DataFrame
    """
    index = _convert_by(index)
    columns = _convert_by(columns)

    if isinstance(aggfunc, list):
        pieces = []
        keys = []
        for func in aggfunc:
            table = pivot_table(data, values=values, index=index,
                                columns=columns,
                                fill_value=fill_value, aggfunc=func,
                                margins=margins)
            pieces.append(table)
            keys.append(func.__name__)
        return concat(pieces, keys=keys, axis=1)

    keys = index + columns

    values_passed = values is not None
    if values_passed:
        if com.is_list_like(values):
            values_multi = True
            values = list(values)
        else:
            values_multi = False
            values = [values]
    else:
        values = list(data.columns.drop(keys))

    if values_passed:
        to_filter = []
        for x in keys + values:
            if isinstance(x, Grouper):
                x = x.key
            try:
                if x in data:
                    to_filter.append(x)
            except TypeError:
                pass
        if len(to_filter) < len(data.columns):
            data = data[to_filter]

    grouped = data.groupby(keys)
    agged = grouped.agg(aggfunc)

    table = agged
    if table.index.nlevels > 1:
        to_unstack = [agged.index.names[i] or i
                      for i in range(len(index), len(keys))]
        table = agged.unstack(to_unstack)

    if not dropna:
        try:
            m = MultiIndex.from_arrays(cartesian_product(table.index.levels),
                                       names=table.index.names)
            table = table.reindex_axis(m, axis=0)
        except __HOLE__:
            pass  # it's a single level

        try:
            m = MultiIndex.from_arrays(cartesian_product(table.columns.levels),
                                       names=table.columns.names)
            table = table.reindex_axis(m, axis=1)
        except AttributeError:
            pass  # it's a single level or a series

    if isinstance(table, DataFrame):
        if isinstance(table.columns, MultiIndex):
            table = table.sortlevel(axis=1)
        else:
            table = table.sort_index(axis=1)

    if fill_value is not None:
        table = table.fillna(value=fill_value, downcast='infer')

    if margins:
        if dropna:
            data = data[data.notnull().all(axis=1)]
        table = _add_margins(table, data, values, rows=index,
                             cols=columns, aggfunc=aggfunc,
                             margins_name=margins_name)

    # discard the top level
    if values_passed and not values_multi and not table.empty:
        table = table[values[0]]

    if len(index) == 0 and len(columns) > 0:
        table = table.T

    return table
AttributeError
dataset/ETHPy150Open pydata/pandas/pandas/tools/pivot.py/pivot_table
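A minimal usage sketch of the public pandas entry point, reusing the data from the docstring above (output shape follows the docstring example; exact NaN handling depends on the pandas version):

import numpy as np
import pandas as pd

df = pd.DataFrame({
    'A': ['foo'] * 5 + ['bar'] * 4,
    'B': ['one', 'one', 'one', 'two', 'two',
          'one', 'one', 'two', 'two'],
    'C': ['small', 'large', 'large', 'small', 'small',
          'large', 'small', 'small', 'large'],
    'D': [1, 2, 2, 3, 3, 4, 5, 6, 7],
})

# Aggregate D over (A, B) rows and C columns, filling holes and
# appending the 'All' margin row/column handled by _add_margins below.
table = pd.pivot_table(df, values='D', index=['A', 'B'], columns=['C'],
                       aggfunc=np.sum, fill_value=0, margins=True)
print(table)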
def _add_margins(table, data, values, rows, cols, aggfunc,
                 margins_name='All'):
    if not isinstance(margins_name, compat.string_types):
        raise ValueError('margins_name argument must be a string')

    exception_msg = 'Conflicting name "{0}" in margins'.format(margins_name)
    for level in table.index.names:
        if margins_name in table.index.get_level_values(level):
            raise ValueError(exception_msg)

    grand_margin = _compute_grand_margin(data, values, aggfunc, margins_name)

    # could be passed a Series object with no 'columns'
    if hasattr(table, 'columns'):
        for level in table.columns.names[1:]:
            if margins_name in table.columns.get_level_values(level):
                raise ValueError(exception_msg)

    if len(rows) > 1:
        key = (margins_name,) + ('',) * (len(rows) - 1)
    else:
        key = margins_name

    if not values and isinstance(table, Series):
        # If there are no values and the table is a series, then there is
        # only one column in the data. Compute grand margin and return it.
        return table.append(Series({key: grand_margin[margins_name]}))

    if values:
        marginal_result_set = _generate_marginal_results(
            table, data, values, rows, cols, aggfunc, grand_margin,
            margins_name)
        if not isinstance(marginal_result_set, tuple):
            return marginal_result_set
        result, margin_keys, row_margin = marginal_result_set
    else:
        marginal_result_set = _generate_marginal_results_without_values(
            table, data, rows, cols, aggfunc, margins_name)
        if not isinstance(marginal_result_set, tuple):
            return marginal_result_set
        result, margin_keys, row_margin = marginal_result_set

    row_margin = row_margin.reindex(result.columns)
    # populate grand margin
    for k in margin_keys:
        if isinstance(k, compat.string_types):
            row_margin[k] = grand_margin[k]
        else:
            row_margin[k] = grand_margin[k[0]]

    margin_dummy = DataFrame(row_margin, columns=[key]).T

    row_names = result.index.names
    try:
        result = result.append(margin_dummy)
    except __HOLE__:
        # we cannot reshape, so coerce the axis
        result.index = result.index._to_safe_for_reshape()
        result = result.append(margin_dummy)
    result.index.names = row_names

    return result
TypeError
dataset/ETHPy150Open pydata/pandas/pandas/tools/pivot.py/_add_margins
def _compute_grand_margin(data, values, aggfunc,
                          margins_name='All'):

    if values:
        grand_margin = {}
        for k, v in data[values].iteritems():
            try:
                if isinstance(aggfunc, compat.string_types):
                    grand_margin[k] = getattr(v, aggfunc)()
                elif isinstance(aggfunc, dict):
                    if isinstance(aggfunc[k], compat.string_types):
                        grand_margin[k] = getattr(v, aggfunc[k])()
                    else:
                        grand_margin[k] = aggfunc[k](v)
                else:
                    grand_margin[k] = aggfunc(v)
            except __HOLE__:
                pass
        return grand_margin
    else:
        return {margins_name: aggfunc(data.index)}
TypeError
dataset/ETHPy150Open pydata/pandas/pandas/tools/pivot.py/_compute_grand_margin
def _generate_marginal_results(table, data, values, rows, cols, aggfunc,
                               grand_margin, margins_name='All'):
    if len(cols) > 0:
        # need to "interleave" the margins
        table_pieces = []
        margin_keys = []

        def _all_key(key):
            return (key, margins_name) + ('',) * (len(cols) - 1)

        if len(rows) > 0:
            margin = data[rows + values].groupby(rows).agg(aggfunc)
            cat_axis = 1

            for key, piece in table.groupby(level=0, axis=cat_axis):
                all_key = _all_key(key)

                # we are going to mutate this, so need to copy!
                piece = piece.copy()
                try:
                    piece[all_key] = margin[key]
                except __HOLE__:
                    # we cannot reshape, so coerce the axis
                    piece.set_axis(cat_axis, piece._get_axis(
                        cat_axis)._to_safe_for_reshape())
                    piece[all_key] = margin[key]

                table_pieces.append(piece)
                margin_keys.append(all_key)
        else:
            margin = grand_margin
            cat_axis = 0
            for key, piece in table.groupby(level=0, axis=cat_axis):
                all_key = _all_key(key)
                table_pieces.append(piece)
                table_pieces.append(Series(margin[key], index=[all_key]))
                margin_keys.append(all_key)

        result = concat(table_pieces, axis=cat_axis)

        if len(rows) == 0:
            return result
    else:
        result = table
        margin_keys = table.columns

    if len(cols) > 0:
        row_margin = data[cols + values].groupby(cols).agg(aggfunc)
        row_margin = row_margin.stack()

        # slight hack
        new_order = [len(cols)] + lrange(len(cols))
        row_margin.index = row_margin.index.reorder_levels(new_order)
    else:
        row_margin = Series(np.nan, index=result.columns)

    return result, margin_keys, row_margin
TypeError
dataset/ETHPy150Open pydata/pandas/pandas/tools/pivot.py/_generate_marginal_results
def _normalize(table, normalize, margins):

    if not isinstance(normalize, bool) and not isinstance(normalize,
                                                          compat.string_types):
        axis_subs = {0: 'index', 1: 'columns'}
        try:
            normalize = axis_subs[normalize]
        except __HOLE__:
            raise ValueError("Not a valid normalize argument")

    if margins is False:

        # Actual Normalizations
        normalizers = {
            'all': lambda x: x / x.sum(axis=1).sum(axis=0),
            'columns': lambda x: x / x.sum(),
            'index': lambda x: x.div(x.sum(axis=1), axis=0)
        }

        normalizers[True] = normalizers['all']

        try:
            f = normalizers[normalize]
        except KeyError:
            raise ValueError("Not a valid normalize argument")

        table = f(table)
        table = table.fillna(0)

    elif margins is True:

        column_margin = table.loc[:, 'All'].drop('All')
        index_margin = table.loc['All', :].drop('All')
        table = table.drop('All', axis=1).drop('All')

        # Normalize core
        table = _normalize(table, normalize=normalize, margins=False)

        # Fix Margins
        if normalize == 'columns':
            column_margin = column_margin / column_margin.sum()
            table = concat([table, column_margin], axis=1)
            table = table.fillna(0)

        elif normalize == 'index':
            index_margin = index_margin / index_margin.sum()
            table = table.append(index_margin)
            table = table.fillna(0)

        elif normalize == "all" or normalize is True:
            column_margin = column_margin / column_margin.sum()
            index_margin = index_margin / index_margin.sum()
            index_margin.loc['All'] = 1
            table = concat([table, column_margin], axis=1)
            table = table.append(index_margin)
            table = table.fillna(0)

        else:
            raise ValueError("Not a valid normalize argument")

    else:
        raise ValueError("Not a valid margins argument")

    return table
KeyError
dataset/ETHPy150Open pydata/pandas/pandas/tools/pivot.py/_normalize
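This helper backs the normalize argument of pd.crosstab. A small usage sketch (assuming a pandas version where crosstab exposes normalize, which is the version this code comes from):

import pandas as pd

a = pd.Series(['x', 'x', 'y', 'y', 'y'])
b = pd.Series(['u', 'v', 'u', 'u', 'v'])

# Each row sums to 1.0 when normalizing over the index axis.
print(pd.crosstab(a, b, normalize='index'))

# With margins=True the core table is normalized first and the 'All'
# margins are rescaled afterwards, mirroring the margins branch above.
print(pd.crosstab(a, b, normalize='all', margins=True))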
@classmethod
def _validate_timeout(cls, value, name):
    """ Check that a timeout attribute is valid.

    :param value: The timeout value to validate
    :param name: The name of the timeout attribute to validate. This is
        used to specify in error messages.
    :return: The validated and casted version of the given value.
    :raises ValueError: If the type is not an integer or a float, or if it
        is a numeric value less than zero.
    """
    if value is _Default:
        return cls.DEFAULT_TIMEOUT

    if value is None or value is cls.DEFAULT_TIMEOUT:
        return value

    try:
        float(value)
    except (TypeError, __HOLE__):
        raise ValueError("Timeout value %s was %s, but it must be an "
                         "int or float." % (name, value))

    try:
        if value < 0:
            raise ValueError("Attempted to set %s timeout to %s, but the "
                             "timeout cannot be set to a value less "
                             "than 0." % (name, value))
    except TypeError:  # Python 3
        raise ValueError("Timeout value %s was %s, but it must be an "
                         "int or float." % (name, value))

    return value
ValueError
dataset/ETHPy150Open BergWerkGIS/QGIS-CKAN-Browser/CKAN-Browser/request/packages/urllib3/util/timeout.py/Timeout._validate_timeout
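The validator runs when a urllib3 Timeout object is constructed, so bad values fail fast rather than at request time. A minimal sketch:

from urllib3.util.timeout import Timeout

# Both ints and floats pass _validate_timeout().
t = Timeout(connect=2.0, read=7)

# Negative values are rejected up front.
try:
    Timeout(read=-1)
except ValueError as exc:
    print(exc)  # "... cannot be set to a value less than 0."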
def get_trigger_db_given_type_and_params(type=None, parameters=None):
    try:
        parameters = parameters or {}
        trigger_dbs = Trigger.query(type=type, parameters=parameters)

        trigger_db = trigger_dbs[0] if len(trigger_dbs) > 0 else None

        if not parameters and not trigger_db:
            # We need to do a double query because some TriggerDB objects
            # without parameters have the "parameters" attribute stored in
            # the db and others don't
            trigger_db = Trigger.query(type=type, parameters=None).first()

        return trigger_db
    except __HOLE__ as e:
        LOG.debug('Database lookup for type="%s" parameters="%s" resulted ' +
                  'in exception : %s.', type, parameters, e, exc_info=True)
        return None
ValueError
dataset/ETHPy150Open StackStorm/st2/st2common/st2common/services/triggers.py/get_trigger_db_given_type_and_params
def get_trigger_type_db(ref):
    """
    Returns the trigger type object from db given a string ref.

    :param ref: Reference to the trigger type db object.
    :type ref: ``str``

    :rtype trigger_type: ``object``
    """
    try:
        return TriggerType.get_by_ref(ref)
    except __HOLE__ as e:
        LOG.debug('Database lookup for ref="%s" resulted ' +
                  'in exception : %s.', ref, e, exc_info=True)
        return None
ValueError
dataset/ETHPy150Open StackStorm/st2/st2common/st2common/services/triggers.py/get_trigger_type_db
def dispatch_events(self, dt=None):
    if not self._open:
        return

    event = IOHIDEventStruct()
    r = self._queue.contents.contents.getNextEvent(
        self._queue, ctypes.byref(event), 0, 0)
    if r != 0:
        # Undocumented behaviour? returns 3758097127L when no events are
        # in queue (is documented to block)
        return

    try:
        element = self._element_cookies[event.elementCookie]
        element.value = event.value
    except __HOLE__:
        pass
KeyError
dataset/ETHPy150Open ardekantur/pyglet/experimental/input/osx.py/Device.dispatch_events
def _pop_new_chunk(self):
    if self.chunks_exhausted:
        return b""
    try:
        chunk = self.content[self.index]
    except __HOLE__:
        chunk = b''
        self.chunks_exhausted = True
    else:
        self.index += 1
    chunk = self._encode_chunk(chunk)
    if not isinstance(chunk, bytes):
        chunk = chunk.encode()
    return chunk
IndexError
dataset/ETHPy150Open shazow/urllib3/test/test_response.py/MockChunkedEncodingResponse._pop_new_chunk
def pop_current_chunk(self, amt=-1, till_crlf=False):
    if amt > 0 and till_crlf:
        raise ValueError("Can't specify amt and till_crlf.")
    if len(self.cur_chunk) <= 0:
        self.cur_chunk = self._pop_new_chunk()
    if till_crlf:
        try:
            i = self.cur_chunk.index(b"\r\n")
        except ValueError:
            # No CRLF in current chunk -- probably caused by encoder.
            self.cur_chunk = b""
            return b""
        else:
            chunk_part = self.cur_chunk[:i + 2]
            self.cur_chunk = self.cur_chunk[i + 2:]
            return chunk_part
    elif amt <= -1:
        chunk_part = self.cur_chunk
        self.cur_chunk = b''
        return chunk_part
    else:
        try:
            chunk_part = self.cur_chunk[:amt]
        except __HOLE__:
            chunk_part = self.cur_chunk
            self.cur_chunk = b''
        else:
            self.cur_chunk = self.cur_chunk[amt:]
        return chunk_part
IndexError
dataset/ETHPy150Open shazow/urllib3/test/test_response.py/MockChunkedEncodingResponse.pop_current_chunk
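The mock's _encode_chunk (defined elsewhere in the test file) frames each payload in HTTP chunked transfer encoding. For orientation, a generic sketch of that framing (not the mock's actual helper):

def frame_chunk(chunk):
    # One chunked transfer-encoding frame: hex length, CRLF, payload, CRLF.
    return format(len(chunk), 'x').encode() + b'\r\n' + chunk + b'\r\n'

body = b''.join(frame_chunk(c) for c in (b'hello', b'world'))
body += b'0\r\n\r\n'  # terminating zero-length chunk
print(body)  # b'5\r\nhello\r\n5\r\nworld\r\n0\r\n\r\n'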
def __getattr__(self, name):
    if hasattr(self.underlying, name):
        return getattr(self.underlying, name)
    try:
        return self.defaults[name]
    except __HOLE__, error:
        raise AttributeError("'%s' object has no attribute '%s'" %
                             (self.underlying.__class__.__name__, name))

# WARNING: This is a near copy from django.template.loader.find_template_loader.
# Maybe I'm blind, but despite django's heavy use of string imports I couldn't
# find an exposed utility function like this in django's source.
KeyError
dataset/ETHPy150Open yaniv-aknin/django-ajaxerrors/ajaxerrors/utils.py/DefaultedAttributes.__getattr__
def get_callable(callable):
    if isinstance(callable, basestring):
        module, attr = callable.rsplit('.', 1)
        try:
            mod = import_module(module)
        except __HOLE__, e:
            raise ImproperlyConfigured('Error importing ajaxerrors callable '
                                       '%s: "%s"' % (callable, e))
        try:
            callable = getattr(mod, attr)
        except AttributeError, e:
            raise ImproperlyConfigured('Error importing ajaxerrors callable '
                                       '%s: "%s"' % (callable, e))
    return callable
ImportError
dataset/ETHPy150Open yaniv-aknin/django-ajaxerrors/ajaxerrors/utils.py/get_callable
def _isvalidnode(pynode):  # (7)
    try:
        bool(pynode)
        return True
    except __HOLE__:
        return False
KeyError
dataset/ETHPy150Open rgalanakis/practicalmayapython/src/chapter7/callbacks.py/_isvalidnode
def make_url(report_class, domain, string_params, args):
    try:
        return html.escape(
            report_class.get_url(
                domain=domain
            ) + string_params % args
        )
    except __HOLE__:
        return None
KeyError
dataset/ETHPy150Open dimagi/commcare-hq/custom/ilsgateway/tanzania/reports/utils.py/make_url
def check_xform_es_index(interval=10):
    try:
        doc_id, doc_rev = get_last_change_for_doc_class(XFormInstance)
    except __HOLE__:
        return None

    time.sleep(interval)
    return _check_es_rev(XFORM_INDEX, doc_id, [doc_rev])
StopIteration
dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/hqadmin/escheck.py/check_xform_es_index
def check_case_es_index(interval=10):
    try:
        doc_id, doc_rev = get_last_change_for_doc_class(CommCareCase)
    except __HOLE__:
        return None

    time.sleep(interval)
    return _check_es_rev(CASE_INDEX, doc_id, [doc_rev])
StopIteration
dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/hqadmin/escheck.py/check_case_es_index
def func(self, lib, opts, args):
    """Command handler for the metasync function.
    """
    pretend = opts.pretend
    query = ui.decargs(args)
    sources = []
    for source in opts.sources:
        sources.extend(source.split(','))
    sources = sources or self.config['source'].as_str_seq()

    meta_source_instances = {}
    items = lib.items(query)

    # Avoid needlessly instantiating meta sources (can be expensive)
    if not items:
        self._log.info(u'No items found matching query')
        return

    # Instantiate the meta sources
    for player in sources:
        try:
            cls = META_SOURCES[player]
        except __HOLE__:
            self._log.error(u'Unknown metadata source \'{0}\''.format(
                player))
            # Skip unknown sources; otherwise 'cls' from a previous
            # iteration would be reused (or be undefined) below.
            continue

        try:
            meta_source_instances[player] = cls(self.config, self._log)
        except (ImportError, ConfigValueError) as e:
            self._log.error(u'Failed to instantiate metadata source '
                            u'\'{0}\': {1}'.format(player, e))

    # Avoid needlessly iterating over items
    if not meta_source_instances:
        self._log.error(u'No valid metadata sources found')
        return

    # Sync the items with all of the meta sources
    for item in items:
        for meta_source in meta_source_instances.values():
            meta_source.sync_from_source(item)

        changed = ui.show_model_changes(item)

        if changed and not pretend:
            item.store()
KeyError
dataset/ETHPy150Open beetbox/beets/beetsplug/metasync/__init__.py/MetaSyncPlugin.func
def __delitem__(self, key):
    try:
        del self.d[key]
    except (__HOLE__, TypeError):
        pass
KeyError
dataset/ETHPy150Open petrvanblokland/Xierpa3/xierpa3/toolbox/formdict.py/FormDict.__delitem__
def __getitem__(self, key):
    try:
        v = self.d[key]  # v is always a sequence
        if v and len(v) == 1:
            return v[0]
        return v
    except (__HOLE__, TypeError):
        # @@@ for this behavior, we should use d.get() instead.
        return None
KeyError
dataset/ETHPy150Open petrvanblokland/Xierpa3/xierpa3/toolbox/formdict.py/FormDict.__getitem__
def LogError(self, message, trace=None):
    try:
        self.db_error.Add(message, trace)  # Log error in the DB.
    except __HOLE__:
        cprint("ERROR: DB is not setup yet: cannot log errors to file!")
AttributeError
dataset/ETHPy150Open owtf/owtf/framework/error_handler.py/ErrorHandler.LogError
def _python_eggs(self, priors):
    egg_infos = []
    groups = [self.instances]
    if priors:
        for _group, prior_instances in self.prior_groups:
            groups.append(list(prior_instances.values()))
    for instances in groups:
        for i in instances:
            try:
                egg_infos.append(dict(i.egg_info))
            except __HOLE__:
                pass
    return egg_infos
AttributeError
dataset/ETHPy150Open openstack/anvil/anvil/packaging/base.py/DependencyHandler._python_eggs
def _scan_pip_requires(self, requires_files):
    own_eggs = self._python_eggs(False)

    def replace_forced_requirements(fn, forced_by_key):
        old_lines = sh.load_file(fn).splitlines()
        new_lines = []
        alterations = []
        for line in old_lines:
            try:
                source_req = pip_helper.extract_requirement(line)
            except (ValueError, TypeError):
                pass
            else:
                if source_req:
                    validate_requirement(fn, source_req)
                    try:
                        replace_req = forced_by_key[source_req.key]
                    except __HOLE__:
                        pass
                    else:
                        replace_req = str(replace_req)
                        source_req = str(source_req)
                        if replace_req != source_req:
                            line = replace_req
                            alterations.append(
                                "%s => %s" % (colorizer.quote(source_req),
                                              colorizer.quote(replace_req)))
            new_lines.append(line)
        if alterations:
            contents = "# Cleaned on %s\n\n%s\n" % (utils.iso8601(),
                                                    "\n".join(new_lines))
            sh.write_file_and_backup(fn, contents)
            utils.log_iterable(alterations, logger=LOG,
                               header="Replaced %s requirements in %s"
                                      % (len(alterations), fn),
                               color=None)
        return len(alterations)

    def on_replace_done(fn, time_taken):
        LOG.debug("Replacing potential forced requirements in %s"
                  " took %s seconds", colorizer.quote(fn), time_taken)

    def validate_requirement(filename, source_req):
        install_egg = None
        for egg_info in own_eggs:
            if egg_info['name'] == source_req.key:
                install_egg = egg_info
                break
        if not install_egg:
            return
        # Ensure what we are about to install/create will actually work
        # with the desired version. If it is not compatible then we should
        # abort and someone should update the tag/branch in the origin
        # file (or fix it via some other mechanism).
        if install_egg['version'] not in source_req:
            msg = ("Can not satisfy '%s' with '%s', version"
                   " conflict found in %s")
            raise exc.DependencyException(msg % (source_req,
                                                 install_egg['req'],
                                                 filename))

    if not requires_files:
        return
    requires_files = sorted(requires_files)
    utils.log_iterable(requires_files, logger=LOG,
                       header="Scanning %s pip 'requires' files"
                              % (len(requires_files)))
    forced_by_key = {}
    for pkg in self.forced_pips:
        forced_by_key[pkg.key] = pkg
    mutations = 0
    for fn in requires_files:
        LOG.debug("Replacing any potential forced requirements in %s",
                  colorizer.quote(fn))
        mutations += utils.time_it(functools.partial(on_replace_done, fn),
                                   replace_forced_requirements,
                                   fn, forced_by_key)
    # NOTE(imelnikov): after updating requirement lists we should re-fetch
    # data from them again, so we drop pip helper caches here.
    if mutations > 0:
        pip_helper.drop_caches()
KeyError
dataset/ETHPy150Open openstack/anvil/anvil/packaging/base.py/DependencyHandler._scan_pip_requires
@staticmethod
def _requirements_satisfied(pips_list, download_dir):
    downloaded_req = [pip_helper.get_archive_details(filename)["req"]
                      for filename in sh.listdir(download_dir,
                                                 files_only=True)]
    downloaded_req = dict((req.key, req.specs[0][1])
                          for req in downloaded_req)
    for req_str in pips_list:
        req = pip_helper.extract_requirement(req_str)
        try:
            downloaded_version = downloaded_req[req.key]
        except __HOLE__:
            return False
        else:
            if downloaded_version not in req:
                return False
    return True
KeyError
dataset/ETHPy150Open openstack/anvil/anvil/packaging/base.py/DependencyHandler._requirements_satisfied
def _colorize(self, pyval, state):
    pyval_type = type(pyval)
    state.score += 1

    if pyval is None or pyval is True or pyval is False:
        self._output(unicode(pyval), self.CONST_TAG, state)
    elif pyval_type in (int, float, long, types.ComplexType):
        self._output(unicode(pyval), self.NUMBER_TAG, state)
    elif pyval_type is str:
        self._colorize_str(pyval, state, '', 'string-escape')
    elif pyval_type is unicode:
        if self.ESCAPE_UNICODE:
            self._colorize_str(pyval, state, 'u', 'unicode-escape')
        else:
            self._colorize_str(pyval, state, 'u', None)
    elif pyval_type is list:
        self._multiline(self._colorize_iter, pyval, state, '[', ']')
    elif pyval_type is tuple:
        self._multiline(self._colorize_iter, pyval, state, '(', ')')
    elif pyval_type is set:
        self._multiline(self._colorize_iter, self._sort(pyval),
                        state, 'set([', '])')
    elif pyval_type is frozenset:
        self._multiline(self._colorize_iter, self._sort(pyval),
                        state, 'frozenset([', '])')
    elif pyval_type is dict:
        self._multiline(self._colorize_dict, self._sort(pyval.items()),
                        state, '{', '}')
    elif is_re_pattern(pyval):
        self._colorize_re(pyval, state)
    else:
        try:
            pyval_repr = repr(pyval)
            if not isinstance(pyval_repr, (str, unicode)):
                pyval_repr = unicode(pyval_repr)
            pyval_repr_ok = True
        except __HOLE__:
            raise
        except:
            pyval_repr_ok = False
            state.score -= 100

        if pyval_repr_ok:
            if self.GENERIC_OBJECT_RE.match(pyval_repr):
                state.score -= 5
            self._output(pyval_repr, None, state)
        else:
            state.result.append(self.UNKNOWN_REPR)
KeyboardInterrupt
dataset/ETHPy150Open CollabQ/CollabQ/vendor/epydoc/markup/pyval_repr.py/PyvalColorizer._colorize
def _sort(self, items):
    if not self.sort:
        return items
    try:
        return sorted(items)
    except __HOLE__:
        raise
    except:
        return items
KeyboardInterrupt
dataset/ETHPy150Open CollabQ/CollabQ/vendor/epydoc/markup/pyval_repr.py/PyvalColorizer._sort
def items_for_result(cl, result, form):
    """
    Generates the actual list of data.
    """

    def link_in_col(is_first, field_name, cl):
        if cl.list_display_links is None:
            return False
        if is_first and not cl.list_display_links:
            return True
        return field_name in cl.list_display_links

    first = True
    pk = cl.lookup_opts.pk.attname
    for field_index, field_name in enumerate(cl.list_display):
        empty_value_display = cl.model_admin.get_empty_value_display()
        row_classes = ['field-%s' % _coerce_field_name(field_name, field_index)]
        try:
            f, attr, value = lookup_field(field_name, result, cl.model_admin)
        except __HOLE__:
            result_repr = empty_value_display
        else:
            empty_value_display = getattr(attr, 'empty_value_display',
                                          empty_value_display)
            if f is None or f.auto_created:
                if field_name == 'action_checkbox':
                    row_classes = ['action-checkbox']
                allow_tags = getattr(attr, 'allow_tags', False)
                boolean = getattr(attr, 'boolean', False)
                result_repr = display_for_value(value, empty_value_display,
                                                boolean)
                if allow_tags:
                    warnings.warn(
                        "Deprecated allow_tags attribute used on field {}. "
                        "Use django.utils.safestring.format_html(), "
                        "format_html_join(), or mark_safe() instead.".format(field_name),
                        RemovedInDjango20Warning
                    )
                    result_repr = mark_safe(result_repr)
                if isinstance(value, (datetime.date, datetime.time)):
                    row_classes.append('nowrap')
            else:
                if isinstance(f.remote_field, models.ManyToOneRel):
                    field_val = getattr(result, f.name)
                    if field_val is None:
                        result_repr = empty_value_display
                    else:
                        result_repr = field_val
                else:
                    result_repr = display_for_field(value, f,
                                                    empty_value_display)
                if isinstance(f, (models.DateField, models.TimeField,
                                  models.ForeignKey)):
                    row_classes.append('nowrap')
        if force_text(result_repr) == '':
            result_repr = mark_safe('&nbsp;')
        row_class = mark_safe(' class="%s"' % ' '.join(row_classes))
        # If list_display_links not defined, add the link tag to the first
        # field
        if link_in_col(first, field_name, cl):
            table_tag = 'th' if first else 'td'
            first = False

            # Display link to the result's change_view if the url exists,
            # else display just the result's representation.
            try:
                url = cl.url_for_result(result)
            except NoReverseMatch:
                link_or_text = result_repr
            else:
                url = add_preserved_filters(
                    {'preserved_filters': cl.preserved_filters,
                     'opts': cl.opts}, url)
                # Convert the pk to something that can be used in Javascript.
                # Problem cases are long ints (23L) and non-ASCII strings.
                if cl.to_field:
                    attr = str(cl.to_field)
                else:
                    attr = pk
                value = result.serializable_value(attr)
                link_or_text = format_html(
                    '<a href="{}"{}>{}</a>',
                    url,
                    format_html(
                        ' data-popup-opener="{}"', value
                    ) if cl.is_popup else '',
                    result_repr)

            yield format_html('<{}{}>{}</{}>',
                              table_tag, row_class, link_or_text, table_tag)
        else:
            # By default the fields come from ModelAdmin.list_editable, but
            # if we pull the fields out of the form instead of list_editable
            # custom admins can provide fields on a per request basis
            if (form and field_name in form.fields and not (
                    field_name == cl.model._meta.pk.name and
                    form[cl.model._meta.pk.name].is_hidden)):
                bf = form[field_name]
                result_repr = mark_safe(force_text(bf.errors) + force_text(bf))
            yield format_html('<td{}>{}</td>', row_class, result_repr)
    if form and not form[cl.model._meta.pk.name].is_hidden:
        yield format_html('<td>{}</td>',
                          force_text(form[cl.model._meta.pk.name]))
ObjectDoesNotExist
dataset/ETHPy150Open django/django/django/contrib/admin/templatetags/admin_list.py/items_for_result
@register.tag
def gizmo(parser, token):
    """
    Similar to the include tag, gizmo loads special templates called gizmos
    that come with the django-tethys_gizmo app. Gizmos provide tools for
    developing user interface elements with minimal code. Examples include
    date pickers, maps, and interactive plots.

    To insert a gizmo, use the "gizmo" tag and give it the name of a gizmo
    and a dictionary of configuration parameters.

    Example::

        {% load tethys_gizmos %}
        {% gizmo example_gizmo options %}
        {% gizmo "example_gizmo" options %}

    NOTE: the "options" dictionary must be a template context variable.

    ALSO NOTE: All supporting css and javascript libraries are loaded
    using the gizmo_dependency tag (see below).
    """
    try:
        tag_name, gizmo_name, options_literal = token.split_contents()
    except __HOLE__:
        raise template.TemplateSyntaxError(
            '"%s" tag requires exactly two arguments'
            % token.contents.split()[0])

    bits = token.split_contents()
    if len(bits) < 2:
        raise TemplateSyntaxError(
            '"{0}" tag takes at least one argument: the name of the '
            'template to be included.'.format(bits[0]))

    return TethysGizmoIncludeNode(gizmo_name, options_literal)
ValueError
dataset/ETHPy150Open tethysplatform/tethys/tethys_gizmos/templatetags/tethys_gizmos.py/gizmo
def render(self, context):
    # Get the gizmos rendered from the context
    gizmos_rendered = context['gizmos_rendered']

    # Compile list of unique gizmo dependencies
    dependencies = []

    # Add gizmo dependencies
    for rendered_gizmo in gizmos_rendered:
        try:
            # Retrieve the "gizmo_dependencies" module and find the
            # appropriate function
            dependencies_module = __import__(
                'tethys_gizmos.gizmo_dependencies',
                fromlist=[rendered_gizmo])
            dependencies_function = getattr(dependencies_module,
                                            rendered_gizmo)

            # Retrieve a list of dependencies for the gizmo
            gizmo_deps = dependencies_function(context)

            # Only append dependencies if they do not already exist
            for dependency in gizmo_deps:
                if EXTERNAL_INDICATOR in dependency:
                    static_url = dependency
                else:
                    static_url = static(dependency)

                if static_url not in dependencies:
                    # Lookup the static url given the path
                    dependencies.append(static_url)
        except __HOLE__:
            # Skip those that do not have dependencies
            pass

    # Add the global dependencies last
    for dependency in global_dependencies(context):
        if EXTERNAL_INDICATOR in dependency:
            static_url = dependency
        else:
            static_url = static(dependency)

        if static_url not in dependencies:
            # Lookup the static url given the path
            dependencies.append(static_url)

    # Create markup tags
    script_tags = []
    style_tags = []

    for dependency in dependencies:
        # Only process Script tags if the dependency has a ".js" extension
        # and the output type is JS or not specified
        if JS_EXTENSION in dependency and \
                (self.output_type == JS_OUTPUT_TYPE or
                 self.output_type is None):
            script_tags.append(
                '<script src="{0}" type="text/javascript"></script>'.format(
                    dependency))

        # Only process Style tags if the dependency has a ".css" extension
        # and the output type is CSS or not specified
        elif CSS_EXTENSION in dependency and \
                (self.output_type == CSS_OUTPUT_TYPE or
                 self.output_type is None):
            style_tags.append(
                '<link href="{0}" rel="stylesheet" />'.format(dependency))

    # Combine all tags
    tags = style_tags + script_tags
    tags_string = '\n'.join(tags)
    return tags_string
AttributeError
dataset/ETHPy150Open tethysplatform/tethys/tethys_gizmos/templatetags/tethys_gizmos.py/TethysGizmoDependenciesNode.render
def test_pyunitAddError(self):
    # pyunit passes an exc_info tuple directly to addError
    try:
        raise RuntimeError('foo')
    except __HOLE__ as e:
        excValue = e
        self.result.addError(self, sys.exc_info())
    failure = self.result.errors[0][1]
    self.assertEqual(excValue, failure.value)
    self.assertEqual(RuntimeError, failure.type)
RuntimeError
dataset/ETHPy150Open twisted/twisted/twisted/trial/test/test_reporter.py/TestResultTests.test_pyunitAddError
def setUp(self):
    try:
        raise RuntimeError('foo')
    except __HOLE__:
        self.f = Failure()
    self.f.frames = [
        ['foo', 'foo/bar.py', 5, [('x', 5)], [('y', 'orange')]],
        ['qux', 'foo/bar.py', 10, [('a', 'two')], [('b', 'MCMXCIX')]]
    ]
    self.stream = NativeStringIO()
    self.result = reporter.Reporter(self.stream)
RuntimeError
dataset/ETHPy150Open twisted/twisted/twisted/trial/test/test_reporter.py/FormatFailuresTests.setUp
def test_summaryColoredFailure(self):
    """
    The summary in case of failure should have a good count of errors
    and be colored properly.
    """
    try:
        raise RuntimeError('foo')
    except __HOLE__:
        self.result.addError(self, sys.exc_info())
    self.result.done()
    self.assertEqual(self.log[1], (self.result.FAILURE, 'FAILED'))
    self.assertEqual(
        self.stream.getvalue().splitlines()[-1].strip(), "(errors=1)")
RuntimeError
dataset/ETHPy150Open twisted/twisted/twisted/trial/test/test_reporter.py/TreeReporterTests.test_summaryColoredFailure
def test_groupResults(self):
    """
    If two different tests have the same error, L{Reporter._groupResults}
    includes them together in one of the tuples in the list it returns.
    """
    try:
        raise RuntimeError('foo')
    except RuntimeError:
        self.result.addError(self, sys.exc_info())
        self.result.addError(self.test, sys.exc_info())
    try:
        raise RuntimeError('bar')
    except __HOLE__:
        extra = sample.FooTest('test_bar')
        self.result.addError(extra, sys.exc_info())
    self.result.done()
    grouped = self.result._groupResults(
        self.result.errors, self.result._formatFailureTraceback)
    self.assertEqual(grouped[0][1], [self, self.test])
    self.assertEqual(grouped[1][1], [extra])
RuntimeError
dataset/ETHPy150Open twisted/twisted/twisted/trial/test/test_reporter.py/TreeReporterTests.test_groupResults
def removeMethod(self, klass, methodName):
    """
    Remove 'methodName' from 'klass'.

    If 'klass' does not have a method named 'methodName', then
    'removeMethod' succeeds silently.

    If 'klass' does have a method named 'methodName', then it is removed
    using delattr. Also, methods of the same name are removed from all
    base classes of 'klass', thus removing the method entirely.

    @param klass: The class to remove the method from.
    @param methodName: The name of the method to remove.
    """
    method = getattr(klass, methodName, None)
    if method is None:
        return
    for base in getmro(klass):
        try:
            delattr(base, methodName)
        except (__HOLE__, TypeError):
            break
        else:
            self.addCleanup(setattr, base, methodName, method)
AttributeError
dataset/ETHPy150Open twisted/twisted/twisted/trial/test/test_reporter.py/SubunitReporterTests.removeMethod
def find_unique_points(explored_parameters):
    """Takes a list of explored parameters and finds unique parameter combinations.

    If parameter ranges are hashable operates in O(N), otherwise O(N**2).

    :param explored_parameters: List of **explored** parameters
    :return:
        List of tuples, first entry being the parameter values, second entry
        a list containing the run position of the unique combination.
    """
    ranges = [param.f_get_range(copy=False) for param in explored_parameters]
    zipped_tuples = list(zip(*ranges))
    try:
        unique_elements = OrderedDict()
        for idx, val_tuple in enumerate(zipped_tuples):
            if val_tuple not in unique_elements:
                unique_elements[val_tuple] = []
            unique_elements[val_tuple].append(idx)
        return compat.listitems(unique_elements)
    except __HOLE__:
        logger = logging.getLogger('pypet.find_unique')
        logger.error('Your parameter entries could not be hashed, '
                     'now I am sorting slowly in O(N**2).')
        unique_elements = []
        for idx, val_tuple in enumerate(zipped_tuples):
            matches = False
            for added_tuple, pos_list in unique_elements:
                matches = True
                for idx2, val in enumerate(added_tuple):
                    if not explored_parameters[idx2]._equal_values(
                            val_tuple[idx2], val):
                        matches = False
                        break
                if matches:
                    pos_list.append(idx)
                    break
            if not matches:
                unique_elements.append((val_tuple, [idx]))
        return unique_elements
TypeError
dataset/ETHPy150Open SmokinCaterpillar/pypet/pypet/utils/explore.py/find_unique_points
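The same hash-first, quadratic-fallback idea, isolated from the pypet parameter objects. This is a simplified sketch using plain equality instead of the parameter-aware _equal_values comparison:

from collections import OrderedDict

def unique_rows(rows):
    """Group equal rows, preserving first-seen order."""
    try:
        seen = OrderedDict()
        for idx, row in enumerate(rows):
            seen.setdefault(row, []).append(idx)  # O(N) when rows hash
        return list(seen.items())
    except TypeError:  # unhashable row, e.g. one containing a list
        groups = []
        for idx, row in enumerate(rows):
            for key, positions in groups:
                if key == row:
                    positions.append(idx)
                    break
            else:
                groups.append((row, [idx]))
        return groups

print(unique_rows([(1, 'a'), (2, 'b'), (1, 'a')]))
# [((1, 'a'), [0, 2]), ((2, 'b'), [1])]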
def execute(self, argv):
    '''Executes user provided command. Eg. bii run:cpp'''
    errors = False
    try:
        if isinstance(argv, basestring):  # To make tests easier to write
            argv = shlex.split(argv)
        self.executor.execute(argv)  # Executor only raises not expected Exceptions
    except (__HOLE__, SystemExit) as e:
        logger.debug('Execution terminated: %s', e)
        errors = True
    except BiiException as e:
        errors = True
        self.user_io.out.error(str(e))
    except Exception as e:
        tb = traceback.format_exc()
        logger.error(tb)
        errors = True
        self.user_io.out.error('Unexpected Exception\n %s' % e)
        self.user_io.out.error('Error executing command.\n'
                               '\tCheck the documentation in http://docs.biicode.com\n'
                               '\tor ask in the forum http://forum.biicode.com\n')
    return errors
KeyboardInterrupt
dataset/ETHPy150Open biicode/client/shell/bii.py/Bii.execute
def run_main(args, user_io=None, current_folder=None, user_folder=None,
             biiapi_client=None):
    try:
        user_folder = user_folder or os.path.expanduser("~")
        biicode_folder = os.path.join(user_folder, '.biicode')
        current_folder = current_folder or os.getcwd()
        user_io = user_io or create_user_io(biicode_folder)

        bii = Bii(user_io, current_folder, biicode_folder)
        # Update manager doesn't need proxy nor authentication to call
        # get_server_info
        biiapi_client = biiapi_client or bii.biiapi
        updates_manager = get_updates_manager(biiapi_client, biicode_folder)
        try:
            # Check for updates
            updates_manager.check_for_updates(bii.user_io.out)
        except ObsoleteClient as e:
            bii.user_io.out.error(e.message)
            return int(True)

        errors = bii.execute(args)
        return int(errors)
    except __HOLE__ as e:
        print str(e)
        return 1
OSError
dataset/ETHPy150Open biicode/client/shell/bii.py/run_main
def format_output_data(self, data):
    for k, v in six.iteritems(data):
        if isinstance(v, six.text_type):
            try:
                # Deserialize if possible into dict, lists, tuples...
                v = ast.literal_eval(v)
            except SyntaxError:
                # NOTE(sbauza): This is probably a datetime string, we need
                # to keep it unchanged.
                pass
            except __HOLE__:
                # NOTE(sbauza): This is not something AST can evaluate,
                # probably a string.
                pass
        if isinstance(v, list):
            value = '\n'.join(utils.dumps(
                i, indent=self.json_indent) if isinstance(i, dict)
                else str(i) for i in v)
            data[k] = value
        elif isinstance(v, dict):
            value = utils.dumps(v, indent=self.json_indent)
            data[k] = value
        elif v is None:
            data[k] = ''
ValueError
dataset/ETHPy150Open openstack/python-blazarclient/climateclient/command.py/ClimateCommand.format_output_data
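The two except branches mirror how ast.literal_eval behaves on non-literal input: invalid syntax (such as timestamps) raises SyntaxError, while syntactically valid non-literals raise ValueError. A standalone illustration:

import ast

for raw in ("{'a': 1}", "[1, 2]", "2016-04-01T12:00:00", "plain text"):
    try:
        value = ast.literal_eval(raw)
    except (SyntaxError, ValueError):
        value = raw  # keep strings that are not Python literals
    print(repr(value))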
def year(request, year, queryset=None,
         template_name="jellyroll/calendar/year.html",
         template_loader=loader, extra_context=None,
         context_processors=None, mimetype=None):
    """
    Jellyroll'd items for a particular year.

    Works a bit like a generic view in that you can pass a bunch of optional
    keyword arguments which work just like they do in generic views. Those
    arguments are: ``template_name``, ``template_loader``, ``extra_context``,
    ``context_processors``, and ``mimetype``.

    You can also pass a ``queryset`` argument; see the module's docstring
    for information about how that works.

    Templates: ``jellyroll/calendar/year.html`` (default)
    Context:
        ``items``
            Items from the year, earliest first.
        ``year``
            The year.
        ``previous``
            The previous year; ``None`` if that year was before jellyrolling
            started.
        ``previous_link``
            Link to the previous year
        ``next``
            The next year; ``None`` if it's in the future.
        ``next_link``
            Link to the next year
    """
    # Make sure we've requested a valid year
    year = int(year)
    try:
        first = Item.objects.order_by("timestamp")[0]
    except __HOLE__:
        raise Http404("No items; no views.")

    today = datetime.date.today()
    if year < first.timestamp.year or year > today.year:
        raise Http404("Invalid year (%s .. %s)" %
                      (first.timestamp.year, today.year))

    # Calculate the previous year
    previous = year - 1
    previous_link = urlresolvers.reverse("jellyroll.views.calendar.year",
                                         args=[previous])
    if previous < first.timestamp.year:
        previous = previous_link = None

    # And the next year
    next = year + 1
    next_link = urlresolvers.reverse("jellyroll.views.calendar.year",
                                     args=[next])
    if next > today.year:
        next = next_link = None

    # Handle the initial queryset
    if not queryset:
        queryset = Item.objects.all()
    queryset = queryset.filter(timestamp__year=year)
    if not queryset.query.order_by:
        queryset = queryset.order_by("timestamp")

    # Build the context
    context = RequestContext(request, {
        "items": queryset.filter(timestamp__year=year).order_by("timestamp"),
        "year": year,
        "previous": previous,
        "previous_link": previous_link,
        "next": next,
        "next_link": next_link
    }, context_processors)
    if extra_context:
        for key, value in extra_context.items():
            if callable(value):
                context[key] = value()
            else:
                context[key] = value

    # Load, render, and return
    t = template_loader.get_template(template_name)
    return HttpResponse(t.render(context), mimetype=mimetype)
IndexError
dataset/ETHPy150Open jacobian-archive/jellyroll/src/jellyroll/views/calendar.py/year
def month(request, year, month, queryset=None,
          template_name="jellyroll/calendar/month.html",
          template_loader=loader, extra_context=None,
          context_processors=None, mimetype=None):
    """
    Jellyroll'd items for a particular month.

    Works a bit like a generic view in that you can pass a bunch of optional
    keyword arguments which work just like they do in generic views. Those
    arguments are: ``template_name``, ``template_loader``, ``extra_context``,
    ``context_processors``, and ``mimetype``.

    You can also pass a ``queryset`` argument; see the module's docstring
    for information about how that works.

    Templates: ``jellyroll/calendar/month.html`` (default)
    Context:
        ``items``
            Items from the month, earliest first.
        ``month``
            The month (a ``datetime.date`` object).
        ``previous``
            The previous month; ``None`` if that month was before
            jellyrolling started.
        ``previous_link``
            Link to the previous month
        ``next``
            The next month; ``None`` if it's in the future.
        ``next_link``
            Link to the next month
    """
    # Make sure we've requested a valid month
    try:
        date = datetime.date(*time.strptime(year + month, '%Y%b')[:3])
    except ValueError:
        raise Http404("Invalid month string")
    try:
        first = Item.objects.order_by("timestamp")[0]
    except __HOLE__:
        raise Http404("No items; no views.")

    # Calculate first and last day of month, for use in a date-range lookup.
    today = datetime.date.today()
    first_day = date.replace(day=1)
    if first_day.month == 12:
        last_day = first_day.replace(year=first_day.year + 1, month=1)
    else:
        last_day = first_day.replace(month=first_day.month + 1)

    if first_day < first.timestamp.date().replace(day=1) or date > today:
        raise Http404("Invalid month (%s .. %s)" %
                      (first.timestamp.date(), today))

    # Calculate the previous month
    previous = (first_day - datetime.timedelta(days=1)).replace(day=1)
    previous_link = urlresolvers.reverse(
        "jellyroll.views.calendar.month",
        args=previous.strftime("%Y %b").lower().split())
    if previous < first.timestamp.date().replace(day=1):
        previous = None

    # And the next month
    next = last_day + datetime.timedelta(days=1)
    next_link = urlresolvers.reverse(
        "jellyroll.views.calendar.month",
        args=next.strftime("%Y %b").lower().split())
    if next > today:
        next = None

    # Handle the initial queryset
    if not queryset:
        queryset = Item.objects.all()
    queryset = queryset.filter(timestamp__range=(first_day, last_day))
    if not queryset.query.order_by:
        queryset = queryset.order_by("timestamp")

    # Build the context
    context = RequestContext(request, {
        "items": queryset,
        "month": date,
        "previous": previous,
        "previous_link": previous_link,
        "next": next,
        "next_link": next_link
    }, context_processors)
    if extra_context:
        for key, value in extra_context.items():
            if callable(value):
                context[key] = value()
            else:
                context[key] = value

    # Load, render, and return
    t = template_loader.get_template(template_name)
    return HttpResponse(t.render(context), mimetype=mimetype)
IndexError
dataset/ETHPy150Open jacobian-archive/jellyroll/src/jellyroll/views/calendar.py/month
def day(request, year, month, day, queryset=None, recent_first=False,
        template_name="jellyroll/calendar/day.html",
        template_loader=loader, extra_context=None,
        context_processors=None, mimetype=None):
    """
    Jellyroll'd items for a particular day.

    Works a bit like a generic view in that you can pass a bunch of optional
    keyword arguments which work just like they do in generic views. Those
    arguments are: ``template_name``, ``template_loader``, ``extra_context``,
    ``context_processors``, and ``mimetype``.

    Also takes a ``recent_first`` param; if it's ``True`` the newest items
    will be displayed first; otherwise items will be ordered earliest first.

    You can also pass a ``queryset`` argument; see the module's docstring
    for information about how that works.

    Templates: ``jellyroll/calendar/day.html`` (default)
    Context:
        ``items``
            Items from the month, ordered according to ``recent_first``.
        ``day``
            The day (a ``datetime.date`` object).
        ``previous``
            The previous day; ``None`` if that day was before jellyrolling
            started.
        ``previous_link``
            Link to the previous day
        ``next``
            The next day; ``None`` if it's in the future.
        ``next_link``
            Link to the next day.
        ``is_today``
            ``True`` if this day is today.
    """
    # Make sure we've requested a valid day
    try:
        day = datetime.date(*time.strptime(year + month + day, '%Y%b%d')[:3])
    except ValueError:
        raise Http404("Invalid day string")
    try:
        first = Item.objects.order_by("timestamp")[0]
    except __HOLE__:
        raise Http404("No items; no views.")

    today = datetime.date.today()
    if day < first.timestamp.date() or day > today:
        raise Http404("Invalid day (%s .. %s)" %
                      (first.timestamp.date(), today))

    # Calculate the previous day
    previous = day - datetime.timedelta(days=1)
    previous_link = urlresolvers.reverse(
        "jellyroll.views.calendar.day",
        args=previous.strftime("%Y %b %d").lower().split())
    if previous < first.timestamp.date():
        previous = previous_link = None

    # And the next day
    next = day + datetime.timedelta(days=1)
    next_link = urlresolvers.reverse(
        "jellyroll.views.calendar.day",
        args=next.strftime("%Y %b %d").lower().split())
    if next > today:
        next = next_link = None

    # Some lookup values...
    timestamp_range = (datetime.datetime.combine(day, datetime.time.min),
                       datetime.datetime.combine(day, datetime.time.max))

    # Handle the initial queryset
    if not queryset:
        queryset = Item.objects.all()
    queryset = queryset.filter(timestamp__range=timestamp_range)
    if not queryset.query.order_by:
        if recent_first:
            queryset = queryset.order_by("-timestamp")
        else:
            queryset = queryset.order_by("timestamp")

    # Build the context
    context = RequestContext(request, {
        "items": queryset,
        "day": day,
        "previous": previous,
        "previous_link": previous_link,
        "next": next,
        "next_link": next_link,
        "is_today": day == today,
    }, context_processors)
    if extra_context:
        for key, value in extra_context.items():
            if callable(value):
                context[key] = value()
            else:
                context[key] = value

    # Load, render, and return
    t = template_loader.get_template(template_name)
    return HttpResponse(t.render(context), mimetype=mimetype)
IndexError
dataset/ETHPy150Open jacobian-archive/jellyroll/src/jellyroll/views/calendar.py/day
def __init__(self, parentnode, name, atom=None, shape=None, title="",
             filters=None, chunkshape=None, byteorder=None, _log=True):
    self.atom = atom
    """An `Atom` instance representing the shape, type of the atomic
    objects to be saved.
    """
    self.shape = None
    """The shape of the stored array."""
    self.extdim = -1  # `CArray` objects are not enlargeable by default
    """The index of the enlargeable dimension."""

    # Other private attributes
    self._v_version = None
    """The object version of this array."""
    self._v_new = new = atom is not None
    """Is this the first time the node has been created?"""
    self._v_new_title = title
    """New title for this node."""
    self._v_convert = True
    """Whether the ``Array`` object must be converted or not."""
    self._v_chunkshape = chunkshape
    """Private storage for the `chunkshape` property of the leaf."""

    # Miscellaneous iteration rubbish.
    self._start = None
    """Starting row for the current iteration."""
    self._stop = None
    """Stopping row for the current iteration."""
    self._step = None
    """Step size for the current iteration."""
    self._nrowsread = None
    """Number of rows read up to the current state of iteration."""
    self._startb = None
    """Starting row for current buffer."""
    self._stopb = None
    """Stopping row for current buffer."""
    self._row = None
    """Current row in iterators (sentinel)."""
    self._init = False
    """Whether we are in the middle of an iteration or not (sentinel)."""
    self.listarr = None
    """Current buffer in iterators."""

    if new:
        if not isinstance(atom, Atom):
            raise ValueError("atom parameter should be an instance of "
                             "tables.Atom and you passed a %s." % type(atom))
        if shape is None:
            raise ValueError("you must specify a non-empty shape")
        try:
            shape = tuple(shape)
        except __HOLE__:
            raise TypeError("`shape` parameter must be a sequence "
                            "and you passed a %s" % type(shape))
        self.shape = tuple(SizeType(s) for s in shape)

        if chunkshape is not None:
            try:
                chunkshape = tuple(chunkshape)
            except TypeError:
                raise TypeError(
                    "`chunkshape` parameter must be a sequence "
                    "and you passed a %s" % type(chunkshape))
            if len(shape) != len(chunkshape):
                raise ValueError("the shape (%s) and chunkshape (%s) "
                                 "ranks must be equal." % (shape, chunkshape))
            elif min(chunkshape) < 1:
                raise ValueError("chunkshape parameter cannot have "
                                 "zero-dimensions.")
            self._v_chunkshape = tuple(SizeType(s) for s in chunkshape)

    # The `Array` class is not abstract enough! :(
    super(Array, self).__init__(parentnode, name, new, filters,
                                byteorder, _log)
TypeError
dataset/ETHPy150Open PyTables/PyTables/tables/carray.py/CArray.__init__
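In user code this constructor is normally reached through File.create_carray. A minimal sketch (the file name is illustrative); note shape is fixed at creation time and chunkshape must match its rank, exactly the checks performed above:

import numpy as np
import tables

with tables.open_file('carray_demo.h5', mode='w') as h5:
    carr = h5.create_carray(h5.root, 'data',
                            atom=tables.Float64Atom(),
                            shape=(1000, 50),
                            chunkshape=(100, 50))
    carr[:10, :] = np.random.rand(10, 50)
    print(carr.shape, carr.chunkshape)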
def unquote(s):
    """
    Undo the effects of quote(). Based heavily on urllib.unquote().
    """
    mychr = chr
    myatoi = int
    list = s.split('_')
    res = [list[0]]
    myappend = res.append
    del list[0]
    for item in list:
        if item[1:2]:
            try:
                myappend(mychr(myatoi(item[:2], 16)) + item[2:])
            except __HOLE__:
                myappend('_' + item)
        else:
            myappend('_' + item)
    return "".join(res)
ValueError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/contrib/admin/util.py/unquote
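For context, the escaping this undoes replaces URL-unsafe characters with '_XX' hex pairs. A simplified sketch of the counterpart (not a verbatim copy of django.contrib.admin's quote(); the character set here is illustrative), with a round trip:

def quote(s):
    # Escape characters that are unsafe in admin URLs as '_XX' hex pairs.
    unsafe = ':/_#?;@&=+$,'
    return ''.join('_%02X' % ord(c) if c in unsafe else c for c in s)

original = 'a/b_c'
quoted = quote(original)           # 'a_2Fb_5Fc'
assert unquote(quoted) == original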
def find_resource(manager, name_or_id, **find_args):
    """Look for resource in a given manager.

    Used as a helper for the _find_* methods.
    Example:

    .. code-block:: python

        def _find_hypervisor(cs, hypervisor):
            # Get a hypervisor by name or ID.
            return cliutils.find_resource(cs.hypervisors, hypervisor)
    """
    # first try to get entity as integer id
    try:
        return manager.get(int(name_or_id))
    except (TypeError, __HOLE__, exceptions.NotFound):
        pass

    # now try to get entity as uuid
    try:
        if six.PY2:
            tmp_id = encodeutils.safe_encode(name_or_id)
        else:
            tmp_id = encodeutils.safe_decode(name_or_id)

        if uuidutils.is_uuid_like(tmp_id):
            return manager.get(tmp_id)
    except (TypeError, ValueError, exceptions.NotFound):
        pass

    # for str id which is not uuid
    if getattr(manager, 'is_alphanum_id_allowed', False):
        try:
            return manager.get(name_or_id)
        except exceptions.NotFound:
            pass

    try:
        try:
            return manager.find(human_id=name_or_id, **find_args)
        except exceptions.NotFound:
            pass

        # finally try to find entity by name
        try:
            resource = getattr(manager, 'resource_class', None)
            name_attr = resource.NAME_ATTR if resource else 'name'
            kwargs = {name_attr: name_or_id}
            kwargs.update(find_args)
            return manager.find(**kwargs)
        except exceptions.NotFound:
            msg = _("No %(name)s with a name or "
                    "ID of '%(name_or_id)s' exists.") % \
                {
                    "name": manager.resource_class.__name__.lower(),
                    "name_or_id": name_or_id
                }
            raise exceptions.CommandError(msg)
    except exceptions.NoUniqueMatch:
        msg = _("Multiple %(name)s matches found for "
                "'%(name_or_id)s', use an ID to be more specific.") % \
            {
                "name": manager.resource_class.__name__.lower(),
                "name_or_id": name_or_id
            }
        raise exceptions.CommandError(msg)
ValueError
dataset/ETHPy150Open openstack/python-cloudkittyclient/cloudkittyclient/openstack/common/apiclient/utils.py/find_resource
def _load_from_file(self, path):
    states = []
    try:
        if os.path.exists(path):
            with open(path, 'rb') as fd:
                states = pickle.load(fd)
    except (PickleError, PicklingError), err:
        raise SynapseException(err)
    except (__HOLE__, EOFError):
        pass
    self.logger.debug("Loading %d persisted resources states from %s" %
                      (len(states), path))
    return states
IOError
dataset/ETHPy150Open comodit/synapse-agent/synapse/states_manager.py/StatesManager._load_from_file
def persist(self):
    try:
        with open(self.path, 'wb') as fd:
            os.chmod(self.path, stat.S_IREAD | stat.S_IWRITE)
            pickle.dump(self.states, fd)
    except __HOLE__ as err:
        self.logger.error(err)
IOError
dataset/ETHPy150Open comodit/synapse-agent/synapse/states_manager.py/StatesManager.persist
def _get_index(self, res_id):
    try:
        return map(itemgetter('resource_id'), self.states).index(res_id)
    except __HOLE__:
        return -1
ValueError
dataset/ETHPy150Open comodit/synapse-agent/synapse/states_manager.py/StatesManager._get_index
def _register_pygments_rst_directive():
    from docutils import nodes
    from docutils.parsers.rst import directives
    from pygments import highlight
    from pygments.lexers import get_lexer_by_name, TextLexer
    from pygments.formatters import HtmlFormatter

    DEFAULT = HtmlFormatter()
    VARIANTS = {
        'linenos': HtmlFormatter(linenos=True),
    }

    def pygments_directive(name, arguments, options, content, lineno,
                           content_offset, block_text, state, state_machine):
        try:
            lexer = get_lexer_by_name(arguments[0])
        except __HOLE__:
            # no lexer found - use the text one instead of an exception
            lexer = TextLexer()
        formatter = options and VARIANTS[options.keys()[0]] or DEFAULT
        parsed = highlight(u'\n'.join(content), lexer, formatter)
        return [nodes.raw('', parsed, format='html')]
    pygments_directive.arguments = (1, 0, 1)
    pygments_directive.content = 1
    directives.register_directive('code', pygments_directive)
ValueError
dataset/ETHPy150Open jamesturk/django-markupfield/markupfield/markup.py/_register_pygments_rst_directive
def exit(self):
    """
    Sometimes client is disconnected and command exits after. So cmdstack
    is gone
    """
    try:
        self.protocol.cmdstack.pop()
        self.protocol.cmdstack[-1].resume()
    except __HOLE__:
        # cmdstack could be gone already (wget + disconnect)
        pass
AttributeError
dataset/ETHPy150Open cowrie/cowrie/cowrie/core/honeypot.py/HoneyPotCommand.exit
def __getdistro_setenv(servername):
    """
    """
    server = [s for s in env.bootmachine_servers if s.name == servername][0]
    env.servername = server.name
    env.host = server.public_ip
    env.host_string = "{0}:{1}".format(server.public_ip, server.port)
    env.hosts = [server.public_ip]
    env.port = server.port
    env.user = server.user
    distro_module = [s for s in settings.SERVERS
                     if s["servername"] == server.name][0]["distro_module"]
    try:
        __import__(distro_module)
        return sys.modules[distro_module]
    except __HOLE__:
        abort("Unable to import the module: {0}".format(distro_module))
ImportError
dataset/ETHPy150Open rizumu/bootmachine/bootmachine/contrib/configurators/salt.py/__getdistro_setenv
def _from_json(self, datastring):
    try:
        return utils.loads(datastring)
    except __HOLE__:
        msg = _("cannot understand JSON")
        raise exception.MalformedRequestBody(reason=msg)
ValueError
dataset/ETHPy150Open nii-cloud/dodai-compute/nova/api/openstack/wsgi.py/JSONDeserializer._from_json
def get_body_deserializer(self, content_type):
    try:
        return self.body_deserializers[content_type]
    except (__HOLE__, TypeError):
        raise exception.InvalidContentType(content_type=content_type)
KeyError
dataset/ETHPy150Open nii-cloud/dodai-compute/nova/api/openstack/wsgi.py/RequestDeserializer.get_body_deserializer
def get_action_args(self, request_environment):
    """Parse dictionary created by routes library."""
    try:
        args = request_environment['wsgiorg.routing_args'][1].copy()
    except Exception:
        return {}

    try:
        del args['controller']
    except __HOLE__:
        pass

    try:
        del args['format']
    except KeyError:
        pass

    return args
KeyError
dataset/ETHPy150Open nii-cloud/dodai-compute/nova/api/openstack/wsgi.py/RequestDeserializer.get_action_args
def get_body_serializer(self, content_type):
    try:
        return self.body_serializers[content_type]
    except (KeyError, __HOLE__):
        raise exception.InvalidContentType(content_type=content_type)
TypeError
dataset/ETHPy150Open nii-cloud/dodai-compute/nova/api/openstack/wsgi.py/ResponseSerializer.get_body_serializer
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
    """WSGI method that controls (de)serialization and method dispatch."""
    LOG.info("%(method)s %(url)s" % {"method": request.method,
                                     "url": request.url})

    try:
        action, args, accept = self.deserializer.deserialize(request)
    except exception.InvalidContentType:
        msg = _("Unsupported Content-Type")
        return faults.Fault(webob.exc.HTTPBadRequest(explanation=msg))
    except exception.MalformedRequestBody:
        msg = _("Malformed request body")
        return faults.Fault(webob.exc.HTTPBadRequest(explanation=msg))

    project_id = args.pop("project_id", None)
    if 'nova.context' in request.environ and project_id:
        request.environ['nova.context'].project_id = project_id

    try:
        action_result = self.dispatch(request, action, args)
    except webob.exc.HTTPException as ex:
        LOG.info(_("HTTP exception thrown: %s"), unicode(ex))
        action_result = faults.Fault(ex)

    if type(action_result) is dict or action_result is None:
        response = self.serializer.serialize(action_result,
                                             accept,
                                             action=action)
    else:
        response = action_result

    try:
        msg_dict = dict(url=request.url, status=response.status_int)
        msg = _("%(url)s returned with HTTP %(status)d") % msg_dict
    except __HOLE__, e:
        msg_dict = dict(url=request.url, e=e)
        msg = _("%(url)s returned a fault: %(e)s" % msg_dict)

    LOG.info(msg)
    return response
AttributeError
dataset/ETHPy150Open nii-cloud/dodai-compute/nova/api/openstack/wsgi.py/Resource.__call__
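The masked except guards the response.status_int read: dispatch may hand back an object that is not a webob Response, in which case the fallback log message is used. Reduced illustration with a hypothetical stand-in result:

class OpaqueResult(object):
    """Hypothetical action result that is not a webob Response."""

url = 'http://example.com/v2/servers'
response = OpaqueResult()
try:
    msg = '%(url)s returned with HTTP %(status)d' % dict(
        url=url, status=response.status_int)   # no status_int attribute
except AttributeError as e:
    msg = '%(url)s returned a fault: %(e)s' % dict(url=url, e=e)
print(msg)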
def dispatch(self, request, action, action_args):
    """Find action-specific method on controller and call it."""
    controller_method = getattr(self.controller, action)
    try:
        return controller_method(req=request, **action_args)
    except __HOLE__ as exc:
        LOG.exception(exc)
        return faults.Fault(webob.exc.HTTPBadRequest())
TypeError
dataset/ETHPy150Open nii-cloud/dodai-compute/nova/api/openstack/wsgi.py/Resource.dispatch
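TypeError is what Python raises when the keyword arguments collected by the router don't match the controller method's signature, which is why the record above converts it to a 400. Minimal illustration with a hypothetical controller action:

def show(req, id):              # hypothetical controller action
    return {'id': id}

try:
    show(req='fake-request', id='42', bogus='extra')  # unexpected kwarg
except TypeError as exc:
    print('HTTPBadRequest: %s' % exc)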
def get_nova_client(auth_token=None, bypass_url=None, previous_tries=0):
    if previous_tries > 3:
        return None

    # first try to use auth details from auth_ref so we
    # don't need to auth with keystone every time
    auth_ref = get_auth_ref()
    auth_details = get_auth_details()
    keystone = get_keystone_client(auth_ref)

    if not auth_token:
        auth_token = keystone.auth_token
    if not bypass_url:
        bypass_url = get_endpoint_url_for_service('compute',
                                                  auth_ref,
                                                  get_endpoint_type(
                                                      auth_details))

    nova = nova_client.Client('2',
                              auth_token=auth_token,
                              bypass_url=bypass_url,
                              insecure=auth_details['OS_API_INSECURE'])

    try:
        flavors = nova.flavors.list()
        # Exceptions are only thrown when we try to do something
        [flavor.id for flavor in flavors]
    except (nova_exc.Unauthorized, nova_exc.AuthorizationFailure,
            __HOLE__) as e:
        # NOTE(mancdaz): nova doesn't properly pass back unauth errors, but
        # in fact tries to re-auth, all by itself. But we didn't pass it
        # an auth_url, so it bombs out horribly with an error.
        auth_ref = force_reauth()
        keystone = get_keystone_client(auth_ref)
        auth_token = keystone.auth_token

        nova = get_nova_client(auth_token, bypass_url, previous_tries + 1)
    # we only want to pass ClientException back to the calling poller
    # since this encapsulates all of our actual API failures. Other
    # exceptions will be treated as script/environmental issues and
    # sent to status_err
    except nova_exc.ClientException:
        raise
    except Exception as e:
        status_err(str(e))

    return nova
AttributeError
dataset/ETHPy150Open rcbops/rpc-openstack/maas/plugins/maas_common.py/get_nova_client
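Stripped of the novaclient specifics, the retry shape above is: attempt the call, re-authenticate on an auth-flavoured failure, and recurse with a bounded try counter. A generic sketch under those assumptions (names are placeholders, not part of maas_common):

def call_with_reauth(call, reauth, previous_tries=0, max_tries=3):
    if previous_tries > max_tries:
        return None
    try:
        return call()
    except RuntimeError:        # stand-in for the auth-failure tuple above
        reauth()
        return call_with_reauth(call, reauth, previous_tries + 1, max_tries)

# usage: call_with_reauth(lambda: nova.flavors.list(), force_reauth)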
def keystone_auth(auth_details):
    try:
        if auth_details['OS_AUTH_URL'].endswith('v3'):
            k_client = k3_client
        else:
            k_client = k2_client
        tenant_name = auth_details['OS_TENANT_NAME']
        keystone = k_client.Client(username=auth_details['OS_USERNAME'],
                                   password=auth_details['OS_PASSWORD'],
                                   tenant_name=tenant_name,
                                   auth_url=auth_details['OS_AUTH_URL'])
    except Exception as e:
        status_err(str(e))

    try:
        with open(TOKEN_FILE, 'w') as token_file:
            json.dump(keystone.auth_ref, token_file)
    except __HOLE__:
        # if we can't write the file we go on
        pass

    return keystone.auth_ref
IOError
dataset/ETHPy150Open rcbops/rpc-openstack/maas/plugins/maas_common.py/keystone_auth
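The token-file write is deliberately best-effort: a failed dump only costs the cache, not the authentication. Same pattern in isolation (path and payload are illustrative):

import json

auth_ref = {'token': {'expires': '2016-04-26T12:00:00Z'}}  # illustrative
try:
    with open('/var/run/maas-token.json', 'w') as token_file:
        json.dump(auth_ref, token_file)
except IOError:
    pass  # caching failed; the caller still gets a valid auth_ref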
def is_token_expired(token, auth_details):
    for fmt in ('%Y-%m-%dT%H:%M:%SZ',
                '%Y-%m-%dT%H:%M:%S.%fZ'):
        try:
            if auth_details['OS_AUTH_URL'].endswith('v3'):
                expires_at = token.get('expires_at')
            else:
                expires_at = token['token'].get('expires')
            expires = datetime.datetime.strptime(expires_at, fmt)
            break
        except __HOLE__ as e:
            pass
    else:
        # neither format matched; re-raise the last parse error
        raise e
    return datetime.datetime.now() >= expires
ValueError
dataset/ETHPy150Open rcbops/rpc-openstack/maas/plugins/maas_common.py/is_token_expired
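The two-format loop exists because the expiry timestamp may or may not carry fractional seconds. A standalone parser built on the same idea (helper name is hypothetical):

import datetime

def parse_expiry(expires_at):
    for fmt in ('%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ'):
        try:
            return datetime.datetime.strptime(expires_at, fmt)
        except ValueError:
            pass
    raise ValueError('unrecognised timestamp: %r' % expires_at)

print(parse_expiry('2016-04-26T12:00:00Z'))
print(parse_expiry('2016-04-26T12:00:00.123456Z'))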
def get_auth_from_file():
    try:
        with open(TOKEN_FILE) as token_file:
            auth_ref = json.load(token_file)

        return auth_ref
    except __HOLE__ as e:
        if e.errno == errno.ENOENT:
            return None
        status_err(e)
IOError
dataset/ETHPy150Open rcbops/rpc-openstack/maas/plugins/maas_common.py/get_auth_from_file
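The errno.ENOENT check distinguishes "no cached token yet" (a normal first-run condition, return None) from a real I/O problem (reported via status_err). The same test in isolation:

import errno

try:
    with open('/nonexistent/token-cache.json') as token_file:
        data = token_file.read()
except IOError as e:
    if e.errno == errno.ENOENT:
        print('no cached token; will authenticate from scratch')
    else:
        raise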
def get_auth_details(openrc_file=OPENRC):
    auth_details = AUTH_DETAILS
    pattern = re.compile(
        r'^(?:export\s)?(?P<key>\w+)(?:\s+)?=(?:\s+)?(?P<value>.*)$'
    )

    try:
        with open(openrc_file) as openrc:
            for line in openrc:
                match = pattern.match(line)
                if match is None:
                    continue
                k = match.group('key')
                v = match.group('value')
                if k in auth_details and auth_details[k] is None:
                    auth_details[k] = v
    except __HOLE__ as e:
        if e.errno != errno.ENOENT:
            status_err(e)
        # no openrc file, so we try the environment
        for key in auth_details.keys():
            auth_details[key] = os.environ.get(key)

    for key in auth_details.keys():
        if auth_details[key] is None:
            status_err('%s not set' % key)

    return auth_details
IOError
dataset/ETHPy150Open rcbops/rpc-openstack/maas/plugins/maas_common.py/get_auth_details
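The openrc regex accepts both "export KEY=value" and bare "KEY=value" lines, tolerating whitespace around the "=". A quick check against sample lines:

import re

pattern = re.compile(
    r'^(?:export\s)?(?P<key>\w+)(?:\s+)?=(?:\s+)?(?P<value>.*)$'
)

for line in ('export OS_USERNAME=admin',
             'OS_AUTH_URL = http://keystone:5000/v2.0'):
    match = pattern.match(line)
    print(match.group('key'), match.group('value'))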