Columns:
  function: string (lengths 79 to 138k)
  label: string (20 classes)
  info: string (lengths 42 to 261)
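Each row below is a (function, label, info) triple: a Python function with one exception name masked as __HOLE__, the label naming the masked exception class, and the info path identifying the source file and function in the ETHPy150Open corpus. As a minimal sketch of how such rows might be consumed, assuming they are stored as JSON lines with exactly these three fields (the filename rows.jsonl and the helper names are illustrative assumptions, not part of the dataset definition):

import json

def iter_rows(path="rows.jsonl"):
    # Assumed storage: one JSON object per line, with the three
    # columns described above ("function", "label", "info").
    with open(path) as f:
        for line in f:
            yield json.loads(line)

def restore_exception(row):
    # Substitute the gold label back into the masked slot to
    # recover the original source snippet.
    return row["function"].replace("__HOLE__", row["label"])

for row in iter_rows():
    print(row["info"])
    print(restore_exception(row))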
def record_http_status(self, status):
    """Record the HTTP status code and the end time of the HTTP request."""
    try:
        self.http_status = int(status)
    except (__HOLE__, TypeError):
        self.http_status = 0
    self.end_timestamp = time.time()
ValueError
dataset/ETHPy150Open AppScale/appscale/AppServer/google/appengine/ext/appstats/recording.py/Recorder.record_http_status
def get_latest_for_name(self, klass_name, project_name):
    qs = self.filter(
        name=klass_name,
        module__project_version__project__name=project_name,
    ) or self.filter(
        name__iexact=klass_name,
        module__project_version__project__name__iexact=project_name,
    )
    try:
        obj = qs.order_by('-module__project_version__version_number',)[0]
    except __HOLE__:
        raise self.model.DoesNotExist
    else:
        return obj


# TODO: quite a few of the methods on here should probably be denormed.
IndexError
dataset/ETHPy150Open refreshoxford/django-cbv-inspector/cbv/models.py/KlassManager.get_latest_for_name
def get_prepared_attributes(self):
    attributes = self.get_attributes()
    # Make a dictionary of attributes based on name
    attribute_names = {}
    for attr in attributes:
        try:
            attribute_names[attr.name] += [attr]
        except __HOLE__:
            attribute_names[attr.name] = [attr]

    ancestors = self.get_all_ancestors()

    # Find overridden attributes
    for name, attrs in attribute_names.iteritems():
        # Skip if we have only one attribute.
        if len(attrs) == 1:
            continue

        # Sort the attributes by ancestors.
        def _key(a):
            try:
                # If ancestor, return the index (>= 0)
                return ancestors.index(a.klass)
            except:
                # else a.klass == self, so return -1
                return -1
        sorted_attrs = sorted(attrs, key=_key)

        # Mark overriden KlassAttributes
        for a in sorted_attrs[1:]:
            a.overridden = True
    return attributes
KeyError
dataset/ETHPy150Open refreshoxford/django-cbv-inspector/cbv/models.py/Klass.get_prepared_attributes
def deregister(self, model):
    """
    Deregisters the given model. Remove the model from the self.app as well

    If the model is not already registered, this will raise
    ImproperlyConfigured.
    """
    try:
        del self.registry[model]
    except __HOLE__:
        raise ImproperlyConfigured(
            '%s was never registered in django-admin2' % model)
    # Remove the model from the apps registry
    # Get the app label
    app_label = utils.model_options(model).app_label
    # Delete the model from it's app registry
    del self.apps[app_label][model]

    # if no more models in an app's registry
    # then delete the app from the apps.
    if self.apps[app_label] is {}:
        del self.apps[app_label]  # no
KeyError
dataset/ETHPy150Open pydanny/django-admin2/djadmin2/core.py/Admin2.deregister
def deregister_app_verbose_name(self, app_label):
    """
    Deregisters the given app label. Remove the app label from the
    self.app_verbose_names as well.

    If the app label is not already registered, this will raise
    ImproperlyConfigured.
    """
    try:
        del self.app_verbose_names[app_label]
    except __HOLE__:
        raise ImproperlyConfigured(
            '%s app label was never registered in django-admin2' % app_label)
KeyError
dataset/ETHPy150Open pydanny/django-admin2/djadmin2/core.py/Admin2.deregister_app_verbose_name
def autodiscover(self):
    """
    Autodiscovers all admin2.py modules for apps in INSTALLED_APPS by
    trying to import them.
    """
    for app_name in [x for x in settings.INSTALLED_APPS]:
        try:
            import_module("%s.admin2" % app_name)
        except __HOLE__ as e:
            if str(e).startswith("No module named") and 'admin2' in str(e):
                continue
            raise e
ImportError
dataset/ETHPy150Open pydanny/django-admin2/djadmin2/core.py/Admin2.autodiscover
def Notify(self):
    """Overridden to call the given callable."""
    try:
        self.callable(*self.args, **self.kw_args)
    except __HOLE__:
        self.Stop()
    except:
        self.Stop()
        raise
StopIteration
dataset/ETHPy150Open enthought/mayavi/tvtk/tools/visual.py/VTimer.Notify
def migrations_dir(self):
    """
    Returns the full path of the migrations directory.
    If it doesn't exist yet, returns where it would exist, based on the
    app's migrations module (defaults to app.migrations)
    """
    module_path = self.migrations_module()
    try:
        module = importlib.import_module(module_path)
    except ImportError:
        # There's no migrations module made yet; guess!
        try:
            parent = importlib.import_module(".".join(module_path.split(".")[:-1]))
        except __HOLE__:
            # The parent doesn't even exist, that's an issue.
            raise exceptions.InvalidMigrationModule(
                application = self.application.__name__,
                module = module_path,
            )
        else:
            # Good guess.
            return os.path.join(os.path.dirname(parent.__file__), module_path.split(".")[-1])
    else:
        # Get directory directly
        return os.path.dirname(module.__file__)
ImportError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/South-1.0.2/south/migration/base.py/Migrations.migrations_dir
def migrations_module(self):
    "Returns the module name of the migrations module for this"
    app_label = application_to_app_label(self.application)
    if hasattr(settings, "SOUTH_MIGRATION_MODULES"):
        if app_label in settings.SOUTH_MIGRATION_MODULES:
            # There's an override.
            return settings.SOUTH_MIGRATION_MODULES[app_label]
    # We see if the south_migrations module exists first, and
    # use that if we find it.
    module_name = self._application.__name__ + '.south_migrations'
    try:
        importlib.import_module(module_name)
    except __HOLE__:
        return self._application.__name__ + '.migrations'
    else:
        return module_name
ImportError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/South-1.0.2/south/migration/base.py/Migrations.migrations_module
def set_application(self, application, force_creation=False, verbose_creation=True):
    """
    Called when the application for this Migrations is set.
    Imports the migrations module object, and throws a paddy if it can't.
    """
    self._application = application
    if not hasattr(application, 'migrations') and not hasattr(application, 'south_migrations'):
        try:
            module = importlib.import_module(self.migrations_module())
            self._migrations = application.migrations = module
        except __HOLE__:
            if force_creation:
                self.create_migrations_directory(verbose_creation)
                module = importlib.import_module(self.migrations_module())
                self._migrations = application.migrations = module
            else:
                raise exceptions.NoMigrations(application)
    if hasattr(application, 'south_migrations'):
        self._load_migrations_module(application.south_migrations)
    else:
        self._load_migrations_module(application.migrations)
ImportError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/South-1.0.2/south/migration/base.py/Migrations.set_application
def next_filename(self, name):
    "Returns the fully-formatted filename of what a new migration 'name' would be"
    highest_number = 0
    for migration in self:
        try:
            number = int(migration.name().split("_")[0])
            highest_number = max(highest_number, number)
        except __HOLE__:
            pass
    # Work out the new filename
    return "%04i_%s.py" % (
        highest_number + 1,
        name,
    )
ValueError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/South-1.0.2/south/migration/base.py/Migrations.next_filename
def migration(self):
    "Tries to load the actual migration module"
    full_name = self.full_name()
    try:
        migration = sys.modules[full_name]
    except KeyError:
        try:
            migration = __import__(full_name, {}, {}, ['Migration'])
        except __HOLE__ as e:
            raise exceptions.UnknownMigration(self, sys.exc_info())
        except Exception as e:
            raise exceptions.BrokenMigration(self, sys.exc_info())
    # Override some imports
    migration._ = lambda x: x  # Fake i18n
    migration.datetime = datetime_utils
    return migration
ImportError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/South-1.0.2/south/migration/base.py/Migration.migration
def no_dry_run(self):
    migration_class = self.migration_class()
    try:
        return migration_class.no_dry_run
    except __HOLE__:
        return False
AttributeError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/South-1.0.2/south/migration/base.py/Migration.no_dry_run
def run(self):
    try:
        #
        # - workaround to fetch the master IP and credentials as there does not seem to
        #   be a way to use 10.0.0.2 from within the pod
        #
        assert 'KUBERNETES_MASTER' in os.environ, '$KUBERNETES_MASTER not specified (check your portal pod)'
        assert 'KUBERNETES_USER' in os.environ, '$KUBERNETES_USER not specified (check your portal pod)'
        assert 'KUBERNETES_PWD' in os.environ, '$KUBERNETES_PWD not specified (check your portal pod)'
        auth = HTTPBasicAuth(os.environ['KUBERNETES_USER'], os.environ['KUBERNETES_PWD'])

        def _query(zk):
            replies = fire(zk, self.cluster, 'info')
            return len(replies), {key: hints for key, (_, hints, code) in replies.items() if code == 200}

        #
        # - each pod refers to its controller via the 'application' hint
        #
        total, js = run(self.proxy, _query)
        assert total == len(js), 'failure to communicate with one or more pods'
        for key in set([hints['application'] for hints in js.values()]):
            #
            # - HTTP DELETE the controller via the master API
            #
            url = 'https://%s/api/v1beta3/namespaces/default/replicationcontrollers/%s' % (os.environ['KUBERNETES_MASTER'], key)
            reply = requests.delete(url, auth=auth, verify=False)
            code = reply.status_code
            logger.debug('-> DELETE %s (HTTP %d)' % (url, code))
            assert code == 200 or code == 201, 'replication controller deletion failed (HTTP %d)' % code

        #
        # - the 'task' hint is the pod's identifier
        #
        for key, hints in js.items():
            #
            # - HTTP DELETE the pod via the master API
            #
            url = 'https://%s/api/v1beta3/namespaces/default/pods/%s' % (os.environ['KUBERNETES_MASTER'], hints['task'])
            reply = requests.delete(url, auth=auth, verify=False)
            code = reply.status_code
            logger.debug('-> DELETE %s (HTTP %d)' % (url, code))
            assert code == 200 or code == 201, 'pod deletion failed (HTTP %d)' % code

        self.killed = len(js)
        self.ok = 1

    except __HOLE__ as failure:
        logger.debug('%s : failed to deploy -> %s' % (self.cluster, failure))

    except YAMLError as failure:
        if hasattr(failure, 'problem_mark'):
            mark = failure.problem_mark
            logger.debug('%s : invalid deploy.yml (line %s, column %s)' % (self.cluster, mark.line+1, mark.column+1))

    except Exception as failure:
        logger.debug('%s : failed to deploy -> %s' % (self.cluster, diagnostic(failure)))
AssertionError
dataset/ETHPy150Open autodesk-cloud/ochonetes/images/portal/resources/toolset/toolset/commands/kill.py/_Automation.run
def form_counts(self):
    """form_counts() -> The max number of forms, some could be non-existent (deleted)."""
    try:
        return int(self.data[self.add_prefix('next_form_id')])
    except __HOLE__:
        return self.fields['next_form_id'].initial
KeyError
dataset/ETHPy150Open cloudera/hue/desktop/core/src/desktop/lib/django_forms.py/ManagementForm.form_counts
def full_clean(self):
    """Simlar to formsets.py:full_clean"""
    self._errors = []
    if not self.is_bound:
        return
    for f in self.forms:
        self._errors.append(f.errors)
    try:
        self.clean()
    except __HOLE__, e:
        self._non_form_errors = e.messages
ValidationError
dataset/ETHPy150Open cloudera/hue/desktop/core/src/desktop/lib/django_forms.py/BaseSimpleFormSet.full_clean
def run(self):
    # The WebSocketApp loop runs in it's own thread,
    # so make sure you call TheButton.close() when you're done with it!
    self.the_button.start()
    try:
        while True:
            colour = self.the_button.ascii_colour
            self.interface.write(colour.encode())
    except __HOLE__:
        pass
    self.close()
KeyboardInterrupt
dataset/ETHPy150Open ALPSquid/thebutton-monitor/src/examples/arduino_example.py/ButtonSerial.run
@property
def _core_properties_part(self):
    """
    |CorePropertiesPart| object related to this package. Creates
    a default core properties part if one is not present (not common).
    """
    try:
        return self.part_related_by(RT.CORE_PROPERTIES)
    except __HOLE__:
        core_properties_part = CorePropertiesPart.default(self)
        self.relate_to(core_properties_part, RT.CORE_PROPERTIES)
        return core_properties_part
KeyError
dataset/ETHPy150Open python-openxml/python-docx/docx/opc/package.py/OpcPackage._core_properties_part
def read_system_config(self):
    """Parse and store the system config settings in electrum.conf into system_config[]."""
    name = '/etc/electrum.conf'
    if os.path.exists(name):
        try:
            import ConfigParser
        except __HOLE__:
            print "cannot parse electrum.conf. please install ConfigParser"
            return

        p = ConfigParser.ConfigParser()
        p.read(name)
        try:
            for k, v in p.items('client'):
                self.system_config[k] = v
        except ConfigParser.NoSectionError:
            pass
ImportError
dataset/ETHPy150Open bitxbay/BitXBay/electru/build/lib/electrum/simple_config.py/SimpleConfig.read_system_config
def read_user_config(self):
    """Parse and store the user config settings in electrum.conf into user_config[]."""
    if not self.path:
        return

    path = os.path.join(self.path, "config")
    if os.path.exists(path):
        try:
            with open(path, "r") as f:
                data = f.read()
        except __HOLE__:
            return
        try:
            d = ast.literal_eval(data)  # parse raw data from reading wallet file
        except Exception:
            raise IOError("Cannot read config file.")

        self.user_config = d
IOError
dataset/ETHPy150Open bitxbay/BitXBay/electru/build/lib/electrum/simple_config.py/SimpleConfig.read_user_config
def open(file, mode="r", buffering=-1,
         encoding=None, errors=None,
         newline=None, closefd=True):

    r"""Open file and return a stream.  Raise IOError upon failure.

    file is either a text or byte string giving the name (and the path if the file isn't in the current working directory) of the file to be opened or an integer file descriptor of the file to be wrapped. (If a file descriptor is given, it is closed when the returned I/O object is closed, unless closefd is set to False.)

    mode is an optional string that specifies the mode in which the file is opened. It defaults to 'r' which means open for reading in text mode. Other common values are 'w' for writing (truncating the file if it already exists), and 'a' for appending (which on some Unix systems, means that all writes append to the end of the file regardless of the current seek position). In text mode, if encoding is not specified the encoding used is platform dependent. (For reading and writing raw bytes use binary mode and leave encoding unspecified.) The available modes are:

    ========= ===============================================================
    Character Meaning
    --------- ---------------------------------------------------------------
    'r'       open for reading (default)
    'w'       open for writing, truncating the file first
    'a'       open for writing, appending to the end of the file if it exists
    'b'       binary mode
    't'       text mode (default)
    '+'       open a disk file for updating (reading and writing)
    'U'       universal newline mode (for backwards compatibility; unneeded
              for new code)
    ========= ===============================================================

    The default mode is 'rt' (open for reading text). For binary random access, the mode 'w+b' opens and truncates the file to 0 bytes, while 'r+b' opens the file without truncation.

    Python distinguishes between files opened in binary and text modes, even when the underlying operating system doesn't. Files opened in binary mode (appending 'b' to the mode argument) return contents as bytes objects without any decoding. In text mode (the default, or when 't' is appended to the mode argument), the contents of the file are returned as strings, the bytes having been first decoded using a platform-dependent encoding or using the specified encoding if given.

    buffering is an optional integer used to set the buffering policy. Pass 0 to switch buffering off (only allowed in binary mode), 1 to select line buffering (only usable in text mode), and an integer > 1 to indicate the size of a fixed-size chunk buffer. When no buffering argument is given, the default buffering policy works as follows:

    * Binary files are buffered in fixed-size chunks; the size of the buffer is chosen using a heuristic trying to determine the underlying device's "block size" and falling back on `io.DEFAULT_BUFFER_SIZE`. On many systems, the buffer will typically be 4096 or 8192 bytes long.

    * "Interactive" text files (files for which isatty() returns True) use line buffering. Other text files use the policy described above for binary files.

    encoding is the name of the encoding used to decode or encode the file. This should only be used in text mode. The default encoding is platform dependent, but any encoding supported by Python can be passed. See the codecs module for the list of supported encodings.

    errors is an optional string that specifies how encoding errors are to be handled---this argument should not be used in binary mode. Pass 'strict' to raise a ValueError exception if there is an encoding error (the default of None has the same effect), or pass 'ignore' to ignore errors. (Note that ignoring encoding errors can lead to data loss.) See the documentation for codecs.register for a list of the permitted encoding error strings.

    newline controls how universal newlines works (it only applies to text mode). It can be None, '', '\n', '\r', and '\r\n'. It works as follows:

    * On input, if newline is None, universal newlines mode is enabled. Lines in the input can end in '\n', '\r', or '\r\n', and these are translated into '\n' before being returned to the caller. If it is '', universal newline mode is enabled, but line endings are returned to the caller untranslated. If it has any of the other legal values, input lines are only terminated by the given string, and the line ending is returned to the caller untranslated.

    * On output, if newline is None, any '\n' characters written are translated to the system default line separator, os.linesep. If newline is '', no translation takes place. If newline is any of the other legal values, any '\n' characters written are translated to the given string.

    If closefd is False, the underlying file descriptor will be kept open when the file is closed. This does not work when a file name is given and must be True in that case.

    open() returns a file object whose type depends on the mode, and through which the standard file operations such as reading and writing are performed. When open() is used to open a file in a text mode ('w', 'r', 'wt', 'rt', etc.), it returns a TextIOWrapper. When used to open a file in a binary mode, the returned class varies: in read binary mode, it returns a BufferedReader; in write binary and append binary modes, it returns a BufferedWriter, and in read/write mode, it returns a BufferedRandom.

    It is also possible to use a string or bytearray as a file for both reading and writing. For strings StringIO can be used like a file opened in a text mode, and for bytes a BytesIO can be used like a file opened in a binary mode.
    """
    if not isinstance(file, (basestring, int, long)):
        raise TypeError("invalid file: %r" % file)
    if not isinstance(mode, basestring):
        raise TypeError("invalid mode: %r" % mode)
    if not isinstance(buffering, (int, long)):
        raise TypeError("invalid buffering: %r" % buffering)
    if encoding is not None and not isinstance(encoding, basestring):
        raise TypeError("invalid encoding: %r" % encoding)
    if errors is not None and not isinstance(errors, basestring):
        raise TypeError("invalid errors: %r" % errors)
    modes = set(mode)
    if modes - set("arwb+tU") or len(mode) > len(modes):
        raise ValueError("invalid mode: %r" % mode)
    reading = "r" in modes
    writing = "w" in modes
    appending = "a" in modes
    updating = "+" in modes
    text = "t" in modes
    binary = "b" in modes
    if "U" in modes:
        if writing or appending:
            raise ValueError("can't use U and writing mode at once")
        reading = True
    if text and binary:
        raise ValueError("can't have text and binary mode at once")
    if reading + writing + appending > 1:
        raise ValueError("can't have read/write/append mode at once")
    if not (reading or writing or appending):
        raise ValueError("must have exactly one of read/write/append mode")
    if binary and encoding is not None:
        raise ValueError("binary mode doesn't take an encoding argument")
    if binary and errors is not None:
        raise ValueError("binary mode doesn't take an errors argument")
    if binary and newline is not None:
        raise ValueError("binary mode doesn't take a newline argument")
    raw = FileIO(file,
                 (reading and "r" or "") +
                 (writing and "w" or "") +
                 (appending and "a" or "") +
                 (updating and "+" or ""),
                 closefd)
    line_buffering = False
    if buffering == 1 or buffering < 0 and raw.isatty():
        buffering = -1
        line_buffering = True
    if buffering < 0:
        buffering = DEFAULT_BUFFER_SIZE
        try:
            bs = os.fstat(raw.fileno()).st_blksize
        except (os.error, __HOLE__):
            pass
        else:
            if bs > 1:
                buffering = bs
    if buffering < 0:
        raise ValueError("invalid buffering size")
    if buffering == 0:
        if binary:
            return raw
        raise ValueError("can't have unbuffered text I/O")
    if updating:
        buffer = BufferedRandom(raw, buffering)
    elif writing or appending:
        buffer = BufferedWriter(raw, buffering)
    elif reading:
        buffer = BufferedReader(raw, buffering)
    else:
        raise ValueError("unknown mode: %r" % mode)
    if binary:
        return buffer
    text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering)
    text.mode = mode
    return text
AttributeError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/_pyio.py/open
def readinto(self, b):
    """Read up to len(b) bytes into b.

    Like read(), this may issue multiple reads to the underlying raw
    stream, unless the latter is 'interactive'.

    Returns the number of bytes read (0 for EOF).

    Raises BlockingIOError if the underlying raw stream has no
    data at the moment.
    """
    # XXX This ought to work with anything that supports the buffer API
    data = self.read(len(b))
    n = len(data)
    try:
        b[:n] = data
    except __HOLE__ as err:
        import array
        if not isinstance(b, array.array):
            raise err
        b[:n] = array.array(b'b', data)
    return n
TypeError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/_pyio.py/BufferedIOBase.readinto
def __repr__(self):
    clsname = self.__class__.__name__
    try:
        name = self.name
    except __HOLE__:
        return "<_pyio.{0}>".format(clsname)
    else:
        return "<_pyio.{0} name={1!r}>".format(clsname, name)


### Lower-level APIs ###
AttributeError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/_pyio.py/_BufferedIOMixin.__repr__
def seek(self, pos, whence=0):
    if self.closed:
        raise ValueError("seek on closed file")
    try:
        pos.__index__
    except __HOLE__:
        raise TypeError("an integer is required")
    if whence == 0:
        if pos < 0:
            raise ValueError("negative seek position %r" % (pos,))
        self._pos = pos
    elif whence == 1:
        self._pos = max(0, self._pos + pos)
    elif whence == 2:
        self._pos = max(0, len(self._buffer) + pos)
    else:
        raise ValueError("invalid whence value")
    return self._pos
AttributeError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/_pyio.py/BytesIO.seek
def truncate(self, pos=None):
    if self.closed:
        raise ValueError("truncate on closed file")
    if pos is None:
        pos = self._pos
    else:
        try:
            pos.__index__
        except __HOLE__:
            raise TypeError("an integer is required")
        if pos < 0:
            raise ValueError("negative truncate position %r" % (pos,))
    del self._buffer[pos:]
    return pos
AttributeError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/_pyio.py/BytesIO.truncate
def _read_unlocked(self, n=None):
    nodata_val = b""
    empty_values = (b"", None)
    buf = self._read_buf
    pos = self._read_pos

    # Special case for when the number of bytes to read is unspecified.
    if n is None or n == -1:
        self._reset_read_buf()
        chunks = [buf[pos:]]  # Strip the consumed bytes.
        current_size = 0
        while True:
            # Read until EOF or until read() would block.
            try:
                chunk = self.raw.read()
            except IOError as e:
                if e.errno != EINTR:
                    raise
                continue
            if chunk in empty_values:
                nodata_val = chunk
                break
            current_size += len(chunk)
            chunks.append(chunk)
        return b"".join(chunks) or nodata_val

    # The number of bytes to read is specified, return at most n bytes.
    avail = len(buf) - pos  # Length of the available buffered data.
    if n <= avail:
        # Fast path: the data to read is fully buffered.
        self._read_pos += n
        return buf[pos:pos+n]
    # Slow path: read from the stream until enough bytes are read,
    # or until an EOF occurs or until read() would block.
    chunks = [buf[pos:]]
    wanted = max(self.buffer_size, n)
    while avail < n:
        try:
            chunk = self.raw.read(wanted)
        except __HOLE__ as e:
            if e.errno != EINTR:
                raise
            continue
        if chunk in empty_values:
            nodata_val = chunk
            break
        avail += len(chunk)
        chunks.append(chunk)
    # n is more then avail only when an EOF occurred or when
    # read() would have blocked.
    n = min(n, avail)
    out = b"".join(chunks)
    self._read_buf = out[n:]  # Save the extra data in the buffer.
    self._read_pos = 0
    return out[:n] if out else nodata_val
IOError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/_pyio.py/BufferedReader._read_unlocked
def _peek_unlocked(self, n=0):
    want = min(n, self.buffer_size)
    have = len(self._read_buf) - self._read_pos
    if have < want or have <= 0:
        to_read = self.buffer_size - have
        while True:
            try:
                current = self.raw.read(to_read)
            except __HOLE__ as e:
                if e.errno != EINTR:
                    raise
                continue
            break
        if current:
            self._read_buf = self._read_buf[self._read_pos:] + current
            self._read_pos = 0
    return self._read_buf[self._read_pos:]
IOError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/_pyio.py/BufferedReader._peek_unlocked
def _flush_unlocked(self):
    if self.closed:
        raise ValueError("flush of closed file")
    while self._write_buf:
        try:
            n = self.raw.write(self._write_buf)
        except BlockingIOError:
            raise RuntimeError("self.raw should implement RawIOBase: it "
                               "should not raise BlockingIOError")
        except __HOLE__ as e:
            if e.errno != EINTR:
                raise
            continue
        if n is None:
            raise BlockingIOError(
                errno.EAGAIN,
                "write could not complete without blocking", 0)
        if n > len(self._write_buf) or n < 0:
            raise IOError("write() returned incorrect number of bytes")
        del self._write_buf[:n]
IOError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/_pyio.py/BufferedWriter._flush_unlocked
def __init__(self, buffer, encoding=None, errors=None, newline=None,
             line_buffering=False):
    if newline is not None and not isinstance(newline, basestring):
        raise TypeError("illegal newline type: %r" % (type(newline),))
    if newline not in (None, "", "\n", "\r", "\r\n"):
        raise ValueError("illegal newline value: %r" % (newline,))
    if encoding is None:
        try:
            import locale
        except __HOLE__:
            # Importing locale may fail if Python is being built
            encoding = "ascii"
        else:
            encoding = locale.getpreferredencoding()

    if not isinstance(encoding, basestring):
        raise ValueError("invalid encoding: %r" % encoding)

    if errors is None:
        errors = "strict"
    else:
        if not isinstance(errors, basestring):
            raise ValueError("invalid errors: %r" % errors)

    self._buffer = buffer
    self._line_buffering = line_buffering
    self._encoding = encoding
    self._errors = errors
    self._readuniversal = not newline
    self._readtranslate = newline is None
    self._readnl = newline
    self._writetranslate = newline != ''
    self._writenl = newline or os.linesep
    self._encoder = None
    self._decoder = None
    self._decoded_chars = ''  # buffer for text returned from decoder
    self._decoded_chars_used = 0  # offset into _decoded_chars for read()
    self._snapshot = None  # info for reconstructing decoder state
    self._seekable = self._telling = self.buffer.seekable()

    if self._seekable and self.writable():
        position = self.buffer.tell()
        if position != 0:
            try:
                self._get_encoder().setstate(0)
            except LookupError:
                # Sometimes the encoder doesn't exist
                pass

    # self._snapshot is either None, or a tuple (dec_flags, next_input)
    # where dec_flags is the second (integer) item of the decoder state
    # and next_input is the chunk of input bytes that comes next after the
    # snapshot point. We use this to reconstruct decoder states in tell().

    # Naming convention:
    #   - "bytes_..." for integer variables that count input bytes
    #   - "chars_..." for integer variables that count decoded characters
ImportError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/_pyio.py/TextIOWrapper.__init__
def __repr__(self):
    try:
        name = self.name
    except __HOLE__:
        return "<_pyio.TextIOWrapper encoding='{0}'>".format(self.encoding)
    else:
        return "<_pyio.TextIOWrapper name={0!r} encoding='{1}'>".format(
            name, self.encoding)
AttributeError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/_pyio.py/TextIOWrapper.__repr__
def read(self, n=None):
    self._checkReadable()
    if n is None:
        n = -1
    decoder = self._decoder or self._get_decoder()
    try:
        n.__index__
    except __HOLE__:
        raise TypeError("an integer is required")
    if n < 0:
        # Read everything.
        result = (self._get_decoded_chars() +
                  decoder.decode(self.buffer.read(), final=True))
        self._set_decoded_chars('')
        self._snapshot = None
        return result
    else:
        # Keep reading chunks until we have n characters to return.
        eof = False
        result = self._get_decoded_chars(n)
        while len(result) < n and not eof:
            eof = not self._read_chunk()
            result += self._get_decoded_chars(n - len(result))
        return result
AttributeError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/_pyio.py/TextIOWrapper.read
def convert_to_color ( object, name, value ):
    """ Converts a number into a QColor object.
    """
    # Try the toolkit agnostic format.
    try:
        tup = eval(value)
    except:
        tup = value

    if isinstance(tup, tuple):
        if 3 <= len(tup) <= 4:
            try:
                color = QtGui.QColor(*tup)
            except __HOLE__:
                raise TraitError
        else:
            raise TraitError
    else:
        if isinstance(value, basestring):
            # Allow for spaces in the string value.
            value = value.replace(' ', '')

        # Let the standard ctors handle the value.
        try:
            color = QtGui.QColor(value)
        except TypeError:
            raise TraitError

    if not color.isValid():
        raise TraitError

    return color
TypeError
dataset/ETHPy150Open enthought/traitsui/traitsui/qt4/color_trait.py/convert_to_color
def __init__(self, label='', width=32, hide=None, empty_char=BAR_EMPTY_CHAR,
             filled_char=BAR_FILLED_CHAR, expected_size=None, every=1):
    self.label = label
    self.width = width
    self.hide = hide
    # Only show bar in terminals by default (better for piping, logging etc.)
    if hide is None:
        try:
            self.hide = not STREAM.isatty()
        except __HOLE__:  # output does not support isatty()
            self.hide = True
    self.empty_char = empty_char
    self.filled_char = filled_char
    self.expected_size = expected_size
    self.every = every
    self.start = time.time()
    self.ittimes = []
    self.eta = 0
    self.etadelta = time.time()
    self.etadisp = self.format_time(self.eta)
    self.last_progress = 0
    if (self.expected_size):
        self.show(0)
AttributeError
dataset/ETHPy150Open kennethreitz/clint/clint/textui/progress.py/Bar.__init__
def _parseIntegerArgument(args, key, defaultValue):
    """
    Attempts to parse the specified key in the specified argument
    dictionary into an integer. If the argument cannot be parsed,
    raises a BadRequestIntegerException. If the key is not present,
    return the specified default value.
    """
    ret = defaultValue
    if key in args:
        try:
            ret = int(args[key])
        except __HOLE__:
            raise exceptions.BadRequestIntegerException(key, args[key])
    return ret
ValueError
dataset/ETHPy150Open ga4gh/server/ga4gh/backend.py/_parseIntegerArgument
def _parsePageToken(pageToken, numValues):
    """
    Parses the specified pageToken and returns a list of the specified
    number of values. Page tokens are assumed to consist of a fixed number
    of integers seperated by colons. If the page token does not conform
    to this specification, raise a InvalidPageToken exception.
    """
    tokens = pageToken.split(":")
    if len(tokens) != numValues:
        msg = "Invalid number of values in page token"
        raise exceptions.BadPageTokenException(msg)
    try:
        values = map(int, tokens)
    except __HOLE__:
        msg = "Malformed integers in page token"
        raise exceptions.BadPageTokenException(msg)
    return values
ValueError
dataset/ETHPy150Open ga4gh/server/ga4gh/backend.py/_parsePageToken
def variantAnnotationSetsGenerator(self, request):
    """
    Returns a generator over the (variantAnnotationSet, nextPageToken) pairs
    defined by the specified request.
    """
    compoundId = datamodel.VariantSetCompoundId.parse(request.variantSetId)
    dataset = self.getDataRepository().getDataset(compoundId.datasetId)
    results = []
    for annset in dataset.getVariantAnnotationSets():
        try:
            variantSetId = request.variantSetId
        except __HOLE__:
            variantSetId = ""
        if str(annset._variantSetId) == str(variantSetId):
            results.append(annset)
    return self._objectListGenerator(request, results)
ValueError
dataset/ETHPy150Open ga4gh/server/ga4gh/backend.py/Backend.variantAnnotationSetsGenerator
def runSearchRequest(
        self, requestStr, requestClass, responseClass, objectGenerator):
    """
    Runs the specified request. The request is a string containing
    a JSON representation of an instance of the specified requestClass.
    We return a string representation of an instance of the specified
    responseClass in JSON format. Objects are filled into the page list
    using the specified object generator, which must return
    (object, nextPageToken) pairs, and be able to resume iteration from
    any point using the nextPageToken attribute of the request object.
    """
    self.startProfile()
    try:
        requestDict = json.loads(requestStr)
    except __HOLE__:
        raise exceptions.InvalidJsonException(requestStr)
    self.validateRequest(requestDict, requestClass)
    request = requestClass.fromJsonDict(requestDict)
    if request.pageSize is None:
        request.pageSize = self._defaultPageSize
    if request.pageSize <= 0:
        raise exceptions.BadPageSizeException(request.pageSize)
    responseBuilder = protocol.SearchResponseBuilder(
        responseClass, request.pageSize, self._maxResponseLength)
    nextPageToken = None
    for obj, nextPageToken in objectGenerator(request):
        responseBuilder.addValue(obj)
        if responseBuilder.isFull():
            break
    responseBuilder.setNextPageToken(nextPageToken)
    responseString = responseBuilder.getJsonString()
    self.validateResponse(responseString, responseClass)
    self.endProfile()
    return responseString
ValueError
dataset/ETHPy150Open ga4gh/server/ga4gh/backend.py/Backend.runSearchRequest
def runtests(*test_args):
    if not test_args:
        test_args = [app_to_test]
    parent = dirname(abspath(__file__))
    sys.path.insert(0, parent)
    TestRunner = get_runner(settings)
    test_runner = TestRunner(verbosity=1, interactive=True)
    try:
        from django import setup
        setup()
    except __HOLE__:
        pass
    failures = test_runner.run_tests(test_args)
    sys.exit(failures)
ImportError
dataset/ETHPy150Open django-de/django-simple-ratings/runtests.py/runtests
@view_config(renderer='new_page.mak', route_name='new_page')
@view_config(renderer='new_post.mak', route_name='new_post')
def submit(request):
    s = request.session
    p = request.session['safe_post']
    r = request
    qs = s['safe_get']
    s['message'] = "Post a story."
    dbsession = DBSession()
    stories = None
    sections = section_queries.get_sections()
    new_url_text = ''
    new_title_text = ''
    route_name = r.matched_route.name

    if route_name == 'new_page':
        # require admin to load a new page form
        if 'logged_in_admin' not in s or s['logged_in_admin'] == False:
            return HTTPNotFound()

    # if uses came in with a share button, redirect to existing discussion if there is one
    if 'from' in qs and qs['from'] == 'button':
        existing_post = submission.get_story_by_url_oldest(qs['url'])
        if existing_post:
            return HTTPFound(r.route_url('full', sub_id=existing_post.id))
        new_url_text = qs['url']
        if 'title' in qs:
            new_title_text = qs['title']
        if 'logged_in' not in s:
            s['message'] = 'Sorry, you must <a href="{0}">log in</a> before you can share a link.'.format(r.route_url('login'))
            return {'stories': [], 'success': False, 'code': 'ENOLOGIN'}

    if p and 'title' in p:
        if 'logged_in' not in s:
            s['message'] = 'Sorry, please log in first'
            return {'stories': [], 'success': False, 'code': 'ENOLOGIN'}
        if 'section_id' not in p or p['section_id'] == '':
            return {'stories': [], 'success': False, 'code': 'ENOSECTION'}
        if 'url' in p and p['url'] != '' and p['url'] is not None:
            p['url'] = general.strip_all_html(p['url'])
            if not re.match(r'http[s]*:\/\/', p['url']):
                p['url'] = 'http://' + p['url']
        else:
            # set to None so that NULL goes into the database
            p['url'] = None
        if route_name == 'new_page':
            render_type = p['render_type']
            slug = p['slug']
            # if we can find this slug already, kill submission here.
            try:
                s = dbsession.query(Submission).filter(Submission.slug == slug).one()
                s['message'] = 'This slug is already taken.'
                success = False
            except sqlalchemy.orm.exc.NoResultFound:
                pass
        else:
            slug = ''
            render_type = 'story_md'
        if 'section_id' in p:
            sub = Submission(p['title'][:100], p['description'], p['url'], s['users.id'], section = p['section_id'])
        else:
            sub = Submission(p['title'][:100], p['description'], p['url'], s['users.id'])
        sub.render_type = render_type
        # slug octet no longer derived from story's actual id
        if slug == '':
            slug = u"{title}-{uuid_first_octet}".format(
                title = slugify.slugify(unicode(p['title'][:100])),
                uuid_first_octet = str(general.gen_uuid())[:8])
        sub.slug = slug
        dbsession.add(sub)
        dbsession.flush()
        # add notify
        if general.check_notify_default(s['users.id'], r):
            notify_queries.create_notify(s['users.id'], sub.id, s['users.id'])
        v = Vote(sub.id, s['users.id'], 1, "submission", None)
        v.direction = 1
        dbsession.add(v)
        s['message'] = "Added."
        try:
            if request.registry.solr_conn:
                # we flush here to ensure we have a vaild id object when added to solr
                # we use this if statement so that the exception will be raised before
                # dbsession is flushed, hence avoiding an unnecessary flush if the site
                # is not using solr.
                dbsession.flush()
                request.registry.solr_conn.add({'id': sub.id, 'title': sub.title, 'description': sub.description})
                request.registry.solr_conn.commit()
        except __HOLE__:
            # solr is not configured for this connection
            pass
        return HTTPFound(r.route_url('home'))

    return {'stories': stories, 'success': True, 'code': 0,
            'new_url_text': new_url_text, 'new_title_text': new_title_text,
            'sections': sections}
AttributeError
dataset/ETHPy150Open sjuxax/raggregate/raggregate/views/submission.py/submit
def test_config(self):
    config = ssh_schemas.RosterEntryConfig()
    expected = {
        '$schema': 'http://json-schema.org/draft-04/schema#',
        'title': 'Roster Entry',
        'description': 'Salt SSH roster entry definition',
        'type': 'object',
        'properties': {
            'host': {
                'title': 'Host',
                'description': 'The IP address or DNS name of the remote host',
                'type': 'string',
                'pattern': r'^((\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})|([A-Za-z0-9][A-Za-z0-9\.\-]{1,255}))$',
                'minLength': 1
            },
            'port': {
                'description': 'The target system\'s ssh port number',
                'title': 'Port',
                'default': 22,
                'maximum': 65535,
                'minimum': 0,
                'type': 'integer'
            },
            'user': {
                'default': 'root',
                'type': 'string',
                'description': 'The user to log in as. Defaults to root',
                'title': 'User',
                'minLength': 1
            },
            'passwd': {
                'title': 'Password',
                'type': 'string',
                'description': 'The password to log in with',
                'format': 'secret',
                'minLength': 1
            },
            'priv': {
                'type': 'string',
                'description': 'File path to ssh private key, defaults to salt-ssh.rsa',
                'title': 'Private Key',
                'minLength': 1
            },
            'sudo': {
                'default': False,
                'type': 'boolean',
                'description': 'run command via sudo. Defaults to False',
                'title': 'Sudo'
            },
            'timeout': {
                'type': 'integer',
                'description': 'Number of seconds to wait for response when establishing an SSH connection',
                'title': 'Timeout'
            },
            'thin_dir': {
                'type': 'string',
                'description': 'The target system\'s storage directory for Salt components. Defaults to /tmp/salt-<hash>.',
                'title': 'Thin Directory'
            },
            # The actuall representation of the minion options would make this HUGE!
            'minion_opts': ssh_schemas.DictItem(
                title='Minion Options',
                description='Dictionary of minion options',
                properties=MinionConfiguration()).serialize(),
        },
        'anyOf': [
            {'required': ['passwd']},
            {'required': ['priv']}
        ],
        'required': [
            'host',
            'user',
        ],
        'x-ordering': [
            'host',
            'port',
            'user',
            'passwd',
            'priv',
            'sudo',
            'timeout',
            'thin_dir',
            'minion_opts'
        ],
        'additionalProperties': False
    }
    try:
        self.assertDictContainsSubset(expected['properties'], config.serialize()['properties'])
        self.assertDictContainsSubset(expected, config.serialize())
    except __HOLE__:
        import json
        print(json.dumps(config.serialize(), indent=4))
        raise
AssertionError
dataset/ETHPy150Open saltstack/salt/tests/unit/config/schemas/ssh_test.py/RoosterEntryConfigTest.test_config
def test_roster_config(self):
    try:
        self.assertDictContainsSubset(
            {
                "$schema": "http://json-schema.org/draft-04/schema#",
                "title": "Roster Configuration",
                "description": "Roster entries definition",
                "type": "object",
                "patternProperties": {
                    r"^([^:]+)$": ssh_schemas.RosterEntryConfig.serialize()
                },
                "additionalProperties": False
            },
            ssh_schemas.RosterItem.serialize()
        )
    except __HOLE__:
        import json
        print(json.dumps(ssh_schemas.RosterItem.serialize(), indent=4))
        raise
AssertionError
dataset/ETHPy150Open saltstack/salt/tests/unit/config/schemas/ssh_test.py/RosterItemTest.test_roster_config
def _serialize_context(context):
    # Our sending format is made up of two messages. The first has a
    # quick to unpack set of meta data that our collector is going to
    # use for routing and stats. This is much faster than having the
    # collector decode the whole event. We're just going to use python
    # struct module to make a quick and dirty data structure
    context_dict = context.to_dict()
    for key in ('host', 'type'):
        if len(context_dict.get(key, "")) > 64:
            raise ValueError("Value too long: %r" % key)

    meta_data = struct.pack(META_STRUCT_FMT, META_STRUCT_VERSION,
                            context_dict['end'],
                            context_dict['host'],
                            context_dict['type'])
    try:
        context_data = msgpack.packb(context_dict)
    except __HOLE__:
        try:
            # If we fail to serialize our context, we can try again with an
            # enhanced packer (it's slower though)
            context_data = msgpack.packb(context_dict,
                                         default=utils.msgpack_encode_default)
        except TypeError:
            log.exception("Serialization failure (not fatal, dropping data)")

            # One last try after dropping the body
            context_dict['body'] = None
            context_data = msgpack.packb(context_dict)

    return meta_data, context_data
TypeError
dataset/ETHPy150Open rhettg/BlueOx/blueox/network.py/_serialize_context
def merge(length, *sources):
    """Merge lists of lists.

    Each source produces (or contains) lists of ordered items.
    Items of each list must be greater or equal to all items of
    the previous list (that implies that items must be comparable).

    The function merges the sources into lists with the length
    equal to given one, except the last list which can be shorter.

    Example:
        it1 = iter([[1, 3, 5], [5, 7, 9, 14], [17, 21, 36, 41]])
        it2 = iter([[2, 2, 4], [9, 10], [16, 19, 23, 26, 91]])
        it3 = iter([[5], [5, 7, 11, 14, 14, 19, 23]])

        it = merge(10, it1, it2, it3)
        for i in it:
            print i

    prints out:
        [1, 2, 2, 3, 4, 5, 5, 5, 5, 7, 7, 9, 9, 10]
        [11, 14, 14, 14, 16, 17, 19, 19, 21, 23, 23]
        [26, 36, 41, 91]

    :param: length, length of generated lists, except the last one.
    :param: sources, generators that produce lists of items to merge
    """

    streams = [{"data": [], "gen": src} for src in sources]

    out_chunk = []
    while True:
        while len(out_chunk) < length:
            # Least right item among streams
            lri = None

            # Refresh data if needed
            for s in streams:
                if s["gen"] and not s["data"]:
                    try:
                        while not s["data"]:
                            s["data"] = next(s["gen"])
                    except __HOLE__:
                        s["gen"] = None

                # ... and define least right item
                if s["data"]:
                    rightmost_item = s["data"][-1]
                    if (lri is None) or (rightmost_item < lri):
                        lri = rightmost_item

            # No more data to merge
            if lri is None:
                break

            to_merge = []
            for s in streams:
                if s["data"]:
                    pos = bisect.bisect_right(s["data"], lri)
                    to_merge.append(s["data"][:pos])
                    s["data"] = s["data"][pos:]

            out_chunk += heapq.merge(*to_merge)

        if out_chunk:
            if len(out_chunk) > length:
                yield out_chunk[:length]
                out_chunk = out_chunk[length:]
            else:
                yield out_chunk
                out_chunk = []
        else:
            return
StopIteration
dataset/ETHPy150Open openstack/rally/rally/common/utils.py/merge
def timeout_thread(queue):
    """Terminate threads by timeout.

    Function need to be run in separate thread. Its designed to terminate
    threads which are running longer then timeout.

    Parent thread will put tuples (thread_ident, deadline) in the queue,
    where `thread_ident` is Thread.ident value of thread to watch, and
    `deadline` is timestamp when thread should be terminated. Also tuple
    (None, None) should be put when all threads are exited and no more
    threads to watch.

    :param queue: Queue object to communicate with parent thread.
    """

    all_threads = collections.deque()
    while True:
        if not all_threads:
            timeout = None
        else:
            thread_ident, deadline = all_threads[0]
            timeout = deadline - time.time()
        try:
            next_thread = queue.get(timeout=timeout)
            all_threads.append(next_thread)
        except (moves.queue.Empty, __HOLE__):
            # NOTE(rvasilets) Empty means that timeout was occurred.
            # ValueError means that timeout lower then 0.
            LOG.info("Thread %s is timed out. Terminating." % thread_ident)
            terminate_thread(thread_ident)
            all_threads.popleft()
        if next_thread == (None, None,):
            return
ValueError
dataset/ETHPy150Open openstack/rally/rally/common/utils.py/timeout_thread
def islink(path):
    """Test whether a path is a symbolic link"""
    try:
        st = os.lstat(path)
    except (os.error, __HOLE__):
        return False
    return stat.S_ISLNK(st.st_mode)
AttributeError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/javapath.py/islink
def expandvars(path):
    """Expand shell variables of form $var and ${var}.

    Unknown variables are left unchanged."""
    if '$' not in path:
        return path
    import string
    varchars = string.letters + string.digits + '_-'
    res = ''
    index = 0
    pathlen = len(path)
    while index < pathlen:
        c = path[index]
        if c == '\'':   # no expansion within single quotes
            path = path[index + 1:]
            pathlen = len(path)
            try:
                index = path.index('\'')
                res = res + '\'' + path[:index + 1]
            except __HOLE__:
                res = res + path
                index = pathlen - 1
        elif c == '$':  # variable or '$$'
            if path[index + 1:index + 2] == '$':
                res = res + c
                index = index + 1
            elif path[index + 1:index + 2] == '{':
                path = path[index+2:]
                pathlen = len(path)
                try:
                    index = path.index('}')
                    var = path[:index]
                    if os.environ.has_key(var):
                        res = res + os.environ[var]
                except ValueError:
                    res = res + path
                    index = pathlen - 1
            else:
                var = ''
                index = index + 1
                c = path[index:index + 1]
                while c != '' and c in varchars:
                    var = var + c
                    index = index + 1
                    c = path[index:index + 1]
                if os.environ.has_key(var):
                    res = res + os.environ[var]
                if c != '':
                    res = res + c
        else:
            res = res + c
        index = index + 1
    return res
ValueError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/javapath.py/expandvars
def get_mod_func(callback):
    """Convert a fully-qualified module.function name to (module, function) - stolen from Django"""
    try:
        dot = callback.rindex('.')
    except __HOLE__:
        return (callback, '')
    return (callback[:dot], callback[dot+1:])
ValueError
dataset/ETHPy150Open nigelkersten/pymacadmin/bin/crankd.py/get_mod_func
def get_callable_from_string(f_name):
    """Takes a string containing a function name (optionally module qualified) and returns a callable object"""
    try:
        mod_name, func_name = get_mod_func(f_name)
        if mod_name == "" and func_name == "":
            raise AttributeError("%s couldn't be converted to a module or function name" % f_name)

        module = __import__(mod_name)

        if func_name == "":
            func_name = mod_name  # The common case is an eponymous class

        return getattr(module, func_name)

    except (ImportError, __HOLE__), exc:
        raise RuntimeError("Unable to create a callable object for '%s': %s" % (f_name, exc))
AttributeError
dataset/ETHPy150Open nigelkersten/pymacadmin/bin/crankd.py/get_callable_from_string
def load_config(options):
    """Load our configuration from plist or create a default file if none exists"""
    if not os.path.exists(options.config_file):
        logging.info("%s does not exist - initializing with an example configuration" % CRANKD_OPTIONS.config_file)
        print >>sys.stderr, 'Creating %s with default options for you to customize' % options.config_file
        print >>sys.stderr, '%s --list-events will list the events you can monitor on this system' % sys.argv[0]
        example_config = {
            'SystemConfiguration': {
                'State:/Network/Global/IPv4': {
                    'command': '/bin/echo "Global IPv4 config changed"'
                }
            },
            'NSWorkspace': {
                'NSWorkspaceDidMountNotification': {
                    'command': '/bin/echo "A new volume was mounted!"'
                },
                'NSWorkspaceDidWakeNotification': {
                    'command': '/bin/echo "The system woke from sleep!"'
                },
                'NSWorkspaceWillSleepNotification': {
                    'command': '/bin/echo "The system is about to go to sleep!"'
                }
            }
        }
        writePlist(example_config, options.config_file)
        sys.exit(1)

    logging.info("Loading configuration from %s" % CRANKD_OPTIONS.config_file)

    plist = readPlist(options.config_file)

    if "imports" in plist:
        for module in plist['imports']:
            try:
                __import__(module)
            except __HOLE__, exc:
                print >> sys.stderr, "Unable to import %s: %s" % (module, exc)
                sys.exit(1)
    return plist
ImportError
dataset/ETHPy150Open nigelkersten/pymacadmin/bin/crankd.py/load_config
def add_sc_notifications(sc_config):
    """
    This uses the SystemConfiguration framework to get a SCDynamicStore session
    and register for certain events. See the Apple SystemConfiguration
    documentation for details:

    <http://developer.apple.com/documentation/Networking/Reference/SysConfig/SCDynamicStore/CompositePage.html>

    TN1145 may also be of interest:

    <http://developer.apple.com/technotes/tn/tn1145.html>

    Inspired by the PyObjC SystemConfiguration callback demos:
    <https://svn.red-bean.com/pyobjc/trunk/pyobjc/pyobjc-framework-SystemConfiguration/Examples/CallbackDemo/>
    """
    keys = sc_config.keys()

    try:
        for key in keys:
            SC_HANDLERS[key] = get_callable_for_event(key, sc_config[key], context="SystemConfiguration: %s" % key)
    except __HOLE__, exc:
        print >> sys.stderr, "Error configuring SystemConfiguration events: %s" % exc
        sys.exit(1)

    store = get_sc_store()

    SCDynamicStoreSetNotificationKeys(store, None, keys)

    # Get a CFRunLoopSource for our store session and add it to the application's runloop:
    CFRunLoopAddSource(
        NSRunLoop.currentRunLoop().getCFRunLoop(),
        SCDynamicStoreCreateRunLoopSource(None, store, 0),
        kCFRunLoopCommonModes
    )

    log_list("Listening for these SystemConfiguration events: %s", keys)
AttributeError
dataset/ETHPy150Open nigelkersten/pymacadmin/bin/crankd.py/add_sc_notifications
def add_fs_notification(f_path, callback):
    """Adds an FSEvent notification for the specified path"""
    path = os.path.realpath(os.path.expanduser(f_path))
    if not os.path.exists(path):
        raise AttributeError("Cannot add an FSEvent notification: %s does not exist!" % path)

    if not os.path.isdir(path):
        path = os.path.dirname(path)

    try:
        FS_WATCHED_FILES[path].append(callback)
    except __HOLE__:
        FS_WATCHED_FILES[path] = [callback]
KeyError
dataset/ETHPy150Open nigelkersten/pymacadmin/bin/crankd.py/add_fs_notification
def main():
    configure_logging()

    global CRANKD_OPTIONS, CRANKD_CONFIG
    CRANKD_OPTIONS = process_commandline()
    CRANKD_CONFIG = load_config(CRANKD_OPTIONS)

    if "NSWorkspace" in CRANKD_CONFIG:
        add_workspace_notifications(CRANKD_CONFIG['NSWorkspace'])

    if "SystemConfiguration" in CRANKD_CONFIG:
        add_sc_notifications(CRANKD_CONFIG['SystemConfiguration'])

    if "FSEvents" in CRANKD_CONFIG:
        add_fs_notifications(CRANKD_CONFIG['FSEvents'])

    # We reuse our FSEvents code to watch for changes to our files and
    # restart if any of our libraries have been updated:
    add_conditional_restart(CRANKD_OPTIONS.config_file, "Configuration file %s changed" % CRANKD_OPTIONS.config_file)
    for m in filter(lambda i: i and hasattr(i, '__file__'), sys.modules.values()):
        if m.__name__ == "__main__":
            msg = "%s was updated" % m.__file__
        else:
            msg = "Module %s was updated" % m.__name__
        add_conditional_restart(m.__file__, msg)

    signal.signal(signal.SIGHUP, partial(restart, "SIGHUP received"))

    start_fs_events()

    # NOTE: This timer is basically a kludge around the fact that we can't reliably get
    #       signals or Control-C inside a runloop. This wakes us up often enough to
    #       appear tolerably responsive:
    CFRunLoopAddTimer(
        NSRunLoop.currentRunLoop().getCFRunLoop(),
        CFRunLoopTimerCreate(None, CFAbsoluteTimeGetCurrent(), 2.0, 0, 0, timer_callback, None),
        kCFRunLoopCommonModes
    )

    try:
        AppHelper.runConsoleEventLoop(installInterrupt=True)
    except __HOLE__:
        logging.info("KeyboardInterrupt received, exiting")

    sys.exit(0)
KeyboardInterrupt
dataset/ETHPy150Open nigelkersten/pymacadmin/bin/crankd.py/main
def do_shell(command, context=None, **kwargs):
    """Executes a shell command with logging"""
    logging.info("%s: executing %s" % (context, command))

    child_env = {'CRANKD_CONTEXT': context}

    # We'll pull a subset of the available information in for shell scripts.
    # Anyone who needs more will probably want to write a Python handler
    # instead so they can reuse things like our logger & config info and avoid
    # ordeals like associative arrays in Bash
    for k in ['info', 'key']:
        if k in kwargs and kwargs[k]:
            child_env['CRANKD_%s' % k.upper()] = str(kwargs[k])

    user_info = kwargs.get("user_info")
    if user_info:
        for k, v in user_info.items():
            child_env[create_env_name(k)] = str(v)

    try:
        rc = call(command, shell=True, env=child_env)
        if rc == 0:
            logging.debug("`%s` returned %d" % (command, rc))
        elif rc < 0:
            logging.error("`%s` was terminated by signal %d" % (command, -rc))
        else:
            logging.error("`%s` returned %d" % (command, rc))
    except __HOLE__, exc:
        logging.error("Got an exception when executing %s:" % (command, exc))
OSError
dataset/ETHPy150Open nigelkersten/pymacadmin/bin/crankd.py/do_shell
def add_conditional_restart(file_name, reason):
    """FSEvents monitors directories, not files. This function uses stat to
    restart only if the file's mtime has changed"""
    file_name = os.path.realpath(file_name)
    while not os.path.exists(file_name):
        file_name = os.path.dirname(file_name)
    orig_stat = os.stat(file_name).st_mtime

    def cond_restart(*args, **kwargs):
        try:
            if os.stat(file_name).st_mtime != orig_stat:
                restart(reason)
        except (OSError, __HOLE__, RuntimeError), exc:
            restart("Exception while checking %s: %s" % (file_name, exc))

    add_fs_notification(file_name, cond_restart)
IOError
dataset/ETHPy150Open nigelkersten/pymacadmin/bin/crankd.py/add_conditional_restart
def __getattr__(self, name):
    try:
        return getattr(self.obj, name)
    except __HOLE__:
        return self.obj.get_tag(name)
AttributeError
dataset/ETHPy150Open pcapriotti/pledger/pledger/filter.py/SmartWrapper.__getattr__
def get_index_path(self):
    """
    Returns the index path.

    Raises ImproperlyConfigured if the path is not set in the settings.
    """
    try:
        return settings.SEARCH_INDEX
    except __HOLE__:
        raise ImproperlyConfigured("Set SEARCH_INDEX into your settings.")
AttributeError
dataset/ETHPy150Open OpenSlides/OpenSlides/openslides/utils/search.py/Index.get_index_path
def get_or_create_index(self):
    """
    Returns an index object.

    Creats the index if it does not exist
    """
    # Try to return a storage object that was created before.
    try:
        return self.storage
    except __HOLE__:
        pass
    path = self.get_index_path()
    if path != 'ram' and exists_in(path):
        return open_dir(path)
    return self.create_index()
AttributeError
dataset/ETHPy150Open OpenSlides/OpenSlides/openslides/utils/search.py/Index.get_or_create_index
def index_add_instance(sender, instance, **kwargs):
    """
    Receiver that should be called by the post_save signal and the m2m_changed
    signal.

    If the instance has an method get_search_string, then it is written
    into the search index. The method has to return an dictonary that can
    be used as keyword arguments to writer.add_document.
    """
    # TODO: This method blocks the search index. So in a multi thread environment
    #       this method can raise whoosh.store.LockError. Therefore it has to
    #       be done in tornado to support the big mode.
    #       See: https://pythonhosted.org/Whoosh/indexing.html#indexing-documents
    try:
        get_search_index_string = instance.get_search_index_string
    except __HOLE__:
        # If the instance is not searchable, then exit this signal early.
        return

    created = kwargs.get('created', False)
    writer_kwargs = {
        'id_collection': combine_id_and_collection(instance),
        'id': str(instance.pk),
        'collection': instance.get_collection_string(),
        'content': get_search_index_string()}

    with index.get_or_create_index().writer() as writer:
        if created:
            writer.add_document(**writer_kwargs)
        else:
            writer.update_document(**writer_kwargs)
AttributeError
dataset/ETHPy150Open OpenSlides/OpenSlides/openslides/utils/search.py/index_add_instance
def index_del_instance(sender, instance, **kwargs):
    """
    Like index_add_instance but deletes the instance from the index. Should be
    called by the post_delete signal.
    """
    try:
        # Try to get the arrribute get_search_attributes. It is not needed
        # in this method (and therefore not called) but it tells us if the
        # instance is searchable.
        instance.get_search_index_string
    except __HOLE__:
        # If the instance is not searchable, then exit this signal early.
        return

    with index.get_or_create_index().writer() as writer:
        writer.delete_by_term('id_collection', combine_id_and_collection(instance))
AttributeError
dataset/ETHPy150Open OpenSlides/OpenSlides/openslides/utils/search.py/index_del_instance
def remove_artists(artists):
    """
    Remove a collection of matplotlib artists from a scene

    :param artists: Container of artists
    """
    for a in artists:
        try:
            a.remove()
        except __HOLE__:  # already removed
            pass
ValueError
dataset/ETHPy150Open glue-viz/glue/glue/utils/matplotlib.py/remove_artists
def point_contour(x, y, data):
    """Calculate the contour that passes through (x,y) in data

    :param x: x location
    :param y: y location
    :param data: 2D image
    :type data: :class:`numpy.ndarray`

    Returns:

       * A (nrow, 2column) numpy array. The two columns give the x and
         y locations of the contour vertices
    """
    try:
        from scipy import ndimage
    except __HOLE__:
        raise ImportError("Image processing in Glue requires SciPy")

    inten = data[y, x]
    labeled, nr_objects = ndimage.label(data >= inten)
    z = data * (labeled == labeled[y, x])
    y, x = np.mgrid[0:data.shape[0], 0:data.shape[1]]
    from matplotlib import _cntr
    cnt = _cntr.Cntr(x, y, z)
    xy = cnt.trace(inten)
    if not xy:
        return None
    xy = xy[0]
    return xy
ImportError
dataset/ETHPy150Open glue-viz/glue/glue/utils/matplotlib.py/point_contour
def CheckFlow(self):
    urn = self.client_id.Add("/fs/tsk")
    fd = aff4.FACTORY.Open(urn, mode="r", token=self.token)
    volumes = list(fd.OpenChildren())
    found = False
    for volume in volumes:
        file_urn = volume.urn.Add("Windows/regedit.exe")
        fd = aff4.FACTORY.Open(file_urn, mode="r", token=self.token)
        try:
            data = fd.Read(10)
            if data[:2] == "MZ":
                found = True
                self.delete_urns.add(file_urn)
                break
        except __HOLE__:
            # If the file does not exist on this volume, Open returns an
            # aff4volume which does not have a Read method.
            pass
    self.assertTrue(found)
AttributeError
dataset/ETHPy150Open google/grr/grr/endtoend_tests/transfer.py/TestGetFileTSKWindows.CheckFlow
def _get_field_stats_dates(self, field='@timestamp'):
    """
    Add indices to `index_info` based on the values the field_stats API
    returns, as determined by `field`

    :arg field: The field with the date value. The field must be mapped in
        elasticsearch as a date datatype. Default: ``@timestamp``
    """
    self.loggit.debug('Getting index date from field_stats API')
    self.loggit.info(
        'Cannot use field_stats on closed indices. '
        'Omitting any closed indices.'
    )
    self.filter_closed()
    index_lists = chunk_index_list(self.indices)
    for l in index_lists:
        working_list = self.client.field_stats(
            index=to_csv(l), fields=field, level='indices'
        )['indices']
        if working_list:
            for index in list(working_list.keys()):
                try:
                    s = self.index_info[index]['age']
                    wl = working_list[index]['fields'][field]
                    # Use these new references to keep these lines more
                    # readable
                    s['min_value'] = fix_epoch(wl['min_value'])
                    s['max_value'] = fix_epoch(wl['max_value'])
                except __HOLE__ as e:
                    raise ActionError(
                        'Field "{0}" not found in index '
                        '"{1}"'.format(field, index)
                    )
KeyError
dataset/ETHPy150Open elastic/curator/curator/indexlist.py/IndexList._get_field_stats_dates
def filter_by_age(self, source='name', direction=None, timestring=None,
                  unit=None, unit_count=None, field=None,
                  stats_result='min_value', epoch=None, exclude=False):
    """
    Match `indices` by relative age calculations.

    :arg source: Source of index age. Can be one of 'name',
        'creation_date', or 'field_stats'
    :arg direction: Time to filter, either ``older`` or ``younger``
    :arg timestring: An strftime string to match the datestamp in an index
        name. Only used for index filtering by ``name``.
    :arg unit: One of ``seconds``, ``minutes``, ``hours``, ``days``,
        ``weeks``, ``months``, or ``years``.
    :arg unit_count: The number of ``unit``s. ``unit_count`` * ``unit`` will
        be calculated out to the relative number of seconds.
    :arg field: A timestamp field name. Only used for ``field_stats`` based
        calculations.
    :arg stats_result: Either `min_value` or `max_value`. Only used in
        conjunction with `source`=``field_stats`` to choose whether to
        reference the minimum or maximum result value.
    :arg epoch: An epoch timestamp used in conjunction with ``unit`` and
        ``unit_count`` to establish a point of reference for calculations.
        If not provided, the current time will be used.
    :arg exclude: If `exclude` is `True`, this filter will remove matching
        indices from `indices`. If `exclude` is `False`, then only matching
        indices will be kept in `indices`. Default is `False`
    """
    self.loggit.debug('Filtering indices by age')
    # Get timestamp point of reference, PoR
    PoR = get_point_of_reference(unit, unit_count, epoch)
    keyfield = source
    if not direction:
        raise MissingArgument('Must provide a value for "direction"')
    if direction not in ['older', 'younger']:
        raise ValueError(
            'Invalid value for "direction": {0}'.format(direction)
        )
    if source == 'name':
        if not timestring:
            raise MissingArgument(
                'source "name" requires the "timestring" keyword argument'
            )
        self._get_name_based_ages(timestring)
    elif source == 'creation_date':
        # Nothing to do here as this comes from `get_metadata` in __init__
        pass
    elif source == 'field_stats':
        if not field:
            raise MissingArgument(
                'source "field_stats" requires the "field" keyword argument'
            )
        if stats_result not in ['min_value', 'max_value']:
            raise ValueError(
                'Invalid value for "stats_result": {0}'.format(stats_result)
            )
        keyfield = stats_result
        self._get_field_stats_dates(field=field)
    else:
        raise ValueError(
            'Invalid source: {0}. '
            'Must be one of "name", '
            '"creation_date", "field_stats".'.format(source)
        )
    for index in self.working_list():
        try:
            msg = (
                'Index "{0}" age ({1}), direction: "{2}", point of '
                'reference, ({3})'.format(
                    index,
                    int(self.index_info[index]['age'][keyfield]),
                    direction,
                    PoR
                )
            )
            # Because time adds to epoch, smaller numbers are actually older
            # timestamps.
            if direction == 'older':
                agetest = self.index_info[index]['age'][keyfield] < PoR
            else:
                agetest = self.index_info[index]['age'][keyfield] > PoR
            self.__excludify(agetest, exclude, index, msg)
        except __HOLE__:
            self.loggit.info(
                'Index "{0}" does not meet provided criteria. '
                'Removing from list.'.format(index))
            self.indices.remove(index)
KeyError
dataset/ETHPy150Open elastic/curator/curator/indexlist.py/IndexList.filter_by_age
def filter_allocated(self, key=None, value=None,
                     allocation_type='require', exclude=True):
    """
    Match indices that have the routing allocation rule of
    `key=value` from `indices`

    :arg key: The allocation attribute to check for
    :arg value: The value to check for
    :arg allocation_type: Type of allocation to apply
    :arg exclude: If `exclude` is `True`, this filter will remove matching
        indices from `indices`. If `exclude` is `False`, then only matching
        indices will be kept in `indices`. Default is `True`
    """
    self.loggit.debug(
        'Filtering indices with shard routing allocation rules')
    if not key:
        raise MissingArgument('No value for "key" provided')
    if not value:
        raise MissingArgument('No value for "value" provided')
    if allocation_type not in ['include', 'exclude', 'require']:
        raise ValueError(
            'Invalid "allocation_type": {0}'.format(allocation_type)
        )
    self.empty_list_check()
    index_lists = chunk_index_list(self.indices)
    for l in index_lists:
        working_list = self.client.indices.get_settings(index=to_csv(l))
        if working_list:
            for index in list(working_list.keys()):
                try:
                    has_routing = (
                        working_list[index]['settings']['index']['routing']
                        ['allocation'][allocation_type][key] == value
                    )
                except __HOLE__:
                    has_routing = False
                # if has_routing:
                msg = (
                    '{0}: Routing (mis)match: '
                    'index.routing.allocation.{1}.{2}={3}.'.format(
                        index, allocation_type, key, value
                    )
                )
                # self.indices.remove(index)
                self.__excludify(has_routing, exclude, index, msg)
KeyError
dataset/ETHPy150Open elastic/curator/curator/indexlist.py/IndexList.filter_allocated
def testRun(self):
    db = DAL(DEFAULT_URI, check_reserved=['all'])
    db.define_table('person', Field('name', default="Michael"), Field('uuid'))
    db.define_table('pet', Field('friend', db.person), Field('name'))
    dbdict = db.as_dict(flat=True, sanitize=False)
    assert isinstance(dbdict, dict)
    uri = dbdict["uri"]
    assert isinstance(uri, basestring) and uri
    assert len(dbdict["tables"]) == 2
    assert len(dbdict["tables"][0]["fields"]) == 3
    assert dbdict["tables"][0]["fields"][1]["type"] == db.person.name.type
    assert dbdict["tables"][0]["fields"][1]["default"] == db.person.name.default

    db2 = DAL(**dbdict)
    assert len(db.tables) == len(db2.tables)
    assert hasattr(db2, "pet") and isinstance(db2.pet, Table)
    assert hasattr(db2.pet, "friend") and isinstance(db2.pet.friend, Field)

    db.pet.drop()
    db.commit()
    db2.commit()

    have_serializers = True
    try:
        import serializers
        dbjson = db.as_json(sanitize=False)
        assert isinstance(dbjson, basestring) and len(dbjson) > 0

        unicode_keys = True
        if sys.version < "2.6.5":
            unicode_keys = False
        db3 = DAL(**serializers.loads_json(dbjson,
                                           unicode_keys=unicode_keys))
        assert hasattr(db3, "person") and hasattr(db3.person, "uuid") and \
            db3.person.uuid.type == db.person.uuid.type
        db3.person.drop()
        db3.commit()
    except __HOLE__:
        pass

    mpfc = "Monty Python's Flying Circus"
    dbdict4 = {"uri": DEFAULT_URI,
               "tables": [{"tablename": "tvshow",
                           "fields": [{"fieldname": "name", "default": mpfc},
                                      {"fieldname": "rating", "type": "double"}]},
                          {"tablename": "staff",
                           "fields": [{"fieldname": "name", "default": "Michael"},
                                      {"fieldname": "food", "default": "Spam"},
                                      {"fieldname": "tvshow", "type": "reference tvshow"}]}]}
    db4 = DAL(**dbdict4)
    assert "staff" in db4.tables
    assert "name" in db4.staff
    assert db4.tvshow.rating.type == "double"
    assert (db4.tvshow.insert(), db4.tvshow.insert(name="Loriot"),
            db4.tvshow.insert(name="Il Mattatore")) == (1, 2, 3)
    assert db4(db4.tvshow).select().first().id == 1
    assert db4(db4.tvshow).select().first().name == mpfc

    db4.staff.drop()
    db4.tvshow.drop()
    db4.commit()

    dbdict5 = {"uri": DEFAULT_URI}
    db5 = DAL(**dbdict5)
    assert db5.tables in ([], None)
    assert not (str(db5) in ("", None))

    dbdict6 = {"uri": DEFAULT_URI,
               "tables": [{"tablename": "staff"},
                          {"tablename": "tvshow",
                           "fields": [{"fieldname": "name"},
                                      {"fieldname": "rating", "type": "double"}]}]}
    db6 = DAL(**dbdict6)
    assert len(db6["staff"].fields) == 1
    assert "name" in db6["tvshow"].fields
    assert db6.staff.insert() is not None
    assert db6(db6.staff).select().first().id == 1
    db6.staff.drop()
    db6.tvshow.drop()
    db6.commit()
ImportError
dataset/ETHPy150Open uwdata/termite-visualizations/web2py/gluon/tests/test_dal.py/TestDALDictImportExport.testRun
def validate(self, image_shape, filter_shape,
             border_mode='valid', subsample=(1, 1),
             N_image_shape=None, N_filter_shape=None,
             input=None, filters=None,
             unroll_batch=None, unroll_kern=None, unroll_patch=None,
             verify_grad=True, should_raise=False):
    """
    :param image_shape: The constant shape info passed to conv2d.
    :param filter_shape: The constant shape info passed to conv2d.

    :param N_image_shape: None (defaults to image_shape) or a tuple of
        4 elements with the shape of the input image
    :param N_filter_shape: None (defaults to filter_shape) or a tuple of
        4 elements with the shape of the input filter
    """
    if N_image_shape is None:
        N_image_shape = [T.get_scalar_constant_value(
            T.as_tensor_variable(x)) for x in image_shape]
    if N_filter_shape is None:
        N_filter_shape = [T.get_scalar_constant_value(
            T.as_tensor_variable(x)) for x in filter_shape]

    if input is None:
        input = self.input
    if not filters:
        filters = self.filters

    ############# THEANO IMPLEMENTATION ############

    # we create a symbolic function so that verify_grad can work
    def sym_conv2d(input, filters):
        # define theano graph and function
        input.name = 'input'
        filters.name = 'filters'
        rval = conv.conv2d(input, filters, image_shape, filter_shape,
                           border_mode, subsample,
                           unroll_batch=unroll_batch,
                           unroll_kern=unroll_kern,
                           unroll_patch=unroll_patch)
        rval.name = 'conv_output'
        return rval

    output = sym_conv2d(input, filters)
    output.name = 'conv2d(%s,%s)' % (input.name, filters.name)
    theano_conv = theano.function([input, filters], output, mode=self.mode)

    # initialize input and compute result
    image_data = numpy.random.random(N_image_shape).astype(self.dtype)
    filter_data = numpy.random.random(N_filter_shape).astype(self.dtype)
    try:
        theano_output = theano_conv(image_data, filter_data)
    except __HOLE__:
        if not should_raise:
            raise
        return
    else:
        if should_raise:
            raise Exception("ConvOp should have generated an error")

    ############# REFERENCE IMPLEMENTATION ############
    s = 1.
    orig_image_data = image_data
    if border_mode != 'full':
        s = -1.
    out_shape2d = numpy.array(N_image_shape[-2:]) + \
        s * numpy.array(N_filter_shape[-2:]) - s
    out_shape2d = numpy.ceil(out_shape2d / numpy.array(subsample))
    # avoid numpy deprecation
    out_shape2d = out_shape2d.astype('int32')
    out_shape = (N_image_shape[0], N_filter_shape[0]) + tuple(out_shape2d)
    ref_output = numpy.zeros(out_shape)

    # loop over output feature maps
    ref_output.fill(0)
    if border_mode == 'full':
        image_data2 = numpy.zeros((N_image_shape[0], N_image_shape[1],
                                   N_image_shape[2] + 2 * N_filter_shape[2] - 2,
                                   N_image_shape[3] + 2 * N_filter_shape[3] - 2))
        image_data2[:, :,
                    N_filter_shape[2] - 1:N_filter_shape[2] - 1 + N_image_shape[2],
                    N_filter_shape[3] - 1:N_filter_shape[3] - 1 + N_image_shape[3]] = image_data
        image_data = image_data2
        N_image_shape = image_data.shape
    for bb in range(N_image_shape[0]):
        for nn in range(N_filter_shape[0]):
            for im0 in range(N_image_shape[1]):
                filter2d = filter_data[nn, im0, :, :]
                image2d = image_data[bb, im0, :, :]
                for row in range(ref_output.shape[2]):
                    irow = row * subsample[0]  # image row
                    for col in range(ref_output.shape[3]):
                        icol = col * subsample[1]  # image col
                        ref_output[bb, nn, row, col] += (
                            image2d[irow:irow + N_filter_shape[2],
                                    icol:icol + N_filter_shape[3]] *
                            filter2d[::-1, ::-1]).sum()

    self.assertTrue(_allclose(theano_output, ref_output))

    ############# TEST GRADIENT ############
    if verify_grad:
        utt.verify_grad(sym_conv2d, [orig_image_data, filter_data])
ValueError
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/tensor/nnet/tests/test_conv.py/TestConv2D.validate
@register.tag
def categories_for_slugs(parser, token):
    """
    Usage: {% categories_for_slugs "slug[,slug...]" as varname %}

    Sets the variable *varname* in the context to a list of categories,
    given by the list of slugs. Useful if you want to specify a custom
    list of categories and override the default category listing from
    satchmo.

    Example usage::

        {% categories_for_slugs "hats,boots,accessories" as categories %}
        <ul>
        {% for child in categories.child.active %}
            <li><a href="{{ child.get_absolute_url }}">{{ child.translated_name }}</a></li>
        {% endfor %}
        </ul>
    """
    try:
        # Splitting by None == splitting by spaces.
        tag_name, arg = token.contents.split(None, 1)
    except __HOLE__:
        raise TemplateSyntaxError, "%r tag requires arguments" \
            % token.contents.split()[0]

    m = re.search(r'"([^ "]+)" as (\w+)', arg)
    if not m:
        raise TemplateSyntaxError, "%r tag had invalid arguments" \
            % tag_name
    cat_slugs, var = m.groups()

    cats = []
    for cat_slug in cat_slugs.split(','):
        try:
            cat = Category.objects.get(slug__iexact=cat_slug)
        except Category.DoesNotExist:
            log.warn("No category found for slug: %s", cat_slug)
            cat = None
        cats.append(cat)

    return SetVariableInContextNode(var, cats)
ValueError
dataset/ETHPy150Open dokterbob/satchmo/satchmo/apps/satchmo_store/shop/templatetags/satchmo_category.py/categories_for_slugs
@register.tag
def all_products_for_category(parser, token):
    """
    Usage:
    1. {% all_products_for_category as varname %}
    2. {% all_products_for_category for slug_var as varname %}
    3. {% all_products_for_category for "slug" as varname %}

    Sets the variable *varname* in the context to a list of all products
    that are active in *category*, and is equivalent to the result of:

        category.active_products(include_children=True)

    where *category* is:
    1. the 'category' variable in the context, for usage 1.
    2. the instance of Category with the slug in the context variable
       *slug_var*, for usage 2.
    3. the instance of Category with the slug *slug*, for usage 3.
    """
    try:
        # Splitting by None == splitting by spaces.
        tag_name, arg = token.contents.split(None, 1)
    except __HOLE__:
        raise TemplateSyntaxError, "%r tag requires arguments" \
            % token.contents.split()[0]

    m = re.search(r'(.*?)as (\w+)$', arg)

    # First, get the varname - the easiest
    if not m:
        raise TemplateSyntaxError, "Variable name was not specified for %r tag" \
            % tag_name
    arg, var = m.groups()

    # Now, try and determine the usage case the user wants
    if not arg:
        # We're of the first case.
        return AllProductsNode(var)

    m = re.search(r'^for (.+?)$', arg.strip())
    if not m:
        raise TemplateSyntaxError, "Invalid arguments for %r tag" \
            % tag_name
    arg = m.group(1)

    if arg[0] == '"' and arg[-1] == '"':
        # We're of the third case.
        cat_var = arg[1:-1]
        return AllProductsForSlugNode(arg[1:-1], var)
    elif arg:
        # We're of the second case.
        return AllProductsForVariableSlugNode(Variable(arg), var)

    raise TemplateSyntaxError, "Invalid arguments for %r tag" \
        % tag_name
ValueError
dataset/ETHPy150Open dokterbob/satchmo/satchmo/apps/satchmo_store/shop/templatetags/satchmo_category.py/all_products_for_category
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED)
@webapi_request_fields(
    allow_unknown=True
)
def update(self, request, extra_fields={}, *args, **kwargs):
    """Updates a per-file diff.

    This is used solely for updating extra data on a file's diff.
    The contents of a diff cannot be modified.

    Extra data can be stored for later lookup by passing
    ``extra_data.key_name=value``. The ``key_name`` and ``value`` can
    be any valid strings. Passing a blank ``value`` will remove the key.
    The ``extra_data.`` prefix is required.
    """
    try:
        review_request = \
            resources.review_request.get_object(request, *args, **kwargs)
        filediff = self.get_object(request, *args, **kwargs)
    except __HOLE__:
        return DOES_NOT_EXIST

    if not review_request.is_mutable_by(request.user):
        return self.get_no_access_error(request)

    if extra_fields:
        self.import_extra_data(filediff, filediff.extra_data, extra_fields)
        filediff.save(update_fields=['extra_data'])

    return 200, {
        self.item_result_key: filediff,
    }
ObjectDoesNotExist
dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/webapi/resources/filediff.py/FileDiffResource.update
def _get_patch(self, request, *args, **kwargs):
    try:
        resources.review_request.get_object(request, *args, **kwargs)
        filediff = self.get_object(request, *args, **kwargs)
    except __HOLE__:
        return DOES_NOT_EXIST

    resp = HttpResponse(filediff.diff, content_type='text/x-patch')
    filename = '%s.patch' % urllib_quote(filediff.source_file)
    resp['Content-Disposition'] = 'inline; filename=%s' % filename
    set_last_modified(resp, filediff.diffset.timestamp)

    return resp
ObjectDoesNotExist
dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/webapi/resources/filediff.py/FileDiffResource._get_patch
def _get_diff_data(self, request, mimetype, *args, **kwargs):
    try:
        resources.review_request.get_object(request, *args, **kwargs)
        filediff = self.get_object(request, *args, **kwargs)
    except __HOLE__:
        return DOES_NOT_EXIST

    highlighting = request.GET.get('syntax-highlighting', False)

    files = get_diff_files(filediff.diffset, filediff, request=request)
    populate_diff_chunks(files, highlighting, request=request)

    if not files:
        # This may not be the right error here.
        return DOES_NOT_EXIST

    assert len(files) == 1
    f = files[0]

    payload = {
        'diff_data': {
            'binary': f['binary'],
            'chunks': f['chunks'],
            'num_changes': f['num_changes'],
            'changed_chunk_indexes': f['changed_chunk_indexes'],
            'new_file': f['newfile'],
        }
    }

    # XXX: Kind of a hack.
    api_format = mimetype.split('+')[-1]

    resp = WebAPIResponse(request, payload, api_format=api_format)
    set_last_modified(resp, filediff.diffset.timestamp)

    return resp
ObjectDoesNotExist
dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/webapi/resources/filediff.py/FileDiffResource._get_diff_data
def _make_command(f, name, attrs, cls):
    if isinstance(f, Command):
        raise TypeError('Attempted to convert a callback into a '
                        'command twice.')
    try:
        params = f.__click_params__
        params.reverse()
        del f.__click_params__
    except __HOLE__:
        params = []
    help = attrs.get('help')
    if help is None:
        help = inspect.getdoc(f)
        if isinstance(help, bytes):
            help = help.decode('utf-8')
    else:
        help = inspect.cleandoc(help)
    attrs['help'] = help
    _check_for_unicode_literals()
    return cls(name=name or f.__name__.lower(),
               callback=f, params=params, **attrs)
AttributeError
dataset/ETHPy150Open pallets/click/click/decorators.py/_make_command
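Aside (not a dataset record): for context on the record above, `_make_command` is the internal step that turns a decorated callback into a Click `Command`, collecting options stashed on `f.__click_params__`. A small sketch using Click's public API, which routes through this function:

import click

@click.command()
@click.option('--count', default=1, help='Number of greetings.')
def hello(count):
    """Simple program that greets COUNT times."""
    for _ in range(count):
        click.echo('Hello!')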
def version_option(version=None, *param_decls, **attrs):
    """Adds a ``--version`` option which immediately ends the program
    printing out the version number.  This is implemented as an eager
    option that prints the version and exits the program in the callback.

    :param version: the version number to show.  If not provided Click
                    attempts an auto discovery via setuptools.
    :param prog_name: the name of the program (defaults to autodetection)
    :param message: custom message to show instead of the default
                    (``'%(prog)s, version %(version)s'``)
    :param others: everything else is forwarded to :func:`option`.
    """
    if version is None:
        module = sys._getframe(1).f_globals.get('__name__')

    def decorator(f):
        prog_name = attrs.pop('prog_name', None)
        message = attrs.pop('message', '%(prog)s, version %(version)s')

        def callback(ctx, param, value):
            if not value or ctx.resilient_parsing:
                return
            prog = prog_name
            if prog is None:
                prog = ctx.find_root().info_name
            ver = version
            if ver is None:
                try:
                    import pkg_resources
                except __HOLE__:
                    pass
                else:
                    for dist in pkg_resources.working_set:
                        scripts = dist.get_entry_map().get('console_scripts') or {}
                        for script_name, entry_point in iteritems(scripts):
                            if entry_point.module_name == module:
                                ver = dist.version
                                break
                if ver is None:
                    raise RuntimeError('Could not determine version')
            echo(message % {
                'prog': prog,
                'version': ver,
            }, color=ctx.color)
            ctx.exit()

        attrs.setdefault('is_flag', True)
        attrs.setdefault('expose_value', False)
        attrs.setdefault('is_eager', True)
        attrs.setdefault('help', 'Show the version and exit.')
        attrs['callback'] = callback
        return option(*(param_decls or ('--version',)), **attrs)(f)
    return decorator
ImportError
dataset/ETHPy150Open pallets/click/click/decorators.py/version_option
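Aside (not a dataset record): the masked ImportError above covers the optional pkg_resources-based version autodiscovery. A usage sketch of the same decorator via Click's public API, passing the version explicitly so that branch is never needed:

import click

@click.command()
@click.version_option(version='1.0.0', prog_name='demo')
def cli():
    """Running `demo --version` prints 'demo, version 1.0.0' and exits."""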
@staticmethod
def verify(encoded, allowed_users):
    """
    Verify that `encoded` is a valid encoded credentials object and that
    its public key matches the public key we've already seen, if any.

    encoded: tuple
        Encoded credentials.

    allowed_users: dict
        Dictionary of users and corresponding public keys allowed access.
        If None, any user may access. If empty, no user may access.

    Returns :class:`Credentials` object from `encoded`.
    """
    data, signature, client_creds = encoded
    key = (data, signature)
    try:
        credentials = _VERIFY_CACHE[key]
    except __HOLE__:
        credentials = Credentials(encoded)
        user = credentials.user
        for cred in _VERIFY_CACHE.values():
            if cred.user == user:
                raise CredentialsError('Public key mismatch for %r' % user)
        else:
            _VERIFY_CACHE[key] = credentials

    if allowed_users is not None:
        try:
            pubkey = allowed_users[credentials.user]
        except KeyError:
            raise CredentialsError('User %r not in allowed_users' \
                                   % credentials.user)
        else:
            if (credentials.public_key.e != pubkey.e) or \
               (credentials.public_key.n != pubkey.n):
                raise CredentialsError('Allowed user mismatch for %r' \
                                       % credentials.user)

    credentials.client_creds = client_creds
    credentials.remote = True
    return credentials
KeyError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/rbac.py/Credentials.verify
def get_credentials():
    """ Get the current thread's credentials. """
    try:
        return threading.current_thread().credentials
    except __HOLE__:
        credentials = Credentials()
        return set_credentials(credentials)
AttributeError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/rbac.py/get_credentials
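Aside (not a dataset record): a minimal sketch of the per-thread attribute fallback pattern masked above; state lives directly on the thread object, and the first access on a given thread lands in the AttributeError branch. The attribute name and default are illustrative, not OpenMDAO code.

import threading

def get_request_id():
    try:
        return threading.current_thread().request_id  # set on a prior call
    except AttributeError:
        threading.current_thread().request_id = 0  # assumed default
        return 0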
def remote_access():
    """ Return True if the current thread is providing remote access. """
    try:
        creds = threading.current_thread().credentials
    except __HOLE__:
        return False
    else:
        return creds.remote


# For some reason use of a class as a decorator doesn't count as coverage.
AttributeError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/rbac.py/remote_access
def need_proxy(meth, result, access_controller):
    """
    Returns True if `result` from `meth` requires a proxy.
    If no proxy types have been explicitly defined for `meth`,
    then `access_controller` provides a default set.

    meth: method
        Method to be checked.

    result: object
        Result value to be checked.

    access_controller: :class:`AccessController`
        Provides default proxy type information.
    """
    try:
        roles, proxy_role, types, cache = meth._rbac
    except AttributeError:
        return False

    cls = result.__class__
    try:
        return cache[cls]
    except __HOLE__:
        # Check if this result class or any base classes are in types.
        if not types:
            types = tuple(access_controller.proxy_types)
        if not types:
            cache[cls] = False
        else:
            cache[cls] = isinstance(result, types)
        return cache[cls]
KeyError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/rbac.py/need_proxy
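Aside (not a dataset record): the cache-miss branch masked as KeyError above memoizes an isinstance() check per result class. A stripped-down sketch of the same idiom, with illustrative names; `types` is assumed to be a class or tuple of classes, as isinstance() requires.

def cached_isinstance(result, types, cache):
    # Memoize the isinstance() decision keyed on the result's class.
    cls = result.__class__
    try:
        return cache[cls]
    except KeyError:
        cache[cls] = isinstance(result, types)
        return cache[cls]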
def get_proxy_credentials(self, meth, credentials):
    """
    If special credentials are needed while executing `meth`, return
    them, else return `credentials`.

    meth: method
        Method to be invoked.

    credentials: :class:`Credentials`
        Current credentials in effect.
    """
    try:
        proxy_role = meth._rbac[1]
    except AttributeError:
        raise RoleError('No RBAC for method %s' % meth)

    if proxy_role:
        try:
            proxy_creds = self.credentials_map[proxy_role]
        except __HOLE__:
            raise RoleError('No credentials for proxy role %s' % proxy_role)
        else:
            proxy_creds.client_creds = credentials.client_creds
            return proxy_creds
    else:
        return credentials
KeyError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/rbac.py/AccessController.get_proxy_credentials
def need_proxy(self, obj, attr, res):
    """
    Returns True if `attr` of `obj` whose value is `res` requires a proxy.

    obj: object
        Object whose attribute is to be returned.

    attr: string
        Name of attribute accessed.

    res: object
        Result to be returned.

    Checks `res` against registered classes to be proxied as well as
    the proxy registry for `obj.attr`.
    """
    if isinstance(res, tuple(self.proxy_types)):
        return True

    key = '%s.%s' % (id(obj), attr)
    try:
        return self.attr_proxy_map[key]
    except __HOLE__:
        return False
KeyError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/rbac.py/AccessController.need_proxy
def check_role(role, meth):
    """
    Verifies that `role` is matched by at least one :mod:`fnmatch`-style
    pattern in `meth`'s RBAC. Raises :class:`RoleError` if no match is
    found.

    role: string
        Role to be checked.

    meth: method
        Method to be checked.
    """
    try:
        patterns = meth._rbac[0]
    except __HOLE__:
        raise RoleError('No RBAC for function!')

    for pattern in patterns:
        if fnmatch.fnmatchcase(role, pattern):
            return
    raise RoleError("No access for role '%s'" % role)
AttributeError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/rbac.py/check_role
def getunpackers():
    """Scans the unpackers dir, finds unpackers and adds them to the
    UNPACKERS list.

    An unpacker will be loaded only if it is a valid python module
    (its name must adhere to naming conventions) and it is not
    blacklisted (i.e. inserted into BLACKLIST)."""
    path = __path__
    prefix = __name__ + '.'
    unpackers = []
    interface = ['unpack', 'detect', 'PRIORITY']
    for _importer, modname, _ispkg in pkgutil.iter_modules(path, prefix):
        if 'tests' not in modname and modname not in BLACKLIST:
            try:
                module = __import__(modname, fromlist=interface)
            except __HOLE__:
                raise UnpackingError('Bad unpacker: %s' % modname)
            else:
                unpackers.append(module)

    return sorted(unpackers, key=lambda mod: mod.PRIORITY)
ImportError
dataset/ETHPy150Open JT5D/Alfred-Popclip-Sublime/Sublime Text 2/JsFormat/libs/jsbeautifier/unpackers/__init__.py/getunpackers
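Aside (not a dataset record): the record above masks ImportError in a pkgutil-based plugin scan; unlike that entry, which converts the failure into an UnpackingError, a generic variant often just skips broken plugins. A self-contained sketch of that discovery loop (the `discover` helper is hypothetical):

import pkgutil

def discover(package):
    # Import every submodule of `package`, skipping any that fail to
    # import rather than aborting the whole scan.
    mods = []
    prefix = package.__name__ + '.'
    for _importer, modname, _ispkg in pkgutil.iter_modules(package.__path__, prefix):
        try:
            mods.append(__import__(modname, fromlist=['*']))
        except ImportError:
            continue
    return mods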
def __init__(self, *args, **kwargs):
    super(DatabaseWrapper, self).__init__(*args, **kwargs)
    self._last_ping_time = 0
    self.client = client.DatabaseClient(self)
    try:
        self.ops = DatabaseOperations()
    except __HOLE__:
        self.ops = DatabaseOperations(self)
TypeError
dataset/ETHPy150Open AppScale/appscale/AppServer/google/storage/speckle/python/django/backend/base.py/DatabaseWrapper.__init__
def _linux_brshow(br=None):
    '''
    Internal, returns bridges and enslaved interfaces (GNU/Linux - brctl)
    '''
    brctl = _tool_path('brctl')

    if br:
        cmd = '{0} show {1}'.format(brctl, br)
    else:
        cmd = '{0} show'.format(brctl)

    brs = {}

    for line in __salt__['cmd.run'](cmd, python_shell=False).splitlines():
        # get rid of first line
        if line.startswith('bridge name'):
            continue
        # get rid of ^\n's
        vals = line.split()
        if not vals:
            continue

        # bridge name  bridge id            STP enabled  interfaces
        # br0          8000.e4115bac8ddc    no           eth0
        #                                                foo0
        # br1          8000.e4115bac8ddc    no           eth1
        if len(vals) > 1:
            brname = vals[0]
            brs[brname] = {
                'id': vals[1],
                'stp': vals[2],
            }
            if len(vals) > 3:
                brs[brname]['interfaces'] = [vals[3]]

        if len(vals) == 1 and brname:
            brs[brname]['interfaces'].append(vals[0])

    if br:
        try:
            return brs[br]
        except __HOLE__:
            return None
    return brs
KeyError
dataset/ETHPy150Open saltstack/salt/salt/modules/bridge.py/_linux_brshow
def _netbsd_brshow(br=None):
    '''
    Internal, returns bridges and enslaved interfaces (NetBSD - brconfig)
    '''
    brconfig = _tool_path('brconfig')

    if br:
        cmd = '{0} {1}'.format(brconfig, br)
    else:
        cmd = '{0} -a'.format(brconfig)

    brs = {}
    start_int = False

    for line in __salt__['cmd.run'](cmd, python_shell=False).splitlines():
        if line.startswith('bridge'):
            start_int = False
            brname = line.split(':')[0]  # on NetBSD, always ^bridge([0-9]+):
            brs[brname] = {
                'interfaces': [],
                'stp': 'no'
            }
        if 'Interfaces:' in line:
            start_int = True
            continue
        if start_int and brname:
            m = re.match(r'\s*([a-z0-9]+)\s.*<.*>', line)
            if m:
                brs[brname]['interfaces'].append(m.group(1))
        if 'STP' in line:
            brs[brname]['stp'] = 'yes'

    if br:
        try:
            return brs[br]
        except __HOLE__:
            return None
    return brs
KeyError
dataset/ETHPy150Open saltstack/salt/salt/modules/bridge.py/_netbsd_brshow
def loop(self):
    """
    Main game loop. Returns the final score.
    """
    pause_key = self.board.PAUSE
    margins = {'left': 4, 'top': 4, 'bottom': 4}

    atexit.register(self.showCursor)

    try:
        self.hideCursor()
        while True:
            self.clearScreen()
            print(self.__str__(margins=margins))
            if self.board.won() or not self.board.canMove():
                break
            m = self.readMove()

            if (m == pause_key):
                self.saveBestScore()
                if self.store():
                    print("Game successfully saved. "
                          "Resume it with `term2048 --resume`.")
                    return self.score
                print("An error occurred while saving your game.")
                return

            self.incScore(self.board.move(m))

    except __HOLE__:
        self.saveBestScore()
        return

    self.saveBestScore()
    print('You won!' if self.board.won() else 'Game Over')
    return self.score
KeyboardInterrupt
dataset/ETHPy150Open bfontaine/term2048/term2048/game.py/Game.loop
def create_or_update_user(params, username):
    try:
        user = User.objects.get(username__exact=username)
        for key, value in iter(params.items()):
            setattr(user, key, value)
    except __HOLE__ as e:
        log.debug(e)
        log.debug('Try to save user with params: {}'.format(params))
        user = User(**params)
    user.save()
    return user
ObjectDoesNotExist
dataset/ETHPy150Open 2gis/badger-api/authentication/backend.py/create_or_update_user
def _type(self):
    if self._type:
        return self._type
    if self._app:
        app = self._app
    else:
        try:
            app = self.tasks[0].type.app
        except (IndexError, __HOLE__):
            app = self.body.type.app
    return app.tasks['celery.chord']
AttributeError
dataset/ETHPy150Open Impactstory/total-impact-webapp/core_tasks.py/_type
def provider_method_wrapper(tiid, provider, method_name):
    # logger.info(u"{:20}: in provider_method_wrapper with {tiid} {provider_name} {method_name} with {aliases}".format(
    #     "wrapper", tiid=tiid, provider_name=provider.provider_name, method_name=method_name, aliases=input_aliases_dict))

    product = Product.query.get(tiid)
    if not product:
        logger.warning(u"Empty product in provider_run for tiid {tiid}".format(
            tiid=tiid))
        return None

    input_alias_tuples = product.aliases_for_providers

    try:
        method = getattr(provider, method_name)
    except __HOLE__:
        provider = ProviderFactory.get_provider(provider)
        method = getattr(provider, method_name)

    provider_name = provider.provider_name
    worker_name = provider_name + "_worker"

    try:
        method_response = method(input_alias_tuples)
    except ProviderError, e:
        method_response = None
        logger.info(u"{:20}: **ProviderError {tiid} {method_name} {provider_name}, Exception type {exception_type} {exception_arguments}".format(
            worker_name,
            tiid=tiid,
            provider_name=provider_name.upper(),
            method_name=method_name.upper(),
            exception_type=type(e).__name__,
            exception_arguments=e.args))

    ellipses = ""
    if method_response and len(method_response) >= 50:
        ellipses = "..."
    logger.info(u"{:20}: /biblio_print, RETURNED {tiid} {method_name} {provider_name} : {method_response:.50} {ellipses}".format(
        worker_name,
        tiid=tiid,
        method_name=method_name.upper(),
        provider_name=provider_name.upper(),
        method_response=method_response,
        ellipses=ellipses))

    add_to_database_if_nonzero(product, method_response, method_name, provider_name)

    return tiid


# last variable is an artifact so it has same call signature as other callbacks
AttributeError
dataset/ETHPy150Open Impactstory/total-impact-webapp/core_tasks.py/provider_method_wrapper
def get_data_dir():
    if os.getenv(MEMSQL_LOADER_PATH_ENV, None):
        target = os.environ[MEMSQL_LOADER_PATH_ENV]
    else:
        target = os.path.join(os.path.expanduser("~"), ".memsql-loader")

    parent = os.path.dirname(target)
    if not os.path.exists(parent):
        print("Can't load MemSQL Loader Database. Please ensure that the path '%s' exists." % parent)
        sys.exit(1)

    if not os.path.exists(target):
        try:
            os.mkdir(target)
        except __HOLE__:
            print("Failed to create MemSQL Loader database path: %s" % target)
            sys.exit(1)

    return target
OSError
dataset/ETHPy150Open memsql/memsql-loader/memsql_loader/util/paths.py/get_data_dir
@csrf_exempt
def proxy_request(request, **kwargs):
    """ generic view to proxy a request.

    Args:

        destination: string, the proxied url
        prefix: string, the prefix behind which we proxy the path
        headers: dict, custom HTTP headers
        no_redirect: boolean, False by default, do not redirect to "/"
            if no path is given
        decompress: boolean, False by default. If true the proxy will
            decompress the source body if it's gzip encoded.
        filters: list of revproxy.Filter instance

    Return: HttpResponse instance
    """

    # proxy sid
    try:
        cookie_name = settings.REVPROXY_COOKIE
    except AttributeError:
        cookie_name = kwargs.get("cookie", "PROXY_SID")

    sid = request.COOKIES.get(cookie_name)

    # create a proxy session id only if it's needed so someone using
    # a cookie based authentication can just reuse this session id.
    # It can also be the session id from the session middleware.
    if not sid:
        sid = uuid.uuid4().hex

    kwargs['proxy_sid'] = sid

    # install request filters
    filters_classes = kwargs.get('filters')
    if not filters_classes:
        filters = None
    else:
        filters = []
        for fclass in filters_classes:
            # add filter instance
            fobj = fclass(request, **kwargs)
            filters.append(fobj)

            # eventually rewrite request and kwargs
            if hasattr(fobj, 'setup'):
                ret = fobj.setup()
                if ret is not None:
                    try:
                        request, extra_kwargs = ret
                    except __HOLE__:
                        extra_kwargs = ret
                    if extra_kwargs is not None:
                        kwargs.update(extra_kwargs)

    destination = kwargs.get('destination')
    prefix = kwargs.get('prefix')
    headers = kwargs.get('headers')
    no_redirect = kwargs.get('no_redirect', False)
    decompress = kwargs.get("decompress", False)
    path = kwargs.get("path")
    proxy_sid = kwargs.get('proxy_sid')

    if path is None:
        path = request.path
        if prefix is not None and prefix:
            path = path.split(prefix, 1)[1]
    else:
        if not path and not request.path.endswith("/"):
            if not no_redirect:
                qs = request.META["QUERY_STRING"]
                redirect_url = "%s/" % request.path
                if qs:
                    redirect_url = "%s?%s" % (redirect_url, qs)
                return HttpResponsePermanentRedirect(redirect_url)
        if path:
            prefix = request.path.rsplit(path, 1)[0]

    if not path.startswith("/"):
        path = "/%s" % path

    base_url = absolute_uri(request, destination)
    proxied_url = ""
    if not path:
        proxied_url = "%s/" % base_url
    else:
        proxied_url = "%s%s" % (base_url, path)

    qs = request.META.get("QUERY_STRING")
    if qs is not None and qs:
        proxied_url = "%s?%s" % (proxied_url, qs)

    # fix headers
    headers = headers or {}
    for key, value in request.META.iteritems():
        if key.startswith('HTTP_'):
            key = header_name(key)
        elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
            key = key.replace('_', '-')
            if not value:
                continue
        else:
            continue

        # rewrite location
        if key.lower() != "host" and not is_hop_by_hop(key):
            headers[key] = value

    # we forward for
    headers["X-Forwarded-For"] = request.get_host()

    # django doesn't understand PUT sadly
    method = request.method.upper()
    if method == "PUT":
        coerce_put_post(request)

    # do the request
    try:
        resp = restkit.request(proxied_url, method=method,
                               body=request.raw_post_data,
                               headers=headers,
                               follow_redirect=True,
                               decompress=decompress,
                               filters=filters)
    except restkit.RequestFailed, e:
        msg = getattr(e, 'msg', '')
        if e.status_int >= 100:
            resp = e.response
            body = msg
        else:
            return http.HttpResponseBadRequest(msg)

    body = resp.tee()

    response = HttpResponse(body, status=resp.status_int)

    # fix response headers
    for k, v in resp.headers.items():
        kl = k.lower()
        if is_hop_by_hop(kl):
            continue
        if kl == "location":
            response[k] = rewrite_location(request, prefix, v)
        elif kl == "content-encoding":
            if not decompress:
                response[k] = v
        else:
            response[k] = v

    # save the session
    response.set_cookie(cookie_name, sid, max_age=None,
                        expires=None,
                        domain=settings.SESSION_COOKIE_DOMAIN,
                        path=settings.SESSION_COOKIE_PATH,
                        secure=True)

    return response
ValueError
dataset/ETHPy150Open benoitc/dj-revproxy/revproxy/proxy.py/proxy_request
def __getattr__(self, key):
    try:
        return self._get(key)
    except __HOLE__:
        # Proxy most special vars to config for dict protocol.
        if key in self._proxies:
            return getattr(self.config, key)
        # Otherwise, raise useful AttributeError to follow getattr proto.
        err = "No attribute or config key found for {0!r}".format(key)
        attrs = [x for x in dir(self.__class__) if not x.startswith('_')]
        err += "\n\nValid keys: {0!r}".format(
            sorted(list(self.config.keys()))
        )
        err += "\n\nValid real attributes: {0!r}".format(attrs)
        raise AttributeError(err)
KeyError
dataset/ETHPy150Open pyinvoke/invoke/invoke/config.py/DataProxy.__getattr__
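Aside (not a dataset record): a compact sketch of the `__getattr__` fallback chain the Invoke record above masks, with attribute lookup falling through to a wrapped mapping. This is an illustration of the idiom, not Invoke's actual class.

class Proxy(object):
    def __init__(self, config):
        self.config = config  # any mapping

    def __getattr__(self, key):
        # Only called when normal attribute lookup fails.
        try:
            return self.config[key]
        except KeyError:
            raise AttributeError("No attribute or config key %r" % key)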
def _load_file(self, prefix, absolute=False):
    # Setup
    found = "_{0}_found".format(prefix)
    path = "_{0}_path".format(prefix)
    data = "_{0}".format(prefix)
    # Short-circuit if loading appears to have occurred already
    if getattr(self, found) is not None:
        return
    # Moar setup
    if absolute:
        absolute_path = getattr(self, path)
        # None -> expected absolute path but none set, short circuit
        if absolute_path is None:
            return
        paths = [absolute_path]
    else:
        path_prefix = getattr(self, "_{0}_prefix".format(prefix))
        # Short circuit if loading seems unnecessary (eg for project config
        # files when not running out of a project)
        if path_prefix is None:
            return
        paths = [
            '.'.join((path_prefix, x)) for x in self._file_suffixes
        ]
    # Poke 'em
    for filepath in paths:
        # Normalize
        filepath = expanduser(filepath)
        try:
            try:
                type_ = splitext(filepath)[1].lstrip('.')
                loader = getattr(self, "_load_{0}".format(type_))
            except __HOLE__ as e:
                msg = "Config files of type {0!r} (from file {1!r}) are not supported! Please use one of: {2!r}"  # noqa
                raise UnknownFileType(msg.format(
                    type_, filepath, self._file_suffixes))
            # Store data, the path it was found at, and fact that it was
            # found
            setattr(self, data, loader(filepath))
            setattr(self, path, filepath)
            setattr(self, found, True)
            break
        # Typically means 'no such file', so just note & skip past.
        except IOError as e:
            # TODO: is there a better / x-platform way to detect this?
            if "No such file" in e.strerror:
                err = "Didn't see any {0}, skipping."
                debug(err.format(filepath))
            else:
                raise
    # Still None -> no suffixed paths were found, record this fact
    if getattr(self, path) is None:
        setattr(self, found, False)
AttributeError
dataset/ETHPy150Open pyinvoke/invoke/invoke/config.py/Config._load_file
def set_list_value(self, list_, index, value):
    """Sets the value of `list` specified by `index` to the given `value`.

    Index '0' means the first position, '1' the second and so on.
    Similarly, '-1' is the last position, '-2' second last, and so on.
    Using an index that does not exist on the list causes an error.
    The index can be either an integer or a string that can be converted
    to an integer.

    Example:
    | Set List Value | ${L3} | 1  | xxx |
    | Set List Value | ${L3} | -1 | yyy |
    =>
    - ${L3} = ['a', 'xxx', 'yyy']
    """
    try:
        list_[self._index_to_int(index)] = value
    except __HOLE__:
        self._index_error(list_, index)
IndexError
dataset/ETHPy150Open shellderp/sublime-robot-plugin/lib/robot/libraries/Collections.py/_List.set_list_value
def remove_from_list(self, list_, index):
    """Removes and returns the value specified with an `index` from `list`.

    Index '0' means the first position, '1' the second and so on.
    Similarly, '-1' is the last position, '-2' the second last, and so on.
    Using an index that does not exist on the list causes an error.
    The index can be either an integer or a string that can be converted
    to an integer.

    Example:
    | ${x} = | Remove From List | ${L2} | 0 |
    =>
    - ${x} = 'a'
    - ${L2} = ['b']
    """
    try:
        return list_.pop(self._index_to_int(index))
    except __HOLE__:
        self._index_error(list_, index)
IndexError
dataset/ETHPy150Open shellderp/sublime-robot-plugin/lib/robot/libraries/Collections.py/_List.remove_from_list
def get_from_list(self, list_, index):
    """Returns the value specified with an `index` from `list`.

    The given list is never altered by this keyword.

    Index '0' means the first position, '1' the second, and so on.
    Similarly, '-1' is the last position, '-2' the second last, and so on.
    Using an index that does not exist on the list causes an error.
    The index can be either an integer or a string that can be converted
    to an integer.

    Examples (including Python equivalents in comments):
    | ${x} = | Get From List | ${L5} | 0  | # L5[0]  |
    | ${y} = | Get From List | ${L5} | -2 | # L5[-2] |
    =>
    - ${x} = 'a'
    - ${y} = 'd'
    - ${L5} is not changed
    """
    try:
        return list_[self._index_to_int(index)]
    except __HOLE__:
        self._index_error(list_, index)
IndexError
dataset/ETHPy150Open shellderp/sublime-robot-plugin/lib/robot/libraries/Collections.py/_List.get_from_list
def get_index_from_list(self, list_, value, start=0, end=None):
    """Returns the index of the first occurrence of the `value` on the list.

    The search can be narrowed to the selected sublist by the `start` and
    `end` indexes having the same semantics as in the `Get Slice From List`
    keyword. In case the value is not found, -1 is returned.
    The given list is never altered by this keyword.

    Example:
    | ${x} = | Get Index From List | ${L5} | d |
    =>
    - ${x} = 3
    - ${L5} is not changed
    """
    if start == '':
        start = 0
    list_ = self.get_slice_from_list(list_, start, end)
    try:
        return int(start) + list_.index(value)
    except __HOLE__:
        return -1
ValueError
dataset/ETHPy150Open shellderp/sublime-robot-plugin/lib/robot/libraries/Collections.py/_List.get_index_from_list
def _yield_list_diffs(self, list1, list2, names):
    for index, (item1, item2) in enumerate(zip(list1, list2)):
        name = ' (%s)' % names[index] if index in names else ''
        try:
            assert_equals(item1, item2, msg='Index %d%s' % (index, name))
        except __HOLE__, err:
            yield unic(err)
AssertionError
dataset/ETHPy150Open shellderp/sublime-robot-plugin/lib/robot/libraries/Collections.py/_List._yield_list_diffs
def _index_to_int(self, index, empty_to_zero=False):
    if empty_to_zero and not index:
        return 0
    try:
        return int(index)
    except __HOLE__:
        raise ValueError("Cannot convert index '%s' to an integer" % index)
ValueError
dataset/ETHPy150Open shellderp/sublime-robot-plugin/lib/robot/libraries/Collections.py/_List._index_to_int
def remove_from_dictionary(self, dictionary, *keys):
    """Removes the given `keys` from the `dictionary`.

    If the given `key` cannot be found from the `dictionary`, it
    is ignored.

    Example:
    | Remove From Dictionary | ${D3} | b | x | y |
    =>
    - ${D3} = {'a': 1, 'c': 3}
    """
    for key in keys:
        try:
            value = dictionary.pop(key)
            print "Removed item with key '%s' and value '%s'" % (key, value)
        except __HOLE__:
            print "Key '%s' not found" % (key)
KeyError
dataset/ETHPy150Open shellderp/sublime-robot-plugin/lib/robot/libraries/Collections.py/_Dictionary.remove_from_dictionary