function : string (lengths 79-138k). The source of one Python function, flattened to a single line, with the masked exception type written as __HOLE__.
label : string (20 classes). The exception type that fills __HOLE__.
info : string (lengths 42-261). Provenance path of the form dataset/ETHPy150Open <repo>/<file path>/<qualified function name>.
def get_default_columns(self, with_aliases=False, col_aliases=None, start_alias=None, opts=None, as_pairs=False, local_only=False): """ Computes the default columns for selecting every field in the base model. Will sometimes be called to pull in related models (e.g. via select_related), in which case "opts" and "start_alias" will be given to provide a starting point for the traversal. Returns a list of strings, quoted appropriately for use in SQL directly, as well as a set of aliases used in the select statement (if 'as_pairs' is True, returns a list of (alias, col_name) pairs instead of strings as the first component and None as the second component). This routine is overridden from Query to handle customized selection of geometry columns. """ result = [] if opts is None: opts = self.query.model._meta aliases = set() only_load = self.deferred_to_columns() if start_alias: seen = {None: start_alias} for field, model in opts.get_fields_with_model(): # For local fields (even if through proxy) the model should # be None. if model == opts.concrete_model: model = None if local_only and model is not None: continue if start_alias: try: alias = seen[model] except __HOLE__: link_field = opts.get_ancestor_link(model) alias = self.query.join((start_alias, model._meta.db_table, link_field.column, model._meta.pk.column)) seen[model] = alias else: # If we're starting from the base model of the queryset, the # aliases will have already been set up in pre_sql_setup(), so # we can save time here. alias = self.query.included_inherited_models[model] table = self.query.alias_map[alias].table_name if table in only_load and field.column not in only_load[table]: continue if as_pairs: result.append((alias, field.column)) aliases.add(alias) continue # This part of the function is customized for GeoQuery. We # see if there was any custom selection specified in the # dictionary, and set up the selection format appropriately. field_sel = self.get_field_select(field, alias) if with_aliases and field.column in col_aliases: c_alias = 'Col%d' % len(col_aliases) result.append('%s AS %s' % (field_sel, c_alias)) col_aliases.add(c_alias) aliases.add(c_alias) else: r = field_sel result.append(r) aliases.add(r) if with_aliases: col_aliases.add(field.column) return result, aliases
KeyError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/contrib/gis/db/models/sql/compiler.py/GeoSQLCompiler.get_default_columns
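The KeyError label above marks a cache miss on the `seen` dict of join aliases. A minimal standalone sketch of that EAFP memoization pattern, with hypothetical names rather than the actual Django compiler internals:

    def alias_for(model, seen, make_alias):
        # EAFP cache: a KeyError on the lookup means "not joined yet".
        try:
            return seen[model]
        except KeyError:
            alias = make_alias(model)   # build and memoize the join alias
            seen[model] = alias
            return alias

    seen = {None: 't1'}
    print(alias_for('author', seen, lambda m: 't2'))   # 't2' (miss, memoized)
    print(alias_for('author', seen, lambda m: 't3'))   # 't2' (hit)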
def run(args): #Reset wipes the destination clean so we can start over. if args.reset: reset(args) #Set up validates the destination and source directories. #It also loads the previous state or creates one as necessary. state = setup(args) #We break out of this loop in batch mode and on KeyboardInterrupt while True: #The file scan uses the arguments and the state to filter down to #only new (since the last sync time) files. for path in scan(args, state): try: #Read the file and expose useful aspects for renaming/filtering replay = sc2reader.load_replay(path, load_level=2) except __HOLE__: raise except: #Failure to parse file_name = os.path.basename(path) directory = make_directory(args, ('parse_error',)) new_path = os.path.join(directory, file_name) source_path = path[len(args.source):] args.log.write("Error parsing replay: {0}".format(source_path)) if not args.dryrun: args.action.run(path, new_path) #Skip to the next replay continue aspects = generate_aspects(args, replay) #Use the filter args to select files based on replay attributes if filter_out_replay(args, replay): continue #Apply the aspects to the rename formatting. #'/' is a special character for creation of subdirectories. #TODO: Handle duplicate replay names, its possible.. path_parts = args.rename.format(**aspects).split('/') filename = path_parts.pop()+'.SC2Replay' #Construct the directory and file paths; create needed directories directory = make_directory(args, path_parts) new_path = os.path.join(directory, filename) #Find the source relative to the source directory for reporting dest_path = new_path[len(args.dest):] source_path = path[len(args.source):] #Log the action and run it if we are live msg = "{0}:\n\tSource: {1}\n\tDest: {2}\n" args.log.write(msg.format(args.action.type, source_path, dest_path)) if not args.dryrun: args.action.run(path, new_path) #After every batch completes, save the state and flush the log #TODO: modify the state to include a list of remaining files args.log.flush() save_state(state, args) #We only run once in batch mode! if args.mode == 'BATCH': break #Since new replays come in fairly infrequently, reduce system load #by sleeping for an acceptable response time before the next scan. time.sleep(args.period) args.log.write('Batch Completed')
KeyboardInterrupt
dataset/ETHPy150Open GraylinKim/sc2reader/examples/sc2autosave.py/run
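The hole in this entry re-raises KeyboardInterrupt so Ctrl-C can escape the bare `except:` that quarantines unparseable replays. A reduced sketch of that handler ordering, assuming hypothetical `load` and `quarantine` callables (and `except Exception:` in place of the original bare except):

    def process(paths, load, quarantine):
        for path in paths:
            try:
                replay = load(path)
            except KeyboardInterrupt:
                raise                     # let Ctrl-C abort the whole scan
            except Exception:             # any parse failure: set aside, move on
                quarantine(path)
                continue
            yield replay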
def main(): parser = argparse.ArgumentParser( description='Automatically copy new replays to directory', fromfile_prefix_chars='@', formatter_class=sc2reader.scripts.utils.Formatter.new(max_help_position=35), epilog="And that's all folks") required = parser.add_argument_group('Required Arguments') required.add_argument('source', type=str, help='The source directory to poll') required.add_argument('dest', type=str, help='The destination directory to copy to') general = parser.add_argument_group('General Options') general.add_argument('--mode', dest='mode', type=str, choices=['BATCH', 'CYCLE'], default='BATCH', help='The operating mode for the organizer') general.add_argument('--action', dest='action', choices=['COPY', 'MOVE'], default="COPY", type=str, help='Have the organizer move your files instead of copying') general.add_argument('--period', dest='period', type=int, default=0, help='The period of time to wait between scans.') general.add_argument('--log', dest='log', metavar='LOGFILE', type=argparse.FileType('w'), default=sys.stdout, help='Destination file for log information') general.add_argument('--dryrun', dest='dryrun', action="store_true", help="Don't do anything. Only simulate the output") general.add_argument('--reset', dest='reset', action='store_true', default=False, help='Wipe the destination directory clean and start over.') fileargs = parser.add_argument_group('File Options') fileargs.add_argument('--depth', dest='depth', type=int, default=-1, help='Maximum recursion depth. -1 (default) is unlimited.') fileargs.add_argument('--exclude-dirs', dest='exclude_dirs', type=str, metavar='NAME', nargs='+', default=[], help='A list of directory names to exclude during recursion') fileargs.add_argument('--exclude-files', dest='exclude_files', type=str, metavar='REGEX', default="", help='An expression to match excluded files') fileargs.add_argument('--follow-links', dest='follow_links', action="store_true", default=False, help="Enable following of symbolic links while scanning") renaming = parser.add_argument_group('Renaming Options') renaming.add_argument('--rename', dest='rename', type=str, metavar='FORMAT', nargs='?', default="{length} {type} on {map}", help='''\ The renaming format string. Can have the following values: * {length} - The length of the replay ([H:]MM:SS) * {type} - The type of the replay (1v1,2v2,4v4,etc) * {map} - The map that was played on. * {match} - Race matchup in team order, alphabetically by race. * {date} - The date the replay was played on * {teams} - The player line up ''') renaming.add_argument('--length-format', dest='length_format', type=str, metavar='FORMAT', default='%M.%S', help='The length format string. See the python time module for details') renaming.add_argument('--player-format', dest='player_format', type=str, metavar='FORMAT', default='{name} ({play_race})', help='The player format string used to render the :teams content item.') renaming.add_argument('--date-format', dest='date_format', type=str, metavar='FORMAT', default='%m-%d-%Y', help='The date format string used to render the :date content item.') ''' renaming.add_argument('--team-order-by', dest='team_order', type=str, metavar='FIELD', default='NUMBER', help='The field by which teams are ordered.') renaming.add_argument('--player-order-by', dest='player_order', type=str, metavar='FIELD', default='NAME', help='The field by which players are ordered on teams.') ''' renaming.add_argument('--favored', dest='favored', type=str, default=[], metavar='NAME', nargs='+', help='A list of the players to favor in ordering teams and players') filterargs = parser.add_argument_group('Filtering Options') filterargs.add_argument('--filter-rule', dest='filter_rule', choices=["ALLOW","DENY"], help="The filters can either be used as a white list or a black list") filterargs.add_argument('--filter-player', metavar='NAME', dest='filter_player', nargs='+', type=str, default=[], help="A list of players to filter on") try: run(parser.parse_args()) except __HOLE__: print "\n\nScript Interrupted. Process Aborting"
KeyboardInterrupt
dataset/ETHPy150Open GraylinKim/sc2reader/examples/sc2autosave.py/main
def read(self, filename): """ Reads the file specified and parses the token elements generated from tokenizing the input data. `filename` Filename to read. Returns boolean. """ try: with open(filename, 'r') as _file: self.readstream(_file) self._filename = filename return True except __HOLE__: self._reset() return False
IOError
dataset/ETHPy150Open xtrementl/focus/focus/parser/parser.py/SettingParser.read
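read, readstream, and write in this parser all share one shape: attempt the I/O, and on IOError roll internal state back and signal failure with a boolean. A minimal sketch of that shape, with a hypothetical parser class:

    class Parser(object):
        def __init__(self):
            self._reset()

        def _reset(self):
            self.data = None

        def read(self, filename):
            try:
                with open(filename) as handle:
                    self.data = handle.read()
                self.filename = filename
                return True
            except IOError:               # missing/unreadable file: roll back
                self._reset()
                return False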
def readstream(self, stream): """ Reads the specified stream and parses the token elements generated from tokenizing the input data. `stream` ``File``-like object. Returns boolean. """ self._reset() try: # tokenize input stream self._lexer = SettingLexer() self._lexer.readstream(stream) # parse tokens into AST self._parse() return True except __HOLE__: self._reset() return False
IOError
dataset/ETHPy150Open xtrementl/focus/focus/parser/parser.py/SettingParser.readstream
def write(self, filename, header=None): """ Writes the AST as a configuration file. `filename` Filename to save configuration file to. `header` Header string to use for the file. Returns boolean. """ origfile = self._filename try: with open(filename, 'w') as _file: self.writestream(_file, header) self._filename = filename return True except __HOLE__: self._filename = origfile return False
IOError
dataset/ETHPy150Open xtrementl/focus/focus/parser/parser.py/SettingParser.write
def writestream(self, stream, header=None): """ Writes the AST as a configuration file to the File-like stream. `stream` ``File``-like object. `header` Header string to use for the stream. Returns boolean. * Raises a ``ValueError`` exception if `header` is invalid and a regular exception if no data is available to write to stream. """ def serialize_values(values): """ Serializes list of values into the following format:: "value","value2","value3" """ return ','.join('"{0}"'.format(v) for v in (common.to_utf8(v).replace('\\', '\\\\') .replace('"', '\\"') for v in values)) if not self._ast: raise Exception(u'No available data to write to stream') header = header or self._ast[0] if not header: raise ValueError(u"Must provide a header") if not self.RE_NAME.match(header): raise ValueError(u"Invalid header") try: # write header, opening { stream.write('{0} {{{1}'.format(header, os.linesep)) # write options for option, value_list in self.options: vals = serialize_values(value_list) stream.write(' {0} {1};{2}'.format(option, vals, os.linesep)) for block, option_list in self.blocks: # write block name, inner opening { stream.write(' {0} {{{1}'.format(block, os.linesep)) # write options for option, value_list in option_list: vals = serialize_values(value_list) stream.write(' {0} {1};{2}' .format(option, vals, os.linesep)) # write inner closing } stream.write(' }}{0}'.format(os.linesep)) # write closing } stream.write('}}{0}'.format(os.linesep)) # set the header self._ast[0] = header stream.flush() return True except __HOLE__: return False
IOError
dataset/ETHPy150Open xtrementl/focus/focus/parser/parser.py/SettingParser.writestream
def countGenesTranscripts(inlist, options): """count number of genes/transcripts in list.""" genes = {} transcripts = {} for x in inlist: try: species, transcript, gene = parseIdentifier(x, options) except __HOLE__: continue if species not in genes: genes[species] = set() transcripts[species] = set() transcripts[species].add(transcript) genes[species].add(gene) return genes, transcripts
ValueError
dataset/ETHPy150Open CGATOxford/cgat/scripts/diff_transcript_sets.py/countGenesTranscripts
def getTranscriptsForGenes(genes, transcripts, options): """get transcripts for list of genes.""" result = [] for x in transcripts: try: species, transcript, gene = parseIdentifier(x, options) except __HOLE__: continue if gene in genes: result.append(x) return result # --------------------------------------------------------------------------
ValueError
dataset/ETHPy150Open CGATOxford/cgat/scripts/diff_transcript_sets.py/getTranscriptsForGenes
def tagged_object_list(request, queryset_or_model=None, tag=None, related_tags=False, related_tag_counts=True, **kwargs): """ A thin wrapper around ``django.views.generic.list_detail.object_list`` which creates a ``QuerySet`` containing instances of the given queryset or model tagged with the given tag. In addition to the context variables set up by ``object_list``, a ``tag`` context variable will contain the ``Tag`` instance for the tag. If ``related_tags`` is ``True``, a ``related_tags`` context variable will contain tags related to the given tag for the given model. Additionally, if ``related_tag_counts`` is ``True``, each related tag will have a ``count`` attribute indicating the number of items which have it in addition to the given tag. """ if queryset_or_model is None: try: queryset_or_model = kwargs.pop('queryset_or_model') except KeyError: raise AttributeError(_('tagged_object_list must be called with a queryset or a model.')) if tag is None: try: tag = kwargs.pop('tag') except __HOLE__: raise AttributeError(_('tagged_object_list must be called with a tag.')) tag_instance = get_tag(tag) if tag_instance is None: raise Http404(_('No Tag found matching "%s".') % tag) queryset = TaggedItem.objects.get_by_model(queryset_or_model, tag_instance) if 'extra_context' not in kwargs: kwargs['extra_context'] = {} kwargs['extra_context']['tag'] = tag_instance if related_tags: kwargs['extra_context']['related_tags'] = \ Tag.objects.related_for_model(tag_instance, queryset_or_model, counts=related_tag_counts) return object_list(request, queryset, **kwargs)
KeyError
dataset/ETHPy150Open amarandon/smeuhsocial/apps/tagging/views.py/tagged_object_list
def getch(self): if self.lastChar == '': try: return self.iterator.next() except __HOLE__: return '' else: (ch, self.lastChar) = (self.lastChar, '') return ch
StopIteration
dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/maya/site-packages/pymel-1.0.3/pymel/util/scanf.py/CharacterBufferFromIterable.getch
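getch drains a one-character pushback slot before pulling from the iterator, and converts StopIteration into the sentinel '' (scanf's EOF convention). A self-contained sketch using the `next()` builtin instead of the Python 2 `.next()` method seen above:

    class CharBuffer(object):
        def __init__(self, iterable):
            self.iterator = iter(iterable)
            self.last = ''

        def getch(self):
            if self.last:                 # drain the pushback slot first
                ch, self.last = self.last, ''
                return ch
            try:
                return next(self.iterator)
            except StopIteration:         # exhausted: '' plays the role of EOF
                return ''

        def ungetch(self, ch):
            self.last = ch                # push one character back

    buf = CharBuffer('ab')
    print(buf.getch() + buf.getch() + buf.getch())   # 'ab'; third call hit EOF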
def isIterable(thing): """Returns true if 'thing' looks iterable.""" try: iter(thing) except __HOLE__: return False return True
TypeError
dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/maya/site-packages/pymel-1.0.3/pymel/util/scanf.py/isIterable
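isIterable is a pure EAFP probe: iter() raises TypeError for objects with neither __iter__ nor __getitem__. The same function with a usage note (strings deliberately count as iterable):

    def is_iterable(thing):
        try:
            iter(thing)       # TypeError: no __iter__ or __getitem__
        except TypeError:
            return False
        return True

    print(is_iterable([1, 2]))   # True
    print(is_iterable(42))       # False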
def isFileLike(thing): """Returns true if thing looks like a file.""" if hasattr(thing, "read") and hasattr(thing, "seek"): try: thing.seek(1, 1) thing.seek(-1, 1) return True except __HOLE__: pass return False
IOError
dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/maya/site-packages/pymel-1.0.3/pymel/util/scanf.py/isFileLike
def handleDecimalInt(buffer, optional=False, allowLeadingWhitespace=True): """Tries to scan for an integer. If 'optional' is set to False, returns None if an integer can't be successfully scanned.""" if allowLeadingWhitespace: handleWhitespace(buffer) ## eat leading spaces chars = [] chars += buffer.scanCharacterSet(_PLUS_MINUS_SET, 1) chars += buffer.scanCharacterSet(_DIGIT_SET) try: return int(''.join(chars), 10) except __HOLE__: if optional: return None raise FormatError, ("invalid literal characters: %s" % ''.join(chars))
ValueError
dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/maya/site-packages/pymel-1.0.3/pymel/util/scanf.py/handleDecimalInt
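handleDecimalInt collects sign and digit characters and lets int() arbitrate: an empty or sign-only string raises ValueError, which becomes None for an optional match or a FormatError otherwise. A runnable sketch of that arbitration, with FormatError redeclared locally:

    class FormatError(Exception):
        pass

    def parse_decimal(chars, optional=False):
        try:
            return int(''.join(chars), 10)
        except ValueError:                # e.g. no digits, or a bare '-'
            if optional:
                return None
            raise FormatError("invalid literal characters: %s" % ''.join(chars))

    print(parse_decimal(['-', '4', '2']))       # -42
    print(parse_decimal([], optional=True))     # None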
def handleOct(buffer): chars = [] chars += buffer.scanCharacterSet(_PLUS_MINUS_SET) chars += buffer.scanCharacterSet(_OCT_SET) try: return int(''.join(chars), 8) except __HOLE__: raise FormatError, ("invalid literal characters: %s" % ''.join(chars))
ValueError
dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/maya/site-packages/pymel-1.0.3/pymel/util/scanf.py/handleOct
def handleInt(buffer, base=0): chars = [] chars += buffer.scanCharacterSet(_PLUS_MINUS_SET) chars += buffer.scanCharacterSet("0") if chars and chars[-1] == '0': chars += buffer.scanCharacterSet("xX") chars += buffer.scanCharacterSet(_HEX_SET) try: return int(''.join(chars), base) except __HOLE__: raise FormatError, ("invalid literal characters: %s" % ''.join(chars))
ValueError
dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/maya/site-packages/pymel-1.0.3/pymel/util/scanf.py/handleInt
def handleFloat(buffer, allowLeadingWhitespace=True): if allowLeadingWhitespace: handleWhitespace(buffer) ## eat leading whitespace chars = [] chars += buffer.scanCharacterSet(_PLUS_MINUS_SET) chars += buffer.scanCharacterSet(_DIGIT_SET) chars += buffer.scanCharacterSet(".") chars += buffer.scanCharacterSet(_DIGIT_SET) chars += buffer.scanCharacterSet("eE") chars += buffer.scanCharacterSet(_PLUS_MINUS_SET) chars += buffer.scanCharacterSet(_DIGIT_SET) try: return float(''.join(chars)) except __HOLE__: raise FormatError, ("invalid literal characters: %s" % ''.join(chars))
ValueError
dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/maya/site-packages/pymel-1.0.3/pymel/util/scanf.py/handleFloat
def _check_list_display_item(self, obj, model, item, label): if callable(item): return [] elif hasattr(obj, item): return [] elif hasattr(model, item): # getattr(model, item) could be an X_RelatedObjectsDescriptor try: field = model._meta.get_field(item) except FieldDoesNotExist: try: field = getattr(model, item) except __HOLE__: field = None if field is None: return [ checks.Error( "The value of '%s' refers to '%s', which is not a " "callable, an attribute of '%s', or an attribute or method on '%s.%s'." % ( label, item, obj.__class__.__name__, model._meta.app_label, model._meta.object_name ), obj=obj.__class__, id='admin.E108', ) ] elif getattr(field, 'many_to_many', False): return [ checks.Error( "The value of '%s' must not be a many-to-many field." % label, obj=obj.__class__, id='admin.E109', ) ] else: return [] else: try: model._meta.get_field(item) except FieldDoesNotExist: return [ # This is a deliberate repeat of E108; there's more than one path # required to test this condition. checks.Error( "The value of '%s' refers to '%s', which is not a callable, " "an attribute of '%s', or an attribute or method on '%s.%s'." % ( label, item, obj.__class__.__name__, model._meta.app_label, model._meta.object_name ), obj=obj.__class__, id='admin.E108', ) ] else: return []
AttributeError
dataset/ETHPy150Open django/django/django/contrib/admin/checks.py/ModelAdminChecks._check_list_display_item
def _check_relation(self, obj, parent_model): try: _get_foreign_key(parent_model, obj.model, fk_name=obj.fk_name) except __HOLE__ as e: return [checks.Error(e.args[0], obj=obj.__class__, id='admin.E202')] else: return []
ValueError
dataset/ETHPy150Open django/django/django/contrib/admin/checks.py/InlineModelAdminChecks._check_relation
def rst_to_html(rst_text): try: from docutils.core import publish_string bootstrap_css_path = os.path.join(sublime.packages_path(), 'RstPreview/css/bootstrap.min.css') base_css_path = os.path.join(sublime.packages_path(), 'RstPreview/css/base.css') args = { 'stylesheet_path': ','.join([bootstrap_css_path, base_css_path]), 'syntax_highlight': 'short' } return publish_string(rst_text, writer_name='html', settings_overrides=args) except __HOLE__: error_msg = """RstPreview requires docutils to be installed for the python interpreter that Sublime uses. run: `sudo easy_install-2.6 docutils` and restart Sublime (if on Mac OS X or Linux). For Windows check the docs at https://github.com/d0ugal/RstPreview""" sublime.error_message(error_msg) raise
ImportError
dataset/ETHPy150Open d0ugal-archive/RstPreview/RstPreview.py/rst_to_html
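rst_to_html defers the docutils import to call time and turns ImportError into an actionable message. A condensed sketch of the deferred-import guard; the error text here is illustrative, not the plugin's:

    def render_html(rst_text):
        try:
            from docutils.core import publish_string   # imported on first use
        except ImportError:
            raise RuntimeError("docutils is required; install it for the "
                               "interpreter this tool runs under")
        return publish_string(rst_text, writer_name='html')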
def __init__(self, simulation_cfg, default_wait_time_seconds=0.05, epsilon_time=0.05): super(PrefixPeeker, self).__init__(simulation_cfg, default_wait_time_seconds=default_wait_time_seconds, epsilon_time=epsilon_time) try: import pytrie except __HOLE__: raise RuntimeError("Need to install pytrie: `sudo pip install pytrie`") # The prefix trie stores lists of input events as keys, # and lists of both input and internal events as values # Note that we pass the trie around between DAG views self._prefix_trie = pytrie.Trie()
ImportError
dataset/ETHPy150Open ucb-sts/sts/sts/control_flow/peeker.py/PrefixPeeker.__init__
def get_form(self, payment, data=None): if not payment.id: payment.save() xml_request = render_to_string( 'payments/sofort/new_transaction.xml', { 'project_id': self.project_id, 'language_code': get_language(), 'interface_version': 'django-payments', 'amount': payment.total, 'currency': payment.currency, 'description': payment.description, 'success_url': payment.get_process_url(), 'abort_url': payment.get_process_url(), 'customer_protection': '0'}) doc, response = self.post_request(xml_request) if response.status_code == 200: try: raise RedirectNeeded(doc['new_transaction']['payment_url']) except __HOLE__: raise PaymentError( 'Error in %s: %s' % ( doc['errors']['error']['field'], doc['errors']['error']['message']))
KeyError
dataset/ETHPy150Open mirumee/django-payments/payments/sofort/__init__.py/SofortProvider.get_form
def process_data(self, payment, request): if not 'trans' in request.GET: return HttpResponseForbidden('FAILED') transaction_id = request.GET.get('trans') payment.transaction_id = transaction_id transaction_request = render_to_string( 'payments/sofort/transaction_request.xml', {'transactions': [transaction_id]}) doc, response = self.post_request(transaction_request) try: # If there is a transaction and status returned, # the payment was successful status = doc['transactions']['transaction_details']['status'] except __HOLE__: # Payment Failed payment.change_status('rejected') return redirect(payment.get_failure_url()) else: payment.captured_amount = payment.total payment.change_status('confirmed') payment.extra_data = json.dumps(doc) sender_data = doc['transactions']['transaction_details']['sender'] holder_data = sender_data['holder'] first_name, last_name = holder_data.rsplit(' ', 1) payment.billing_first_name = first_name payment.billing_last_name = last_name payment.billing_country_code = sender_data['country_code'] payment.save() return redirect(payment.get_success_url())
KeyError
dataset/ETHPy150Open mirumee/django-payments/payments/sofort/__init__.py/SofortProvider.process_data
def test_singular_gh_3312(self): # "Bad" test case that leads SuperLU to call LAPACK with invalid # arguments. Check that it fails moderately gracefully. ij = np.array([(17, 0), (17, 6), (17, 12), (10, 13)], dtype=np.int32) v = np.array([0.284213, 0.94933781, 0.15767017, 0.38797296]) A = csc_matrix((v, ij.T), shape=(20, 20)) b = np.arange(20) with warnings.catch_warnings(): try: # should either raise a runtimeerror or return value # appropriate for singular input x = spsolve(A, b, use_umfpack=False) assert_(not np.isfinite(x).any()) except __HOLE__: pass
RuntimeError
dataset/ETHPy150Open scipy/scipy/scipy/sparse/linalg/dsolve/tests/test_linsolve.py/TestLinsolve.test_singular_gh_3312
@property def main_menu(self): """ Returns Python object represents a menu """ try: return PluginManager().get_plugin_menu(self._main_menu) except __HOLE__: self._main_menu = sublime.decode_value(sublime.load_resource( "Packages/Javatar/menu/MainMenu.json" )) return self.main_menu
AttributeError
dataset/ETHPy150Open spywhere/Javatar/commands/menu.py/JavatarCommand.main_menu
def create_repo(name, attributes): config_parser = ConfigParser.RawConfigParser() values = ("name", "baseurl", "metalink", "mirrorlist", "gpgcheck", "gpgkey", "exclude", "includepkgs", "enablegroups", "enabled", "failovermethod", "keepalive", "timeout", "enabled", "http_caching", "retries", "throttle", "bandwidth", "sslcacert", "sslverify", "sslclientcert", "metadata_expire", "mirrorlist_expire", "proxy", "proxy_username", "proxy_password", "cost", "skip_if_unavailable") baseurl = None try: baseurl = attributes['baseurl'].split()[0] except (KeyError, __HOLE__) as err: raise ResourceException("Wrong baseurl attribute [%s]" % err) # Check if repo already exists repo = get_repos(name) # If it exists, get the filename in which the repo is defined # If not, check if a filename is user provided # If no filename is provided, create one based on the repo name if repo.get('present'): filename = repo.get("filename") elif attributes.get("filename"): filename = attributes["filename"] else: filename = "%s.repo" % name # Read the config file (empty or not) and load it in a ConfigParser # object repo_file_path = os.path.join(repo_path, filename) config_parser.read(repo_file_path) # Check if the repo is define in the ConfigParser context. # If not, add a section based on the repo name. if not config_parser.has_section(name): config_parser.add_section(name) config_parser.set(name, "name", name) # Set gpgcheck to 0 by default to bypass some issues config_parser.set(name, 'gpgcheck', 0) # Update the section with not None fields provided by the user for key, value in attributes.items(): if value is not None and key in values: config_parser.set(name, key, value) config_parser.set(name, 'baseurl', baseurl) # Write changes to the repo file. with open(repo_file_path, 'wb') as repofile: config_parser.write(repofile)
AttributeError
dataset/ETHPy150Open comodit/synapse-agent/synapse/resources/repos-plugin/yum-repos.py/create_repo
def _WaitForExternalIp(self, instance_name): """Waits for the instance to get external IP and returns it. Args: instance_name: Name of the Compute Engine instance. Returns: External IP address in string. Raises: ClusterSetUpError: External IP assignment times out. """ for _ in xrange(self.INSTANCE_IP_ADDRESS_MAX_CHECK_TIMES): instance = self._GetApi().GetInstance(instance_name) if instance: try: return instance['networkInterfaces'][0]['accessConfigs'][0]['natIP'] except (__HOLE__, IndexError): pass time.sleep(self.INSTANCE_IP_ADDRESS_CHECK_INTERVAL) raise ClusterSetUpError('External IP address time out for %s' % instance_name)
KeyError
dataset/ETHPy150Open GoogleCloudPlatform/Data-Pipeline/app/src/hadoop/hadoop_cluster.py/HadoopCluster._WaitForExternalIp
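_WaitForExternalIp treats a missing nested key or index as "not assigned yet" and retries on an interval until a bounded number of attempts runs out. A generic sketch of that poll loop, assuming a hypothetical fetch_instance callable that returns the same dict shape:

    import time

    def wait_for_nat_ip(fetch_instance, attempts=30, interval=2.0):
        for _ in range(attempts):
            instance = fetch_instance()
            if instance:
                try:
                    return (instance['networkInterfaces'][0]
                            ['accessConfigs'][0]['natIP'])
                except (KeyError, IndexError):   # field not populated yet
                    pass
            time.sleep(interval)
        raise RuntimeError('timed out waiting for an external IP')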
def do_show(request, event_id): service = str(request.GET.get('service', 'nova')) event_id = int(event_id) results = [] model = _model_factory(service) try: event = model.get(id=event_id) results = _append_raw_attributes(event, results, service) final = [results, ] j = json.loads(event.json) final.append(json.dumps(j, indent=2)) final.append(event.uuid) return rsp(json.dumps(final)) except __HOLE__: return rsp({})
ObjectDoesNotExist
dataset/ETHPy150Open openstack/stacktach/stacktach/stacky_server.py/do_show
def search(request): service = str(request.GET.get('service', 'nova')) field = request.GET.get('field') value = request.GET.get('value') model = _model_factory(service) filters = {field: value} _add_when_filters(request, filters) results = [] try: events = model_search(request, model, filters, order_by='-when') for event in events: when = dt.dt_from_decimal(event.when) routing_key_status = routing_key_type(event.routing_key) results = event.search_results(results, when, routing_key_status) return rsp(json.dumps(results)) except __HOLE__: return error_response(404, 'Not Found', ["The requested object does not exist"]) except FieldError: return error_response(400, 'Bad Request', "The requested field '%s' does not exist for the corresponding object.\n" "Note: The field names of database are case-sensitive." % field)
ObjectDoesNotExist
dataset/ETHPy150Open openstack/stacktach/stacktach/stacky_server.py/search
def _parse_created(created): try: created_datetime = datetime.datetime.strptime(created, '%Y-%m-%d') return dt.dt_to_decimal(created_datetime) except __HOLE__: raise BadRequestException( "'%s' value has an invalid format. It must be in YYYY-MM-DD format." % created)
ValueError
dataset/ETHPy150Open openstack/stacktach/stacktach/stacky_server.py/_parse_created
def _parse_id(id): try: return int(id) except __HOLE__: raise BadRequestException( "'%s' value has an invalid format. It must be in integer " "format." % id)
ValueError
dataset/ETHPy150Open openstack/stacktach/stacktach/stacky_server.py/_parse_id
def do_jsonreports_search(request): try: model = models.JsonReport filters = _create_query_filters(request) reports = model_search(request, model.objects, filters, order_by='-id') results = [['Id', 'Start', 'End', 'Created', 'Name', 'Version']] for report in reports: results.append([report.id, datetime.datetime.strftime( report.period_start, UTC_FORMAT), datetime.datetime.strftime( report.period_end, UTC_FORMAT), datetime.datetime.strftime( dt.dt_from_decimal(report.created), UTC_FORMAT), report.name, report.version]) except BadRequestException as be: return error_response(400, 'Bad Request', str(be)) except __HOLE__ as ve: return error_response(400, 'Bad Request', ve.messages[0]) return rsp(json.dumps(results))
ValidationError
dataset/ETHPy150Open openstack/stacktach/stacktach/stacky_server.py/do_jsonreports_search
def _crawl_config_files( self, root_dir='/', exclude_dirs=['proc', 'mnt', 'dev', 'tmp'], root_dir_alias=None, known_config_files=[], discover_config_files=False, ): assert(self.crawl_mode is not Modes.OUTCONTAINER) saved_args = locals() logger.debug('Crawling config files: %s' % (saved_args)) accessed_since = self.feature_epoch try: assert os.path.isdir(root_dir) if root_dir_alias is None: root_dir_alias = root_dir exclude_dirs = [os.path.join(root_dir, d) for d in exclude_dirs] exclude_regex = r'|'.join([fnmatch.translate(d) for d in exclude_dirs]) or r'$.' known_config_files[:] = [os.path.join(root_dir, f) for f in known_config_files] known_config_files[:] = [f for f in known_config_files if not re.match(exclude_regex, f)] config_file_set = set() for fpath in known_config_files: if os.path.exists(fpath): lstat = os.lstat(fpath) if (lstat.st_atime > accessed_since or lstat.st_ctime > accessed_since): config_file_set.add(fpath) except Exception as e: logger.error('Error examining %s' % root_dir, exc_info=True) raise CrawlError(e) try: if discover_config_files: # Walk the directory hierarchy starting at 'root_dir' in BFS # order looking for config files. for (root_dirpath, dirs, files) in os.walk(root_dir): dirs[:] = [os.path.join(root_dirpath, d) for d in dirs] dirs[:] = [d for d in dirs if not re.match(exclude_regex, d)] files = [os.path.join(root_dirpath, f) for f in files] files = [f for f in files if not re.match(exclude_regex, f)] for fpath in files: if os.path.exists(fpath) \ and self.is_config_file(fpath): lstat = os.lstat(fpath) if lstat.st_atime > accessed_since \ or lstat.st_ctime > accessed_since: config_file_set.add(fpath) except Exception as e: logger.error('Error examining %s' % root_dir, exc_info=True) raise CrawlError(e) try: for fpath in config_file_set: try: (_, fname) = os.path.split(fpath) frelpath = fpath.replace(root_dir, root_dir_alias, 1) # root_dir relative path # Copy this config_file into / before reading it, so we # don't change its atime attribute. (th, temppath) = tempfile.mkstemp(prefix='config.', dir='/') os.close(th) shutil.copyfile(fpath, temppath) with codecs.open(filename=fpath, mode='r', encoding='utf-8', errors='ignore') as \ config_file: # Encode the contents of config_file as utf-8. yield (frelpath, ConfigFeature(fname, config_file.read(), frelpath)) os.remove(temppath) except __HOLE__ as e: raise CrawlError(e) except Exception as e: logger.error('Error crawling config file %s' % fpath, exc_info=True) raise CrawlError(e) except Exception as e: logger.error('Error examining %s' % root_dir, exc_info=True) raise CrawlError(e) # crawl disk partition information
IOError
dataset/ETHPy150Open cloudviz/agentless-system-crawler/crawler/features_crawler.py/FeaturesCrawler._crawl_config_files
def _crawl_packages(self, dbpath=None, root_dir='/'): assert(self.crawl_mode is not Modes.OUTCONTAINER) # package attributes: ["installed", "name", "size", "version"] (installtime, name, version, size) = (None, None, None, None) if self.crawl_mode == Modes.INVM: logger.debug('Using in-VM state information (crawl mode: ' + self.crawl_mode + ')') system_type = platform.system().lower() distro = platform.linux_distribution()[0].lower() elif self.crawl_mode == Modes.MOUNTPOINT: logger.debug('Using disk image information (crawl mode: ' + self.crawl_mode + ')') system_type = \ platform_outofband.system(prefix=root_dir).lower() distro = platform_outofband.linux_distribution(prefix=root_dir)[ 0].lower() else: logger.error('Unsupported crawl mode: ' + self.crawl_mode + '. Skipping package crawl.') system_type = 'unknown' distro = 'unknown' installed_since = self.feature_epoch if system_type != 'linux' or (system_type == 'linux' and distro == ''): # Distro is blank for FROM scratch images # Package feature is only valid for Linux platforms. raise StopIteration() logger.debug('Crawling Packages') pkg_manager = 'unknown' if distro in ['ubuntu', 'debian']: pkg_manager = 'dpkg' elif distro.startswith('red hat') or distro in ['redhat', 'fedora', 'centos']: pkg_manager = 'rpm' elif os.path.exists(os.path.join(root_dir, 'var/lib/dpkg')): pkg_manager = 'dpkg' elif os.path.exists(os.path.join(root_dir, 'var/lib/rpm')): pkg_manager = 'rpm' try: if pkg_manager == 'dpkg': if not dbpath: dbpath = 'var/lib/dpkg' if os.path.isabs(dbpath): logger.warning( 'dbpath: ' + dbpath + ' is defined absolute. Ignoring prefix: ' + root_dir + '.') # Update for a different route. dbpath = os.path.join(root_dir, dbpath) if installed_since > 0: logger.warning( 'dpkg does not provide install-time, defaulting to ' 'all packages installed since epoch') try: dpkg = subprocess.Popen(['dpkg-query', '-W', '--admindir={0}'.format(dbpath), '-f=${Package}|${Version}' '|${Installed-Size}\n' ], stdout=subprocess.PIPE, stderr=subprocess.PIPE) dpkglist = dpkg.stdout.read().strip('\n') except OSError as e: logger.error( 'Failed to launch dpkg query for packages. Check if ' 'dpkg-query is installed: [Errno: %d] ' % e.errno + e.strerror + ' [Exception: ' + type(e).__name__ + ']') dpkglist = None if dpkglist: for dpkginfo in dpkglist.split('\n'): (name, version, size) = dpkginfo.split(r'|') # dpkg does not provide any installtime field # feature_key = '{0}/{1}'.format(name, version) --> # changed to below per Suriya's request feature_key = '{0}'.format(name, version) yield (feature_key, PackageFeature(None, name, size, version)) elif pkg_manager == 'rpm': if not dbpath: dbpath = 'var/lib/rpm' if os.path.isabs(dbpath): logger.warning( 'dbpath: ' + dbpath + ' is defined absolute. Ignoring prefix: ' + root_dir + '.') # update for a different route dbpath = os.path.join(root_dir, dbpath) try: rpm = subprocess.Popen([ 'rpm', '--dbpath', dbpath, '-qa', '--queryformat', '%{installtime}|%{name}|%{version}' '-%{release}|%{size}\n', ], stdout=subprocess.PIPE, stderr=subprocess.PIPE) rpmlist = rpm.stdout.read().strip('\n') except __HOLE__ as e: logger.error( 'Failed to launch rpm query for packages. Check if ' 'rpm is installed: [Errno: %d] ' % e.errno + e.strerror + ' [Exception: ' + type(e).__name__ + ']') rpmlist = None if rpmlist: for rpminfo in rpmlist.split('\n'): (installtime, name, version, size) = \ rpminfo.split(r'|') # if int(installtime) <= installed_since: --> this # barfs for sth like: 1376416422. Consider try: xxx # except ValueError: pass if installtime <= installed_since: continue # feature_key = '{0}/{1}'.format(name, version) --> # changed to below per Suriya's request feature_key = '{0}'.format(name, version) yield (feature_key, PackageFeature(installtime, name, size, version)) else: raise CrawlError( Exception( 'Unsupported package manager for Linux distro %s' % distro)) except Exception as e: logger.error('Error crawling package %s' % ((name if name else 'Unknown')), exc_info=True) raise CrawlError(e) # crawl virtual memory information
OSError
dataset/ETHPy150Open cloudviz/agentless-system-crawler/crawler/features_crawler.py/FeaturesCrawler._crawl_packages
def get_temp_path(self, name=None): if name is None: name = os.urandom(20).encode('hex') dirname = self.info.env.temp_path try: os.makedirs(dirname) except __HOLE__: pass return os.path.join(dirname, name)
OSError
dataset/ETHPy150Open lektor/lektor-archive/lektor/admin/modules/common.py/AdminContext.get_temp_path
def sorted_list_difference(expected, actual): """Finds elements in only one or the other of two, sorted input lists. Returns a two-element tuple of lists. The first list contains those elements in the "expected" list but not in the "actual" list, and the second contains those elements in the "actual" list but not in the "expected" list. Duplicate elements in either input list are ignored. """ i = j = 0 missing = [] unexpected = [] while True: try: e = expected[i] a = actual[j] if e < a: missing.append(e) i += 1 while expected[i] == e: i += 1 elif e > a: unexpected.append(a) j += 1 while actual[j] == a: j += 1 else: i += 1 try: while expected[i] == e: i += 1 finally: j += 1 while actual[j] == a: j += 1 except __HOLE__: missing.extend(expected[i:]) unexpected.extend(actual[j:]) break return missing, unexpected
IndexError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.3/django/utils/unittest/util.py/sorted_list_difference
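sorted_list_difference walks two sorted lists with cursors and uses an IndexError from either list to detect exhaustion, then flushes the remainder of the other side. The same control flow in miniature (the original's duplicate-skipping is omitted):

    def merge_difference(expected, actual):
        i = j = 0
        missing, unexpected = [], []
        try:
            while True:
                if expected[i] < actual[j]:
                    missing.append(expected[i])
                    i += 1
                elif expected[i] > actual[j]:
                    unexpected.append(actual[j])
                    j += 1
                else:
                    i += 1
                    j += 1
        except IndexError:                # one side ran out: flush the other
            missing.extend(expected[i:])
            unexpected.extend(actual[j:])
        return missing, unexpected

    print(merge_difference([1, 2, 4], [2, 3]))   # ([1, 4], [3])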
def unorderable_list_difference(expected, actual, ignore_duplicate=False): """Same behavior as sorted_list_difference but for lists of unorderable items (like dicts). As it does a linear search per item (remove) it has O(n*n) performance. """ missing = [] unexpected = [] while expected: item = expected.pop() try: actual.remove(item) except __HOLE__: missing.append(item) if ignore_duplicate: for lst in expected, actual: try: while True: lst.remove(item) except ValueError: pass if ignore_duplicate: while actual: item = actual.pop() unexpected.append(item) try: while True: actual.remove(item) except ValueError: pass return missing, unexpected # anything left in actual is unexpected return missing, actual
ValueError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.3/django/utils/unittest/util.py/unorderable_list_difference
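unorderable_list_difference leans on list.remove raising ValueError when the item is absent: an O(n*n) diff that needs only equality, not ordering, so it works on dicts. A minimal sketch (duplicate handling omitted):

    def diff_unordered(expected, actual):
        missing = []
        actual = list(actual)            # work on a copy
        for item in expected:
            try:
                actual.remove(item)      # ValueError: item was never there
            except ValueError:
                missing.append(item)
        return missing, actual           # whatever is left was unexpected

    print(diff_unordered([{'a': 1}, {'b': 2}], [{'b': 2}, {'c': 3}]))
    # ([{'a': 1}], [{'c': 3}])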
@permission_required("core.manage_shop") def update_attachments(request, product_id): """Saves/deletes attachments with given ids (passed by request body). """ product = lfs_get_object_or_404(Product, pk=product_id) action = request.POST.get("action") message = _(u"Attachment has been updated.") if action == "delete": message = _(u"Attachment has been deleted.") for key in request.POST.keys(): if key.startswith("delete-"): try: id = key.split("-")[1] attachment = ProductAttachment.objects.get(pk=id).delete() except (__HOLE__, ObjectDoesNotExist): pass elif action == "update": message = _(u"Attachment has been updated.") for attachment in product.attachments.all(): attachment.title = request.POST.get("title-%s" % attachment.id)[:50] attachment.position = request.POST.get("position-%s" % attachment.id) attachment.description = request.POST.get("description-%s" % attachment.id) attachment.save() # Refresh positions for i, attachment in enumerate(product.attachments.all()): attachment.position = (i + 1) * 10 attachment.save() product_changed.send(product, request=request) html = [["#attachments-list", list_attachments(request, product_id, as_string=True)]] result = json.dumps({ "html": html, "message": message, }, cls=LazyEncoder) return HttpResponse(result, content_type='application/json')
IndexError
dataset/ETHPy150Open diefenbach/django-lfs/lfs/manage/product/attachments.py/update_attachments
def transform(self, data): try: val = data[self.db_field] data[self.output_name] = val del data[self.db_field] except __HOLE__: data[self.output_name] = None return data
KeyError
dataset/ETHPy150Open openelections/openelections-core/openelex/base/bake.py/FieldNameTransform.transform
def transform(self, data): try: data.update(data[self.db_field]) del data[self.db_field] except __HOLE__: pass return data
KeyError
dataset/ETHPy150Open openelections/openelections-core/openelex/base/bake.py/FlattenFieldTransform.transform
def _contribute_fields(self, collection): is_primary = collection == self.primary_collection coll_name = collection._meta['collection'] try: excluded_fields = list(self.excluded_fields[coll_name]) except __HOLE__: excluded_fields = [] excluded_fields.append('_id') excluded_field_set = set(excluded_fields) for field_name in collection._fields_ordered: field = collection._fields[field_name] db_field_name = field.db_field if db_field_name in excluded_field_set: continue if is_primary and self._is_relationship_field(field): # Track relationship fields but don't add them to the set of # output fields self._relationships[field.db_field] = field.document_type._meta['collection'] else: output_field_name = self._transform_field_name(coll_name, db_field_name) if output_field_name: self._output_fields.append(output_field_name)
KeyError
dataset/ETHPy150Open openelections/openelections-core/openelex/base/bake.py/Roller._contribute_fields
def _transform_field_name(self, collection_name, field_name): try: return self.field_transforms[collection_name][field_name].output_name except __HOLE__: return field_name
KeyError
dataset/ETHPy150Open openelections/openelections-core/openelex/base/bake.py/Roller._transform_field_name
def build_filters(self, **filter_kwargs): """ Returns a dictionary of Q objects that will be used to limit the mapper querysets. This allows for translating arguments from upstream code to the filter format used by the underlying data store abstraction. This will build a set of filters common to all querysets and will call any build_filters_COLLECTION_NAME methods that have been implemented on this class. Arguments: * state: Required. Postal code for a state. For example, "md". * datefilter: Date specified in "YYYY" or "YYYY-MM-DD" used to filter elections before they are baked. * election_type: Election type. For example, general, primary, etc. * reporting_level: Reporting level of the election results. For example, "state", "county", "precinct", etc. Value must be one of the options specified in openelex.models.Result.REPORTING_LEVEL_CHOICES. """ # TODO: Implement filtering by office, district and party after the # the data is standardized # By default, should filter to all state/contest-wide results for all # races when no filters are specified. filters= {} q_kwargs = {} q_kwargs['state'] = filter_kwargs['state'].upper() try: q_kwargs['election_id__contains'] = filter_kwargs['election_type'] except KeyError: pass common_q = Q(**q_kwargs) # Merge in the date filters try: common_q &= self.build_date_filters(filter_kwargs['datefilter']) except __HOLE__: pass for collection_name in self._querysets.keys(): filters[collection_name] = common_q try: fn = getattr(self, 'build_filters_' + collection_name) collection_q = fn(**filter_kwargs) if collection_q: filters[collection_name] &= collection_q except AttributeError: pass return filters
KeyError
dataset/ETHPy150Open openelections/openelections-core/openelex/base/bake.py/Roller.build_filters
def build_filters_result(self, **filter_kwargs): try: return Q(reporting_level=filter_kwargs['reporting_level']) except __HOLE__: return None
KeyError
dataset/ETHPy150Open openelections/openelections-core/openelex/base/bake.py/Roller.build_filters_result
def get_fields(self): """ Returns a list of all fields encountered when building the flattened data with a call to get_list() This list is appropriate for writing a header row in a csv file using csv.DictWriter. """ try: return list(self._fields) except __HOLE__: return self._output_fields
AttributeError
dataset/ETHPy150Open openelections/openelections-core/openelex/base/bake.py/Roller.get_fields
def build_filters_raw_result(self, **filter_kwargs): try: return Q(reporting_level=filter_kwargs['reporting_level']) except __HOLE__: return None
KeyError
dataset/ETHPy150Open openelections/openelections-core/openelex/base/bake.py/RawResultRoller.build_filters_raw_result
def get_items(self): """ Retrieve a flattened, filtered list of election results. Returns: A list of result dictionaries. By default, this is the value of ``self._items`` which should be populated with a call to ``collect_items()``. If results need to be retrieved in some other way, this method should be overridden in a subclass. """ try: return self._items except __HOLE__: return []
AttributeError
dataset/ETHPy150Open openelections/openelections-core/openelex/base/bake.py/BaseBaker.get_items
def write(self, fmt='csv', outputdir=None, timestamp=None): """ Writes collected data to a file. Arguments: * fmt: Output format. Either 'csv' or 'json'. Default is 'csv'. * outputdir: Directory where output files will be written. Defaults to "openelections/us/bakery" """ try: fmt_method = getattr(self, 'write_' + fmt) except __HOLE__: raise UnsupportedFormatError("Format %s is not supported" % (fmt)) if outputdir is None: outputdir = self.default_outputdir() if not os.path.exists(outputdir): os.makedirs(outputdir) if timestamp is None: timestamp = datetime.now() return fmt_method(outputdir, timestamp)
AttributeError
dataset/ETHPy150Open openelections/openelections-core/openelex/base/bake.py/BaseBaker.write
def test_check(self): client = BlazeMeterClientEmul(logging.getLogger('')) client.results.append({"marker": "ping", 'result': {}}) client.results.append({"marker": "projects", 'result': []}) client.results.append({"marker": "project-create", 'result': { "id": time.time(), "name": "boo", "userId": time.time(), "description": None, "created": time.time(), "updated": time.time(), "organizationId": None }}) client.results.append({"marker": "tests", 'result': {}}) client.results.append({"marker": "test-create", 'result': {'id': 'unittest1'}}) client.results.append( {"marker": "sess-start", 'result': {'session': {'id': 'sess1', 'userId': 1}, 'signature': ''}}) client.results.append({"marker": "first push", 'result': {'session': {}}}) # client.results.append(None) # first check error stats client.results.append( {"marker": "second push", 'result': {'session': {"statusCode": 140, 'status': 'ENDED'}}}) # client.results.append(None) # second check error stats client.results.append({"marker": "post-proc push", 'result': {'session': {}}}) client.results.append({"marker": "upload1", "result": True}) # post-proc error stats client.results.append({"marker": "terminate", 'result': {'session': {}}}) obj = BlazeMeterUploader() obj.parameters['project'] = 'Proj name' obj.settings['token'] = '123' obj.settings['browser-open'] = 'none' obj.engine = EngineEmul() shutil.copy(__file__, obj.engine.artifacts_dir + os.path.basename(__file__)) obj.client = client obj.prepare() obj.startup() for x in range(0, 31): obj.aggregated_second(random_datapoint(x)) obj.check() for x in range(32, 65): obj.aggregated_second(random_datapoint(x)) try: obj.check() self.fail() except __HOLE__: pass obj.aggregated_second(random_datapoint(10)) obj.shutdown() obj.post_process()
KeyboardInterrupt
dataset/ETHPy150Open Blazemeter/taurus/tests/modules/test_blazeMeterUploader.py/TestBlazeMeterUploader.test_check
def test_hash(self): for obj_1, obj_2 in self.eq_pairs: try: if not hash(obj_1) == hash(obj_2): self.fail("%r and %r do not hash equal" % (obj_1, obj_2)) except __HOLE__: raise except Exception, e: self.fail("Problem hashing %r and %r: %s" % (obj_1, obj_2, e)) for obj_1, obj_2 in self.ne_pairs: try: if hash(obj_1) == hash(obj_2): self.fail("%s and %s hash equal, but shouldn't" % (obj_1, obj_2)) except KeyboardInterrupt: raise except Exception, e: self.fail("Problem hashing %s and %s: %s" % (obj_1, obj_2, e))
KeyboardInterrupt
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/unittest/test/support.py/TestHashing.test_hash
def popen_wrapper(args, os_err_exc_type=CommandError): """ Friendly wrapper around Popen. Returns stdout output, stderr output and OS status code. """ try: p = Popen(args, shell=False, stdout=PIPE, stderr=PIPE, close_fds=os.name != 'nt', universal_newlines=True) except __HOLE__ as e: six.reraise(os_err_exc_type, os_err_exc_type('Error executing %s: %s' % (args[0], e.strerror)), sys.exc_info()[2]) output, errors = p.communicate() return ( output, force_text(errors, DEFAULT_LOCALE_ENCODING, strings_only=True), p.returncode )
OSError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Django-1.6.10/django/core/management/utils.py/popen_wrapper
def command(self, *args, **options): try: cover = args[0] cover = cover.lower() == "true" except __HOLE__: cover = False if cover: #Grab the pythonpath argument and look for tests there app_names = settings.INSTALLED_APPS #Grab the top level app name from installed_apps app_labels = list(set([a.split('.')[0] for a in app_names])) app_paths = [] #We want to figure out coverage for the "lower-level" apps, so import all the top level apps #and get their paths for al in app_labels: mod = import_module(al) app_paths.append(os.path.dirname(mod.__file__)) #Pass paths to pkgutil to get the names of the submodules sub_labels = [name for _, name, _ in pkgutil.iter_modules(app_paths) if name not in settings.DO_NOT_COVER] #Produce a coverage report for installed_apps argv = ['{0}'.format(options['pythonpath']), '--with-coverage', '--cover-package={0}'.format(','.join(app_labels + sub_labels))] nose.run(argv=argv) else: argv = ['{0}'.format(options['pythonpath'])] nose.run(argv=argv)
IndexError
dataset/ETHPy150Open VikParuchuri/percept/percept/tests/commands/test.py/Command.command
def get_sli_manifest_part(self): part = {"columnName": self.name, "mode": "FULL", } if self.referenceKey: part["referenceKey"] = 1 if self.format: part['constraints'] = {'date': self.format} try: part['populates'] = self.populates() except __HOLE__: pass return part
NotImplementedError
dataset/ETHPy150Open comoga/gooddata-python/gooddataclient/columns.py/Column.get_sli_manifest_part
def get_supported_platform(): """Return this platform's maximum compatible version. distutils.util.get_platform() normally reports the minimum version of Mac OS X that would be required to *use* extensions produced by distutils. But what we want when checking compatibility is to know the version of Mac OS X that we are *running*. To allow usage of packages that explicitly require a newer version of Mac OS X, we must also know the current version of the OS. If this condition occurs for any other platform with a version in its platform strings, this function should be extended accordingly. """ plat = get_build_platform() m = macosVersionString.match(plat) if m is not None and sys.platform == "darwin": try: plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3)) except __HOLE__: # not Mac OS X pass return plat
ValueError
dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/pkg_resources/__init__.py/get_supported_platform
def get_provider(moduleOrReq): """Return an IResourceProvider for the named module or requirement""" if isinstance(moduleOrReq, Requirement): return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0] try: module = sys.modules[moduleOrReq] except __HOLE__: __import__(moduleOrReq) module = sys.modules[moduleOrReq] loader = getattr(module, '__loader__', None) return _find_adapter(_provider_factories, loader)(module)
KeyError
dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/pkg_resources/__init__.py/get_provider
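get_provider resolves a module through sys.modules and imports on a KeyError miss. The import-on-demand idiom, sketched standalone:

    import sys

    def resolve_module(name):
        try:
            return sys.modules[name]     # hit: already imported
        except KeyError:
            __import__(name)             # fills sys.modules as a side effect
            return sys.modules[name]

    print(resolve_module('json').__name__)   # 'json'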
def get_build_platform(): """Return this platform's string for platform-specific distributions XXX Currently this is the same as ``distutils.util.get_platform()``, but it needs some hacks for Linux and Mac OS X. """ try: # Python 2.7 or >=3.2 from sysconfig import get_platform except ImportError: from distutils.util import get_platform plat = get_platform() if sys.platform == "darwin" and not plat.startswith('macosx-'): try: version = _macosx_vers() machine = os.uname()[4].replace(" ", "_") return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]), _macosx_arch(machine)) except __HOLE__: # if someone is running a non-Mac darwin system, this will fall # through to the default implementation pass return plat
ValueError
dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/pkg_resources/__init__.py/get_build_platform
@classmethod def _build_master(cls): """ Prepare the master working set. """ ws = cls() try: from __main__ import __requires__ except __HOLE__: # The main program does not list any requirements return ws # ensure the requirements are met try: ws.require(__requires__) except VersionConflict: return cls._build_from_requirements(__requires__) return ws
ImportError
dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/pkg_resources/__init__.py/WorkingSet._build_master
def get_default_cache(): """Determine the default cache location This returns the ``PYTHON_EGG_CACHE`` environment variable, if set. Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the "Application Data" directory. On all other systems, it's "~/.python-eggs". """ try: return os.environ['PYTHON_EGG_CACHE'] except __HOLE__: pass if os.name!='nt': return os.path.expanduser('~/.python-eggs') # XXX this may be locale-specific! app_data = 'Application Data' app_homes = [ # best option, should be locale-safe (('APPDATA',), None), (('USERPROFILE',), app_data), (('HOMEDRIVE','HOMEPATH'), app_data), (('HOMEPATH',), app_data), (('HOME',), None), # 95/98/ME (('WINDIR',), app_data), ] for keys, subdir in app_homes: dirname = '' for key in keys: if key in os.environ: dirname = os.path.join(dirname, os.environ[key]) else: break else: if subdir: dirname = os.path.join(dirname, subdir) return os.path.join(dirname, 'Python-Eggs') else: raise RuntimeError( "Please set the PYTHON_EGG_CACHE enviroment variable" )
KeyError
dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/pkg_resources/__init__.py/get_default_cache
@classmethod def comparison(cls, nodelist): if len(nodelist) > 4: msg = "Chained comparison not allowed in environment markers" raise SyntaxError(msg) comp = nodelist[2][1] cop = comp[1] if comp[0] == token.NAME: if len(nodelist[2]) == 3: if cop == 'not': cop = 'not in' else: cop = 'is not' try: cop = cls.get_op(cop) except __HOLE__: msg = repr(cop) + " operator not allowed in environment markers" raise SyntaxError(msg) return cop(cls.evaluate(nodelist[1]), cls.evaluate(nodelist[3]))
KeyError
dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/pkg_resources/__init__.py/MarkerEvaluation.comparison
@classmethod def _markerlib_evaluate(cls, text): """ Evaluate a PEP 426 environment marker using markerlib. Return a boolean indicating the marker result in this environment. Raise SyntaxError if marker is invalid. """ from pip._vendor import _markerlib # markerlib implements Metadata 1.2 (PEP 345) environment markers. # Translate the variables to Metadata 2.0 (PEP 426). env = _markerlib.default_environment() for key in env.keys(): new_key = key.replace('.', '_') env[new_key] = env.pop(key) try: result = _markerlib.interpret(text, env) except __HOLE__ as e: raise SyntaxError(e.args[0]) return result
NameError
dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/pkg_resources/__init__.py/MarkerEvaluation._markerlib_evaluate
@classmethod def interpret(cls, nodelist): while len(nodelist)==2: nodelist = nodelist[1] try: op = cls.get_op(nodelist[0]) except __HOLE__: raise SyntaxError("Comparison or logical expression expected") return op(nodelist)
KeyError
dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/pkg_resources/__init__.py/MarkerEvaluation.interpret
@classmethod def evaluate(cls, nodelist): while len(nodelist)==2: nodelist = nodelist[1] kind = nodelist[0] name = nodelist[1] if kind==token.NAME: try: op = cls.values[name] except __HOLE__: raise SyntaxError("Unknown name %r" % name) return op() if kind==token.STRING: s = nodelist[1] if not cls._safe_string(s): raise SyntaxError( "Only plain strings allowed in environment markers") return s[1:-1] msg = "Language feature not supported in environment markers" raise SyntaxError(msg)
KeyError
dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/pkg_resources/__init__.py/MarkerEvaluation.evaluate
def _index(self): try: return self._dirindex except __HOLE__: ind = {} for path in self.zipinfo: parts = path.split(os.sep) while parts: parent = os.sep.join(parts[:-1]) if parent in ind: ind[parent].append(parts[-1]) break else: ind[parent] = [parts.pop()] self._dirindex = ind return ind
AttributeError
dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/pkg_resources/__init__.py/ZipProvider._index
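_index also shows the lazy attribute-caching idiom that recurs in several Distribution properties later in this section (key, version, _dep_map, get_entry_map, _parsed_pkg_info): the first access raises AttributeError, the handler computes and stores the value, and every later access takes the fast path. A generic sketch with hypothetical names:

class Report(object):
    @property
    def summary(self):
        try:
            return self._summary           # fast path once cached
        except AttributeError:             # the masked exception
            self._summary = self._build()  # compute exactly once
            return self._summary

    def _build(self):
        return "expensive result"          # stand-in for real work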
def declare_namespace(packageName): """Declare that package 'packageName' is a namespace package""" _imp.acquire_lock() try: if packageName in _namespace_packages: return path, parent = sys.path, None if '.' in packageName: parent = '.'.join(packageName.split('.')[:-1]) declare_namespace(parent) if parent not in _namespace_packages: __import__(parent) try: path = sys.modules[parent].__path__ except __HOLE__: raise TypeError("Not a package:", parent) # Track what packages are namespaces, so when new path items are added, # they can be updated _namespace_packages.setdefault(parent,[]).append(packageName) _namespace_packages.setdefault(packageName,[]) for path_item in path: # Ensure all the parent's path items are reflected in the child, # if they apply _handle_ns(packageName, path_item) finally: _imp.release_lock()
AttributeError
dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/pkg_resources/__init__.py/declare_namespace
def _normalize_cached(filename, _cache={}): try: return _cache[filename] except __HOLE__: _cache[filename] = result = normalize_path(filename) return result
KeyError
dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/pkg_resources/__init__.py/_normalize_cached
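_normalize_cached memoizes through a mutable default argument, using the KeyError of a cache miss as the trigger to compute. A sketch of the same trick, with os.path.realpath standing in for normalize_path:

import os.path

def normalized(path, _cache={}):
    # The default dict is created once and shared across calls,
    # so it behaves as a process-wide memo table.
    try:
        return _cache[path]
    except KeyError:
        _cache[path] = result = os.path.realpath(path)
        return result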
def resolve(self): """ Resolve the entry point from its module and attrs. """ module = __import__(self.module_name, fromlist=['__name__'], level=0) try: return functools.reduce(getattr, self.attrs, module) except __HOLE__ as exc: raise ImportError(str(exc))
AttributeError
dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/pkg_resources/__init__.py/EntryPoint.resolve
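EntryPoint.resolve walks a dotted attribute chain with functools.reduce(getattr, ...) and re-raises a missing attribute as ImportError. A standalone sketch of that mechanism:

import functools
import importlib

def resolve(module_name, attrs):
    module = importlib.import_module(module_name)
    try:
        return functools.reduce(getattr, attrs, module)
    except AttributeError as exc:      # the masked exception
        raise ImportError(str(exc))

# e.g. resolve('os.path', ('join',)) returns the os.path.join function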
@property def key(self): try: return self._key except __HOLE__: self._key = key = self.project_name.lower() return key
AttributeError
dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/pkg_resources/__init__.py/Distribution.key
@property def version(self): try: return self._version except __HOLE__: for line in self._get_metadata(self.PKG_INFO): if line.lower().startswith('version:'): self._version = safe_version(line.split(':',1)[1].strip()) return self._version else: tmpl = "Missing 'Version:' header and/or %s file" raise ValueError(tmpl % self.PKG_INFO, self)
AttributeError
dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/pkg_resources/__init__.py/Distribution.version
@property def _dep_map(self): try: return self.__dep_map except __HOLE__: dm = self.__dep_map = {None: []} for name in 'requires.txt', 'depends.txt': for extra, reqs in split_sections(self._get_metadata(name)): if extra: if ':' in extra: extra, marker = extra.split(':', 1) if invalid_marker(marker): # XXX warn reqs=[] elif not evaluate_marker(marker): reqs=[] extra = safe_extra(extra) or None dm.setdefault(extra,[]).extend(parse_requirements(reqs)) return dm
AttributeError
dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/pkg_resources/__init__.py/Distribution._dep_map
def requires(self, extras=()): """List of Requirements needed for this distro if `extras` are used""" dm = self._dep_map deps = [] deps.extend(dm.get(None, ())) for ext in extras: try: deps.extend(dm[safe_extra(ext)]) except __HOLE__: raise UnknownExtra( "%s has no such extra feature %r" % (self, ext) ) return deps
KeyError
dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/pkg_resources/__init__.py/Distribution.requires
def __str__(self): try: version = getattr(self, 'version', None) except __HOLE__: version = None version = version or "[unknown version]" return "%s %s" % (self.project_name, version)
ValueError
dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/pkg_resources/__init__.py/Distribution.__str__
def get_entry_map(self, group=None): """Return the entry point map for `group`, or the full entry map""" try: ep_map = self._ep_map except __HOLE__: ep_map = self._ep_map = EntryPoint.parse_map( self._get_metadata('entry_points.txt'), self ) if group is not None: return ep_map.get(group,{}) return ep_map
AttributeError
dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/pkg_resources/__init__.py/Distribution.get_entry_map
def insert_on(self, path, loc = None): """Insert self.location in path before its nearest parent directory""" loc = loc or self.location if not loc: return nloc = _normalize_cached(loc) bdir = os.path.dirname(nloc) npath= [(p and _normalize_cached(p) or p) for p in path] for p, item in enumerate(npath): if item == nloc: break elif item == bdir and self.precedence == EGG_DIST: # if it's an .egg, give it precedence over its directory if path is sys.path: self.check_version_conflict() path.insert(p, loc) npath.insert(p, nloc) break else: if path is sys.path: self.check_version_conflict() path.append(loc) return # p is the spot where we found or inserted loc; now remove duplicates while True: try: np = npath.index(nloc, p+1) except __HOLE__: break else: del npath[np], path[np] # ha! p = np return
ValueError
dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/pkg_resources/__init__.py/Distribution.insert_on
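The duplicate-removal loop at the end of insert_on relies on list.index raising ValueError once no further match exists, so the exception doubles as the loop terminator. A compact sketch of that pattern:

def remove_later_occurrences(items, value, start):
    # Repeatedly locate `value` after position `start`; the masked
    # ValueError from list.index means there are no more matches.
    while True:
        try:
            p = items.index(value, start + 1)
        except ValueError:
            return items
        del items[p]

# remove_later_occurrences(['a', 'b', 'a', 'a'], 'a', 0) == ['a', 'b']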
def has_version(self): try: self.version except __HOLE__: issue_warning("Unbuilt egg for " + repr(self)) return False return True
ValueError
dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/pkg_resources/__init__.py/Distribution.has_version
@property def _parsed_pkg_info(self): """Parse and cache metadata""" try: return self._pkg_info except __HOLE__: metadata = self.get_metadata(self.PKG_INFO) self._pkg_info = email.parser.Parser().parsestr(metadata) return self._pkg_info
AttributeError
dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/pkg_resources/__init__.py/DistInfoDistribution._parsed_pkg_info
@property def _dep_map(self): try: return self.__dep_map except __HOLE__: self.__dep_map = self._compute_dependencies() return self.__dep_map
AttributeError
dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/pkg_resources/__init__.py/DistInfoDistribution._dep_map
def issue_warning(*args,**kw): level = 1 g = globals() try: # find the first stack frame that is *not* code in # the pkg_resources module, to use for the warning while sys._getframe(level).f_globals is g: level += 1 except __HOLE__: pass warnings.warn(stacklevel=level + 1, *args, **kw)
ValueError
dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/pkg_resources/__init__.py/issue_warning
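issue_warning climbs the call stack with sys._getframe until it leaves the current module; sys._getframe raises ValueError when the requested depth runs past the top of the stack, which the masked handler absorbs. A sketch:

import sys
import warnings

def warn_from_caller(message):
    g = globals()
    level = 1
    try:
        # Skip frames whose globals belong to this module itself.
        while sys._getframe(level).f_globals is g:
            level += 1
    except ValueError:
        pass  # ran off the top of the stack; warn from where we stopped
    warnings.warn(message, stacklevel=level + 1)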
def parse_requirements(strs): """Yield ``Requirement`` objects for each specification in `strs` `strs` must be a string, or a (possibly-nested) iterable thereof. """ # create a steppable iterator, so we can handle \-continuations lines = iter(yield_lines(strs)) def scan_list(ITEM, TERMINATOR, line, p, groups, item_name): items = [] while not TERMINATOR(line, p): if CONTINUE(line, p): try: line = next(lines) p = 0 except __HOLE__: msg = "\\ must not appear on the last nonblank line" raise RequirementParseError(msg) match = ITEM(line, p) if not match: msg = "Expected " + item_name + " in" raise RequirementParseError(msg, line, "at", line[p:]) items.append(match.group(*groups)) p = match.end() match = COMMA(line, p) if match: # skip the comma p = match.end() elif not TERMINATOR(line, p): msg = "Expected ',' or end-of-list in" raise RequirementParseError(msg, line, "at", line[p:]) match = TERMINATOR(line, p) # skip the terminator, if any if match: p = match.end() return line, p, items for line in lines: match = DISTRO(line) if not match: raise RequirementParseError("Missing distribution spec", line) project_name = match.group(1) p = match.end() extras = [] match = OBRACKET(line, p) if match: p = match.end() line, p, extras = scan_list( DISTRO, CBRACKET, line, p, (1,), "'extra' name" ) line, p, specs = scan_list(VERSION, LINE_END, line, p, (1, 2), "version spec") specs = [(op, val) for op, val in specs] yield Requirement(project_name, specs, extras)
StopIteration
dataset/ETHPy150Open anzev/hedwig/build/pip/pip/_vendor/pkg_resources/__init__.py/parse_requirements
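scan_list pulls the next physical line with next(lines) and treats StopIteration as "a backslash continuation ended the input". A reduced sketch of consuming continuations from a line iterator, with ValueError standing in for RequirementParseError:

def join_continuations(text):
    lines = iter(text.splitlines())
    out = []
    for line in lines:
        while line.endswith('\\'):
            try:
                line = line[:-1] + next(lines)
            except StopIteration:  # the masked exception
                raise ValueError("\\ must not appear on the last line")
        out.append(line)
    return out

# join_continuations('a\\\nb\nc') == ['ab', 'c']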
def testSystem( self ):
    """test system and environment functionality"""
    org = os.environ
    self._testCmds( javashell._shellEnv, testCmds, "default" )

    # trigger initialization of environment
    os.environ[ key ] = value

    assert org.get( key, None ) == value, \
            "expected stub to have %s set" % key
    assert os.environ.get( key, None ) == value, \
            "expected real os.environ to have %s set" % key

    # if environment is initialized and jython gets ARGS=-i, it thinks
    # it is running in interactive mode, and fails to exit until
    # process.getOutputStream().close()
    try:
        del os.environ[ "ARGS" ]
    except __HOLE__:
        pass

    # test system using the non-default environment
    self._testCmds( javashell._shellEnv, testCmds, "initialized" )

    assert os.environ.has_key( "PATH" ), \
            "expected environment to have PATH attribute " \
            "(this may not apply to all platforms!)"
KeyError
dataset/ETHPy150Open babble/babble/include/jython/Lib/test/test_javashell.py/JavaShellTest.testSystem
def chunk_assertion_blocks(text): chunks = [] buffer = None for lineno, line in enumerate(text.splitlines()): if not line.startswith('::'): if buffer: buffer[-1].append(line) continue tokens = line[2:].split() try: pragma, args = tokens[0], tokens[1:] if pragma == 'test': if buffer: raise ChunkError("test out of order") chunknum = len(chunks) + 1 if args: title = ' '.join(args) else: title = str(chunknum) buffer = [chunknum, title, []] continue elif pragma == 'endtest': if not buffer or len(buffer) == 3: raise ChunkError("endtest out of order") buffer[2] = ' '.join(buffer[2]).strip() buffer[3] = ' '.join(buffer[3]).strip() chunks.append(buffer) buffer = None continue elif pragma == 'eq': if not buffer or len(buffer) > 3: raise ChunkError("eq out of order") buffer.append([]) else: raise ChunkError("unknown pragma" + pragma) except (ChunkError, __HOLE__), exc: lineno += 1 arg = exc.args[0] if exc.args else '' raise AssertionError( "Invalid testing chunk specification: %s\n" "line %s:\n%r" % ( arg, lineno, line)) return chunks
IndexError
dataset/ETHPy150Open jek/flatland/tests/genshi/_util.py/chunk_assertion_blocks
def get_handler(self, *args, **options):
    if int(options['verbosity']) < 1:
        handler = WSGIHandler()
    else:
        handler = DevServerHandler()

    # AdminMediaHandler is removed in Django 1.5.
    # Add it only when it is available.
    try:
        from django.core.servers.basehttp import AdminMediaHandler
    except __HOLE__:
        pass
    else:
        handler = AdminMediaHandler(
            handler, options['admin_media_path'])

    if 'django.contrib.staticfiles' in settings.INSTALLED_APPS and options['use_static_files']:
        from django.contrib.staticfiles.handlers import StaticFilesHandler
        handler = StaticFilesHandler(handler)

    return handler
ImportError
dataset/ETHPy150Open dcramer/django-devserver/devserver/management/commands/runserver.py/Command.get_handler
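The try/except ImportError around AdminMediaHandler is the standard optional-import idiom; the nrhead and nrfoot entries near the end of this section use it too. A generic hedged sketch:

try:
    import simplejson as json  # faster drop-in replacement, if installed
except ImportError:
    import json                # stdlib fallback

def dumps(obj):
    return json.dumps(obj)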
def inner_run(self, *args, **options): # Flag the server as active from devserver import settings import devserver settings.DEVSERVER_ACTIVE = True settings.DEBUG = True from django.conf import settings from django.utils import translation shutdown_message = options.get('shutdown_message', '') use_werkzeug = options.get('use_werkzeug', False) quit_command = (sys.platform == 'win32') and 'CTRL-BREAK' or 'CONTROL-C' wsgi_app = options.get('wsgi_app', None) if use_werkzeug: try: from werkzeug import run_simple, DebuggedApplication except ImportError, e: self.stderr.write("WARNING: Unable to initialize werkzeug: %s\n" % e) use_werkzeug = False else: from django.views import debug debug.technical_500_response = null_technical_500_response self.stdout.write("Validating models...\n\n") self.validate(display_num_errors=True) self.stdout.write(( "Django version %(version)s, using settings %(settings)r\n" "Running django-devserver %(devserver_version)s\n" "%(server_model)s %(server_type)s server is running at http://%(addr)s:%(port)s/\n" "Quit the server with %(quit_command)s.\n" ) % { "server_type": use_werkzeug and 'werkzeug' or 'Django', "server_model": options['use_forked'] and 'Forked' or 'Threaded', "version": self.get_version(), "devserver_version": devserver.get_version(), "settings": settings.SETTINGS_MODULE, "addr": self._raw_ipv6 and '[%s]' % self.addr or self.addr, "port": self.port, "quit_command": quit_command, }) # django.core.management.base forces the locale to en-us. We should # set it up correctly for the first request (particularly important # in the "--noreload" case). translation.activate(settings.LANGUAGE_CODE) app = self.get_handler(*args, **options) if wsgi_app: self.stdout.write("Using WSGI application %r\n" % wsgi_app) if os.path.exists(os.path.abspath(wsgi_app)): # load from file app = imp.load_source('wsgi_app', os.path.abspath(wsgi_app)).application else: try: app = __import__(wsgi_app, {}, {}, ['application']).application except (ImportError, AttributeError): raise if options['use_forked']: mixin = SocketServer.ForkingMixIn else: mixin = SocketServer.ThreadingMixIn middleware = getattr(settings, 'DEVSERVER_WSGI_MIDDLEWARE', []) for middleware in middleware: module, class_name = middleware.rsplit('.', 1) app = getattr(__import__(module, {}, {}, [class_name]), class_name)(app) if options['use_dozer']: from dozer import Dozer app = Dozer(app) try: if use_werkzeug: run_simple( self.addr, int(self.port), DebuggedApplication(app, True), use_reloader=False, use_debugger=True) else: run(self.addr, int(self.port), app, mixin, ipv6=self.use_ipv6) except wsgi_server_exc_cls, e: # Use helpful error messages instead of ugly tracebacks. ERRORS = { errno.EACCES: "You don't have permission to access that port.", errno.EADDRINUSE: "That port is already in use.", errno.EADDRNOTAVAIL: "That IP address can't be assigned-to.", } if not isinstance(e, socket.error): # Django < 1.6 ERRORS[13] = ERRORS.pop(errno.EACCES) ERRORS[98] = ERRORS.pop(errno.EADDRINUSE) ERRORS[99] = ERRORS.pop(errno.EADDRNOTAVAIL) try: if not isinstance(e, socket.error): # Django < 1.6 error_text = ERRORS[e.args[0].args[0]] else: error_text = ERRORS[e.errno] except (AttributeError, KeyError): error_text = str(e) sys.stderr.write(self.style.ERROR("Error: %s" % error_text) + '\n') # Need to use an OS exit because sys.exit doesn't work in a thread os._exit(1) except __HOLE__: if shutdown_message: self.stdout.write("%s\n" % shutdown_message) sys.exit(0)
KeyboardInterrupt
dataset/ETHPy150Open dcramer/django-devserver/devserver/management/commands/runserver.py/Command.inner_run
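inner_run treats the masked KeyboardInterrupt as the normal shutdown path: print an optional message and exit with status 0. A minimal sketch of that server-loop convention:

import sys
import time

def serve_forever(shutdown_message=''):
    try:
        while True:
            time.sleep(1)  # stand-in for serving requests
    except KeyboardInterrupt:
        if shutdown_message:
            sys.stdout.write(shutdown_message + "\n")
        sys.exit(0)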
def get_next_adapter(self, intr_type='eth'):
    """
        Get the next network adapter.

        We always return an adapter with a 0 alias: take the highest
        existing primary number, increment it by 1, and return that.

        :param intr_type: The type of network adapter
        :type intr_type: str
        :return: 3 strings: 'adapter_name', 'primary_number', 'alias_number'
    """
    if self.staticinterface_set.count() == 0:
        return intr_type, '0', '0'
    else:
        primary_list = []
        for i in self.staticinterface_set.all():
            i.update_attrs()
            try:
                primary_list.append(int(i.attrs.primary))
            except __HOLE__, e:
                continue
        ## sort and reverse the list to get the highest primary;
        ## perhaps someday come up with the lowest available.
        ## this should work for now
        primary_list.sort()
        primary_list.reverse()
        if not primary_list:
            return intr_type, '0', '0'
        else:
            return intr_type, str(primary_list[0] + 1), '0'
AttributeError
dataset/ETHPy150Open rtucker-mozilla/mozilla_inventory/systems/models.py/System.get_next_adapter
def read_handler(self): while 1: try: rl = self._poller.poll(0) except __HOLE__ as why: if why.errno == EINTR: continue else: raise else: break readysocks = [] for fd, flags in rl: sock = self.smap.get(fd) if sock is not None: readysocks.append(sock) self._pyrod.events(readysocks) self.update()
IOError
dataset/ETHPy150Open kdart/pycopia/QA/pycopia/remote/pyro.py/PyroAsyncAdapter.read_handler
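read_handler wraps poll() in the classic retry-on-EINTR loop: an IOError whose errno is EINTR means a signal interrupted the system call, so the call is simply retried. A standalone sketch (Python 3.5+ retries automatically per PEP 475, so this matters mostly on older interpreters):

import errno
import select

def poll_retrying(poller, timeout=0):
    # usage: poll_retrying(select.poll())
    while True:
        try:
            return poller.poll(timeout)
        except (IOError, OSError) as why:
            if why.errno == errno.EINTR:
                continue  # interrupted by a signal: retry
            raise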
def collect_placement(self, it, app): _log.analyze(self._node.id, "+ BEGIN", {}, tb=True) if app._collect_placement_cb: app._collect_placement_cb.cancel() app._collect_placement_cb = None try: while True: _log.analyze(self._node.id, "+ ITER", {}) actor_node_id = it.next() app._collect_placement_last_value = app._collect_placement_counter app.actor_placement.setdefault(actor_node_id[0], set([])).add(actor_node_id[1]) except dynops.PauseIteration: _log.analyze(self._node.id, "+ PAUSED", {'counter': app._collect_placement_counter, 'last_value': app._collect_placement_last_value, 'diff': app._collect_placement_counter - app._collect_placement_last_value}) # FIXME the dynops should be self triggering, but is not... # This is a temporary fix by keep trying delay = 0.0 if app._collect_placement_counter > app._collect_placement_last_value + 100 else 0.2 app._collect_placement_counter += 1 app._collect_placement_cb = async.DelayedCall(delay, self.collect_placement, it=it, app=app) return except __HOLE__: if not app.done_final: app.done_final = True # all possible actor placements derived _log.analyze(self._node.id, "+ ALL", {}) self._app_requirements(app) _log.analyze(self._node.id, "+ END", {}) except: _log.exception("appmanager:collect_placement") ### DEPLOYMENT ###
StopIteration
dataset/ETHPy150Open EricssonResearch/calvin-base/calvin/runtime/north/appmanager.py/AppManager.collect_placement
def select_actor(self, out_iter, kwargs, final, comp_name_desc): _log.analyze(self.node.id, "+", {'comp_name_desc': comp_name_desc}, tb=True) if final[0] and not kwargs['done']: kwargs['done'] = True for name, desc_list in kwargs['priority'].iteritems(): if desc_list: out_iter.append(desc_list[0]) out_iter.final() return desc = comp_name_desc[1] try: # List of (found, is_primitive, info) actor_types = [self.actorstore.lookup(actor['actor_type']) for actor in desc['component']['structure']['actors'].values()] except __HOLE__: actor_types = [] # Not a component, shadow actor candidate, likely kwargs['priority'][comp_name_desc[0]].insert(0, comp_name_desc) comp_name_desc[1]['shadow_actor'] = True return except Exception as e: # FIXME Handled when security verification failed _log.exception("select_actor desc: %s" % desc) raise e if all([a[0] and a[1] for a in actor_types]): # All found and primitive (quite unlikely), insert after any primitive shadow actors in priority index = len([1 for a in kwargs['priority'][comp_name_desc[0]] if 'shadow_actor' in a[1]]) kwargs['priority'][comp_name_desc[0]].insert(index, comp_name_desc) comp_name_desc[1]['shadow_component'] = actor_types return # A component containing shadow actors # TODO Dig deeper to priorities between shadow components, now just insert in order kwargs['priority'][comp_name_desc[0]].append(comp_name_desc) comp_name_desc[1]['shadow_component'] = actor_types
KeyError
dataset/ETHPy150Open EricssonResearch/calvin-base/calvin/runtime/north/appmanager.py/Deployer.select_actor
def deploy_unhandled_actors(self, comp_name_desc): while True: try: name, desc = comp_name_desc.next() _log.analyze(self.node.id, "+", {'name': name, 'desc': desc}, tb=True) except __HOLE__: # Done if self._deploy_cont_done: return self._deploy_cont_done = True self.group_components() _log.analyze(self.node.id, "+ DONE", {'deployable': self.deployable, 'components': self.components}) self._deploy_cont() return except dynops.PauseIteration: return if 'shadow_actor' in desc: _log.analyze(self.node.id, "+ SHADOW ACTOR", {'name': name}) # It was a normal primitive shadow actor, just instanciate req = self.get_req(name) info = self.deployable['actors'][name] actor_id = self.instantiate_primitive(name, info['actor_type'], info['args'], req, info['signature']) if not actor_id: _log.error("Second phase, could not make shadow actor %s!" % info['actor_type']) self.actor_map[name] = actor_id self.node.app_manager.add(self.app_id, actor_id) elif 'shadow_component' in desc: _log.analyze(self.node.id, "+ SHADOW COMPONENT", {'name': name}) # A component that needs to be broken up into individual primitive actors # First get the info and remove the component req = self.get_req(name) info = self.deployable['actors'][name] self.deployable['actors'].pop(name) # Then add the new primitive actors for actor_name, actor_desc in desc['component']['structure']['actors'].iteritems(): args = {k: v[1] if v[0] == 'VALUE' else info['args'][v[1]] for k, v in actor_desc['args'].iteritems()} inports = [c['dst_port'] for c in desc['component']['structure']['connections'] if c['dst'] == actor_name] outports = [c['src_port'] for c in desc['component']['structure']['connections'] if c['src'] == actor_name] sign_desc = {'is_primitive': True, 'actor_type': actor_desc['actor_type'], 'inports': inports[:], 'outports': outports[:]} sign = GlobalStore.actor_signature(sign_desc) self.deployable['actors'][name + ":" + actor_name] = {'args': args, 'actor_type': actor_desc['actor_type'], 'signature_desc': sign_desc, 'signature': sign} # Replace component connections with actor connection # outports comp_outports = [(c['dst_port'], c['src_port']) for c in desc['component']['structure']['connections'] if c['src'] == actor_name and c['dst'] == "."] for c_port, a_port in comp_outports: if (name + "." + c_port) in self.deployable['connections']: self.deployable['connections'][name + ":" + actor_name + "." + a_port] = \ self.deployable['connections'].pop(name + "." + c_port) # inports comp_inports = [(c['src_port'], c['dst_port']) for c in desc['component']['structure']['connections'] if c['dst'] == actor_name and c['src'] == "."] for outport, ports in self.deployable['connections'].iteritems(): for c_inport, a_inport in comp_inports: if (name + "." + c_inport) in ports: ports.remove(name + "." + c_inport) ports.append(name + ":" + actor_name + "." + a_inport) _log.analyze(self.node.id, "+ REPLACED PORTS", {'comp_outports': comp_outports, 'comp_inports': comp_inports, 'actor_name': actor_name, 'connections': self.deployable['connections']}) # Add any new component internal connections (enough with outports) for connection in desc['component']['structure']['connections']: if connection['src'] == actor_name and connection['src_port'] in outports and connection['dst'] != ".": self.deployable['connections'].setdefault( name + ":" + actor_name + "." + connection['src_port'], []).append( name + ":" + connection['dst'] + "." 
+ connection['dst_port']) _log.analyze(self.node.id, "+ ADDED PORTS", {'connections': self.deployable['connections']}) # Instanciate it actor_id = self.instantiate_primitive(name + ":" + actor_name, actor_desc['actor_type'], args, req, sign) if not actor_id: _log.error("Third phase, could not make shadow actor %s!" % info['actor_type']) self.actor_map[name + ":" + actor_name] = actor_id self.node.app_manager.add(self.app_id, actor_id)
StopIteration
dataset/ETHPy150Open EricssonResearch/calvin-base/calvin/runtime/north/appmanager.py/Deployer.deploy_unhandled_actors
def set(self, value): """Assign the native and Unicode value. :returns: True if adaptation of *value* was successful. Attempts to adapt the given value and assigns this element's :attr:`~flatland.Element.value` and :attr:`u` attributes in tandem. If adaptation succeeds, ``.value`` will contain the :meth:`adapted<adapt>` native Python value and ``.u`` will contain a Unicode :meth:`serialized<serialize>` version of it. A native value of ``None`` will be represented as ``u''`` in ``.u``. If adaptation fails, ``.value`` will be ``None`` and ``.u`` will contain ``unicode(value)`` or ``u''`` for none. """ try: # adapt and normalize the value, if possible value = self.value = self.adapt(value) except AdaptationError: self.value = None if value is None: self.u = u'' elif isinstance(value, unicode): self.u = value else: try: self.u = unicode(value) except __HOLE__: self.u = unicode(value, errors='replace') return False # stringify it, possibly storing what we received verbatim or a # normalized version of it. if value is None: self.u = u'' else: self.u = self.serialize(value) return True
UnicodeDecodeError
dataset/ETHPy150Open jek/flatland/flatland/schema/scalars.py/Scalar.set
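Scalar.set falls back to a lossy conversion when the masked UnicodeDecodeError fires. A Python 3 sketch of the same fallback using errors='replace' (the flatland element machinery is elided, and utf-8 is an assumed encoding):

def to_text(value):
    if value is None:
        return ''
    if isinstance(value, bytes):
        try:
            return value.decode('utf-8')
        except UnicodeDecodeError:
            # Lossy fallback, mirroring the masked handler above.
            return value.decode('utf-8', errors='replace')
    return str(value)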
def adapt(self, value): """Generic numeric coercion. :returns: an instance of :attr:`type_` or ``None`` Attempt to convert *value* using the class's :attr:`type_` callable. """ if value is None: return None if isinstance(value, basestring): value = value.strip() # decimal.Decimal doesn't like whitespace try: native = self.type_(value) except (ValueError, __HOLE__, ArithmeticError): raise AdaptationError() else: if not self.signed: if native < 0: raise AdaptationError() return native
TypeError
dataset/ETHPy150Open jek/flatland/flatland/schema/scalars.py/Number.adapt
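Number.adapt funnels every plausible conversion failure (ValueError, the masked TypeError, and decimal's ArithmeticError subclasses such as InvalidOperation) into one domain error. A sketch with ValueError standing in for flatland's AdaptationError:

import decimal

def to_number(value, type_=decimal.Decimal, signed=True):
    if value is None:
        return None
    if isinstance(value, str):
        value = value.strip()  # Decimal dislikes surrounding whitespace
    try:
        native = type_(value)
    except (ValueError, TypeError, ArithmeticError):
        raise ValueError('not adaptable: %r' % (value,))
    if not signed and native < 0:
        raise ValueError('negative value for unsigned field')
    return native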
def adapt(self, value): """Coerces value to a native type. If *value* is an instance of :attr:`type_`, returns it unchanged. If a string, attempts to parse it and construct a :attr:`type` as described in the attribute documentation. """ if value is None: return value elif isinstance(value, self.type_): return value elif isinstance(value, basestring): if self.strip: value = value.strip() match = self.regex.match(value) if not match: raise AdaptationError() try: args = [int(match.group(f)) for f in self.used] return self.type_(*args) except (TypeError, __HOLE__): raise AdaptationError() else: raise AdaptationError()
ValueError
dataset/ETHPy150Open jek/flatland/flatland/schema/scalars.py/Temporal.adapt
def test_soft_remove_silent_on_no_file(self): try: util.remove(self.path + 'XXX', True) except __HOLE__: self.fail(u'OSError when removing path')
OSError
dataset/ETHPy150Open beetbox/beets/test/test_files.py/SoftRemoveTest.test_soft_remove_silent_on_no_file
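The test above checks that a "soft remove" swallows OSError when the file is already gone. util.remove itself lives in beets; the helper below is only an illustration of the contract being tested:

import os

def soft_remove(path, silent=False):
    try:
        os.remove(path)
    except OSError:
        if not silent:
            raise  # suppress only when the caller opted in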
def find_modules(modules_dir): try: return [f[:-3] for f in os.listdir(modules_dir) if not f.startswith('_') and f.endswith('.py')] except __HOLE__: return []
OSError
dataset/ETHPy150Open felinx/d3status/d3status/libs/utils.py/find_modules
@exceptions_handled @marshal_with(responses_v2_1.MaintenanceMode) def post(self, maintenance_action, **_): maintenance_file_path = get_maintenance_file_path() if maintenance_action == 'activate': if os.path.isfile(maintenance_file_path): state = utils.read_json_file(maintenance_file_path) return state, 304 now = str(datetime.now()) try: user = rest_security.get_username() except __HOLE__: user = '' remaining_executions = get_running_executions() utils.mkdirs(config.instance().maintenance_folder) new_state = prepare_maintenance_dict( status=MAINTENANCE_MODE_ACTIVATING, activation_requested_at=now, remaining_executions=remaining_executions, requested_by=user) utils.write_dict_to_json_file(maintenance_file_path, new_state) return new_state if maintenance_action == 'deactivate': if not os.path.isfile(maintenance_file_path): return prepare_maintenance_dict( MAINTENANCE_MODE_DEACTIVATED), 304 os.remove(maintenance_file_path) return prepare_maintenance_dict(MAINTENANCE_MODE_DEACTIVATED) valid_actions = ['activate', 'deactivate'] raise BadParametersError( 'Invalid action: {0}, Valid action ' 'values are: {1}'.format(maintenance_action, valid_actions))
AttributeError
dataset/ETHPy150Open cloudify-cosmo/cloudify-manager/rest-service/manager_rest/resources_v2_1.py/MaintenanceModeAction.post
def test_sda(): skip.skip_if_no_data() yaml_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) save_path = os.path.dirname(os.path.realpath(__file__)) train_layer1(yaml_file_path, save_path) train_layer2(yaml_file_path, save_path) train_mlp(yaml_file_path, save_path) try: os.remove("{}/dae_l1.pkl".format(save_path)) os.remove("{}/dae_l2.pkl".format(save_path)) except __HOLE__: pass
OSError
dataset/ETHPy150Open lisa-lab/pylearn2/pylearn2/scripts/tutorials/stacked_autoencoders/tests/test_dae.py/test_sda
def check_pid(self, pid): if os.name == 'nt': try: import ctypes # returns 0 if no such process (of ours) exists # positive int otherwise p = ctypes.windll.kernel32.OpenProcess(1,0,pid) except Exception: self.log.warn( "Could not determine whether pid %i is running via `OpenProcess`. " " Making the likely assumption that it is."%pid ) return True return bool(p) else: try: p = Popen(['ps','x'], stdout=PIPE, stderr=PIPE) output,_ = p.communicate() except __HOLE__: self.log.warn( "Could not determine whether pid %i is running via `ps x`. " " Making the likely assumption that it is."%pid ) return True pids = list(map(int, re.findall(r'^\W*\d+', output, re.MULTILINE))) return pid in pids
OSError
dataset/ETHPy150Open ipython/ipython-py3k/IPython/parallel/apps/baseapp.py/BaseParallelApplication.check_pid
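check_pid assumes a process is alive whenever it cannot ask (OpenProcess or ps x unavailable). A reduced Python 3 sketch of the Unix branch, keeping the same permissive fallback on the masked OSError:

import re
from subprocess import Popen, PIPE

def pid_running(pid):
    try:
        p = Popen(['ps', 'x'], stdout=PIPE, stderr=PIPE)
        output, _ = p.communicate()
    except OSError:
        return True  # cannot tell; assume alive, as the entry above does
    pids = [int(m) for m in re.findall(rb'^\W*\d+', output, re.MULTILINE)]
    return pid in pids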
@title.setter def title(self, value): """Set a sheet title, ensuring it is valid. Limited to 31 characters, no special characters.""" if hasattr(value, "decode"): if not isinstance(value, unicode): try: value = value.decode("ascii") except __HOLE__: raise ValueError("Worksheet titles must be unicode") if self.bad_title_char_re.search(value): msg = 'Invalid character found in sheet title' raise SheetTitleException(msg) sheets = self._parent.get_sheet_names() if self.title is not None and self.title != value: value = avoid_duplicate_name(sheets, value) if len(value) > 31: msg = 'Maximum 31 characters allowed in sheet title' raise SheetTitleException(msg) self._title = value
UnicodeDecodeError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/openpyxl-2.3.0-b2/openpyxl/worksheet/worksheet.py/Worksheet.title
def nrhead(): try: import newrelic.agent except __HOLE__: return '' return newrelic.agent.get_browser_timing_header()
ImportError
dataset/ETHPy150Open jupyter/nbviewer/nbviewer/app.py/nrhead
def nrfoot(): try: import newrelic.agent except __HOLE__: return '' return newrelic.agent.get_browser_timing_footer()
ImportError
dataset/ETHPy150Open jupyter/nbviewer/nbviewer/app.py/nrfoot
def run_cmd(cmd, attempts=1): """ Runs a command attempts times, logging its output. Returns True if it succeeds once, or False if it never does. """ try: for i in range(attempts): proc = subprocess.Popen(cmd, stdin=open(devnull, "r")) proc.wait() if not proc.returncode == 0: print("Command ", str(cmd), " failed") else: print("Command ", str(cmd), " succeeded.") return True sleep(3) return False except __HOLE__ as e: print("Error while attempting to run command ", cmd) print(e)
OSError
dataset/ETHPy150Open CiscoCloud/mantl/testing/build-cluster.py/run_cmd
def _close_callback(self): """Callback called when redis closed the connection. The callback queue is emptied and we call each callback found with None or with an exception object to wake up blocked client. """ while True: try: callback = self.__callback_queue.popleft() callback(ConnectionError("closed connection")) except __HOLE__: break if self.subscribed: # pubsub clients self._reply_list.append(ConnectionError("closed connection")) self._condition.notify_all()
IndexError
dataset/ETHPy150Open thefab/tornadis/tornadis/client.py/Client._close_callback
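The close callback drains a deque by calling popleft until the masked IndexError signals emptiness, waking every pending callback with the same error. A standalone sketch:

import collections

def drain(queue, error):
    # popleft() on an empty deque raises IndexError; use that as the
    # loop terminator instead of checking len() first.
    while True:
        try:
            callback = queue.popleft()
        except IndexError:
            break
        callback(error)

# usage:
#   q = collections.deque([print, print])
#   drain(q, ConnectionError('closed connection'))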