function: string, lengths 79 to 138k
label: string, 20 classes
info: string, lengths 42 to 261
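The three columns above describe a corpus of masked Python exception handlers: function holds a flattened source snippet in which the exception type has been replaced by the placeholder __HOLE__, label holds the exception class that was masked out, and info records the snippet's origin in the ETHPy150Open corpus. As a minimal sketch of how a record can be put back together, the following assumes each record is already available as a plain Python dict with those three keys (how the records are loaded is not specified here); the example row is copied from the ServerOptions.close_fd entry further down.

# Minimal sketch: restore the masked exception type in a single record.
# Assumes records are dicts keyed by the column names shown in the header above.
HOLE = "__HOLE__"

def fill_hole(record):
    """Return the 'function' text with the masked exception type restored."""
    return record["function"].replace(HOLE, record["label"])

example = {
    "function": "def close_fd(self, fd): try: os.close(fd) except __HOLE__: pass",
    "label": "OSError",
    "info": "dataset/ETHPy150Open XiaoMi/minos/supervisor/supervisor/options.py/ServerOptions.close_fd",
}

print(fill_hole(example))
# -> def close_fd(self, fd): try: os.close(fd) except OSError: pass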
def version(self): com_list = [self.xmlsec, "--version"] pof = Popen(com_list, stderr=PIPE, stdout=PIPE) try: return pof.stdout.read().split(" ")[1] except __HOLE__: return ""
IndexError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/pysaml2-2.4.0/src/saml2/sigver.py/CryptoBackendXmlSec1.version
def validate_signature(self, signedtext, cert_file, cert_type, node_name, node_id, id_attr): """ Validate signature on XML document. :param signedtext: The XML document as a string :param cert_file: The public key that was used to sign the document :param cert_type: The file type of the certificate :param node_name: The name of the class that is signed :param node_id: The identifier of the node :param id_attr: Should normally be one of "id", "Id" or "ID" :return: Boolean True if the signature was correct otherwise False. """ _, fil = make_temp(signedtext, suffix=".xml", decode=False, delete=self._xmlsec_delete_tmpfiles) com_list = [self.xmlsec, "--verify", "--pubkey-cert-%s" % cert_type, cert_file, "--id-attr:%s" % id_attr, node_name] if self.debug: com_list.append("--store-signatures") if node_id: com_list.extend(["--node-id", node_id]) if self.__DEBUG: try: print " ".join(com_list) except __HOLE__: print "cert_type", cert_type print "cert_file", cert_file print "node_name", node_name print "fil", fil raise print "%s: %s" % (cert_file, os.access(cert_file, os.F_OK)) print "%s: %s" % (fil, os.access(fil, os.F_OK)) (_stdout, stderr, _output) = self._run_xmlsec(com_list, [fil], exception=SignatureError) return parse_xmlsec_output(stderr)
TypeError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/pysaml2-2.4.0/src/saml2/sigver.py/CryptoBackendXmlSec1.validate_signature
def security_context(conf, debug=None): """ Creates a security context based on the configuration :param conf: The configuration :return: A SecurityContext instance """ if not conf: return None if debug is None: try: debug = conf.debug except AttributeError: pass try: metadata = conf.metadata except __HOLE__: metadata = None _only_md = conf.only_use_keys_in_metadata if _only_md is None: _only_md = False if conf.crypto_backend == 'xmlsec1': xmlsec_binary = conf.xmlsec_binary if not xmlsec_binary: try: _path = conf.xmlsec_path except AttributeError: _path = [] xmlsec_binary = get_xmlsec_binary(_path) # verify that xmlsec is where it's supposed to be if not os.path.exists(xmlsec_binary): #if not os.access(, os.F_OK): raise SigverError( "xmlsec binary not in '%s' !" % xmlsec_binary) crypto = _get_xmlsec_cryptobackend(xmlsec_binary, debug=debug) elif conf.crypto_backend == 'XMLSecurity': # new and somewhat untested pyXMLSecurity crypto backend. crypto = CryptoBackendXMLSecurity(debug=debug) else: raise SigverError('Unknown crypto_backend %s' % ( repr(conf.crypto_backend))) return SecurityContext( crypto, conf.key_file, cert_file=conf.cert_file, metadata=metadata, debug=debug, only_use_keys_in_metadata=_only_md, cert_handler_extra_class=conf.cert_handler_extra_class, generate_cert_info=conf.generate_cert_info, tmp_cert_file=conf.tmp_cert_file, tmp_key_file=conf.tmp_key_file, validate_certificate=conf.validate_certificate, key_file_passphrase=conf.key_file_passphrase)
AttributeError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/pysaml2-2.4.0/src/saml2/sigver.py/security_context
def _check_signature(self, decoded_xml, item, node_name=NODE_NAME, origdoc=None, id_attr="", must=False, only_valid_cert=False): #print item try: issuer = item.issuer.text.strip() except __HOLE__: issuer = None # More trust in certs from metadata then certs in the XML document if self.metadata: try: _certs = self.metadata.certs(issuer, "any", "signing") except KeyError: _certs = [] certs = [] for cert in _certs: if isinstance(cert, basestring): certs.append(make_temp(pem_format(cert), suffix=".pem", decode=False, delete=self._xmlsec_delete_tmpfiles)) else: certs.append(cert) else: certs = [] if not certs and not self.only_use_keys_in_metadata: logger.debug("==== Certs from instance ====") certs = [make_temp(pem_format(cert), suffix=".pem", decode=False, delete=self._xmlsec_delete_tmpfiles) for cert in cert_from_instance(item)] else: logger.debug("==== Certs from metadata ==== %s: %s ====" % (issuer, certs)) if not certs: raise MissingKey("%s" % issuer) #print certs verified = False last_pem_file = None for _, pem_file in certs: try: last_pem_file = pem_file if origdoc is not None: try: if self.verify_signature(origdoc, pem_file, node_name=node_name, node_id=item.id, id_attr=id_attr): verified = True break except Exception: if self.verify_signature(decoded_xml, pem_file, node_name=node_name, node_id=item.id, id_attr=id_attr): verified = True break else: if self.verify_signature(decoded_xml, pem_file, node_name=node_name, node_id=item.id, id_attr=id_attr): verified = True break except XmlsecError, exc: logger.error("check_sig: %s" % exc) pass except SignatureError, exc: logger.error("check_sig: %s" % exc) pass except Exception, exc: logger.error("check_sig: %s" % exc) raise if (not verified) and (not only_valid_cert): raise SignatureError("Failed to verify signature") else: if not self.cert_handler.verify_cert(last_pem_file): raise CertificateError("Invalid certificate!") return item
AttributeError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/pysaml2-2.4.0/src/saml2/sigver.py/SecurityContext._check_signature
def correctly_signed_message(self, decoded_xml, msgtype, must=False, origdoc=None, only_valid_cert=False): """Check if a request is correctly signed, if we have metadata for the entity that sent the info use that, if not use the key that are in the message if any. :param decoded_xml: The SAML message as an XML infoset (a string) :param msgtype: SAML protocol message type :param must: Whether there must be a signature :param origdoc: :return: """ try: _func = getattr(samlp, "%s_from_string" % msgtype) except __HOLE__: _func = getattr(saml, "%s_from_string" % msgtype) msg = _func(decoded_xml) if not msg: raise TypeError("Not a %s" % msgtype) if not msg.signature: if must: raise SignatureError("Required signature missing on %s" % msgtype) else: return msg return self._check_signature(decoded_xml, msg, class_name(msg), origdoc, must=must, only_valid_cert=only_valid_cert)
AttributeError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/pysaml2-2.4.0/src/saml2/sigver.py/SecurityContext.correctly_signed_message
def test_constants_only(): try: from pants.constants_only.constants import VALID_IDENTIFIERS # noqa except __HOLE__ as e: assert False, 'Failed to correctly generate python package: %s' % e
ImportError
dataset/ETHPy150Open pantsbuild/pants/testprojects/tests/python/pants/constants_only/test_constants_only.py/test_constants_only
def test_foreign_key_cross_database_protection(self): "Foreign keys can cross databases if they two databases have a common source" # Create a book and author on the default database pro = Book.objects.using('default').create(title="Pro Django", published=datetime.date(2008, 12, 16)) marty = Person.objects.using('default').create(name="Marty Alchin") # Create a book and author on the other database dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4)) mark = Person.objects.using('other').create(name="Mark Pilgrim") # Set a foreign key with an object from a different database try: dive.editor = marty except ValueError: self.fail("Assignment across primary/replica databases with a common source should be ok") # Database assignments of original objects haven't changed... self.assertEqual(marty._state.db, 'default') self.assertEqual(pro._state.db, 'default') self.assertEqual(dive._state.db, 'other') self.assertEqual(mark._state.db, 'other') # ... but they will when the affected object is saved. dive.save() self.assertEqual(dive._state.db, 'default') # ...and the source database now has a copy of any object saved try: Book.objects.using('default').get(title='Dive into Python').delete() except Book.DoesNotExist: self.fail('Source database should have a copy of saved object') # This isn't a real primary/replica database, so restore the original from other dive = Book.objects.using('other').get(title='Dive into Python') self.assertEqual(dive._state.db, 'other') # Set a foreign key set with an object from a different database try: marty.edited.set([pro, dive], bulk=False) except ValueError: self.fail("Assignment across primary/replica databases with a common source should be ok") # Assignment implies a save, so database assignments of original objects have changed... self.assertEqual(marty._state.db, 'default') self.assertEqual(pro._state.db, 'default') self.assertEqual(dive._state.db, 'default') self.assertEqual(mark._state.db, 'other') # ...and the source database now has a copy of any object saved try: Book.objects.using('default').get(title='Dive into Python').delete() except Book.DoesNotExist: self.fail('Source database should have a copy of saved object') # This isn't a real primary/replica database, so restore the original from other dive = Book.objects.using('other').get(title='Dive into Python') self.assertEqual(dive._state.db, 'other') # Add to a foreign key set with an object from a different database try: marty.edited.add(dive, bulk=False) except __HOLE__: self.fail("Assignment across primary/replica databases with a common source should be ok") # Add implies a save, so database assignments of original objects have changed... self.assertEqual(marty._state.db, 'default') self.assertEqual(pro._state.db, 'default') self.assertEqual(dive._state.db, 'default') self.assertEqual(mark._state.db, 'other') # ...and the source database now has a copy of any object saved try: Book.objects.using('default').get(title='Dive into Python').delete() except Book.DoesNotExist: self.fail('Source database should have a copy of saved object') # This isn't a real primary/replica database, so restore the original from other dive = Book.objects.using('other').get(title='Dive into Python') # If you assign a FK object when the base object hasn't # been saved yet, you implicitly assign the database for the # base object. 
chris = Person(name="Chris Mills") html5 = Book(title="Dive into HTML5", published=datetime.date(2010, 3, 15)) # initially, no db assigned self.assertEqual(chris._state.db, None) self.assertEqual(html5._state.db, None) # old object comes from 'other', so the new object is set to use the # source of 'other'... self.assertEqual(dive._state.db, 'other') chris.save() dive.editor = chris html5.editor = mark self.assertEqual(dive._state.db, 'other') self.assertEqual(mark._state.db, 'other') self.assertEqual(chris._state.db, 'default') self.assertEqual(html5._state.db, 'default') # This also works if you assign the FK in the constructor water = Book(title="Dive into Water", published=datetime.date(2001, 1, 1), editor=mark) self.assertEqual(water._state.db, 'default') # For the remainder of this test, create a copy of 'mark' in the # 'default' database to prevent integrity errors on backends that # don't defer constraints checks until the end of the transaction mark.save(using='default') # This moved 'mark' in the 'default' database, move it back in 'other' mark.save(using='other') self.assertEqual(mark._state.db, 'other') # If you create an object through a FK relation, it will be # written to the write database, even if the original object # was on the read database cheesecake = mark.edited.create(title='Dive into Cheesecake', published=datetime.date(2010, 3, 15)) self.assertEqual(cheesecake._state.db, 'default') # Same goes for get_or_create, regardless of whether getting or creating cheesecake, created = mark.edited.get_or_create( title='Dive into Cheesecake', published=datetime.date(2010, 3, 15), ) self.assertEqual(cheesecake._state.db, 'default') puddles, created = mark.edited.get_or_create(title='Dive into Puddles', published=datetime.date(2010, 3, 15)) self.assertEqual(puddles._state.db, 'default')
ValueError
dataset/ETHPy150Open django/django/tests/multiple_database/tests.py/RouterTestCase.test_foreign_key_cross_database_protection
def test_m2m_cross_database_protection(self): "M2M relations can cross databases if the database share a source" # Create books and authors on the inverse to the usual database pro = Book.objects.using('other').create(pk=1, title="Pro Django", published=datetime.date(2008, 12, 16)) marty = Person.objects.using('other').create(pk=1, name="Marty Alchin") dive = Book.objects.using('default').create(pk=2, title="Dive into Python", published=datetime.date(2009, 5, 4)) mark = Person.objects.using('default').create(pk=2, name="Mark Pilgrim") # Now save back onto the usual database. # This simulates primary/replica - the objects exist on both database, # but the _state.db is as it is for all other tests. pro.save(using='default') marty.save(using='default') dive.save(using='other') mark.save(using='other') # Check that we have 2 of both types of object on both databases self.assertEqual(Book.objects.using('default').count(), 2) self.assertEqual(Book.objects.using('other').count(), 2) self.assertEqual(Person.objects.using('default').count(), 2) self.assertEqual(Person.objects.using('other').count(), 2) # Set a m2m set with an object from a different database try: marty.book_set.set([pro, dive]) except ValueError: self.fail("Assignment across primary/replica databases with a common source should be ok") # Database assignments don't change self.assertEqual(marty._state.db, 'default') self.assertEqual(pro._state.db, 'default') self.assertEqual(dive._state.db, 'other') self.assertEqual(mark._state.db, 'other') # All m2m relations should be saved on the default database self.assertEqual(Book.authors.through.objects.using('default').count(), 2) self.assertEqual(Book.authors.through.objects.using('other').count(), 0) # Reset relations Book.authors.through.objects.using('default').delete() # Add to an m2m with an object from a different database try: marty.book_set.add(dive) except ValueError: self.fail("Assignment across primary/replica databases with a common source should be ok") # Database assignments don't change self.assertEqual(marty._state.db, 'default') self.assertEqual(pro._state.db, 'default') self.assertEqual(dive._state.db, 'other') self.assertEqual(mark._state.db, 'other') # All m2m relations should be saved on the default database self.assertEqual(Book.authors.through.objects.using('default').count(), 1) self.assertEqual(Book.authors.through.objects.using('other').count(), 0) # Reset relations Book.authors.through.objects.using('default').delete() # Set a reverse m2m with an object from a different database try: dive.authors.set([mark, marty]) except ValueError: self.fail("Assignment across primary/replica databases with a common source should be ok") # Database assignments don't change self.assertEqual(marty._state.db, 'default') self.assertEqual(pro._state.db, 'default') self.assertEqual(dive._state.db, 'other') self.assertEqual(mark._state.db, 'other') # All m2m relations should be saved on the default database self.assertEqual(Book.authors.through.objects.using('default').count(), 2) self.assertEqual(Book.authors.through.objects.using('other').count(), 0) # Reset relations Book.authors.through.objects.using('default').delete() self.assertEqual(Book.authors.through.objects.using('default').count(), 0) self.assertEqual(Book.authors.through.objects.using('other').count(), 0) # Add to a reverse m2m with an object from a different database try: dive.authors.add(marty) except __HOLE__: self.fail("Assignment across primary/replica databases with a common source should be ok") # Database 
assignments don't change self.assertEqual(marty._state.db, 'default') self.assertEqual(pro._state.db, 'default') self.assertEqual(dive._state.db, 'other') self.assertEqual(mark._state.db, 'other') # All m2m relations should be saved on the default database self.assertEqual(Book.authors.through.objects.using('default').count(), 1) self.assertEqual(Book.authors.through.objects.using('other').count(), 0) # If you create an object through a M2M relation, it will be # written to the write database, even if the original object # was on the read database alice = dive.authors.create(name='Alice') self.assertEqual(alice._state.db, 'default') # Same goes for get_or_create, regardless of whether getting or creating alice, created = dive.authors.get_or_create(name='Alice') self.assertEqual(alice._state.db, 'default') bob, created = dive.authors.get_or_create(name='Bob') self.assertEqual(bob._state.db, 'default')
ValueError
dataset/ETHPy150Open django/django/tests/multiple_database/tests.py/RouterTestCase.test_m2m_cross_database_protection
def test_o2o_cross_database_protection(self): "Operations that involve sharing FK objects across databases raise an error" # Create a user and profile on the default database alice = User.objects.db_manager('default').create_user('alice', 'alice@example.com') # Create a user and profile on the other database bob = User.objects.db_manager('other').create_user('bob', 'bob@example.com') # Set a one-to-one relation with an object from a different database alice_profile = UserProfile.objects.create(user=alice, flavor='chocolate') try: bob.userprofile = alice_profile except __HOLE__: self.fail("Assignment across primary/replica databases with a common source should be ok") # Database assignments of original objects haven't changed... self.assertEqual(alice._state.db, 'default') self.assertEqual(alice_profile._state.db, 'default') self.assertEqual(bob._state.db, 'other') # ... but they will when the affected object is saved. bob.save() self.assertEqual(bob._state.db, 'default')
ValueError
dataset/ETHPy150Open django/django/tests/multiple_database/tests.py/RouterTestCase.test_o2o_cross_database_protection
def test_generic_key_cross_database_protection(self): "Generic Key operations can span databases if they share a source" # Create a book and author on the default database pro = Book.objects.using( 'default').create(title="Pro Django", published=datetime.date(2008, 12, 16)) review1 = Review.objects.using( 'default').create(source="Python Monthly", content_object=pro) # Create a book and author on the other database dive = Book.objects.using( 'other').create(title="Dive into Python", published=datetime.date(2009, 5, 4)) review2 = Review.objects.using( 'other').create(source="Python Weekly", content_object=dive) # Set a generic foreign key with an object from a different database try: review1.content_object = dive except ValueError: self.fail("Assignment across primary/replica databases with a common source should be ok") # Database assignments of original objects haven't changed... self.assertEqual(pro._state.db, 'default') self.assertEqual(review1._state.db, 'default') self.assertEqual(dive._state.db, 'other') self.assertEqual(review2._state.db, 'other') # ... but they will when the affected object is saved. dive.save() self.assertEqual(review1._state.db, 'default') self.assertEqual(dive._state.db, 'default') # ...and the source database now has a copy of any object saved try: Book.objects.using('default').get(title='Dive into Python').delete() except Book.DoesNotExist: self.fail('Source database should have a copy of saved object') # This isn't a real primary/replica database, so restore the original from other dive = Book.objects.using('other').get(title='Dive into Python') self.assertEqual(dive._state.db, 'other') # Add to a generic foreign key set with an object from a different database try: dive.reviews.add(review1) except __HOLE__: self.fail("Assignment across primary/replica databases with a common source should be ok") # Database assignments of original objects haven't changed... self.assertEqual(pro._state.db, 'default') self.assertEqual(review1._state.db, 'default') self.assertEqual(dive._state.db, 'other') self.assertEqual(review2._state.db, 'other') # ... but they will when the affected object is saved. dive.save() self.assertEqual(dive._state.db, 'default') # ...and the source database now has a copy of any object saved try: Book.objects.using('default').get(title='Dive into Python').delete() except Book.DoesNotExist: self.fail('Source database should have a copy of saved object') # BUT! if you assign a FK object when the base object hasn't # been saved yet, you implicitly assign the database for the # base object. review3 = Review(source="Python Daily") # initially, no db assigned self.assertEqual(review3._state.db, None) # Dive comes from 'other', so review3 is set to use the source of 'other'... review3.content_object = dive self.assertEqual(review3._state.db, 'default') # If you create an object through a M2M relation, it will be # written to the write database, even if the original object # was on the read database dive = Book.objects.using('other').get(title='Dive into Python') nyt = dive.reviews.create(source="New York Times", content_object=dive) self.assertEqual(nyt._state.db, 'default')
ValueError
dataset/ETHPy150Open django/django/tests/multiple_database/tests.py/RouterTestCase.test_generic_key_cross_database_protection
def get_owtf_transactions(self, hash_list): transactions_dict = None target_list = self.target.GetIndexedTargets() if target_list: # If there are no targets in db, where are we going to add. OMG transactions_dict = {} host_list = self.target.GetAllInScope('host_name') for request_hash in hash_list: request = request_from_cache(os.path.join(self.cache_dir, request_hash)) response = response_from_cache(os.path.join(self.cache_dir, request_hash)) target_id, request.in_scope = self.derive_target_for_transaction(request, response, target_list, host_list) owtf_transaction = transaction.HTTP_Transaction(timer.Timer()) owtf_transaction.ImportProxyRequestResponse(request, response) try: transactions_dict[target_id].append(owtf_transaction) except __HOLE__: transactions_dict[target_id] = [owtf_transaction] return(transactions_dict)
KeyError
dataset/ETHPy150Open owtf/owtf/framework/http/proxy/transaction_logger.py/TransactionLogger.get_owtf_transactions
def pseudo_run(self): try: while self.poison_q.empty(): if glob.glob(os.path.join(self.cache_dir, "*.rd")): hash_list = self.get_hash_list(self.cache_dir) transactions_dict = self.get_owtf_transactions(hash_list) if transactions_dict: # Make sure you donot have None self.transaction.LogTransactionsFromLogger(transactions_dict) else: time.sleep(2) except __HOLE__: exit(-1)
KeyboardInterrupt
dataset/ETHPy150Open owtf/owtf/framework/http/proxy/transaction_logger.py/TransactionLogger.pseudo_run
def realize(self, args=None, doc=None, progname=None, raise_getopt_errs=True): """Realize a configuration. Optional arguments: args -- the command line arguments, less the program name (default is sys.argv[1:]) doc -- usage message (default is __main__.__doc__) """ # Provide dynamic default method arguments if args is None: args = sys.argv[1:] if progname is None: progname = sys.argv[0] if doc is None: import __main__ doc = __main__.__doc__ self.progname = progname self.doc = doc self.options = [] self.args = [] # Call getopt try: self.options, self.args = getopt.getopt( args, "".join(self.short_options), self.long_options) except getopt.error, msg: if raise_getopt_errs: self.usage(msg) # Check for positional args if self.args and not self.positional_args_allowed: self.usage("positional arguments are not supported") # Process options returned by getopt for opt, arg in self.options: name, handler = self.options_map[opt] if handler is not None: try: arg = handler(arg) except __HOLE__, msg: self.usage("invalid value for %s %r: %s" % (opt, arg, msg)) if name and arg is not None: if getattr(self, name) is not None: self.usage("conflicting command line option %r" % opt) self._set(name, arg, 2) # Process environment variables for envvar in self.environ_map.keys(): name, handler = self.environ_map[envvar] if os.environ.has_key(envvar): value = os.environ[envvar] if handler is not None: try: value = handler(value) except ValueError, msg: self.usage("invalid environment value for %s %r: %s" % (envvar, value, msg)) if name and value is not None: self._set(name, value, 1) if self.configfile is None: if os.getuid() == 0 and self.progname.find("supervisord") > -1: # pragma: no cover self.warnings.warn( 'Supervisord is running as root and it is searching ' 'for its configuration file in default locations ' '(including its current working directory); you ' 'probably want to specify a "-c" argument specifying an ' 'absolute path to a configuration file for improved ' 'security.' ) self.configfile = self.default_configfile() self.process_config_file()
ValueError
dataset/ETHPy150Open XiaoMi/minos/supervisor/supervisor/options.py/Options.realize
def process_config_file(self, do_usage=True): """Process config file.""" if not hasattr(self.configfile, 'read'): self.here = os.path.abspath(os.path.dirname(self.configfile)) set_here(self.here) try: self.read_config(self.configfile) except __HOLE__, msg: if do_usage: # if this is not called from an RPC method, run usage and exit. self.usage(str(msg)) else: # if this is called from an RPC method, raise an error raise ValueError(msg) # Copy config options to attributes of self. This only fills # in options that aren't already set from the command line. for name, confname in self.names_list: if confname: parts = confname.split(".") obj = self.configroot for part in parts: if obj is None: break # Here AttributeError is not a user error! obj = getattr(obj, part) self._set(name, obj, 0) # Process defaults for name, value in self.default_map.items(): if getattr(self, name) is None: setattr(self, name, value) # Process required options for name, message in self.required_map.items(): if getattr(self, name) is None: self.usage(message)
ValueError
dataset/ETHPy150Open XiaoMi/minos/supervisor/supervisor/options.py/Options.process_config_file
def get_plugins(self, parser, factory_key, section_prefix): factories = [] for section in parser.sections(): if not section.startswith(section_prefix): continue name = section.split(':', 1)[1] factory_spec = parser.saneget(section, factory_key, None) if factory_spec is None: raise ValueError('section [%s] does not specify a %s' % (section, factory_key)) try: factory = self.import_spec(factory_spec) except __HOLE__: raise ValueError('%s cannot be resolved within [%s]' % ( factory_spec, section)) items = parser.items(section) items.remove((factory_key, factory_spec)) factories.append((name, factory, dict(items))) return factories
ImportError
dataset/ETHPy150Open XiaoMi/minos/supervisor/supervisor/options.py/Options.get_plugins
def read_config(self, fp): # Clear parse warnings, since we may be re-reading the # config a second time after a reload. self.parse_warnings = [] section = self.configroot.supervisord if not hasattr(fp, 'read'): try: fp = open(fp, 'r') except (IOError, __HOLE__): raise ValueError("could not find config file %s" % fp) parser = UnhosedConfigParser() try: parser.readfp(fp) except ConfigParser.ParsingError, why: raise ValueError(str(why)) if parser.has_section('include'): if not parser.has_option('include', 'files'): raise ValueError(".ini file has [include] section, but no " "files setting") files = parser.get('include', 'files') files = files.split() if hasattr(fp, 'name'): base = os.path.dirname(os.path.abspath(fp.name)) else: base = '.' for pattern in files: pattern = os.path.join(base, pattern) for filename in glob.glob(pattern): self.parse_warnings.append( 'Included extra file "%s" during parsing' % filename) try: parser.read(filename) except ConfigParser.ParsingError, why: raise ValueError(str(why)) sections = parser.sections() if not 'supervisord' in sections: raise ValueError, '.ini file does not include supervisord section' get = parser.getdefault section.minfds = integer(get('minfds', 1024)) section.minprocs = integer(get('minprocs', 200)) directory = get('directory', None) if directory is None: section.directory = None else: section.directory = existing_directory(directory) section.user = get('user', None) section.umask = octal_type(get('umask', '022')) section.logfile = existing_dirpath(get('logfile', 'supervisord.log')) section.logfile_maxbytes = byte_size(get('logfile_maxbytes', '50MB')) section.logfile_backups = integer(get('logfile_backups', 10)) section.loglevel = logging_level(get('loglevel', 'info')) section.pidfile = existing_dirpath(get('pidfile', 'supervisord.pid')) section.subprocpidfile = existing_dirpath(get('subprocpidfile', '')) section.identifier = get('identifier', 'supervisor') section.nodaemon = boolean(get('nodaemon', 'false')) tempdir = tempfile.gettempdir() section.childlogdir = existing_directory(get('childlogdir', tempdir)) section.nocleanup = boolean(get('nocleanup', 'false')) section.strip_ansi = boolean(get('strip_ansi', 'false')) expansions = {'here':self.here} expansions.update(environ_expansions()) environ_str = get('environment', '') environ_str = expand(environ_str, expansions, 'environment') section.environment = dict_of_key_value_pairs(environ_str) # Process rpcinterface plugins before groups to allow custom events to # be registered. section.rpcinterface_factories = self.get_plugins( parser, 'supervisor.rpcinterface_factory', 'rpcinterface:' ) section.process_group_configs = self.process_groups_from_parser(parser) for group in section.process_group_configs: for proc in group.process_configs: env = section.environment.copy() env.update(proc.environment) proc.environment = env section.server_configs = self.server_configs_from_parser(parser) section.profile_options = None return section
OSError
dataset/ETHPy150Open XiaoMi/minos/supervisor/supervisor/options.py/ServerOptions.read_config
def process_groups_from_parser(self, parser): groups = [] all_sections = parser.sections() homogeneous_exclude = [] get = parser.saneget # process heterogeneous groups for section in all_sections: if not section.startswith('group:'): continue group_name = section.split(':', 1)[1] programs = list_of_strings(get(section, 'programs', None)) priority = integer(get(section, 'priority', 999)) group_processes = [] for program in programs: program_section = "program:%s" % program if not program_section in all_sections: raise ValueError( '[%s] names unknown program %s' % (section, program)) homogeneous_exclude.append(program_section) processes = self.processes_from_section(parser, program_section, group_name, ProcessConfig) group_processes.extend(processes) groups.append( ProcessGroupConfig(self, group_name, priority, group_processes) ) # process "normal" homogeneous groups for section in all_sections: if ( (not section.startswith('program:') ) or section in homogeneous_exclude ): continue program_name = section.split(':', 1)[1] priority = integer(get(section, 'priority', 999)) processes=self.processes_from_section(parser, section, program_name, ProcessConfig) groups.append( ProcessGroupConfig(self, program_name, priority, processes) ) # process "event listener" homogeneous groups for section in all_sections: if not section.startswith('eventlistener:'): continue pool_name = section.split(':', 1)[1] # give listeners a "high" default priority so they are started first # and stopped last at mainloop exit priority = integer(get(section, 'priority', -1)) buffer_size = integer(get(section, 'buffer_size', 10)) result_handler = get(section, 'result_handler', 'supervisor.dispatchers:default_handler') try: result_handler = self.import_spec(result_handler) except ImportError: raise ValueError('%s cannot be resolved within [%s]' % ( result_handler, section)) pool_event_names = [x.upper() for x in list_of_strings(get(section, 'events', ''))] pool_event_names = set(pool_event_names) if not pool_event_names: raise ValueError('[%s] section requires an "events" line' % section) from supervisor.events import EventTypes pool_events = [] for pool_event_name in pool_event_names: pool_event = getattr(EventTypes, pool_event_name, None) if pool_event is None: raise ValueError('Unknown event type %s in [%s] events' % (pool_event_name, section)) pool_events.append(pool_event) processes=self.processes_from_section(parser, section, pool_name, EventListenerConfig) groups.append( EventListenerPoolConfig(self, pool_name, priority, processes, buffer_size, pool_events, result_handler) ) # process fastcgi homogeneous groups for section in all_sections: if ( (not section.startswith('fcgi-program:') ) or section in homogeneous_exclude ): continue program_name = section.split(':', 1)[1] priority = integer(get(section, 'priority', 999)) proc_uid = name_to_uid(get(section, 'user', None)) socket_owner = get(section, 'socket_owner', None) if socket_owner is not None: try: socket_owner = colon_separated_user_group(socket_owner) except ValueError: raise ValueError('Invalid socket_owner value %s' % socket_owner) socket_mode = get(section, 'socket_mode', None) if socket_mode is not None: try: socket_mode = octal_type(socket_mode) except (TypeError, ValueError): raise ValueError('Invalid socket_mode value %s' % socket_mode) socket = get(section, 'socket', None) if not socket: raise ValueError('[%s] section requires a "socket" line' % section) expansions = {'here':self.here, 'program_name':program_name} expansions.update(environ_expansions()) 
socket = expand(socket, expansions, 'socket') try: socket_config = self.parse_fcgi_socket(socket, proc_uid, socket_owner, socket_mode) except __HOLE__, e: raise ValueError('%s in [%s] socket' % (str(e), section)) processes=self.processes_from_section(parser, section, program_name, FastCGIProcessConfig) groups.append( FastCGIGroupConfig(self, program_name, priority, processes, socket_config) ) groups.sort() return groups
ValueError
dataset/ETHPy150Open XiaoMi/minos/supervisor/supervisor/options.py/ServerOptions.process_groups_from_parser
def server_configs_from_parser(self, parser): configs = [] inet_serverdefs = self._parse_servernames(parser, 'inet_http_server') for name, section in inet_serverdefs: config = {} get = parser.saneget config.update(self._parse_username_and_password(parser, section)) config['name'] = name config['family'] = socket.AF_INET port = get(section, 'port', None) if port is None: raise ValueError('section [%s] has no port value' % section) host, port = inet_address(port) config['host'] = host config['port'] = port config['section'] = section configs.append(config) unix_serverdefs = self._parse_servernames(parser, 'unix_http_server') for name, section in unix_serverdefs: config = {} get = parser.saneget sfile = get(section, 'file', None) if sfile is None: raise ValueError('section [%s] has no file value' % section) sfile = sfile.strip() config['name'] = name config['family'] = socket.AF_UNIX sfile = expand(sfile, {'here':self.here}, 'socket file') config['file'] = normalize_path(sfile) config.update(self._parse_username_and_password(parser, section)) chown = get(section, 'chown', None) if chown is not None: try: chown = colon_separated_user_group(chown) except ValueError: raise ValueError('Invalid sockchown value %s' % chown) else: chown = (-1, -1) config['chown'] = chown chmod = get(section, 'chmod', None) if chmod is not None: try: chmod = octal_type(chmod) except (__HOLE__, ValueError): raise ValueError('Invalid chmod value %s' % chmod) else: chmod = 0700 config['chmod'] = chmod config['section'] = section configs.append(config) return configs
TypeError
dataset/ETHPy150Open XiaoMi/minos/supervisor/supervisor/options.py/ServerOptions.server_configs_from_parser
def daemonize(self): # To daemonize, we need to become the leader of our own session # (process) group. If we do not, signals sent to our # parent process will also be sent to us. This might be bad because # signals such as SIGINT can be sent to our parent process during # normal (uninteresting) operations such as when we press Ctrl-C in the # parent terminal window to escape from a logtail command. # To disassociate ourselves from our parent's session group we use # os.setsid. It means "set session id", which has the effect of # disassociating a process from is current session and process group # and setting itself up as a new session leader. # # Unfortunately we cannot call setsid if we're already a session group # leader, so we use "fork" to make a copy of ourselves that is # guaranteed to not be a session group leader. # # We also change directories, set stderr and stdout to null, and # change our umask. # # This explanation was (gratefully) garnered from # http://www.hawklord.uklinux.net/system/daemons/d3.htm pid = os.fork() if pid != 0: # Parent self.logger.blather("supervisord forked; parent exiting") os._exit(0) # Child self.logger.info("daemonizing the supervisord process") if self.directory: try: os.chdir(self.directory) except __HOLE__, err: self.logger.critical("can't chdir into %r: %s" % (self.directory, err)) else: self.logger.info("set current directory: %r" % self.directory) os.close(0) self.stdin = sys.stdin = sys.__stdin__ = open("/dev/null") os.close(1) self.stdout = sys.stdout = sys.__stdout__ = open("/dev/null", "w") os.close(2) self.stderr = sys.stderr = sys.__stderr__ = open("/dev/null", "w") os.setsid() os.umask(self.umask) # XXX Stevens, in his Advanced Unix book, section 13.3 (page # 417) recommends calling umask(0) and closing unused # file descriptors. In his Network Programming book, he # additionally recommends ignoring SIGHUP and forking again # after the setsid() call, for obscure SVR4 reasons.
OSError
dataset/ETHPy150Open XiaoMi/minos/supervisor/supervisor/options.py/ServerOptions.daemonize
def write_pidfile(self): pid = os.getpid() try: f = open(self.pidfile, 'w') f.write('%s\n' % pid) f.close() except (IOError, __HOLE__): self.logger.critical('could not write pidfile %s' % self.pidfile) else: self.logger.info('supervisord started with pid %s' % pid)
OSError
dataset/ETHPy150Open XiaoMi/minos/supervisor/supervisor/options.py/ServerOptions.write_pidfile
def write_subproc_pidfile(self): if not self.subprocpidfile: return try: f = open(self.subprocpidfile, 'w') for pid, process in self.pidhistory.iteritems(): f.write('%s %d %d\n' % (process.config.name, pid, process.laststart)) f.close() except (IOError, __HOLE__): self.logger.critical('could not write sub-process pidfile %s' % self.subprocpidfile) else: self.logger.info('supervisord wrote sub-process pidfile')
OSError
dataset/ETHPy150Open XiaoMi/minos/supervisor/supervisor/options.py/ServerOptions.write_subproc_pidfile
def load_subproc_pidfile(self, process_groups): if not self.subprocpidfile: return resumed_processes = {} try: f = open(self.subprocpidfile, 'r') for line in f: process_name, pid, laststart = line.split() pid = int(pid) laststart = int(laststart) try: os.kill(pid, 0) except: self.logger.info( "pid doesn't exist, can't resume '%s' with pid %d" % (process_name, pid)) else: self.logger.info( "would resume process '%s' with pid %d later" % (process_name, pid)) resumed_processes[process_name] = (pid, laststart) f.close() except (IOError, __HOLE__, ValueError) as e: self.logger.warn('could not load sub-process pidfile %s' % self.subprocpidfile) print type(e) else: self.logger.info('supervisord load sub-process pidfile') for group in process_groups.itervalues(): for process in group.processes.itervalues(): process_name = process.config.name if process_name in resumed_processes: process.pid, process.laststart = resumed_processes[process_name] process.resumed = True process.change_state(ProcessStates.RUNNING) self.add_process(process) del resumed_processes[process_name] self.resumed_pids.add(process.pid) self.logger.info( "success: resumed process '%s' with pid %d" % (process_name, process.pid))
OSError
dataset/ETHPy150Open XiaoMi/minos/supervisor/supervisor/options.py/ServerOptions.load_subproc_pidfile
def cleanup(self): try: for config, server in self.httpservers: if config['family'] == socket.AF_UNIX: if self.unlink_socketfiles: socketname = config['file'] try: os.unlink(socketname) except OSError: pass except OSError: pass try: os.unlink(self.pidfile) except __HOLE__: pass
OSError
dataset/ETHPy150Open XiaoMi/minos/supervisor/supervisor/options.py/ServerOptions.cleanup
def openhttpservers(self, supervisord): try: self.httpservers = self.make_http_servers(supervisord) except socket.error, why: if why[0] == errno.EADDRINUSE: self.usage('Another program is already listening on ' 'a port that one of our HTTP servers is ' 'configured to use. Shut this program ' 'down first before starting supervisord.') else: help = 'Cannot open an HTTP server: socket.error reported' errorname = errno.errorcode.get(why[0]) if errorname is None: self.usage('%s %s' % (help, why[0])) else: self.usage('%s errno.%s (%d)' % (help, errorname, why[0])) self.unlink_socketfiles = False except __HOLE__, why: self.usage(why[0])
ValueError
dataset/ETHPy150Open XiaoMi/minos/supervisor/supervisor/options.py/ServerOptions.openhttpservers
def clear_autochildlogdir(self): # must be called after realize() childlogdir = self.childlogdir fnre = re.compile(r'.+?---%s-\S+\.log\.{0,1}\d{0,4}' % self.identifier) try: filenames = os.listdir(childlogdir) except (__HOLE__, OSError): self.logger.warn('Could not clear childlog dir') return for filename in filenames: if fnre.match(filename): pathname = os.path.join(childlogdir, filename) try: os.remove(pathname) except (OSError, IOError): self.logger.warn('Failed to clean up %r' % pathname)
IOError
dataset/ETHPy150Open XiaoMi/minos/supervisor/supervisor/options.py/ServerOptions.clear_autochildlogdir
def cleanup_fds(self): # try to close any leaked file descriptors (for reload) start = 5 for x in range(start, self.minfds): try: os.close(x) except __HOLE__: pass
OSError
dataset/ETHPy150Open XiaoMi/minos/supervisor/supervisor/options.py/ServerOptions.cleanup_fds
def dropPrivileges(self, user): # Drop root privileges if we have them if user is None: return "No user specified to setuid to!" if os.getuid() != 0: return "Can't drop privilege as nonroot user" try: uid = int(user) except ValueError: try: pwrec = pwd.getpwnam(user) except KeyError: return "Can't find username %r" % user uid = pwrec[2] else: try: pwrec = pwd.getpwuid(uid) except KeyError: return "Can't find uid %r" % uid gid = pwrec[3] if hasattr(os, 'setgroups'): user = pwrec[0] groups = [grprec[2] for grprec in grp.getgrall() if user in grprec[3]] # always put our primary gid first in this list, otherwise we can # lose group info since sometimes the first group in the setgroups # list gets overwritten on the subsequent setgid call (at least on # freebsd 9 with python 2.7 - this will be safe though for all unix # /python version combos) groups.insert(0, gid) try: os.setgroups(groups) except OSError: return 'Could not set groups of effective user' try: os.setgid(gid) except __HOLE__: return 'Could not set group id of effective user' os.setuid(uid)
OSError
dataset/ETHPy150Open XiaoMi/minos/supervisor/supervisor/options.py/ServerOptions.dropPrivileges
def waitpid(self): # firstly send a signal to all resumed processes to check if they are # still running. resumed process is NOT spawned child process of # supervisord, so the os.waitpid doesn't work. for pid in self.resumed_pids: try: os.kill(pid, 0) except: # got an exception, we blindly consider the process has exited. self.resumed_pids.remove(pid) return pid, 0 # need pthread_sigmask here to avoid concurrent sigchild, but # Python doesn't offer it as it's not standard across UNIX versions. # there is still a race condition here; we can get a sigchild while # we're sitting in the waitpid call. try: pid, sts = os.waitpid(-1, os.WNOHANG) except __HOLE__, why: err = why[0] if err not in (errno.ECHILD, errno.EINTR): self.logger.critical( 'waitpid error; a process may not be cleaned up properly') if err == errno.EINTR: self.logger.blather('EINTR during reap') pid, sts = None, None return pid, sts
OSError
dataset/ETHPy150Open XiaoMi/minos/supervisor/supervisor/options.py/ServerOptions.waitpid
def set_rlimits(self): limits = [] if hasattr(resource, 'RLIMIT_NOFILE'): limits.append( { 'msg':('The minimum number of file descriptors required ' 'to run this process is %(min)s as per the "minfds" ' 'command-line argument or config file setting. ' 'The current environment will only allow you ' 'to open %(hard)s file descriptors. Either raise ' 'the number of usable file descriptors in your ' 'environment (see README.rst) or lower the ' 'minfds setting in the config file to allow ' 'the process to start.'), 'min':self.minfds, 'resource':resource.RLIMIT_NOFILE, 'name':'RLIMIT_NOFILE', }) if hasattr(resource, 'RLIMIT_NPROC'): limits.append( { 'msg':('The minimum number of available processes required ' 'to run this program is %(min)s as per the "minprocs" ' 'command-line argument or config file setting. ' 'The current environment will only allow you ' 'to open %(hard)s processes. Either raise ' 'the number of usable processes in your ' 'environment (see README.rst) or lower the ' 'minprocs setting in the config file to allow ' 'the program to start.'), 'min':self.minprocs, 'resource':resource.RLIMIT_NPROC, 'name':'RLIMIT_NPROC', }) msgs = [] for limit in limits: min = limit['min'] res = limit['resource'] msg = limit['msg'] name = limit['name'] soft, hard = resource.getrlimit(res) if (soft < min) and (soft != -1): # -1 means unlimited if (hard < min) and (hard != -1): # setrlimit should increase the hard limit if we are # root, if not then setrlimit raises and we print usage hard = min try: resource.setrlimit(res, (min, hard)) msgs.append('Increased %(name)s limit to %(min)s' % locals()) except (resource.error, __HOLE__): self.usage(msg % locals()) return msgs
ValueError
dataset/ETHPy150Open XiaoMi/minos/supervisor/supervisor/options.py/ServerOptions.set_rlimits
def close_fd(self, fd): try: os.close(fd) except __HOLE__: pass
OSError
dataset/ETHPy150Open XiaoMi/minos/supervisor/supervisor/options.py/ServerOptions.close_fd
def readfd(self, fd): try: data = os.read(fd, 2 << 16) # 128K except __HOLE__, why: if why[0] not in (errno.EWOULDBLOCK, errno.EBADF, errno.EINTR): raise data = '' return data
OSError
dataset/ETHPy150Open XiaoMi/minos/supervisor/supervisor/options.py/ServerOptions.readfd
def make_pipes(self, stderr=True): """ Create pipes for parent to child stdin/stdout/stderr communications. Open fd in nonblocking mode so we can read them in the mainloop without blocking. If stderr is False, don't create a pipe for stderr. """ pipes = {'child_stdin':None, 'stdin':None, 'stdout':None, 'child_stdout':None, 'stderr':None, 'child_stderr':None} try: stdin, child_stdin = os.pipe() pipes['child_stdin'], pipes['stdin'] = stdin, child_stdin stdout, child_stdout = os.pipe() pipes['stdout'], pipes['child_stdout'] = stdout, child_stdout if stderr: stderr, child_stderr = os.pipe() pipes['stderr'], pipes['child_stderr'] = stderr, child_stderr for fd in (pipes['stdout'], pipes['stderr'], pipes['stdin']): if fd is not None: fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | os.O_NDELAY) return pipes except __HOLE__: for fd in pipes.values(): if fd is not None: self.close_fd(fd)
OSError
dataset/ETHPy150Open XiaoMi/minos/supervisor/supervisor/options.py/ServerOptions.make_pipes
def read_config(self, fp): section = self.configroot.supervisorctl if not hasattr(fp, 'read'): self.here = os.path.dirname(normalize_path(fp)) try: fp = open(fp, 'r') except (IOError, __HOLE__): raise ValueError("could not find config file %s" % fp) config = UnhosedConfigParser() config.mysection = 'supervisorctl' config.readfp(fp) sections = config.sections() if not 'supervisorctl' in sections: raise ValueError,'.ini file does not include supervisorctl section' serverurl = config.getdefault('serverurl', 'http://localhost:9001') if serverurl.startswith('unix://'): sf = serverurl[7:] path = expand(sf, {'here':self.here}, 'serverurl') path = normalize_path(path) serverurl = 'unix://%s' % path section.serverurl = serverurl # The defaults used below are really set in __init__ (since # section==self.configroot.supervisorctl) section.prompt = config.getdefault('prompt', section.prompt) section.username = config.getdefault('username', section.username) section.password = config.getdefault('password', section.password) history_file = config.getdefault('history_file', section.history_file) if history_file: history_file = normalize_path(history_file) section.history_file = history_file self.history_file = history_file else: section.history_file = None self.history_file = None from supervisor.supervisorctl import DefaultControllerPlugin self.plugin_factories = self.get_plugins( config, 'supervisor.ctl_factory', 'ctlplugin:' ) default_factory = ('default', DefaultControllerPlugin, {}) # if you want to a supervisorctl without the default plugin, # please write your own supervisorctl. self.plugin_factories.insert(0, default_factory) return section
OSError
dataset/ETHPy150Open XiaoMi/minos/supervisor/supervisor/options.py/ClientOptions.read_config
def readFile(filename, offset, length): """ Read length bytes from the file named by filename starting at offset """ absoffset = abs(offset) abslength = abs(length) try: f = open(filename, 'rb') if absoffset != offset: # negative offset returns offset bytes from tail of the file if length: raise ValueError('BAD_ARGUMENTS') f.seek(0, 2) sz = f.tell() pos = int(sz - absoffset) if pos < 0: pos = 0 f.seek(pos) data = f.read(absoffset) else: if abslength != length: raise ValueError('BAD_ARGUMENTS') if length == 0: f.seek(offset) data = f.read() else: sz = f.seek(offset) data = f.read(length) except (__HOLE__, IOError): raise ValueError('FAILED') return data
OSError
dataset/ETHPy150Open XiaoMi/minos/supervisor/supervisor/options.py/readFile
def tailFile(filename, offset, length): """ Read length bytes from the file named by filename starting at offset, automatically increasing offset and setting overflow flag if log size has grown beyond (offset + length). If length bytes are not available, as many bytes as are available are returned. """ overflow = False try: f = open(filename, 'rb') f.seek(0, 2) sz = f.tell() if sz > (offset + length): overflow = True offset = sz - 1 if (offset + length) > sz: if (offset > (sz - 1)): length = 0 offset = sz - length if offset < 0: offset = 0 if length < 0: length = 0 if length == 0: data = '' else: f.seek(offset) data = f.read(length) offset = sz return [data, offset, overflow] except (__HOLE__, IOError): return ['', offset, False] # Helpers for dealing with signals and exit status
OSError
dataset/ETHPy150Open XiaoMi/minos/supervisor/supervisor/options.py/tailFile
def expand(s, expansions, name): try: return s % expansions except __HOLE__: raise ValueError( 'Format string %r for %r contains names which cannot be ' 'expanded' % (s, name)) except: raise ValueError( 'Format string %r for %r is badly formatted' % (s, name) )
KeyError
dataset/ETHPy150Open XiaoMi/minos/supervisor/supervisor/options.py/expand
def ProcessMessage(self, message): """Begins an enrollment flow for this client. Args: message: The Certificate sent by the client. Note that this message is not authenticated. """ cert = rdf_crypto.Certificate(message.payload) queue = self.well_known_session_id.Queue() client_id = message.source # It makes no sense to enrol the same client multiple times, so we # eliminate duplicates. Note, that we can still enroll clients multiple # times due to cache expiration. try: enrolment_cache.Get(client_id) return except __HOLE__: enrolment_cache.Put(client_id, 1) # Create a new client object for this client. client = aff4.FACTORY.Create(client_id, "VFSGRRClient", mode="rw", token=self.token) # Only enroll this client if it has no certificate yet. if not client.Get(client.Schema.CERT): # Start the enrollment flow for this client. flow.GRRFlow.StartFlow(client_id=client_id, flow_name="CAEnroler", csr=cert, queue=queue, token=self.token)
KeyError
dataset/ETHPy150Open google/grr/grr/lib/flows/general/ca_enroller.py/Enroler.ProcessMessage
def getExecutorInfo(self): frameworkDir = os.path.abspath(os.path.dirname(sys.argv[0])) executorPath = os.path.join(frameworkDir, "executor.py") execInfo = mesos_pb2.ExecutorInfo() execInfo.executor_id.value = "default" execInfo.command.value = executorPath v = execInfo.command.environment.variables.add() v.name = 'UID' v.value = str(os.getuid()) v = execInfo.command.environment.variables.add() v.name = 'GID' v.value = str(os.getgid()) mem = execInfo.resources.add() mem.name = 'mem' mem.type = mesos_pb2.Value.SCALAR mem.scalar.value = EXECUTOR_MEMORY cpus = execInfo.resources.add() cpus.name = 'cpus' cpus.type = mesos_pb2.Value.SCALAR cpus.scalar.value = EXECUTOR_CPUS if hasattr(execInfo, 'framework_id'): execInfo.framework_id.value = str(self.framework_id) if self.options.image and hasattr(execInfo, 'container'): execInfo.container.type = mesos_pb2.ContainerInfo.DOCKER execInfo.container.docker.image = self.options.image for path in ['/etc/passwd', '/etc/group']: v = execInfo.container.volumes.add() v.host_path = v.container_path = path v.mode = mesos_pb2.Volume.RO for path in conf.MOOSEFS_MOUNT_POINTS: v = execInfo.container.volumes.add() v.host_path = v.container_path = path v.mode = mesos_pb2.Volume.RW if self.options.volumes: for volume in self.options.volumes.split(','): fields = volume.split(':') if len(fields) == 3: host_path, container_path, mode = fields mode = mesos_pb2.Volume.RO if mode.lower() == 'ro' else mesos_pb2.Volume.RW elif len(fields) == 2: host_path, container_path = fields mode = mesos_pb2.Volume.RW elif len(fields) == 1: container_path, = fields host_path = '' mode = mesos_pb2.Volume.RW else: raise Exception("cannot parse volume %s", volume) try: os.makedirs(host_path) except __HOLE__: pass v = execInfo.container.volumes.add() v.container_path = container_path v.mode = mode if host_path: v.host_path = host_path return execInfo
OSError
dataset/ETHPy150Open douban/dpark/tools/scheduler.py/BaseScheduler.getExecutorInfo
def unsub_sig(): cherrypy.log("unsubsig: %s" % cherrypy.config.get('unsubsig', False)) if cherrypy.config.get('unsubsig', False): cherrypy.log("Unsubscribing the default cherrypy signal handler") cherrypy.engine.signal_handler.unsubscribe() try: from signal import signal, SIGTERM except __HOLE__: pass else: def old_term_handler(signum=None, frame=None): cherrypy.log("I am an old SIGTERM handler.") sys.exit(0) cherrypy.log("Subscribing the new one.") signal(SIGTERM, old_term_handler)
ImportError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/cherrypy/cherrypy/test/_test_states_demo.py/unsub_sig
def _find_all(self, name, attrs, text, limit, generator, **kwargs): "Iterates over a generator looking for things that match." if isinstance(name, SoupStrainer): strainer = name else: strainer = SoupStrainer(name, attrs, text, **kwargs) if text is None and not limit and not attrs and not kwargs: if name is True or name is None: # Optimization to find all tags. result = (element for element in generator if isinstance(element, Tag)) return ResultSet(strainer, result) elif isinstance(name, str): # Optimization to find all tags with a given name. result = (element for element in generator if isinstance(element, Tag) and element.name == name) return ResultSet(strainer, result) results = ResultSet(strainer) while True: try: i = next(generator) except __HOLE__: break if i: found = strainer.search(i) if found: results.append(found) if limit and len(results) >= limit: break return results #These generators can be used to navigate starting from both #NavigableStrings and Tags.
StopIteration
dataset/ETHPy150Open socketubs/pyhn/pyhn/lib/bs4_py3/element.py/PageElement._find_all
def select(self, selector, _candidate_generator=None): """Perform a CSS selection operation on the current element.""" tokens = selector.split() current_context = [self] if tokens[-1] in self._selector_combinators: raise ValueError( 'Final combinator "%s" is missing an argument.' % tokens[-1]) if self._select_debug: print('Running CSS selector "%s"' % selector) for index, token in enumerate(tokens): if self._select_debug: print(' Considering token "%s"' % token) recursive_candidate_generator = None tag_name = None if tokens[index-1] in self._selector_combinators: # This token was consumed by the previous combinator. Skip it. if self._select_debug: print(' Token was consumed by the previous combinator.') continue # Each operation corresponds to a checker function, a rule # for determining whether a candidate matches the # selector. Candidates are generated by the active # iterator. checker = None m = self.attribselect_re.match(token) if m is not None: # Attribute selector tag_name, attribute, operator, value = m.groups() checker = self._attribute_checker(operator, attribute, value) elif '#' in token: # ID selector tag_name, tag_id = token.split('#', 1) def id_matches(tag): return tag.get('id', None) == tag_id checker = id_matches elif '.' in token: # Class selector tag_name, klass = token.split('.', 1) classes = set(klass.split('.')) def classes_match(candidate): return classes.issubset(candidate.get('class', [])) checker = classes_match elif ':' in token: # Pseudo-class tag_name, pseudo = token.split(':', 1) if tag_name == '': raise ValueError( "A pseudo-class must be prefixed with a tag name.") pseudo_attributes = re.match('([a-zA-Z\d-]+)\(([a-zA-Z\d]+)\)', pseudo) found = [] if pseudo_attributes is not None: pseudo_type, pseudo_value = pseudo_attributes.groups() if pseudo_type == 'nth-of-type': try: pseudo_value = int(pseudo_value) except: raise NotImplementedError( 'Only numeric values are currently supported for the nth-of-type pseudo-class.') if pseudo_value < 1: raise ValueError( 'nth-of-type pseudo-class value must be at least 1.') class Counter(object): def __init__(self, destination): self.count = 0 self.destination = destination def nth_child_of_type(self, tag): self.count += 1 if self.count == self.destination: return True if self.count > self.destination: # Stop the generator that's sending us # these things. raise StopIteration() return False checker = Counter(pseudo_value).nth_child_of_type else: raise NotImplementedError( 'Only the following pseudo-classes are implemented: nth-of-type.') elif token == '*': # Star selector -- matches everything pass elif token == '>': # Run the next token as a CSS selector against the # direct children of each tag in the current context. recursive_candidate_generator = lambda tag: tag.children elif token == '~': # Run the next token as a CSS selector against the # siblings of each tag in the current context. recursive_candidate_generator = lambda tag: tag.next_siblings elif token == '+': # For each tag in the current context, run the next # token as a CSS selector against the tag's next # sibling that's a tag. def next_tag_sibling(tag): yield tag.find_next_sibling(True) recursive_candidate_generator = next_tag_sibling elif self.tag_name_re.match(token): # Just a tag name. tag_name = token else: raise ValueError( 'Unsupported or invalid CSS selector: "%s"' % token) if recursive_candidate_generator: # This happens when the selector looks like "> foo". 
# # The generator calls select() recursively on every # member of the current context, passing in a different # candidate generator and a different selector. # # In the case of "> foo", the candidate generator is # one that yields a tag's direct children (">"), and # the selector is "foo". next_token = tokens[index+1] def recursive_select(tag): if self._select_debug: print(' Calling select("%s") recursively on %s %s' % (next_token, tag.name, tag.attrs)) print('-' * 40) for i in tag.select(next_token, recursive_candidate_generator): if self._select_debug: print('(Recursive select picked up candidate %s %s)' % (i.name, i.attrs)) yield i if self._select_debug: print('-' * 40) _use_candidate_generator = recursive_select elif _candidate_generator is None: # By default, a tag's candidates are all of its # children. If tag_name is defined, only yield tags # with that name. if self._select_debug: if tag_name: check = "[any]" else: check = tag_name print(' Default candidate generator, tag name="%s"' % check) if self._select_debug: # This is redundant with later code, but it stops # a bunch of bogus tags from cluttering up the # debug log. def default_candidate_generator(tag): for child in tag.descendants: if not isinstance(child, Tag): continue if tag_name and not child.name == tag_name: continue yield child _use_candidate_generator = default_candidate_generator else: _use_candidate_generator = lambda tag: tag.descendants else: _use_candidate_generator = _candidate_generator new_context = [] new_context_ids = set([]) for tag in current_context: if self._select_debug: print(" Running candidate generator on %s %s" % ( tag.name, repr(tag.attrs))) for candidate in _use_candidate_generator(tag): if not isinstance(candidate, Tag): continue if tag_name and candidate.name != tag_name: continue if checker is not None: try: result = checker(candidate) except __HOLE__: # The checker has decided we should no longer # run the generator. break if checker is None or result: if self._select_debug: print(" SUCCESS %s %s" % (candidate.name, repr(candidate.attrs))) if id(candidate) not in new_context_ids: # If a tag matches a selector more than once, # don't include it in the context more than once. new_context.append(candidate) new_context_ids.add(id(candidate)) elif self._select_debug: print(" FAILURE %s %s" % (candidate.name, repr(candidate.attrs))) current_context = new_context if self._select_debug: print("Final verdict:") for i in current_context: print(" %s %s" % (i.name, i.attrs)) return current_context # Old names for backwards compatibility
StopIteration
dataset/ETHPy150Open socketubs/pyhn/pyhn/lib/bs4_py3/element.py/Tag.select
def handle(self, *args, **options):
    filings_to_process = new_filing.objects.filter(data_is_processed=True, body_rows_superceded=True).exclude(ie_rows_processed=True).order_by('filing_number')
    for this_filing in filings_to_process:
        lines_present = this_filing.lines_present
        has_sked_E = False
        try:
            lines_present['E']
            if int(lines_present['E']) > 0:
                has_sked_E = True
        except __HOLE__:
            continue

        if has_sked_E:
            #print "processing %s " % (this_filing.filing_number)
            #print lines_present, lines_present['E']
            skedelines = SkedE.objects.filter(filing_number=this_filing.filing_number)
            for skede in skedelines:
                attach_committee_to_skedeline(skede)
                attach_ie_target(skede)

            # mark that we've been processed.
            this_filing.ie_rows_processed=True
            this_filing.save()
KeyError
dataset/ETHPy150Open sunlightlabs/read_FEC/fecreader/formdata/management/commands/process_skede_lines.py/Command.handle
def tokenize_single_comma(val):
    # XXX we match twice the same string (here and at the caller level). It is
    # stupid, but it is easier for now...
    m = r_comattrval.match(val)
    if m:
        try:
            name = m.group(1).strip()
            type = m.group(2).strip()
        except __HOLE__:
            raise ValueError("Error while tokenizing attribute")
    else:
        raise ValueError("Error while tokenizing single %s" % val)
    return name, type
IndexError
dataset/ETHPy150Open scipy/scipy/scipy/io/arff/arffread.py/tokenize_single_comma
def tokenize_single_wcomma(val):
    # XXX we match twice the same string (here and at the caller level). It is
    # stupid, but it is easier for now...
    m = r_wcomattrval.match(val)
    if m:
        try:
            name = m.group(1).strip()
            type = m.group(2).strip()
        except __HOLE__:
            raise ValueError("Error while tokenizing attribute")
    else:
        raise ValueError("Error while tokenizing single %s" % val)
    return name, type
IndexError
dataset/ETHPy150Open scipy/scipy/scipy/io/arff/arffread.py/tokenize_single_wcomma
def _loadarff(ofile): # Parse the header file try: rel, attr = read_header(ofile) except __HOLE__ as e: msg = "Error while parsing header, error was: " + str(e) raise ParseArffError(msg) # Check whether we have a string attribute (not supported yet) hasstr = False for name, value in attr: type = parse_type(value) if type == 'string': hasstr = True meta = MetaData(rel, attr) # XXX The following code is not great # Build the type descriptor descr and the list of convertors to convert # each attribute to the suitable type (which should match the one in # descr). # This can be used once we want to support integer as integer values and # not as numeric anymore (using masked arrays ?). acls2dtype = {'real': float, 'integer': float, 'numeric': float} acls2conv = {'real': safe_float, 'integer': safe_float, 'numeric': safe_float} descr = [] convertors = [] if not hasstr: for name, value in attr: type = parse_type(value) if type == 'date': date_format, datetime_unit = get_date_format(value) descr.append((name, "datetime64[%s]" % datetime_unit)) convertors.append(partial(safe_date, date_format=date_format, datetime_unit=datetime_unit)) elif type == 'nominal': n = maxnomlen(value) descr.append((name, 'S%d' % n)) pvalue = get_nom_val(value) convertors.append(partial(safe_nominal, pvalue=pvalue)) else: descr.append((name, acls2dtype[type])) convertors.append(safe_float) #dc.append(acls2conv[type]) #sdescr.append((name, acls2sdtype[type])) else: # How to support string efficiently ? Ideally, we should know the max # size of the string before allocating the numpy array. raise NotImplementedError("String attributes not supported yet, sorry") ni = len(convertors) def generator(row_iter, delim=','): # TODO: this is where we are spending times (~80%). I think things # could be made more efficiently: # - We could for example "compile" the function, because some values # do not change here. # - The function to convert a line to dtyped values could also be # generated on the fly from a string and be executed instead of # looping. # - The regex are overkill: for comments, checking that a line starts # by % should be enough and faster, and for empty lines, same thing # --> this does not seem to change anything. # 'compiling' the range since it does not change # Note, I have already tried zipping the converters and # row elements and got slightly worse performance. elems = list(range(ni)) for raw in row_iter: # We do not abstract skipping comments and empty lines for # performance reasons. if r_comment.match(raw) or r_empty.match(raw): continue row = raw.split(delim) yield tuple([convertors[i](row[i]) for i in elems]) a = generator(ofile) # No error should happen here: it is a bug otherwise data = np.fromiter(a, descr) return data, meta #----- # Misc #-----
ValueError
dataset/ETHPy150Open scipy/scipy/scipy/io/arff/arffread.py/_loadarff
def _id_from_path(path):
    try:
        return path.strip('/').split('/')[-1]
    except __HOLE__:
        return ''
IndexError
dataset/ETHPy150Open lektor/lektor/lektor/sourcesearch.py/_id_from_path
def __new__(cls, name, bases, d):
    state = d.get('initial_state')

    if state == None:
        for base in bases:
            try:
                state = base.initial_state
                break
            except AttributeError:
                pass

    before, after = [], []

    for name, func in d.items():
        try:
            after += [(start, end, func) for start, end in func.after]
        except AttributeError:
            pass

        try:
            before += [(start, end, func) for start, end in func.before]
        except __HOLE__:
            pass

    d['_after_transitions'] = after
    d['_before_transitions'] = before
    d['_state'] = state

    return type.__new__(cls, name, bases, d)

# Python 2/3 Metaclass
# http://mikewatkins.ca/2008/11/29/python-2-and-3-metaclasses/
AttributeError
dataset/ETHPy150Open kyleconroy/statemachine/statemachine.py/MetaMachine.__new__
def create_transition(attr, from_state, to_state):
    def wrapper(f):
        try:
            getattr(f, attr).append((from_state, to_state))
        except __HOLE__:
            setattr(f, attr, [(from_state, to_state)])
        return f
    return wrapper
AttributeError
dataset/ETHPy150Open kyleconroy/statemachine/statemachine.py/create_transition
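A minimal, self-contained sketch of the decorator-factory pattern in the entry above; the transition names and the helper below are illustrative assumptions, not part of the dataset. The masked exception is filled in with AttributeError, as given by the entry's label.

    # Illustrative sketch: a decorator factory that records transitions on the
    # decorated function, first creating the list attribute if it is missing.
    def create_transition(attr, from_state, to_state):
        def wrapper(f):
            try:
                getattr(f, attr).append((from_state, to_state))
            except AttributeError:
                setattr(f, attr, [(from_state, to_state)])
            return f
        return wrapper

    # Hypothetical convenience wrapper for 'after' transitions.
    def after(from_state, to_state):
        return create_transition('after', from_state, to_state)

    @after('red', 'green')
    @after('green', 'yellow')
    def log_change(start, end):
        print("moved from %s to %s" % (start, end))

    # The inner decorator runs first, so its pair is appended first.
    print(log_change.after)  # [('green', 'yellow'), ('red', 'green')]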
def get_mod_class(plugin):
    """
    Converts 'lifestream.plugins.FeedPlugin' to
    ['lifestream.plugins', 'FeedPlugin']
    """
    try:
        dot = plugin.rindex('.')
    except __HOLE__:
        return plugin, ''
    return plugin[:dot], plugin[dot+1:]
ValueError
dataset/ETHPy150Open IanLewis/django-lifestream/lifestream/feeds.py/get_mod_class
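A runnable sketch of the dotted-path split shown in the entry above; the sample inputs are illustrative. The masked exception is filled in with ValueError, as given by the entry's label.

    # Illustrative sketch: rindex raises ValueError when there is no dot,
    # in which case the whole string is treated as the module part.
    def get_mod_class(plugin):
        try:
            dot = plugin.rindex('.')
        except ValueError:
            return plugin, ''
        return plugin[:dot], plugin[dot+1:]

    print(get_mod_class('lifestream.plugins.FeedPlugin'))  # ('lifestream.plugins', 'FeedPlugin')
    print(get_mod_class('FeedPlugin'))                      # ('FeedPlugin', '')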
def decodable(seq, encoding):
    try:
        u = seq.decode(encoding)
    except __HOLE__:
        return False
    else:
        return True
UnicodeDecodeError
dataset/ETHPy150Open thomasballinger/curtsies/curtsies/events.py/decodable
def get_key(bytes_, encoding, keynames='curtsies', full=False): """Return key pressed from bytes_ or None Return a key name or None meaning it's an incomplete sequence of bytes (more bytes needed to determine the key pressed) encoding is how the bytes should be translated to unicode - it should match the terminal encoding. keynames is a string describing how keys should be named: * curtsies uses unicode strings like <F8> * curses uses unicode strings similar to those returned by the Python ncurses window.getkey function, like KEY_F(8), plus a nonstandard representation of meta keys (bytes 128-255) because returning the corresponding unicode code point would be indistinguishable from the multibyte sequence that encodes that character in the current encoding * bytes returns the original bytes from stdin (NOT unicode) if full, match a key even if it could be a prefix to another key (useful for detecting a plain escape key for instance, since escape is also a prefix to a bunch of char sequences for other keys) Events are subclasses of Event, or unicode strings Precondition: get_key(prefix, keynames) is None for all proper prefixes of bytes. This means get_key should be called on progressively larger inputs (for 'asdf', first on 'a', then on 'as', then on 'asd' - until a non-None value is returned) """ if not all(isinstance(c, type(b'')) for c in bytes_): raise ValueError("get key expects bytes, got %r" % bytes_) # expects raw bytes if keynames not in ['curtsies', 'curses', 'bytes']: raise ValueError("keynames must be one of 'curtsies', 'curses' or 'bytes'") seq = b''.join(bytes_) if len(seq) > MAX_KEYPRESS_SIZE: raise ValueError('unable to decode bytes %r' % seq) def key_name(): if keynames == 'curses': if seq in CURSES_NAMES: # may not be here (and still not decodable) curses names incomplete return CURSES_NAMES[seq] # Otherwise, there's no special curses name for this try: return seq.decode(encoding) # for normal decodable text or a special curtsies sequence with bytes that can be decoded except __HOLE__: # this sequence can't be decoded with this encoding, so we need to represent the bytes if len(seq) == 1: return u'x%02X' % ord(seq) #TODO figure out a better thing to return here else: raise NotImplementedError("are multibyte unnameable sequences possible?") return u'bytes: ' + u'-'.join(u'x%02X' % ord(seq[i:i+1]) for i in range(len(seq))) #TODO if this isn't possible, return multiple meta keys as a paste event if paste events enabled elif keynames == 'curtsies': if seq in CURTSIES_NAMES: return CURTSIES_NAMES[seq] return seq.decode(encoding) #assumes that curtsies names are a subset of curses ones else: assert keynames == 'bytes' return seq key_known = seq in CURTSIES_NAMES or seq in CURSES_NAMES or decodable(seq, encoding) if full and key_known: return key_name() elif seq in KEYMAP_PREFIXES or could_be_unfinished_char(seq, encoding): return None # need more input to make up a full keypress elif key_known: return key_name() else: seq.decode(encoding) # this will raise a unicode error (they're annoying to raise ourselves) assert False, 'should have raised an unicode decode error'
UnicodeDecodeError
dataset/ETHPy150Open thomasballinger/curtsies/curtsies/events.py/get_key
def try_keys(): print('press a bunch of keys (not at the same time, but you can hit them pretty quickly)') import tty import termios import fcntl import os from .termhelpers import Cbreak def ask_what_they_pressed(seq, Normal): print('Unidentified character sequence!') with Normal: while True: r = raw_input("type 'ok' to prove you're not pounding keys ") if r.lower().strip() == 'ok': break while True: print('Press the key that produced %r again please' % (seq,)) retry = os.read(sys.stdin.fileno(), 1000) if seq == retry: break print("nope, that wasn't it") with Normal: name = raw_input('Describe in English what key you pressed: ') f = open('keylog.txt', 'a') f.write("%r is called %s\n" % (seq, name)) f.close() print('Thanks! Please open an issue at https://github.com/thomasballinger/curtsies/issues') print('or email [email protected]. Include this terminal history or keylog.txt.') print('You can keep pressing keys') with Cbreak(sys.stdin) as NoCbreak: while True: try: chars = os.read(sys.stdin.fileno(), 1000) print('---') print(repr(chars)) if chars in CURTSIES_NAMES: print(CURTSIES_NAMES[chars]) elif len(chars) == 1: print('literal') else: print('unknown!!!') ask_what_they_pressed(chars, NoCbreak) except __HOLE__: pass
OSError
dataset/ETHPy150Open thomasballinger/curtsies/curtsies/events.py/try_keys
def main(args): usage = "%prog [-s setupfile] [-o output_file_path] scriptfile [arg] ..." parser = optparse.OptionParser(usage=usage, version="%prog 1.0b2") parser.allow_interspersed_args = False parser.add_option('-l', '--line-by-line', action='store_true', help="Use the line-by-line profiler from the line_profiler module " "instead of Profile. Implies --builtin.") parser.add_option('-b', '--builtin', action='store_true', help="Put 'profile' in the builtins. Use 'profile.enable()' and " "'profile.disable()' in your code to turn it on and off, or " "'@profile' to decorate a single function, or 'with profile:' " "to profile a single section of code.") parser.add_option('-o', '--outfile', default=None, help="Save stats to <outfile>") parser.add_option('-s', '--setup', default=None, help="Code to execute before the code to profile") parser.add_option('-v', '--view', action='store_true', help="View the results of the profile in addition to saving it.") if not sys.argv[1:]: parser.print_usage() sys.exit(2) options, args = parser.parse_args() if not options.outfile: if options.line_by_line: extension = 'lprof' else: extension = 'prof' options.outfile = '%s.%s' % (os.path.basename(args[0]), extension) sys.argv[:] = args if options.setup is not None: # Run some setup code outside of the profiler. This is good for large # imports. setup_file = find_script(options.setup) __file__ = setup_file __name__ = '__main__' # Make sure the script's directory is on sys.path instead of just # kernprof.py's. sys.path.insert(0, os.path.dirname(setup_file)) ns = locals() execfile(setup_file, ns, ns) if options.line_by_line: import line_profiler prof = line_profiler.LineProfiler() options.builtin = True else: prof = ContextualProfile() if options.builtin: import __builtin__ __builtin__.__dict__['profile'] = prof script_file = find_script(sys.argv[0]) __file__ = script_file __name__ = '__main__' # Make sure the script's directory is on sys.path instead of just # kernprof.py's. sys.path.insert(0, os.path.dirname(script_file)) try: try: ns = locals() if options.builtin: execfile(script_file, ns, ns) else: prof.runctx('execfile(%r)' % (script_file,), ns, ns) except (KeyboardInterrupt, __HOLE__): pass finally: prof.dump_stats(options.outfile) print 'Wrote profile results to %s' % options.outfile if options.view: prof.print_stats()
SystemExit
dataset/ETHPy150Open certik/line_profiler/kernprof.py/main
@staticmethod
def fromphases(p):
    for scm, phasename in p:
        try:
            parserforphase = inputparser.fromphase(phasename)
            if parserforphase.scm() == scm:
                return parserforphase
            else:
                continue
        except __HOLE__:
            pass
    raise NotImplementedError("Couldn't find phase matching"
                              " conditions")
NotImplementedError
dataset/ETHPy150Open charignon/hooklib/hooklib_input.py/inputparser.fromphases
@staticmethod
def fromphase(phase):
    """Factory method to return an appropriate input parser
    For example if the phase is 'post-update' and that the git env
    variables are set, we infer that we need a git postupdate
    inputparser"""
    phasemapping = {
        None: dummyinputparser,
        'applypatch-msg': applypatchmsginputparser,
        'pre-applypatch': preapplypatchinputparser,
        'post-applypatch': postapplypatchinputparser,
        'pre-commit': precommitinputparser,
        'prepare-commit-msg': preparecommitmsginputparser,
        'commit-msg': commitmsginputparser,
        'post-commit': postcommitinputparser,
        'pre-rebase': prerebaseinputparser,
        'pre-push': prepushinputparser,
        'pre-receive': prereceiveinputparser,
        'update': updateinputparser,
        'post-receive': postreceiveinputparser,
        'post-update': postupdateinputparser,
        'pre-auto-gc': preautogcinputparser,
    }
    try:
        return phasemapping[phase].findscm()
    except __HOLE__:
        raise NotImplementedError("Unsupported hook type %s" % phase)
KeyError
dataset/ETHPy150Open charignon/hooklib/hooklib_input.py/inputparser.fromphase
@register.tag(name='change_currency')
def change_currency(parser, token):
    try:
        tag_name, current_price, new_currency = token.split_contents()
    except __HOLE__:
        tag_name = token.contents.split()[0]
        raise template.TemplateSyntaxError('%r tag requires exactly two arguments' % (tag_name))
    return ChangeCurrencyNode(current_price, new_currency)
ValueError
dataset/ETHPy150Open panosl/django-currencies/currencies/templatetags/currency.py/change_currency
def load_template_source(template_name, template_dirs=None):
    """
    Give capability to load template from specific directory
    """
    try:
        return open(template_name).read(), template_name
    except __HOLE__:
        raise TemplateDoesNotExist, template_name
IOError
dataset/ETHPy150Open adlibre/Adlibre-DMS/adlibre_dms/libraries/adlibre/freeloader.py/load_template_source
def __call__(self, values):
    """Runs through each value and transform it with a mapped function."""
    values = product(values, self._map)
    for value in map(self._mapped_func, filter(self._cmp_filter, values)):
        if isinstance(value, tuple) and len(value) == 2:
            yield value
        else:
            try:
                # TODO: Replace with "yield from" when dropping Python 2.
                for __ in value:
                    yield __
            except __HOLE__:
                # Non-iterable returned
                continue
TypeError
dataset/ETHPy150Open chrippa/livestreamer/src/livestreamer/plugin/api/mapper.py/StreamMapper.__call__
def _get_fibre_image(network, cpores, vox_len, fibre_rad): r""" Produce image by filling in voxels along throat edges using Bresenham line Then performing distance transform on fibre voxels to erode the pore space """ cthroats = network.find_neighbor_throats(pores=cpores) # Below method copied from geometry model throat.vertices # Needed now as network may not have all throats assigned to geometry # i.e network['throat.vertices'] could return garbage verts = _sp.ndarray(network.num_throats(), dtype=object) for i in range(len(verts)): verts[i] = _sp.asarray(list(network["throat.vert_index"][i].values())) cverts = verts[cthroats] [vxmin, vxmax, vymin, vymax, vzmin, vzmax] = _get_vertex_range(cverts) # Translate vertices so that minimum occurs at the origin for index in range(len(cverts)): cverts[index] -= np.array([vxmin, vymin, vzmin]) # Find new size of image array cdomain = np.around(np.array([(vxmax-vxmin), (vymax-vymin), (vzmax-vzmin)]), 6) logger.info("Creating fibre domain range: " + str(np.around(cdomain, 5))) lx = np.int(np.around(cdomain[0]/vox_len)+1) ly = np.int(np.around(cdomain[1]/vox_len)+1) lz = np.int(np.around(cdomain[2]/vox_len)+1) # Try to create all the arrays we will need at total domain size try: pore_space = np.ones([lx, ly, lz], dtype=np.uint8) fibre_space = np.zeros(shape=[lx, ly, lz], dtype=np.uint8) dt = np.zeros([lx, ly, lz], dtype=float) # Only need one chunk cx = cy = cz = 1 chunk_len = np.max(np.shape(pore_space)) except: logger.info("Domain too large to fit into memory so chunking domain" "to process image, this may take some time") # Do chunking chunk_len = 100 if (lx > chunk_len): cx = np.ceil(lx/chunk_len).astype(int) else: cx = 1 if (ly > chunk_len): cy = np.ceil(ly/chunk_len).astype(int) else: cy = 1 if (lz > chunk_len): cz = np.ceil(lz/chunk_len).astype(int) else: cz = 1 # Get image of the fibres line_points = bresenham(cverts, vox_len/2) line_ints = (np.around((line_points/vox_len), 0)).astype(int) for x, y, z in line_ints: try: pore_space[x][y][z] = 0 except __HOLE__: logger.warning("Some elements in image processing are out" + "of bounds") num_chunks = np.int(cx*cy*cz) cnum = 1 for ci in range(cx): for cj in range(cy): for ck in range(cz): # Work out chunk range logger.info("Processing Fibre Chunk: "+str(cnum)+" of " + str(num_chunks)) cxmin = ci*chunk_len cxmax = np.int(np.ceil((ci+1)*chunk_len + 5*fibre_rad)) cymin = cj*chunk_len cymax = np.int(np.ceil((cj+1)*chunk_len + 5*fibre_rad)) czmin = ck*chunk_len czmax = np.int(np.ceil((ck+1)*chunk_len + 5*fibre_rad)) # Don't overshoot if cxmax > lx: cxmax = lx if cymax > ly: cymax = ly if czmax > lz: czmax = lz dt = ndimage.distance_transform_edt(pore_space[cxmin:cxmax, cymin:cymax, czmin:czmax]) fibre_space[cxmin:cxmax, cymin:cymax, czmin:czmax][dt <= fibre_rad] = 0 fibre_space[cxmin:cxmax, cymin:cymax, czmin:czmax][dt > fibre_rad] = 1 del dt cnum += 1 del pore_space return fibre_space
IndexError
dataset/ETHPy150Open PMEAL/OpenPNM/OpenPNM/Geometry/models/pore_volume.py/_get_fibre_image
def in_hull_volume(network, geometry, fibre_rad, vox_len=1e-6, **kwargs):
    r"""
    Work out the voxels inside the convex hull of the voronoi vertices of each
    pore
    """
    Np = network.num_pores()
    geom_pores = geometry.map_pores(network, geometry.pores())
    volume = _sp.zeros(Np)
    pore_vox = _sp.zeros(Np, dtype=int)
    fibre_vox = _sp.zeros(Np, dtype=int)
    voxel = vox_len**3
    try:
        nbps = network.pores('boundary', mode='not')
    except __HOLE__:
        # Boundaries have not been generated
        nbps = network.pores()
    # Voxel length
    fibre_rad = np.around((fibre_rad-(vox_len/2))/vox_len, 0).astype(int)
    # Get the fibre image
    fibre_image = _get_fibre_image(network, geom_pores, vox_len, fibre_rad)
    # Save as private variables
    geometry._fibre_image = fibre_image
    hull_image = np.ones_like(fibre_image, dtype=np.uint16)*-1
    geometry._hull_image = hull_image
    for pore in nbps:
        logger.info("Processing Pore: "+str(pore+1)+" of "+str(len(nbps)))
        verts = np.asarray([i for i in network["pore.vert_index"][pore].values()])
        verts = np.asarray(misc.unique_list(np.around(verts, 6)))
        verts /= vox_len
        pore_vox[pore], fibre_vox[pore] = inhull(geometry, verts, pore)
    volume = pore_vox*voxel
    geometry["pore.fibre_voxels"] = fibre_vox[geom_pores]
    geometry["pore.pore_voxels"] = pore_vox[geom_pores]
    return volume[geom_pores]
KeyError
dataset/ETHPy150Open PMEAL/OpenPNM/OpenPNM/Geometry/models/pore_volume.py/in_hull_volume
def get(name):
    try:
        return registry[name]
    except __HOLE__:
        raise KeyError(
            "Object named '{}' is not registered in ramses "
            "registry".format(name))
KeyError
dataset/ETHPy150Open ramses-tech/ramses/ramses/registry.py/get
@rest_api.post('/imagefactory/<image_collection>') @rest_api.post('/imagefactory/base_images/<base_image_id>/<image_collection>') @rest_api.post('/imagefactory/base_images/<base_image_id>/target_images/<target_image_id>/<image_collection>') @rest_api.post('/imagefactory/target_images/<target_image_id>/<image_collection>') @log_request @oauth_protect @check_accept_header def create_image(image_collection, base_image_id=None, target_image_id=None): try: image_type = image_collection[0:-1] content_type = request.headers.get('Content-Type') form_data = form_data_for_content_type(content_type) if(('application/x-www-form-urlencoded' in content_type) or ('multipart/form-data' in content_type)): request_data = form_data else: request_data = form_data.get(image_type) if(not request_data): raise HTTPResponse(status=400, output='%s not found in request.' % image_type) req_base_img_id = request_data.get('base_image_id') req_target_img_id = request_data.get('target_image_id') base_img_id = req_base_img_id if req_base_img_id else base_image_id target_img_id = req_target_img_id if req_target_img_id else target_image_id if(image_collection == 'base_images'): builder = BuildDispatcher().builder_for_base_image(template=request_data.get('template'), parameters=request_data.get('parameters')) image = builder.base_image elif(image_collection == 'target_images'): builder = BuildDispatcher().builder_for_target_image(target=request_data.get('target'), image_id=base_img_id, template=request_data.get('template'), parameters=request_data.get('parameters')) image = builder.target_image elif(image_collection == 'provider_images'): _provider = request_data.get('provider') _credentials = request_data.get('credentials') _target = request_data.get('target') if(_provider and _credentials and _target): builder = BuildDispatcher().builder_for_provider_image(provider=_provider, credentials=_credentials, target=_target, image_id=target_img_id, template=request_data.get('template'), parameters=request_data.get('parameters')) image = builder.provider_image else: _credentials = 'REDACTED' if _credentials else None raise HTTPResponse(status=400, output="Missing key/value pair: provider(%s), credentials(%s), target(%s)" % (_provider, _credentials, _target)) else: raise HTTPResponse(status=404, output="%s not found" % image_collection) _response = {'_type':type(image).__name__, 'id':image.identifier, 'href':'%s/%s' % (request.url, image.identifier)} for key in image.metadata(): if key not in ('identifier', 'data'): _response[key] = getattr(image, key, None) response.status = 202 return converted_response({image_collection[0:-1]:_response}) except __HOLE__ as e: log.exception(e) raise HTTPResponse(status=400, output='Missing value for key: %s' % e) except Exception as e: log.exception(e) raise HTTPResponse(status=500, output=e)
KeyError
dataset/ETHPy150Open redhat-imaging/imagefactory/imgfac/rest/RESTv2.py/create_image
@rest_api.get('/imagefactory/<collection_type>/<image_id>') @rest_api.get('/imagefactory/base_images/<base_image_id>/<collection_type>/<image_id>') @rest_api.get('/imagefactory/base_images/<base_image_id>/target_images/<target_image_id>/<collection_type>/<image_id>') @rest_api.get('/imagefactory/target_images/<target_image_id>/<collection_type>/<image_id>') @log_request @oauth_protect @check_accept_header def image_with_id(collection_type, image_id, base_image_id=None, target_image_id=None, provider_image_id=None): try: img_class = IMAGE_TYPES[collection_type] if img_class: fetch_spec = {'type': img_class, 'identifier': image_id} try: image = PersistentImageManager.default_manager().images_from_query(fetch_spec)[0] _type = type(image).__name__ _response = {'_type': _type, 'id': image.identifier, 'href': request.url} for key in image.metadata(): if key not in ('identifier', 'data', 'base_image_id', 'target_image_id'): _response[key] = getattr(image, key, None) api_url = '%s://%s/imagefactory' % (request.urlparts[0], request.urlparts[1]) if (_type == "BaseImage"): _objtype = 'base_image' _response['target_images'] = list_images('target_images', base_image_id=image.identifier, list_url='%s/target_images' % api_url) elif (_type == "TargetImage"): _objtype = 'target_image' base_image_id = image.base_image_id if (base_image_id): base_image_href = '%s/base_images/%s' % (api_url, base_image_id) base_image_dict = {'_type': 'BaseImage', 'id': base_image_id, 'href': base_image_href} _response['base_image'] = base_image_dict else: _response['base_image'] = None _response['provider_images'] = list_images('provider_images', target_image_id=image.identifier, list_url='%s/provider_images' % api_url) elif (_type == "ProviderImage"): _objtype = 'provider_image' target_image_id = image.target_image_id if (target_image_id): target_image_href = '%s/target_images/%s' % (api_url, target_image_id) target_image_dict = {'_type': 'TargetImage', 'id': target_image_id, 'href': target_image_href} _response['target_image'] = target_image_dict else: _response['target_image'] = None else: log.error("Returning HTTP status 500 due to unknown image type: %s" % _type) raise HTTPResponse(status=500, output='Bad type for found object: %s' % _type) response.status = 200 return converted_response({_objtype: _response}) except __HOLE__ as e: log.warning(e) raise HTTPResponse(status=404, output='No %s found with id: %s' % (img_class, image_id)) else: raise HTTPResponse(status=404, output='Unknown resource type: %s' % collection_type) except KeyError as e: if collection_type == 'plugins': return get_plugins(plugin_id=image_id) else: log.exception(e) raise HTTPResponse(status=500, output=e) except Exception as e: log.exception(e) raise HTTPResponse(status=500, output=e)
IndexError
dataset/ETHPy150Open redhat-imaging/imagefactory/imgfac/rest/RESTv2.py/image_with_id
def set_indent(self, indent):
    try:
        self._indent = int(indent)
    except __HOLE__:
        raise ValueError("Cannot convert indent value '%s' to an integer" % indent)
ValueError
dataset/ETHPy150Open ombre42/robotframework-sudslibrary/src/SudsLibrary/soaplogging.py/_SoapLogger.set_indent
def mkdirs(path):
    if not os.path.isdir(path):
        try:
            os.makedirs(path)
        except __HOLE__ as err:
            if err.errno != errno.EEXIST or not os.path.isdir(path):
                raise
OSError
dataset/ETHPy150Open zerovm/zerovm-cli/zvshlib/tests/functional/tests.py/mkdirs
def safe_decode(text, incoming=None, errors='strict'):
    """Decodes incoming str using `incoming` if they're not already unicode.

    :param incoming: Text's current encoding
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a unicode `incoming` encoded
        representation of it.
    :raises TypeError: If text is not an instance of str
    """
    if not isinstance(text, six.string_types):
        raise TypeError("%s can't be decoded" % type(text))

    if isinstance(text, six.text_type):
        return text

    if not incoming:
        incoming = (sys.stdin.encoding or
                    sys.getdefaultencoding())

    try:
        return text.decode(incoming, errors)
    except __HOLE__:
        # Note(flaper87) If we get here, it means that
        # sys.stdin.encoding / sys.getdefaultencoding
        # didn't return a suitable encoding to decode
        # text. This happens mostly when global LANG
        # var is not set correctly and there's no
        # default encoding. In this case, most likely
        # python will use ASCII or ANSI encoders as
        # default encodings but they won't be capable
        # of decoding non-ASCII characters.
        #
        # Also, UTF-8 is being used since it's an ASCII
        # extension.
        return text.decode('utf-8', errors)
UnicodeDecodeError
dataset/ETHPy150Open openstack/python-monascaclient/monascaclient/openstack/common/strutils.py/safe_decode
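A reduced, runnable sketch of the decode-with-fallback pattern used in the entry above; the helper name and sample bytes are illustrative assumptions. The masked exception is filled in with UnicodeDecodeError, as given by the entry's label.

    # Illustrative sketch: try the guessed encoding first, then fall back to
    # UTF-8 (an ASCII superset) when the bytes cannot be decoded.
    def decode_with_fallback(raw, encoding, errors='strict'):
        if isinstance(raw, str):  # already text
            return raw
        try:
            return raw.decode(encoding, errors)
        except UnicodeDecodeError:
            return raw.decode('utf-8', errors)

    print(decode_with_fallback(b'caf\xc3\xa9', 'ascii'))  # 'café'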
def get_price_filters(category, product_filter, price_filter, manufacturer_filter):
    """
    Creates price filter based on the min and max prices of the category's
    products
    """
    # If a price filter is set we return just this.
    if price_filter:
        return {
            "show_reset": True,
            "min": locale.format("%.2f", price_filter["min"]),
            "max": locale.format("%.2f", price_filter["max"]),
            "disabled": False,
        }

    # Base are the filtered products
    products = get_filtered_products_for_category(category, product_filter, price_filter, None, manufacturer_filter)
    if not products:
        return []

    all_products = lfs.catalog.models.Product.objects.filter(Q(pk__in=products) | (Q(parent__in=products) & Q(active=True)))
    res = all_products.aggregate(min_price=Min('effective_price'), max_price=Max('effective_price'))

    pmin, pmax = res['min_price'], res['max_price']
    disabled = (pmin and pmax) is None

    try:
        pmin = locale.format("%.2f", pmin)
    except __HOLE__:
        pmin = 0.0
    try:
        pmax = locale.format("%.2f", pmax)
    except TypeError:
        pmax = 0.0

    return {
        "show_reset": False,
        "min": pmin,
        "max": pmax,
        "disabled": disabled,
    }
TypeError
dataset/ETHPy150Open diefenbach/django-lfs/lfs/catalog/utils.py/get_price_filters
def get_product_filters(category, product_filter, price_filter, manufacturer_filter, sorting): """Returns the next product filters based on products which are in the given category and within the result set of the current filters. """ mapping_manager = MappingCache() properties_mapping = get_property_mapping() options_mapping = get_option_mapping() property_ids = _get_property_ids() product_ids = _get_product_ids(category) set_filters = dict(product_filter) ########## Number Fields ################################################### number_fields_dict = {} if property_ids and product_ids: cursor = connection.cursor() cursor.execute("""SELECT property_group_id, property_id, min(value_as_float), max(value_as_float) FROM catalog_productpropertyvalue WHERE type=%s AND product_id IN (%s) AND property_id IN (%s) GROUP BY property_group_id, property_id""" % (PROPERTY_VALUE_TYPE_FILTER, product_ids, property_ids)) for row in cursor.fetchall(): property_group_id = row[0] property_id = row[1] prop = properties_mapping[property_id] if prop.is_select_field or prop.is_text_field or not prop.filterable: continue # cache property groups for later use property_group = mapping_manager.get(lfs.catalog.models.PropertyGroup, property_group_id) key = '{0}_{1}'.format(property_group_id, property_id) if key in product_filter.get("number-filter", {}): pmin, pmax = product_filter.get("number-filter").get(key)['value'][0:2] show_reset = True else: pmin, pmax = row[2:4] show_reset = False try: pmin = locale.format("%.2f", float(pmin)) except __HOLE__: pmin = 0.0 try: pmax = locale.format("%.2f", float(pmax)) except TypeError: pmax = 0.0 property_group_dict = number_fields_dict.setdefault(property_group_id, {'property_group': property_group, 'items': []}) property_group_dict['items'].append({ "id": property_id, "property_group_id": property_group_id, "position": prop.position, "object": prop, "name": prop.name, "title": prop.title, "unit": prop.unit, "show_reset": show_reset, "show_quantity": True, "items": {"min": pmin, "max": pmax} }) # convert to list ordered by property group name number_fields = number_fields_dict.values() number_fields = sorted(number_fields, key=lambda a: a["property_group"].name) for pg in number_fields: pg['items'] = sorted(pg['items'], key=lambda a: a['name']) ########## Select Fields & Text Fields ##################################### select_fields_dict = {} if property_ids and product_ids: cursor = connection.cursor() cursor.execute("""SELECT property_group_id, property_id, value FROM catalog_productpropertyvalue WHERE type=%s AND product_id IN (%s) AND property_id IN (%s) GROUP BY property_group_id, property_id, value""" % (PROPERTY_VALUE_TYPE_FILTER, product_ids, property_ids)) for row in cursor.fetchall(): property_group_id = row[0] property_id = row[1] value = row[2] prop = properties_mapping[property_id] if prop.is_number_field or not prop.filterable: continue # use property group cache property_group = mapping_manager.get(lfs.catalog.models.PropertyGroup, property_group_id) property_group_dict = select_fields_dict.setdefault(property_group_id, {'property_group': property_group, 'properties': {}}) properties = property_group_dict['properties'] if prop.is_select_field: name = options_mapping[value].name position = options_mapping[value].position else: name = value position = 10 if name == value and name == '': continue # initialize list of property values properties.setdefault(property_id, []) properties[property_id].append({ "id": property_id, "property_group_id": property_group_id, 
"value": value, "name": name, "title": prop.title, "position": position, "show_quantity": True, }) # Creates the filters to count the existing products per property option, # which is used within the filter portlet new_product_filter = {} if product_filter.get("number-filter"): new_product_filter["number-filter"] = product_filter["number-filter"] for property_group_id, property_group_dict in select_fields_dict.items(): properties = property_group_dict['properties'] for property_id, options in properties.items(): key = '{0}_{1}'.format(property_group_id, property_id) for option in options: # The option in question is used at any rate new_product_filter["select-filter"] = {key: {'property_id': property_id, 'property_group_id': property_group_id, 'value': option["value"]}} # All checked options of all other properties is also used for f0, f1 in product_filter.get("select-filter", {}).items(): print f0, f1, key if f0 != key: new_product_filter["select-filter"][f0] = f1 # Tests if the option is checked if (f0 == key) and (option["value"] in f1['value'].split("|")): option["checked"] = True option["quantity"] = len(get_filtered_products_for_category(category, new_product_filter, price_filter, None)) # Transform the property groups and properties inside into lists to be able to iterate over these in template property_groups_list = select_fields_dict.values() for property_group_dict in property_groups_list: properties = property_group_dict['properties'] property_group_id = property_group_dict['property_group'].pk result = [] # Transform the group properties into a list of dicts for property_id, items in properties.items(): prop = properties_mapping[property_id] items.sort(lambda a, b: cmp(a["position"], b["position"])) # Move items with zero quantity to the end of the list for x in range(0, len(items)): if items[x]["quantity"] == 0: items.insert(len(items), items.pop(x)) result.append({ "id": property_id, "property_group_id": property_group_id, "position": prop.position, "unit": prop.unit, "show_reset": '%s_%s' % (property_group_id, property_id) in set_filters.get('select-filter', {}).keys(), "name": prop.name, "title": prop.title, "items": items, }) result = sorted(result, key=lambda a: a["position"]) property_group_dict['properties'] = result property_groups_list = sorted(property_groups_list, key=lambda a: a['property_group'].name) return { "select_fields": property_groups_list, "number_fields": number_fields, }
TypeError
dataset/ETHPy150Open diefenbach/django-lfs/lfs/catalog/utils.py/get_product_filters
def _calculate_steps(product_ids, property, min, max): """Calculates filter steps. **Parameters** product_ids The product_ids for which the steps are calculated. List of ids. property The property for which the steps are calculated. Instance of Property. min / max The min and max value of all steps. Must be a Float. """ try: min = float(min) max = float(max) except TypeError: return [] result = [] filter_steps = lfs.catalog.models.FilterStep.objects.filter(property=property.id) if property.is_steps_step_type: for i, step in enumerate(filter_steps[:len(filter_steps) - 1]): min = step.start if i != 0: min += 1.0 max = filter_steps[i + 1].start result.append({ "min": min, "max": max, "quantity": _calculate_quantity(product_ids, property.id, min, max) }) else: if property.is_automatic_step_type: if max == min: step = max else: diff = max - min step = diff / 3 # TODO: Should this be variable? if step >= 0 and step < 2: step = 1 elif step >= 2 and step < 6: step = 5 elif step >= 6 and step < 11: step = 10 elif step >= 11 and step < 51: step = 50 elif step >= 51 and step < 101: step = 100 elif step >= 101 and step < 501: step = 500 elif step >= 501 and step < 1001: step = 1000 elif step >= 1000 and step < 5001: step = 500 elif step >= 5001 and step < 10001: step = 1000 else: step = property.step for n, i in enumerate(range(0, int(max), step)): if i > max: break min = i + 1 max = i + step result.append({ "min": min, "max": max, "quantity": _calculate_quantity(product_ids, property.id, min, max), }) if property.display_no_results: return result else: # Remove entries with zero products new_result = [] for n, f in enumerate(result): if f["quantity"] == 0: try: result[n + 1]["min"] = f["min"] except __HOLE__: pass continue new_result.append(f) return new_result
IndexError
dataset/ETHPy150Open diefenbach/django-lfs/lfs/catalog/utils.py/_calculate_steps
def get_client(hub=None, **kwargs):
    hub = hub or get_event_loop()
    try:
        return hub._current_http_client
    except __HOLE__:
        client = hub._current_http_client = Client(hub, **kwargs)
        return client
AttributeError
dataset/ETHPy150Open celery/kombu/kombu/async/http/__init__.py/get_client
def _get_win_folder_with_pywin32(csidl_name):
    from win32com.shell import shellcon, shell
    dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
    # Try to make this a unicode path because SHGetFolderPath does
    # not return unicode strings when there is unicode data in the
    # path.
    try:
        dir = unicode(dir)

        # Downgrade to short path name if have highbit chars. See
        # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
        has_high_char = False
        for c in dir:
            if ord(c) > 255:
                has_high_char = True
                break
        if has_high_char:
            try:
                import win32api
                dir = win32api.GetShortPathName(dir)
            except __HOLE__:
                pass
    except UnicodeError:
        pass
    return dir
ImportError
dataset/ETHPy150Open ODM2/ODMToolsPython/odmtools/lib/Appdirs/appdirs.py/_get_win_folder_with_pywin32
def draw_title(self, universe, game_state):
    self.score.delete("title")
    if not game_state:
        return
    center = self.mesh_graph.screen_width // 2

    try:
        team_time = game_state["team_time"]
    except (KeyError, TypeError):
        team_time = [0, 0]

    left_team = "(%.2f) %s %d " % (team_time[0], game_state["team_name"][0], universe.teams[0].score)
    right_team = " %d %s (%.2f)" % (universe.teams[1].score, game_state["team_name"][1], team_time[1])
    font_size = guess_size(left_team+':'+right_team,
                           self.mesh_graph.screen_width,
                           30,
                           rel_size = +1)

    def status(team_idx):
        try:
            return "Timeouts: %i, Killed: %i" % (game_state["timeout_teams"][team_idx], game_state["times_killed"][team_idx])
        except __HOLE__:
            return ""

    left_status = status(0)
    right_status = status(1)
    status_font_size = max(font_size - 3, 3)

    self.score.create_text(center, 15, text=left_team, font=(None, font_size), fill=col(94, 158, 217), tag="title", anchor=tkinter.E)
    self.score.create_text(center, 15, text=":", font=(None, font_size), tag="title", anchor=tkinter.CENTER)
    self.score.create_text(center+2, 15, text=right_team, font=(None, font_size), fill=col(235, 90, 90), tag="title", anchor=tkinter.W)

    self.score.create_text(center, 35, text="|", font=(None, font_size), tag="title", anchor=tkinter.CENTER)
    self.score.create_text(center, 35, text=left_status + " ", font=(None, status_font_size), tag="title", anchor=tkinter.E)
    self.score.create_text(center+1, 35, text=" " + right_status, font=(None, status_font_size), tag="title", anchor=tkinter.W)
TypeError
dataset/ETHPy150Open ASPP/pelita/pelita/ui/tk_canvas.py/UiCanvas.draw_title
def draw_maze(self, universe):
    if not self.size_changed:
        return
    self.canvas.delete("wall")
    for position, wall in universe.maze.items():
        model_x, model_y = position
        if wall:
            wall_item = Wall(self.mesh_graph, model_x, model_y)
            wall_item.wall_neighbours = []
            for dx in [-1, 0, 1]:
                for dy in [-1, 0, 1]:
                    try:
                        if universe.maze[model_x + dx, model_y + dy]:
                            wall_item.wall_neighbours.append( (dx, dy) )
                    except __HOLE__:
                        pass
            wall_item.draw(self.canvas)
IndexError
dataset/ETHPy150Open ASPP/pelita/pelita/ui/tk_canvas.py/UiCanvas.draw_maze
def _after(self, delay, fun, *args):
    """ Execute fun(*args) after delay milliseconds.

    # Patched to quit after `KeyboardInterrupt`s.
    """
    def wrapped_fun():
        try:
            fun(*args)
        except __HOLE__:
            _logger.info("Detected KeyboardInterrupt. Exiting.")
            self.quit()
    self.master.after(delay, wrapped_fun)
KeyboardInterrupt
dataset/ETHPy150Open ASPP/pelita/pelita/ui/tk_canvas.py/TkApplication._after
def _check_speed_button_state(self):
    try:
        # self.ui_canvas.button_game_speed_faster
        # may not be available yet (or may be None).
        # If this is the case, we'll do nothing at all.
        if self._delay <= self._min_delay:
            self.ui_canvas.button_game_speed_faster.config(state=tkinter.DISABLED)
        else:
            self.ui_canvas.button_game_speed_faster.config(state=tkinter.NORMAL)
    except __HOLE__:
        pass
AttributeError
dataset/ETHPy150Open ASPP/pelita/pelita/ui/tk_canvas.py/TkApplication._check_speed_button_state
def _get_storage_path(path, app_id):
    """Returns a path to the directory where stub data can be stored."""
    _, _, app_id = app_id.replace(':', '_').rpartition('~')
    if path is None:
        for path in _generate_storage_paths(app_id):
            try:
                os.mkdir(path, 0700)
            except __HOLE__, e:
                if e.errno == errno.EEXIST:
                    # Check that the directory is only accessable by the current user to
                    # protect against an attacker creating the directory in advance in
                    # order to access any created files. Windows has per-user temporary
                    # directories and st_mode does not include per-user permission
                    # information so assume that it is safe.
                    if sys.platform == 'win32' or (
                            (os.stat(path).st_mode & 0777) == 0700 and os.path.isdir(path)):
                        return path
                    else:
                        continue
                raise
            else:
                return path
    elif not os.path.exists(path):
        os.mkdir(path)
        return path
    elif not os.path.isdir(path):
        raise IOError('the given storage path %r is a file, a directory was '
                      'expected' % path)
    else:
        return path
OSError
dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/tools/devappserver2/devappserver2.py/_get_storage_path
def __call__(self, value):
    try:
        port = int(value)
    except __HOLE__:
        raise argparse.ArgumentTypeError('Invalid port: %r' % value)
    if port < self._min_port or port >= (1 << 16):
        raise argparse.ArgumentTypeError('Invalid port: %d' % port)
    return port
ValueError
dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/tools/devappserver2/devappserver2.py/PortParser.__call__
def parse_per_module_option( value, value_type, value_predicate, single_bad_type_error, single_bad_predicate_error, multiple_bad_type_error, multiple_bad_predicate_error, multiple_duplicate_module_error): """Parses command line options that may be specified per-module. Args: value: A str containing the flag value to parse. Two formats are supported: 1. A universal value (may not contain a colon as that is use to indicate a per-module value). 2. Per-module values. One or more comma separated module-value pairs. Each pair is a module_name:value. An empty module-name is shorthand for "default" to match how not specifying a module name in the yaml is the same as specifying "module: default". value_type: a callable that converts the string representation of the value to the actual value. Should raise ValueError if the string can not be converted. value_predicate: a predicate to call on the converted value to validate the converted value. Use "lambda _: True" if all values are valid. single_bad_type_error: the message to use if a universal value is provided and value_type throws a ValueError. The message must consume a single format parameter (the provided value). single_bad_predicate_error: the message to use if a universal value is provided and value_predicate returns False. The message does not get any format parameters. multiple_bad_type_error: the message to use if a per-module value either does not have two values separated by a single colon or if value_types throws a ValueError on the second string. The message must consume a single format parameter (the module_name:value pair). multiple_bad_predicate_error: the message to use if a per-module value if value_predicate returns False. The message must consume a single format parameter (the module name). multiple_duplicate_module_error: the message to use if the same module is repeated. The message must consume a single formater parameter (the module name). Returns: Either a single value of value_type for universal values or a dict of str->value_type for per-module values. Raises: argparse.ArgumentTypeError: the value is invalid. """ if ':' not in value: try: single_value = value_type(value) except ValueError: raise argparse.ArgumentTypeError(single_bad_type_error % value) else: if not value_predicate(single_value): raise argparse.ArgumentTypeError(single_bad_predicate_error) return single_value else: module_to_value = {} for module_value in value.split(','): try: module_name, single_value = module_value.split(':') single_value = value_type(single_value) except __HOLE__: raise argparse.ArgumentTypeError(multiple_bad_type_error % module_value) else: module_name = module_name.strip() if not module_name: module_name = appinfo.DEFAULT_MODULE if module_name in module_to_value: raise argparse.ArgumentTypeError( multiple_duplicate_module_error % module_name) if not value_predicate(single_value): raise argparse.ArgumentTypeError( multiple_bad_predicate_error % module_name) module_to_value[module_name] = single_value return module_to_value
ValueError
dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/tools/devappserver2/devappserver2.py/parse_per_module_option
def _clear_datastore_storage(datastore_path):
    """Delete the datastore storage file at the given path."""
    # lexists() returns True for broken symlinks, where exists() returns False.
    if os.path.lexists(datastore_path):
        try:
            os.remove(datastore_path)
        except __HOLE__, e:
            logging.warning('Failed to remove datastore file %r: %s',
                            datastore_path,
                            e)
OSError
dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/tools/devappserver2/devappserver2.py/_clear_datastore_storage
def _clear_prospective_search_storage(prospective_search_path):
    """Delete the perspective search storage file at the given path."""
    # lexists() returns True for broken symlinks, where exists() returns False.
    if os.path.lexists(prospective_search_path):
        try:
            os.remove(prospective_search_path)
        except __HOLE__, e:
            logging.warning('Failed to remove prospective search file %r: %s',
                            prospective_search_path,
                            e)
OSError
dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/tools/devappserver2/devappserver2.py/_clear_prospective_search_storage
def _clear_search_indexes_storage(search_index_path):
    """Delete the search indexes storage file at the given path."""
    # lexists() returns True for broken symlinks, where exists() returns False.
    if os.path.lexists(search_index_path):
        try:
            os.remove(search_index_path)
        except __HOLE__, e:
            logging.warning('Failed to remove search indexes file %r: %s',
                            search_index_path,
                            e)
OSError
dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/tools/devappserver2/devappserver2.py/_clear_search_indexes_storage
def delete(self, name):
    try:
        del self.values[name]
    except __HOLE__:
        pass
    else:
        self.mutated()
KeyError
dataset/ETHPy150Open topazproject/topaz/topaz/celldict.py/CellDict.delete
def render(self, context):
    from django.utils.text import normalize_newlines
    import base64
    context.push()
    if self.obj_id_lookup_var is not None:
        try:
            self.obj_id = template.resolve_variable(self.obj_id_lookup_var, context)
        except template.VariableDoesNotExist:
            return ''
        # Validate that this object ID is valid for this content-type.
        # We only have to do this validation if obj_id_lookup_var is provided,
        # because do_comment_form() validates hard-coded object IDs.
        try:
            self.content_type.get_object_for_this_type(pk=self.obj_id)
        except __HOLE__:
            context['display_form'] = False
        else:
            context['display_form'] = True
    else:
        context['display_form'] = True
    context['target'] = '%s:%s' % (self.content_type.id, self.obj_id)
    options = []
    for var, abbr in (('photos_required', PHOTOS_REQUIRED),
                      ('photos_optional', PHOTOS_OPTIONAL),
                      ('ratings_required', RATINGS_REQUIRED),
                      ('ratings_optional', RATINGS_OPTIONAL),
                      ('is_public', IS_PUBLIC)):
        context[var] = getattr(self, var)
        if getattr(self, var):
            options.append(abbr)
    context['options'] = ','.join(options)
    if self.free:
        context['hash'] = Comment.objects.get_security_hash(context['options'], '', '', context['target'])
        default_form = loader.get_template(FREE_COMMENT_FORM)
    else:
        context['photo_options'] = self.photo_options
        context['rating_options'] = normalize_newlines(base64.encodestring(self.rating_options).strip())
        if self.rating_options:
            context['rating_range'], context['rating_choices'] = Comment.objects.get_rating_options(self.rating_options)
        context['hash'] = Comment.objects.get_security_hash(context['options'], context['photo_options'], context['rating_options'], context['target'])
        default_form = loader.get_template(COMMENT_FORM)
    output = default_form.render(context)
    context.pop()
    return output
ObjectDoesNotExist
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/contrib/comments/templatetags/comments.py/CommentFormNode.render
def __call__(self, parser, token): tokens = token.contents.split() if len(tokens) < 4: raise template.TemplateSyntaxError, "%r tag requires at least 3 arguments" % tokens[0] if tokens[1] != 'for': raise template.TemplateSyntaxError, "Second argument in %r tag must be 'for'" % tokens[0] try: package, module = tokens[2].split('.') except __HOLE__: # unpack list of wrong size raise template.TemplateSyntaxError, "Third argument in %r tag must be in the format 'package.module'" % tokens[0] try: content_type = ContentType.objects.get(app_label__exact=package, model__exact=module) except ContentType.DoesNotExist: raise template.TemplateSyntaxError, "%r tag has invalid content-type '%s.%s'" % (tokens[0], package, module) obj_id_lookup_var, obj_id = None, None if tokens[3].isdigit(): obj_id = tokens[3] try: # ensure the object ID is valid content_type.get_object_for_this_type(pk=obj_id) except ObjectDoesNotExist: raise template.TemplateSyntaxError, "%r tag refers to %s object with ID %s, which doesn't exist" % (tokens[0], content_type.name, obj_id) else: obj_id_lookup_var = tokens[3] kwargs = {} if len(tokens) > 4: if tokens[4] != 'with': raise template.TemplateSyntaxError, "Fourth argument in %r tag must be 'with'" % tokens[0] for option, args in zip(tokens[5::2], tokens[6::2]): if option in ('photos_optional', 'photos_required') and not self.free: # VALIDATION ############################################## option_list = args.split(',') if len(option_list) % 3 != 0: raise template.TemplateSyntaxError, "Incorrect number of comma-separated arguments to %r tag" % tokens[0] for opt in option_list[::3]: if not opt.isalnum(): raise template.TemplateSyntaxError, "Invalid photo directory name in %r tag: '%s'" % (tokens[0], opt) for opt in option_list[1::3] + option_list[2::3]: if not opt.isdigit() or not (MIN_PHOTO_DIMENSION <= int(opt) <= MAX_PHOTO_DIMENSION): raise template.TemplateSyntaxError, "Invalid photo dimension in %r tag: '%s'. Only values between %s and %s are allowed." % (tokens[0], opt, MIN_PHOTO_DIMENSION, MAX_PHOTO_DIMENSION) # VALIDATION ENDS ######################################### kwargs[option] = True kwargs['photo_options'] = args elif option in ('ratings_optional', 'ratings_required') and not self.free: # VALIDATION ############################################## if 2 < len(args.split('|')) > 9: raise template.TemplateSyntaxError, "Incorrect number of '%s' options in %r tag. Use between 2 and 8." % (option, tokens[0]) if re.match('^scale:\d+\-\d+\:$', args.split('|')[0]): raise template.TemplateSyntaxError, "Invalid 'scale' in %r tag's '%s' options" % (tokens[0], option) # VALIDATION ENDS ######################################### kwargs[option] = True kwargs['rating_options'] = args elif option in ('is_public'): kwargs[option] = (args == 'true') else: raise template.TemplateSyntaxError, "%r tag got invalid parameter '%s'" % (tokens[0], option) return CommentFormNode(content_type, obj_id_lookup_var, obj_id, self.free, **kwargs)
ValueError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/contrib/comments/templatetags/comments.py/DoCommentForm.__call__
def __call__(self, parser, token):
    tokens = token.contents.split()
    # Now tokens is a list like this:
    # ['get_comment_list', 'for', 'lcom.eventtimes', 'event.id', 'as', 'comment_list']
    if len(tokens) != 6:
        raise template.TemplateSyntaxError, "%r tag requires 5 arguments" % tokens[0]
    if tokens[1] != 'for':
        raise template.TemplateSyntaxError, "Second argument in %r tag must be 'for'" % tokens[0]
    try:
        package, module = tokens[2].split('.')
    except ValueError: # unpack list of wrong size
        raise template.TemplateSyntaxError, "Third argument in %r tag must be in the format 'package.module'" % tokens[0]
    try:
        content_type = ContentType.objects.get(app_label__exact=package, model__exact=module)
    except ContentType.DoesNotExist:
        raise template.TemplateSyntaxError, "%r tag has invalid content-type '%s.%s'" % (tokens[0], package, module)
    var_name, obj_id = None, None
    if tokens[3].isdigit():
        obj_id = tokens[3]
        try: # ensure the object ID is valid
            content_type.get_object_for_this_type(pk=obj_id)
        except __HOLE__:
            raise template.TemplateSyntaxError, "%r tag refers to %s object with ID %s, which doesn't exist" % (tokens[0], content_type.name, obj_id)
    else:
        var_name = tokens[3]
    if tokens[4] != 'as':
        raise template.TemplateSyntaxError, "Fourth argument in %r must be 'as'" % tokens[0]
    return CommentCountNode(package, module, var_name, obj_id, tokens[5], self.free)
ObjectDoesNotExist
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/contrib/comments/templatetags/comments.py/DoCommentCount.__call__
def __call__(self, parser, token):
        tokens = token.contents.split()
        # Now tokens is a list like this:
        # ['get_comment_list', 'for', 'lcom.eventtimes', 'event.id', 'as', 'comment_list']
        if not len(tokens) in (6, 7):
            raise template.TemplateSyntaxError, "%r tag requires 5 or 6 arguments" % tokens[0]
        if tokens[1] != 'for':
            raise template.TemplateSyntaxError, "Second argument in %r tag must be 'for'" % tokens[0]
        try:
            package, module = tokens[2].split('.')
        except ValueError: # unpack list of wrong size
            raise template.TemplateSyntaxError, "Third argument in %r tag must be in the format 'package.module'" % tokens[0]
        try:
            content_type = ContentType.objects.get(app_label__exact=package,model__exact=module)
        except ContentType.DoesNotExist:
            raise template.TemplateSyntaxError, "%r tag has invalid content-type '%s.%s'" % (tokens[0], package, module)
        var_name, obj_id = None, None
        if tokens[3].isdigit():
            obj_id = tokens[3]
            try: # ensure the object ID is valid
                content_type.get_object_for_this_type(pk=obj_id)
            except __HOLE__:
                raise template.TemplateSyntaxError, "%r tag refers to %s object with ID %s, which doesn't exist" % (tokens[0], content_type.name, obj_id)
        else:
            var_name = tokens[3]
        if tokens[4] != 'as':
            raise template.TemplateSyntaxError, "Fourth argument in %r must be 'as'" % tokens[0]
        if len(tokens) == 7:
            if tokens[6] != 'reversed':
                raise template.TemplateSyntaxError, "Final argument in %r must be 'reversed' if given" % tokens[0]
            ordering = "-"
        else:
            ordering = ""
        return CommentListNode(package, module, var_name, obj_id, tokens[5], self.free, ordering)

# registration comments
ObjectDoesNotExist
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/contrib/comments/templatetags/comments.py/DoGetCommentList.__call__
def __call__(self, admin_class, request, queryset):
        errors = defaultdict(list)
        successfully_executed = []
        for instance in queryset:
            try:
                self.validate(instance)
            except __HOLE__ as e:
                errors[str(e)].append(instance)
            else:
                self.executor.execute(instance)
                successfully_executed.append(instance)

        if successfully_executed:
            message = 'Operation was successfully scheduled for %s instances: %s' % (
                len(successfully_executed), ', '.join([str(i) for i in successfully_executed]))
            admin_class.message_user(request, message)

        for error, instances in errors.items():
            message = 'Failed to schedule operation for %s instances: %s. Error: %s' % (
                len(instances), ', '.join([str(i) for i in instances]), error)
            admin_class.message_user(request, message, level=messages.ERROR)
ValidationError
dataset/ETHPy150Open opennode/nodeconductor/nodeconductor/core/admin.py/ExecutorAdminAction.__call__
def doPrivmsg(self, irc, msg):
        if ircmsgs.isCtcp(msg) and not ircmsgs.isAction(msg):
            return
        if irc.isChannel(msg.args[0]):
            channel = msg.args[0]
            said = ircmsgs.prettyPrint(msg)
            self.db.update(channel, msg.nick, said)
            self.anydb.update(channel, msg.nick, said)
            try:
                id = ircdb.users.getUserId(msg.prefix)
                self.db.update(channel, id, said)
                self.anydb.update(channel, id, said)
            except __HOLE__:
                pass # Not in the database.
KeyError
dataset/ETHPy150Open ProgVal/Limnoria/plugins/Seen/plugin.py/Seen.doPrivmsg
def doPart(self, irc, msg):
        channel = msg.args[0]
        said = ircmsgs.prettyPrint(msg)
        self.anydb.update(channel, msg.nick, said)
        try:
            id = ircdb.users.getUserId(msg.prefix)
            self.anydb.update(channel, id, said)
        except __HOLE__:
            pass # Not in the database.
KeyError
dataset/ETHPy150Open ProgVal/Limnoria/plugins/Seen/plugin.py/Seen.doPart
def doQuit(self, irc, msg):
        said = ircmsgs.prettyPrint(msg)
        try:
            id = ircdb.users.getUserId(msg.prefix)
        except __HOLE__:
            id = None # Not in the database.
        for channel in msg.tagged('channels'):
            self.anydb.update(channel, msg.nick, said)
            if id is not None:
                self.anydb.update(channel, id, said)
KeyError
dataset/ETHPy150Open ProgVal/Limnoria/plugins/Seen/plugin.py/Seen.doQuit
def doMode(self, irc, msg):
        # Filter out messages from network Services
        if msg.nick:
            try:
                id = ircdb.users.getUserId(msg.prefix)
            except __HOLE__:
                id = None # Not in the database.
            channel = msg.args[0]
            said = ircmsgs.prettyPrint(msg)
            self.anydb.update(channel, msg.nick, said)
            if id is not None:
                self.anydb.update(channel, id, said)
KeyError
dataset/ETHPy150Open ProgVal/Limnoria/plugins/Seen/plugin.py/Seen.doMode
def _seen(self, irc, channel, name, any=False):
        if any:
            db = self.anydb
        else:
            db = self.db
        try:
            results = []
            if '*' in name:
                if (len(name.replace('*', '')) <
                        self.registryValue('minimumNonWildcard', channel)):
                    irc.error(_('Not enough non-wildcard characters.'),
                              Raise=True)
                results = db.seenWildcard(channel, name)
            else:
                results = [[name, db.seen(channel, name)]]
            if len(results) == 1:
                (nick, info) = results[0]
                (when, said) = info
                reply = format(_('%s was last seen in %s %s ago'),
                               nick, channel,
                               utils.timeElapsed(time.time()-when))
                if self.registryValue('showLastMessage', channel):
                    if minisix.PY2:
                        said = said.decode('utf8')
                    reply = _('%s: %s') % (reply, said)
                irc.reply(reply)
            elif len(results) > 1:
                L = []
                for (nick, info) in results:
                    (when, said) = info
                    L.append(format(_('%s (%s ago)'), nick,
                                    utils.timeElapsed(time.time()-when)))
                irc.reply(format(_('%s could be %L'), name, (L, _('or'))))
            else:
                irc.reply(format(_('I haven\'t seen anyone matching %s.'),
                                 name))
        except __HOLE__:
            irc.reply(format(_('I have not seen %s.'), name))
KeyError
dataset/ETHPy150Open ProgVal/Limnoria/plugins/Seen/plugin.py/Seen._seen
def _last(self, irc, channel, any=False):
        if any:
            db = self.anydb
        else:
            db = self.db
        try:
            (when, said) = db.seen(channel, '<last>')
            reply = format(_('Someone was last seen in %s %s ago'),
                           channel, utils.timeElapsed(time.time()-when))
            if self.registryValue('showLastMessage', channel):
                reply = _('%s: %s') % (reply, said)
            irc.reply(reply)
        except __HOLE__:
            irc.reply(_('I have never seen anyone.'))
KeyError
dataset/ETHPy150Open ProgVal/Limnoria/plugins/Seen/plugin.py/Seen._last
def _user(self, irc, channel, user, any=False):
        if any:
            db = self.anydb
        else:
            db = self.db
        try:
            (when, said) = db.seen(channel, user.id)
            reply = format(_('%s was last seen in %s %s ago'),
                           user.name, channel,
                           utils.timeElapsed(time.time()-when))
            if self.registryValue('showLastMessage', channel):
                reply = _('%s: %s') % (reply, said)
            irc.reply(reply)
        except __HOLE__:
            irc.reply(format(_('I have not seen %s.'), user.name))
KeyError
dataset/ETHPy150Open ProgVal/Limnoria/plugins/Seen/plugin.py/Seen._user
def AppendSource(self, type_indicator, attributes):
        """Appends a source.

        If you want to implement your own source type you should create a subclass
        in source_type.py and change the AppendSource method to handle the new
        subclass. This function raises FormatError if an unsupported source type
        indicator is encountered.

        Args:
          type_indicator: the source type indicator.
          attributes: a dictionary containing the source attributes.

        Returns:
          The source type object (instance of SourceType) or None if the type
          indicator is not supported.

        Raises:
          FormatError: if the type indicator is not set or unsupported,
                       or if required attributes are missing.
        """
        if not type_indicator:
            raise errors.FormatError(u'Missing type indicator.')

        source_type_class = None
        if type_indicator == definitions.TYPE_INDICATOR_ARTIFACT_GROUP:
            source_type_class = source_type.ArtifactGroupSourceType
        elif type_indicator == definitions.TYPE_INDICATOR_COMMAND:
            source_type_class = source_type.CommandSourceType
        elif type_indicator == definitions.TYPE_INDICATOR_COMMAND:
            source_type_class = source_type.CommandCollectorDefinition
        elif type_indicator == definitions.TYPE_INDICATOR_DIRECTORY:
            source_type_class = source_type.DirectorySourceType
        elif type_indicator == definitions.TYPE_INDICATOR_FILE:
            source_type_class = source_type.FileSourceType
        elif type_indicator == definitions.TYPE_INDICATOR_PATH:
            source_type_class = source_type.PathSourceType
        elif type_indicator == definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY:
            source_type_class = source_type.WindowsRegistryKeySourceType
        elif type_indicator == definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE:
            source_type_class = source_type.WindowsRegistryValueSourceType
        elif type_indicator == definitions.TYPE_INDICATOR_WMI_QUERY:
            source_type_class = source_type.WMIQuerySourceType
        else:
            raise errors.FormatError(
                u'Unsupported type indicator: {0}.'.format(type_indicator))

        try:
            source_object = source_type_class(**attributes)
        except (__HOLE__, AttributeError) as e:
            raise errors.FormatError(
                "Invalid artifact definition for {0}: {1}".format(self.name, e))

        self.sources.append(source_object)
        return source_object
TypeError
dataset/ETHPy150Open ForensicArtifacts/artifacts/artifacts/artifact.py/ArtifactDefinition.AppendSource
def find_duplicates(files):
    primary = files[0]
    others = files[1:len(files)]
    for package in primary['packages'].copy():
        for other in others:
            otherpacks = other['packages']
            try:
                dupe = otherpacks[package]
            except __HOLE__:
                pass
            else:
                this = {
                    'file': primary['name'],
                    'package': package,
                    'version': primary['packages'][package]
                }
                that = {
                    'file': other['name'],
                    'package': package,
                    'version': other['packages'][package]
                }
                click.echo(
                    "Found {} in {} and {}".format(
                        package, this['file'], that['file']))
                value = click.prompt(
                    """[0]Keep both\n[1] Keep {} in {}\n[2] Keep {} in {}\n""".format(
                        this['version'], this['file'],
                        that['version'], that['file']),
                    type=int
                )
                if value == 1:
                    del otherpacks[package]
                    other['updated'].append(package)
                    other['removed'].append(package)
                elif value == 2:
                    del primary['packages'][package]
                    primary['updated'].append(package)
                    primary['removed'].append(package)
KeyError
dataset/ETHPy150Open hactar-is/olaf/olaf/cli.py/find_duplicates
def add_installed(files, unreqed):
    unique = [dict(y) for y in set(tuple(x.items()) for x in unreqed)]
    for package_d in unique:
        for package, version in package_d.items():
            # import pdb; pdb.set_trace()
            click.echo(
                u'{} is installed but not in any requirements file'.format(
                    package))
            pstr = '[0] Ignore\n'
            for index, rf in enumerate(files):
                pstr += '[{}] Add to {}\n'.format(index + 1, rf['name'])
            value = click.prompt(pstr, type=int)
            try:
                req_file = files[value - 1]
            except __HOLE__:
                click.echo('Not a valid selection soz.')
            else:
                req_file['packages'][package] = version
                req_file['updated'].append(package)
IndexError
dataset/ETHPy150Open hactar-is/olaf/olaf/cli.py/add_installed
def jam(filename):
    packages = {}
    try:
        with open(filename, 'r') as infile:
            infile.seek(0)
            for line in infile.readlines():
                if line.startswith(u'-e '):
                    package, version = (line.strip(), '')
                    packages[package] = version
                elif '==' in line:
                    package, version = line.strip().split(u'==')
                    packages[package] = version
                else:
                    packages[line.strip()] = 'latest'
    except __HOLE__:
        click.echo('File {} not found.'.format(filename))
    infile.close()
    return packages
IOError
dataset/ETHPy150Open hactar-is/olaf/olaf/cli.py/jam
def rewrite(req):
    try:
        with open(req['name'], 'w') as outfile:
            outfile.seek(0)
            outfile.truncate()
            packages = collections.OrderedDict(
                sorted(req['packages'].items(), key=lambda t: str(t[0]))
            )
            for k, v in packages.items():
                outfile.write('{}\n'.format(lineout(str(k), str(v))))
    except __HOLE__:
        click.echo('File {} not found.'.format(req['name']))
    outfile.close()
    click.echo('Updated {} with packages \n\t{}'.format(
        req['name'], '\n\t'.join([str(_) for _ in req['updated']])))
    for item in req['removed']:
        click.echo(click.style('# Removed package {}'.format(item), fg='yellow'))
IOError
dataset/ETHPy150Open hactar-is/olaf/olaf/cli.py/rewrite
def create(vm_):
    '''
    Create a single VM from a data dict
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'vsphere',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except __HOLE__:
        pass

    # Since using "provider: <provider-engine>" is deprecated, alias provider
    # to use driver: "driver: <provider-engine>"
    if 'provider' in vm_:
        vm_['driver'] = vm_.pop('provider')

    salt.utils.cloud.fire_event(
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        transport=__opts__['transport']
    )

    log.info('Creating Cloud VM {0}'.format(vm_['name']))
    conn = get_conn()

    salt.utils.cloud.fire_event(
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        {'kwargs': vm_},
        transport=__opts__['transport']
    )

    folder = config.get_cloud_config_value(
        'folder', vm_, __opts__, default=None
    )
    resourcepool = config.get_cloud_config_value(
        'resourcepool', vm_, __opts__, default=None
    )
    datastore = config.get_cloud_config_value(
        'datastore', vm_, __opts__, default=None
    )
    host = config.get_cloud_config_value(
        'host', vm_, __opts__, default=None
    )
    template = config.get_cloud_config_value(
        'template', vm_, __opts__, default=False
    )

    clone_kwargs = {
        'name': vm_['name'],
        'folder': folder,
        'resourcepool': resourcepool,
        'datastore': datastore,
        'host': host,
        'template': template,
    }
    log.debug('clone_kwargs are set to {0}'.format(
        pprint.pformat(clone_kwargs))
    )

    try:
        template = conn.get_vm_by_name(vm_['image'])
        new_instance = template.clone(**clone_kwargs)
        data = new_instance.get_properties()  # pylint: disable=W0612
    except Exception as exc:  # pylint: disable=W0703
        log.error(
            'Error creating {0} on vSphere\n\n'
            'The following exception was thrown when trying to '
            'run the initial deployment: \n{1}'.format(
                vm_['name'], str(exc)
            ),
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG
        )
        return False

    deploy_kwargs = None
    if config.get_cloud_config_value('deploy', vm_, __opts__) is True:
        deploy_kwargs = _deploy(vm_)

    ret = show_instance(name=vm_['name'], call='action')
    show_deploy_args = config.get_cloud_config_value(
        'show_deploy_args', vm_, __opts__, default=False
    )
    if show_deploy_args:
        ret['deploy_kwargs'] = deploy_kwargs

    log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
    log.debug(
        '\'{0[name]}\' VM creation details:\n{1}'.format(
            vm_, pprint.pformat(ret)
        )
    )

    salt.utils.cloud.fire_event(
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        transport=__opts__['transport']
    )

    return ret
AttributeError
dataset/ETHPy150Open saltstack/salt/salt/cloud/clouds/vsphere.py/create