Dataset columns:

  function — string, 79 to 138k characters: Python source code in which one
      exception type has been masked with the placeholder __HOLE__
  label — string, 20 classes: the exception class name that fills __HOLE__
  info — string, 42 to 261 characters: dataset path identifying the source
      repository, file, and function
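Each record below is printed as a function / label / info triple. A minimal sketch of how such records could be consumed, assuming they are stored as JSON lines with exactly those three fields (the file name records.jsonl is hypothetical):

import json

# Hypothetical file name; the three field names come from the schema above.
with open("records.jsonl") as f:
    for line in f:
        record = json.loads(line)
        masked_source = record["function"]   # contains the __HOLE__ placeholder
        answer = record["label"]             # e.g. "KeyError"
        origin = record["info"]              # repo/file/function path
        # A trivial baseline would predict the most common label for an
        # "except __HOLE__:" site; real models condition on the context.
        assert "__HOLE__" in masked_source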
def _request(self, line, f):
    args = line.split(None, 2)
    try:
        peer = args[0]
        method = args[1]
        params = eval(args[2])
    except:
        print("argument error")
        return
    try:
        p = peers[peer]
    except __HOLE__:
        print("unknown peer %s" % peer)
        return
    try:
        f(p, method, params)
    except rpc.RPCError as e:
        print("RPC ERROR %s" % e)
    except EOFError:
        print("disconnected")
KeyError
dataset/ETHPy150Open osrg/ryu/ryu/cmd/rpc_cli.py/Cmd._request
@messaging.expected_exceptions(exception.InstanceNotFound)
def get_instance_nw_info(self, context, instance_id, rxtx_factor,
                         host, instance_uuid=None, **kwargs):
    """Creates network info list for instance.

    called by allocate_for_instance and network_api
    context needs to be elevated
    :returns: network info list [(network,info),(network,info)...]
    where network = dict containing pertinent data from a network db object
    and info = dict containing pertinent networking data
    """
    if not uuidutils.is_uuid_like(instance_id):
        instance_id = instance_uuid
        instance_uuid = instance_id

    LOG.debug('Get instance network info', instance_uuid=instance_uuid)

    try:
        fixed_ips = objects.FixedIPList.get_by_instance_uuid(
            context, instance_uuid)
    except exception.FixedIpNotFoundForInstance:
        fixed_ips = []

    LOG.debug('Found %d fixed IPs associated to the instance in the '
              'database.',
              len(fixed_ips), instance_uuid=instance_uuid)

    nw_info = network_model.NetworkInfo()

    vifs = collections.OrderedDict()
    for fixed_ip in fixed_ips:
        vif = fixed_ip.virtual_interface
        if not vif:
            LOG.warning(_LW('No VirtualInterface for FixedIP: %s'),
                        str(fixed_ip.address), instance_uuid=instance_uuid)
            continue

        if not fixed_ip.network:
            LOG.warning(_LW('No Network for FixedIP: %s'),
                        str(fixed_ip.address), instance_uuid=instance_uuid)
            continue

        if vif.uuid in vifs:
            current = vifs[vif.uuid]
        else:
            current = {
                'id': vif.uuid,
                'type': network_model.VIF_TYPE_BRIDGE,
                'address': vif.address,
            }
            vifs[vif.uuid] = current

            net_dict = self._get_network_dict(fixed_ip.network)
            network = network_model.Network(**net_dict)
            subnets = self._get_subnets_from_network(context,
                                                     fixed_ip.network, host)
            network['subnets'] = subnets
            current['network'] = network
            try:
                current['rxtx_cap'] = (fixed_ip.network['rxtx_base'] *
                                       rxtx_factor)
            except (__HOLE__, KeyError):
                pass

        if fixed_ip.network.cidr_v6 and vif.address:
            # NOTE(vish): I strongly suspect the v6 subnet is not used
            #             anywhere, but support it just in case
            # add the v6 address to the v6 subnet
            address = ipv6.to_global(fixed_ip.network.cidr_v6,
                                     vif.address,
                                     fixed_ip.network.project_id)
            model_ip = network_model.FixedIP(address=address)
            current['network']['subnets'][1]['ips'].append(model_ip)

        # add the v4 address to the v4 subnet
        model_ip = network_model.FixedIP(address=str(fixed_ip.address))
        for ip in fixed_ip.floating_ips:
            floating_ip = network_model.IP(address=str(ip['address']),
                                           type='floating')
            model_ip.add_floating_ip(floating_ip)
        current['network']['subnets'][0]['ips'].append(model_ip)

    for vif in vifs.values():
        nw_info.append(network_model.VIF(**vif))

    LOG.debug('Built network info: |%s|',
              nw_info, instance_uuid=instance_uuid)
    return nw_info
TypeError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/network/manager.py/NetworkManager.get_instance_nw_info
def _allocate_mac_addresses(self, context, instance_uuid, networks, macs):
    """Generates mac addresses and creates vif rows in db for them."""
    # make a copy we can mutate
    if macs is not None:
        available_macs = set(macs)

    for network in networks:
        if macs is None:
            self._add_virtual_interface(context, instance_uuid,
                                        network['id'])
        else:
            try:
                mac = available_macs.pop()
            except __HOLE__:
                raise exception.VirtualInterfaceCreateException()
            self._add_virtual_interface(context, instance_uuid,
                                        network['id'], mac)
KeyError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/network/manager.py/NetworkManager._allocate_mac_addresses
@staticmethod
def _convert_int_args(kwargs):
    int_args = ("network_size", "num_networks", "vlan_start", "vpn_start")
    for key in int_args:
        try:
            value = kwargs.get(key)
            if value is None:
                continue
            kwargs[key] = int(value)
        except __HOLE__:
            raise exception.InvalidIntValue(key=key)
ValueError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/network/manager.py/NetworkManager._convert_int_args
def create_networks(self, context, **kwargs):
    """Create networks based on parameters."""
    self._convert_int_args(kwargs)

    kwargs["vlan_start"] = kwargs.get("vlan_start") or CONF.vlan_start
    kwargs["num_networks"] = (kwargs.get("num_networks") or
                              CONF.num_networks)
    kwargs["network_size"] = (kwargs.get("network_size") or
                              CONF.network_size)

    # Check that num_networks + vlan_start is not > 4094, fixes lp708025
    if kwargs["num_networks"] + kwargs["vlan_start"] > 4094:
        raise ValueError(_('The sum between the number of networks and'
                           ' the vlan start cannot be greater'
                           ' than 4094'))

    # Check that vlan is not greater than 4094 or less then 1
    vlan_num = kwargs.get("vlan", None)
    if vlan_num is not None:
        try:
            vlan_num = int(vlan_num)
        except __HOLE__:
            raise ValueError(_("vlan must be an integer"))
        if vlan_num > 4094:
            raise ValueError(_('The vlan number cannot be greater than'
                               ' 4094'))
        if vlan_num < 1:
            raise ValueError(_('The vlan number cannot be less than 1'))

    # check that num networks and network size fits in fixed_net
    fixed_net = netaddr.IPNetwork(kwargs['cidr'])
    if fixed_net.size < kwargs['num_networks'] * kwargs['network_size']:
        raise ValueError(_('The network range is not '
                           'big enough to fit %(num_networks)s networks. Network '
                           'size is %(network_size)s') % kwargs)

    kwargs['bridge_interface'] = (kwargs.get('bridge_interface') or
                                  CONF.vlan_interface)
    LOG.debug('Create network: |%s|', kwargs)
    return NetworkManager.create_networks(
        self, context, vpn=True, **kwargs)
ValueError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/network/manager.py/VlanManager.create_networks
def getProcessResourceUsage(self, pid_list):
    try:
        results = {}
        for pid in pid_list:
            result = {}
            results[str(pid)] = result
            try:
                proc = psutil.Process(pid)
            except:
                LOG.exception("Process %s:", pid)
                continue
            result["fileno"] = proc.num_fds()
            try:
                proc.rlimit
            except __HOLE__:
                max_fileno = -1
                max_vmsize = -1
            else:
                max_fileno = proc.rlimit(psutil.RLIMIT_NOFILE)[0]
                max_vmsize = proc.rlimit(psutil.RLIMIT_AS)[0]
            if max_fileno != -1:
                result["max_fileno"] = max_fileno
            result["numconnections"] = len(proc.connections('all'))
            result["numfiles"] = len(proc.open_files())
            if max_vmsize != -1:
                result["max_vmsize"] = str(max_vmsize)
            result["vmsize"] = str(proc.memory_info()[1])
            result["numchildren"] = len(proc.children())
            result["numthreads"] = proc.num_threads()
            result["cpu"] = ",".join(
                str(x) for x in [time.time()] + list(proc.cpu_times()))
            result["diskio"] = ",".join(
                str(x) for x in [time.time()] + list(proc.io_counters()))
        return results
    except:
        LOG.exception("Error")
        return {}
AttributeError
dataset/ETHPy150Open GambitResearch/suponoff/suponoff-monhelper.py/MonHelperRPCInterface.getProcessResourceUsage
def tailFile(filename, offset, length):
    """
    Read length bytes from the file named by filename starting at
    offset, automatically increasing offset and setting overflow
    flag if log size has grown beyond (offset + length).  If length
    bytes are not available, as many bytes as are available are returned.
    """
    overflow = False
    try:
        f = open(filename, 'rb')
        f.seek(0, 2)
        sz = f.tell()

        if sz > (offset + length):
            overflow = True
            offset = sz - 1

        if (offset + length) > sz:
            if offset > (sz - 1):
                length = 0
            offset = sz - length

        if offset < 0:
            offset = 0
        if length < 0:
            length = 0

        if length == 0:
            data = ''
        else:
            f.seek(offset)
            data = f.read(length)

        offset = sz
        return [data, offset, overflow]
    except (OSError, __HOLE__):
        return ['', offset, False]

# copied from supervisor.options
IOError
dataset/ETHPy150Open GambitResearch/suponoff/suponoff-monhelper.py/tailFile
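tailFile implements a resumable cursor: feed the returned offset back in and each call yields only bytes appended since the previous one. A minimal sketch of driving it, assuming a log file named app.log exists:

offset = 0
while True:
    data, offset, overflow = tailFile("app.log", offset, 1024)
    if overflow:
        pass  # more than 1024 new bytes arrived; older bytes were skipped
    if data:
        print(data)
    break  # a real poller would sleep and loop instead of breaking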
def __init__(self, master):
    self.master = master
    self.emulator = None
    self.conf_dict = {}
    self.conf_name = tk.StringVar()
    self.conf_frame = None
    master.title("Wireless Network Reproduction")
    master.protocol("WM_DELETE_WINDOW", self.exit_func)
    # first check root privilege
    if os.getuid() != 0:
        self.master.withdraw()
        showerror('Privilege Error', 'You should run this program as root.')
        self.master.destroy()
        return
    # then find the currently active network interface
    self.iface = '<network device name>'
    self.decide_iface()
    self.default_rule = 'ip from any to any via %s' % self.iface
    self.inbound_list = []
    self.outbound_list = []
    self.filter_str = tk.StringVar(value=self.default_rule)
    self.proc_str = tk.StringVar(value=self.prompt_str)
    self.dev_str = tk.StringVar()
    self.dump_pos = tk.StringVar()
    self.divert_unknown = tk.IntVar(value=1)
    self.start_btn = None
    self.filter_entry = None
    self.proc_entry = None
    self.dev_entry = None
    self.mode = tk.IntVar(self.LOCAL_MODE)
    self.init_GUI()
    try:
        Emulator()
    except __HOLE__:
        def close_func():
            self.master.quit()
            self.master.destroy()
        self.master.withdraw()
        top = tk.Toplevel(self.master)
        top.title('Kernel Extension Error')
        tk.Message(top, text=self.kext_errmsg)\
            .pack(side=tk.TOP, fill=tk.BOTH, expand=True)
        tk.Button(top, text="Close", command=close_func).pack(side=tk.TOP)
        top.protocol("WM_DELETE_WINDOW", close_func)
    except Exception as e:
        self.master.withdraw()
        showerror('Emulator Loading Error', e.message)
        self.master.destroy()
OSError
dataset/ETHPy150Open FinalTheory/wireless-network-reproduction/macdivert/emulator.py/EmulatorGUI.__init__
def encode(self, domain, attribute, value):
    try:
        field = self.fields[attribute]
    except __HOLE__:
        return value
    else:
        return field.encode(value)
KeyError
dataset/ETHPy150Open saymedia/python-simpledb/simpledb/models.py/FieldEncoder.encode
def decode(self, domain, attribute, value):
    try:
        field = self.fields[attribute]
    except __HOLE__:
        return value
    else:
        return field.decode(value)
KeyError
dataset/ETHPy150Open saymedia/python-simpledb/simpledb/models.py/FieldEncoder.decode
def _get_service_file(squadron_dir, service_name, service_ver, filename,
                      on_error=None, config=None):
    """
    Grabs the named service file in a service directory

    Keyword arguments:
        squadron_dir -- base directory
        service_name -- the name of the service
        service_ver -- the version of the service
        filename -- the name of the service file without the extension
        on_error -- if not None, returned instead of raising an error
        config -- if a dict, uses it to template the file before loading it
    """
    ex = None
    for ext in extensions:
        try:
            serv_dir = os.path.join(squadron_dir, 'services',
                                    service_name, service_ver)
            service_file = os.path.join(serv_dir, filename + ext)
            if config:
                loader = FileLoader(squadron_dir)
                template = loader.load_template(service_file)
                return yaml.load(template.render(config, loader=loader))
            else:
                with open(service_file, 'r') as sfile:
                    return yaml.load(sfile.read())
        except (__HOLE__, IOError) as e:
            if e.errno == errno.ENOENT:
                ex = e
            else:
                raise e
    if on_error is not None:
        return on_error
    raise ex
OSError
dataset/ETHPy150Open gosquadron/squadron/squadron/commit.py/_get_service_file
def _get_config(conf_dir, service):
    """
    Gets the service configuration

    Keyword arguments:
        conf_dir -- the location of the configuration directory
        service -- the name of the service
    """
    ex = None
    for ext in extensions:
        try:
            with open(os.path.join(conf_dir, service + ext), 'r') as cfile:
                return yaml.load(cfile.read())
        except (__HOLE__, IOError) as e:
            if e.errno == errno.ENOENT:
                ex = e
            else:
                raise e
    raise ex
OSError
dataset/ETHPy150Open gosquadron/squadron/squadron/commit.py/_get_config
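Both squadron helpers above share the same fallback idiom: try each known extension, remember only "file not found" (ENOENT), and re-raise anything else immediately. A self-contained sketch of that idiom, with an assumed extension list:

import errno

def load_first(basepath, extensions=('.yml', '.yaml', '.json')):
    last_missing = None
    for ext in extensions:
        try:
            with open(basepath + ext) as f:
                return f.read()
        except (OSError, IOError) as e:
            if e.errno == errno.ENOENT:
                last_missing = e   # remember, try the next extension
            else:
                raise              # permission errors etc. are real errors
    raise last_missing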
def has_key(self, key):
    try:
        value = self[key]
    except __HOLE__:
        return False
    return True
KeyError
dataset/ETHPy150Open babble/babble/include/jython/Lib/UserDict.py/DictMixin.has_key
def setdefault(self, key, default=None):
    try:
        return self[key]
    except __HOLE__:
        self[key] = default
    return default
KeyError
dataset/ETHPy150Open babble/babble/include/jython/Lib/UserDict.py/DictMixin.setdefault
def pop(self, key, *args):
    if len(args) > 1:
        raise TypeError, "pop expected at most 2 arguments, got "\
                          + repr(1 + len(args))
    try:
        value = self[key]
    except __HOLE__:
        if args:
            return args[0]
        raise
    del self[key]
    return value
KeyError
dataset/ETHPy150Open babble/babble/include/jython/Lib/UserDict.py/DictMixin.pop
def popitem(self):
    try:
        k, v = self.iteritems().next()
    except __HOLE__:
        raise KeyError, 'container is empty'
    del self[k]
    return (k, v)
StopIteration
dataset/ETHPy150Open babble/babble/include/jython/Lib/UserDict.py/DictMixin.popitem
def get(self, key, default=None):
    try:
        return self[key]
    except __HOLE__:
        return default
KeyError
dataset/ETHPy150Open babble/babble/include/jython/Lib/UserDict.py/DictMixin.get
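The five methods above come from Python 2's UserDict.DictMixin, which derives the full mapping API from a handful of primitives; the Python 3 counterpart is collections.abc.MutableMapping. A minimal sketch of the modern equivalent:

from collections.abc import MutableMapping

class LowerDict(MutableMapping):
    """Dict with case-insensitive string keys."""
    def __init__(self):
        self._data = {}
    def __getitem__(self, key):
        return self._data[key.lower()]
    def __setitem__(self, key, value):
        self._data[key.lower()] = value
    def __delitem__(self, key):
        del self._data[key.lower()]
    def __iter__(self):
        return iter(self._data)
    def __len__(self):
        return len(self._data)

d = LowerDict()
d["Foo"] = 1
assert d.get("FOO") == 1          # get/pop/setdefault come for free
assert d.pop("foo", None) == 1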
def delete_files(main_window, forever):
    """Delete the selected files."""
    iconview = main_window.iconview
    selected = iconview.get_selected_items()
    if selected and len(selected) > 1:
        if forever:
            message = _('Would you like to permanently delete the '
                        'selected images?')
        else:
            message = _('Would you like to delete the selected images?')
        dlg = gtk.MessageDialog(type=gtk.MESSAGE_QUESTION,
                                buttons=gtk.BUTTONS_YES_NO,
                                message_format=message)
        response = dlg.run()
        dlg.destroy()
        if response != gtk.RESPONSE_YES:
            return
    banned = open(os.path.expanduser('~/.webilder/banned_photos'), 'a')
    model = iconview.get_model()
    monitor = main_window.collection_monitor
    if monitor['monitor'] is not None:
        gnomevfs.monitor_cancel(monitor['monitor'])
        monitor['monitor'] = None
    for path in selected:
        iterator = model.get_iter(path)
        data = model.get_value(iterator, IV_DATA_COLUMN)
        for fname in (data['filename'], data['info_file'], data['thumb']):
            try:
                os.remove(fname)
            except (IOError, __HOLE__):
                pass
        if forever:
            banned.write(os.path.basename(data['filename']) + '\n')
        model.remove(iterator)
    if monitor['dir']:
        monitor['monitor'] = gnomevfs.monitor_add(
            monitor['dir'],
            gnomevfs.MONITOR_DIRECTORY,
            main_window.collection_directory_changed)
    banned.close()
OSError
dataset/ETHPy150Open thesamet/webilder/src/webilder/WebilderDesktop.py/delete_files
def import_files(files):
    success_count = 0
    for afile in files:
        try:
            success_count += wbz_handler.handle_file(afile)
        except (__HOLE__, KeyError, ValueError), e:
            mbox = gtk.MessageDialog(type=gtk.MESSAGE_ERROR,
                                     buttons=gtk.BUTTONS_OK)
            mbox.set_title(_("File import error."))
            mbox.set_markup(_("Could not import '%s': %s") % (afile, e))
            mbox.run()
            mbox.destroy()
    if success_count:
        mbox = gtk.MessageDialog(type=gtk.MESSAGE_INFO,
                                 buttons=gtk.BUTTONS_OK)
        mbox.set_title(_("Import complete."))
        mbox.set_markup(
            _("%d photos have been added to your collection.") %
            success_count)
        mbox.run()
        mbox.destroy()
IOError
dataset/ETHPy150Open thesamet/webilder/src/webilder/WebilderDesktop.py/import_files
def test_lastWriteReceived(self):
    """
    Verify that a write made directly to stdout using L{os.write}
    after StandardIO has finished is reliably received by the process
    reading that stdout.
    """
    p = StandardIOTestProcessProtocol()

    # Note: the OS X bug which prompted the addition of this test
    # is an apparent race condition involving non-blocking PTYs.
    # Delaying the parent process significantly increases the
    # likelihood of the race going the wrong way.  If you need to
    # fiddle with this code at all, uncommenting the next line
    # will likely make your life much easier.  It is commented out
    # because it makes the test quite slow.
    # p.onConnection.addCallback(lambda ign: __import__('time').sleep(5))

    try:
        self._spawnProcess(
            p, 'stdio_test_lastwrite.py', UNIQUE_LAST_WRITE_STRING,
            usePTY=True)
    except __HOLE__, e:
        # Some platforms don't work with usePTY=True
        raise unittest.SkipTest(str(e))

    def processEnded(reason):
        """
        Asserts that the parent received the bytes written by the child
        immediately after the child starts.
        """
        self.assertTrue(
            p.data[1].endswith(UNIQUE_LAST_WRITE_STRING),
            "Received %r from child, did not find expected bytes." % (
                p.data,))
        reason.trap(error.ProcessDone)
    return self._requireFailure(p.onCompletion, processEnded)
ValueError
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/test/test_stdio.py/StandardInputOutputTestCase.test_lastWriteReceived
def _lookup_obs():
    c = None
    for mod, cls in OBS_PROVIDERS:
        m_name = 'watchdog.observers.%s' % mod
        try:
            c = import_module(cls, m_name)
        except (ImportError, __HOLE__):
            # more exceptions?
            continue
        return c
AttributeError
dataset/ETHPy150Open yildizberkay/MongoApp/libs/watchdog/observers/__init__.py/_lookup_obs
def crowding_replacement(random, population, parents, offspring, args):
    """Performs crowding replacement as a form of niching.

    This function performs crowding replacement, which means that
    the members of the population are replaced one-at-a-time with
    each of the offspring. A random sample of `crowding_distance`
    individuals is pulled from the current population, and the
    closest individual to the current offspring (where "closest"
    is determined by the `distance_function`) is replaced by that
    offspring, if the offspring is better. It is possible for one
    offspring to replace an earlier offspring in the same generation,
    given the random sample that is taken of the current survivors
    for each offspring.

    .. Arguments:
       random -- the random number generator object
       population -- the population of individuals
       parents -- the list of parent individuals
       offspring -- the list of offspring individuals
       args -- a dictionary of keyword arguments

    Optional keyword arguments in args:

    - *distance_function* -- a function that accepts two candidate
      solutions and returns the distance between them (default
      Euclidean L2 distance)
    - *crowding_distance* -- a positive integer representing the
      number of closest solutions to consider as a "crowd"
      (default 2)

    """
    def distance(x, y):
        return math.sqrt(sum([(a - b)**2 for a, b in zip(x, y)]))

    try:
        distance_function = args['distance_function']
    except __HOLE__:
        distance_function = distance
        args['distance_function'] = distance_function
    crowding_distance = args.setdefault('crowding_distance', 2)
    survivors = population
    for o in offspring:
        pool = random.sample(survivors, crowding_distance)
        closest = min(pool,
                      key=lambda x: distance_function(o.candidate,
                                                      x.candidate))
        if o > closest:
            survivors.remove(closest)
            survivors.append(o)
    return survivors


#-------------------------------------------
# Algorithm-specific Replacement Strategies
#-------------------------------------------
KeyError
dataset/ETHPy150Open aarongarrett/inspyred/inspyred/ec/replacers.py/crowding_replacement
def simulated_annealing_replacement(random, population, parents, offspring, args):
    """Replaces population using the simulated annealing schedule.

    This function performs simulated annealing replacement based
    on a temperature and a cooling rate. These can be specified
    by the keyword arguments `temperature`, which should be the
    initial temperature, and `cooling_rate`, which should be the
    coefficient by which the temperature is reduced. If these
    keyword arguments are not present, then the function will
    attempt to base the cooling schedule either on the ratio of
    evaluations to the maximum allowed evaluations or on the
    ratio of generations to the maximum allowed generations.
    Each of these ratios is of the form ``(max - current)/max``
    so that the cooling schedule moves smoothly from 1 to 0.

    .. Arguments:
       random -- the random number generator object
       population -- the population of individuals
       parents -- the list of parent individuals
       offspring -- the list of offspring individuals
       args -- a dictionary of keyword arguments

    Optional keyword arguments in args:

    - *temperature* -- the initial temperature
    - *cooling_rate* -- a real-valued coefficient in the range (0, 1)
      by which the temperature should be reduced

    """
    try:
        temp = args['temperature']
        cooling_rate = args['cooling_rate']
        temp = temp * cooling_rate
        args['temperature'] = temp
    except __HOLE__:
        try:
            num_evals = args['_ec'].num_evaluations
            max_evals = args['max_evaluations']
            temp = float(max_evals - num_evals) / float(max_evals)
        except KeyError:
            num_gens = args['_ec'].num_generations
            max_gens = args['max_generations']
            temp = 1 - float(max_gens - num_gens) / float(max_gens)

    new_pop = []
    for p, o in zip(parents, offspring):
        if o >= p:
            new_pop.append(o)
        elif temp > 0 and random.random() < math.exp(-abs(p.fitness - o.fitness) / float(temp)):
            new_pop.append(o)
        else:
            new_pop.append(p)
    return new_pop
KeyError
dataset/ETHPy150Open aarongarrett/inspyred/inspyred/ec/replacers.py/simulated_annealing_replacement
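The replacer above always keeps a non-worse offspring and keeps a worse one with probability exp(-|f_p - f_o| / T), so acceptance of downhill moves fades as the temperature T cools. A standalone sketch of that acceptance rule with illustrative numbers:

import math
import random

def accept_worse(parent_fitness, offspring_fitness, temp, rng=random):
    """Metropolis-style acceptance, matching the replacer above."""
    if offspring_fitness >= parent_fitness:
        return True                      # never reject an improvement
    if temp <= 0:
        return False                     # frozen: hill-climbing only
    delta = abs(parent_fitness - offspring_fitness)
    return rng.random() < math.exp(-delta / float(temp))

# Early on (high T) a worse candidate is often kept; late (low T) almost never.
print(math.exp(-0.5 / 1.0))   # ~0.61 acceptance chance at T=1.0
print(math.exp(-0.5 / 0.05))  # ~4.5e-5 at T=0.05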
def execute(self, argv):
    options, args = self.parseOptions(argv)

    self.setUp(options)

    if options.interactive:
        while True:
            try:
                input = raw_input(">>> ")
            except (EOFError, KeyboardInterrupt):
                self.stdout.write("\nBye.\n")
                break

            inStream = antlr3.ANTLRStringStream(input)
            self.parseStream(options, inStream)
    else:
        if options.input is not None:
            inStream = antlr3.ANTLRStringStream(options.input)
        elif len(args) == 1 and args[0] != '-':
            inStream = antlr3.ANTLRFileStream(
                args[0], encoding=options.encoding)
        else:
            inStream = antlr3.ANTLRInputStream(
                self.stdin, encoding=options.encoding)

        if options.profile:
            try:
                import cProfile as profile
            except __HOLE__:
                import profile

            profile.runctx(
                'self.parseStream(options, inStream)',
                globals(), locals(),
                'profile.dat')

            import pstats
            stats = pstats.Stats('profile.dat')
            stats.strip_dirs()
            stats.sort_stats('time')
            stats.print_stats(100)
        elif options.hotshot:
            import hotshot
            profiler = hotshot.Profile('hotshot.dat')
            profiler.runctx(
                'self.parseStream(options, inStream)',
                globals(), locals())
        else:
            self.parseStream(options, inStream)
ImportError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/antlr3/antlr3/main.py/_Main.execute
def populate_obj(self, obj, name):
    values = getattr(obj, name, None)
    try:
        ivalues = iter(values)
    except __HOLE__:
        ivalues = iter([])

    candidates = itertools.chain(ivalues, itertools.repeat(None))
    _fake = type(str('_fake'), (object, ), {})
    output = []
    for field, data in zip(self.entries, candidates):
        if not self.should_delete(field):
            fake_obj = _fake()
            fake_obj.data = data
            field.populate_obj(fake_obj, 'data')
            output.append(fake_obj.data)
    setattr(obj, name, output)
TypeError
dataset/ETHPy150Open flask-admin/flask-admin/flask_admin/model/fields.py/InlineFieldList.populate_obj
def __getitem__(self, x):
    try:
        return DataUser.__getitem__(self, x)
    except __HOLE__:
        raise Exception(
            "You attempted to get the value `%s' from `%s'. It isn't here."
            " Perhaps you misspelled the name of a Property?" % (x, self))
KeyError
dataset/ETHPy150Open openworm/PyOpenWorm/PyOpenWorm/dataObject.py/DataObject.__getitem__
def oid(identifier_or_rdf_type, rdf_type=None):
    """Create an object from its rdf type

    Parameters
    ----------
    identifier_or_rdf_type : :class:`str` or :class:`rdflib.term.URIRef`
        If `rdf_type` is provided, then this value is used as the
        identifier for the newly created object. Otherwise, this value
        will be the :attr:`rdf_type` of the object used to determine
        the Python type and the object's identifier will be randomly
        generated.
    rdf_type : :class:`str`, :class:`rdflib.term.URIRef`, :const:`False`
        If provided, this will be the :attr:`rdf_type` of the newly
        created object.

    Returns
    -------
       The newly created object
    """
    identifier = identifier_or_rdf_type
    if rdf_type is None:
        rdf_type = identifier_or_rdf_type
        identifier = None

    L.debug("oid making a {} with ident {}".format(rdf_type, identifier))
    c = None
    try:
        c = RDFTypeTable[rdf_type]
    except __HOLE__:
        c = DataObject

    # if its our class name, then make our own object
    # if there's a part after that, that's the property name
    o = None
    if identifier is not None:
        o = c(ident=identifier)
    else:
        o = c()
    return o
KeyError
dataset/ETHPy150Open openworm/PyOpenWorm/PyOpenWorm/dataObject.py/oid
def get_most_specific_rdf_type(types):
    """Gets the most specific rdf_type.

    Returns the URI corresponding to the lowest in the DataObject class
    hierarchy from among the given URIs.
    """
    most_specific_type = DataObject
    for x in types:
        try:
            class_object = RDFTypeTable[x]
            if issubclass(class_object, most_specific_type):
                most_specific_type = class_object
        except __HOLE__:
            L.warn(
                """A Python class corresponding to the type URI "{}" couldn't be found.
                You may want to import the module containing the class as well as
                add additional type annotations in order to resolve your objects to
                a more precise type.""".format(x))
    return most_specific_type.rdf_type
KeyError
dataset/ETHPy150Open openworm/PyOpenWorm/PyOpenWorm/dataObject.py/get_most_specific_rdf_type
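The function above walks a class table and keeps whichever candidate sits lowest in the hierarchy. The core of that walk, reduced to a toy hierarchy:

class Animal(object): pass
class Dog(Animal): pass
class Puppy(Dog): pass

def most_specific(classes, root=Animal):
    best = root
    for cls in classes:
        if issubclass(cls, best):
            best = cls   # a subclass of the current best is more specific
    return best

assert most_specific([Dog, Animal, Puppy]) is Puppy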
def dbworker(self):
    quoteDbName = self.connection.quoteIdentifier(self.db)
    self.progress.emit(0, 100, "Starting...")

    if self.compression == "":
        opener = open
    elif self.compression == "zip":
        opener = ZipFile
    elif self.compression == "gz":
        opener = GzipFile
    elif self.compression == "bz2":
        opener = BZ2File

    with opener(self.destfile, "w") as f:
        try:
            cursor = self.connection.cursor()
            cursor.execute("SHOW FULL TABLES IN %s WHERE Table_type='BASE TABLE'" % quoteDbName)
            tables = [row[0] for row in cursor.fetchall()]

            self.steps = 1
            if self.dumpTables:
                self.steps += len(tables)

            if self.dumpViews:
                cursor.execute("SHOW FULL TABLES IN %s WHERE Table_type='VIEW'" % quoteDbName)
                views = [row[0] for row in cursor.fetchall()]
                self.steps += len(views)
            else:
                views = []

            cursor.execute("SHOW VARIABLES LIKE 'version';")
            row = cursor.fetchone()
            serverVersion = row[1]

            f.write("""-- {appName} {appVersion}
--
-- Host: {host}    Database: {db}
-- ------------------------------------------------------
-- Server version {serverVersion}

/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8 */;
/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
/*!40103 SET TIME_ZONE='+00:00' */;
/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;""".format(
                appName=application.name,
                appVersion=application.version,
                host=self.connection.host,
                db=self.db,
                serverVersion=serverVersion,
            ))

            for table in tables:
                quoteTable = self.connection.quoteIdentifier(table)
                self.advance("Dumping table %s" % table)

                if self.dumpTables:
                    cursor.execute("SHOW CREATE TABLE %s.%s;" % (quoteDbName, quoteTable))
                    row = cursor.fetchone()
                    create = row[1]
                    f.write("""

--
-- Table structure for table {table}
--

DROP TABLE IF EXISTS {table};
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
{create};
/*!40101 SET character_set_client = @saved_cs_client */;""".format(
                        table=quoteTable,
                        create=create,
                    ))

                if self.dumpTriggers:
                    query = "SHOW TRIGGERS IN %s WHERE `Table` = ?;" % (quoteDbName,)
                    cursor.execute(query.replace("?", "%s"), (table,))
                    triggers = cursor.fetchall()
                    for trigger in triggers:
                        definer = trigger[7].split('@', 2)
                        f.write("""/*!50003 SET @saved_cs_client = @@character_set_client */ ;
/*!50003 SET @saved_cs_results = @@character_set_results */ ;
/*!50003 SET @saved_col_connection = @@collation_connection */ ;
/*!50003 SET character_set_client = utf8 */ ;
/*!50003 SET character_set_results = utf8 */ ;
/*!50003 SET collation_connection = utf8_general_ci */ ;
/*!50003 SET @saved_sql_mode = @@sql_mode */ ;
/*!50003 SET sql_mode = '' */ ;
DELIMITER ;;
/*!50003 CREATE*/ /*!50017 DEFINER={definer}@{definerHost}*/ /*!50003 TRIGGER {trigger} {timing} {event} ON {table} FOR EACH ROW {statement} */;;
DELIMITER ;
/*!50003 SET sql_mode = @saved_sql_mode */ ;
/*!50003 SET character_set_client = @saved_cs_client */ ;
/*!50003 SET character_set_results = @saved_cs_results */ ;
/*!50003 SET collation_connection = @saved_col_connection */ ;""".format(
                            trigger=self.connection.quoteIdentifier(trigger[0]),
                            definer=self.connection.quoteIdentifier(definer[0]),
                            definerHost=self.connection.quoteIdentifier(definer[1]),
                            table=quoteTable,
                            statement=trigger[3],
                            timing=trigger[4],
                            event=trigger[1],
                        ))

                if self.dumpData:
                    cursor.execute("SELECT COUNT(*) FROM %s.%s;" % (quoteDbName, quoteTable))
                    count = cursor.fetchone()[0]
                    if self.limitDumpData:
                        count = min(count, self.limitDumpData)
                    if count:
                        self.subProgress.emit(0, count, "Dumping rows of table %s" % quoteTable)
                        f.write("""

--
-- Dumping data for table {table}
--

LOCK TABLES {table} WRITE;
/*!40000 ALTER TABLE {table} DISABLE KEYS */;
INSERT INTO {table} VALUES """.format(table=quoteTable))

                        limit = " LIMIT %d" % self.limitDumpData if self.limitDumpData else ""
                        rownum = 0
                        for row in self.connection.iterall("SELECT * FROM %s.%s%s;" % (quoteDbName, quoteTable, limit), cursor=cursor):
                            rownum += 1
                            datarow = []
                            for i, cell in enumerate(row):
                                if cell is None:
                                    datarow.append("NULL")
                                elif cursor.description[i][1] in MySQLdb.BINARY:
                                    if type(cell) is unicode:
                                        cell = cell.encode("utf-8")
                                    datarow.append("0x%s" % cell.encode("hex"))
                                elif isinstance(cell, basestring):
                                    try:
                                        datarow.append("'%s'" % self.connection.escapeString(cell.encode("utf-8")))
                                    except __HOLE__:
                                        datarow.append("0x%s" % cell.encode("utf-8").encode("hex"))
                                elif isinstance(cell, (int, long, float)):
                                    datarow.append(str(cell))
                                else:
                                    datarow.append("'%s'" % self.connection.escapeString(str(cell)))
                            if row > 0:
                                f.write(", ")
                            f.write("(%s)" % ",".join(datarow))
                            self.subProgress.emit(rownum, count, "Dumping rows of table %s" % quoteTable)
                        f.write(""";
/*!40000 ALTER TABLE {table} ENABLE KEYS */;
UNLOCK TABLES;
""".format(table=quoteTable))

            if self.dumpViews:
                for view in views:
                    view = self.connection.quoteIdentifier(view)
                    self.advance("Dumping view %s" % view)
                    cursor.execute("SHOW CREATE VIEW %s.%s;" % (quoteDbName, view))
                    row = cursor.fetchone()
                    create = re.sub("^(CREATE ALGORITHM=[^ ]+ )(DEFINER=[^ ]+ SQL SECURITY [^ ]+ )",
                                    "/*!50001 \\1*/\n/*!50013 \\2*/\n", row[1])
                    f.write("""

--
-- View structure for view {view}
--

/*!50001 DROP TABLE IF EXISTS {view}*/;
/*!50001 DROP VIEW IF EXISTS {view}*/;
/*!50001 SET @saved_cs_client = @@character_set_client */;
/*!50001 SET @saved_cs_results = @@character_set_results */;
/*!50001 SET @saved_col_connection = @@collation_connection */;
/*!50001 SET character_set_client = utf8 */;
/*!50001 SET character_set_results = utf8 */;
/*!50001 SET collation_connection = utf8_general_ci */;
{create};
/*!50001 SET character_set_client = @saved_cs_client */;
/*!50001 SET character_set_results = @saved_cs_results */;
/*!50001 SET collation_connection = @saved_col_connection */;;
""".format(
                        view=view,
                        create=create,
                    ))

            self.advance("Dump terminated")
        except MySQLError as (errno, errmsg):  # @UnusedVariable
            print errmsg

        f.write("""
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;

-- Dump completed on %s\n""" % datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
UnicodeDecodeError
dataset/ETHPy150Open mtorromeo/sqlantaresia/sqlantaresia/DumpTab.py/DumpThread.dbworker
def __init__(self, number):
    self.number = number
    # FIXME: need to make sure we're really getting a number and not
    # any non-number characters.
    try:
        self.digits = [int(digit) for digit in str(self.number).strip()]
        self.region_code = int(str(self.digits[0]) + str(self.digits[1]))
        self.converted = True
    except __HOLE__:
        # Not a number, failed to convert
        self.digits = None
        self.region_code = None
        self.converted = False
ValueError
dataset/ETHPy150Open wesabe/fixofx/lib/ofx/validators.py/RoutingNumber.__init__
def resolve(self, context, quiet=True):
    """
    Return an object described by the accessor by traversing the
    attributes of context.
    """
    try:
        obj = context
        for level in self.levels:
            if isinstance(obj, dict):
                obj = obj[level]
            elif isinstance(obj, list) or isinstance(obj, tuple):
                obj = obj[int(level)]
            else:
                if callable(getattr(obj, level)):
                    try:
                        obj = getattr(obj, level)()
                    except __HOLE__:
                        obj = getattr(obj, level)
                else:
                    # for model field that has choice set
                    # use get_xxx_display to access
                    display = 'get_%s_display' % level
                    obj = getattr(obj, display)() if hasattr(obj, display) else getattr(obj, level)
            if not obj:
                break
        return obj
    except Exception as e:
        if quiet:
            return ''
        else:
            raise e
KeyError
dataset/ETHPy150Open shymonk/django-datatable/table/utils.py/Accessor.resolve
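Accessor.resolve dispatches each path level by container type: dict key, then sequence index, then attribute (calling it if callable). A minimal standalone sketch of the same traversal, assuming dot-separated paths:

def resolve_path(context, path):
    obj = context
    for level in path.split('.'):
        if isinstance(obj, dict):
            obj = obj[level]
        elif isinstance(obj, (list, tuple)):
            obj = obj[int(level)]
        else:
            obj = getattr(obj, level)
            if callable(obj):
                obj = obj()   # call zero-argument methods, as above
    return obj

data = {"user": {"emails": ["a@example.com", "b@example.com"]}}
assert resolve_path(data, "user.emails.0") == "a@example.com"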
def render(self, context):
    try:
        expire_time = self.expire_time_var.resolve(context)
    except VariableDoesNotExist:
        raise TemplateSyntaxError('"cache" tag got an unknown variable: %r' % self.expire_time_var.var)
    try:
        expire_time = int(expire_time)
    except (ValueError, __HOLE__):
        raise TemplateSyntaxError('"cache" tag got a non-integer timeout value: %r' % expire_time)
    # Build a unicode key for this fragment and all vary-on's.
    args = md5_constructor(u':'.join([urlquote(resolve_variable(var, context)) for var in self.vary_on]))
    cache_key = 'template.cache.%s.%s' % (self.fragment_name, args.hexdigest())
    value = cache.get(cache_key)
    if value is None:
        value = self.nodelist.render(context)
        cache.set(cache_key, value, expire_time)
    return value
TypeError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/templatetags/cache.py/CacheNode.render
def _real_main(argv=None):
    # Compatibility fixes for Windows
    if sys.platform == 'win32':
        # https://github.com/rg3/youtube-dl/issues/820
        codecs.register(lambda name: codecs.lookup('utf-8') if name == 'cp65001' else None)

    workaround_optparse_bug9161()

    setproctitle('youtube-dl')

    parser, opts, args = parseOpts(argv)

    # Set user agent
    if opts.user_agent is not None:
        std_headers['User-Agent'] = opts.user_agent

    # Set referer
    if opts.referer is not None:
        std_headers['Referer'] = opts.referer

    # Custom HTTP headers
    if opts.headers is not None:
        for h in opts.headers:
            if h.find(':', 1) < 0:
                parser.error('wrong header formatting, it should be key:value, not "%s"' % h)
            key, value = h.split(':', 2)
            if opts.verbose:
                write_string('[debug] Adding header from command line option %s:%s\n' % (key, value))
            std_headers[key] = value

    # Dump user agent
    if opts.dump_user_agent:
        compat_print(std_headers['User-Agent'])
        sys.exit(0)

    # Batch file verification
    batch_urls = []
    if opts.batchfile is not None:
        try:
            if opts.batchfile == '-':
                batchfd = sys.stdin
            else:
                batchfd = io.open(opts.batchfile, 'r', encoding='utf-8', errors='ignore')
            batch_urls = read_batch_urls(batchfd)
            if opts.verbose:
                write_string('[debug] Batch file urls: ' + repr(batch_urls) + '\n')
        except IOError:
            sys.exit('ERROR: batch file could not be read')
    all_urls = batch_urls + args
    all_urls = [url.strip() for url in all_urls]
    _enc = preferredencoding()
    all_urls = [url.decode(_enc, 'ignore') if isinstance(url, bytes) else url for url in all_urls]

    if opts.list_extractors:
        for ie in list_extractors(opts.age_limit):
            compat_print(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else ''))
            matchedUrls = [url for url in all_urls if ie.suitable(url)]
            for mu in matchedUrls:
                compat_print('  ' + mu)
        sys.exit(0)
    if opts.list_extractor_descriptions:
        for ie in list_extractors(opts.age_limit):
            if not ie._WORKING:
                continue
            desc = getattr(ie, 'IE_DESC', ie.IE_NAME)
            if desc is False:
                continue
            if hasattr(ie, 'SEARCH_KEY'):
                _SEARCHES = ('cute kittens', 'slithering pythons', 'falling cat',
                             'angry poodle', 'purple fish', 'running tortoise',
                             'sleeping bunny', 'burping cow')
                _COUNTS = ('', '5', '10', 'all')
                desc += ' (Example: "%s%s:%s" )' % (ie.SEARCH_KEY, random.choice(_COUNTS), random.choice(_SEARCHES))
            compat_print(desc)
        sys.exit(0)

    # Conflicting, missing and erroneous options
    if opts.usenetrc and (opts.username is not None or opts.password is not None):
        parser.error('using .netrc conflicts with giving username/password')
    if opts.password is not None and opts.username is None:
        parser.error('account username missing\n')
    if opts.outtmpl is not None and (opts.usetitle or opts.autonumber or opts.useid):
        parser.error('using output template conflicts with using title, video ID or auto number')
    if opts.usetitle and opts.useid:
        parser.error('using title conflicts with using video ID')
    if opts.username is not None and opts.password is None:
        opts.password = compat_getpass('Type account password and press [Return]: ')
    if opts.ratelimit is not None:
        numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
        if numeric_limit is None:
            parser.error('invalid rate limit specified')
        opts.ratelimit = numeric_limit
    if opts.min_filesize is not None:
        numeric_limit = FileDownloader.parse_bytes(opts.min_filesize)
        if numeric_limit is None:
            parser.error('invalid min_filesize specified')
        opts.min_filesize = numeric_limit
    if opts.max_filesize is not None:
        numeric_limit = FileDownloader.parse_bytes(opts.max_filesize)
        if numeric_limit is None:
            parser.error('invalid max_filesize specified')
        opts.max_filesize = numeric_limit
    if opts.retries is not None:
        try:
            opts.retries = int(opts.retries)
        except (TypeError, __HOLE__):
            parser.error('invalid retry count specified')
    if opts.buffersize is not None:
        numeric_buffersize = FileDownloader.parse_bytes(opts.buffersize)
        if numeric_buffersize is None:
            parser.error('invalid buffer size specified')
        opts.buffersize = numeric_buffersize
    if opts.playliststart <= 0:
        raise ValueError('Playlist start must be positive')
    if opts.playlistend not in (-1, None) and opts.playlistend < opts.playliststart:
        raise ValueError('Playlist end must be greater than playlist start')
    if opts.extractaudio:
        if opts.audioformat not in ['best', 'aac', 'mp3', 'm4a', 'opus', 'vorbis', 'wav']:
            parser.error('invalid audio format specified')
    if opts.audioquality:
        opts.audioquality = opts.audioquality.strip('k').strip('K')
        if not opts.audioquality.isdigit():
            parser.error('invalid audio quality specified')
    if opts.recodevideo is not None:
        if opts.recodevideo not in ['mp4', 'flv', 'webm', 'ogg', 'mkv']:
            parser.error('invalid video recode format specified')
    if opts.date is not None:
        date = DateRange.day(opts.date)
    else:
        date = DateRange(opts.dateafter, opts.datebefore)

    # Do not download videos when there are audio-only formats
    if opts.extractaudio and not opts.keepvideo and opts.format is None:
        opts.format = 'bestaudio/best'

    # --all-sub automatically sets --write-sub if --write-auto-sub is not given
    # this was the old behaviour if only --all-sub was given.
    if opts.allsubtitles and not opts.writeautomaticsub:
        opts.writesubtitles = True

    if sys.version_info < (3,):
        # In Python 2, sys.argv is a bytestring (also note http://bugs.python.org/issue2128 for Windows systems)
        if opts.outtmpl is not None:
            opts.outtmpl = opts.outtmpl.decode(preferredencoding())
    outtmpl = ((opts.outtmpl is not None and opts.outtmpl) or
               (opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s') or
               (opts.format == '-1' and '%(id)s-%(format)s.%(ext)s') or
               (opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s') or
               (opts.usetitle and '%(title)s-%(id)s.%(ext)s') or
               (opts.useid and '%(id)s.%(ext)s') or
               (opts.autonumber and '%(autonumber)s-%(id)s.%(ext)s') or
               DEFAULT_OUTTMPL)
    if not os.path.splitext(outtmpl)[1] and opts.extractaudio:
        parser.error('Cannot download a video and extract audio into the same'
                     ' file! Use "{0}.%(ext)s" instead of "{0}" as the output'
                     ' template'.format(outtmpl))

    any_getting = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson or opts.dump_single_json
    any_printing = opts.print_json
    download_archive_fn = compat_expanduser(opts.download_archive) if opts.download_archive is not None else opts.download_archive

    # PostProcessors
    postprocessors = []
    # Add the metadata pp first, the other pps will copy it
    if opts.addmetadata:
        postprocessors.append({'key': 'FFmpegMetadata'})
    if opts.extractaudio:
        postprocessors.append({
            'key': 'FFmpegExtractAudio',
            'preferredcodec': opts.audioformat,
            'preferredquality': opts.audioquality,
            'nopostoverwrites': opts.nopostoverwrites,
        })
    if opts.recodevideo:
        postprocessors.append({
            'key': 'FFmpegVideoConvertor',
            'preferedformat': opts.recodevideo,
        })
    if opts.embedsubtitles:
        postprocessors.append({
            'key': 'FFmpegEmbedSubtitle',
            'subtitlesformat': opts.subtitlesformat,
        })
    if opts.xattrs:
        postprocessors.append({'key': 'XAttrMetadata'})
    if opts.embedthumbnail:
        if not opts.addmetadata:
            postprocessors.append({'key': 'FFmpegAudioFix'})
        postprocessors.append({'key': 'AtomicParsley'})
    # Please keep ExecAfterDownload towards the bottom as it allows the user to modify the final file in any way.
    # So if the user is able to remove the file before your postprocessor runs it might cause a few problems.
    if opts.exec_cmd:
        postprocessors.append({
            'key': 'ExecAfterDownload',
            'verboseOutput': opts.verbose,
            'exec_cmd': opts.exec_cmd,
        })

    ydl_opts = {
        'usenetrc': opts.usenetrc,
        'username': opts.username,
        'password': opts.password,
        'twofactor': opts.twofactor,
        'videopassword': opts.videopassword,
        'quiet': (opts.quiet or any_getting or any_printing),
        'no_warnings': opts.no_warnings,
        'forceurl': opts.geturl,
        'forcetitle': opts.gettitle,
        'forceid': opts.getid,
        'forcethumbnail': opts.getthumbnail,
        'forcedescription': opts.getdescription,
        'forceduration': opts.getduration,
        'forcefilename': opts.getfilename,
        'forceformat': opts.getformat,
        'forcejson': opts.dumpjson or opts.print_json,
        'dump_single_json': opts.dump_single_json,
        'simulate': opts.simulate or any_getting,
        'skip_download': opts.skip_download,
        'format': opts.format,
        'format_limit': opts.format_limit,
        'listformats': opts.listformats,
        'outtmpl': outtmpl,
        'autonumber_size': opts.autonumber_size,
        'restrictfilenames': opts.restrictfilenames,
        'ignoreerrors': opts.ignoreerrors,
        'ratelimit': opts.ratelimit,
        'nooverwrites': opts.nooverwrites,
        'retries': opts.retries,
        'buffersize': opts.buffersize,
        'noresizebuffer': opts.noresizebuffer,
        'continuedl': opts.continue_dl,
        'noprogress': opts.noprogress,
        'progress_with_newline': opts.progress_with_newline,
        'playliststart': opts.playliststart,
        'playlistend': opts.playlistend,
        'playlistreverse': opts.playlist_reverse,
        'noplaylist': opts.noplaylist,
        'logtostderr': opts.outtmpl == '-',
        'consoletitle': opts.consoletitle,
        'nopart': opts.nopart,
        'updatetime': opts.updatetime,
        'writedescription': opts.writedescription,
        'writeannotations': opts.writeannotations,
        'writeinfojson': opts.writeinfojson,
        'writethumbnail': opts.writethumbnail,
        'writesubtitles': opts.writesubtitles,
        'writeautomaticsub': opts.writeautomaticsub,
        'allsubtitles': opts.allsubtitles,
        'listsubtitles': opts.listsubtitles,
        'subtitlesformat': opts.subtitlesformat,
        'subtitleslangs': opts.subtitleslangs,
        'matchtitle': decodeOption(opts.matchtitle),
        'rejecttitle': decodeOption(opts.rejecttitle),
        'max_downloads': opts.max_downloads,
        'prefer_free_formats': opts.prefer_free_formats,
        'verbose': opts.verbose,
        'dump_intermediate_pages': opts.dump_intermediate_pages,
        'write_pages': opts.write_pages,
        'test': opts.test,
        'keepvideo': opts.keepvideo,
        'min_filesize': opts.min_filesize,
        'max_filesize': opts.max_filesize,
        'min_views': opts.min_views,
        'max_views': opts.max_views,
        'daterange': date,
        'cachedir': opts.cachedir,
        'youtube_print_sig_code': opts.youtube_print_sig_code,
        'age_limit': opts.age_limit,
        'download_archive': download_archive_fn,
        'cookiefile': opts.cookiefile,
        'nocheckcertificate': opts.no_check_certificate,
        'prefer_insecure': opts.prefer_insecure,
        'proxy': opts.proxy,
        'socket_timeout': opts.socket_timeout,
        'bidi_workaround': opts.bidi_workaround,
        'debug_printtraffic': opts.debug_printtraffic,
        'prefer_ffmpeg': opts.prefer_ffmpeg,
        'include_ads': opts.include_ads,
        'default_search': opts.default_search,
        'youtube_include_dash_manifest': opts.youtube_include_dash_manifest,
        'encoding': opts.encoding,
        'exec_cmd': opts.exec_cmd,
        'extract_flat': opts.extract_flat,
        'merge_output_format': opts.merge_output_format,
        'postprocessors': postprocessors,
        'fixup': opts.fixup,
        'source_address': opts.source_address,
        'call_home': opts.call_home,
    }

    with YoutubeDL(ydl_opts) as ydl:
        # Update version
        if opts.update_self:
            update_self(ydl.to_screen, opts.verbose)

        # Remove cache dir
        if opts.rm_cachedir:
            ydl.cache.remove()

        # Maybe do nothing
        if (len(all_urls) < 1) and (opts.load_info_filename is None):
            if opts.update_self or opts.rm_cachedir:
                sys.exit()

            ydl.warn_if_short_id(sys.argv[1:] if argv is None else argv)
            parser.error('you must provide at least one URL')

        try:
            if opts.load_info_filename is not None:
                retcode = ydl.download_with_info_file(opts.load_info_filename)
            else:
                retcode = ydl.download(all_urls)
        except MaxDownloadsReached:
            ydl.to_screen('--max-download limit reached, aborting.')
            retcode = 101

    sys.exit(retcode)
ValueError
dataset/ETHPy150Open yasoob/youtube-dl-GUI/youtube_dl/__init__.py/_real_main
def main(argv=None):
    try:
        _real_main(argv)
    except DownloadError:
        sys.exit(1)
    except SameFileError:
        sys.exit('ERROR: fixed output name but more than one file to download')
    except __HOLE__:
        sys.exit('\nERROR: Interrupted by user')
KeyboardInterrupt
dataset/ETHPy150Open yasoob/youtube-dl-GUI/youtube_dl/__init__.py/main
def main(argv=None):
    """script main.

    parses command line options in sys.argv, unless *argv* is given.
    """

    if argv is None:
        argv = sys.argv

    # setup command line parser
    parser = E.OptionParser(version="%prog version: $Id$",
                            usage=globals()["__doc__"])

    parser.add_option("-t", "--test", dest="test", type="string",
                      help="supply help")

    parser.add_option("--method", dest="method", type="choice",
                      choices=("metrics", "summary", "module_summary"),
                      help="method to summarise clustering")

    parser.add_option("--ref-gtf-files", dest="ref_gtf", type="string",
                      help="comma separated list of reference gtf files")

    # add common options (-h/--help, ...) and parse command line
    (options, args) = E.Start(parser, argv=argv)

    if options.method == "metrics":
        infile = argv[-1]
        E.info("loading input file: %s" % infile)
        assert infile

        df = pd.read_table(infile,
                           sep="\t",
                           header=None,
                           index_col=0)

        df = df.ix[:, :50]
        cluster_combs = (x for x in itertools.combinations(df.columns, 2))
        genes = df.index
        results_dict = {}
        all_clusts = {}

        E.info("setting up cluster containers")
        for i in df.columns:
            clusters = set(df[i].values.tolist())
            cluster_dict = {}
            for clust in clusters:
                cluster_dict[clust] = []
            for gene in genes:
                cluster_dict[df[i][gene]].append(gene)

            for col in clusters:
                col_set = set()
                clust_col = cluster_dict[col]
                gene_members = itertools.combinations(clust_col, 2)
                col_set.update(gene_members)
                cluster_dict[col] = col_set
            all_clusts[i] = cluster_dict

        E.info("generating all pair-wise cluster comparisons")
        E.info("calculating adjusted mutual information")
        for k in cluster_combs:
            clusters1 = all_clusts[k[0]]
            clusters2 = all_clusts[k[1]]
            metric_dict = {}
            metric_dict['AMI'] = TS.adjustedMutualInformation(clusters1,
                                                              clusters2)
            results_dict[k] = metric_dict

        res_frame = pd.DataFrame(results_dict).T
        res_frame = res_frame.reset_index()
        res_frame.drop(['level_0'], inplace=True, axis=1)
        res_frame.drop(['level_1'], inplace=True, axis=1)

        # flatten rand indices and add to output dataframe
        rand_arrays = TS.randIndexes(df)
        flat_adj_rand = TS.unravel_arrays(rand_arrays[0])
        flat_rand = TS.unravel_arrays(rand_arrays[1])
        res_frame['Rand_Index'] = flat_rand
        res_frame['Adjusted_Rand_Index'] = flat_adj_rand
        E.info("aggregating results")

        res_frame.to_csv(options.stdout,
                         sep="\t",
                         index_label='idx')

    elif options.method == "summary":
        infiles = argv[-1]
        list_of_files = infiles.split(",")

        file_dict = {}
        for fle in list_of_files:
            fname = fle.split("/")[-1]
            condition = fname.split("-")[0]
            ref = fname.split("-")[1]
            df_ = pd.read_table(fle,
                                sep="\t",
                                header=0,
                                index_col=0)
            df_.columns = ['gene_id', 'cluster']
            clust_dict = {}
            for idx in df_.index:
                cluster = df_.loc[idx]['cluster']
                gene = df_.loc[idx]['gene_id']
                try:
                    clust_dict[cluster] += 1
                except KeyError:
                    clust_dict[cluster] = 1

            med_size = np.median(clust_dict.values())
            file_dict[fname] = {'condition': condition,
                                'reference': ref,
                                'median_cluster_size': med_size}

        outframe = pd.DataFrame(file_dict).T
        outframe.to_csv(options.stdout,
                        sep="\t",
                        index_label='idx')

    elif options.method == "module_summary":
        # get lncRNA/gene lengths from reference gtfs
        ref_gtfs = options.ref_gtf.split(",")
        length_dict = {}
        for ref in ref_gtfs:
            oref = IOTools.openFile(ref, "rb")
            git = GTF.transcript_iterator(GTF.iterator(oref))
            for gene in git:
                for trans in gene:
                    length = trans.end - trans.start
                    try:
                        length_dict[trans.gene_id] += length
                    except __HOLE__:
                        length_dict[trans.gene_id] = length
            oref.close()

        infiles = argv[-1]
        list_of_files = infiles.split(",")

        fdfs = []
        for fle in list_of_files:
            cond = fle.split("/")[-1].split("-")[0]
            refer = fle.split("/")[-1].split("-")[1]
            _df = pd.read_table(fle,
                                sep="\t",
                                header=0,
                                index_col=0)
            _df.columns = ['gene_id', 'cluster']
            clusters = set(_df['cluster'])
            c_dict = {}
            # summarize over each cluster
            for clust in clusters:
                lengths = []
                c_df = _df[_df['cluster'] == clust]
                for lid in c_df['gene_id']:
                    lengths.append(length_dict[lid])
                c_dict[clust] = {'cluster_size': len(c_df['gene_id']),
                                 'mean_length': np.mean(lengths),
                                 'index': (cond, refer),
                                 'module': clust}
            cdf = pd.DataFrame(c_dict).T
            # use a multiindex for hierarchical indexing
            midx = pd.MultiIndex.from_tuples(cdf['index'])
            cdf.index = midx
            cdf.drop(['index'], inplace=True, axis=1)
            fdfs.append(cdf)

        # generate a single output df
        s_df = fdfs[0]
        fdfs.pop(0)
        for df in fdfs:
            s_df = s_df.append(df)

        s_df.to_csv(options.stdout,
                    index_label=("condition", "reference"),
                    sep="\t")

    # write footer and output benchmark information.
    E.Stop()
KeyError
dataset/ETHPy150Open CGATOxford/cgat/scripts/clusters2metrics.py/main
def _Dynamic_Unsubscribe(self, request, response):
    """Unsubscribe a query.

    Args:
      request: UnsubscribeRequest
      response: UnsubscribeResponse (not used)
    """
    ValidateSubscriptionId(request.sub_id())
    ValidateTopic(request.topic())
    try:
        del self.topics[request.topic()][request.sub_id()]
    except __HOLE__:
        pass
    self._Write()
KeyError
dataset/ETHPy150Open AppScale/appscale/AppServer/google/appengine/api/prospective_search/prospective_search_stub.py/ProspectiveSearchStub._Dynamic_Unsubscribe
@handle_response_format
@treeio_login_required
@module_admin_required()
def settings_view(request, response_format='html'):
    "Settings view"

    # default permissions
    try:
        conf = ModuleSetting.get_for_module(
            'treeio.core', 'default_permissions')[0]
        default_permissions = conf.value
    except:
        default_permissions = settings.HARDTREE_DEFAULT_PERMISSIONS

    default_permissions_display = default_permissions
    for key, value in PERMISSION_CHOICES:
        if key == default_permissions:
            default_permissions_display = _(value)

    # default perspective
    try:
        conf = ModuleSetting.get_for_module(
            'treeio.core', 'default_perspective')[0]
        default_perspective = Perspective.objects.get(pk=long(conf.value))
    except:
        default_perspective = None

    # language
    language = getattr(settings, 'HARDTREE_LANGUAGES_DEFAULT', '')
    try:
        conf = ModuleSetting.get_for_module('treeio.core', 'language')[0]
        language = conf.value
    except __HOLE__:
        pass
    all_languages = getattr(
        settings, 'HARDTREE_LANGUAGES', [('en', 'English')])

    logopath = ''
    try:
        conf = ModuleSetting.get_for_module('treeio.core', 'logopath')[0]
        logopath = conf.value
        match = re.match('.*[a-z0-9]{32}__(?P<filename>.+)$', logopath)
        if match:
            logopath = match.group('filename')
    except:
        pass

    # time zone
    default_timezone = settings.HARDTREE_SERVER_DEFAULT_TIMEZONE
    try:
        conf = ModuleSetting.get_for_module(
            'treeio.core', 'default_timezone')[0]
        default_timezone = conf.value
    except Exception:
        default_timezone = getattr(
            settings, 'HARDTREE_SERVER_TIMEZONE')[default_timezone][0]
    all_timezones = getattr(settings, 'HARDTREE_SERVER_TIMEZONE',
                            [(1, '(GMT-11:00) International Date Line West')])

    return render_to_response('core/administration/settings_view',
                              {'default_permissions': default_permissions,
                               'default_permissions_display': default_permissions_display,
                               'default_perspective': default_perspective,
                               'language': language,
                               'all_languages': all_languages,
                               'logopath': logopath,
                               'default_timezone': default_timezone,
                               'all_timezones': all_timezones},
                              context_instance=RequestContext(request),
                              response_format=response_format)
IndexError
dataset/ETHPy150Open treeio/treeio/treeio/core/administration/views.py/settings_view
def move_or_copy(src, dst):
    try:
        os.rename(src, dst)
    except __HOLE__ as e:
        if e.errno == ERRNO_INVALID_CROSS_DEVICE_LINK:
            try:
                shutil.copy(src, dst)
            finally:
                os.unlink(src)
        else:
            raise
OSError
dataset/ETHPy150Open eallik/spinoff/spinoff/contrib/filetransfer/fileref.py/move_or_copy
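move_or_copy falls back to copy-then-unlink when os.rename fails because source and destination sit on different filesystems. The same pattern written against the standard errno.EXDEV name (the module's own constant, ERRNO_INVALID_CROSS_DEVICE_LINK, is presumably the same value):

import errno
import os
import shutil

def move_across_devices(src, dst):
    try:
        os.rename(src, dst)          # atomic when both paths share a filesystem
    except OSError as e:
        if e.errno != errno.EXDEV:
            raise
        try:
            shutil.copy(src, dst)    # cross-device: copy first...
        finally:
            os.unlink(src)           # ...then remove the original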
def _find_comp(self, dt):
    if len(self._comps) == 1:
        return self._comps[0]
    dt = dt.replace(tzinfo=None)
    try:
        return self._cachecomp[self._cachedate.index(dt)]
    except __HOLE__:
        pass
    lastcomp = None
    lastcompdt = None
    for comp in self._comps:
        if not comp.isdst:
            # Handle the extra hour in DST -> STD
            compdt = comp.rrule.before(dt - comp.tzoffsetdiff, inc=True)
        else:
            compdt = comp.rrule.before(dt, inc=True)
        if compdt and (not lastcompdt or lastcompdt < compdt):
            lastcompdt = compdt
            lastcomp = comp
    if not lastcomp:
        # RFC says nothing about what to do when a given
        # time is before the first onset date. We'll look for the
        # first standard component, or the first component, if
        # none is found.
        for comp in self._comps:
            if not comp.isdst:
                lastcomp = comp
                break
        else:
            lastcomp = comp[0]
    self._cachedate.insert(0, dt)
    self._cachecomp.insert(0, lastcomp)
    if len(self._cachedate) > 10:
        self._cachedate.pop()
        self._cachecomp.pop()
    return lastcomp
ValueError
dataset/ETHPy150Open CouchPotato/CouchPotatoServer/libs/dateutil/tz.py/_tzicalvtz._find_comp
def gettz(name=None):
    tz = None
    if not name:
        try:
            name = os.environ["TZ"]
        except __HOLE__:
            pass
    if name is None or name == ":":
        for filepath in TZFILES:
            if not os.path.isabs(filepath):
                filename = filepath
                for path in TZPATHS:
                    filepath = os.path.join(path, filename)
                    if os.path.isfile(filepath):
                        break
                else:
                    continue
            if os.path.isfile(filepath):
                try:
                    tz = tzfile(filepath)
                    break
                except (IOError, OSError, ValueError):
                    pass
        else:
            tz = tzlocal()
    else:
        if name.startswith(":"):
            name = name[:-1]
        if os.path.isabs(name):
            if os.path.isfile(name):
                tz = tzfile(name)
            else:
                tz = None
        else:
            for path in TZPATHS:
                filepath = os.path.join(path, name)
                if not os.path.isfile(filepath):
                    filepath = filepath.replace(' ', '_')
                    if not os.path.isfile(filepath):
                        continue
                try:
                    tz = tzfile(filepath)
                    break
                except (IOError, OSError, ValueError):
                    pass
            else:
                tz = None
                if tzwin:
                    try:
                        tz = tzwin(name)
                    except OSError:
                        pass
                if not tz:
                    from dateutil.zoneinfo import gettz
                    tz = gettz(name)
                if not tz:
                    for c in name:
                        # name must have at least one offset to be a tzstr
                        if c in "0123456789":
                            try:
                                tz = tzstr(name)
                            except ValueError:
                                pass
                            break
                    else:
                        if name in ("GMT", "UTC"):
                            tz = tzutc()
                        elif name in time.tzname:
                            tz = tzlocal()
    return tz

# vim:ts=4:sw=4:et
KeyError
dataset/ETHPy150Open CouchPotato/CouchPotatoServer/libs/dateutil/tz.py/gettz
def segment_axis(a, length, overlap=0, axis=None, end='cut', endvalue=0):
    """Generate a new array that chops the given array along the given axis
    into overlapping frames.

    Parameters
    ----------
    a : array-like
        The array to segment
    length : int
        The length of each frame
    overlap : int, optional
        The number of array elements by which the frames should overlap
    axis : int, optional
        The axis to operate on; if None, act on the flattened array
    end : {'cut', 'wrap', 'end'}, optional
        What to do with the last frame, if the array is not evenly
        divisible into pieces.

        - 'cut'   Simply discard the extra values
        - 'wrap'  Copy values from the beginning of the array
        - 'pad'   Pad with a constant value

    endvalue : object
        The value to use for end='pad'

    Examples
    --------
    >>> segment_axis(arange(10), 4, 2)
    array([[0, 1, 2, 3],
           [2, 3, 4, 5],
           [4, 5, 6, 7],
           [6, 7, 8, 9]])

    Notes
    -----
    The array is not copied unless necessary (either because it is
    unevenly strided and being flattened or because end is set to
    'pad' or 'wrap').

    use as_strided
    """

    if axis is None:
        a = np.ravel(a)  # may copy
        axis = 0

    l = a.shape[axis]

    if overlap >= length:
        raise ValueError, "frames cannot overlap by more than 100%"
    if overlap < 0 or length <= 0:
        raise ValueError, "overlap must be nonnegative and length must be "\
                          "positive"

    if l < length or (l - length) % (length - overlap):
        if l > length:
            roundup = length + \
                (1 + (l - length) // (length - overlap)) * (length - overlap)
            rounddown = length + \
                ((l - length) // (length - overlap)) * (length - overlap)
        else:
            roundup = length
            rounddown = 0
        assert rounddown < l < roundup
        assert roundup == rounddown + (length - overlap) or \
            (roundup == length and rounddown == 0)
        a = a.swapaxes(-1, axis)

        if end == 'cut':
            a = a[..., :rounddown]
        elif end in ['pad', 'wrap']:
            # copying will be necessary
            s = list(a.shape)
            s[-1] = roundup
            b = np.empty(s, dtype=a.dtype)
            b[..., :l] = a
            if end == 'pad':
                b[..., l:] = endvalue
            elif end == 'wrap':
                b[..., l:] = a[..., :roundup - l]
            a = b

        a = a.swapaxes(-1, axis)

    l = a.shape[axis]
    if l == 0:
        raise ValueError, "Not enough data points to segment array in 'cut' "\
                          "mode; try 'pad' or 'wrap'"
    assert l >= length
    assert (l - length) % (length - overlap) == 0
    n = 1 + (l - length) // (length - overlap)
    s = a.strides[axis]
    newshape = a.shape[:axis] + (n, length) + a.shape[axis + 1:]
    newstrides = a.strides[:axis] + ((length - overlap) * s, s) + \
        a.strides[axis + 1:]

    try:
        return as_strided(a, strides=newstrides, shape=newshape)
    except __HOLE__:
        warnings.warn("Problem with ndarray creation forces copy.")
        a = a.copy()
        # Shape doesn't change but strides does
        newstrides = a.strides[:axis] + ((length - overlap) * s, s) + \
            a.strides[axis + 1:]
        return as_strided(a, strides=newstrides, shape=newshape)
TypeError
dataset/ETHPy150Open jfsantos/ift6266h14/old/pylearn2_timit/segmentaxis.py/segment_axis
def __next__(self):
    try:
        if not self._origin_iter:
            self._origin_iter = self._origin.__iter__()
        n = next(self._origin_iter)
    except __HOLE__ as e:
        self._finished = True
        raise e
    else:
        self._state.append(n)
        return n
StopIteration
dataset/ETHPy150Open graphql-python/graphene/graphene/utils/lazylist.py/LazyList.__next__
def read_events(self, timeout=None):
    if timeout is not None:
        rs, ws, xs = select.select([self.__fd], [], [], timeout)
        if self.__fd not in rs:
            return []
    while True:
        try:
            s = os.read(self.__fd, 1024)
            break
        except OSError as e:
            if e.errno != errno.EINTR:
                raise FSMonitorOSError(*e.args)
    events = []
    if not module_loaded:
        return events
    for wd, mask, cookie, name in parse_events(s):
        with self.__lock:
            watch = self.__wd_to_watch.get(wd)
        if watch is not None and watch.enabled:
            bit = 1
            while bit < 0x10000:
                if mask & bit:
                    action = action_map.get(bit)
                    if action is not None and (action & watch.flags):
                        events.append(FSEvent(watch, action, name))
                bit <<= 1
        if mask & IN_IGNORED:
            with self.__lock:
                try:
                    del self.__wd_to_watch[wd]
                except __HOLE__:
                    pass
    return events
KeyError
dataset/ETHPy150Open shaurz/fsmonitor/fsmonitor/linux.py/FSMonitor.read_events
def run(self, doc):
    marker_found = False

    div = etree.Element("div")
    div.attrib["class"] = "toc"
    last_li = None

    # Add title to the div
    if self.config["title"]:
        header = etree.SubElement(div, "span")
        header.attrib["class"] = "toctitle"
        header.text = self.config["title"]

    level = 0
    list_stack = [div]
    header_rgx = re.compile("[Hh][123456]")

    # Get a list of id attributes
    used_ids = []
    for c in doc.getiterator():
        if "id" in c.attrib:
            used_ids.append(c.attrib["id"])

    for (p, c) in self.iterparent(doc):
        text = ''.join(itertext(c)).strip()

        if not text:
            continue

        # To keep the output from screwing up the
        # validation by putting a <div> inside of a <p>
        # we actually replace the <p> in its entirety.
        # We do not allow the marker inside a header as that
        # would causes an enless loop of placing a new TOC
        # inside previously generated TOC.
        if c.text and c.text.strip() == self.config["marker"] and \
           not header_rgx.match(c.tag) and c.tag not in ['pre', 'code']:
            for i in range(len(p)):
                if p[i] == c:
                    p[i] = div
                    break
            marker_found = True

        if header_rgx.match(c.tag):
            try:
                tag_level = int(c.tag[-1])

                while tag_level < level:
                    list_stack.pop()
                    level -= 1

                if tag_level > level:
                    newlist = etree.Element("ul")
                    if last_li:
                        last_li.append(newlist)
                    else:
                        list_stack[-1].append(newlist)
                    list_stack.append(newlist)
                    if level == 0:
                        level = tag_level
                    else:
                        level += 1

                # Do not override pre-existing ids
                if not "id" in c.attrib:
                    id = unique(self.config["slugify"](text, '-'), used_ids)
                    c.attrib["id"] = id
                else:
                    id = c.attrib["id"]

                # List item link, to be inserted into the toc div
                last_li = etree.Element("li")
                link = etree.SubElement(last_li, "a")
                link.text = text
                link.attrib["href"] = '#' + id

                if self.config["anchorlink"] in [1, '1', True, 'True', 'true']:
                    anchor = etree.Element("a")
                    anchor.text = c.text
                    anchor.attrib["href"] = "#" + id
                    anchor.attrib["class"] = "toclink"
                    c.text = ""
                    for elem in c.getchildren():
                        anchor.append(elem)
                        c.remove(elem)
                    c.append(anchor)

                list_stack[-1].append(last_li)
            except __HOLE__:
                # We have bad ordering of headers. Just move on.
                pass
    if not marker_found:
        # searialize and attach to markdown instance.
        prettify = self.markdown.treeprocessors.get('prettify')
        if prettify:
            prettify.run(div)
        toc = self.markdown.serializer(div)
        for pp in self.markdown.postprocessors.values():
            toc = pp.run(toc)
        self.markdown.toc = toc
IndexError
dataset/ETHPy150Open darcyliu/storyboard/markdown/extensions/toc.py/TocTreeprocessor.run
def GetKindsForAllNamespaces(self, deadline):
    """Obtain a list of all kind names from the datastore.

    Pulls kinds from all namespaces. The result is deduped and alphabetized.

    Args:
        deadline: maximum number of seconds to spend getting kinds.

    Returns:
        kinds: an alphabetized list of kinds for the specified namespace(s).
        more_kinds: a boolean indicating whether there may be additional kinds
            not included in 'kinds' (e.g. because the query deadline was
            reached).
    """
    start = time.time()
    kind_name_set = set()

    def ReadFromKindIters(kind_iter_list):
        """Read kinds from a list of iterators.

        Reads a kind from each iterator in kind_iter_list, adds it to
        kind_name_set, and removes any completed iterators.

        Args:
            kind_iter_list: a list of iterators of kinds.
        """
        completed = []
        for kind_iter in kind_iter_list:
            try:
                kind_name = kind_iter.next().kind_name
                if utils.IsKindNameVisible(kind_name):
                    kind_name_set.add(kind_name)
            except __HOLE__:
                completed.append(kind_iter)
        for kind_iter in completed:
            kind_iter_list.remove(kind_iter)

    more_kinds = False
    try:
        namespace_iter = metadata.Namespace.all().run(batch_size=1000,
                                                      deadline=deadline)
        kind_iter_list = []
        for ns in namespace_iter:
            remaining = deadline - (time.time() - start)
            if remaining <= 0:
                raise datastore_errors.Timeout
            kind_iter_list.append(metadata.Kind.all(namespace=ns.namespace_name)
                                  .run(batch_size=1000, deadline=remaining))
            while len(kind_iter_list) == MAX_RPCS:
                ReadFromKindIters(kind_iter_list)

        while kind_iter_list:
            ReadFromKindIters(kind_iter_list)
    except (datastore_errors.Timeout, apiproxy_errors.DeadlineExceededError):
        more_kinds = True
        logging.warning('Failed to retrieve all kinds within deadline.')
    return sorted(kind_name_set), more_kinds
StopIteration
dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/ext/datastore_admin/main.py/RouteByActionHandler.GetKindsForAllNamespaces
def make_word_list():
    print "making word list..."
    word_list = []
    for i in w:
        try:
            d[i.lower()]
        except __HOLE__:
            pass
        else:
            if i.lower() == "'s":
                pass
            elif i[-1] == ".":
                pass
            else:
                word_list.append((i.lower(), d[i.lower()][0]))
    return word_list
KeyError
dataset/ETHPy150Open rossgoodwin/sonnetizer/sonnetizer_m.py/make_word_list
def sylcount(s):
    try:
        d[s]
    except __HOLE__:
        return None
    else:
        if len(d[s]) <= 1:
            sj = ''.join(d[s][0])
            sl = re.split('0|1|2', sj)
            return len(sl) - 1
        else:
            sj0 = ''.join(d[s][0])
            sl0 = re.split('0|1|2', sj0)
            sj1 = ''.join(d[s][1])
            sl1 = re.split('0|1|2', sj1)
            if len(sl1) < len(sl0):
                return len(sl1) - 1
            else:
                return len(sl0) - 1
KeyError
dataset/ETHPy150Open rossgoodwin/sonnetizer/sonnetizer_m.py/sylcount
def fs_cleanup(filename, suppress_exceptions=True):
    """
    Tries to remove the given filename. Ignores non-existent files
    """
    try:
        os.remove(filename)
    except __HOLE__:
        if suppress_exceptions:
            pass
        else:
            raise
OSError
dataset/ETHPy150Open mayan-edms/mayan-edms/mayan/apps/common/utils.py/fs_cleanup
def get_descriptor(file_input, read=True):
    try:
        # Is it a file like object?
        file_input.seek(0)
    except __HOLE__:
        # If not, try open it.
        if read:
            return open(file_input, 'rb')
        else:
            return open(file_input, 'wb')
    else:
        return file_input
AttributeError
dataset/ETHPy150Open mayan-edms/mayan-edms/mayan/apps/common/utils.py/get_descriptor
def _collect_files_being_pushed(ref_list, remote):
    """Collect modified files and filter those that need linting.

    Parameter:
        ref_list: list of references to parse (provided by git in stdin)
        remote: the remote being pushed to

    Returns:
        dict: Dict mapping branch names to 2-tuples of the form (list of
            changed files, list of files to lint)
    """
    if not ref_list:
        return {}
    # avoid testing of non branch pushes (tags for instance) or deletions
    ref_heads_only = [ref for ref in ref_list
                      if ref.local_ref.startswith('refs/heads/')]
    # get branch name from e.g. local_ref='refs/heads/lint_hook'
    branches = [ref.local_ref.split('/')[-1] for ref in ref_heads_only]
    hashes = [ref.local_sha1 for ref in ref_heads_only]
    remote_hashes = [ref.remote_sha1 for ref in ref_heads_only]
    collected_files = {}
    # git allows that multiple branches get pushed simultaneously with the
    # "all" flag. Therefore we need to loop over the ref_list provided.
    for branch, sha1, remote_sha1 in zip(branches, hashes, remote_hashes):
        # git reports the following for an empty / non existing branch
        # sha1: '0000000000000000000000000000000000000000'
        if set(remote_sha1) != {'0'}:
            try:
                modified_files = _compare_to_remote(remote, branch)
            except ValueError as e:
                print e.message
                sys.exit(1)
        else:
            # Get the difference to origin/develop instead
            try:
                modified_files = _compare_to_remote(remote, branch,
                                                    remote_branch='develop')
            except ValueError:
                # give up, return all files in repo
                try:
                    modified_files = _git_diff_name_status(GIT_NULL_COMMIT,
                                                           sha1)
                except __HOLE__ as e:
                    print e.message
                    sys.exit(1)
        files_to_lint = _extract_files_to_lint(modified_files)
        collected_files[branch] = (modified_files, files_to_lint)

    for branch, (modified_files, files_to_lint) in collected_files.iteritems():
        if modified_files:
            print '\nModified files in %s:' % branch
            pprint.pprint(modified_files)
            print '\nFiles to lint in %s:' % branch
            pprint.pprint(files_to_lint)
            print '\n'
    return collected_files
ValueError
dataset/ETHPy150Open oppia/oppia/scripts/pre_push_hook.py/_collect_files_being_pushed
def _install_hook():
    # install script ensures that oppia is root
    oppia_dir = os.getcwd()
    hooks_dir = os.path.join(oppia_dir, '.git', 'hooks')
    pre_push_file = os.path.join(hooks_dir, 'pre-push')
    if os.path.islink(pre_push_file):
        print 'Symlink already exists'
        return
    try:
        os.symlink(os.path.abspath(__file__), pre_push_file)
        print 'Created symlink in .git/hooks directory'
    # raises AttributeError on windows, OSError added as failsafe
    except (OSError, __HOLE__):
        shutil.copy(__file__, pre_push_file)
        print 'Copied file to .git/hooks directory'
AttributeError
dataset/ETHPy150Open oppia/oppia/scripts/pre_push_hook.py/_install_hook
def test_truncate(self):
    "Sequence truncate"
    assert str(self.seq[-202020202:5]) == 'atttg'
    assert self.seq[-202020202:5] == self.seq[0:5]
    assert self.seq[-2020202:] == self.seq
    assert str(self.seq[-202020202:-5]) == 'atttgactatgc'
    assert str(self.seq[-5:2029]) == 'tccag'
    assert str(self.seq[-5:]) == 'tccag'
    try:
        self.seq[999:10000]
        raise ValueError('failed to trap out of bounds slice')
    except __HOLE__:
        pass
    try:
        self.seq[-10000:-3000]
        raise ValueError('failed to trap out of bounds slice')
    except IndexError:
        pass
    try:
        self.seq[1000:]
        raise ValueError('failed to trap out of bounds slice')
    except IndexError:
        pass
IndexError
dataset/ETHPy150Open cjlee112/pygr/tests/sequence_test.py/Sequence_Test.test_truncate
def test_rctruncate(self):
    "Sequence reverse complement truncate"
    seq = -self.seq
    assert str(seq[-202020202:5]) == 'ctgga'
    assert seq[-202020202:5] == seq[0:5]
    assert seq[-2020202:] == seq
    assert str(seq[-202020202:-5]) == 'ctggagcatagt'
    assert str(seq[-5:2029]) == 'caaat'
    assert str(seq[-5:]) == 'caaat'
    try:
        seq[999:10000]
        raise ValueError('failed to trap out of bounds slice')
    except IndexError:
        pass
    try:
        seq[-10000:-3000]
        raise ValueError('failed to trap out of bounds slice')
    except __HOLE__:
        pass
    try:
        seq[1000:]
        raise ValueError('failed to trap out of bounds slice')
    except IndexError:
        pass
IndexError
dataset/ETHPy150Open cjlee112/pygr/tests/sequence_test.py/Sequence_Test.test_rctruncate
def __init__(self, bootstrap, timeout=60, debug=False, token=None):
    self.debug = debug
    self.timeout = timeout
    self.domainname = None
    self.token = token
    self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    self.socket.setblocking(1)
    self.writelock = Lock()

    self.bootstraplist = self.discoverbootstrap(bootstrap)
    if len(self.bootstraplist) == 0:
        raise ConnectionError("No bootstrap found")
    if not self.connecttobootstrap():
        raise ConnectionError("Cannot connect to any bootstrap")
    myaddr = findOwnIP()
    myport = self.socket.getsockname()[1]
    self.me = Peer(myaddr, myport, NODE_CLIENT)
    self.commandnumber = random.randint(1, sys.maxint)

    # synchronization
    self.lock = Lock()
    self.pendingops = {}  # pending requests indexed by commandnumber
    self.doneops = {}  # requests that are finalized, indexed by command number

    # spawn thread, invoke recv_loop
    try:
        recv_thread = Thread(target=self.recv_loop, name='ReceiveThread')
        recv_thread.daemon = True
        recv_thread.start()
    except (__HOLE__, SystemExit):
        self._graceexit()
KeyboardInterrupt
dataset/ETHPy150Open denizalti/concoord/concoord/blockingclientproxy.py/ClientProxy.__init__
def discoverbootstrap(self, givenbootstrap):
    tmpbootstraplist = []
    try:
        for bootstrap in givenbootstrap.split(","):
            bootstrap = bootstrap.strip()
            # The bootstrap list is read only during initialization
            if bootstrap.find(":") >= 0:
                bootaddr, bootport = bootstrap.split(":")
                for peer in self._getipportpairs(bootaddr, int(bootport)):
                    if peer not in tmpbootstraplist:
                        tmpbootstraplist.append(peer)
            else:
                self.domainname = bootstrap
                tmpbootstraplist = self.getbootstrapfromdomain(self.domainname)
    except __HOLE__:
        if self.debug:
            print "bootstrap usage: ipaddr1:port1,ipaddr2:port2 or domainname"
        self._graceexit()
    return tmpbootstraplist
ValueError
dataset/ETHPy150Open denizalti/concoord/concoord/blockingclientproxy.py/ClientProxy.discoverbootstrap
def invoke_command(self, *args):
    # create a request descriptor
    reqdesc = ReqDesc(self, args, self.token)
    self.pendingops[reqdesc.commandnumber] = reqdesc
    # send the request
    with self.writelock:
        self.conn.send(reqdesc.cm)
    with self.lock:
        try:
            while not reqdesc.replyvalid:
                reqdesc.replyarrived.wait()
        except __HOLE__:
            self._graceexit()
        del self.pendingops[reqdesc.commandnumber]
    if reqdesc.reply.replycode == CR_OK or reqdesc.reply.replycode == CR_UNBLOCK:
        return reqdesc.reply.reply
    elif reqdesc.reply.replycode == CR_EXCEPTION:
        raise Exception(pickle.loads(reqdesc.reply.reply))
    else:
        print "Unexpected Client Reply Code: %d" % reqdesc.reply.replycode
KeyboardInterrupt
dataset/ETHPy150Open denizalti/concoord/concoord/blockingclientproxy.py/ClientProxy.invoke_command
def recv_loop(self, *args):
    socketset = [self.socket]
    while True:
        try:
            needreconfig = False
            inputready, outputready, exceptready = select.select(socketset, [], socketset, 0)

            for s in inputready:
                reply = self.conn.receive()
                if reply is None:
                    needreconfig = True
                elif reply and reply.type == MSG_CLIENTREPLY:
                    reqdesc = self.pendingops[reply.inresponseto]
                    with self.lock:
                        if reply.replycode == CR_OK or reply.replycode == CR_EXCEPTION or reply.replycode == CR_UNBLOCK:
                            # actionable response, wake up the thread
                            if reply.replycode == CR_UNBLOCK:
                                assert reqdesc.lastcr == CR_BLOCK, "unblocked thread not previously blocked"
                            reqdesc.lastcr = reply.replycode
                            reqdesc.reply = reply
                            reqdesc.replyvalid = True
                            reqdesc.replyarrived.notify()
                        elif reply.replycode == CR_INPROGRESS or reply.replycode == CR_BLOCK:
                            # the thread is already waiting, no need to do anything
                            reqdesc.lastcr = reply.replycode
                        elif reply.replycode == CR_REJECTED or reply.replycode == CR_LEADERNOTREADY:
                            needreconfig = True
                        else:
                            print "should not happen -- unknown response type"

            while needreconfig:
                if not self.trynewbootstrap():
                    raise ConnectionError("Cannot connect to any bootstrap")
                needreconfig = False
                # check if we need to re-send any pending operations
                for commandno, reqdesc in self.pendingops.iteritems():
                    if not reqdesc.replyvalid and reqdesc.lastreplycr != CR_BLOCK:
                        reqdesc.sendcount += 1
                        reqdesc.cm[FLD_SENDCOUNT] = reqdesc.sendcount
                        if not self.conn.send(reqdesc.cm):
                            needreconfig = True
                        continue
        except __HOLE__:
            self._graceexit()
KeyboardInterrupt
dataset/ETHPy150Open denizalti/concoord/concoord/blockingclientproxy.py/ClientProxy.recv_loop
def parse(self, text, maxwidth=None, maxheight=None, template_dir=None,
          context=None, urlize_all_links=CONSUMER_URLIZE_ALL):
    """
    Scans a block of text, replacing anything matching a provider pattern
    with an OEmbed html snippet, if possible.

    Templates should be stored at oembed/{format}.html, so for example:

        oembed/video.html

    An optional template_dir can be provided, allowing for

        oembed/[template_dir]/video.html

    These templates are passed a context variable, ``response``, which is
    an OEmbedResource, as well as the ``original_url``
    """
    context = context or Context()
    context['maxwidth'] = maxwidth
    context['maxheight'] = maxheight

    try:
        text = unicode(text)
    except __HOLE__:
        text = unicode(text.decode('utf-8'))

    return self.parse_data(text, maxwidth, maxheight, template_dir,
                           context, urlize_all_links)
UnicodeDecodeError
dataset/ETHPy150Open worldcompany/djangoembed/oembed/parsers/base.py/BaseParser.parse
def Parse(rc_name, h_name=None):
    if h_name:
        h_file = open(h_name, "rU")
    else:
        # See if same basename as the .rc
        h_name = rc_name[:-2] + "h"
        try:
            h_file = open(h_name, "rU")
        except __HOLE__:
            # See if MSVC default of 'resource.h' in the same dir.
            h_name = os.path.join(os.path.dirname(rc_name), "resource.h")
            try:
                h_file = open(h_name, "rU")
            except IOError:
                # .h files are optional anyway
                h_file = None
    rc_file = open(rc_name, "rU")
    try:
        return ParseStreams(rc_file, h_file)
    finally:
        if h_file is not None:
            h_file.close()
        rc_file.close()
    return rcp
IOError
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/site-packages/win32/lib/win32rcparser.py/Parse
def getimage(self, size=None):
    if size is None:
        size = self.bestsize()
    channels = self.dataforsize(size)
    im = channels.get("RGB").copy()
    try:
        im.putalpha(channels["A"])
    except __HOLE__:
        pass
    return im

##
# Image plugin for Mac OS icons.
KeyError
dataset/ETHPy150Open kleientertainment/ds_mod_tools/pkg/win32/Python27/Lib/site-packages/PIL/IcnsImagePlugin.py/IcnsFile.getimage
def __init__(self, pkg='', rev='', address=None, payload=None, **kwargs):
    """
    :param str pkg: The package import path within the remote library; by default the root package
                    path (equivalent to passing `pkg='.'` or `pkg=''`).
    :param str rev: Identifies which version of the remote library to download.
                    This could be a commit SHA (git), node id (hg), etc.
                    If left unspecified the version will default to the latest available.
                    It's highly recommended to not accept the default and instead pin the rev
                    explicitly for repeatable builds.
    """
    try:
        package_path = self.normalize_package_path(pkg)
    except __HOLE__ as e:
        raise TargetDefinitionException(address.spec, str(e))

    payload = payload or Payload()
    payload.add_fields({
        'rev': PrimitiveField(rev or ''),  # Guard against/allow `None`.
        'pkg': PrimitiveField(package_path),
    })
    super(GoRemoteLibrary, self).__init__(address=address, payload=payload, **kwargs)
ValueError
dataset/ETHPy150Open pantsbuild/pants/contrib/go/src/python/pants/contrib/go/targets/go_remote_library.py/GoRemoteLibrary.__init__
def get_alias_strings(aliases):
    alias_strings = []
    for alias in aliases:
        alias = item_module.canonical_alias_tuple(alias)
        (namespace, nid) = alias
        try:
            alias_strings += [namespace + ":" + nid]
        except __HOLE__:
            # jsonify the biblio dicts
            alias_strings += [namespace + ":" + json.dumps(nid)]
    return alias_strings
TypeError
dataset/ETHPy150Open Impactstory/total-impact-core/totalimpact/collection.py/get_alias_strings
def get_items_for_client(tiids, myrefsets, myredis,
                         most_recent_metric_date=None,
                         most_recent_diff_metric_date=None):
    item_metric_dicts = get_readonly_item_metric_dicts(
        tiids, most_recent_metric_date, most_recent_diff_metric_date)
    dict_of_item_docs = {}
    for tiid in item_metric_dicts:
        try:
            item_doc_for_client = item_module.build_item_for_client(
                item_metric_dicts[tiid], myrefsets, myredis)
            dict_of_item_docs[tiid] = item_doc_for_client
        except (KeyError, __HOLE__, AttributeError):
            logger.info(u"Couldn't build item {tiid}".format(tiid=tiid))
            raise
    return dict_of_item_docs
TypeError
dataset/ETHPy150Open Impactstory/total-impact-core/totalimpact/collection.py/get_items_for_client
def get_collection_with_items_for_client(cid, myrefsets, myredis, mydao, include_history=False):
    collection_obj = Collection.query.get(cid)
    collection_doc = get_collection_doc_from_object(collection_obj)
    if not collection_doc:
        return (None, None)

    collection_doc["items"] = []
    tiids = collection_obj.tiids
    if tiids:
        item_metric_dicts = get_readonly_item_metric_dicts(tiids)
        for tiid in item_metric_dicts:
            #logger.info(u"got item {tiid} for {cid}".format(
            #    tiid=item_obj.tiid, cid=cid))
            try:
                item_for_client = item_module.build_item_for_client(
                    item_metric_dicts[tiid], myrefsets, myredis)
            except (KeyError, TypeError, __HOLE__):
                logger.info(u"Couldn't build item {tiid}, excluding it from the returned collection {cid}".format(
                    tiid=tiid, cid=cid))
                item_for_client = None
                raise
            if item_for_client:
                collection_doc["items"] += [item_for_client]

    something_currently_updating = not is_all_done(tiids, myredis)
    # logger.debug(u"Got items for collection_doc %s" % cid)
    return (collection_doc, something_currently_updating)
AttributeError
dataset/ETHPy150Open Impactstory/total-impact-core/totalimpact/collection.py/get_collection_with_items_for_client
def clean_value_for_csv(value_to_store):
    try:
        value_to_store = value_to_store.encode("utf-8").strip()
    except __HOLE__:
        pass
    return value_to_store
AttributeError
dataset/ETHPy150Open Impactstory/total-impact-core/totalimpact/collection.py/clean_value_for_csv
def make_csv_rows(items):
    header_metric_names = []
    for item in items:
        header_metric_names += item["metrics"].keys()
    header_metric_names = sorted(list(set(header_metric_names)))

    header_alias_names = ["title", "doi"]

    # make header row
    header_list = ["tiid"] + header_alias_names + header_metric_names
    ordered_fieldnames = OrderedDict([(col, None) for col in header_list])

    # body rows
    rows = []
    for item in items:
        ordered_fieldnames = OrderedDict()
        ordered_fieldnames["tiid"] = item["_id"]
        for alias_name in header_alias_names:
            try:
                if alias_name == "title":
                    ordered_fieldnames[alias_name] = clean_value_for_csv(item['aliases']['biblio'][0]['title'])
                else:
                    ordered_fieldnames[alias_name] = clean_value_for_csv(item['aliases'][alias_name][0])
            except (__HOLE__, KeyError):
                ordered_fieldnames[alias_name] = ""
        for metric_name in header_metric_names:
            try:
                raw_value = item['metrics'][metric_name]['values']['raw']
                ordered_fieldnames[metric_name] = clean_value_for_csv(raw_value)
            except (AttributeError, KeyError):
                ordered_fieldnames[metric_name] = ""
        rows += [ordered_fieldnames]
    return (ordered_fieldnames, rows)
AttributeError
dataset/ETHPy150Open Impactstory/total-impact-core/totalimpact/collection.py/make_csv_rows
def build_all_reference_lookups(myredis, mydao):
    # for expediency, assuming all reference collections are this size
    # risky assumption, but run with it for now!
    size_of_reference_collections = 100
    confidence_interval_level = 0.95
    percentiles = range(100)

    confidence_interval_table = myredis.get_confidence_interval_table(
        size_of_reference_collections, confidence_interval_level)
    if not confidence_interval_table:
        table_return = calc_confidence_interval_table(
            size_of_reference_collections,
            confidence_interval_level=confidence_interval_level,
            percentiles=percentiles)
        confidence_interval_table = table_return["lookup_table"]
        myredis.set_confidence_interval_table(size_of_reference_collections,
                                              confidence_interval_level,
                                              confidence_interval_table)
        #print(json.dumps(confidence_interval_table, indent=4))

    # logger.info(u"querying for reference_set_rows")
    reference_set_rows = Collection.query.filter(Collection.refset_metadata != None).all()
    #res = mydao.db.view("reference-sets/reference-sets", descending=True, include_docs=False, limits=100)
    #logger.info(u"Number rows = " + str(len(res.rows)))

    reference_lookup_dict = {"article": defaultdict(dict),
                             "dataset": defaultdict(dict),
                             "software": defaultdict(dict)}
    reference_histogram_dict = {"article": defaultdict(dict),
                                "dataset": defaultdict(dict),
                                "software": defaultdict(dict)}

    # randomize rows so that multiple gunicorn instances hit them in different orders
    randomized_rows = reference_set_rows
    random.shuffle(randomized_rows)
    if randomized_rows:
        for row in randomized_rows:
            try:
                #(cid, title) = row.key
                #refset_metadata = row.value
                cid = row.cid
                title = row.title
                refset_metadata = row.refset_metadata
                genre = refset_metadata["genre"]
                year = refset_metadata["year"]
                refset_name = refset_metadata["name"]
                refset_version = refset_metadata["version"]
                if refset_version < 0.1:
                    logger.error(u"Refset version too low for '%s', not loading its normalizations" % str(row.key))
                    continue
            except __HOLE__:
                logger.error(u"Normalization '%s' not formatted as expected, not loading its normalizations" % str(row.key))
                continue

            histogram = myredis.get_reference_histogram_dict(genre, refset_name, year)
            lookup = myredis.get_reference_lookup_dict(genre, refset_name, year)

            if histogram and lookup:
                logger.info(u"Loaded successfully from cache")
                reference_histogram_dict[genre][refset_name][year] = histogram
                reference_lookup_dict[genre][refset_name][year] = lookup
            else:
                logger.info(u"Not found in cache, so now building from items")
                if refset_name:
                    try:
                        # send it without reference sets because we are trying to load the reference sets here!
                        (coll_with_items, is_updating) = get_collection_with_items_for_client(cid, None, myredis, mydao)
                    except (LookupError, AttributeError):
                        raise  # not found

                    logger.info(u"Loading normalizations for %s" % coll_with_items["title"])

                    # hack for now to get big collections
                    normalization_numbers = get_metric_values_of_reference_sets(coll_with_items["items"])
                    reference_histogram_dict[genre][refset_name][year] = normalization_numbers

                    reference_lookup = get_normalization_confidence_interval_ranges(normalization_numbers, confidence_interval_table)
                    reference_lookup_dict[genre][refset_name][year] = reference_lookup

                    # save to redis
                    myredis.set_reference_histogram_dict(genre, refset_name, year, normalization_numbers)
                    myredis.set_reference_lookup_dict(genre, refset_name, year, reference_lookup)

    return (reference_lookup_dict, reference_histogram_dict)

# from http://userpages.umbc.edu/~rcampbel/Computers/Python/probstat.html
# also called binomial coefficient
ValueError
dataset/ETHPy150Open Impactstory/total-impact-core/totalimpact/collection.py/build_all_reference_lookups
@patch('corehq.apps.userreports.specs.datetime')
def test_indicators(self, datetime_mock):
    fake_time_now = datetime.datetime(2015, 4, 24, 12, 30, 8, 24886)
    datetime_mock.utcnow.return_value = fake_time_now
    # indicators
    sample_doc, expected_indicators = get_sample_doc_and_indicators(fake_time_now)
    [results] = self.config.get_all_values(sample_doc)
    for result in results:
        try:
            self.assertEqual(expected_indicators[result.column.id], result.value)
        except __HOLE__:
            # todo: this is a hack due to the fact that type conversion currently happens
            # in the database layer. this should eventually be fixed.
            self.assertEqual(str(expected_indicators[result.column.id]), result.value)
AssertionError
dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/userreports/tests/test_data_source_config.py/DataSourceConfigurationTest.test_indicators
def _wait_synchronous(self):
    # Wait to finish, but cancel if KeyboardInterrupt
    from impala.hiveserver2 import OperationalError
    loop_start = time.time()

    def _sleep_interval(start_time):
        elapsed = time.time() - start_time
        if elapsed < 0.05:
            return 0.01
        elif elapsed < 1.0:
            return 0.05
        elif elapsed < 10.0:
            return 0.1
        elif elapsed < 60.0:
            return 0.5
        return 1.0

    cur = self._cursor
    try:
        while True:
            state = cur.status()
            if self._cursor._op_state_is_error(state):
                raise OperationalError("Operation is in ERROR_STATE")
            if not cur._op_state_is_executing(state):
                break
            time.sleep(_sleep_interval(loop_start))
    except __HOLE__:
        print('Canceling query')
        self.cancel()
        raise
KeyboardInterrupt
dataset/ETHPy150Open cloudera/ibis/ibis/impala/client.py/ImpalaCursor._wait_synchronous
def choose_boundary():
    """Return a string usable as a multipart boundary.

    The string chosen is unique within a single program run, and
    incorporates the user id (if available), process id (if available),
    and current time.  So it's very unlikely the returned string appears
    in message text, but there's no guarantee.

    The boundary contains dots so you have to quote it in the header."""
    global _prefix
    import time
    if _prefix is None:
        import socket
        try:
            hostid = socket.gethostbyname(socket.gethostname())
        except socket.gaierror:
            hostid = '127.0.0.1'
        try:
            uid = repr(os.getuid())
        except AttributeError:
            uid = '1'
        try:
            pid = repr(os.getpid())
        except __HOLE__:
            pid = '1'
        _prefix = hostid + '.' + uid + '.' + pid
    return "%s.%.3f.%d" % (_prefix, time.time(), _get_next_counter())

# Subroutines for decoding some common content-transfer-types
AttributeError
dataset/ETHPy150Open babble/babble/include/jython/Lib/mimetools.py/choose_boundary
def ustr(s, encoding="utf-8"):
    """ Convert argument to unicode string.
    """
    if isinstance(s, str):
        return s
    try:
        return s.decode(encoding)
    except __HOLE__:
        return str(s)
AttributeError
dataset/ETHPy150Open nigelsmall/httpstream/httpstream/util.py/ustr
def get_video_dims(fname):
    """
    Pull out the frame length, spatial height and spatial width of
    a video file using ffmpeg.

    Parameters
    ----------
    fname : str
        Path to video file to be inspected.

    Returns
    -------
    shape : tuple
        The spatiotemporal dimensions of the video
        (length, height, width).
    """
    try:
        import pyffmpeg
    except __HOLE__:
        raise ImportError("This function requires pyffmpeg "
                          "<http://code.google.com/p/pyffmpeg/>")
    mp = pyffmpeg.FFMpegReader()
    try:
        mp.open(fname)
        tracks = mp.get_tracks()
        for track in tracks:
            if isinstance(track, pyffmpeg.VideoTrack):
                break
        else:
            raise ValueError('no video track found')
        return (track.duration(),) + track.get_orig_size()
    finally:
        mp.close()
ImportError
dataset/ETHPy150Open lisa-lab/pylearn2/pylearn2/utils/video.py/get_video_dims
def copy_to_clipboard(text):
    # reliable on mac
    if sys.platform == 'darwin':
        os.system('echo "{0}" | pbcopy'.format(text))
        return
    # okay we'll try cross-platform way
    try:
        from Tkinter import Tk
    except __HOLE__:
        return
    r = Tk()
    r.withdraw()
    r.clipboard_clear()
    r.clipboard_append(text.encode('ascii'))
    r.destroy()
ImportError
dataset/ETHPy150Open zeekay/soundcloud-cli/soundcloud_cli/utils.py/copy_to_clipboard
def escape_list(mylist, escape_func):
    """Escape a list of arguments by running the specified escape_func
    on every object in the list that has an escape() method."""
    def escape(obj, escape_func=escape_func):
        try:
            e = obj.escape
        except __HOLE__:
            return obj
        else:
            return e(escape_func)
    return list(map(escape, mylist))
AttributeError
dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Subst.py/escape_list
def __getattr__(self, attr):
    nl = self.nl._create_nodelist()
    try:
        nl0 = nl[0]
    except __HOLE__:
        # If there is nothing in the list, then we have no attributes to
        # pass through, so raise AttributeError for everything.
        raise AttributeError("NodeList has no attribute: %s" % attr)
    return getattr(nl0, attr)
IndexError
dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Subst.py/Target_or_Source.__getattr__
def subst_dict(target, source):
    """Create a dictionary for substitution of special
    construction variables.

    This translates the following special arguments:

    target - the target (object or array of objects),
             used to generate the TARGET and TARGETS
             construction variables

    source - the source (object or array of objects),
             used to generate the SOURCES and SOURCE
             construction variables
    """
    dict = {}

    if target:
        def get_tgt_subst_proxy(thing):
            try:
                subst_proxy = thing.get_subst_proxy()
            except __HOLE__:
                subst_proxy = thing  # probably a string, just return it
            return subst_proxy
        tnl = NLWrapper(target, get_tgt_subst_proxy)
        dict['TARGETS'] = Targets_or_Sources(tnl)
        dict['TARGET'] = Target_or_Source(tnl)

        # This is a total cheat, but hopefully this dictionary goes
        # away soon anyway.  We just let these expand to $TARGETS
        # because that's "good enough" for the use of ToolSurrogates
        # (see test/ToolSurrogate.py) to generate documentation.
        dict['CHANGED_TARGETS'] = '$TARGETS'
        dict['UNCHANGED_TARGETS'] = '$TARGETS'
    else:
        dict['TARGETS'] = NullNodesList
        dict['TARGET'] = NullNodesList

    if source:
        def get_src_subst_proxy(node):
            try:
                rfile = node.rfile
            except AttributeError:
                pass
            else:
                node = rfile()
            try:
                return node.get_subst_proxy()
            except AttributeError:
                return node  # probably a String, just return it
        snl = NLWrapper(source, get_src_subst_proxy)
        dict['SOURCES'] = Targets_or_Sources(snl)
        dict['SOURCE'] = Target_or_Source(snl)

        # This is a total cheat, but hopefully this dictionary goes
        # away soon anyway.  We just let these expand to $TARGETS
        # because that's "good enough" for the use of ToolSurrogates
        # (see test/ToolSurrogate.py) to generate documentation.
        dict['CHANGED_SOURCES'] = '$SOURCES'
        dict['UNCHANGED_SOURCES'] = '$SOURCES'
    else:
        dict['SOURCES'] = NullNodesList
        dict['SOURCE'] = NullNodesList

    return dict

# Constants for the "mode" parameter to scons_subst_list() and
# scons_subst().  SUBST_RAW gives the raw command line.  SUBST_CMD
# gives a command line suitable for passing to a shell.  SUBST_SIG
# gives a command line appropriate for calculating the signature
# of a command line...if this changes, we should rebuild.
AttributeError
dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Subst.py/subst_dict
def scons_subst(strSubst, env, mode=SUBST_RAW, target=None, source=None, gvars={}, lvars={}, conv=None):
    """Expand a string or list containing construction variable
    substitutions.

    This is the work-horse function for substitutions in file names
    and the like.  The companion scons_subst_list() function (below)
    handles separating command lines into lists of arguments, so see
    that function if that's what you're looking for.
    """
    if isinstance(strSubst, str) and strSubst.find('$') < 0:
        return strSubst

    class StringSubber(object):
        """A class to construct the results of a scons_subst() call.

        This binds a specific construction environment, mode, target and
        source with two methods (substitute() and expand()) that handle
        the expansion.
        """
        def __init__(self, env, mode, conv, gvars):
            self.env = env
            self.mode = mode
            self.conv = conv
            self.gvars = gvars

        def expand(self, s, lvars):
            """Expand a single "token" as necessary, returning an
            appropriate string containing the expansion.

            This handles expanding different types of things (strings,
            lists, callables) appropriately.  It calls the wrapper
            substitute() method to re-expand things as necessary, so that
            the results of expansions of side-by-side strings still get
            re-evaluated separately, not smushed together.
            """
            if is_String(s):
                try:
                    s0, s1 = s[:2]
                except (__HOLE__, ValueError):
                    return s
                if s0 != '$':
                    return s
                if s1 == '$':
                    return '$'
                elif s1 in '()':
                    return s
                else:
                    key = s[1:]
                    if key[0] == '{' or key.find('.') >= 0:
                        if key[0] == '{':
                            key = key[1:-1]
                        try:
                            s = eval(key, self.gvars, lvars)
                        except KeyboardInterrupt:
                            raise
                        except Exception, e:
                            if e.__class__ in AllowableExceptions:
                                return ''
                            raise_exception(e, lvars['TARGETS'], s)
                    else:
                        if key in lvars:
                            s = lvars[key]
                        elif key in self.gvars:
                            s = self.gvars[key]
                        elif not NameError in AllowableExceptions:
                            raise_exception(NameError(key), lvars['TARGETS'], s)
                        else:
                            return ''

                    # Before re-expanding the result, handle
                    # recursive expansion by copying the local
                    # variable dictionary and overwriting a null
                    # string for the value of the variable name
                    # we just expanded.
                    #
                    # This could potentially be optimized by only
                    # copying lvars when s contains more expansions,
                    # but lvars is usually supposed to be pretty
                    # small, and deeply nested variable expansions
                    # are probably more the exception than the norm,
                    # so it should be tolerable for now.
                    lv = lvars.copy()
                    var = key.split('.')[0]
                    lv[var] = ''
                    return self.substitute(s, lv)
            elif is_Sequence(s):
                def func(l, conv=self.conv, substitute=self.substitute, lvars=lvars):
                    return conv(substitute(l, lvars))
                return list(map(func, s))
            elif callable(s):
                try:
                    s = s(target=lvars['TARGETS'],
                          source=lvars['SOURCES'],
                          env=self.env,
                          for_signature=(self.mode != SUBST_CMD))
                except TypeError:
                    # This probably indicates that it's a callable
                    # object that doesn't match our calling arguments
                    # (like an Action).
                    if self.mode == SUBST_RAW:
                        return s
                    s = self.conv(s)
                return self.substitute(s, lvars)
            elif s is None:
                return ''
            else:
                return s

        def substitute(self, args, lvars):
            """Substitute expansions in an argument or list of arguments.

            This serves as a wrapper for splitting up a string into
            separate tokens.
            """
            if is_String(args) and not isinstance(args, CmdStringHolder):
                args = str(args)        # In case it's a UserString.
                try:
                    def sub_match(match):
                        return self.conv(self.expand(match.group(1), lvars))
                    result = _dollar_exps.sub(sub_match, args)
                except TypeError:
                    # If the internal conversion routine doesn't return
                    # strings (it could be overridden to return Nodes, for
                    # example), then the 1.5.2 re module will throw this
                    # exception.  Back off to a slower, general-purpose
                    # algorithm that works for all data types.
                    args = _separate_args.findall(args)
                    result = []
                    for a in args:
                        result.append(self.conv(self.expand(a, lvars)))
                    if len(result) == 1:
                        result = result[0]
                    else:
                        result = ''.join(map(str, result))
                return result
            else:
                return self.expand(args, lvars)

    if conv is None:
        conv = _strconv[mode]

    # Doing this every time is a bit of a waste, since the Executor
    # has typically already populated the OverrideEnvironment with
    # $TARGET/$SOURCE variables.  We're keeping this (for now), though,
    # because it supports existing behavior that allows us to call
    # an Action directly with an arbitrary target+source pair, which
    # we use in Tool/tex.py to handle calling $BIBTEX when necessary.
    # If we dropped that behavior (or found another way to cover it),
    # we could get rid of this call completely and just rely on the
    # Executor setting the variables.
    if 'TARGET' not in lvars:
        d = subst_dict(target, source)
        if d:
            lvars = lvars.copy()
            lvars.update(d)

    # We're (most likely) going to eval() things.  If Python doesn't
    # find a __builtins__ value in the global dictionary used for eval(),
    # it copies the current global values for you.  Avoid this by
    # setting it explicitly and then deleting, so we don't pollute the
    # construction environment Dictionary(ies) that are typically used
    # for expansion.
    gvars['__builtins__'] = __builtins__

    ss = StringSubber(env, mode, conv, gvars)
    result = ss.substitute(strSubst, lvars)

    try:
        del gvars['__builtins__']
    except KeyError:
        pass

    if is_String(result):
        # Remove $(-$) pairs and any stuff in between,
        # if that's appropriate.
        remove = _regex_remove[mode]
        if remove:
            result = remove.sub('', result)
        if mode != SUBST_RAW:
            # Compress strings of white space characters into
            # a single space.
            result = _space_sep.sub(' ', result).strip()
    elif is_Sequence(result):
        remove = _list_remove[mode]
        if remove:
            result = remove(result)

    return result

#Subst_List_Strings = {}
IndexError
dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Subst.py/scons_subst
def scons_subst_list(strSubst, env, mode=SUBST_RAW, target=None, source=None, gvars={}, lvars={}, conv=None):
    """Substitute construction variables in a string (or list or other
    object) and separate the arguments into a command list.

    The companion scons_subst() function (above) handles basic
    substitutions within strings, so see that function instead
    if that's what you're looking for.
    """
#    try:
#        Subst_List_Strings[strSubst] = Subst_List_Strings[strSubst] + 1
#    except KeyError:
#        Subst_List_Strings[strSubst] = 1
#    import SCons.Debug
#    SCons.Debug.caller_trace(1)
    class ListSubber(collections.UserList):
        """A class to construct the results of a scons_subst_list() call.

        Like StringSubber, this class binds a specific construction
        environment, mode, target and source with two methods
        (substitute() and expand()) that handle the expansion.

        In addition, however, this class is used to track the state of
        the result(s) we're gathering so we can do the appropriate thing
        whenever we have to append another word to the result--start a new
        line, start a new word, append to the current word, etc.  We do
        this by setting the "append" attribute to the right method so
        that our wrapper methods only need ever call ListSubber.append(),
        and the rest of the object takes care of doing the right thing
        internally.
        """
        def __init__(self, env, mode, conv, gvars):
            collections.UserList.__init__(self, [])
            self.env = env
            self.mode = mode
            self.conv = conv
            self.gvars = gvars

            if self.mode == SUBST_RAW:
                self.add_strip = lambda x: self.append(x)
            else:
                self.add_strip = lambda x: None
            self.in_strip = None
            self.next_line()

        def expand(self, s, lvars, within_list):
            """Expand a single "token" as necessary, appending the
            expansion to the current result.

            This handles expanding different types of things (strings,
            lists, callables) appropriately.  It calls the wrapper
            substitute() method to re-expand things as necessary, so that
            the results of expansions of side-by-side strings still get
            re-evaluated separately, not smushed together.
            """
            if is_String(s):
                try:
                    s0, s1 = s[:2]
                except (__HOLE__, ValueError):
                    self.append(s)
                    return
                if s0 != '$':
                    self.append(s)
                    return
                if s1 == '$':
                    self.append('$')
                elif s1 == '(':
                    self.open_strip('$(')
                elif s1 == ')':
                    self.close_strip('$)')
                else:
                    key = s[1:]
                    if key[0] == '{' or key.find('.') >= 0:
                        if key[0] == '{':
                            key = key[1:-1]
                        try:
                            s = eval(key, self.gvars, lvars)
                        except KeyboardInterrupt:
                            raise
                        except Exception, e:
                            if e.__class__ in AllowableExceptions:
                                return
                            raise_exception(e, lvars['TARGETS'], s)
                    else:
                        if key in lvars:
                            s = lvars[key]
                        elif key in self.gvars:
                            s = self.gvars[key]
                        elif not NameError in AllowableExceptions:
                            raise_exception(NameError(), lvars['TARGETS'], s)
                        else:
                            return

                    # Before re-expanding the result, handle
                    # recursive expansion by copying the local
                    # variable dictionary and overwriting a null
                    # string for the value of the variable name
                    # we just expanded.
                    lv = lvars.copy()
                    var = key.split('.')[0]
                    lv[var] = ''
                    self.substitute(s, lv, 0)
                    self.this_word()
            elif is_Sequence(s):
                for a in s:
                    self.substitute(a, lvars, 1)
                    self.next_word()
            elif callable(s):
                try:
                    s = s(target=lvars['TARGETS'],
                          source=lvars['SOURCES'],
                          env=self.env,
                          for_signature=(self.mode != SUBST_CMD))
                except TypeError:
                    # This probably indicates that it's a callable
                    # object that doesn't match our calling arguments
                    # (like an Action).
                    if self.mode == SUBST_RAW:
                        self.append(s)
                        return
                    s = self.conv(s)
                self.substitute(s, lvars, within_list)
            elif s is None:
                self.this_word()
            else:
                self.append(s)

        def substitute(self, args, lvars, within_list):
            """Substitute expansions in an argument or list of arguments.

            This serves as a wrapper for splitting up a string into
            separate tokens.
            """
            if is_String(args) and not isinstance(args, CmdStringHolder):
                args = str(args)        # In case it's a UserString.
                args = _separate_args.findall(args)
                for a in args:
                    if a[0] in ' \t\n\r\f\v':
                        if '\n' in a:
                            self.next_line()
                        elif within_list:
                            self.append(a)
                        else:
                            self.next_word()
                    else:
                        self.expand(a, lvars, within_list)
            else:
                self.expand(args, lvars, within_list)

        def next_line(self):
            """Arrange for the next word to start a new line.  This
            is like starting a new word, except that we have to append
            another line to the result."""
            collections.UserList.append(self, [])
            self.next_word()

        def this_word(self):
            """Arrange for the next word to append to the end of the
            current last word in the result."""
            self.append = self.add_to_current_word

        def next_word(self):
            """Arrange for the next word to start a new word."""
            self.append = self.add_new_word

        def add_to_current_word(self, x):
            """Append the string x to the end of the current last word
            in the result.  If that is not possible, then just add
            it as a new word.  Make sure the entire concatenated string
            inherits the object attributes of x (in particular, the
            escape function) by wrapping it as CmdStringHolder."""
            if not self.in_strip or self.mode != SUBST_SIG:
                try:
                    current_word = self[-1][-1]
                except IndexError:
                    self.add_new_word(x)
                else:
                    # All right, this is a hack and it should probably
                    # be refactored out of existence in the future.
                    # The issue is that we want to smoosh words together
                    # and make one file name that gets escaped if
                    # we're expanding something like foo$EXTENSION,
                    # but we don't want to smoosh them together if
                    # it's something like >$TARGET, because then we'll
                    # treat the '>' like it's part of the file name.
                    # So for now, just hard-code looking for the special
                    # command-line redirection characters...
                    try:
                        last_char = str(current_word)[-1]
                    except IndexError:
                        last_char = '\0'
                    if last_char in '<>|':
                        self.add_new_word(x)
                    else:
                        y = current_word + x

                        # We used to treat a word appended to a literal
                        # as a literal itself, but this caused problems
                        # with interpreting quotes around space-separated
                        # targets on command lines.  Removing this makes
                        # none of the "substantive" end-to-end tests fail,
                        # so we'll take this out but leave it commented
                        # for now in case there's a problem not covered
                        # by the test cases and we need to resurrect this.
                        #literal1 = self.literal(self[-1][-1])
                        #literal2 = self.literal(x)
                        y = self.conv(y)
                        if is_String(y):
                            #y = CmdStringHolder(y, literal1 or literal2)
                            y = CmdStringHolder(y, None)
                        self[-1][-1] = y

        def add_new_word(self, x):
            if not self.in_strip or self.mode != SUBST_SIG:
                literal = self.literal(x)
                x = self.conv(x)
                if is_String(x):
                    x = CmdStringHolder(x, literal)
                self[-1].append(x)
            self.append = self.add_to_current_word

        def literal(self, x):
            try:
                l = x.is_literal
            except AttributeError:
                return None
            else:
                return l()

        def open_strip(self, x):
            """Handle the "open strip" $( token."""
            self.add_strip(x)
            self.in_strip = 1

        def close_strip(self, x):
            """Handle the "close strip" $) token."""
            self.add_strip(x)
            self.in_strip = None

    if conv is None:
        conv = _strconv[mode]

    # Doing this every time is a bit of a waste, since the Executor
    # has typically already populated the OverrideEnvironment with
    # $TARGET/$SOURCE variables.  We're keeping this (for now), though,
    # because it supports existing behavior that allows us to call
    # an Action directly with an arbitrary target+source pair, which
    # we use in Tool/tex.py to handle calling $BIBTEX when necessary.
    # If we dropped that behavior (or found another way to cover it),
    # we could get rid of this call completely and just rely on the
    # Executor setting the variables.
    if 'TARGET' not in lvars:
        d = subst_dict(target, source)
        if d:
            lvars = lvars.copy()
            lvars.update(d)

    # We're (most likely) going to eval() things.  If Python doesn't
    # find a __builtins__ value in the global dictionary used for eval(),
    # it copies the current global values for you.  Avoid this by
    # setting it explicitly and then deleting, so we don't pollute the
    # construction environment Dictionary(ies) that are typically used
    # for expansion.
    gvars['__builtins__'] = __builtins__

    ls = ListSubber(env, mode, conv, gvars)
    ls.substitute(strSubst, lvars, 0)

    try:
        del gvars['__builtins__']
    except KeyError:
        pass

    return ls.data
IndexError
dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Subst.py/scons_subst_list
@instance_synchronized
def get_console_output(self, instance_name):
    console_log_paths = self._pathutils.get_vm_console_log_paths(
        instance_name)
    try:
        log = b''
        # Start with the oldest console log file.
        for log_path in console_log_paths[::-1]:
            if os.path.exists(log_path):
                with open(log_path, 'rb') as fp:
                    log += fp.read()
        return log
    except __HOLE__ as err:
        raise exception.ConsoleLogOutputException(
            instance_id=instance_name, reason=six.text_type(err))
IOError
dataset/ETHPy150Open openstack/compute-hyperv/hyperv/nova/serialconsoleops.py/SerialConsoleOps.get_console_output
def test_bugdown_fixtures(self):
    format_tests, linkify_tests = self.load_bugdown_tests()

    self.maxDiff = None
    for name, test in six.iteritems(format_tests):
        converted = bugdown_convert(test['input'])

        print("Running Bugdown test %s" % (name,))
        self.assertEqual(converted, test['expected_output'])

    def replaced(payload, url, phrase=''):
        target = " target=\"_blank\""
        if url[:4] == 'http':
            href = url
        elif '@' in url:
            href = 'mailto:' + url
            target = ""
        else:
            href = 'http://' + url
        return payload % ("<a href=\"%s\"%s title=\"%s\">%s</a>" % (href, target, href, url),)

    print("Running Bugdown Linkify tests")
    self.maxDiff = None
    for inline_url, reference, url in linkify_tests:
        try:
            match = replaced(reference, url, phrase=inline_url)
        except __HOLE__:
            match = reference
        converted = bugdown_convert(inline_url)
        self.assertEqual(match, converted)
TypeError
dataset/ETHPy150Open zulip/zulip/zerver/tests/test_bugdown.py/BugdownTest.test_bugdown_fixtures
def connect(self, dupe=None):
    """ Starts connection. Connects to proxy, supplies login and password to it
        (if were specified while creating instance). Instructs proxy to make
        connection to the target server. Returns non-empty sting on success. """
    if not TCPsocket.connect(self, (self._proxy['host'], self._proxy['port'])):
        return
    self.DEBUG("Proxy server contacted, performing authentification", 'start')
    connector = ['CONNECT %s:%s HTTP/1.0' % self._server,
                 'Proxy-Connection: Keep-Alive',
                 'Pragma: no-cache',
                 'Host: %s:%s' % self._server,
                 'User-Agent: HTTPPROXYsocket/v0.1']
    if self._proxy.has_key('user') and self._proxy.has_key('password'):
        credentials = '%s:%s' % (self._proxy['user'], self._proxy['password'])
        credentials = base64.encodestring(credentials).strip()
        connector.append('Proxy-Authorization: Basic ' + credentials)
    connector.append('\r\n')
    self.send('\r\n'.join(connector))
    try:
        reply = self.receive().replace('\r', '')
    except IOError:
        self.DEBUG('Proxy suddenly disconnected', 'error')
        self._owner.disconnected()
        return
    try:
        proto, code, desc = reply.split('\n')[0].split(' ', 2)
    except:
        raise error('Invalid proxy reply')
    if code <> '200':
        self.DEBUG('Invalid proxy reply: %s %s %s' % (proto, code, desc), 'error')
        self._owner.disconnected()
        return
    while reply.find('\n\n') == -1:
        try:
            reply += self.receive().replace('\r', '')
        except __HOLE__:
            self.DEBUG('Proxy suddenly disconnected', 'error')
            self._owner.disconnected()
            return
    self.DEBUG("Authentification successfull. Jabber server contacted.", 'ok')
    return 'ok'
IOError
dataset/ETHPy150Open kuri65536/python-for-android/python-build/python-libs/xmpppy/xmpp/transports.py/HTTPPROXYsocket.connect
def watch(self, args):
    from_date = None
    while True:
        from_date = self.query(args, from_date)
        try:
            time.sleep(2)
        except (KeyboardInterrupt, __HOLE__):
            sys.exit(0)
SystemExit
dataset/ETHPy150Open alerta/python-alerta/alerta/shell.py/AlertCommand.watch
def top(self, args):
    screen = Screen(endpoint=args.endpoint, key=args.key)
    try:
        screen.run()
    except __HOLE__ as e:
        screen._reset()
        print(e)
        sys.exit(1)
    except (KeyboardInterrupt, SystemExit):
        screen.w.running = False
        screen._reset()
        print('Exiting...')
        sys.exit(0)
RuntimeError
dataset/ETHPy150Open alerta/python-alerta/alerta/shell.py/AlertCommand.top
def keys(self, args):
    response = self._keys()
    keys = response['keys']

    print('{:<40} {:<24} {:<20} {:<16} {:<10} {:19} {:19} {:4}'.format(
        'API KEY', 'USER', 'DESCRIPTION', 'CUSTOMER', 'RO / RW', 'EXPIRES', 'LAST USED', 'COUNT'))
    for key in keys:
        expire_time = datetime.strptime(key['expireTime'], '%Y-%m-%dT%H:%M:%S.%fZ')
        tz = pytz.timezone(args.timezone)
        try:
            last_used_time = datetime.strptime(key['lastUsedTime'], '%Y-%m-%dT%H:%M:%S.%fZ')
            last_used_time_or_none = last_used_time.replace(tzinfo=pytz.UTC).astimezone(tz).strftime('%Y/%m/%d %H:%M:%S')
        except __HOLE__:
            last_used_time_or_none = 'not used'
        print('{} {:<24} {:<20} {:<16} {:<10} {:19} {:19} {:>5}'.format(
            key['key'],
            key['user'],
            key['text'],
            key.get('customer', '') or '-',
            key['type'],
            expire_time.replace(tzinfo=pytz.UTC).astimezone(tz).strftime('%Y/%m/%d %H:%M:%S'),
            last_used_time_or_none,
            key['count']
        ))
TypeError
dataset/ETHPy150Open alerta/python-alerta/alerta/shell.py/AlertCommand.keys
def main():
    logging.basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")

    # Only mangle the terminal if using Python 2.x
    if sys.version_info[0] == 2:
        sys.stdout = codecs.getwriter('utf-8')(sys.stdout)

    try:
        AlertaShell().run()
    except (__HOLE__, KeyboardInterrupt):
        LOG.warning("Exiting alerta client.")
        sys.exit(0)
    except Exception as e:
        LOG.error(e, exc_info=1)
        sys.exit(1)
SystemExit
dataset/ETHPy150Open alerta/python-alerta/alerta/shell.py/main
def range_usage_text(request):
    start = request.GET.get('start', None)
    end = request.GET.get('end', None)
    format = request.GET.get('format', 'human_readable')
    if not (start and end):
        return HttpResponse(json.dumps({
            'success': False,
            'error_messages': 'Provide a start and end'}))

    get_objects = request.GET.get('get_objects', False)
    if start.find(':') > -1:
        ip_type = '6'
    else:
        ip_type = '4'
    try:
        usage_data = range_usage(start, end, ip_type, get_objects)
    except (__HOLE__, ipaddr.AddressValueError), e:
        return HttpResponse(
            json.dumps({
                'error_messages': str(e),
                'success': False
            }))

    if format == 'human_readable':
        usage_data['free_ranges'] = map(lambda x: (int_to_ip(x[0], ip_type),
                                                   int_to_ip(x[1], ip_type)),
                                        usage_data['free_ranges'])

    usage_data['success'] = True
    return HttpResponse(json.dumps(usage_data))
ValidationError
dataset/ETHPy150Open mozilla/inventory/core/range/views.py/range_usage_text
def redirect_to_range_from_ip(request):
    ip_str = request.GET.get('ip_str')
    ip_type = request.GET.get('ip_type')
    if not (ip_str and ip_type):
        return HttpResponse(json.dumps({'failure': "Slob"}))

    if ip_type == '4':
        try:
            ip_upper, ip_lower = 0, int(ipaddr.IPv4Address(ip_str))
        except ipaddr.AddressValueError:
            return HttpResponse(
                json.dumps({'success': False,
                            'message': "Failure to "
                                       "recognize{0} as an IPv4 "
                                       "Address.".format(ip_str)}))
    else:
        try:
            ip_upper, ip_lower = ipv6_to_longs(ip_str)
        except __HOLE__:
            return HttpResponse(json.dumps({'success': False,
                                            'message': 'Invalid IP'}))

    range_ = Range.objects.filter(start_upper__lte=ip_upper,
                                  start_lower__lte=ip_lower,
                                  end_upper__gte=ip_upper,
                                  end_lower__gte=ip_lower)
    if not len(range_) == 1:
        return HttpResponse(json.dumps({'failure': "Failture to find range"}))
    else:
        return HttpResponse(json.dumps(
            {'success': True,
             'redirect_url': range_[0].get_absolute_url()}))
ValidationError
dataset/ETHPy150Open mozilla/inventory/core/range/views.py/redirect_to_range_from_ip
def find_related(request):
    """
    Given a list of site, vlan, and network primary keys, help a user make
    choices about where to put an IP address

    A user can select from choices:
        Networks
        Vlans
        Sites

    The goal of the UI is to help a user choose a range -- which for this
    function can be seen as filtering down to exactly 1 network.

    When a user selects a site, this can limit which networks and in turn
    which vlans are displayed.

    When a user selects a vlan, this can limit which networks are displayed
    which in turn can limit which sites are displayed

    When a user selects a network, this will limit both networks, vlans, and
    sites to at most one object per each type.

    input::

        {
            'choice': [<type>, <pk>],
            'sites': [1, ...],
            'vlans': [1, ...],
            'networks': [1, ...],
        }

    The value of '<type>' is a string that is either 'site', 'vlan', or
    'network'. The value of '<pk>' is a number.

    output:
        Same as input but with things filtered plus a new list of 'range'
        information. E.x.:

        {
            'sites': [<pks>],
            'vlans': [<pks>],
            'networks': [<pks>],
            'range': [
                {'name': ...
                 'ip_start': ...
                 'ip_end': ...
                 'reserved': ...
                },
                ...
                ...
                {'name': ...
                 'ip_start': ...
                 'ip_end': ...
                 'reserved': ...
                }
            ]
        }

    This function will key off of 'choice' to determine how to slim down a
    users choice of objects.
    """
    state = json.loads(request.raw_post_data)
    if not state:
        raise Exception("No state?")

    if 'choice' not in state:
        raise Exception("No choice?")

    try:
        choice_type, choice_pk = state['choice']
    except __HOLE__:
        raise Exception(
            "Choice was '{0}'. This is wrong".format(state['choice'])
        )

    filter_network, filter_site, filter_vlan = calculate_filters(
        choice_type, choice_pk
    )
    format_network, format_site, format_vlan = label_value_maker()

    new_state = {
        'sites': format_site(filter_site(state['sites'])),
        'vlans': format_vlan(filter_vlan(state['vlans'])),
    }
    # Network are special. If there is only one, we need to add some range
    # info. If there are zero or more than one, don't add any range objects
    networks = filter_network(state['networks'])

    if len(networks) == 1:
        new_state['ranges'] = integrate_real_ranges(
            networks[0], calc_template_ranges(networks[0])
        )

    new_state['networks'] = format_network(networks)

    return HttpResponse(json.dumps(new_state))
ValueError
dataset/ETHPy150Open mozilla/inventory/core/range/views.py/find_related
def _join_if_needed(self, value):
    if isinstance(value, (list, tuple)):
        try:
            return self._join_multivalued.join(value)
        except __HOLE__:  # list in value may not contain strings
            pass
    return value
TypeError
dataset/ETHPy150Open scrapy/scrapy/scrapy/exporters.py/CsvItemExporter._join_if_needed
def _build_row(self, values):
    for s in values:
        try:
            yield to_native_str(s)
        except __HOLE__:
            yield s
TypeError
dataset/ETHPy150Open scrapy/scrapy/scrapy/exporters.py/CsvItemExporter._build_row
@property
def size(self):
    try:
        return int(self.data['details'].get('filesize'))
    except __HOLE__:
        return None
ValueError
dataset/ETHPy150Open picklepete/pyicloud/pyicloud/services/photos.py/PhotoAsset.size
def __contains__(self, item):
    try:
        wr = ref(item)
    except __HOLE__:
        return False
    return wr in self.data
TypeError
dataset/ETHPy150Open kleientertainment/ds_mod_tools/pkg/win32/Python27/Lib/_weakrefset.py/WeakSet.__contains__
def pop(self):
    if self._pending_removals:
        self._commit_removals()
    while True:
        try:
            itemref = self.data.pop()
        except __HOLE__:
            raise KeyError('pop from empty WeakSet')
        item = itemref()
        if item is not None:
            return item
KeyError
dataset/ETHPy150Open kleientertainment/ds_mod_tools/pkg/win32/Python27/Lib/_weakrefset.py/WeakSet.pop
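The retry loop discards references whose targets have already been garbage-collected; under CPython's reference counting this is easy to show:

import weakref

class Node:
    pass

a, b = Node(), Node()
s = weakref.WeakSet([a, b])
del a                       # CPython's refcounting kills this target now
print(s.pop() is b)         # True: dead references are skipped, not returned
try:
    s.pop()
except KeyError as e:
    print(e)                # pop from empty WeakSet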
def __init__(self, parent):
    parent.title = "Interactive Lobe Segmentation"  # TODO make this more human readable by adding spaces
    parent.categories = ["Chest Imaging Platform"]
    parent.dependencies = []
    parent.contributors = ["Pietro Nardelli (UCC/SPL) and Applied Chest Imaging Laboratory, Brigham and Women's Hospital"]
    parent.helpText = """
    Scripted loadable module for Interactive Lobe segmentation.
    """
    parent.acknowledgementText = """
    This work is funded by the National Heart, Lung, And Blood Institute of
    the National Institutes of Health under Award Number R01HL116931. The
    content is solely the responsibility of the authors and does not
    necessarily represent the official views of the National Institutes of
    Health.
    """
    self.parent = parent

    # Add this test to the SelfTest module's list for discovery when the module
    # is created. Since this module may be discovered before SelfTests itself,
    # create the list if it doesn't already exist.
    try:
        slicer.selfTests
    except __HOLE__:
        slicer.selfTests = {}
    slicer.selfTests['InteractiveLobeSegmentation'] = self.runTest
AttributeError
dataset/ETHPy150Open acil-bwh/SlicerCIP/Scripted/InteractiveLobeSegmentation/InteractiveLobeSegmentation.py/InteractiveLobeSegmentation.__init__
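The try/except AttributeError probe is the usual idiom for lazily creating a shared registry on an object you don't own; a stand-alone sketch with a stand-in for the slicer namespace:

class _Slicer:                  # stand-in for the slicer application namespace
    pass

slicer_like = _Slicer()
try:
    slicer_like.selfTests       # probe: does the shared registry exist yet?
except AttributeError:
    slicer_like.selfTests = {}  # first registrant creates it
slicer_like.selfTests['InteractiveLobeSegmentation'] = lambda: None
print(slicer_like.selfTests)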
def onReload(self, moduleName="InteractiveLobeSegmentation"):
    """Generic reload method for any scripted module.
    ModuleWizard will substitute correct default moduleName.
    """
    import imp, sys, os, slicer

    widgetName = moduleName + "Widget"

    # reload the source code
    # - set source file path
    # - load the module to the global space
    filePath = eval('slicer.modules.%s.path' % moduleName.lower())
    p = os.path.dirname(filePath)
    if not sys.path.__contains__(p):
        sys.path.insert(0, p)
    fp = open(filePath, "r")
    globals()[moduleName] = imp.load_module(
        moduleName, fp, filePath, ('.py', 'r', imp.PY_SOURCE))
    fp.close()

    # rebuild the widget
    # - find and hide the existing widget
    # - create a new widget in the existing parent
    parent = slicer.util.findChildren(name='%s Reload' % moduleName)[0].parent().parent()
    for child in parent.children():
        try:
            child.hide()
        except __HOLE__:
            pass
    # Remove spacer items
    item = parent.layout().itemAt(0)
    while item:
        parent.layout().removeItem(item)
        item = parent.layout().itemAt(0)

    # delete the old widget instance
    if hasattr(globals()['slicer'].modules, widgetName):
        getattr(globals()['slicer'].modules, widgetName).cleanup()

    # create new widget inside existing parent
    globals()[widgetName.lower()] = eval(
        'globals()["%s"].%s(parent)' % (moduleName, widgetName))
    globals()[widgetName.lower()].setup()
    setattr(globals()['slicer'].modules, widgetName, globals()[widgetName.lower()])
AttributeError
dataset/ETHPy150Open acil-bwh/SlicerCIP/Scripted/InteractiveLobeSegmentation/InteractiveLobeSegmentation.py/InteractiveLobeSegmentationWidget.onReload
def job_script_kwargs(self, ofile, efile, job_name):
    pbsargs = {'-o': ofile, '-e': efile, '-N': job_name}
    for k, v in self.params.items():
        if k == 'plugin':
            continue
        try:
            if not k.startswith('-'):
                k = argmap[k]
            pbsargs[k] = v
        except __HOLE__:
            log.warning(ERROR_MESSAGE_UNRECOGNIZED_ARG % k)
    template_pbsargs = ''
    for k, v in pbsargs.items():
        template_pbsargs += '#PBS %s %s\n' % (k, v)
    return dict(headers=template_pbsargs)
KeyError
dataset/ETHPy150Open galaxyproject/pulsar/pulsar/managers/util/cli/job/torque.py/Torque.job_script_kwargs
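A self-contained run of the same translation loop. The argmap contents and error message below are assumptions for illustration (the real ones live in the plugin module); the point is how long option names become #PBS header lines while unknown names only produce a warning:

import logging

logging.basicConfig()
log = logging.getLogger(__name__)

argmap = {'destination': '-q', 'Execution_Time': '-a'}   # hypothetical subset
ERROR_MESSAGE_UNRECOGNIZED_ARG = 'Unrecognized long argument passed to Torque CLI plugin: %s'

params = {'plugin': 'Torque', 'destination': 'batch', 'frobnicate': 'yes'}
pbsargs = {'-o': 'job.out', '-e': 'job.err', '-N': 'trim_reads'}
for k, v in params.items():
    if k == 'plugin':                   # plugin selector is not a qsub flag
        continue
    try:
        if not k.startswith('-'):
            k = argmap[k]               # unknown long names raise KeyError
        pbsargs[k] = v
    except KeyError:
        log.warning(ERROR_MESSAGE_UNRECOGNIZED_ARG % k)

print(''.join('#PBS %s %s\n' % (k, v) for k, v in pbsargs.items()))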
def _get_job_state(self, state):
    try:
        # subscript (not .get) so unmapped status codes actually raise
        return {
            'E': job_states.RUNNING,
            'R': job_states.RUNNING,
            'Q': job_states.QUEUED,
            'C': job_states.OK
        }[state]
    except __HOLE__:
        raise KeyError("Failed to map torque status code [%s] to job state." % state)
KeyError
dataset/ETHPy150Open galaxyproject/pulsar/pulsar/managers/util/cli/job/torque.py/Torque._get_job_state
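Worth noting: dict.get() returns None for unknown keys rather than raising, so only plain subscripting makes a KeyError handler like the one above reachable:

codes = {'Q': 'queued', 'R': 'running'}
print(codes['R'])        # 'running'
print(codes.get('X'))    # None -- .get() never raises KeyError
try:
    codes['X']           # subscripting an unmapped code does raise
except KeyError as e:
    print('unmapped:', e)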
@classmethod
def get_row_by_pk(self, ar, pk):
    """Implements :meth:`get_row_by_pk
    <lino.core.actors.Actor.get_row_by_pk>` for a database table.
    """
    try:
        return self.model.objects.get(pk=pk)
    except __HOLE__:
        return None
    except self.model.DoesNotExist:
        return None
ValueError
dataset/ETHPy150Open lsaffre/lino/lino/core/dbtables.py/Table.get_row_by_pk
def apply(self, transform, pvalueish=None):
    """Applies a custom transform using the pvalueish specified.

    Args:
      transform: the PTransform (or callable) to apply.
      pvalueish: the input for the PTransform (typically a PCollection).

    Raises:
      TypeError: if the transform object extracted from the argument list is
        not a callable type or a descendant from PTransform.
      RuntimeError: if the transform object was already applied to this
        pipeline and needs to be cloned in order to apply again.
    """
    if not isinstance(transform, ptransform.PTransform):
        transform = _CallableWrapperPTransform(transform)

    full_label = format_full_label(self._current_transform(), transform)
    if full_label in self.applied_labels:
        raise RuntimeError(
            'Transform "%s" does not have a stable unique label. '
            'This will prevent updating of pipelines. '
            'To clone a transform with a new label use: '
            'transform.clone("NEW LABEL").' % full_label)
    self.applied_labels.add(full_label)

    pvalueish, inputs = transform._extract_input_pvalues(pvalueish)
    try:
        inputs = tuple(inputs)
        for leaf_input in inputs:
            if not isinstance(leaf_input, pvalue.PValue):
                raise TypeError
    except __HOLE__:
        raise NotImplementedError(
            'Unable to extract PValue inputs from %s; either %s does not '
            'accept inputs of this format, or it does not properly override '
            '_extract_input_pvalues' % (pvalueish, transform))

    child = AppliedPTransform(
        self._current_transform(), transform, full_label, inputs)
    self._current_transform().add_part(child)
    self.transforms_stack.append(child)

    if self.options is not None:
        type_options = self.options.view_as(TypeOptions)
    else:
        type_options = None

    if type_options is not None and type_options.pipeline_type_check:
        transform.type_check_inputs(pvalueish)

    pvalueish_result = self.runner.apply(transform, pvalueish)

    if type_options is not None and type_options.pipeline_type_check:
        transform.type_check_outputs(pvalueish_result)

    for result in ptransform.GetPValues().visit(pvalueish_result):
        assert isinstance(result, (pvalue.PValue, pvalue.DoOutputsTuple))

        # Make sure we set the producer only for a leaf node in the
        # transform DAG. This way we preserve the last transform of a
        # composite transform as being the real producer of the result.
        if result.producer is None:
            result.producer = child
        self._current_transform().add_output(result)

        # TODO(robertwb): Multi-input, multi-output inference.
        # TODO(robertwb): Ideally we'd do intersection here.
        if (type_options is not None and type_options.pipeline_type_check
                and isinstance(result,
                               (pvalue.PCollection, pvalue.PCollectionView))
                and not result.element_type):
            input_element_type = (
                inputs[0].element_type if len(inputs) == 1 else typehints.Any)
            type_hints = transform.get_type_hints()
            declared_output_type = type_hints.simple_output_type(
                transform.label)
            if declared_output_type:
                input_types = type_hints.input_types
                if input_types and input_types[0]:
                    declared_input_type = input_types[0][0]
                    result.element_type = typehints.bind_type_variables(
                        declared_output_type,
                        typehints.match_type_variables(declared_input_type,
                                                       input_element_type))
                else:
                    result.element_type = declared_output_type
            else:
                result.element_type = transform.infer_output_type(
                    input_element_type)

        assert isinstance(result.producer.inputs, tuple)

    if (type_options is not None
            and type_options.type_check_strictness == 'ALL_REQUIRED'
            and transform.get_type_hints().output_types is None):
        ptransform_name = '%s(%s)' % (transform.__class__.__name__,
                                      full_label)
        raise TypeCheckError('Pipeline type checking is enabled, however no '
                             'output type-hint was found for the '
                             'PTransform %s' % ptransform_name)

    child.update_input_refcounts()
    self.transforms_stack.pop()
    return pvalueish_result
TypeError
dataset/ETHPy150Open GoogleCloudPlatform/DataflowPythonSDK/google/cloud/dataflow/pipeline.py/Pipeline.apply
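The "stable unique label" guard at the top of apply() boils down to a set-membership check; a distilled sketch of that pattern (not the Dataflow API itself):

applied_labels = set()

def apply_with_label(label):
    if label in applied_labels:
        raise RuntimeError('Transform "%s" does not have a stable unique '
                           'label.' % label)
    applied_labels.add(label)

apply_with_label('Read/FromText')
try:
    apply_with_label('Read/FromText')    # same label twice -> rejected
except RuntimeError as e:
    print(e)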
def tearDown(self):
    try:
        shutil.rmtree(self.tempdir)
    except __HOLE__:
        pass
    self.top = None
OSError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.lib/src/openmdao/lib/casehandlers/test/test_jsonrecorder.py/TestCase.tearDown
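The same cleanup can be written without the try/except: shutil.rmtree on an already-removed directory raises an OSError subclass, and ignore_errors=True swallows it:

import shutil
import tempfile

tempdir = tempfile.mkdtemp()
shutil.rmtree(tempdir)
try:
    shutil.rmtree(tempdir)                   # directory is gone -> OSError
except OSError:
    pass
shutil.rmtree(tempdir, ignore_errors=True)   # equivalent, no try/except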