Dataset columns:
    function : string, lengths 79 to 138k
    label    : string, 20 values
    info     : string, lengths 42 to 261
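Each row below pairs a Python function in which the caught exception type has been masked as `__HOLE__` (the "function" column), the exception class that fills the hole (the "label" column), and the repository/file/function path it was taken from (the "info" column). A minimal sketch of how such rows could be consumed follows; the file name rows.jsonl and the one-JSON-object-per-line layout are assumptions made for illustration, not something this dump specifies.

import json

def iter_rows(path="rows.jsonl"):
    # Hypothetical layout: one JSON object per line with the three columns shown above.
    with open(path) as fh:
        for line in fh:
            row = json.loads(line)
            yield row["function"], row["label"], row["info"]

if __name__ == "__main__":
    for function_text, label, info in iter_rows():
        print(info, "->", label)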
def update_note(self, note):
    """ function to update a specific note object, if the note object does not
    have a "key" field, a new note is created

    Arguments
        - note (dict): note object to update

    Returns:
        A tuple `(note, status)`

        - note (dict): note object
        - status (int): 0 on sucesss and -1 otherwise

    """
    # use UTF-8 encoding
    # cpbotha: in both cases check if it's not unicode already
    # otherwise you get "TypeError: decoding Unicode is not supported"
    if isinstance(note["content"], str):
        note["content"] = unicode(note["content"], 'utf-8')

    if "tags" in note:
        # if a tag is a string, unicode it, otherwise pass it through
        # unchanged (it's unicode already)
        # using the ternary operator, because I like it: a if test else b
        note["tags"] = [unicode(t, 'utf-8') if isinstance(t, str) else t
                        for t in note["tags"]]

    # determine whether to create a new note or updated an existing one
    if "key" in note:
        url = '%s/%s?auth=%s&email=%s' % (DATA_URL, note["key"], self.get_token(), self.username)
    else:
        url = '%s?auth=%s&email=%s' % (DATA_URL, self.get_token(), self.username)

    request = Request(url, urllib.quote(json.dumps(note)))
    response = ""
    try:
        response = urllib2.urlopen(request).read()
    except __HOLE__, e:
        return e, -1
    return json.loads(response), 0
IOError
dataset/ETHPy150Open cpbotha/nvpy/nvpy/simplenote.py/Simplenote.update_note
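To make the relationship between the three fields concrete: substituting a row's label back into its function text restores the original except clause. A small illustrative sketch, using only the relevant fragment of the row above (the variable names are illustrative):

snippet = "try: response = urllib2.urlopen(request).read() except __HOLE__, e: return e, -1"
label = "IOError"

# Prints the fragment with the original Python 2 handler, "except IOError, e:".
print(snippet.replace("__HOLE__", label))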
def get_note_list(self, qty=float("inf")): """ function to get the note list The function can be passed an optional argument to limit the size of the list returned. If omitted a list of all notes is returned. Arguments: - quantity (integer number): of notes to list Returns: An array of note objects with all properties set except `content`. """ # initialize data status = 0 ret = [] response = {} notes = {"data": []} # get the note index if qty < NOTE_FETCH_LENGTH: params = 'auth=%s&email=%s&length=%s' % (self.get_token(), self.username, qty) else: params = 'auth=%s&email=%s&length=%s' % (self.get_token(), self.username, NOTE_FETCH_LENGTH) # perform initial HTTP request try: request = Request(INDX_URL + params) response = json.loads(urllib2.urlopen(request).read()) notes["data"].extend(response["data"]) except IOError: status = -1 # get additional notes if bookmark was set in response while "mark" in response and len(notes["data"]) < qty: if (qty - len(notes["data"])) < NOTE_FETCH_LENGTH: vals = (self.get_token(), self.username, response["mark"], qty - len(notes["data"])) else: vals = (self.get_token(), self.username, response["mark"], NOTE_FETCH_LENGTH) params = 'auth=%s&email=%s&mark=%s&length=%s' % vals # perform the actual HTTP request try: request = Request(INDX_URL + params) response = json.loads(urllib2.urlopen(request).read()) notes["data"].extend(response["data"]) except __HOLE__: status = -1 # parse data fields in response ret = notes["data"] return ret, status
IOError
dataset/ETHPy150Open cpbotha/nvpy/nvpy/simplenote.py/Simplenote.get_note_list
def delete_note(self, note_id):
    """ method to permanently delete a note

    Arguments:
        - note_id (string): key of the note to trash

    Returns:
        A tuple `(note, status)`

        - note (dict): an empty dict or an error message
        - status (int): 0 on sucesss and -1 otherwise

    """
    # notes have to be trashed before deletion
    note, status = self.trash_note(note_id)
    if (status == -1):
        return note, status

    params = '/%s?auth=%s&email=%s' % (str(note_id), self.get_token(), self.username)
    request = Request(url=DATA_URL + params, method='DELETE')
    try:
        urllib2.urlopen(request)
    except __HOLE__, e:
        return e, -1
    return {}, 0
IOError
dataset/ETHPy150Open cpbotha/nvpy/nvpy/simplenote.py/Simplenote.delete_note
def verilog_to_pymtl( model, verilog_file, c_wrapper_file, lib_file, py_wrapper_file, vcd_en, lint, verilator_xinit ): model_name = model.class_name try: vlinetrace = model.vlinetrace except __HOLE__: vlinetrace = False # Verilate the model # TODO: clean this up verilate_model( verilog_file, model_name, vcd_en, lint ) # Add names to ports of module for port in model.get_ports(): port.verilog_name = verilog_structural.mangle_name( port.name ) port.verilator_name = verilator_mangle( port.verilog_name ) # Create C++ Wrapper cdefs = create_c_wrapper( model, c_wrapper_file, vcd_en, vlinetrace, verilator_xinit ) # Create Shared C Library create_shared_lib( model_name, c_wrapper_file, lib_file, vcd_en, vlinetrace ) # Create PyMTL wrapper for CFFI interface to Verilated model create_verilator_py_wrapper( model, py_wrapper_file, lib_file, cdefs, vlinetrace ) #----------------------------------------------------------------------- # verilate_model #----------------------------------------------------------------------- # Convert Verilog HDL into a C++ simulator using Verilator. # http://www.veripool.org/wiki/verilator
AttributeError
dataset/ETHPy150Open cornell-brg/pymtl/pymtl/tools/translation/verilator_cffi.py/verilog_to_pymtl
def create_shared_lib( model_name, c_wrapper_file, lib_file, vcd_en, vlinetrace ): # We need to find out where the verilator include directories are # globally installed. We first check the PYMTL_VERILATOR_INCLUDE_DIR # environment variable, and if that does not exist then we fall back on # using pkg-config. verilator_include_dir = os.environ.get('PYMTL_VERILATOR_INCLUDE_DIR') if verilator_include_dir is None: cmd = ['pkg-config', '--variable=includedir', 'verilator'] try: verilator_include_dir = check_output( cmd, stderr=STDOUT ).strip() except __HOLE__ as e: error_msg = """ Error trying to find verilator include directories. The PYMTL_VERILATOR_INCLUDE_DIR environment variable was not set, so we attempted to use pkg-config to find where verilator was installed, but it looks like we had trouble finding or executing pkg-config itself. Try running the following command on your own to debug the issue. Command: {command} Error: [Errno {errno}] {strerror} """ raise VerilatorCompileError( error_msg.format( command = ' '.join( cmd ), errno = e.errno, strerror = e.strerror, )) except CalledProcessError as e: error_msg = """ Error trying to find verilator include directories. The PYMTL_VERILATOR_INCLUDE_DIR environment variable was not set, so we attempted to use pkg-config to find where verilator was installed, but it looks like pkg-config had trouble finding the verilator.pc file installed by verilator. Is a recent version of verilator installed? Older versions of verilator did not have pkg-config support. Try running the following command on your own to debug the issue. Command: {command} Error: {error} """ raise VerilatorCompileError( error_msg.format( command = ' '.join( e.cmd ), error = e.output, )) include_dirs = [ verilator_include_dir, verilator_include_dir+"/vltstd", ] # Compile standard Verilator code if libverilator.a does not exist. # Originally, I was also including verilated_dpi.cpp in this library, # but for some reason that screws up line tracing. Somehow there is # some kind of global state or something that is shared across the # shared libraries or something. I was able to fix it by recompiling # verilated_dpi if linetracing is enabled. Actually, the line tracing # doesn't work -- if you use this line tracing approach, so we are back # to always recomping everyting every time for now. # if not os.path.exists( "libverilator.a" ): # # compile( # flags = "-O3 -c", # include_dirs = include_dirs, # output_file = "verilator.o", # input_files = [ verilator_include_dir+"/verilated.cpp" ] # ) # # compile( # flags = "-O3 -c", # include_dirs = include_dirs, # output_file = "verilator_vcd_c.o", # input_files = [ verilator_include_dir+"/verilated_vcd_c.cpp" ] # ) # # # compile( # # flags = "-O3 -c", # # include_dirs = include_dirs, # # output_file = "verilator_dpi.o", # # input_files = [ verilator_include_dir+"/verilated_dpi.cpp" ] # # ) # # make_lib( # output_file = "libverilator.a", # # input_files = [ "verilator.o", "verilator_vcd_c.o", "verilator_dpi.o" ] # input_files = [ "verilator.o", "verilator_vcd_c.o" ] # ) obj_dir_prefix = "obj_dir_{m}/V{m}".format( m=model_name ) # We need to find a list of all the generated classes. We look in the # Verilator makefile for that. 
cpp_sources_list = [] with open( obj_dir_prefix+"_classes.mk" ) as mkfile: found = False for line in mkfile: if line.startswith("VM_CLASSES_FAST += "): found = True elif found: if line.strip() == "": found = False else: filename = line.strip()[:-2] cpp_file = "obj_dir_{m}/{f}.cpp".format( m=model_name, f=filename ) cpp_sources_list.append( cpp_file ) # Compile this module cpp_sources_list += [ obj_dir_prefix+"__Syms.cpp", verilator_include_dir+"/verilated.cpp", verilator_include_dir+"/verilated_dpi.cpp", c_wrapper_file, ] if vcd_en: cpp_sources_list += [ verilator_include_dir+"/verilated_vcd_c.cpp", obj_dir_prefix+"__Trace.cpp", obj_dir_prefix+"__Trace__Slow.cpp", ] compile( # flags = "-O1 -fstrict-aliasing -fPIC -shared -L. -lverilator", flags = "-O1 -fstrict-aliasing -fPIC -shared", include_dirs = include_dirs, output_file = lib_file, input_files = cpp_sources_list, ) #----------------------------------------------------------------------- # create_verilator_py_wrapper #-----------------------------------------------------------------------
OSError
dataset/ETHPy150Open cornell-brg/pymtl/pymtl/tools/translation/verilator_cffi.py/create_shared_lib
def __eq__(self, cmd):
    """
    Compare two command instances to each other by matching their
    key and aliases.

    Args:
        cmd (Command or str): Allows for equating both Command objects and their keys.

    Returns:
        equal (bool): If the commands are equal or not.

    """
    try:
        # first assume input is a command (the most common case)
        return self._matchset.intersection(cmd._matchset)
        #return cmd.key in self._matchset
    except __HOLE__:
        # probably got a string
        return cmd in self._matchset
AttributeError
dataset/ETHPy150Open evennia/evennia/evennia/commands/command.py/Command.__eq__
def __ne__(self, cmd):
    """
    The logical negation of __eq__. Since this is one of the most
    called methods in Evennia (along with __eq__) we do some
    code-duplication here rather than issuing a method-lookup to
    __eq__.
    """
    try:
        return self._matchset.isdisjoint(cmd._matchset)
        #return not cmd.key in self._matcheset
    except __HOLE__:
        return not cmd in self._matchset
AttributeError
dataset/ETHPy150Open evennia/evennia/evennia/commands/command.py/Command.__ne__
def run(self): """ Run the command, infer time period to be used in metric analysis phase. :return: None """ cmd_args = shlex.split(self.run_cmd) logger.info('Local command RUN-STEP starting with rank %d', self.run_rank) logger.info('Running subprocess command with following args: ' + str(cmd_args)) # TODO: Add try catch blocks. Kill process on CTRL-C # Infer time period for analysis. Assume same timezone between client and servers. self.ts_start = time.strftime("%Y-%m-%d %H:%M:%S") try: self.process = subprocess.Popen(cmd_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1) if self.kill_after_seconds: self.timer = Timer(self.kill_after_seconds, self.kill) self.timer.start() # Using 2nd method here to stream output: # http://stackoverflow.com/questions/2715847/python-read-streaming-input-from-subprocess-communicate for line in iter(self.process.stdout.readline, b''): logger.info(line.strip()) self.process.communicate() except __HOLE__: logger.warning('Handling keyboard interrupt (Ctrl-C)') self.kill() if self.timer: self.timer.cancel() self.ts_end = time.strftime("%Y-%m-%d %H:%M:%S") logger.info('subprocess finished') logger.info('run_step started at ' + self.ts_start + ' and ended at ' + self.ts_end)
KeyboardInterrupt
dataset/ETHPy150Open linkedin/naarad/src/naarad/run_steps/local_cmd.py/Local_Cmd.run
def kill(self): """ If run_step needs to be killed, this method will be called :return: None """ try: logger.info('Trying to terminating run_step...') self.process.terminate() time_waited_seconds = 0 while self.process.poll() is None and time_waited_seconds < CONSTANTS.SECONDS_TO_KILL_AFTER_SIGTERM: time.sleep(0.5) time_waited_seconds += 0.5 if self.process.poll() is None: self.process.kill() logger.warning('Waited %d seconds for run_step to terminate. Killing now....', CONSTANTS.SECONDS_TO_KILL_AFTER_SIGTERM) except __HOLE__, e: logger.error('Error while trying to kill the subprocess: %s', e)
OSError
dataset/ETHPy150Open linkedin/naarad/src/naarad/run_steps/local_cmd.py/Local_Cmd.kill
def main(): parser = argparse.ArgumentParser() parser.add_argument('-k', '--key', default='name') parser.add_argument('-s', '--case-sensitive', default=False, action='store_true') parser.add_argument('-r', '--reverse', default=False, action='store_true') args = parser.parse_args() if sys.stdin.isatty(): parser.error('no input, pipe another btc command output into this command') l = sys.stdin.read() if len(l.strip()) == 0: exit(1) try: l = decoder.decode(l) except __HOLE__: error('unexpected input: %s' % l) def key(x): key = x[args.key] if ((isinstance(x[args.key], str) or isinstance(x[args.key], unicode)) and not args.case_sensitive): return key.lower() return key l = sorted(l, key=key) if args.reverse: l = list(reversed(l)) print(encoder.encode([ordered_dict(d) for d in l]))
ValueError
dataset/ETHPy150Open bittorrent/btc/btc/btc_sort.py/main
def main(): (opts, args) = getopts() if chkopts(opts) is True: return PROCERROR cf = readconf(opts.config) if cf is None: print >>sys.stderr, 'Failed to load the config file "%s". (%s)' % (opts.config, sys.argv[0]) return PROCERROR # conf parse if parse_conf(cf) is False: return PROCERROR # set env=PYSILHOUETTE_CONF os.environ['PYSILHOUETTE_CONF'] = opts.config if reload_conf(cf["env.sys.log.conf.path"]): logger = logging.getLogger('pysilhouette.asynperformer') else: print >>sys.stderr, 'Failed to load the log file. (%s)' % sys.argv[0] return PROCERROR try: try: signal.signal(signal.SIGTERM, sigterm_handler) asynperformer = AsynPerformer(opts, cf) ret = asynperformer.process() # start!! return ret except __HOLE__, k: logger.critical('Keyboard interrupt occurred. - %s' % str(k.args)) print >>sys.stderr, 'Keyboard interrupt occurred. - %s' % str(k.args) except Exception, e: logger.critical('System error has occurred. - %s' % str(e.args)) print >>sys.stderr, 'System error has occurred. - %s' % str(e.args) print >>sys.stderr, traceback.format_exc() t_logger = logging.getLogger('pysilhouette_traceback') t_logger.critical(traceback.format_exc()) finally: if opts.daemon is True and os.path.isfile(opts.pidfile): os.unlink(opts.pidfile) logger.info('Process file has been deleted.. - pidfile=%s' % opts.pidfile) return PROCERROR
KeyboardInterrupt
dataset/ETHPy150Open karesansui/pysilhouette/pysilhouette/asynperformer.py/main
def is_eager(self, backend_name):
    """Return whether this backend is eager, meaning it runs tasks
    synchronously rather than queueing them to celery.

    A backend configures its eagerness by setting the backend
    configuration value ``router.celery.eager`` to True or False.
    The default is False.
    """
    try:
        backend = self.backends[backend_name]
    except __HOLE__:
        return False
    return backend._config.get('router.celery.eager', False)
KeyError
dataset/ETHPy150Open rapidsms/rapidsms/rapidsms/router/celery/router.py/CeleryRouter.is_eager
def default_interface():
    """ Get default gateway interface.

    Some OSes return 127.0.0.1 when using
    socket.gethostbyname(socket.gethostname()),
    so we're attempting to get a kind of valid hostname here.
    """
    try:
        return netifaces.gateways()['default'][netifaces.AF_INET][1]
    except __HOLE__:
        # Sometimes 'default' is empty but AF_INET exists alongside it
        return netifaces.gateways()[netifaces.AF_INET][0][1]
KeyError
dataset/ETHPy150Open nils-werner/zget/zget/utils.py/default_interface
def ip_addr(interface):
    """ Get IP address from interface.
    """
    try:
        return netifaces.ifaddresses(interface)[netifaces.AF_INET][0]['addr']
    except __HOLE__:
        raise ValueError(_("You have selected an invalid interface"))
KeyError
dataset/ETHPy150Open nils-werner/zget/zget/utils.py/ip_addr
def unique_filename(filename, limit=maxsize):
    if not os.path.exists(filename):
        return filename

    path, name = os.path.split(filename)
    name, ext = os.path.splitext(name)

    def make_filename(i):
        return os.path.join(path, '%s_%d%s' % (name, i, ext))

    for i in xrange(1, limit):
        unique_filename = make_filename(i)
        if not os.path.exists(unique_filename):
            return unique_filename

    try:
        raise FileExistsError()
    except __HOLE__:
        raise IOError(errno.EEXIST)
NameError
dataset/ETHPy150Open nils-werner/zget/zget/utils.py/unique_filename
def urlretrieve( url, output=None, reporthook=None ): r = requests.get(url, stream=True) try: maxsize = int(r.headers['content-length']) except __HOLE__: maxsize = -1 if output is None: try: filename = re.findall( "filename=(\S+)", r.headers['content-disposition'] )[0].strip('\'"') except (IndexError, KeyError): filename = urlparse.unquote( os.path.basename(urlparse.urlparse(url).path) ) filename = unique_filename(filename) reporthook.filename = filename else: filename = output with open(filename, 'wb') as f: for i, chunk in enumerate(r.iter_content(chunk_size=1024 * 8)): if chunk: f.write(chunk) if reporthook is not None: reporthook(i, 1024 * 8, maxsize)
KeyError
dataset/ETHPy150Open nils-werner/zget/zget/utils.py/urlretrieve
def main(): try: locale.setlocale(locale.LC_ALL, '') py3 = Py3statusWrapper() py3.setup() except __HOLE__: py3.notify_user('Setup interrupted (KeyboardInterrupt).') sys.exit(0) except Exception: py3.report_exception('Setup error') sys.exit(2) try: py3.run() except Exception: py3.report_exception('Runtime error') sys.exit(3) except KeyboardInterrupt: pass finally: py3.stop() sys.exit(0)
KeyboardInterrupt
dataset/ETHPy150Open ultrabug/py3status/py3status/__init__.py/main
def snapshots_to_iterations(apps, schema_editor): Iteration = apps.get_model('orchestra', 'Iteration') Task = apps.get_model('orchestra', 'Task') TaskAssignment = apps.get_model('orchestra', 'TaskAssignment') for task in Task.objects.all(): task_snapshots = get_ordered_snapshots(task) for task_snap_index, snapshot in enumerate(task_snapshots): assignment = snapshot['assignment'] iteration = Iteration.objects.create( assignment=assignment, end_datetime=parse(snapshot['datetime']), submitted_data=snapshot['data'], status=snapshot_type_to_iteration_status[snapshot['type']]) if snapshot['assignment_snap_index'] == 0: # Snapshot is the first for its assignment, so its start # datetime will be the same as its assignment # NOTE: This should cover the case where task_snap_index == 0 iteration.start_datetime = assignment.start_datetime else: iteration.start_datetime = ( task_snapshots[task_snap_index - 1]['datetime']) iteration.save() processing_assignments = task.assignments.filter( status=AssignmentStatus.PROCESSING) if processing_assignments.exists(): if processing_assignments.count() > 1: logger.error( 'Task {} has too many processing assignments'.format( task.id)) else: # If task has a processing assignment, create a final # processing iteration processing_assignment = processing_assignments.first() if (not task_snapshots or not processing_assignment.iterations.exists()): final_start_datetime = processing_assignment.start_datetime else: final_start_datetime = task_snapshots[-1]['datetime'] iteration = Iteration.objects.create( assignment=processing_assignment, start_datetime=final_start_datetime, status=IterationStatus.PROCESSING) try: verify_iterations(task.id) except __HOLE__: logger.error('Iterations invalid for task {}.'.format(task.id))
AssertionError
dataset/ETHPy150Open unlimitedlabs/orchestra/orchestra/migrations/0028_snapshots_to_iterations.py/snapshots_to_iterations
def _get_hadoop_bin(self, env):
    try:
        return env['HADOOP_BIN']
    except __HOLE__:
        return os.path.join(get_run_root('ext/hadoop/hadoop'), 'bin', 'hadoop')
KeyError
dataset/ETHPy150Open cloudera/hue/desktop/libs/hadoop/src/hadoop/pseudo_hdfs4.py/PseudoHdfs4._get_hadoop_bin
def _get_mapred_bin(self, env):
    try:
        return env['MAPRED_BIN']
    except __HOLE__:
        return os.path.join(get_run_root('ext/hadoop/hadoop'), 'bin', 'mapred')
KeyError
dataset/ETHPy150Open cloudera/hue/desktop/libs/hadoop/src/hadoop/pseudo_hdfs4.py/PseudoHdfs4._get_mapred_bin
def _get_yarn_bin(self, env):
    try:
        return env['YARN_BIN']
    except __HOLE__:
        return os.path.join(get_run_root('ext/hadoop/hadoop'), 'bin', 'yarn')
KeyError
dataset/ETHPy150Open cloudera/hue/desktop/libs/hadoop/src/hadoop/pseudo_hdfs4.py/PseudoHdfs4._get_yarn_bin
def _get_hdfs_bin(self, env):
    try:
        return env['HDFS_BIN']
    except __HOLE__:
        return os.path.join(get_run_root('ext/hadoop/hadoop'), 'bin', 'hdfs')
KeyError
dataset/ETHPy150Open cloudera/hue/desktop/libs/hadoop/src/hadoop/pseudo_hdfs4.py/PseudoHdfs4._get_hdfs_bin
def execute_command(command, working_dir=None):
    startupinfo = None
    # hide console window on windows
    if os.name == 'nt':
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW

    output = None
    try:
        output = subprocess.check_output(
            command,
            cwd=working_dir,
            startupinfo=startupinfo
        )
    except (subprocess.CalledProcessError, __HOLE__):
        # Git will return an error when the given directory
        # is not a repository, which means that we can ignore this error
        pass
    else:
        output = str(output, encoding="utf-8").strip()

    return output
AttributeError
dataset/ETHPy150Open Zeeker/sublime-GitConflictResolver/modules/util.py/execute_command
def read_one(self, num):
    pin_num = int(num)
    try:
        pin_config = self.pins[pin_num]
        return self.pin_response(pin_num, pin_config)
    except __HOLE__:
        return None
KeyError
dataset/ETHPy150Open projectweekend/Pi-GPIO-Server/pi_gpio/config/pins.py/PinManager.read_one
def update_value(self, num, value):
    pin_num = int(num)
    try:
        self.pins[pin_num]
        self.gpio.output(pin_num, value)
        return True
    except __HOLE__:
        return None
KeyError
dataset/ETHPy150Open projectweekend/Pi-GPIO-Server/pi_gpio/config/pins.py/PinManager.update_value
def decode(self, data):
    """ Decoder implementation of the Bencode algorithm

    @param data: The encoded data
    @type data: str

    @note: This is a convenience wrapper for the recursive decoding
           algorithm, C{_decodeRecursive}

    @return: The decoded data, as a native Python type
    @rtype: int, list, dict or str
    """
    if len(data) == 0:
        raise DecodeError, 'Cannot decode empty string'
    try:
        return self._decodeRecursive(data)[0]
    except __HOLE__ as e:
        raise DecodeError, e.message
ValueError
dataset/ETHPy150Open lbryio/lbry/lbrynet/dht/encoding.py/Bencode.decode
@staticmethod def _decodeRecursive(data, startIndex=0): """ Actual implementation of the recursive Bencode algorithm Do not call this; use C{decode()} instead """ if data[startIndex] == 'i': endPos = data[startIndex:].find('e')+startIndex return (int(data[startIndex+1:endPos]), endPos+1) elif data[startIndex] == 'l': startIndex += 1 decodedList = [] while data[startIndex] != 'e': listData, startIndex = Bencode._decodeRecursive(data, startIndex) decodedList.append(listData) return (decodedList, startIndex+1) elif data[startIndex] == 'd': startIndex += 1 decodedDict = {} while data[startIndex] != 'e': key, startIndex = Bencode._decodeRecursive(data, startIndex) value, startIndex = Bencode._decodeRecursive(data, startIndex) decodedDict[key] = value return (decodedDict, startIndex) elif data[startIndex] == 'f': # This (float data type) is a non-standard extension to the original Bencode algorithm endPos = data[startIndex:].find('e')+startIndex return (float(data[startIndex+1:endPos]), endPos+1) elif data[startIndex] == 'n': # This (None/NULL data type) is a non-standard extension to the original Bencode algorithm return (None, startIndex+1) else: splitPos = data[startIndex:].find(':')+startIndex try: length = int(data[startIndex:splitPos]) except __HOLE__, e: raise DecodeError, e startIndex = splitPos+1 endPos = startIndex+length bytes = data[startIndex:endPos] return (bytes, endPos)
ValueError
dataset/ETHPy150Open lbryio/lbry/lbrynet/dht/encoding.py/Bencode._decodeRecursive
def __parse_version_from_changelog():
    try:
        deb_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'debian', 'changelog')
        with open(deb_path, 'r') as changelog:
            regmatch = re.match(r'python-tortik \((.*)\).*', changelog.readline())
            return regmatch.groups()[0]
    except (IOError, __HOLE__):
        return None
AttributeError
dataset/ETHPy150Open glibin/tortik/tortik/version.py/__parse_version_from_changelog
def map_blocks(func, *args, **kwargs): """ Map a function across all blocks of a dask array Parameters ---------- func: callable Function to apply to every block in the array args: dask arrays or constants dtype: np.dtype Datatype of resulting array chunks: tuple (optional) chunk shape of resulting blocks if the function does not preserve shape drop_axis: number or iterable (optional) Dimensions lost by the function new_axis: number or iterable (optional) New dimensions created by the function **kwargs: Other keyword arguments to pass to function. Values must be constants (not dask.arrays) You must also specify the chunks and dtype of the resulting array. If you don't then we assume that the resulting array has the same block structure as the input. Examples -------- >>> import dask.array as da >>> x = da.arange(6, chunks=3) >>> x.map_blocks(lambda x: x * 2).compute() array([ 0, 2, 4, 6, 8, 10]) The ``da.map_blocks`` function can also accept multiple arrays >>> d = da.arange(5, chunks=2) >>> e = da.arange(5, chunks=2) >>> f = map_blocks(lambda a, b: a + b**2, d, e) >>> f.compute() array([ 0, 2, 6, 12, 20]) If function changes shape of the blocks then please provide chunks explicitly. >>> y = x.map_blocks(lambda x: x[::2], chunks=((2, 2),)) You have a bit of freedom in specifying chunks. If all of the output chunk sizes are the same, you can provide just that chunk size as a single tuple. >>> a = da.arange(18, chunks=(6,)) >>> b = a.map_blocks(lambda x: x[:3], chunks=(3,)) If the function changes the dimension of the blocks you must specify the created or destroyed dimensions. >>> b = a.map_blocks(lambda x: x[None, :, None], chunks=(1, 6, 1), ... new_axis=[0, 2]) Map_blocks aligns blocks by block positions without regard to shape. In the following example we have two arrays with the same number of blocks but with different shape and chunk sizes. >>> x = da.arange(1000, chunks=(100,)) >>> y = da.arange(100, chunks=(10,)) The relevant attribute to match is numblocks >>> x.numblocks (10,) >>> y.numblocks (10,) If these must match (up to broadcasting rules) then we can map arbitrary functions across blocks >>> def func(a, b): ... return np.array([a.max(), b.max()]) >>> da.map_blocks(func, x, y, chunks=(2,), dtype='i8') dask.array<..., shape=(20,), dtype=int64, chunksize=(2,)> >>> _.compute() array([ 99, 9, 199, 19, 299, 29, 399, 39, 499, 49, 599, 59, 699, 69, 799, 79, 899, 89, 999, 99]) Your block function can learn where in the array it is if it supports a ``block_id`` keyword argument. This will receive entries like (2, 0, 1), the position of the block in the dask array. >>> def func(block, block_id=None): ... pass You may specify the name of the resulting task in the graph with the optional ``name`` keyword argument. 
>>> y = x.map_blocks(lambda x: x + 1, name='increment') """ if not callable(func): raise TypeError("First argument must be callable function, not %s\n" "Usage: da.map_blocks(function, x)\n" " or: da.map_blocks(function, x, y, z)" % type(func).__name__) name = kwargs.pop('name', None) name = name or 'map-blocks-%s' % tokenize(func, args, **kwargs) dtype = kwargs.pop('dtype', None) chunks = kwargs.pop('chunks', None) drop_axis = kwargs.pop('drop_axis', []) new_axis = kwargs.pop('new_axis', []) if isinstance(drop_axis, Number): drop_axis = [drop_axis] if isinstance(new_axis, Number): new_axis = [new_axis] arrs = [a for a in args if isinstance(a, Array)] args = [(i, a) for i, a in enumerate(args) if not isinstance(a, Array)] if kwargs: func = partial(func, **kwargs) if args: func = partial_by_order(func, args) arginds = [(a, tuple(range(a.ndim))[::-1]) for a in arrs] numblocks = dict([(a.name, a.numblocks) for a, _ in arginds]) argindsstr = list(concat([(a.name, ind) for a, ind in arginds])) out_ind = tuple(range(max(a.ndim for a in arrs)))[::-1] dsk = top(func, name, out_ind, *argindsstr, numblocks=numblocks) # If func has block_id as an argument then swap out func # for func with block_id partialed in try: spec = getargspec(func) except: spec = None if spec: args = spec.args try: args += spec.kwonlyargs except __HOLE__: pass if 'block_id' in args: for k in dsk.keys(): dsk[k] = (partial(func, block_id=k[1:]),) + dsk[k][1:] numblocks = list(arrs[0].numblocks) if drop_axis: dsk = dict((tuple(k for i, k in enumerate(k) if i - 1 not in drop_axis), v) for k, v in dsk.items()) numblocks = [n for i, n in enumerate(numblocks) if i not in drop_axis] if new_axis: dsk, old_dsk = dict(), dsk for key in old_dsk: new_key = list(key) for i in new_axis: new_key.insert(i + 1, 0) dsk[tuple(new_key)] = old_dsk[key] for i in sorted(new_axis, reverse=False): numblocks.insert(i, 1) if chunks is not None and chunks and not isinstance(chunks[0], tuple): chunks = [nb * (bs,) for nb, bs in zip(numblocks, chunks)] if chunks is not None: chunks = tuple(chunks) else: chunks = broadcast_chunks(*[a.chunks for a in arrs]) return Array(merge(dsk, *[a.dask for a in arrs]), name, chunks, dtype)
AttributeError
dataset/ETHPy150Open dask/dask/dask/array/core.py/map_blocks
def cache(self, store=None, **kwargs): """ Evaluate and cache array Parameters ---------- store: MutableMapping or ndarray-like Place to put computed and cached chunks kwargs: Keyword arguments to pass on to ``get`` function for scheduling Examples -------- This triggers evaluation and store the result in either 1. An ndarray object supporting setitem (see da.store) 2. A MutableMapping like a dict or chest It then returns a new dask array that points to this store. This returns a semantically equivalent dask array. >>> import dask.array as da >>> x = da.arange(5, chunks=2) >>> y = 2*x + 1 >>> z = y.cache() # triggers computation >>> y.compute() # Does entire computation array([1, 3, 5, 7, 9]) >>> z.compute() # Just pulls from store array([1, 3, 5, 7, 9]) You might base a cache off of an array like a numpy array or h5py.Dataset. >>> cache = np.empty(5, dtype=x.dtype) >>> z = y.cache(store=cache) >>> cache array([1, 3, 5, 7, 9]) Or one might use a MutableMapping like a dict or chest >>> cache = dict() >>> z = y.cache(store=cache) >>> cache # doctest: +SKIP {('x', 0): array([1, 3]), ('x', 1): array([5, 7]), ('x', 2): array([9])} """ if store is not None and hasattr(store, 'shape'): self.store(store) return from_array(store, chunks=self.chunks) if store is None: try: from chest import Chest store = Chest() except __HOLE__: if self.nbytes <= 1e9: store = dict() else: raise ValueError("No out-of-core storage found." "Either:\n" "1. Install ``chest``, an out-of-core dictionary\n" "2. Provide an on-disk array like an h5py.Dataset") # pragma: no cover if isinstance(store, MutableMapping): name = 'cache-' + tokenize(self) dsk = dict(((name, k[1:]), (operator.setitem, store, (tuple, list(k)), k)) for k in core.flatten(self._keys())) Array._get(merge(dsk, self.dask), list(dsk.keys()), **kwargs) dsk2 = dict((k, (operator.getitem, store, (tuple, list(k)))) for k in store) return Array(dsk2, self.name, chunks=self.chunks, dtype=self._dtype)
ImportError
dataset/ETHPy150Open dask/dask/dask/array/core.py/Array.cache
def unpack_singleton(x):
    """

    >>> unpack_singleton([[[[1]]]])
    1
    >>> unpack_singleton(np.array(np.datetime64('2000-01-01')))
    array(datetime.date(2000, 1, 1), dtype='datetime64[D]')
    """
    while isinstance(x, (list, tuple)):
        try:
            x = x[0]
        except (__HOLE__, TypeError, KeyError):
            break
    return x
IndexError
dataset/ETHPy150Open dask/dask/dask/array/core.py/unpack_singleton
def elemwise(op, *args, **kwargs): """ Apply elementwise function across arguments Respects broadcasting rules Examples -------- >>> elemwise(add, x, y) # doctest: +SKIP >>> elemwise(sin, x) # doctest: +SKIP See Also -------- atop """ if not set(['name', 'dtype']).issuperset(kwargs): raise TypeError("%s does not take the following keyword arguments %s" % (op.__name__, str(sorted(set(kwargs) - set(['name', 'dtype']))))) shapes = [getattr(arg, 'shape', ()) for arg in args] out_ndim = len(broadcast_shapes(*shapes)) # Raises ValueError if dimensions mismatch expr_inds = tuple(range(out_ndim))[::-1] arrays = [asarray(a) for a in args if not is_scalar_for_elemwise(a)] other = [(i, a) for i, a in enumerate(args) if is_scalar_for_elemwise(a)] if 'dtype' in kwargs: dt = kwargs['dtype'] elif any(a._dtype is None for a in arrays): dt = None else: # We follow NumPy's rules for dtype promotion, which special cases # scalars and 0d ndarrays (which it considers equivalent) by using # their values to compute the result dtype: # https://github.com/numpy/numpy/issues/6240 # We don't inspect the values of 0d dask arrays, because these could # hold potentially very expensive calculations. vals = [np.empty((1,) * a.ndim, dtype=a.dtype) if not is_scalar_for_elemwise(a) else a for a in args] try: dt = op(*vals).dtype except __HOLE__: dt = None name = kwargs.get('name', None) or 'elemwise-' + tokenize(op, dt, *args) if other: op2 = partial_by_order(op, other) else: op2 = op return atop(op2, expr_inds, *concat((a, tuple(range(a.ndim)[::-1])) for a in arrays), dtype=dt, name=name)
AttributeError
dataset/ETHPy150Open dask/dask/dask/array/core.py/elemwise
def chunks_from_arrays(arrays): """ Chunks tuple from nested list of arrays >>> x = np.array([1, 2]) >>> chunks_from_arrays([x, x]) ((2, 2),) >>> x = np.array([[1, 2]]) >>> chunks_from_arrays([[x], [x]]) ((1, 1), (2,)) >>> x = np.array([[1, 2]]) >>> chunks_from_arrays([[x, x]]) ((1,), (2, 2)) >>> chunks_from_arrays([1, 1]) ((1, 1),) """ if not arrays: return () result = [] dim = 0 def shape(x): try: return x.shape except __HOLE__: return (1,) while isinstance(arrays, (list, tuple)): result.append(tuple(shape(deepfirst(a))[dim] for a in arrays)) arrays = arrays[0] dim += 1 return tuple(result)
AttributeError
dataset/ETHPy150Open dask/dask/dask/array/core.py/chunks_from_arrays
def concatenate3(arrays): """ Recursive np.concatenate Input should be a nested list of numpy arrays arranged in the order they should appear in the array itself. Each array should have the same number of dimensions as the desired output and the nesting of the lists. >>> x = np.array([[1, 2]]) >>> concatenate3([[x, x, x], [x, x, x]]) array([[1, 2, 1, 2, 1, 2], [1, 2, 1, 2, 1, 2]]) >>> concatenate3([[x, x], [x, x], [x, x]]) array([[1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 1, 2]]) """ arrays = concrete(arrays) ndim = ndimlist(arrays) if not ndim: return arrays if not arrays: return np.empty(()) chunks = chunks_from_arrays(arrays) shape = tuple(map(sum, chunks)) def dtype(x): try: return x.dtype except __HOLE__: return type(x) result = np.empty(shape=shape, dtype=dtype(deepfirst(arrays))) for (idx, arr) in zip(slices_from_chunks(chunks), core.flatten(arrays)): if hasattr(arr, 'ndim'): while arr.ndim < ndim: arr = arr[None, ...] result[idx] = arr return result
AttributeError
dataset/ETHPy150Open dask/dask/dask/array/core.py/concatenate3
def splitquote(line, stopchar=None, lower=False, quotechars = '"\''): """ Fast LineSplitter. Copied from The F2Py Project. """ items = [] i = 0 while 1: try: char = line[i]; i += 1 except __HOLE__: break l = [] l_append = l.append nofslashes = 0 if stopchar is None: # search for string start while 1: if char in quotechars and not nofslashes % 2: stopchar = char i -= 1 break if char=='\\': nofslashes += 1 else: nofslashes = 0 l_append(char) try: char = line[i]; i += 1 except IndexError: break if not l: continue item = ''.join(l) if lower: item = item.lower() items.append(item) continue if char==stopchar: # string starts with quotechar l_append(char) try: char = line[i]; i += 1 except IndexError: if l: item = str(''.join(l)) items.append(item) break # else continued string while 1: if char==stopchar and not nofslashes % 2: l_append(char) stopchar = None break if char=='\\': nofslashes += 1 else: nofslashes = 0 l_append(char) try: char = line[i]; i += 1 except IndexError: break if l: item = str(''.join(l)) items.append(item) return items, stopchar
IndexError
dataset/ETHPy150Open pearu/pylibtiff/libtiff/utils.py/splitquote
def referer(pattern, accept=True, accept_missing=False, error=403, message='Forbidden Referer header.', debug=False): """Raise HTTPError if Referer header does/does not match the given pattern. pattern A regular expression pattern to test against the Referer. accept If True, the Referer must match the pattern; if False, the Referer must NOT match the pattern. accept_missing If True, permit requests with no Referer header. error The HTTP error code to return to the client on failure. message A string to include in the response body on failure. """ try: ref = cherrypy.serving.request.headers['Referer'] match = bool(re.match(pattern, ref)) if debug: cherrypy.log('Referer %r matches %r' % (ref, pattern), 'TOOLS.REFERER') if accept == match: return except __HOLE__: if debug: cherrypy.log('No Referer header', 'TOOLS.REFERER') if accept_missing: return raise cherrypy.HTTPError(error, message)
KeyError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/cherrypy/cherrypy/lib/cptools.py/referer
def __bind(self, name, invkind): """Bind (name, invkind) and return a FuncDesc instance or None. Results (even unsuccessful ones) are cached.""" # We could cache the info in the class instead of the # instance, but we would need an additional key for that: # self._iid try: return self._tdesc[(name, invkind)] except __HOLE__: try: descr = self._tcomp.Bind(name, invkind)[1] except comtypes.COMError: info = None else: # Using a separate instance to store interesting # attributes of descr avoids that the typecomp instance is # kept alive... info = FuncDesc(memid=descr.memid, invkind=descr.invkind, cParams=descr.cParams, funckind=descr.funckind) self._tdesc[(name, invkind)] = info return info
KeyError
dataset/ETHPy150Open enthought/comtypes/comtypes/client/lazybind.py/Dispatch.__bind
def __getattr__(self, name): """Get a COM attribute.""" if name.startswith("__") and name.endswith("__"): raise AttributeError(name) # check for propget or method descr = self.__bind(name, DISPATCH_METHOD | DISPATCH_PROPERTYGET) if descr is None: raise AttributeError(name) if descr.invkind == DISPATCH_PROPERTYGET: # DISPATCH_PROPERTYGET if descr.funckind == FUNC_DISPATCH: if descr.cParams == 0: return self._comobj._invoke(descr.memid, descr.invkind, 0) elif descr.funckind == FUNC_PUREVIRTUAL: # FUNC_PUREVIRTUAL descriptions contain the property # itself as a parameter. if descr.cParams == 1: return self._comobj._invoke(descr.memid, descr.invkind, 0) else: raise RuntimeError("funckind %d not yet implemented" % descr.funckind) put = self.__bind(name, DISPATCH_PROPERTYPUT) putref = self.__bind(name, DISPATCH_PROPERTYPUTREF) return NamedProperty(self, descr, put, putref) else: # DISPATCH_METHOD def caller(*args): return self._comobj._invoke(descr.memid, descr.invkind, 0, *args) try: caller.__name__ = name except __HOLE__: # In Python 2.3, __name__ is readonly pass return caller
TypeError
dataset/ETHPy150Open enthought/comtypes/comtypes/client/lazybind.py/Dispatch.__getattr__
def get_cached_mtimes(self, root_folder, use_files_relpaths, get_all=False): location = os.path.join(root_folder, ".git", "harpoon_cached_mtimes.json") sorted_use_files_relpaths = sorted(use_files_relpaths) result = [] if os.path.exists(location): try: result = json.load(open(location)) except (__HOLE__, ValueError) as error: log.warning("Failed to open harpoon cached mtimes\tlocation=%s\terror=%s", location, error) else: if type(result) is not list or not all(type(item) is dict for item in result): log.warning("Harpoon cached mtimes needs to be a list of dictionaries\tlocation=%s\tgot=%s", location, type(result)) result = [] if get_all: return result for item in result: if sorted(item.get("use_files_relpaths", [])) == sorted_use_files_relpaths: return item.get("commit"), item.get("mtimes") return None, {}
TypeError
dataset/ETHPy150Open realestate-com-au/harpoon/harpoon/ship/context.py/ContextBuilder.get_cached_mtimes
def set_cached_mtimes(self, root_folder, first_commit, mtimes, use_files_relpaths): location = os.path.join(root_folder, ".git", "harpoon_cached_mtimes.json") sorted_use_files_relpaths = sorted(use_files_relpaths) current = self.get_cached_mtimes(root_folder, use_files_relpaths, get_all=True) found = False for item in current: if sorted(item.get("use_files_relpaths", [])) == sorted_use_files_relpaths: item["mtimes"] = mtimes item["commit"] = first_commit found = True break if not found: current.append({"commit": first_commit, "mtimes": mtimes, "use_files_relpaths": sorted_use_files_relpaths}) try: log.info("Writing harpoon cached mtimes\tlocation=%s", location) with open(location, "w") as fle: json.dump(current, fle) except (TypeError, ValueError, __HOLE__) as error: log.warning("Failed to dump harpoon mtime cache\tlocation=%s\terror=%s", location, error)
IOError
dataset/ETHPy150Open realestate-com-au/harpoon/harpoon/ship/context.py/ContextBuilder.set_cached_mtimes
def findstring(self, string):
    """Returns the Enum object given a name string."""
    d = self.get_mapping()
    try:
        return d[string]
    except __HOLE__:
        raise ValueError("Enum string not found.")

# common enumerations
KeyError
dataset/ETHPy150Open kdart/pycopia/aid/pycopia/aid.py/Enums.findstring
def sgn(val):
    """Sign function. Returns -1 if val negative, 0 if zero, and 1 if positive.
    """
    try:
        return val._sgn_()
    except __HOLE__:
        if val == 0:
            return 0
        if val > 0:
            return 1
        else:
            return -1

# Nice floating point range function from Python snippets
AttributeError
dataset/ETHPy150Open kdart/pycopia/aid/pycopia/aid.py/sgn
def __getattr__(self, name):
    try:
        return self.__dict__["_attribs"][name]
    except __HOLE__:
        raise AttributeError("Invalid attribute %r" % (name,))
KeyError
dataset/ETHPy150Open kdart/pycopia/aid/pycopia/aid.py/mapstr.__getattr__
def __getattr__(self, name):
    try:
        return self.__dict__["_attribs"][name]
    except __HOLE__:
        raise AttributeError("Invalid attribute %r" % (name,))
KeyError
dataset/ETHPy150Open kdart/pycopia/aid/pycopia/aid.py/formatstr.__getattr__
def removedups(s): """Return a list of the elements in s, but without duplicates. Thanks to Tim Peters for fast method. """ n = len(s) if n == 0: return [] u = {} try: for x in s: u[x] = 1 except TypeError: del u # move on to the next method else: return list(u.keys()) # We can't hash all the elements. Second fastest is to sort, # which brings the equal elements together; then duplicates are # easy to weed out in a single pass. try: t = list(s) t.sort() except __HOLE__: del t # move on to the next method else: assert n > 0 last = t[0] lasti = i = 1 while i < n: if t[i] != last: t[lasti] = last = t[i] lasti = lasti + 1 i = i + 1 return t[:lasti] # Brute force is all that's left. u = [] for x in s: if x not in u: u.append(x) return u
TypeError
dataset/ETHPy150Open kdart/pycopia/aid/pycopia/aid.py/removedups
def Import(modname):
    """Improved __import__ function that returns fully initialized subpackages."""
    try:
        return sys.modules[modname]
    except __HOLE__:
        pass
    __import__(modname)
    return sys.modules[modname]
KeyError
dataset/ETHPy150Open kdart/pycopia/aid/pycopia/aid.py/Import
def read(fname):
    try:
        return open(os.path.join(os.path.dirname(__file__), fname), 'r').read()
    except __HOLE__:
        return u''
IOError
dataset/ETHPy150Open arteria/django-hijack/setup.py/read
def repeat_until_consistent(self, f, *args, **kwargs): """ Repeatedly call a function with given arguments until AssertionErrors stop or configured number of repetitions are reached. Some backends are eventually consistent, which means results of listing volumes may not reflect actions immediately. So for read-only operations that rely on listing we want to be able to retry. Retry policy can be changed by overriding the ``repeat_retries`` method. @param f: Function to call. @param args: Arguments for ``f``. @param kwargs: Keyword arguments for ``f``. """ for step in self.repeat_retries(): try: return f(*args, **kwargs) except __HOLE__ as e: time.sleep(step) raise e
AssertionError
dataset/ETHPy150Open ClusterHQ/flocker/flocker/node/agents/testtools/_blockdevice.py/IBlockDeviceAPITestsMixin.repeat_until_consistent
def umount_all(root_path): """ Unmount all devices with mount points contained in ``root_path``. :param FilePath root_path: A directory in which to search for mount points. """ def is_under_root(path): try: FilePath(path).segmentsFrom(root_path) except __HOLE__: return False return True partitions_under_root = list(p for p in psutil.disk_partitions() if is_under_root(p.mountpoint)) for partition in partitions_under_root: umount(FilePath(partition.mountpoint))
ValueError
dataset/ETHPy150Open ClusterHQ/flocker/flocker/node/agents/testtools/_blockdevice.py/umount_all
def __init__(self, params):
    timeout = params.get('timeout', 300)
    try:
        timeout = int(timeout)
    except (__HOLE__, TypeError):
        timeout = 300
    self.default_timeout = timeout
ValueError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/core/cache/backends/base.py/BaseCache.__init__
def main_loop(bots, config): if "LOGFILE" in config: logging.basicConfig(filename=config[ "LOGFILE"], level=logging.INFO, format='%(asctime)s %(message)s') try: for bot in bots: bot.init() while True: for bot in bots: # print 'processing', bot bot.process() relay_ins = bot.collect_relay() for xbot in bots: if type(bot) == type(xbot): continue xbot.relay(bot, relay_ins) time.sleep(.2) except __HOLE__: sys.exit(0) except: logging.exception('OOPS')
KeyboardInterrupt
dataset/ETHPy150Open youknowone/slairck/slairck.py/main_loop
def main():
    try:
        import clime
    except __HOLE__:
        clime = None

    if clime:
        clime.start({'calendar': git_calendar})
    else:
        raise Exception("I need clime.")
ImportError
dataset/ETHPy150Open littleq0903/git-calendar/git_calendar/main.py/main
def __init__(self, name, value=None, schema=None): self.name = name self.value = value self.schema = schema self._status = self.PROPERTY_STATUS_DEFAULT self._required = self.PROPERTY_REQUIRED_DEFAULT # Validate required 'type' property exists try: self.schema['type'] except __HOLE__: msg = (_('Schema definition of "%(pname)s" must have a "type" ' 'attribute.') % dict(pname=self.name)) ExceptionCollector.appendException( InvalidSchemaError(message=msg)) if self.schema: self._load_required_attr_from_schema() self._load_status_attr_from_schema()
KeyError
dataset/ETHPy150Open openstack/tosca-parser/toscaparser/elements/property_definition.py/PropertyDef.__init__
def validate_boards(ctx, param, value):  # pylint: disable=W0613
    unknown_boards = set(value) - set(get_boards().keys())
    try:
        assert not unknown_boards
        return value
    except __HOLE__:
        raise click.BadParameter(
            "%s. Please search for the board types using "
            "`platformio boards` command" % ", ".join(unknown_boards))
AssertionError
dataset/ETHPy150Open platformio/platformio/platformio/commands/init.py/validate_boards
def begin_site(self):
    in_production = self.site.config.mode.startswith('prod')
    if not in_production:
        self.logger.info('Generating draft posts as the site is'
                         'not in production mode.')
        return

    for resource in self.site.content.walk_resources():
        if not resource.is_processable:
            continue
        try:
            is_draft = resource.meta.is_draft
        except __HOLE__:
            is_draft = False
        if is_draft:
            resource.is_processable = False
        self.logger.info(
            '%s is%s draft' % (resource, '' if is_draft else ' not'))
AttributeError
dataset/ETHPy150Open hyde/hyde/hyde/ext/plugins/blog.py/DraftsPlugin.begin_site
def get_actions_urls(model, url_name=None, **kwargs): """ Get automatically the actions urls for a model. """ from ionyweb.administration.actions.views import (ActionAdminListView, ActionAdminDetailView, ActionAdminOrderView) app_label = model._meta.app_label # Url Name if url_name is None: module_name = model._meta.module_name else: module_name = url_name # Specific Detail View try: DetailView = kwargs.pop('detail_view_class') except KeyError: DetailView = ActionAdminDetailView # Specific List View try: ListView = kwargs.pop('list_view_class') except __HOLE__: ListView = ActionAdminListView # Prefix URL try: prefix_url = kwargs.pop('prefix_url') except KeyError: prefix_url = '' # Args Management def filter_kwargs(exclude_args, kwargs): new_kwargs = kwargs.copy() for arg in exclude_args: if arg in new_kwargs: del new_kwargs[arg] return new_kwargs list_kwargs = filter_kwargs(['form_class'], kwargs) detail_kwargs = filter_kwargs(['ordering', 'sortable', 'sortable_field', 'list_display'], kwargs) urlpatterns = patterns('', # Change List Action url(r'^%s%s_list/$' % (prefix_url, module_name), ListView.as_view(model=model, **list_kwargs), name='wa-%s-%s_list-action' % (app_label, module_name)), # Object Detail Action url(r'^%s%s/(?P<object_pk>[0-9]*)/$' % (prefix_url, module_name), DetailView.as_view(model=model, **detail_kwargs), name='wa-%s-%s-action' % (app_label, module_name)), # Object Creation Action url(r'^%s%s/$' % (prefix_url, module_name), DetailView.as_view(model=model, **detail_kwargs), name='wa-%s-%s-creation-action' % (app_label, module_name)), ) if kwargs.get('sortable', False): urlpatterns += patterns('', url(r'^%s%s_order/$' % (prefix_url, module_name), ActionAdminOrderView.as_view(model=model), name='wa-%s-%s_order-action' % (app_label, module_name)), ) return urlpatterns
KeyError
dataset/ETHPy150Open ionyse/ionyweb/ionyweb/administration/actions/utils.py/get_actions_urls
def get_actions_for_object_model(obj, relation_id=None): # -- DEPRECATED -- # Only for backward compatibilty if hasattr(obj, 'get_actions'): # All app and plugin define a get_actions method, # so we check if the method is overloaded... get_actions = getattr(obj, 'get_actions') if get_actions(): # Actions actions_html = [] # Relation ID if not relation_id: relation_id = obj.get_relation_id() for action in get_actions(): actions_html.append(u'<a onclick="%s(\'%s\'); return false;">%s</a>' % ( action[1], relation_id, action[0])) # Title title = obj.__class__.get_name() return {'title': title, 'list': actions_html} # -- NEW SYNTAX -- # Only if object model defines ActionsAdmin inner class. if hasattr(obj, 'ActionsAdmin'): # Get title of actions group (or default value) title = getattr(obj.ActionsAdmin, 'title', _(u'Actions')) # Get relation_id for the first param of WA urls if not relation_id: related_obj = obj if not isinstance(obj, AbstractPageApp) and not isinstance(obj, AbstractPlugin): related_object_path = getattr(obj.ActionsAdmin, 'related_object', 'app') for item in related_object_path.split('__'): related_obj = getattr(related_obj, item) relation_id = related_obj.get_relation_id() # Make actions list actions_html = [] for action in getattr(obj.ActionsAdmin, 'actions_list', []): # Formating each argument (in string) # in order to provide the callback function args_formated = u'' for arg in action.get('args', []): try: args_formated += u', %s' % getattr(obj, arg) except __HOLE__: pass actions_html.append( u'<a onclick="%s(\'%s\'%s); return false;">%s</a>' % ( action.get('callback'), relation_id, args_formated, action.get('title')) ) return {'title': title, 'list': actions_html} return {}
AttributeError
dataset/ETHPy150Open ionyse/ionyweb/ionyweb/administration/actions/utils.py/get_actions_for_object_model
def __str__(self):
    try:
        return self.plugin_class.get_identifier(self)
    except __HOLE__:
        return str(self.image)
AttributeError
dataset/ETHPy150Open jrief/djangocms-cascade/cmsplugin_cascade/mixins.py/ImagePropertyMixin.__str__
@property
def image(self):
    if not hasattr(self, '_image_model'):
        try:
            Model = apps.get_model(*self.glossary['image']['model'].split('.'))
            self._image_model = Model.objects.get(pk=self.glossary['image']['pk'])
        except (__HOLE__, ObjectDoesNotExist):
            self._image_model = None
    return self._image_model
KeyError
dataset/ETHPy150Open jrief/djangocms-cascade/cmsplugin_cascade/mixins.py/ImagePropertyMixin.image
def __init__(self, swift_proxy=None, swift_proxy_storage_path=None, swift_proxy_cdn_path=None, attempts=5, eventlet=None, chunk_size=65536, verbose=None, verbose_id='', direct_object_ring=None): super(DirectClient, self).__init__() self.storage_path = swift_proxy_storage_path self.cdn_path = swift_proxy_cdn_path self.attempts = attempts self.chunk_size = chunk_size if verbose: self.verbose = lambda m, *a, **k: verbose( self._verbose_id + m, *a, **k) else: self.verbose = lambda *a, **k: None self.verbose_id = verbose_id self._verbose_id = self.verbose_id if self._verbose_id: self._verbose_id += ' ' self.swift_proxy = swift_proxy if not swift_proxy: self.verbose('Creating default proxy instance.') import swift.proxy.server from swiftly.client.localmemcache import LocalMemcache from swiftly.client.nulllogger import NullLogger try: import swift.common.swob self.Request = swift.common.swob.Request except ImportError: import webob self.Request = webob.Request self.swift_proxy = swift.proxy.server.Application( {}, memcache=LocalMemcache(), logger=NullLogger()) self.oring = None def get_oring(*args): return self.oring if direct_object_ring: self.oring = swift.common.ring.ring.Ring(direct_object_ring) self.swift_proxy.get_object_ring = get_oring if eventlet is None: try: import eventlet # Eventlet 0.11.0 fixed the CPU bug if eventlet.__version__ >= '0.11.0': eventlet = True except __HOLE__: pass if eventlet: try: import eventlet self.sleep = eventlet.sleep except ImportError: import time self.sleep = time.sleep else: import time self.sleep = time.sleep
ImportError
dataset/ETHPy150Open gholt/swiftly/swiftly/client/directclient.py/DirectClient.__init__
def request(self, method, path, contents, headers, decode_json=False, stream=False, query=None, cdn=False): """ See :py:func:`swiftly.client.client.Client.request` """ if query: path += '?' + '&'.join( ('%s=%s' % (quote(k), quote(v)) if v else quote(k)) for k, v in sorted(six.iteritems(query))) reset_func = self._default_reset_func if isinstance(contents, six.string_types): contents = StringIO(contents) tell = getattr(contents, 'tell', None) seek = getattr(contents, 'seek', None) if tell and seek: try: orig_pos = tell() reset_func = lambda: seek(orig_pos) except Exception: tell = seek = None elif not contents: reset_func = lambda: None status = 0 reason = 'Unknown' attempt = 0 while attempt < self.attempts: attempt += 1 if cdn: conn_path = self.cdn_path else: conn_path = self.storage_path titled_headers = dict((k.title(), v) for k, v in six.iteritems({ 'User-Agent': self.user_agent})) if headers: titled_headers.update( (k.title(), v) for k, v in six.iteritems(headers)) resp = None if not hasattr(contents, 'read'): if method not in self.no_content_methods and contents and \ 'Content-Length' not in titled_headers and \ 'Transfer-Encoding' not in titled_headers: titled_headers['Content-Length'] = str( len(contents or '')) req = self.Request.blank( conn_path + path, environ={'REQUEST_METHOD': method, 'swift_owner': True}, headers=titled_headers, body=contents) verbose_headers = ' '.join( '%s: %s' % (k, v) for k, v in six.iteritems(titled_headers)) self.verbose( '> %s %s %s', method, conn_path + path, verbose_headers) resp = req.get_response(self.swift_proxy) else: req = self.Request.blank( conn_path + path, environ={'REQUEST_METHOD': method, 'swift_owner': True}, headers=titled_headers) content_length = None for h, v in six.iteritems(titled_headers): if h.lower() == 'content-length': content_length = int(v) req.headers[h] = v if method not in self.no_content_methods and \ content_length is None: titled_headers['Transfer-Encoding'] = 'chunked' req.headers['Transfer-Encoding'] = 'chunked' else: req.content_length = content_length req.body_file = contents verbose_headers = ' '.join( '%s: %s' % (k, v) for k, v in six.iteritems(titled_headers)) self.verbose( '> %s %s %s', method, conn_path + path, verbose_headers) resp = req.get_response(self.swift_proxy) status = resp.status_int reason = resp.status.split(' ', 1)[1] hdrs = headers_to_dict(resp.headers.items()) if stream: def iter_reader(size=-1): if size == -1: return ''.join(resp.app_iter) else: try: return next(resp.app_iter) except __HOLE__: return '' iter_reader.read = iter_reader value = iter_reader else: value = resp.body self.verbose('< %s %s', status, reason) if status and status // 100 != 5: if not stream and decode_json and status // 100 == 2: if value: value = json.loads(value) else: value = None return (status, reason, hdrs, value) if reset_func: reset_func() self.sleep(2 ** attempt) raise Exception('%s %s failed: %s %s' % (method, path, status, reason))
StopIteration
dataset/ETHPy150Open gholt/swiftly/swiftly/client/directclient.py/DirectClient.request
def _import(module, reload="False"): """ Creates a global translation dictionary for module. The argument module has to be one of the following strings: "math", "mpmath", "numpy", "sympy". These dictionaries map names of python functions to their equivalent in other modules. """ from sympy.external import import_module try: namespace, namespace_default, translations, import_commands = MODULES[ module] except __HOLE__: raise NameError( "'%s' module can't be used for lambdification" % module) # Clear namespace or exit if namespace != namespace_default: # The namespace was already generated, don't do it again if not forced. if reload: namespace.clear() namespace.update(namespace_default) else: return for import_command in import_commands: if import_command.startswith('import_module'): module = eval(import_command) if module is not None: namespace.update(module.__dict__) continue else: try: exec_(import_command, {}, namespace) continue except ImportError: pass raise ImportError( "can't import '%s' with '%s' command" % (module, import_command)) # Add translated names to namespace for sympyname, translation in translations.items(): namespace[sympyname] = namespace[translation] # For computing the modulus of a sympy expression we use the builtin abs # function, instead of the previously used fabs function for all # translation modules. This is because the fabs function in the math # module does not accept complex valued arguments. (see issue 9474). The # only exception, where we don't use the builtin abs function is the # mpmath translation module, because mpmath.fabs returns mpf objects in # contrast to abs(). if 'Abs' not in namespace: namespace['Abs'] = abs
KeyError
dataset/ETHPy150Open sympy/sympy/sympy/utilities/lambdify.py/_import
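The translation step at the end of `_import` is just a name-to-name remapping into the backend module's namespace. A small sketch with a made-up translation table (the real tables live in `MODULES`):

import math

# Hypothetical subset of a translations dict: sympy name -> math-module name.
translations = {"sin": "sin", "Abs": "fabs", "ln": "log"}

namespace = vars(math).copy()              # start from the backend's own names
for sympy_name, target_name in translations.items():
    namespace[sympy_name] = namespace[target_name]

print(namespace["ln"](math.e))             # -> 1.0, since 'ln' now points at math.log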
@doctest_depends_on(modules=('numpy')) def lambdify(args, expr, modules=None, printer=None, use_imps=True, dummify=True): """ Returns a lambda function for fast calculation of numerical values. If not specified differently by the user, SymPy functions are replaced as far as possible by either python-math, numpy (if available) or mpmath functions - exactly in this order. To change this behavior, the "modules" argument can be used. It accepts: - the strings "math", "mpmath", "numpy", "numexpr", "sympy" - any modules (e.g. math) - dictionaries that map names of sympy functions to arbitrary functions - lists that contain a mix of the arguments above, with higher priority given to entries appearing first. The default behavior is to substitute all arguments in the provided expression with dummy symbols. This allows for applied functions (e.g. f(t)) to be supplied as arguments. Call the function with dummify=False if dummy substitution is unwanted (and `args` is not a string). If you want to view the lambdified function or provide "sympy" as the module, you should probably set dummify=False. For functions involving large array calculations, numexpr can provide a significant speedup over numpy. Please note that the available functions for numexpr are more limited than numpy but can be expanded with implemented_function and user defined subclasses of Function. If specified, numexpr may be the only option in modules. The official list of numexpr functions can be found at: https://github.com/pydata/numexpr#supported-functions In previous releases ``lambdify`` replaced ``Matrix`` with ``numpy.matrix`` by default. As of release 1.0 ``numpy.array`` is the default. To get the old default behavior you must pass in ``[{'ImmutableMatrix': numpy.matrix}, 'numpy']`` to the ``modules`` kwarg. >>> from sympy import lambdify, Matrix >>> from sympy.abc import x, y >>> import numpy >>> array2mat = [{'ImmutableMatrix': numpy.matrix}, 'numpy'] >>> f = lambdify((x, y), Matrix([x, y]), modules=array2mat) >>> f(1, 2) matrix([[1], [2]]) Usage ===== (1) Use one of the provided modules: >>> from sympy import sin, tan, gamma >>> from sympy.utilities.lambdify import lambdastr >>> from sympy.abc import x, y >>> f = lambdify(x, sin(x), "math") Attention: Functions that are not in the math module will throw a name error when the lambda function is evaluated! So this would be better: >>> f = lambdify(x, sin(x)*gamma(x), ("math", "mpmath", "sympy")) (2) Use some other module: >>> import numpy >>> f = lambdify((x,y), tan(x*y), numpy) Attention: There are naming differences between numpy and sympy. So if you simply take the numpy module, e.g. sympy.atan will not be translated to numpy.arctan. 
Use the modified module instead by passing the string "numpy": >>> f = lambdify((x,y), tan(x*y), "numpy") >>> f(1, 2) -2.18503986326 >>> from numpy import array >>> f(array([1, 2, 3]), array([2, 3, 5])) [-2.18503986 -0.29100619 -0.8559934 ] (3) Use a dictionary defining custom functions: >>> def my_cool_function(x): return 'sin(%s) is cool' % x >>> myfuncs = {"sin" : my_cool_function} >>> f = lambdify(x, sin(x), myfuncs); f(1) 'sin(1) is cool' Examples ======== >>> from sympy.utilities.lambdify import implemented_function >>> from sympy import sqrt, sin, Matrix >>> from sympy import Function >>> from sympy.abc import w, x, y, z >>> f = lambdify(x, x**2) >>> f(2) 4 >>> f = lambdify((x, y, z), [z, y, x]) >>> f(1,2,3) [3, 2, 1] >>> f = lambdify(x, sqrt(x)) >>> f(4) 2.0 >>> f = lambdify((x, y), sin(x*y)**2) >>> f(0, 5) 0.0 >>> row = lambdify((x, y), Matrix((x, x + y)).T, modules='sympy') >>> row(1, 2) Matrix([[1, 3]]) Tuple arguments are handled and the lambdified function should be called with the same type of arguments as were used to create the function.: >>> f = lambdify((x, (y, z)), x + y) >>> f(1, (2, 4)) 3 A more robust way of handling this is to always work with flattened arguments: >>> from sympy.utilities.iterables import flatten >>> args = w, (x, (y, z)) >>> vals = 1, (2, (3, 4)) >>> f = lambdify(flatten(args), w + x + y + z) >>> f(*flatten(vals)) 10 Functions present in `expr` can also carry their own numerical implementations, in a callable attached to the ``_imp_`` attribute. Usually you attach this using the ``implemented_function`` factory: >>> f = implemented_function(Function('f'), lambda x: x+1) >>> func = lambdify(x, f(x)) >>> func(4) 5 ``lambdify`` always prefers ``_imp_`` implementations to implementations in other namespaces, unless the ``use_imps`` input parameter is False. """ from sympy.core.symbol import Symbol from sympy.utilities.iterables import flatten # If the user hasn't specified any modules, use what is available. module_provided = True if modules is None: module_provided = False # Use either numpy (if available) or python.math where possible. # XXX: This leads to different behaviour on different systems and # might be the reason for irreproducible errors. modules = ["math", "mpmath", "sympy"] #Attempt to import numpy try: _import("numpy") except __HOLE__: pass else: modules.insert(1, "numpy") # Get the needed namespaces. namespaces = [] # First find any function implementations if use_imps: namespaces.append(_imp_namespace(expr)) # Check for dict before iterating if isinstance(modules, (dict, str)) or not hasattr(modules, '__iter__'): namespaces.append(modules) else: # consistency check if _module_present('numexpr', modules) and len(modules) > 1: raise TypeError("numexpr must be the only item in 'modules'") namespaces += list(modules) # fill namespace with first having highest priority namespace = {} for m in namespaces[::-1]: buf = _get_namespace(m) namespace.update(buf) if hasattr(expr, "atoms"): #Try if you can extract symbols from the expression. #Move on if expr.atoms in not implemented. 
syms = expr.atoms(Symbol) for term in syms: namespace.update({str(term): term}) if _module_present('numpy',namespaces) and printer is None: #XXX: This has to be done here because of circular imports from sympy.printing.lambdarepr import NumPyPrinter as printer if _module_present('numexpr',namespaces) and printer is None: #XXX: This has to be done here because of circular imports from sympy.printing.lambdarepr import NumExprPrinter as printer # Get the names of the args, for creating a docstring if not iterable(args): args = (args,) names = [] # Grab the callers frame, for getting the names by inspection (if needed) callers_local_vars = inspect.currentframe().f_back.f_locals.items() for n, var in enumerate(args): if hasattr(var, 'name'): names.append(var.name) else: # It's an iterable. Try to get name by inspection of calling frame. name_list = [var_name for var_name, var_val in callers_local_vars if var_val is var] if len(name_list) == 1: names.append(name_list[0]) else: # Cannot infer name with certainty. arg_# will have to do. names.append('arg_' + str(n)) # Create lambda function. lstr = lambdastr(args, expr, printer=printer, dummify=dummify) flat = '__flatten_args__' if flat in lstr: namespace.update({flat: flatten}) # Provide lambda expression with builtins, and compatible implementation of range namespace.update({'builtins':builtins, 'range':range}) func = eval(lstr, namespace) # Apply the docstring sig = "func({0})".format(", ".join(str(i) for i in names)) sig = textwrap.fill(sig, subsequent_indent=' '*8) expr_str = str(expr) if len(expr_str) > 78: expr_str = textwrap.wrap(expr_str, 75)[0] + '...' func.__doc__ = ("Created with lambdify. Signature:\n\n{sig}\n\n" "Expression:\n\n{expr}").format(sig=sig, expr=expr_str) return func
ImportError
dataset/ETHPy150Open sympy/sympy/sympy/utilities/lambdify.py/lambdify
def separate_users(node, user_ids):
    """Separates users into ones with permissions and ones without given a list.

    :param node: Node to separate based on permissions
    :param user_ids: List of ids, will also take and return User instances
    :return: list of subbed, list of removed user ids
    """
    removed = []
    subbed = []
    for user_id in user_ids:
        try:
            user = User.load(user_id)
        except __HOLE__:
            user = user_id
        if node.has_permission(user, 'read'):
            subbed.append(user_id)
        else:
            removed.append(user_id)
    return subbed, removed
TypeError
dataset/ETHPy150Open CenterForOpenScience/osf.io/website/notifications/utils.py/separate_users
def test_careduce(): """ test sum pattern 1, 11, 10, 01, 001, 010, 100, 110, 011, 111, 0011, 0101, 0111, 1011, 1111 test sum pattern implemented with reshape: 1000, 0100, 0010, 0001, 11111 others implemented by reshape that are not tested 0011,0101,0110,1001,1010,1100 1110,1101,1011 TODO: test with broadcast We test with the pre_scalar_op sqr in all cases. This cover all code, with and without it the pre_scalar_op. """ for scalar_op, careduce_op in [ (theano.scalar.mul, tensor.elemwise.CAReduceDtype), (theano.scalar.add, tensor.elemwise.CAReduceDtype), (theano.scalar.maximum, tensor.CAReduce), (theano.scalar.minimum, tensor.CAReduce) # The following 2 cases could work if the scalar_op.c_code work with float* dtype. # Currently we have this error: # error: invalid operands of types 'npy_float32' and 'npy_float32' to binary 'operator&' #(theano.scalar.and_, tensor.elemwise.CAReduce), #(theano.scalar.or_, tensor.elemwise.CAReduce), ]: for shape, pattern in [((1, 1), (1,)), ((1, 0), (1,)), ((0, 1), (1,)), ((0, 0), (1,)), ((0, 0, 0), (1, 2)), ((0, 0, 0, 0), (1, 2, 3)), ((2, 1), (1,)), ((1, 2), (1,)), ((100, 3, 1300), [1]), ((0,), [0]), ((5,), [0]), ((0, 0), [0, 1]), ((1, 0), [0, 1]), ((5, 4), [0, 1]), ((33, 31), [0, 1]), ((5, 4), [1]), ((5, 4), [0]), # need something bigger then 32 for some opt test. ((5, 4, 3), [0]), ((5, 4, 3), [1]), ((5, 4, 3), [0, 1]), ((5, 4, 3), [2]), ((5, 4, 3), [1, 2]), ((5, 4, 3), [0, 1, 2]), ((0, 0, 0, 0), [0, 1, 2, 3]), ((5, 4, 3, 20), [2, 3]), ((5, 4, 3, 2), [0, 1, 2, 3]), ((5, 4, 3, 2), [0, 2, 3]), ((5, 4, 3, 2), [1, 2, 3]), ((5, 4, 3, 10, 11), [1, 2]), ((5, 4, 3, 20), [2, 3]), ((5, 4, 3, 2), [0, 1, 2, 3]), ((5, 4, 3, 2), [0, 2, 3]), ((5, 4, 3, 2), [1, 2, 3]), # test shape bigger then 4096 on each dimension to make sure that we work correctly when we don't have enough thread/block in each dimensions ((4100, 3), [0]), ((3, 4101), [0]), # 10 ((1024, 33), [0]), ((33, 1024), [0]), # 10 ((1025, 33), [0]), ((33, 1025), [0]), # 10 ((4100, 3), [1]), ((3, 4101), [1]), # 01 ((1024, 33), [1]), ((33, 1024), [1]), # 01 ((1025, 33), [1]), ((33, 1025), [1]), # 01 ((4100, 3), [0, 1]), ((3, 4101), [0, 1]), # 11 ((1024, 33), [0, 1]), ((33, 1024), [0, 1]), # 01 ((1025, 33), [0, 1]), ((33, 1025), [0, 1]), # 01 ((4100, 4, 3), [0]), ((5, 4100, 3), [0]), ((5, 4, 4100), [0]), ((3, 65536, 1), [0]), # 100 ((4100, 4, 3), [1]), ((5, 4100, 3), [1]), ((5, 4, 4100), [1]), # 010 ((4100, 4, 3), [2]), ((5, 4100, 3), [2]), ((5, 4, 4100), [2]), # 001 ((4100, 4, 3), [0, 1]), ((5, 4100, 3), [0, 1]), ((5, 4, 4100), [0, 1]), # 110 ((4100, 4, 3), [1, 2]), ((5, 4100, 3), [1, 2]), ((5, 4, 4100), [1, 2]), # 011 ((4100,4,3),[0,2]),((5,4100,3),[0,2]),((5,4,4100),[0,2]), ((4100, 4, 3), [0, 1, 2]), ((5, 4100, 3), [0, 1, 2]), ((5, 4, 4100), [0, 1, 2]), # 111 ((65, 4, 3), [0, 1, 2]), ((5, 65, 3), [0, 1, 2]), ((5, 4, 65), [0, 1, 2]), # 111 # test pattern implemented by reshape ((4100, 4, 3, 2), [0]), ((4, 4100, 3, 2), [0]), ((4, 3, 4100, 2), [0]), ((4, 3, 2, 4100), [0]), # 1000 ((4100, 4, 3, 2), [1]), ((4, 4100, 3, 2), [1]), ((4, 3, 4100, 2), [1]), ((4, 3, 2, 4100), [1]), # 0100 ((4100, 4, 3, 2), [2]), ((4, 4100, 3, 2), [2]), ((4, 3, 4100, 2), [2]), ((4, 3, 2, 4100), [2]), # 0010 ((4100, 4, 3, 2), [3]), ((4, 4100, 3, 2), [3]), ((4, 3, 4100, 2), [3]), ((4, 3, 2, 4100), [3]), # 0001 # reduce over 2d ((4100, 4, 3, 2), [2, 3]), ((4, 4100, 3, 2), [2, 3]), ((4, 3, 4100, 2), [2, 3]), ((4, 3, 2, 4100), [2, 3]), # 0011 ((4100, 4, 3, 2), [1, 3]), ((4, 4100, 3, 2), [1, 3]), ((4, 3, 4100, 2), [1, 3]), ((4, 3, 2, 4100), [1, 
3]), # 0101 ((4100, 4, 3, 2), [1, 2]), ((4, 4100, 3, 2), [1, 2]), ((4, 3, 4100, 2), [1, 2]), ((4, 3, 2, 4100), [1, 2]), # 0110 ((4100,4,3,2),[0,3]),((4,4100,3,2),[0,3]),((4,3,4100,2),[0,3]),((4,3,2,4100),[0,3]),#1001 # ((4100,4,3,2),[0,2]),((4,4100,3,2),[0,2]),((4,3,4100,2),[0,2]),((4,3,2,4100),[0,2]),#1010 not implemented ((4100, 4, 3, 2), [0, 1]), ((4, 4100, 3, 2), [0, 1]), ((4, 3, 4100, 2), [0, 1]), ((4, 3, 2, 4100), [0, 1]), # 1100 # reduce over 3d # 3d not tested: 1101, 1110, 1111 ((4100,4,3,2),[0,1,3]),((4,4100,3,2),[0,1,3]),((4,3,4100,2),[0,1,3]),((4,3,2,4100),[0,1,3]),#1101 ((4100, 4, 3, 2), [0, 1, 2]), ((4, 4100, 3, 2), [0, 1, 2]), ((4, 3, 4100, 2), [0, 1, 2]), ((4, 3, 2, 4100), [0, 1, 2]), # 1110 ((4100, 4, 3, 2), [0, 2, 3]), ((4, 4100, 3, 2), [0, 2, 3]), ((4, 3, 4100, 2), [0, 2, 3]), # ((4,3,2,4100),[0,2,3]),#1011 ((4100, 4, 3, 2), [1, 2, 3]), ((4, 4100, 3, 2), [1, 2, 3]), ((4, 3, 4100, 2), [1, 2, 3]), ((4, 3, 2, 4100), [1, 2, 3]), # 0111 ((65, 4, 3, 2), [1, 2, 3]), ((4, 65, 3, 2), [1, 2, 3]), ((4, 3, 65, 2), [1, 2, 3]), ((4, 3, 2, 65), [1, 2, 3]), # 0111 # reduce over 4d ((4100, 2, 3, 4), [0, 1, 2, 3]), ((2, 4100, 3, 4), [0, 1, 2, 3]), ((2, 3, 4100, 4), [0, 1, 2, 3]), ((2, 3, 4, 4100), [0, 1, 2, 3]), ((128, 1, 3, 3), [0, 1, 2, 3]), # 1111 # reduce over 5d ((1100, 2, 3, 4, 5), [0, 1, 2, 3, 4]), ((2, 1100, 3, 4, 5), [0, 1, 2, 3, 4]), ((2, 3, 1100, 4, 5), [0, 1, 2, 3, 4]), ((2, 3, 4, 1100, 5), [0, 1, 2, 3, 4]), ((2, 3, 4, 5, 1100), [0, 1, 2, 3, 4]), # 11111 ]: op = careduce_op(scalar_op, axis=pattern) pat = tensor_pattern_to_gpu_pattern(shape, pattern) a = tensor.TensorType('float32', (False,) * len(shape))() b = op(a*a) val = numpy.random.rand(numpy.prod(shape)).reshape(shape) # val = numpy.ones(shape) # val = numpy.arange(numpy.prod(shape)).reshape(shape) val = theano._asarray(val, dtype='float32') f = theano.function([a], b, mode=mode_with_gpu) f2 = theano.function([a], b, mode=mode_without_gpu) assert tcn.GpuCAReduce in [x.op.__class__ for x in f.maker.fgraph.toposort()], ( scalar_op, shape, pattern) if tcn.GpuElemwise in [x.op.__class__ for x in f.maker.fgraph.toposort()]: assert tcn.GpuReshape in [x.op.__class__ for x in f.maker.fgraph.toposort()] assert op.__class__ in [x.op.__class__ for x in f2.maker.fgraph.toposort()], ( scalar_op, shape, pattern) f_caused_value_error = False try: f_out = f(val) except ValueError as e: exc = e f_caused_value_error = True except __HOLE__: if (numpy.prod(shape) == 0 and getattr(scalar_op, 'identity', None) != 0): continue raise f2_caused_value_error = False try: f2_out = f2(val) except ValueError as e: exc2 = e f2_caused_value_error = True if f_caused_value_error != f2_caused_value_error: if f_caused_value_error: print('f caused this value error:') print(exc) else: print('f did not raise a value error, but should have') if f2_caused_value_error: print('f2 caused this value error:') print(exc2) else: print('f should not have raised a value error') print('shape was: ', shape) print('pattern was: ', pattern) assert False try: # We raise the error threashold as we sum big matrix # and this cause small rounding difference with some seed # example in debug mode with unittests.rseed=9275 orig_rtol = theano.tensor.basic.float32_rtol theano.tensor.basic.float32_rtol = 2e-5 assert _allclose(f_out, f2_out), ('shape', shape, 'pattern', pattern, scalar_op, sum([shape[i] for i in pattern]), f2(val), f(val), val) finally: theano.tensor.basic.float32_rtol = orig_rtol # test with dimshuffle # we shuffle the 2 outer dims. 
for shape, pattern in [ # ((5,),[0]), ((5, 4), [0, 1]), ((5, 4), [0]), ((5, 4, 3), [0]), ((5, 4, 3), [0, 1]), ((5, 4, 3), [2]), ((5, 4, 3), [0, 1, 2]), ((5, 4, 3, 2), [0, 1, 2, 3]), ((5, 4, 3, 2), [0, 2, 3]), ((128, 1, 3, 3), [0, 1, 2, 3]), ]: op = careduce_op(scalar_op, axis=pattern) pat = tensor_pattern_to_gpu_pattern(shape, pattern) a = tensor.TensorType('float32', (False,) * len(shape))() dim_pattern = list(range(len(shape))) dim_pattern[0] = 1 dim_pattern[1] = 0 a = a.dimshuffle(dim_pattern) b = op(a*a) val = numpy.random.rand(numpy.prod(shape)).reshape(shape) # val = numpy.ones(shape) # val = numpy.arange(numpy.prod(shape)).reshape(shape) val = theano._asarray(val, dtype='float32') f = theano.function([a], b, mode=mode_with_gpu) f2 = theano.function([a], b, mode=mode_without_gpu) assert tcn.GpuCAReduce in [x.op.__class__ for x in f.maker.fgraph.toposort()], ( scalar_op, shape, pattern) assert tcn.GpuElemwise not in [x.op.__class__ for x in f.maker.fgraph.toposort()] assert op.__class__ in [x.op.__class__ for x in f2.maker.fgraph.toposort()], ( scalar_op, shape, pattern) assert _allclose(f2(val), f(val)), ('shape', shape, 'pattern', pattern, scalar_op, sum([shape[i] for i in pattern])) # test with broadcast for shape, pattern in [((5,), [0]), ((5, 4), [0, 1]), ((5, 4), [0]), ((5, 4, 3), [0]), ((5, 4, 3), [0, 1]), ((5, 4, 3), [2]), ((5, 4, 3), [0, 1, 2]), ((5, 4, 3, 2), [0, 1, 2, 3]), ((5, 4, 3, 2), [0, 2, 3]), ((128, 1, 3, 3), [0, 1, 2, 3]), ]: op = careduce_op(scalar_op, axis=pattern) pat = tensor_pattern_to_gpu_pattern(shape, pattern) shape = numpy.asarray(shape) * 2 a = tensor.TensorType('float32', (False,) * len(shape))() a2 = tcn.CudaNdarrayType((False,) * len(shape))() b = op(a*a) b2 = op(a2*a2) val = numpy.random.rand(numpy.prod(shape)).reshape(shape) # val = numpy.ones(shape) # val = numpy.arange(numpy.prod(shape)).reshape(shape) val = theano._asarray(val, dtype='float32') val2 = cuda.CudaNdarray(val) if len(shape) == 1: val = val[::2] val2 = val2[::2] elif len(shape) == 2: val = val[::2, ::2] val2 = val2[::2, ::2] elif len(shape) == 3: val = val[::2, ::2, ::2] val2 = val2[::2, ::2, ::2] elif len(shape) == 4: val = val[::2, ::2, ::2, ::2] val2 = val2[::2, ::2, ::2, ::2] f = theano.function([a], b, mode=mode_without_gpu) f2 = theano.function([a2], b2, mode=mode_with_gpu) assert tcn.GpuCAReduce in [x.op.__class__ for x in f2.maker.fgraph.toposort()], ( scalar_op, shape, pattern) assert tcn.GpuElemwise not in [x.op.__class__ for x in f.maker.fgraph.toposort()] assert op.__class__ in [x.op.__class__ for x in f.maker.fgraph.toposort()], ( scalar_op, shape, pattern) assert _allclose(f2(val2), f(val)), ('shape', shape, 'pattern', pattern, sum([shape[i] for i in pattern]))
NotImplementedError
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/sandbox/cuda/tests/test_basic_ops.py/test_careduce
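Each `(shape, pattern)` pair in the test asks the GPU op to square the input and reduce it over the listed axes. As a plain-numpy reference for the `add` scalar op (a sketch of what the test compares against, not the Theano graph itself), shape `(5, 4, 3)` with pattern `[1, 2]` computes:

import numpy

val = numpy.random.rand(5, 4, 3).astype('float32')
pattern = (1, 2)

expected = (val * val).sum(axis=pattern)   # pre_scalar_op sqr, then the reduction
print(expected.shape)                      # -> (5,): only the non-reduced axis remains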
def test_reshape(): a = tcn.CudaNdarrayType((False,))() b = tcn.CudaNdarrayType((False, False))() c = T.reshape(a, [2, 3]) # basic f = theano.function([a], c, mode=mode_with_gpu) fv = f(cuda_ndarray.CudaNdarray(theano._asarray([0, 1, 2, 3, 4, 5], dtype='float32'))) topo = f.maker.fgraph.toposort() assert any([isinstance(node.op, B.GpuReshape) for node in topo]) assert numpy.all(fv == numpy.asarray([[0, 1, 2], [3, 4, 5]])) # test that it works without inplace operations a_val = cuda_ndarray.CudaNdarray(theano._asarray([0, 1, 2, 3, 4, 5], dtype='float32')) a_val_copy = cuda_ndarray.CudaNdarray(theano._asarray([0, 1, 2, 3, 4, 5], dtype='float32')) b_val = cuda_ndarray.CudaNdarray(theano._asarray([[0, 1, 2], [3, 4, 5]], dtype='float32')) f_sub = theano.function([a, b], c - b, mode=mode_with_gpu) topo = f_sub.maker.fgraph.toposort() assert any([isinstance(node.op, B.GpuReshape) for node in topo]) assert numpy.all(f_sub(a_val, b_val) == 0.0) assert numpy.all(numpy.asarray(a_val) == numpy.asarray(a_val_copy)) # test that it works with inplace operations a_val = theano._asarray([0, 1, 2, 3, 4, 5], dtype='float32') a_val_copy = theano._asarray([0, 1, 2, 3, 4, 5], dtype='float32') b_val = theano._asarray([[0, 1, 2], [3, 4, 5]], dtype='float32') f_sub = theano.function([a, b], c - b, mode=mode_with_gpu) topo = f_sub.maker.fgraph.toposort() assert any([isinstance(node.op, B.GpuReshape) for node in topo]) assert numpy.all(f_sub(a_val, b_val) == 0.0) assert numpy.all(numpy.asarray(a_val) == numpy.asarray(a_val_copy)) # verify gradient def just_vals(v): return T.Reshape(2)(v, theano._asarray([2, 3], dtype='int32')) utt.verify_grad(just_vals, [a_val]) # Test for appropriate handling of -1 indices x = T.tensor3('x') reshp_val = numpy.array([[[1, 0], [0, 1]], [[0, 1], [1, 0]]], dtype='float32') f_reshp = theano.function([x], x.reshape((-1, 1, 1)), mode=mode_with_gpu) y = f_reshp(reshp_val) assert y.shape == (8, 1, 1) dim = T.scalar('dim_val', dtype='int32') f_reshp = theano.function( [x, dim], x.reshape((dim, dim, 1)), mode=mode_with_gpu ) try: f_reshp(reshp_val, 4) raise('Only one -1 is accepted in the new shape') except __HOLE__: pass # Test zero dimensions are allowed x = T.vector('x') f_reshp = theano.function([x], x.reshape((0,100)), mode=mode_with_gpu) assert f_reshp(numpy.ndarray((0,), dtype='float32')).shape == (0,100)
ValueError
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/sandbox/cuda/tests/test_basic_ops.py/test_reshape
def test_elemwise_bad_broadcast():
    x = cuda.fmatrix('x')
    y = cuda.fmatrix('y')

    f = theano.function([x, y], x * y, mode=mode_with_gpu)
    assert len(f.maker.fgraph.toposort()) == 2
    assert isinstance(f.maker.fgraph.toposort()[0].op, cuda.GpuElemwise)
    assert f.maker.fgraph.toposort()[1].op == cuda.host_from_gpu

    try:
        f(rand_cuda_ndarray((10, 3)), rand_cuda_ndarray((10, 1)))
    except __HOLE__:
        pass
    else:
        raise Exception("Theano should have raised an error")
ValueError
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/sandbox/cuda/tests/test_basic_ops.py/test_elemwise_bad_broadcast
def test_gpujoin_assert_cndas():
    # this will end up being an ndarray, as it's float64
    _a = numpy.asarray([[1, 2], [3, 4]], dtype='float64')
    a = theano.shared(_a)

    try:
        c = cuda.basic_ops.gpu_join(1, a)
        # can't "assert False" here, as we want the assertion
        # error from gpu_join
    except __HOLE__:
        assert True
        return

    assert False
TypeError
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/sandbox/cuda/tests/test_basic_ops.py/test_gpujoin_assert_cndas
def get_request_choices(self, request, tbl): """ Return a list of choices for this chooser, using a HttpRequest to build the context. """ from django.contrib.contenttypes.models import ContentType kw = {} # 20120202 if tbl.master_field is not None: rqdata = getrqdata(request) if tbl.master is not None: master = tbl.master else: mt = rqdata.get(constants.URL_PARAM_MASTER_TYPE) try: master = ContentType.objects.get(pk=mt).model_class() except ContentType.DoesNotExist: master = None pk = rqdata.get(constants.URL_PARAM_MASTER_PK, None) if pk and master: try: kw[tbl.master_field.name] = master.objects.get(pk=pk) except __HOLE__: raise Exception( "Invalid primary key %r for %s", pk, master.__name__) except master.DoesNotExist: # todo: ReportRequest should become a subclass of Dialog # and this exception should call dlg.error() raise Exception("There's no %s with primary key %r" % (master.__name__, pk)) for k, v in list(request.GET.items()): kw[str(k)] = v # logger.info( # "20130513 get_request_choices(%r) -> %r", # tbl, kw) for cv in self.converters: kw = cv.convert(**kw) if tbl.known_values: kw.update(tbl.known_values) if False: # removed 20120815 #1114 #~ ar = tbl.request(ui,request,tbl.default_action) if ar.create_kw: kw.update(ar.create_kw) if ar.known_values: kw.update(ar.known_values) if tbl.master_key: kw[tbl.master_key] = ar.master_instance #~ if tbl.known_values: #~ kw.update(tbl.known_values) return self.get_choices(**kw) # 20120918b
ValueError
dataset/ETHPy150Open lsaffre/lino/lino/utils/choosers.py/Chooser.get_request_choices
def handle(self, *args, **options): """Handle the command.""" if len(args) != 1: raise CommandError("You must specify a filename on the command " "line.") filename = args[0] if not os.path.exists(filename): raise CommandError("%s does not exist." % filename) try: import django_reset except __HOLE__: raise CommandError("Before using this command, you need to " "install the 'django-reset' package") confirm = input(""" This will wipe out your existing database prior to loading. It is highly recommended that you have a full SQL database dump in case things go wrong. You should only use this if you're migrating from one type of database to another, with the same version of Review Board on each. Are you sure you want to continue?" Type 'yes' to continue, or 'no' to cancel: """) if confirm != 'yes': return apps = [app.__name__.split('.')[-2] for app in get_apps()] os.system('./reviewboard/manage.py reset --noinput %s' % ' '.join(apps)) transaction_setup = False try: with open(filename, 'r') as f: line = f.readline() m = re.match("^# dbdump v(\d+) - (\d+) objects$", line) if not m: raise CommandError("Unknown dump format\n") version = int(m.group(1)) totalobjs = int(m.group(2)) i = 0 prev_pct = -1 if version != 1: raise CommandError("Unknown dump version\n") transaction.commit_unless_managed() transaction.enter_transaction_management() transaction.managed(True) transaction_setup = True self.stdout.write("Importing new style dump format (v%s)" % version) for line in f: if line[0] == "{": for obj in serializers.deserialize("json", "[%s]" % line): try: obj.save() except Exception as e: self.stderr.write("Error: %s\n" % e) self.stderr.write("Line %s: '%s'" % (i, line)) elif line[0] != "#": self.stderr.write("Junk data on line %s" % i) db.reset_queries() i += 1 pct = (i * 100 / totalobjs) if pct != prev_pct: self.stdout.write(" [%s%%]\r" % pct) self.stdout.flush() prev_pct = pct transaction.commit() transaction.leave_transaction_management() except Exception as e: raise CommandError("Problem installing '%s': %s\n" % (filename, e)) if transaction_setup: transaction.rollback() transaction.leave_transaction_management() self.stdout.write('\nDone.')
ImportError
dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/admin/management/commands/loaddb.py/Command.handle
def collapse (listoflists,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None): """ Averages data in collapsecol, keeping all unique items in keepcols (using unique, which keeps unique LISTS of column numbers), retaining the unique sets of values in keepcols, the mean for each. Setting fcn1 and/or fcn2 to point to a function rather than None (e.g., stats.sterr, len) will append those results (e.g., the sterr, N) after each calculated mean. cfcn is the collapse function to apply (defaults to mean, defined here in the pstat module to avoid circular imports with stats.py, but harmonicmean or others could be passed). Usage: collapse (listoflists,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None) Returns: a list of lists with all unique permutations of entries appearing in columns ("conditions") specified by keepcols, abutted with the result of cfcn (if cfcn=None, defaults to the mean) of each column specified by collapsecols. """ def collmean (inlist): s = 0 for item in inlist: s = s + item return s/float(len(inlist)) if type(keepcols) not in [ListType,TupleType]: keepcols = [keepcols] if type(collapsecols) not in [ListType,TupleType]: collapsecols = [collapsecols] if cfcn == None: cfcn = collmean if keepcols == []: means = [0]*len(collapsecols) for i in range(len(collapsecols)): avgcol = colex(listoflists,collapsecols[i]) means[i] = cfcn(avgcol) if fcn1: try: test = fcn1(avgcol) except: test = 'N/A' means[i] = [means[i], test] if fcn2: try: test = fcn2(avgcol) except: test = 'N/A' try: means[i] = means[i] + [len(avgcol)] except __HOLE__: means[i] = [means[i],len(avgcol)] return means else: values = colex(listoflists,keepcols) uniques = unique(values) uniques.sort() newlist = [] if type(keepcols) not in [ListType,TupleType]: keepcols = [keepcols] for item in uniques: if type(item) not in [ListType,TupleType]: item =[item] tmprows = linexand(listoflists,keepcols,item) for col in collapsecols: avgcol = colex(tmprows,col) item.append(cfcn(avgcol)) if fcn1 <> None: try: test = fcn1(avgcol) except: test = 'N/A' item.append(test) if fcn2 <> None: try: test = fcn2(avgcol) except: test = 'N/A' item.append(test) newlist.append(item) return newlist
TypeError
dataset/ETHPy150Open sunlightlabs/clearspending/completeness/statlib/pstat.py/collapse
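A rough usage sketch for `collapse` with made-up numbers, keeping column 0 and averaging column 1 with the default mean (the expected output is read off the code above, so treat it as illustrative):

scores = [['a', 1],
          ['a', 3],
          ['b', 10]]

print(collapse(scores, 0, 1))
# expected: [['a', 2.0], ['b', 10.0]]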
def recode (inlist,listmap,cols=None):
    """
    Changes the values in a list to a new set of values (useful when
    you need to recode data from (e.g.) strings to numbers. cols defaults
    to None (meaning all columns are recoded).

    Usage:   recode (inlist,listmap,cols=None)  cols=recode cols, listmap=2D list
    Returns: inlist with the appropriate values replaced with new ones
    """
    lst = copy.deepcopy(inlist)
    if cols != None:
        if type(cols) not in [ListType,TupleType]:
            cols = [cols]
        for col in cols:
            for row in range(len(lst)):
                try:
                    idx = colex(listmap,0).index(lst[row][col])
                    lst[row][col] = listmap[idx][1]
                except __HOLE__:
                    pass
    else:
        for row in range(len(lst)):
            for col in range(len(lst)):
                try:
                    idx = colex(listmap,0).index(lst[row][col])
                    lst[row][col] = listmap[idx][1]
                except ValueError:
                    pass
    return lst
ValueError
dataset/ETHPy150Open sunlightlabs/clearspending/completeness/statlib/pstat.py/recode
def sortby(listoflists,sortcols):
    """
    Sorts a list of lists on the column(s) specified in the sequence
    sortcols.

    Usage:   sortby(listoflists,sortcols)
    Returns: sorted list, unchanged column ordering
    """
    newlist = abut(colex(listoflists,sortcols),listoflists)
    newlist.sort()
    try:
        numcols = len(sortcols)
    except __HOLE__:
        numcols = 1
    crit = '[' + str(numcols) + ':]'
    newlist = colex(newlist,crit)
    return newlist
TypeError
dataset/ETHPy150Open sunlightlabs/clearspending/completeness/statlib/pstat.py/sortby
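A small usage sketch for `sortby` (made-up rows): sorting on column 1 reorders the rows while leaving the column order untouched:

rows = [[3, 'c'], [1, 'b'], [2, 'a']]

print(sortby(rows, 1))
# expected: [[2, 'a'], [1, 'b'], [3, 'c']]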
def acollapse (a,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None): """ Averages data in collapsecol, keeping all unique items in keepcols (using unique, which keeps unique LISTS of column numbers), retaining the unique sets of values in keepcols, the mean for each. If stderror or N of the mean are desired, set either or both parameters to 1. Usage: acollapse (a,keepcols,collapsecols,fcn1=None,fcn2=None,cfcn=None) Returns: unique 'conditions' specified by the contents of columns specified by keepcols, abutted with the mean(s) of column(s) specified by collapsecols """ def acollmean (inarray): return N.sum(N.ravel(inarray)) if type(keepcols) not in [ListType,TupleType,N.ndarray]: keepcols = [keepcols] if type(collapsecols) not in [ListType,TupleType,N.ndarray]: collapsecols = [collapsecols] if cfcn == None: cfcn = acollmean if keepcols == []: avgcol = acolex(a,collapsecols) means = N.sum(avgcol)/float(len(avgcol)) if fcn1<>None: try: test = fcn1(avgcol) except: test = N.array(['N/A']*len(means)) means = aabut(means,test) if fcn2<>None: try: test = fcn2(avgcol) except: test = N.array(['N/A']*len(means)) means = aabut(means,test) return means else: if type(keepcols) not in [ListType,TupleType,N.ndarray]: keepcols = [keepcols] values = colex(a,keepcols) # so that "item" can be appended (below) uniques = unique(values) # get a LIST, so .sort keeps rows intact uniques.sort() newlist = [] for item in uniques: if type(item) not in [ListType,TupleType,N.ndarray]: item =[item] tmprows = alinexand(a,keepcols,item) for col in collapsecols: avgcol = acolex(tmprows,col) item.append(acollmean(avgcol)) if fcn1<>None: try: test = fcn1(avgcol) except: test = 'N/A' item.append(test) if fcn2<>None: try: test = fcn2(avgcol) except: test = 'N/A' item.append(test) newlist.append(item) try: new_a = N.array(newlist) except __HOLE__: new_a = N.array(newlist,'O') return new_a
TypeError
dataset/ETHPy150Open sunlightlabs/clearspending/completeness/statlib/pstat.py/acollapse
def aunique(inarray): """ Returns unique items in the FIRST dimension of the passed array. Only works on arrays NOT including string items. Usage: aunique (inarray) """ uniques = N.array([inarray[0]]) if len(uniques.shape) == 1: # IF IT'S A 1D ARRAY for item in inarray[1:]: if N.add.reduce(N.equal(uniques,item).ravel()) == 0: try: uniques = N.concatenate([uniques,N.array[N.newaxis,:]]) except TypeError: uniques = N.concatenate([uniques,N.array([item])]) else: # IT MUST BE A 2+D ARRAY if inarray.dtype.char != 'O': # not an Object array for item in inarray[1:]: if not N.sum(N.alltrue(N.equal(uniques,item),1)): try: uniques = N.concatenate( [uniques,item[N.newaxis,:]] ) except TypeError: # the item to add isn't a list uniques = N.concatenate([uniques,N.array([item])]) else: pass # this item is already in the uniques array else: # must be an Object array, alltrue/equal functions don't work for item in inarray[1:]: newflag = 1 for unq in uniques: # NOTE: cmp --> 0=same, -1=<, 1=> test = N.sum(abs(N.array(map(cmp,item,unq)))) if test == 0: # if item identical to any 1 row in uniques newflag = 0 # then not a novel item to add break if newflag == 1: try: uniques = N.concatenate( [uniques,item[N.newaxis,:]] ) except __HOLE__: # the item to add isn't a list uniques = N.concatenate([uniques,N.array([item])]) return uniques
TypeError
dataset/ETHPy150Open sunlightlabs/clearspending/completeness/statlib/pstat.py/aunique
def __init__(self, filename, width=None, height=None, kind='direct', mask="auto", lazy=1, client=None, target=None): # Client is mandatory. Perhaps move it farther up if we refactor assert client is not None self.__kind=kind if filename.split("://")[0].lower() in ('http','ftp','https'): try: filename2, _ = urllib.urlretrieve(filename) if filename != filename2: client.to_unlink.append(filename2) filename = filename2 except __HOLE__: filename = missing self.filename, self._backend=self.get_backend(filename, client) srcinfo = client, self.filename if kind == 'percentage_of_container': self.image=self._backend(self.filename, width, height, 'direct', mask, lazy, srcinfo) self.image.drawWidth=width self.image.drawHeight=height self.__width=width self.__height=height else: self.image=self._backend(self.filename, width, height, kind, mask, lazy, srcinfo) self.__ratio=float(self.image.imageWidth)/self.image.imageHeight self.__wrappedonce=False self.target = target
IOError
dataset/ETHPy150Open rst2pdf/rst2pdf/rst2pdf/image.py/MyImage.__init__
@classmethod def size_for_node(self, node, client): '''Given a docutils image node, returns the size the image should have in the PDF document, and what 'kind' of size that is. That involves lots of guesswork''' uri = str(node.get("uri")) if uri.split("://")[0].lower() not in ('http','ftp','https'): uri = os.path.join(client.basedir,uri) else: uri, _ = urllib.urlretrieve(uri) client.to_unlink.append(uri) srcinfo = client, uri # Extract all the information from the URI imgname, extension, options = self.split_uri(uri) if not os.path.isfile(imgname): imgname = missing scale = float(node.get('scale', 100))/100 size_known = False # Figuring out the size to display of an image is ... annoying. # If the user provides a size with a unit, it's simple, adjustUnits # will return it in points and we're done. # However, often the unit wil be "%" (specially if it's meant for # HTML originally. In which case, we will use a percentage of # the containing frame. # Find the image size in pixels: kind = 'direct' xdpi, ydpi = client.styles.def_dpi, client.styles.def_dpi extension = imgname.split('.')[-1].lower() if extension in ['svg','svgz'] and SVGImage.available(): iw, ih = SVGImage(imgname, srcinfo=srcinfo).wrap(0, 0) # These are in pt, so convert to px iw = iw * xdpi / 72 ih = ih * ydpi / 72 elif extension == 'pdf': if VectorPdf is not None: xobj = VectorPdf.load_xobj(srcinfo) iw, ih = xobj.w, xobj.h else: pdf = LazyImports.pdfinfo if pdf is None: log.warning('PDF images are not supported without pyPdf or pdfrw [%s]', nodeid(node)) return 0, 0, 'direct' reader = pdf.PdfFileReader(open(imgname, 'rb')) box = [float(x) for x in reader.getPage(0)['/MediaBox']] iw, ih = x2 - x1, y2 - y1 # These are in pt, so convert to px iw = iw * xdpi / 72.0 ih = ih * ydpi / 72.0 size_known = True # Assume size from original PDF is OK else: keeptrying = True if LazyImports.PILImage: try: img = LazyImports.PILImage.open(imgname) img.load() iw, ih = img.size xdpi, ydpi = img.info.get('dpi', (xdpi, ydpi)) keeptrying = False except __HOLE__: # PIL throws this when it's a broken/unknown image pass if keeptrying and LazyImports.PMImage: img = LazyImports.PMImage(imgname) iw = img.size().width() ih = img.size().height() density=img.density() # The density is in pixelspercentimeter (!?) xdpi=density.width()*2.54 ydpi=density.height()*2.54 keeptrying = False if keeptrying: if extension not in ['jpg', 'jpeg']: log.error("The image (%s, %s) is broken or in an unknown format" , imgname, nodeid(node)) raise ValueError else: # Can be handled by reportlab log.warning("Can't figure out size of the image (%s, %s). Install PIL for better results." , imgname, nodeid(node)) iw = 1000 ih = 1000 # Try to get the print resolution from the image itself via PIL. # If it fails, assume a DPI of 300, which is pretty much made up, # and then a 100% size would be iw*inch/300, so we pass # that as the second parameter to adjustUnits # # Some say the default DPI should be 72. That would mean # the largest printable image in A4 paper would be something # like 480x640. That would be awful. # w = node.get('width') h = node.get('height') if h is None and w is None: # Nothing specified # Guess from iw, ih log.debug("Using image %s without specifying size." 
"Calculating based on image size at %ddpi [%s]", imgname, xdpi, nodeid(node)) w = iw*inch/xdpi h = ih*inch/ydpi elif w is not None: # Node specifies only w # In this particular case, we want the default unit # to be pixels so we work like rst2html if w[-1] == '%': kind = 'percentage_of_container' w=int(w[:-1]) else: # This uses default DPI setting because we # are not using the image's "natural size" # this is what LaTeX does, according to the # docutils mailing list discussion w = client.styles.adjustUnits(w, client.styles.tw, default_unit='px') if h is None: # h is set from w with right aspect ratio h = w*ih/iw else: h = client.styles.adjustUnits(h, ih*inch/ydpi, default_unit='px') elif h is not None and w is None: if h[-1] != '%': h = client.styles.adjustUnits(h, ih*inch/ydpi, default_unit='px') # w is set from h with right aspect ratio w = h*iw/ih else: log.error('Setting height as a percentage does **not** work. '\ 'ignoring height parameter [%s]', nodeid(node)) # Set both from image data w = iw*inch/xdpi h = ih*inch/ydpi # Apply scale factor w = w*scale h = h*scale # And now we have this probably completely bogus size! log.info("Image %s size calculated: %fcm by %fcm [%s]", imgname, w/cm, h/cm, nodeid(node)) return w, h, kind
IOError
dataset/ETHPy150Open rst2pdf/rst2pdf/rst2pdf/image.py/MyImage.size_for_node
def prepare(self): ''' Run the preparation sequence required to start a salt master server. If sub-classed, don't **ever** forget to run: super(YourSubClass, self).prepare() ''' super(Master, self).prepare() try: if self.config['verify_env']: v_dirs = [ self.config['pki_dir'], os.path.join(self.config['pki_dir'], 'minions'), os.path.join(self.config['pki_dir'], 'minions_pre'), os.path.join(self.config['pki_dir'], 'minions_denied'), os.path.join(self.config['pki_dir'], 'minions_autosign'), os.path.join(self.config['pki_dir'], 'minions_rejected'), self.config['cachedir'], os.path.join(self.config['cachedir'], 'jobs'), os.path.join(self.config['cachedir'], 'proc'), self.config['sock_dir'], self.config['token_dir'], self.config['syndic_dir'], self.config['sqlite_queue_dir'], ] if self.config.get('transport') == 'raet': v_dirs.append(os.path.join(self.config['pki_dir'], 'accepted')) v_dirs.append(os.path.join(self.config['pki_dir'], 'pending')) v_dirs.append(os.path.join(self.config['pki_dir'], 'rejected')) v_dirs.append(os.path.join(self.config['cachedir'], 'raet')) verify_env( v_dirs, self.config['user'], permissive=self.config['permissive_pki_access'], pki_dir=self.config['pki_dir'], ) logfile = self.config['log_file'] if logfile is not None and not logfile.startswith(('tcp://', 'udp://', 'file://')): # Logfile is not using Syslog, verify current_umask = os.umask(0o027) verify_files([logfile], self.config['user']) os.umask(current_umask) # Clear out syndics from cachedir for syndic_file in os.listdir(self.config['syndic_dir']): os.remove(os.path.join(self.config['syndic_dir'], syndic_file)) except __HOLE__ as error: self.environment_failure(error) self.setup_logfile_logger() verify_log(self.config) self.action_log_info('Setting up') # TODO: AIO core is separate from transport if self.config['transport'].lower() in ('zeromq', 'tcp'): if not verify_socket(self.config['interface'], self.config['publish_port'], self.config['ret_port']): self.shutdown(4, 'The ports are not available to bind') self.config['interface'] = ip_bracket(self.config['interface']) migrations.migrate_paths(self.config) # Late import so logging works correctly import salt.master self.master = salt.master.Master(self.config) else: # Add a udp port check here import salt.daemons.flo self.master = salt.daemons.flo.IofloMaster(self.config) self.daemonize_if_required() self.set_pidfile() salt.utils.process.notify_systemd()
OSError
dataset/ETHPy150Open saltstack/salt/salt/cli/daemons.py/Master.prepare
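The `verify_env` calls in these `prepare` methods boil down to creating a list of runtime directories up front and turning any `OSError` (missing parent, bad permissions) into one clear startup failure. A sketch of that idea, not salt's actual implementation, with hypothetical paths:

import os

def verify_dirs(dirs):
    for d in dirs:
        try:
            if not os.path.isdir(d):
                os.makedirs(d)      # surface permission problems now, not mid-run
        except OSError as err:
            raise SystemExit('could not prepare %s: %s' % (d, err))

verify_dirs(['/tmp/demo-pki/minions', '/tmp/demo-cache/jobs'])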
def prepare(self): ''' Run the preparation sequence required to start a salt minion. If sub-classed, don't **ever** forget to run: super(YourSubClass, self).prepare() ''' super(Minion, self).prepare() try: if self.config['verify_env']: confd = self.config.get('default_include') if confd: # If 'default_include' is specified in config, then use it if '*' in confd: # Value is of the form "minion.d/*.conf" confd = os.path.dirname(confd) if not os.path.isabs(confd): # If configured 'default_include' is not an absolute # path, consider it relative to folder of 'conf_file' # (/etc/salt by default) confd = os.path.join( os.path.dirname(self.config['conf_file']), confd ) else: confd = os.path.join( os.path.dirname(self.config['conf_file']), 'minion.d' ) v_dirs = [ self.config['pki_dir'], self.config['cachedir'], self.config['sock_dir'], self.config['extension_modules'], confd, ] if self.config.get('transport') == 'raet': v_dirs.append(os.path.join(self.config['pki_dir'], 'accepted')) v_dirs.append(os.path.join(self.config['pki_dir'], 'pending')) v_dirs.append(os.path.join(self.config['pki_dir'], 'rejected')) v_dirs.append(os.path.join(self.config['cachedir'], 'raet')) verify_env( v_dirs, self.config['user'], permissive=self.config['permissive_pki_access'], pki_dir=self.config['pki_dir'], ) logfile = self.config['log_file'] if logfile is not None and not logfile.startswith(('tcp://', 'udp://', 'file://')): # Logfile is not using Syslog, verify current_umask = os.umask(0o027) verify_files([logfile], self.config['user']) os.umask(current_umask) except __HOLE__ as error: self.environment_failure(error) self.setup_logfile_logger() verify_log(self.config) log.info( 'Setting up the Salt Minion "{0}"'.format( self.config['id'] ) ) migrations.migrate_paths(self.config) # Bail out if we find a process running and it matches out pidfile if self.check_running(): self.action_log_info('An instance is already running. Exiting') self.shutdown(1) # TODO: AIO core is separate from transport if self.config['transport'].lower() in ('zeromq', 'tcp'): # Late import so logging works correctly import salt.minion # If the minion key has not been accepted, then Salt enters a loop # waiting for it, if we daemonize later then the minion could halt # the boot process waiting for a key to be accepted on the master. # This is the latest safe place to daemonize self.daemonize_if_required() self.set_pidfile() if self.config.get('master_type') == 'func': salt.minion.eval_master_func(self.config) if isinstance(self.config.get('master'), list): if self.config.get('master_type') == 'failover': self.minion = salt.minion.Minion(self.config) else: self.minion = salt.minion.MultiMinion(self.config) else: self.minion = salt.minion.Minion(self.config) else: import salt.daemons.flo self.daemonize_if_required() self.set_pidfile() self.minion = salt.daemons.flo.IofloMinion(self.config)
OSError
dataset/ETHPy150Open saltstack/salt/salt/cli/daemons.py/Minion.prepare
def start(self):
    '''
    Start the actual minion.

    If sub-classed, don't **ever** forget to run:

        super(YourSubClass, self).start()

    NOTE: Run any required code before calling `super()`.
    '''
    super(Minion, self).start()
    try:
        if check_user(self.config['user']):
            self.action_log_info('Starting up')
            self.verify_hash_type()
            self.minion.tune_in()
            if self.minion.restart:
                raise SaltClientError('Minion could not connect to Master')
    except (__HOLE__, SaltSystemExit) as error:
        self.action_log_info('Stopping')
        if isinstance(error, KeyboardInterrupt):
            log.warning('Exiting on Ctrl-c')
            self.shutdown()
        else:
            log.error(str(error))
            self.shutdown(error.code)
KeyboardInterrupt
dataset/ETHPy150Open saltstack/salt/salt/cli/daemons.py/Minion.start
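The `start`/`call` methods in this module share one shutdown pattern: block in `tune_in()` and treat Ctrl-C as an orderly stop rather than a traceback. A stripped-down sketch (the `daemon` object with `tune_in`/`shutdown` is hypothetical):

import logging

log = logging.getLogger(__name__)

def run_forever(daemon):
    try:
        daemon.tune_in()            # blocks until the process is told to stop
    except KeyboardInterrupt:
        log.warning('Exiting on Ctrl-c')
        daemon.shutdown()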
def call(self, cleanup_protecteds):
    '''
    Start the actual minion as a caller minion.

    cleanup_protecteds is list of yard host addresses that should not be
    cleaned up this is to fix race condition when salt-caller minion starts up

    If sub-classed, don't **ever** forget to run:

        super(YourSubClass, self).start()

    NOTE: Run any required code before calling `super()`.
    '''
    try:
        self.prepare()
        if check_user(self.config['user']):
            self.minion.opts['__role'] = kinds.APPL_KIND_NAMES[kinds.applKinds.caller]
            self.minion.opts['raet_cleanup_protecteds'] = cleanup_protecteds
            self.minion.call_in()
    except (__HOLE__, SaltSystemExit) as exc:
        self.action_log_info('Stopping')
        if isinstance(exc, KeyboardInterrupt):
            log.warning('Exiting on Ctrl-c')
            self.shutdown()
        else:
            log.error(str(exc))
            self.shutdown(exc.code)
KeyboardInterrupt
dataset/ETHPy150Open saltstack/salt/salt/cli/daemons.py/Minion.call
def prepare(self): ''' Run the preparation sequence required to start a salt minion. If sub-classed, don't **ever** forget to run: super(YourSubClass, self).prepare() ''' super(ProxyMinion, self).prepare() if not self.values.proxyid: raise SaltSystemExit('salt-proxy requires --proxyid') # Proxies get their ID from the command line. This may need to change in # the future. self.config['id'] = self.values.proxyid try: if self.config['verify_env']: confd = self.config.get('default_include') if confd: # If 'default_include' is specified in config, then use it if '*' in confd: # Value is of the form "minion.d/*.conf" confd = os.path.dirname(confd) if not os.path.isabs(confd): # If configured 'default_include' is not an absolute # path, consider it relative to folder of 'conf_file' # (/etc/salt by default) confd = os.path.join( os.path.dirname(self.config['conf_file']), confd ) else: confd = os.path.join( os.path.dirname(self.config['conf_file']), 'minion.d' ) v_dirs = [ self.config['pki_dir'], self.config['cachedir'], self.config['sock_dir'], self.config['extension_modules'], confd, ] if self.config.get('transport') == 'raet': v_dirs.append(os.path.join(self.config['pki_dir'], 'accepted')) v_dirs.append(os.path.join(self.config['pki_dir'], 'pending')) v_dirs.append(os.path.join(self.config['pki_dir'], 'rejected')) v_dirs.append(os.path.join(self.config['cachedir'], 'raet')) verify_env( v_dirs, self.config['user'], permissive=self.config['permissive_pki_access'], pki_dir=self.config['pki_dir'], ) logfile = self.config.get('proxy_log') or self.config['log_file'] if logfile is not None and not logfile.startswith(('tcp://', 'udp://', 'file://')): # Logfile is not using Syslog, verify current_umask = os.umask(0o027) verify_files([logfile], self.config['user']) os.umask(current_umask) except __HOLE__ as error: self.environment_failure(error) self.setup_logfile_logger() verify_log(self.config) self.action_log_info('Setting up "{0}"'.format(self.config['id'])) migrations.migrate_paths(self.config) # TODO: AIO core is separate from transport if self.config['transport'].lower() in ('zeromq', 'tcp'): # Late import so logging works correctly import salt.minion # If the minion key has not been accepted, then Salt enters a loop # waiting for it, if we daemonize later then the minion could halt # the boot process waiting for a key to be accepted on the master. # This is the latest safe place to daemonize self.daemonize_if_required() self.set_pidfile() # TODO Proxy minions don't currently support failover self.minion = salt.minion.ProxyMinion(self.config) else: # For proxy minions, this doesn't work yet. import salt.daemons.flo self.daemonize_if_required() self.set_pidfile() self.minion = salt.daemons.flo.IofloMinion(self.config)
OSError
dataset/ETHPy150Open saltstack/salt/salt/cli/daemons.py/ProxyMinion.prepare
def start(self):
    '''
    Start the actual proxy minion.

    If sub-classed, don't **ever** forget to run:

        super(YourSubClass, self).start()

    NOTE: Run any required code before calling `super()`.
    '''
    super(ProxyMinion, self).start()
    try:
        if check_user(self.config['user']):
            log.info('The proxy minion is starting up')
            self.verify_hash_type()
            self.action_log_info('Starting up')
            self.minion.tune_in()
    except (__HOLE__, SaltSystemExit) as exc:
        self.action_log_info('Stopping')
        if isinstance(exc, KeyboardInterrupt):
            log.warning('Exiting on Ctrl-c')
            self.shutdown()
        else:
            log.error(str(exc))
            self.shutdown(exc.code)
KeyboardInterrupt
dataset/ETHPy150Open saltstack/salt/salt/cli/daemons.py/ProxyMinion.start
def prepare(self): ''' Run the preparation sequence required to start a salt syndic minion. If sub-classed, don't **ever** forget to run: super(YourSubClass, self).prepare() ''' super(Syndic, self).prepare() try: if self.config['verify_env']: verify_env( [ self.config['pki_dir'], self.config['cachedir'], self.config['sock_dir'], self.config['extension_modules'], ], self.config['user'], permissive=self.config['permissive_pki_access'], pki_dir=self.config['pki_dir'], ) logfile = self.config['log_file'] if logfile is not None and not logfile.startswith(('tcp://', 'udp://', 'file://')): # Logfile is not using Syslog, verify current_umask = os.umask(0o027) verify_files([logfile], self.config['user']) os.umask(current_umask) except __HOLE__ as error: self.environment_failure(error) self.setup_logfile_logger() verify_log(self.config) self.action_log_info('Setting up "{0}"'.format(self.config['id'])) # Late import so logging works correctly import salt.minion self.daemonize_if_required() # if its a multisyndic, do so if isinstance(self.config.get('master'), list): self.syndic = salt.minion.MultiSyndic(self.config) else: self.syndic = salt.minion.Syndic(self.config) self.set_pidfile()
OSError
dataset/ETHPy150Open saltstack/salt/salt/cli/daemons.py/Syndic.prepare
def start(self):
    '''
    Start the actual syndic.

    If sub-classed, don't **ever** forget to run:

        super(YourSubClass, self).start()

    NOTE: Run any required code before calling `super()`.
    '''
    super(Syndic, self).start()
    if check_user(self.config['user']):
        self.action_log_info('Starting up')
        self.verify_hash_type()
        try:
            self.syndic.tune_in()
        except __HOLE__:
            self.action_log_info('Stopping')
            self.shutdown()
KeyboardInterrupt
dataset/ETHPy150Open saltstack/salt/salt/cli/daemons.py/Syndic.start
def write(self, data): """Write data to the file. There is no return value. `data` can be either a string of bytes or a file-like object (implementing :meth:`read`). If the file has an :attr:`encoding` attribute, `data` can also be a :class:`unicode` (:class:`str` in python 3) instance, which will be encoded as :attr:`encoding` before being written. Due to buffering, the data may not actually be written to the database until the :meth:`close` method is called. Raises :class:`ValueError` if this file is already closed. Raises :class:`TypeError` if `data` is not an instance of :class:`str` (:class:`bytes` in python 3), a file-like object, or an instance of :class:`unicode` (:class:`str` in python 3). Unicode data is only allowed if the file has an :attr:`encoding` attribute. :Parameters: - `data`: string of bytes or file-like object to be written to the file .. versionadded:: 1.9 The ability to write :class:`unicode`, if the file has an :attr:`encoding` attribute. """ if self._closed: raise ValueError("cannot write to a closed file") try: # file-like read = data.read except AttributeError: # string if not isinstance(data, string_types): raise TypeError("can only write strings or file-like objects") if isinstance(data, unicode): try: data = data.encode(self.encoding) except __HOLE__: raise TypeError("must specify an encoding for file in " "order to write %s" % (text_type.__name__,)) read = StringIO(data).read if self._buffer.tell() > 0: # Make sure to flush only when _buffer is complete space = self.chunk_size - self._buffer.tell() if space: to_write = read(space) self._buffer.write(to_write) if len(to_write) < space: return # EOF or incomplete self.__flush_buffer() to_write = read(self.chunk_size) while to_write and len(to_write) == self.chunk_size: self.__flush_data(to_write) to_write = read(self.chunk_size) self._buffer.write(to_write)
AttributeError
dataset/ETHPy150Open blynch/CloudMemeBackend/gridfs/grid_file.py/GridIn.write
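The `AttributeError` branch above is plain duck typing: prefer a file-like `read`, otherwise encode the string and wrap it so the rest of the method only ever deals with a reader. A Python 3 restatement of that idea (not GridFS's actual code):

from io import BytesIO

def as_reader(data, encoding='utf-8'):
    try:
        return data.read              # file-like object
    except AttributeError:
        if isinstance(data, str):     # text needs an encoding before storage
            data = data.encode(encoding)
        return BytesIO(data).read

read = as_reader('hello')
print(read(2))                        # -> b'he'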
def load_and_fix_json(json_path):
    """Tries to load a json object from a file.
    If that fails, tries to fix common errors (no or extra , at end of the line).
    """
    with open(json_path) as f:
        json_str = f.read()
    log.debug('Configuration file %s read correctly', json_path)
    config = None
    try:
        return json.loads(json_str)
    except ValueError as e:
        log.debug('Could not parse configuration %s: %s', json_str, e, exc_info=True)
        # Attempt to fix extra ,
        json_str = re.sub(r",[ \n]*}", "}", json_str)
        # Attempt to fix missing ,
        json_str = re.sub(r"([^{,]) *\n *(\")", r"\1,\n \2", json_str)
        try:
            log.debug('Attempting to reload automatically fixed configuration file %s', json_str)
            config = json.loads(json_str)
            with open(json_path, 'w') as f:
                json.dump(config, f, indent=2)
            log.debug('Fixed configuration saved in file %s', json_path)
            prompt("[Some errors in your jrnl config have been fixed for you.]")
            return config
        except __HOLE__ as e:
            log.debug('Could not load fixed configuration: %s', e, exc_info=True)
            prompt("[There seems to be something wrong with your jrnl config at {0}: {1}]".format(json_path, e.message))
            prompt("[Entry was NOT added to your journal]")
            sys.exit(1)
ValueError
dataset/ETHPy150Open maebert/jrnl/jrnl/util.py/load_and_fix_json
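The two `re.sub` calls do all the repair work: the first drops a trailing comma before a closing brace, the second inserts a comma missing at the end of a line. Applied to a small broken config:

import json
import re

broken = '{\n  "editor": "vim"\n  "encrypt": false,\n}'

fixed = re.sub(r",[ \n]*}", "}", broken)                  # remove the extra ,
fixed = re.sub(r"([^{,]) *\n *(\")", r"\1,\n \2", fixed)  # add the missing ,

print(json.loads(fixed))   # -> {'editor': 'vim', 'encrypt': False}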
def attach_enctype_error_multidict(request):
    """Since Flask 0.8 we're monkeypatching the files object in case a
    request is detected that does not use multipart form data but the files
    object is accessed.
    """
    oldcls = request.files.__class__
    class newcls(oldcls):
        def __getitem__(self, key):
            try:
                return oldcls.__getitem__(self, key)
            except __HOLE__:
                if key not in request.form:
                    raise
                raise DebugFilesKeyError(request, key)
    newcls.__name__ = oldcls.__name__
    newcls.__module__ = oldcls.__module__
    request.files.__class__ = newcls
KeyError
dataset/ETHPy150Open pallets/flask/flask/debughelpers.py/attach_enctype_error_multidict
def FilterAlignedPairForPositions(seq1, seq2, method):
    """given the method, return a set of aligned sequences
    only containing certain positions.

    Available filters:
    all: do nothing.
    codon1,codon2,codon3: return 1st, 2nd, 3rd codon positions only.
    d4: only changes within fourfold-degenerate sites
    """

    l1 = len(seq1)
    l2 = len(seq2)

    if method == "all":
        return seq1, seq2
    elif method == "codon1":
        return ("".join([seq1[x] for x in range(0, l1, 3)]),
                "".join([seq2[x] for x in range(0, l2, 3)]))
    elif method == "codon2":
        return ("".join([seq1[x] for x in range(1, l1, 3)]),
                "".join([seq2[x] for x in range(1, l2, 3)]))
    elif method == "codon3":
        return ("".join([seq1[x] for x in range(2, l1, 3)]),
                "".join([seq2[x] for x in range(2, l2, 3)]))
    elif method == "d4":
        s1 = []
        s2 = []
        for x in range(0, l1, 3):
            codon1 = seq1[x:x + 3]
            codon2 = seq2[x:x + 3]
            try:
                aa1, deg11, deg12, deg13 = Genomics.GetDegeneracy(codon1)
                aa2, deg11, deg22, deg23 = Genomics.GetDegeneracy(codon2)
            except __HOLE__:
                continue
            if aa1 == aa2 and deg13 == 4 and deg23 == 4:
                s1.append(codon1[2])
                s2.append(codon2[2])
        return "".join(s1), "".join(s2)

# ------------------------------------------------------------------------
KeyError
dataset/ETHPy150Open CGATOxford/cgat/scripts/fasta2distances.py/FilterAlignedPairForPositions
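The `codon1`/`codon2`/`codon3` filters are equivalent to strided slices over the aligned sequence: every third base starting at offset 0, 1 or 2. For example:

seq = "ATGGCCAAA"        # three codons: ATG GCC AAA

print(seq[0::3])         # first codon positions  -> "AGA"
print(seq[1::3])         # second codon positions -> "TCA"
print(seq[2::3])         # third codon positions  -> "GCA"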
def main(argv=None): """script main. parses command line options in sys.argv, unless *argv* is given. """ if argv is None: argv = sys.argv parser = E.OptionParser( version="%prog version: $Id: fasta2distances.py 2781 2009-09-10 11:33:14Z andreas $") parser.add_option("--filters", dest="filters", type="string", help="Filters to use for filtering sequences [all|codon1|codon2|codon3|d4].") parser.add_option("--fields", dest="fields", type="string", help="Fields to output [aligned|nunaligned1|nunaligned2|identical|transitions|transversions|jc69|t92].") parser.set_defaults( filename_map=None, filters="all,codon1,codon2,codon3,d4", gap_char="-", fields="aligned,unaligned1,unaligned2,identical,transitions,transversions,jc69,t92", ) (options, args) = E.Start(parser, add_pipe_options=True) options.filters = options.filters.split(",") options.fields = options.fields.split(",") iterator = FastaIterator.FastaIterator(options.stdin) headers = ["id1", "id2"] for f in options.filters: headers += list(map(lambda x: "%s_%s" % (f, x), options.fields)) options.stdout.write("\t".join(headers) + "\n") while 1: try: cur_record = iterator.next() if cur_record is None: break first_record = cur_record cur_record = iterator.next() if cur_record is None: break second_record = cur_record except __HOLE__: break if len(first_record.sequence) != len(second_record.sequence): raise "sequences %s and %s of unequal length" % ( first_record.title, second_record.title) if len(first_record.sequence) % 3 != 0: raise "sequence %s not multiple of 3" % first_record.title # old: Bio.Alphabet.IUPAC.extended_dna.letters alphabet = "ACGT" + options.gap_char result = [] for f in options.filters: s1, s2 = FilterAlignedPairForPositions(first_record.sequence, second_record.sequence, f) info = Genomics.CalculatePairIndices(s1, s2, options.gap_char) for field in options.fields: if field == "aligned": c = "%i" % info.mNAligned elif field == "unaligned1": c = "%i" % info.mNUnaligned1 elif field == "unaligned2": c = "%i" % info.mNUnaligned2 elif field == "transversions": c = "%i" % info.mNTransversions elif field == "transitions": c = "%i" % info.mNTransitions elif field == "identical": c = "%i" % info.mNIdentical elif field == "jc69": try: c = "%6.4f" % CalculateDistanceJC69(info)[0] except ValueError: c = "nan" elif field == "t92": try: c = "%6.4f" % CalculateDistanceT92(info)[0] except ValueError: c = "nan" else: raise "Unknown field %s" % field result.append(c) options.stdout.write("%s\t%s\t%s\n" % (first_record.title, second_record.title, "\t".join(result))) E.Stop()
StopIteration
dataset/ETHPy150Open CGATOxford/cgat/scripts/fasta2distances.py/main
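CalculateDistanceJC69 and CalculateDistanceT92 are defined elsewhere in the script; as a hedged sketch, a Jukes-Cantor 1969 distance can be derived from the pair counts collected above (the attribute names follow CalculatePairIndices as used above, and the formula is the textbook JC69 correction, not necessarily the script's exact implementation):

import math

def jc69_distance(naligned, ntransitions, ntransversions):
    # proportion of differing aligned sites
    p = float(ntransitions + ntransversions) / naligned
    # JC69 correction; the log argument goes non-positive for p >= 0.75,
    # raising ValueError, which is presumably why the caller prints "nan"
    return -0.75 * math.log(1.0 - 4.0 * p / 3.0)

print("%6.4f" % jc69_distance(100, 6, 4))  # 10 differences over 100 sites -> 0.1073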
@respond_to("image me (?P<search_query>.*)$") def image_me(self, message, search_query): """image me ___ : Search google images for ___, and post a random one.""" data = { "q": search_query, "v": "1.0", "safe": "active", "rsz": "8" } r = requests.get("http://ajax.googleapis.com/ajax/services/search/images", params=data) try: results = r.json()["responseData"]["results"] except __HOLE__: results = [] if len(results) > 0: url = random.choice(results)["unescapedUrl"] self.say("%s" % url, message=message) else: self.say("Couldn't find anything!", message=message)
TypeError
dataset/ETHPy150Open skoczen/will/will/plugins/productivity/images.py/ImagesPlugin.image_me
def _free(self, block):
    # free location and try to merge with neighbours
    (arena, start, stop) = block

    try:
        prev_block = self._stop_to_block[(arena, start)]
    except KeyError:
        pass
    else:
        start, _ = self._absorb(prev_block)

    try:
        next_block = self._start_to_block[(arena, stop)]
    except __HOLE__:
        pass
    else:
        _, stop = self._absorb(next_block)

    block = (arena, start, stop)
    length = stop - start

    try:
        self._len_to_seq[length].append(block)
    except KeyError:
        self._len_to_seq[length] = [block]
        bisect.insort(self._lengths, length)

    self._start_to_block[(arena, start)] = block
    self._stop_to_block[(arena, stop)] = block
KeyError
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/multiprocessing/heap.py/Heap._free
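A self-contained sketch of the coalescing idea in _free above: a freed (arena, start, stop) block absorbs any free neighbour that ends at its start and any free neighbour that begins at its stop, so adjacent free blocks collapse into one. The dict names echo the ones above, but the Arena objects, _absorb bookkeeping and length index are left out:

# toy model: free blocks keyed by their start and stop offsets within one arena
start_to_block = {}
stop_to_block = {}

def free(arena, start, stop):
    # merge with a free block that ends exactly where this one starts
    prev = stop_to_block.pop((arena, start), None)
    if prev is not None:
        del start_to_block[(arena, prev[1])]
        start = prev[1]
    # merge with a free block that starts exactly where this one stops
    nxt = start_to_block.pop((arena, stop), None)
    if nxt is not None:
        del stop_to_block[(arena, nxt[2])]
        stop = nxt[2]
    start_to_block[(arena, start)] = stop_to_block[(arena, stop)] = (arena, start, stop)

free("a", 0, 16)
free("a", 32, 48)
free("a", 16, 32)      # bridges the two existing free blocks
print(start_to_block)  # {('a', 0): ('a', 0, 48)}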
def process_request(self, request):
    try:
        request.django_root
    except __HOLE__:
        request.django_root = ''

    login_url = settings.LOGIN_URL + '?next=%s' % request.path

    if request.path.startswith(request.django_root):
        path = request.path[len(request.django_root):]
    else:
        return HttpResponseRedirect(login_url)

    if not request.user.is_authenticated() and not \
            (path == settings.LOGIN_URL or self.matches(path)):
        logger.debug("Redirecting locked down site %s to login." % path)
        return HttpResponseRedirect(login_url)

    return None
AttributeError
dataset/ETHPy150Open fp7-ofelia/ocf/expedient/src/python/expedient/common/middleware/sitelockdown.py/SiteLockDown.process_request
def processMessage(self, message):
    """Parse a message and post it as a metric."""

    if self.factory.verbose:
        log.listener("Message received: %s" % (message,))

    metric = message.routing_key

    for line in message.content.body.split("\n"):
        line = line.strip()
        if not line:
            continue

        try:
            if settings.get("AMQP_METRIC_NAME_IN_BODY", False):
                metric, value, timestamp = line.split()
            else:
                value, timestamp = line.split()
            datapoint = (float(timestamp), float(value))
        except __HOLE__:
            log.listener("invalid message line: %s" % (line,))
            continue

        events.metricReceived(metric, datapoint)

        if self.factory.verbose:
            log.listener("Metric posted: %s %s %s" %
                         (metric, value, timestamp,))
ValueError
dataset/ETHPy150Open tmm1/graphite/carbon/lib/carbon/amqp_listener.py/AMQPGraphiteProtocol.processMessage
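For reference, a rough sketch of the two line formats the loop above accepts: "value timestamp" with the metric taken from the routing key, or "metric value timestamp" when AMQP_METRIC_NAME_IN_BODY is set. Only that setting name comes from the code above; the helper itself is hypothetical:

def parse_metric_line(line, default_metric, name_in_body=False):
    # returns (metric, (timestamp, value)) or None for unparseable lines
    try:
        if name_in_body:
            metric, value, timestamp = line.split()
        else:
            metric = default_metric
            value, timestamp = line.split()
        return metric, (float(timestamp), float(value))
    except ValueError:
        return None

print(parse_metric_line("42.0 1300000000", "servers.a.load"))
print(parse_metric_line("servers.a.load 42.0 1300000000", None, name_in_body=True))
print(parse_metric_line("garbage", "servers.a.load"))  # unparseable -> None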
def process_constraints(data, constraint_fields):
    """
    Callback to move constrained fields from incoming data into a
    'constraints' key.

    :param data: Incoming argument dict
    :param constraint_fields: Constrained fields to move into 'constraints' dict
    """
    # Always use a list so that we can handle bulk operations
    objects = data if isinstance(data, list) else [data]

    # Enforce that pattern is a string.
    if data['pattern'] is None:
        data['pattern'] = ''

    for obj in objects:
        constraints = {}
        for c_field in constraint_fields:
            try:
                c_value = obj.pop(c_field)
            except __HOLE__:
                continue
            else:
                # If the value is not set, translate it to False
                if c_value is None:
                    c_value = False
                constraints[c_field] = c_value

        obj['constraints'] = constraints

    return data
KeyError
dataset/ETHPy150Open dropbox/pynsot/pynsot/commands/callbacks.py/process_constraints
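A quick illustration of what process_constraints does to a payload, using made-up resource and constraint field names (they are not NSoT's real ones):

payload = {"name": "net0", "pattern": None, "state": None, "allocated": True}
result = process_constraints(payload, ["state", "allocated"])
# result == {"name": "net0", "pattern": "",
#            "constraints": {"state": False, "allocated": True}}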
def _enabled_item_name(enabled_item):
    """Returns the toggle item name

    Toggles are of the form: {namespace}:{toggle_item} or {toggle_item}
    The latter case is used occasionally if the namespace is "USER"
    """
    try:
        return enabled_item.split(":")[1]
    except __HOLE__:
        return enabled_item.split(":")[0]
IndexError
dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/toggle_ui/views.py/_enabled_item_name
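In other words, the helper strips an optional namespace prefix; for example:

print(_enabled_item_name("domain:example.com"))    # -> "example.com"
print(_enabled_item_name("someuser@example.com"))  # no ":" present, returned unchanged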
def get_connection(backend=None, template_prefix=None, template_suffix=None,
                   fail_silently=False, **kwargs):
    """Load a templated e-mail backend and return an instance of it.

    If backend is None (default) settings.TEMPLATED_EMAIL_BACKEND is used.

    Both fail_silently and other keyword arguments are used in the
    constructor of the backend.
    """
    # This method is mostly a copy of the backend loader present in
    # django.core.mail.get_connection
    klass_path = backend or getattr(settings, 'TEMPLATED_EMAIL_BACKEND',
                                    TemplateBackend)
    if isinstance(klass_path, six.string_types):
        try:
            # First check if class name is omitted and we have module in settings
            mod = import_module(klass_path)
            klass_name = 'TemplateBackend'
        except ImportError as e:
            # Fallback to class name
            try:
                mod_name, klass_name = klass_path.rsplit('.', 1)
                mod = import_module(mod_name)
            except ImportError as e:
                raise ImproperlyConfigured(
                    ('Error importing templated email backend module %s: "%s"'
                     % (mod_name, e)))
        try:
            klass = getattr(mod, klass_name)
        except __HOLE__:
            raise ImproperlyConfigured(('Module "%s" does not define a '
                                        '"%s" class' % (mod_name, klass_name)))
    else:
        klass = klass_path

    return klass(fail_silently=fail_silently, template_prefix=template_prefix,
                 template_suffix=template_suffix, **kwargs)
AttributeError
dataset/ETHPy150Open BradWhittington/django-templated-email/templated_email/__init__.py/get_connection
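A hedged sketch of the two-step lookup get_connection performs on a dotted path: first try the whole string as a module (with the class name defaulting to TemplateBackend), then fall back to splitting off the final component as the class name. The module paths in the comments are hypothetical:

from importlib import import_module

def load_backend_class(path, default_class_name='TemplateBackend'):
    # 1) the path may simply name a module that defines the default class ...
    try:
        mod, klass_name = import_module(path), default_class_name
    except ImportError:
        # 2) ... otherwise treat the last dotted component as the class name
        mod_name, klass_name = path.rsplit('.', 1)
        mod = import_module(mod_name)
    return getattr(mod, klass_name)

# both of these would resolve to the same class, assuming such a module exists:
#   load_backend_class('myproject.email_backend')
#   load_backend_class('myproject.email_backend.TemplateBackend')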
def load_settings_layer(self, file_name):
    try:
        return json.load(open(os.path.join(self.settings_dir, file_name)))
    except (IOError, __HOLE__):
        return {}
OSError
dataset/ETHPy150Open ohmu/poni/poni/core.py/Config.load_settings_layer
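The empty-dict fallback above is what makes settings layers stackable; a rough sketch of merging layers on top of each other (the layer file names are made up):

def merged_settings(config, layer_files=("defaults.json", "local.json")):
    # later layers override earlier ones; missing or unreadable files contribute nothing
    settings = {}
    for file_name in layer_files:
        settings.update(config.load_settings_layer(file_name))
    return settings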
def add_config(self, config, parent=None, copy_dir=None):
    config_dir = os.path.join(self.path, CONFIG_DIR, config)
    if os.path.exists(config_dir):
        raise errors.UserError(
            "%s: config %r already exists" % (self.name, config))

    if copy_dir:
        try:
            shutil.copytree(copy_dir, config_dir, symlinks=True)
        except (IOError, __HOLE__) as error:
            raise errors.Error("copying config files failed: %s: %s" % (
                error.__class__.__name__, error))
    else:
        os.makedirs(config_dir)

    conf_file = os.path.join(config_dir, CONFIG_CONF_FILE)
    conf = {}
    if parent:
        conf["parent"] = parent

    util.json_dump(conf, conf_file)

    settings_dir = os.path.join(config_dir, SETTINGS_DIR)
    if not os.path.exists(settings_dir):
        # pre-created so it is there for copying files
        os.mkdir(settings_dir)
OSError
dataset/ETHPy150Open ohmu/poni/poni/core.py/Node.add_config
def __init__(self, system, name, system_path, sub_count, extra=None):
    Item.__init__(self, "system", system, name, system_path,
                  os.path.join(system_path, SYSTEM_CONF_FILE), extra)
    self["sub_count"] = sub_count
    try:
        self.update(json.load(open(self.conf_file)))
    except __HOLE__:
        pass
IOError
dataset/ETHPy150Open ohmu/poni/poni/core.py/System.__init__