function: string, lengths 79 to 138k
label: string, 20 classes
info: string, lengths 42 to 261
def __init__(self): import os.path from test import test_timeout self.valid = False if _platform in _expectations: s = _expectations[_platform] self.expected = set(s.split()) # expected to be skipped on every platform, even Linux self.expected.add('test_linuxaudiodev') if not os.path.supports_unicode_filenames: self.expected.add('test_pep277') try: from test import test_socket_ssl except __HOLE__: pass else: if test_socket_ssl.skip_expected: self.expected.add('test_socket_ssl') if test_timeout.skip_expected: self.expected.add('test_timeout') if sys.maxint == 9223372036854775807L: self.expected.add('test_imageop') if not sys.platform in ("mac", "darwin"): MAC_ONLY = ["test_macos", "test_macostools", "test_aepack", "test_plistlib", "test_scriptpackages", "test_applesingle"] for skip in MAC_ONLY: self.expected.add(skip) elif len(u'\0'.encode('unicode-internal')) == 4: self.expected.add("test_macostools") if sys.platform != "win32": # test_sqlite is only reliable on Windows where the library # is distributed with Python WIN_ONLY = ["test_unicode_file", "test_winreg", "test_winsound", "test_startfile", "test_sqlite"] for skip in WIN_ONLY: self.expected.add(skip) if sys.platform != 'irix': IRIX_ONLY = ["test_imageop", "test_al", "test_cd", "test_cl", "test_gl", "test_imgfile"] for skip in IRIX_ONLY: self.expected.add(skip) if sys.platform != 'sunos5': self.expected.add('test_sunaudiodev') self.expected.add('test_nis') if not sys.py3kwarning: self.expected.add('test_py3kwarn') if test_support.is_jython: if os._name != 'posix': self.expected.update([ 'test_grp', 'test_mhlib', 'test_posix', 'test_pwd', 'test_signal']) if os._name != 'nt': self.expected.add('test_nt_paths_jy') self.valid = True
ImportError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/regrtest.py/_ExpectedSkips.__init__
def walk_together(*readers): """ Walk a set of readers and return lists of records from each reader, with None if no record present. Caller must check the inputs are sorted in the same way and use the same reference otherwise behaviour is undefined. """ nexts = [reader.next() for reader in readers] while True: min_next = min([x for x in nexts if x is not None]) # this line uses equality on Records, which checks the ALTs # not sure what to do with records that have overlapping but different # variation yield [x if x is None or x == min_next else None for x in nexts] # update nexts that we just yielded for i, n in enumerate(nexts): if n is not None and n == min_next: try: nexts[i] = readers[i].next() except __HOLE__: nexts[i] = None if all([x is None for x in nexts]): break
StopIteration
dataset/ETHPy150Open arq5x/cyvcf/cyvcf/utils.py/walk_together
def profiled(f, outputFile): def _(*args, **kwargs): if sys.version_info[0:2] != (2, 4): import profile prof = profile.Profile() try: result = prof.runcall(f, *args, **kwargs) prof.dump_stats(outputFile) except __HOLE__: pass prof.print_stats() return result else: # use hotshot, profile is broken in 2.4 import hotshot.stats prof = hotshot.Profile(outputFile) try: return prof.runcall(f, *args, **kwargs) finally: stats = hotshot.stats.load(outputFile) stats.strip_dirs() stats.sort_stats('cum') # 'time' stats.print_stats(100) return _
SystemExit
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/trial/util.py/profiled
def findObject(name): """Get a fully-named package, module, module-global object or attribute. Forked from twisted.python.reflect.namedAny. Returns a tuple of (bool, obj). If bool is True, the named object exists and is returned as obj. If bool is False, the named object does not exist and the value of obj is unspecified. """ names = name.split('.') topLevelPackage = None moduleNames = names[:] while not topLevelPackage: trialname = '.'.join(moduleNames) if len(trialname) == 0: return (False, None) try: topLevelPackage = __import__(trialname) except ImportError: # if the ImportError happened in the module being imported, # this is a failure that should be handed to our caller. # count stack frames to tell the difference. exc_info = sys.exc_info() if len(traceback.extract_tb(exc_info[2])) > 1: try: # Clean up garbage left in sys.modules. del sys.modules[trialname] except __HOLE__: # Python 2.4 has fixed this. Yay! pass raise exc_info[0], exc_info[1], exc_info[2] moduleNames.pop() obj = topLevelPackage for n in names[1:]: try: obj = getattr(obj, n) except AttributeError: return (False, obj) return (True, obj)
KeyError
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/trial/util.py/findObject
def _removeSafely(path): """ Safely remove a path, recursively. If C{path} does not contain a node named C{_trial_marker}, a L{_NoTrialmarker} exception is raised and the path is not removed. """ if not path.child('_trial_marker').exists(): raise _NoTrialMarker( '%r is not a trial temporary path, refusing to remove it' % (path,)) try: path.remove() except __HOLE__, e: print ("could not remove %r, caught OSError [Errno %s]: %s" % (path, e.errno, e.strerror)) try: newPath = FilePath('_trial_temp_old%s' % (randrange(1000000),)) path.moveTo(newPath) except OSError, e: print ("could not rename path, caught OSError [Errno %s]: %s" % (e.errno,e.strerror)) raise
OSError
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/trial/util.py/_removeSafely
def _get_class(self, name): klass = None name = identifier(name) try: return self._classcache[name] except __HOLE__: pass for mod in self.doc.dtds: try: klass = getattr(mod, name) except AttributeError: continue if klass: self._classcache[name] = klass return klass raise AttributeError
KeyError
dataset/ETHPy150Open kdart/pycopia/XML/pycopia/XML/POMparse.py/ContentHandler._get_class
def startElement(self, name, atts): "Handle an event for the beginning of an element." try: klass = self._get_class(name) except __HOLE__: raise POM.ValidationError("Undefined element tag: " + name) attr = {} for name, value in atts.items(): attr[keyword_identifier(POM.normalize_unicode(name))] = POM.unescape(value) obj = klass(**attr) self.stack.append(obj)
AttributeError
dataset/ETHPy150Open kdart/pycopia/XML/pycopia/XML/POMparse.py/ContentHandler.startElement
def endElement(self, name): "Handle an event for the end of an element." obj = self.stack.pop() try: self.stack[-1].append(obj) except __HOLE__: self.msg = obj
IndexError
dataset/ETHPy150Open kdart/pycopia/XML/pycopia/XML/POMparse.py/ContentHandler.endElement
def remove(path, force=False): ''' Remove the named file or directory :param str path: The path to the file or directory to remove. :param bool force: Remove even if marked Read-Only :return: True if successful, False if unsuccessful :rtype: bool CLI Example: .. code-block:: bash salt '*' file.remove C:\\Temp ''' # This must be a recursive function in windows to properly deal with # Symlinks. The shutil.rmtree function will remove the contents of # the Symlink source in windows. path = os.path.expanduser(path) # Does the file/folder exists if not os.path.exists(path): return 'File/Folder not found: {0}'.format(path) if not os.path.isabs(path): raise SaltInvocationError('File path must be absolute.') # Remove ReadOnly Attribute if force: # Get current file attributes file_attributes = win32api.GetFileAttributes(path) win32api.SetFileAttributes(path, win32con.FILE_ATTRIBUTE_NORMAL) try: if os.path.isfile(path): # A file and a symlinked file are removed the same way os.remove(path) elif is_link(path): # If it's a symlink directory, use the rmdir command os.rmdir(path) else: for name in os.listdir(path): item = '{0}\\{1}'.format(path, name) # If it's a normal directory, recurse to remove it's contents remove(item, force) # rmdir will work now because the directory is empty os.rmdir(path) except (OSError, __HOLE__) as exc: if force: # Reset attributes to the original if delete fails. win32api.SetFileAttributes(path, file_attributes) raise CommandExecutionError( 'Could not remove {0!r}: {1}'.format(path, exc) ) return True
IOError
dataset/ETHPy150Open saltstack/salt/salt/modules/win_file.py/remove
def do_export(self, args): try: #add arguments doParser = self.arg_export() try: doArgs = doParser.parse_args(args.split()) except SystemExit as e: return #call UForge API printer.out("Exporting template with id ["+doArgs.id+"] :") myAppliance = self.api.Users(self.login).Appliances(doArgs.id).Get() if myAppliance is None or type(myAppliance) is not Appliance: printer.out("No template") else: applianceExport = self.api.Users(self.login).Appliances(myAppliance.dbId).Exports().Export() status = applianceExport.status progress = ProgressBar(widgets=[Percentage(), Bar()], maxval=100).start() while not (status.complete or status.error): progress.update(status.percentage) status = self.api.Users(self.login).Appliances(myAppliance.dbId).Exports(applianceExport.dbId).Status.Get() time.sleep(2) progress.finish() if status.error: printer.out("Export error: "+status.message+"\n"+status.errorMessage, printer.ERROR) if status.detailedError: printer.out(status.detailedErrorMsg) else: printer.out("Downloading archive...") data = self.api.Users(self.login).Appliances(myAppliance.dbId).Exports(applianceExport.dbId).Downloads.Download() if doArgs.file is None: file = open("archive.tar.gz", "w") else: file = open(doArgs.file, "w") file.write(data) file.close() #Delete export archive on the server self.api.Users(self.login).Appliances(myAppliance.dbId).Exports(applianceExport.dbId).Delete() printer.out("Download complete of file ["+file.name+"]", printer.OK) return 0 except __HOLE__ as e: printer.out("File error: "+str(e), printer.ERROR) except ArgumentParserError as e: printer.out("ERROR: In Arguments: "+str(e), printer.ERROR) self.help_export() except Exception as e: return handle_uforge_exception(e)
IOError
dataset/ETHPy150Open usharesoft/hammr/src/hammr/commands/template/template.py/Template.do_export
def do_import(self, args): try: #add arguments doParser = self.arg_import() try: doArgs = doParser.parse_args(args.split()) except __HOLE__ as e: return #call UForge API return self.import_stack(doArgs.file, True, doArgs.force, doArgs.rbundles, doArgs.use_major) except ArgumentParserError as e: printer.out("In Arguments: "+str(e)+"\n", printer.ERROR) self.help_import() except Exception as e: return handle_uforge_exception(e)
SystemExit
dataset/ETHPy150Open usharesoft/hammr/src/hammr/commands/template/template.py/Template.do_import
def do_validate(self, args): try: #add arguments doParser = self.arg_validate() try: doArgs = doParser.parse_args(args.split()) except __HOLE__ as e: return file = generics_utils.get_file(doArgs.file) if file is None: return 2 template=generics_utils.validate_json_file(file) if template is None: return 2 return 0 except ArgumentParserError as e: printer.out("ERROR: In Arguments: "+str(e), printer.ERROR) self.help_validate()
SystemExit
dataset/ETHPy150Open usharesoft/hammr/src/hammr/commands/template/template.py/Template.do_validate
def do_create(self, args): try: #add arguments doParser = self.arg_create() try: doArgs = doParser.parse_args(args.split()) except SystemExit as e: return #-- #get json file (remote or local) file = generics_utils.get_file(doArgs.file) if file is None: return 2 template=validate_json_file(file) if template is None: return 2 if "builders" in template: template["builders"]=None archive_files=[] if "config" in template["stack"]: for config in template["stack"]["config"]: #add to list of file to tar if "source" in config: file_tar_path=constants.FOLDER_CONFIGS + os.sep + generics_utils.remove_URI_forbidden_char(ntpath.basename(config["source"])) archive_files.append([file_tar_path,config["source"]]) #changing source path to archive related source path config["source"]=file_tar_path else: printer.out("No source file found in config", printer.ERROR) return 2 try: if "bundles" in template["stack"]: for bundle in template["stack"]["bundles"]: if "files" in bundle: for files in bundle["files"]: #add to list of file to tar file_tar_path=constants.FOLDER_BUNDLES + os.sep + generics_utils.remove_URI_forbidden_char(bundle["name"]) + os.sep + generics_utils.remove_URI_forbidden_char(bundle["version"]) + os.sep + generics_utils.remove_URI_forbidden_char(ntpath.basename(files["source"])) archive_files.append([file_tar_path,files["source"]]) #changing source path to archive related source path files["source"]=file_tar_path else: printer.out("No files section found for bundle", printer.ERROR) return 2 if "license" in bundle and "source" in bundle["license"]: #add to list of file to tar file_tar_path=constants.FOLDER_BUNDLES + os.sep + generics_utils.remove_URI_forbidden_char(bundle["name"]) + os.sep + generics_utils.remove_URI_forbidden_char(ntpath.basename(bundle["license"]["source"])) archive_files.append([file_tar_path,bundle["license"]["source"]]) #changing source path to archive related source path bundle["license"]["source"]=file_tar_path except KeyError as e: printer.out("Error in bundle", printer.ERROR) return 2 if "source_logo" in template["stack"]: #add to list of file to tar file_tar_path=constants.FOLDER_LOGO + os.sep + generics_utils.remove_URI_forbidden_char(ntpath.basename(template["stack"]["source_logo"])) archive_files.append([file_tar_path,template["stack"]["source_logo"]]) #changing source path to archive related source path template["stack"]["source_logo"]=file_tar_path if os.path.isdir(constants.TMP_WORKING_DIR): #delete tmp dir shutil.rmtree(constants.TMP_WORKING_DIR) os.mkdir(constants.TMP_WORKING_DIR) file = open(constants.TMP_WORKING_DIR + os.sep + constants.TEMPLATE_JSON_NEW_FILE_NAME, "w") json.dump(template, file, indent=4, separators=(',', ': ')) file.close() archive_files.append([constants.TEMPLATE_JSON_FILE_NAME, constants.TMP_WORKING_DIR+ os.sep +constants.TEMPLATE_JSON_NEW_FILE_NAME]) if doArgs.archive_path is not None: tar_path = doArgs.archive_path else: tar_path = constants.TMP_WORKING_DIR+os.sep+"archive.tar.gz" tar = tarfile.open(tar_path, "w|gz") for file_tar_path,file_global_path in archive_files: file = generics_utils.get_file(file_global_path, constants.TMP_WORKING_DIR+os.sep+os.path.basename(file_global_path)) if file is None: printer.out("Downloaded bunlde file not found", printer.ERROR) return 2 tar.add(file, arcname=file_tar_path) tar.close() #arhive is created, doing import r = self.import_stack(tar_path, False, doArgs.force, doArgs.rbundles, doArgs.use_major) if r != 0: return r #delete tmp dir shutil.rmtree(constants.TMP_WORKING_DIR) return 0 except __HOLE__ as e: printer.out("OSError: "+str(e), printer.ERROR) except IOError as e: printer.out("File error: "+str(e), printer.ERROR) except ArgumentParserError as e: printer.out("In Arguments: "+str(e), printer.ERROR) self.help_create() except Exception as e: return handle_uforge_exception(e)
OSError
dataset/ETHPy150Open usharesoft/hammr/src/hammr/commands/template/template.py/Template.do_create
def do_build(self, args): try: #add arguments doParser = self.arg_build() try: doArgs = doParser.parse_args(args.split()) except SystemExit as e: return #-- template=validate_json_file(doArgs.file) if template is None: return 2 if doArgs.id: myAppliance = self.api.Users(self.login).Appliances(doArgs.id).Get() myAppliance = myAppliance.appliances.appliance else: #Get template which correpond to the template file myAppliance = self.api.Users(self.login).Appliances().Getall(Query="name=='"+template["stack"]["name"]+"';version=='"+template["stack"]["version"]+"'") myAppliance = myAppliance.appliances.appliance if myAppliance is None or len(myAppliance)!=1: printer.out("No template found on the plateform") return 0 myAppliance=myAppliance[0] rInstallProfile = self.api.Users(self.login).Appliances(myAppliance.dbId).Installprofile("").Get() if rInstallProfile is None: printer.out("No installation found on the template '"+template["stack"]["name"]+"'", printer.ERROR) return 0 try: i=1 if doArgs.junit is not None: test_results=[] for builder in template["builders"]: try: printer.out("Generating '"+builder["type"]+"' image ("+str(i)+"/"+str(len(template["builders"]))+")") if doArgs.junit is not None: test = TestCase('Generation '+builder["type"]) test_results.append(test) start_time = time.time() format_type = builder["type"] targetFormat = generate_utils.get_target_format_object(self.api, self.login, format_type) if targetFormat is None: printer.out("Builder type unknown: "+format_type, printer.ERROR) return 2 myimage = image() myinstallProfile = installProfile() if rInstallProfile.partitionAuto: if "installation" in builder: if "swapSize" in builder["installation"]: myinstallProfile.swapSize = builder["installation"]["swapSize"] if "diskSize" in builder["installation"]: myinstallProfile.diskSize = builder["installation"]["diskSize"] else: myinstallProfile.swapSize = rInstallProfile.swapSize myinstallProfile.diskSize = rInstallProfile.partitionTable.disks.disk[0].size func = getattr(generate_utils, "generate_"+generics_utils.remove_special_chars(targetFormat.format.name), None) if func: myimage,myinstallProfile = func(myimage, builder, myinstallProfile, self.api, self.login) else: printer.out("Builder type unknown: "+format_type, printer.ERROR) return 2 if myimage is None: return 2 myimage.targetFormat = targetFormat myimage.installProfile = myinstallProfile if doArgs.simulated is not None and doArgs.simulated: myimage.simulated=True if doArgs.forced is not None and doArgs.forced: myimage.forceCheckingDeps=True rImage = self.api.Users(self.login).Appliances(myAppliance.dbId).Images().Generate(myimage) status = rImage.status statusWidget = progressbar_widget.Status() statusWidget.status = status widgets = [Bar('>'), ' ', statusWidget, ' ', ReverseBar('<')] progress = ProgressBar(widgets=widgets, maxval=100).start() while not (status.complete or status.error or status.cancelled): statusWidget.status = status progress.update(status.percentage) status = self.api.Users(self.login).Appliances(myAppliance.dbId).Images(rImage.dbId).Status.Get() time.sleep(2) statusWidget.status = status progress.finish() if status.error: printer.out("Generation '"+builder["type"]+"' error: "+status.message+"\n"+status.errorMessage, printer.ERROR) if status.detailedError: printer.out(status.detailedErrorMsg) if doArgs.junit is not None: test.elapsed_sec=time.time() - start_time test.add_error_info("Error", status.message+"\n"+status.errorMessage) elif status.cancelled: printer.out("Generation '"+builder["type"]+"' canceled: "+status.message, printer.WARNING) if doArgs.junit is not None: test.elapsed_sec=time.time() - start_time test.add_failure_info("Canceled", status.message) else: printer.out("Generation '"+builder["type"]+"' ok", printer.OK) printer.out("Image URI: "+rImage.uri) printer.out("Image Id : "+generics_utils.extract_id(rImage.uri)) if doArgs.junit is not None: test.elapsed_sec=time.time() - start_time #the downloadUri already contains downloadKey at the end if rImage.downloadUri is not None: test.stdout=self.api._url+"/"+rImage.downloadUri i+=1 except Exception as e: if is_uforge_exception(e): print_uforge_exception(e) if doArgs.junit is not None and "test_results" in locals() and len(test_results)>0: test=test_results[len(test_results)-1] test.elapsed_sec=time.time() - start_time test.add_error_info("Error", get_uforge_exception(e)) else: raise if doArgs.junit is not None: testName = myAppliance.distributionName+" "+myAppliance.archName ts = TestSuite("Generation "+testName, test_results) with open(doArgs.junit, 'w') as f: TestSuite.to_file(f, [ts], prettyprint=False) return 0 except __HOLE__ as e: printer.out("unknown error in template json file", printer.ERROR) except ArgumentParserError as e: printer.out("ERROR: In Arguments: "+str(e), printer.ERROR) self.help_build() except KeyboardInterrupt: printer.out("\n") if generics_utils.query_yes_no("Do you want to cancel the job ?"): if 'myAppliance' in locals() and 'rImage' in locals() and hasattr(myAppliance, 'dbId') and hasattr(rImage, 'dbId'): self.api.Users(self.login).Appliances(myAppliance.dbId).Images(rImage.dbId).Status.Cancel() else: printer.out("Impossible to cancel", printer.WARNING) else: printer.out("Exiting command") except Exception as e: print_uforge_exception(e) if doArgs.junit is not None and "test_results" in locals() and len(test_results)>0: test=test_results[len(test_results)-1] if "start_time" in locals(): elapse=time.time() - start_time else: elapse=0 test.elapsed_sec=elapse test.add_error_info("Error", get_uforge_exception(e)) else: return 2 finally: if "doArgs" in locals() and doArgs.junit is not None and "test_results" in locals() and len(test_results)>0: if "myAppliance" in locals(): testName = myAppliance.distributionName+" "+myAppliance.archName else: testName = "" ts = TestSuite("Generation "+testName, test_results) with open(doArgs.junit, 'w') as f: TestSuite.to_file(f, [ts], prettyprint=False)
KeyError
dataset/ETHPy150Open usharesoft/hammr/src/hammr/commands/template/template.py/Template.do_build
def import_stack(self, file, isImport, isForce, rbundles, isUseMajor): try: if isImport: printer.out("Importing template from ["+file+"] archive ...") else: if constants.TMP_WORKING_DIR in str(file): printer.out("Creating template from temporary ["+file+"] archive ...") else: printer.out("Creating template from ["+file+"] archive ...") file = open(file, "r") # The following code could not be used for the moment # appImport = applianceImport() # appImport.imported = isImport # appImport.forceRw = isForce # appImport.reuseBundles = rbundles # appImport.useMajor = isUseMajor # appImport = self.api.Users(self.login).Imports.Import(appImport) appImport = self.api.Users(self.login).Imports.Import(None, None, Imported=isImport, Force=isForce, Reusebundles=rbundles, Usemajor=isUseMajor) if appImport is None: if isImport: printer.out("error importing appliance", printer.ERROR) else: printer.out("error creating appliance", printer.ERROR) return 2 else: status = self.api.Users(self.login).Imports(appImport.dbId).Uploads.Upload(file) progress = ProgressBar(widgets=[Percentage(), Bar()], maxval=100).start() while not (status.complete or status.error): progress.update(status.percentage) status = self.api.Users(self.login).Imports(appImport.dbId).Status.Get() time.sleep(2) progress.finish() if status.error: if isImport: printer.out("Template import: "+status.message+"\n"+status.errorMessage, printer.ERROR) if status.detailedError: printer.out(status.detailedErrorMsg) else: printer.out("Template create: "+status.message+"\n"+status.errorMessage, printer.ERROR) else: if isImport: printer.out("Template import: DONE", printer.OK) else: printer.out("Template create: DONE", printer.OK) #get appliance import appImport = self.api.Users(self.login).Imports(appImport.dbId).Get() printer.out("Template URI: "+appImport.referenceUri) printer.out("Template Id : "+generics_utils.extract_id(appImport.referenceUri)) return 0 except __HOLE__ as e: printer.out("File error: "+str(e), printer.ERROR) return 2 except Exception as e: return handle_uforge_exception(e)
IOError
dataset/ETHPy150Open usharesoft/hammr/src/hammr/commands/template/template.py/Template.import_stack
def do_delete(self, args): try: #add arguments doParser = self.arg_delete() try: doArgs = doParser.parse_args(args.split()) except __HOLE__ as e: return #call UForge API printer.out("Searching template with id ["+doArgs.id+"] ...") myAppliance = self.api.Users(self.login).Appliances(doArgs.id).Get() if myAppliance is None or type(myAppliance) is not Appliance: printer.out("Template not found") else: table = Texttable(800) table.set_cols_dtype(["t","t","t","t","t","t","t","t","t","t"]) table.header(["Id", "Name", "Version", "OS", "Created", "Last modified", "# Imgs", "Updates", "Imp", "Shared"]) table.add_row([myAppliance.dbId, myAppliance.name, str(myAppliance.version), myAppliance.distributionName+" "+myAppliance.archName, myAppliance.created.strftime("%Y-%m-%d %H:%M:%S"), myAppliance.lastModified.strftime("%Y-%m-%d %H:%M:%S"), len(myAppliance.imageUris.uri),myAppliance.nbUpdates, "X" if myAppliance.imported else "", "X" if myAppliance.shared else ""]) print table.draw() + "\n" if doArgs.no_confirm: self.api.Users(self.login).Appliances(myAppliance.dbId).Delete() printer.out("Template deleted", printer.OK) elif generics_utils.query_yes_no("Do you really want to delete template with id "+str(myAppliance.dbId)): self.api.Users(self.login).Appliances(myAppliance.dbId).Delete() printer.out("Template deleted", printer.OK) return 0 except ArgumentParserError as e: printer.out("ERROR: In Arguments: "+str(e), printer.ERROR) self.help_delete() except Exception as e: return handle_uforge_exception(e)
SystemExit
dataset/ETHPy150Open usharesoft/hammr/src/hammr/commands/template/template.py/Template.do_delete
def do_clone(self, args): try: #add arguments doParser = self.arg_clone() try: doArgs = doParser.parse_args(args.split()) except __HOLE__ as e: return #call UForge API printer.out("Clonnig template with id ["+doArgs.id+"] ...") myAppliance = appliance() myAppliance.name = doArgs.name myAppliance.version = doArgs.version rAppliance = self.api.Users(self.login).Appliances(doArgs.id).Clones.Clone(myAppliance) if type(rAppliance) is Appliance: printer.out("Clonned successfully", printer.OK) else: printer.out("Clone error", printer.ERROR) except ArgumentParserError as e: printer.out("ERROR: In Arguments: "+str(e), printer.ERROR) self.help_clone() except Exception as e: return handle_uforge_exception(e)
SystemExit
dataset/ETHPy150Open usharesoft/hammr/src/hammr/commands/template/template.py/Template.do_clone
def make_exception(response, content, error_info=None, use_json=True): """Factory: create exception based on HTTP response code. :type response: :class:`httplib2.Response` or other HTTP response object :param response: A response object that defines a status code as the status attribute. :type content: string or dictionary :param content: The body of the HTTP error response. :type error_info: string :param error_info: Optional string giving extra information about the failed request. :type use_json: bool :param use_json: Flag indicating if ``content`` is expected to be JSON. :rtype: instance of :class:`GCloudError`, or a concrete subclass. :returns: Exception specific to the error response. """ if isinstance(content, six.binary_type): content = content.decode('utf-8') if isinstance(content, six.string_types): payload = None if use_json: try: payload = json.loads(content) except ValueError: # Expected JSON but received something else. pass if payload is None: payload = {'error': {'message': content}} else: payload = content message = payload.get('error', {}).get('message', '') errors = payload.get('error', {}).get('errors', ()) if error_info is not None: message += ' (%s)' % (error_info,) try: klass = _HTTP_CODE_TO_EXCEPTION[response.status] except __HOLE__: error = GCloudError(message, errors) error.code = response.status else: error = klass(message, errors) return error
KeyError
dataset/ETHPy150Open GoogleCloudPlatform/gcloud-python/gcloud/exceptions.py/make_exception
def get_stat(statistics_type): """ Get statistics from devfile in list of lists of words """ def filter_stat(): for x in check_output(["vnstat", "--dumpdb"]).decode("utf-8").splitlines(): if x.startswith("{};0;".format(statistics_type)): return x try: type, number, ts, rxm, txm, rxk, txk, fill = filter_stat().split(";") except OSError as e: print("Looks like you haven't installed or configured vnstat!") raise e except __HOLE__: raise RuntimeError("vnstat returned wrong output, maybe it's configured wrong or module is outdated") up = (int(txm) * 1024 + int(txk)) * 1024 down = (int(rxm) * 1024 + int(rxk)) * 1024 return { "up": up, "down": down, "total": up+down }
ValueError
dataset/ETHPy150Open ultrabug/py3status/py3status/modules/vnstat.py/get_stat
def _exif_orientation(im): """ Rotate and/or flip an image to respect the image's EXIF orientation data. """ try: exif = im._getexif() except (AttributeError, __HOLE__, KeyError, IOError): exif = None if exif: orientation = exif.get(0x0112) if orientation == 2: im = im.transpose(Image.FLIP_LEFT_RIGHT) elif orientation == 3: im = im.rotate(180) elif orientation == 4: im = im.transpose(Image.FLIP_TOP_BOTTOM) elif orientation == 5: im = im.rotate(-90).transpose(Image.FLIP_LEFT_RIGHT) elif orientation == 6: im = im.rotate(-90) elif orientation == 7: im = im.rotate(90).transpose(Image.FLIP_LEFT_RIGHT) elif orientation == 8: im = im.rotate(90) return im
IndexError
dataset/ETHPy150Open hcarvalhoalves/django-rest-thumbnails/restthumbnails/processors.py/_exif_orientation
def save_image(image, format='JPEG', **options): """ Save a PIL image to memory and return a StringIO instance. """ destination = StringIO() if format == 'JPEG': options.setdefault('quality', 85) try: image.save(destination, format=format, optimize=1, **options) except __HOLE__: # Try again, without optimization (PIL can't optimize an image # larger than ImageFile.MAXBLOCK, which is 64k by default) pass image.save(destination, format=format, **options) if hasattr(destination, 'seek'): destination.seek(0) # FIXME: Copying to memory back and forth just because # storage expects an object which implements a `chunks` method return ContentFile(destination.read())
IOError
dataset/ETHPy150Open hcarvalhoalves/django-rest-thumbnails/restthumbnails/processors.py/save_image
@register.filter def get(d, key_name): try: value = d.get(key_name) except __HOLE__: from django.conf import settings value = settings.TEMPLATE_STRING_IF_INVALID return value #### # 'assign' from http://www.djangosnippets.org/snippets/539/
KeyError
dataset/ETHPy150Open pfibiger/grumblechat/tags/filters.py/get
def _optimize_charset(charset, fixup): # internal: optimize character set out = [] outappend = out.append charmap = [0]*256 try: for op, av in charset: if op is NEGATE: outappend((op, av)) elif op is LITERAL: charmap[fixup(av)] = 1 elif op is RANGE: for i in range(fixup(av[0]), fixup(av[1])+1): charmap[i] = 1 elif op is CATEGORY: # XXX: could append to charmap tail return charset # cannot compress except __HOLE__: # character set contains unicode characters return _optimize_unicode(charset, fixup) # compress character map i = p = n = 0 runs = [] runsappend = runs.append for c in charmap: if c: if n == 0: p = i n = n + 1 elif n: runsappend((p, n)) n = 0 i = i + 1 if n: runsappend((p, n)) if len(runs) <= 2: # use literal/range for p, n in runs: if n == 1: outappend((LITERAL, p)) else: outappend((RANGE, (p, p+n-1))) if len(out) < len(charset): return out else: # use bitmap data = _mk_bitmap(charmap) outappend((CHARSET, data)) return out return charset
IndexError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/sre_compile.py/_optimize_charset
def _optimize_unicode(charset, fixup): # problems with optimization in Jython, forget about it for now return charset try: import array except __HOLE__: return charset charmap = [0]*65536 negate = 0 try: for op, av in charset: if op is NEGATE: negate = 1 elif op is LITERAL: charmap[fixup(av)] = 1 elif op is RANGE: for i in xrange(fixup(av[0]), fixup(av[1])+1): charmap[i] = 1 elif op is CATEGORY: # XXX: could expand category return charset # cannot compress except IndexError: # non-BMP characters return charset if negate: if sys.maxunicode != 65535: # XXX: negation does not work with big charsets return charset for i in xrange(65536): charmap[i] = not charmap[i] comps = {} mapping = [0]*256 block = 0 data = [] for i in xrange(256): chunk = tuple(charmap[i*256:(i+1)*256]) new = comps.setdefault(chunk, block) mapping[i] = new if new == block: block = block + 1 data = data + _mk_bitmap(chunk) header = [block] if _sre.CODESIZE == 2: code = 'H' else: # change this for Jython from 'I', since that will expand to # long, and cause needless complexity (or so it seems) code = 'i' # Convert block indices to byte array of 256 bytes mapping = array.array('b', mapping).tostring() # Convert byte array to word array mapping = array.array(code, mapping) assert mapping.itemsize == _sre.CODESIZE header = header + mapping.tolist() data[0:0] = header return [(BIGCHARSET, data)]
ImportError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/sre_compile.py/_optimize_unicode
def main(): p = argparse.ArgumentParser() p.add_argument('-l', '--audio-library', default='libportaudio.so') p.add_argument('filename') try: run(args=p.parse_args()) except __HOLE__: return
KeyboardInterrupt
dataset/ETHPy150Open romanz/amodem/scripts/record.py/main
@skipUnless(contrib_apps('admin', 'auth', 'sessions'), 'admin, auth or sessions not in INSTALLED_APPS') @skipUnless(user_model_quite_standard(), 'Too custom User model') def test_admin(self): """Test if synchronization can be performed via admin interface.""" path = reverse('synchro') user = User._default_manager.create_user('admin', 'mail', 'admin') self.client.login(username='admin', password='admin') # test if staff status is required response = self.client.get(path) try: self.assertTemplateUsed(response, 'admin/login.html') except __HOLE__: # Django >= 1.7 self.assertIn('location', response._headers) self.assertIn('/admin/login/', response._headers['location'][1]) user.is_staff = True user.save() # superuser self.assertTemplateUsed(self.client.get(path), 'synchro.html') # actual synchronization self.reset() TestModel.objects.create(name='James', cash=7) self.assertRemoteCount(0, TestModel) self.client.post(path, {'synchro': True}) # button clicked self.assertRemoteCount(1, TestModel) # resetting self.assertGreater(ChangeLog.objects.count(), 0) self.client.post(path, {'reset': True}) # button clicked self.assertEqual(ChangeLog.objects.count(), 0)
AssertionError
dataset/ETHPy150Open zlorf/django-synchro/synchro/tests.py/SimpleSynchroTests.test_admin
def test_manager_class(self): """Test if NaturalManager works.""" self.assertIsInstance(ModelWithKey.objects, NaturalManager) self.assertIsInstance(ModelWithKey.another_objects, NaturalManager) # Test if it subclasses user manager as well self.assertIsInstance(ModelWithKey.objects, CustomManager) self.assertIsInstance(ModelWithKey.another_objects, CustomManager) self.assertEqual('bar', ModelWithKey.objects.foo()) self.assertEqual('bar', ModelWithKey.another_objects.foo()) # Check proper MRO: NaturalManager, user manager, Manager self.assertTrue(hasattr(ModelWithKey.objects, 'get_by_natural_key')) self.assertTrue(hasattr(ModelWithKey.another_objects, 'get_by_natural_key')) self.assertEqual('Not a single object!', ModelWithKey.objects.none()) self.assertEqual('Not a single object!', ModelWithKey.another_objects.none()) self.assertSequenceEqual([], ModelWithKey.objects.all()) self.assertSequenceEqual([], ModelWithKey.another_objects.all()) # Test get_by_natural_key obj = ModelWithKey.objects.create(name='James') self.assertEqual(obj.pk, ModelWithKey.objects.get_by_natural_key('James').pk) self.assertEqual(obj.pk, ModelWithKey.another_objects.get_by_natural_key('James').pk) # Test instantiating (DJango #13313: manager must be instantiable without arguments) try: ModelWithKey.objects.__class__() ModelWithKey.another_objects.__class__() except __HOLE__: self.fail('Cannot instantiate.') # Test if class checking occurs def wrong(): class BadManager: pass class X(models.Model): x = models.IntegerField() objects = NaturalManager('x', manager=BadManager) self.assertRaises(ValidationError, wrong) # User manager must subclass Manager # Test if manager without fields raises exception def wrong2(): class X(models.Model): x = models.IntegerField() objects = NaturalManager() self.assertRaises(AssertionError, wrong2)
TypeError
dataset/ETHPy150Open zlorf/django-synchro/synchro/tests.py/AdvancedSynchroTests.test_manager_class
def testFitDiscretePowerLaw2(self): try: import networkx except __HOLE__: logging.debug("Networkx not found, can't run test") return nxGraph = networkx.barabasi_albert_graph(1000, 2) graph = SparseGraph.fromNetworkXGraph(nxGraph) degreeSeq = graph.outDegreeSequence() output = Util.fitDiscretePowerLaw(degreeSeq)
ImportError
dataset/ETHPy150Open charanpald/APGL/apgl/util/test/UtilTest.py/UtilTest.testFitDiscretePowerLaw2
def testPowerEigs(self): n = 10 numRuns = 10 for i in range(numRuns): A = numpy.random.rand(n, n) l, v = Util.powerEigs(A, 0.001) nptst.assert_array_almost_equal(v*l, A.dot(v), 2) u, V = numpy.linalg.eig(A) self.assertAlmostEquals(numpy.max(u), l, 2) try: nptst.assert_array_almost_equal(V[:, 0], v, 2) except __HOLE__: nptst.assert_array_almost_equal(V[:, 0], -v, 2)
AssertionError
dataset/ETHPy150Open charanpald/APGL/apgl/util/test/UtilTest.py/UtilTest.testPowerEigs
@logging_level.setter def logging_level(self, value): if value is None: value = self._default_logging_level if type(value) is str: try: level = _levelNames[value.upper()] except KeyError: raise ValueError('Unrecognized logging level: %s' % value) else: try: level = int(value) except __HOLE__: raise ValueError('Unrecognized logging level: %s' % value) self.logger.setLevel(level) return
ValueError
dataset/ETHPy150Open splunk/splunk-ref-pas-code/spikes/googledrive_addon/bin/splunklib/searchcommands/search_command.py/SearchCommand.logging_level
@property def search_results_info(self): """ Returns the search results info for this command invocation or None. The search results info object is created from the search results info file associated with the command invocation. Splunk does not pass the location of this file by default. You must request it by specifying these configuration settings in commands.conf: .. code-block:: python enableheader=true requires_srinfo=true The :code:`enableheader` setting is :code:`true` by default. Hence, you need not set it. The :code:`requires_srinfo` setting is false by default. Hence, you must set it. :return: :class:`SearchResultsInfo`, if :code:`enableheader` and :code:`requires_srinfo` are both :code:`true`. Otherwise, if either :code:`enableheader` or :code:`requires_srinfo` are :code:`false`, a value of :code:`None` is returned. """ if self._search_results_info is not None: return self._search_results_info try: info_path = self.input_header['infoPath'] except KeyError: return None def convert_field(field): return (field[1:] if field[0] == '_' else field).replace('.', '_') def convert_value(field, value): if field == 'countMap': split = value.split(';') value = dict((key, int(value)) for key, value in zip(split[0::2], split[1::2])) elif field == 'vix_families': value = ElementTree.fromstring(value) elif value == '': value = None else: try: value = float(value) if value.is_integer(): value = int(value) except __HOLE__: pass return value with open(info_path, 'rb') as f: from collections import namedtuple import csv reader = csv.reader(f, dialect='splunklib.searchcommands') fields = [convert_field(x) for x in reader.next()] values = [convert_value(f, v) for f, v in zip(fields, reader.next())] search_results_info_type = namedtuple("SearchResultsInfo", fields) self._search_results_info = search_results_info_type._make(values) return self._search_results_info
ValueError
dataset/ETHPy150Open splunk/splunk-ref-pas-code/spikes/googledrive_addon/bin/splunklib/searchcommands/search_command.py/SearchCommand.search_results_info
def process(self, args=argv, input_file=stdin, output_file=stdout): """ Processes search results as specified by command arguments. :param args: Sequence of command arguments :param input_file: Pipeline input file :param output_file: Pipeline output file """ self.logger.debug('%s arguments: %s' % (type(self).__name__, args)) self._configuration = None if len(args) >= 2 and args[1] == '__GETINFO__': ConfigurationSettings, operation, args, reader = self._prepare( args, input_file=None) try: self.parser.parse(args, self) except (SyntaxError, ValueError) as e: writer = csv.DictWriter(output_file, self, fieldnames=['ERROR']) writer.writerow({'ERROR': e}) self.logger.error(e) return self._configuration = ConfigurationSettings(self) writer = csv.DictWriter( output_file, self, self.configuration.keys(), mv_delimiter=',') writer.writerow(self.configuration.items()) elif len(args) >= 2 and args[1] == '__EXECUTE__': self.input_header.read(input_file) ConfigurationSettings, operation, args, reader = self._prepare( args, input_file) try: self.parser.parse(args, self) except (SyntaxError, __HOLE__) as e: from sys import exit self.messages.append("error_message", e) self.messages.write(output_file) self.logger.error(e) exit(1) self._configuration = ConfigurationSettings(self) if self.show_configuration: self.messages.append( 'info_message', '%s command configuration settings: %s' % (self.name, self._configuration)) writer = csv.DictWriter(output_file, self) self._execute(operation, reader, writer) else: file_name = path.basename(args[0]) message = ( 'Command {0} appears to be statically configured and static ' 'configuration is unsupported by splunklib.searchcommands. ' 'Please ensure that default/commands.conf contains this ' 'stanza: ' '[{0}] | ' 'filename = {1} | ' 'supports_getinfo = true | ' 'supports_rawargs = true | ' 'outputheader = true'.format(type(self).name, file_name)) self.messages.append('error_message', message) self.messages.write(output_file) self.logger.error(message)
ValueError
dataset/ETHPy150Open splunk/splunk-ref-pas-code/spikes/googledrive_addon/bin/splunklib/searchcommands/search_command.py/SearchCommand.process
def mod_list(only_persist=False): ''' Return a list of the loaded module names only_persist Only return the list of loaded persistent modules CLI Example: .. code-block:: bash salt '*' kmod.mod_list ''' mods = set() if only_persist: conf = _get_modules_conf() if os.path.exists(conf): try: with salt.utils.fopen(conf, 'r') as modules_file: for line in modules_file: line = line.strip() mod_name = _strip_module_name(line) if not line.startswith('#') and mod_name: mods.add(mod_name) except __HOLE__: log.error('kmod module could not open modules file at {0}'.format(conf)) else: for mod in lsmod(): mods.add(mod['module']) return sorted(list(mods))
IOError
dataset/ETHPy150Open saltstack/salt/salt/modules/kmod.py/mod_list
def find_imagemagick_configuration(): """Find out where ImageMagick is and how it was built. Return a dict of distutils extension-building arguments. """ # Easiest way: the user is telling us what to use. env_cflags = os.environ.get('SANPERA_IMAGEMAGICK_CFLAGS') env_ldflags = os.environ.get('SANPERA_IMAGEMAGICK_LDFLAGS') if env_cflags is not None or env_ldflags is not None: return dict( extra_compile_args=shlex.split(env_cflags or ''), extra_link_args=shlex.split(env_ldflags or ''), ) # Easy way: pkg-config, part of freedesktop # Note that ImageMagick ships with its own similar program `Magick-config`, # but it's just a tiny wrapper around `pkg-config`, so why it even exists # is a bit of a mystery. try: compile_args = check_output(['pkg-config', 'ImageMagick', '--cflags']).decode('utf-8') link_args = check_output(['pkg-config', 'ImageMagick', '--libs']).decode('utf-8') except __HOLE__: pass except CalledProcessError: # This means that pkg-config exists, but ImageMagick isn't registered # with it. Odd, but not worth giving up yet. pass else: return dict( extra_compile_args=shlex.split(compile_args), extra_link_args=shlex.split(link_args), ) # TODO this could use more fallback, but IM builds itself with different # names (as of some recent version, anyway) depending on quantum depth et # al. the `wand` project just brute-force searches for the one it wants. # perhaps we could do something similar. also, we only need the header # files for the build, so it would be nice to get away with only hunting # down the library for normal use. raise RuntimeError( "Can't find ImageMagick installation!\n" "If you're pretty sure you have it installed, please either install\n" "pkg-config or tell me how to find libraries on your platform." ) ################################################################################ # FFI setup
OSError
dataset/ETHPy150Open eevee/sanpera/sanpera/_api_build.py/find_imagemagick_configuration
@task() def import_feed(self, feed): from molly.apps.feeds.models import Item, vCard calendar = Calendar.from_string(urllib2.urlopen(feed.rss_url).read()) items = set() for component in calendar.walk(): if component.name == 'VEVENT': item, created = Item.objects.get_or_create(feed=feed, guid=str(component.get('UID'))) # Do not create the event if one the property is not correct, # first tries to parse DT as datetime then as date, if it still # fails, then ignore try: try: item.dt_start = vDatetime.from_ical(str( component.get('DTSTART'))) except ValueError, ve: item.dt_start = vDate.from_ical(str( component.get('DTSTART'))) if component.get('DTEND'): try: item.dt_end = vDatetime.from_ical(str( component.get('DTEND'))) except __HOLE__, ve: item.dt_end = vDate.from_ical(str( component.get('DTEND'))) item.title = vText.from_ical(str( component.get('SUMMARY')).strip()) if component.get('URL'): item.link = str(component.get('URL')) if component.get('DESCRIPTION'): item.description = sanitise_html(vText.from_ical(str( component.get('DESCRIPTION')))) if str(component.get('LOCATION')) != '': location, created = vCard.objects.get_or_create( name=vText.from_ical(str( component.get('LOCATION')).strip())) # in the future, we could imagine to (try to) geocode # the location to get a point field... location.save() item.venue = location try: item.last_modified = vDatetime.from_ical(str( component.get('LAST-MODIFIED'))) except Exception, e: item.last_modified = datetime.now() item.save() items.add(item) except ValueError, v: logger.error('Could not parse event %s' % v) for item in Item.objects.filter(feed=feed): if item not in items: item.delete() return items
ValueError
dataset/ETHPy150Open mollyproject/mollyproject/molly/apps/feeds/providers/ical.py/ICalFeedsProvider.import_feed
def attach(self, path): self.__path = path entry = None try: if type(path) is list: mtime = 0 for entry in path: entryTime = os.stat(entry).st_mtime if entryTime > mtime: mtime = entryTime self.mtime = mtime else: entry = path self.mtime = os.stat(entry).st_mtime except __HOLE__ as oserr: raise UserError("Invalid item path: %s" % entry) return self
OSError
dataset/ETHPy150Open zynga/jasy/jasy/item/Abstract.py/AbstractItem.attach
def visit_extract(self, extract, **kw): try: return "CAST(STRFTIME('%s', %s) AS INTEGER)" % ( self.extract_map[extract.field], self.process(extract.expr, **kw) ) except __HOLE__: raise exc.CompileError( "%s is not a valid extract argument." % extract.field)
KeyError
dataset/ETHPy150Open goFrendiAsgard/kokoropy/kokoropy/packages/sqlalchemy/dialects/sqlite/base.py/SQLiteCompiler.visit_extract
def set_isolation_level(self, connection, level): try: isolation_level = self._isolation_lookup[level.replace('_', ' ')] except __HOLE__: raise exc.ArgumentError( "Invalid value '%s' for isolation_level. " "Valid isolation levels for %s are %s" % (level, self.name, ", ".join(self._isolation_lookup)) ) cursor = connection.cursor() cursor.execute("PRAGMA read_uncommitted = %d" % isolation_level) cursor.close()
KeyError
dataset/ETHPy150Open goFrendiAsgard/kokoropy/kokoropy/packages/sqlalchemy/dialects/sqlite/base.py/SQLiteDialect.set_isolation_level
def _resolve_type_affinity(self, type_): """Return a data type from a reflected column, using affinity tules. SQLite's goal for universal compatibility introduces some complexity during reflection, as a column's defined type might not actually be a type that SQLite understands - or indeed, my not be defined *at all*. Internally, SQLite handles this with a 'data type affinity' for each column definition, mapping to one of 'TEXT', 'NUMERIC', 'INTEGER', 'REAL', or 'NONE' (raw bits). The algorithm that determines this is listed in http://www.sqlite.org/datatype3.html section 2.1. This method allows SQLAlchemy to support that algorithm, while still providing access to smarter reflection utilities by regcognizing column definitions that SQLite only supports through affinity (like DATE and DOUBLE). """ match = re.match(r'([\w ]+)(\(.*?\))?', type_) if match: coltype = match.group(1) args = match.group(2) else: coltype = '' args = '' if coltype in self.ischema_names: coltype = self.ischema_names[coltype] elif 'INT' in coltype: coltype = sqltypes.INTEGER elif 'CHAR' in coltype or 'CLOB' in coltype or 'TEXT' in coltype: coltype = sqltypes.TEXT elif 'BLOB' in coltype or not coltype: coltype = sqltypes.NullType elif 'REAL' in coltype or 'FLOA' in coltype or 'DOUB' in coltype: coltype = sqltypes.REAL else: coltype = sqltypes.NUMERIC if args is not None: args = re.findall(r'(\d+)', args) try: coltype = coltype(*[int(a) for a in args]) except __HOLE__: util.warn( "Could not instantiate type %s with " "reflected arguments %s; using no arguments." % (coltype, args)) coltype = coltype() else: coltype = coltype() return coltype
TypeError
dataset/ETHPy150Open goFrendiAsgard/kokoropy/kokoropy/packages/sqlalchemy/dialects/sqlite/base.py/SQLiteDialect._resolve_type_affinity
def _parse_fk(self, fks, fkeys, numerical_id, rtbl, lcol, rcol): # sqlite won't return rcol if the table was created with REFERENCES # <tablename>, no col if rcol is None: rcol = lcol if self._broken_fk_pragma_quotes: rtbl = re.sub(r'^[\"\[`\']|[\"\]`\']$', '', rtbl) try: fk = fks[numerical_id] except __HOLE__: fk = { 'name': None, 'constrained_columns': [], 'referred_schema': None, 'referred_table': rtbl, 'referred_columns': [], } fkeys.append(fk) fks[numerical_id] = fk if lcol not in fk['constrained_columns']: fk['constrained_columns'].append(lcol) if rcol not in fk['referred_columns']: fk['referred_columns'].append(rcol) return fk
KeyError
dataset/ETHPy150Open goFrendiAsgard/kokoropy/kokoropy/packages/sqlalchemy/dialects/sqlite/base.py/SQLiteDialect._parse_fk
def runTest(self, server, test, sslVer): time.sleep(self.opt.pause) try: sslVer = test.getSSLVersion() if test.getSSLVersion() else sslVer ctx = SSL.Context(sslVer) ctx.use_privatekey_file(test.getKeyPath()) ctx.use_certificate_chain_file(test.getPemPath()) ctx.set_cipher_list(test.getCipherSuite()) server.set_context(ctx) except (__HOLE__, SSL.Error): return None rlist = [server] cont = True while (cont): try: if (VERBOSE): self.opt.log("Awaiting connection...") r, _, _ = select.select(rlist, [], []) except Exception as e: self.opt.log(str(e)) break for conn in r: if (conn == server): cli, _ = server.accept() rlist = [cli] elif (conn is not None): try: conn.recv(1024) connected = True except (SSL.WantReadError, SSL.WantWriteError, SSL.WantX509LookupError): if (VERBOSE): self.opt.log(str(e)) continue except (SSL.ZeroReturnError, SSL.Error) as e: if (VERBOSE): self.opt.log(str(e)) connected = False cont = False else: cont = False try: cli.shutdown() except SSL.Error as e: if (VERBOSE): self.opt.log(str(e)) return connected == test.getTestType()
ValueError
dataset/ETHPy150Open yymax/x509test/src/TestServer.py/TestServer.runTest
def __call__(self, python_proxy, ): prototype = getattr(winfuncs, self.func_name + "Prototype") params = getattr(winfuncs, self.func_name + "Params") python_proxy.prototype = prototype python_proxy.params = params python_proxy.errcheck = self.error_check params_name = [param[1] for param in params] if (self.error_check.__doc__): doc = python_proxy.__doc__ doc = doc if doc else "" python_proxy.__doc__ = doc + "\nErrcheck:\n " + self.error_check.__doc__ def generate_ctypes_function(): try: c_prototyped = prototype((self.func_name, getattr(ctypes.windll, self.APIDLL)), params) except (__HOLE__, WindowsError): raise ExportNotFound(self.func_name, self.APIDLL) c_prototyped.errcheck = self.error_check self._cprototyped = c_prototyped def perform_call(*args): if len(params_name) != len(args): print("ERROR:") print("Expected params: {0}".format(params_name)) print("Just Got params: {0}".format(args)) raise ValueError("I do not have all parameters: how is that possible ?") for param_name, param_value in zip(params_name, args): if param_value is NeededParameter: raise TypeError("{0}: Missing Mandatory parameter <{1}>".format(self.func_name, param_name)) if self._cprototyped is None: generate_ctypes_function() return self._cprototyped(*args) setattr(python_proxy, "ctypes_function", perform_call) setattr(python_proxy, "force_resolution", generate_ctypes_function) return python_proxy
AttributeError
dataset/ETHPy150Open hakril/PythonForWindows/windows/winproxy.py/ApiProxy.__call__
def force_resolution(self): try: c_prototyped = self.prototype((self.func_name, getattr(ctypes.windll, self.dll_name)), self.args) except __HOLE__: raise ExportNotFound(self.func_name, self.dll_name) c_prototyped.errcheck = functools.wraps(self.error_check)(functools.partial(self.error_check, self.func_name)) self._ctypes_function = c_prototyped
AttributeError
dataset/ETHPy150Open hakril/PythonForWindows/windows/winproxy.py/TransparentApiProxy.force_resolution
def __calc_timezone(self): # Set self.timezone by using time.tzname. # Do not worry about possibility of time.tzname[0] == timetzname[1] # and time.daylight; handle that in strptime . try: time.tzset() except __HOLE__: pass no_saving = frozenset(["utc", "gmt", time.tzname[0].lower()]) if time.daylight: has_saving = frozenset([time.tzname[1].lower()]) else: has_saving = frozenset() self.timezone = (no_saving, has_saving)
AttributeError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/_strptime.py/LocaleTime.__calc_timezone
def _strptime(data_string, format="%a %b %d %H:%M:%S %Y"): """Return a time struct based on the input string and the format string.""" global _TimeRE_cache, _regex_cache with _cache_lock: if _getlang() != _TimeRE_cache.locale_time.lang: _TimeRE_cache = TimeRE() _regex_cache.clear() if len(_regex_cache) > _CACHE_MAX_SIZE: _regex_cache.clear() locale_time = _TimeRE_cache.locale_time format_regex = _regex_cache.get(format) if not format_regex: try: format_regex = _TimeRE_cache.compile(format) # KeyError raised when a bad format is found; can be specified as # \\, in which case it was a stray % but with a space after it except __HOLE__, err: bad_directive = err.args[0] if bad_directive == "\\": bad_directive = "%" del err raise ValueError("'%s' is a bad directive in format '%s'" % (bad_directive, format)) # IndexError only occurs when the format string is "%" except IndexError: raise ValueError("stray %% in format '%s'" % format) _regex_cache[format] = format_regex found = format_regex.match(data_string) if not found: raise ValueError("time data %r does not match format %r" % (data_string, format)) if len(data_string) != found.end(): raise ValueError("unconverted data remains: %s" % data_string[found.end():]) year = 1900 month = day = 1 hour = minute = second = fraction = 0 tz = -1 # Default to -1 to signify that values not known; not critical to have, # though week_of_year = -1 week_of_year_start = -1 # weekday and julian defaulted to -1 so as to signal need to calculate # values weekday = julian = -1 found_dict = found.groupdict() for group_key in found_dict.iterkeys(): # Directives not explicitly handled below: # c, x, X # handled by making out of other directives # U, W # worthless without day of the week if group_key == 'y': year = int(found_dict['y']) # Open Group specification for strptime() states that a %y #value in the range of [00, 68] is in the century 2000, while #[69,99] is in the century 1900 if year <= 68: year += 2000 else: year += 1900 elif group_key == 'Y': year = int(found_dict['Y']) elif group_key == 'm': month = int(found_dict['m']) elif group_key == 'B': month = locale_time.f_month.index(found_dict['B'].lower()) elif group_key == 'b': month = locale_time.a_month.index(found_dict['b'].lower()) elif group_key == 'd': day = int(found_dict['d']) elif group_key == 'H': hour = int(found_dict['H']) elif group_key == 'I': hour = int(found_dict['I']) ampm = found_dict.get('p', '').lower() # If there was no AM/PM indicator, we'll treat this like AM if ampm in ('', locale_time.am_pm[0]): # We're in AM so the hour is correct unless we're # looking at 12 midnight. # 12 midnight == 12 AM == hour 0 if hour == 12: hour = 0 elif ampm == locale_time.am_pm[1]: # We're in PM so we need to add 12 to the hour unless # we're looking at 12 noon. # 12 noon == 12 PM == hour 12 if hour != 12: hour += 12 elif group_key == 'M': minute = int(found_dict['M']) elif group_key == 'S': second = int(found_dict['S']) elif group_key == 'f': s = found_dict['f'] # Pad to always return microseconds. s += "0" * (6 - len(s)) fraction = int(s) elif group_key == 'A': weekday = locale_time.f_weekday.index(found_dict['A'].lower()) elif group_key == 'a': weekday = locale_time.a_weekday.index(found_dict['a'].lower()) elif group_key == 'w': weekday = int(found_dict['w']) if weekday == 0: weekday = 6 else: weekday -= 1 elif group_key == 'j': julian = int(found_dict['j']) elif group_key in ('U', 'W'): week_of_year = int(found_dict[group_key]) if group_key == 'U': # U starts week on Sunday. week_of_year_start = 6 else: # W starts week on Monday. week_of_year_start = 0 elif group_key == 'Z': # Since -1 is default value only need to worry about setting tz if # it can be something other than -1. found_zone = found_dict['Z'].lower() for value, tz_values in enumerate(locale_time.timezone): if found_zone in tz_values: # Deal with bad locale setup where timezone names are the # same and yet time.daylight is true; too ambiguous to # be able to tell what timezone has daylight savings if (time.tzname[0] == time.tzname[1] and time.daylight and found_zone not in ("utc", "gmt")): break else: tz = value break # If we know the week of the year and what day of that week, we can figure # out the Julian day of the year. if julian == -1 and week_of_year != -1 and weekday != -1: week_starts_Mon = True if week_of_year_start == 0 else False julian = _calc_julian_from_U_or_W(year, week_of_year, weekday, week_starts_Mon) # Cannot pre-calculate datetime_date() since can change in Julian # calculation and thus could have different value for the day of the week # calculation. if julian == -1: # Need to add 1 to result since first day of the year is 1, not 0. julian = datetime_date(year, month, day).toordinal() - \ datetime_date(year, 1, 1).toordinal() + 1 else: # Assume that if they bothered to include Julian day it will # be accurate. datetime_result = datetime_date.fromordinal((julian - 1) + datetime_date(year, 1, 1).toordinal()) year = datetime_result.year month = datetime_result.month day = datetime_result.day if weekday == -1: weekday = datetime_date(year, month, day).weekday() return (time.struct_time((year, month, day, hour, minute, second, weekday, julian, tz)), fraction)
KeyError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/_strptime.py/_strptime
def _import_module(module_label, classnames):
    """
    Imports the module with the given name. Returns None if the module
    doesn't exist, but propagates any import errors.
    """
    try:
        return __import__(module_label, fromlist=classnames)
    except __HOLE__:
        # There are 2 reasons why there could be an ImportError:
        #
        #  1. Module does not exist. In that case, we ignore the import and
        #     return None
        #  2. Module exists but another ImportError occurred when trying to
        #     import the module. In that case, it is important to propagate the
        #     error.
        #
        # ImportError does not provide easy way to distinguish those two cases.
        # Fortunately, the traceback of the ImportError starts at __import__
        # statement. If the traceback has more than one frame, it means that
        # application was found and ImportError originates within the local app
        __, __, exc_traceback = sys.exc_info()
        frames = traceback.extract_tb(exc_traceback)
        if len(frames) > 1:
            raise
ImportError
dataset/ETHPy150Open django-oscar/django-oscar/src/oscar/core/loading.py/_import_module
def create_model_instance(self, data):
    obj = super(SessionStore, self).create_model_instance(data)
    try:
        account_id = int(data.get('_auth_user_id'))
    except (ValueError, __HOLE__):
        account_id = None
    obj.account_id = account_id
    return obj
TypeError
dataset/ETHPy150Open django/django/tests/sessions_tests/models.py/SessionStore.create_model_instance
@staticmethod
def validateInfo(doc):
    """
    Ensures we have the necessary information to connect to HDFS instance,
    and uses snakebite to actually connect to it.
    """
    info = doc.get('hdfs', {})
    for field in ('host', 'port', 'path', 'webHdfsPort', 'user'):
        if field not in info:
            raise ValidationException('Missing %s field.' % field)
    if not info['webHdfsPort']:
        info['webHdfsPort'] = 50070
    try:
        info['webHdfsPort'] = int(info['webHdfsPort'])
        info['port'] = int(info['port'])
    except __HOLE__:
        raise ValidationException('Port values must be numeric.', field='port')
    try:
        client = HdfsAssetstoreAdapter._getClient(doc)
        client.serverdefaults()
    except Exception:
        raise ValidationException('Could not connect to HDFS at %s:%d.' %
                                  (info['host'], info['port']))
    # TODO test connection to webHDFS? Not now since it's not required
    if not posixpath.isabs(info['path']):
        raise ValidationException('Path must be absolute.', field='path')
    if not client.test(info['path'], exists=True, directory=True):
        res = client.mkdir([info['path']], create_parent=True).next()
        if not res['result']:
            raise ValidationException(res['error'], field='path')
    return doc
ValueError
dataset/ETHPy150Open girder/girder/plugins/hdfs_assetstore/server/assetstore.py/HdfsAssetstoreAdapter.validateInfo
@wq.command() @click.argument('source') @click.argument('source_options', required=False) @click.option('--format', '-f', default='csv', help='Output format') def cat(source, source_options, format): """ Display contents of a file or wq.io class. SOURCE can be either a filename or a Python path. SOURCE_OPTIONS is an optional string specifying init options in "name=value" format, separated by commas. The data will be printed to the terminal in CSV form, unless the format is set to JSON. Examples: \b wq cat example.json wq cat example.xlsx "start_row=5" wq cat wq.io.CsvNetIO "url=http://example.com/example.csv" """ # Parse option string options = {} if source_options: for opt in source_options.split(','): key, val = opt.split('=') if val.isdigit(): val = int(val) options[key] = val if os.path.exists(source): try: input = load_file(source, options=options) except IoException as e: raise click.ClickException(str(e)) else: parts = source.split('.') class_name = parts[-1] module_name = ".".join(parts[:-1]) try: module = importlib.import_module(module_name) IO = getattr(module, class_name) input = flattened(IO, **options) except (ImportError, __HOLE__, AttributeError, IoException) as e: raise click.ClickException(str(e)) if format == "json": OutputIO = JsonStringIO init = "[]" else: OutputIO = CsvStringIO init = "" output = OutputIO(data=input.data, string=init) output.data = input.data output.save() result = output.string if output.binary: result = result.decode('utf-8') print(result)
ValueError
dataset/ETHPy150Open wq/wq.io/commands.py/cat
def __getitem__(self, key):
    if key is None:
        return self
    elif isinstance(key, list) or isinstance(key, tuple):
        if len(key) > 1:
            return self.__getitem__(key[0]).__getitem__(key[1:])  # start a recursion
        elif len(key) == 1:
            return self.__getitem__(key[0])
        else:
            return self  # theme[][key] returns theme[key]
    else:
        try:
            return dict.__getitem__(self, key)
        except __HOLE__:
            if self.parent is not None:
                return self.parent.__getitem__(key)
            else:
                raise
KeyError
dataset/ETHPy150Open jorgecarleitao/pyglet-gui/pyglet_gui/theme/theme.py/ScopedDict.__getitem__
def get_test_runner():
    """Get a test runner for the tests. Uses nose if available."""
    result = [sys.executable]
    if USE_NOSE:
        try:
            import nose
        except __HOLE__:
            result = [sys.executable]
        else:
            result = ['nosetests']
    return result
ImportError
dataset/ETHPy150Open enthought/mayavi/mayavi/tests/runtests.py/get_test_runner
def find_tests(tests):
    """Find test files given list of arguments which may be files,
    directories or modules."""
    files = []
    for test in tests:
        if isfile(test):
            files.append(test)
        elif isdir(test):
            files.extend(get_tests_in_dir(test))
        else:
            # A module.
            try:
                # Import the module
                components = test.split('.')
                if len(components) > 1:
                    modname = '.'.join(components[:-1])
                    symbol = components[-1]
                    mod = __import__(modname, globals(), locals(), [symbol])
                    s = getattr(mod, symbol)
                    d = dirname(s.__file__)
                else:
                    modname = components[0]
                    mod = __import__(modname, globals(), locals(), [])
                    d = dirname(mod.__file__)
                files.extend(get_tests_in_dir(d))
            except __HOLE__:
                msg = 'Warning: %s is neither a file/directory or '\
                      'module. Ignoring.'%test
                print(msg)
    return files
ImportError
dataset/ETHPy150Open enthought/mayavi/mayavi/tests/runtests.py/find_tests
def __init__(self):
    """Load or initialize crypto keys."""
    try:
        with open("key", "rb") as keys_file:
            keys = keys_file.read()
    except __HOLE__:
        keys = None
    if keys:
        self.pkey = public.PrivateKey(keys, STORE_ENC)
    else:
        kp = public.PrivateKey.generate()
        with open("key", "wb") as keys_file:
            keys_file.write(kp.encode(STORE_ENC))
        self.pkey = kp
IOError
dataset/ETHPy150Open stal888/Tox-QuickDNS/cryptocore.py/CryptoCore.__init__
def try_to_parse_request(self, request_buff):
    try:
        msg = json.loads(request_buff)
        return msg
    except __HOLE__:
        return None
ValueError
dataset/ETHPy150Open lbryio/lbry/lbrynet/core/server/ServerRequestHandler.py/ServerRequestHandler.try_to_parse_request
def __getattr__(self, key):
    try:
        if not len(settings.CONFIG[key]) in (2, 3):
            raise AttributeError(key)
        default = settings.CONFIG[key][0]
    except __HOLE__:
        raise AttributeError(key)
    result = self._backend.get(key)
    if result is None:
        result = default
        setattr(self, key, default)
        return result
    return result
KeyError
dataset/ETHPy150Open jazzband/django-constance/constance/base.py/Config.__getattr__
def strip_html(text, mode='strict'):
    if mode == 'strict':
        text = filter(lambda c: ord(c) < 128, text)

    def fixup(m):
        text = m.group(0)
        if text[:1] == "<":
            return ""  # ignore tags
        if text[:2] == "&#":
            try:
                if text[:3] == "&#x":
                    return unichr(int(text[3:-1], 16))
                else:
                    return unichr(int(text[2:-1]))
            except ValueError:
                pass
        elif text[:1] == "&":
            import htmlentitydefs
            entity = htmlentitydefs.entitydefs.get(text[1:-1])
            if entity:
                if entity[:2] == "&#":
                    try:
                        return unichr(int(entity[2:-1]))
                    except __HOLE__:
                        pass
                else:
                    return unicode(entity, "iso-8859-1")
        return text  # leave as is
    try:
        return re.sub("(?s)<[^>]*>|&#?\w+;", fixup, text)
    except UnicodeDecodeError:
        return re.sub("(?s)<[^>]*>|&#?\w+;", ' ', text)
ValueError
dataset/ETHPy150Open ipeirotis/Mturk-Tracker/app/utils/text.py/strip_html
def test_iter(self):
    "Iteration of NLMSA objects should return reasonable error."
    # try iterating over it
    try:
        for x in self.nlmsa:
            break  # should fail before this
        assert 0, "should not be able to iterate over NLMSA"
    except __HOLE__:
        pass
NotImplementedError
dataset/ETHPy150Open cjlee112/pygr/tests/nlmsa_test.py/NLMSA_Test.test_iter
def decode_text(text, is_html=False, guess_charset=True,
                try_common_charsets=True, charsets=None,
                fallback_charset='utf-8'):
    if not isinstance(text, bytes):
        return text, None
    _charsets = []
    if guess_charset:
        c = guess_text_charset(text, is_html=is_html)
        if c:
            _charsets.append(c)
    if charsets:
        _charsets.extend(charsets)
    if try_common_charsets:
        _charsets.extend(COMMON_CHARSETS)
    if fallback_charset:
        _charsets.append(fallback_charset)
    _last_exc = None
    for enc in _charsets:
        try:
            return to_unicode(text, charset=enc), enc
        except __HOLE__ as exc:
            _last_exc = exc
    raise _last_exc
UnicodeDecodeError
dataset/ETHPy150Open lavr/python-emails/emails/loader/helpers.py/decode_text
def getPluginDir(plugin_name):
    """Gets the directory of the given plugin"""
    filename = None
    try:
        filename = sys.modules[plugin_name].__file__
    except __HOLE__:  # It sometimes happens with Owner
        pass
    if filename == None:
        try:
            filename = sys.modules['supybot.plugins.' + plugin_name].__file__
        except:  # In the case where the plugin is not loaded by Supybot
            try:
                filename = sys.modules['plugin'].__file__
            except:
                filename = sys.modules['__main__'].__file__
    if filename.endswith(".pyc"):
        filename = filename[0:-1]
    allowed_files = ['__init__.py', 'config.py', 'plugin.py', 'test.py']
    for allowed_file in allowed_files:
        if filename.endswith(allowed_file):
            return filename[0:-len(allowed_file)]
    raise PluginNotFound()
KeyError
dataset/ETHPy150Open ProgVal/Limnoria/src/i18n.py/getPluginDir
def loadLocale(self, localeName=None):
    """(Re)loads the locale used by this class."""
    self.translations = {}
    if localeName is None:
        localeName = currentLocale
    self.currentLocaleName = localeName
    self._loadL10nCode()
    try:
        try:
            translationFile = open(getLocalePath(self.name, localeName, 'po'), 'ru')
        except ValueError:  # We are using Windows
            translationFile = open(getLocalePath(self.name, localeName, 'po'), 'r')
        self._parse(translationFile)
    except (__HOLE__, PluginNotFound):  # The translation is unavailable
        pass
    finally:
        if 'translationFile' in locals():
            translationFile.close()
IOError
dataset/ETHPy150Open ProgVal/Limnoria/src/i18n.py/_PluginInternationalization.loadLocale
def __call__(self, untranslated):
    """Main function. This is the function which is called when a plugin
    runs _()"""
    normalizedUntranslated = normalize(untranslated, True)
    try:
        string = self._translate(normalizedUntranslated)
        return self._addTracker(string, untranslated)
    except __HOLE__:
        pass
    if untranslated.__class__ is InternationalizedString:
        return untranslated._original
    else:
        return untranslated
KeyError
dataset/ETHPy150Open ProgVal/Limnoria/src/i18n.py/_PluginInternationalization.__call__
def _loadL10nCode(self):
    """Open the file containing the code specific to this locale, and
    load its functions."""
    if self.name != 'supybot':
        return
    path = self._getL10nCodePath()
    try:
        with open(path) as fd:
            exec(compile(fd.read(), path, 'exec'))
    except __HOLE__:  # File doesn't exist
        pass
    functions = locals()
    functions.pop('self')
    self._l10nFunctions = functions
    # Remove old functions and come back to the native language
IOError
dataset/ETHPy150Open ProgVal/Limnoria/src/i18n.py/_PluginInternationalization._loadL10nCode
def internationalizeDocstring(obj):
    """Decorates functions and internationalize their docstring.
    Only useful for commands (commands' docstring is displayed on IRC)"""
    if obj.__doc__ == None:
        return obj
    plugin_module = sys.modules[obj.__module__]
    if '_' in plugin_module.__dict__:
        internationalizedCommands.update({hash(obj): obj})
        try:
            obj.__doc__ = plugin_module._.__call__(obj.__doc__)
            # We use _.__call__() instead of _() because of a pygettext warning.
        except __HOLE__:
            # attribute '__doc__' of 'type' objects is not writable
            pass
    return obj
AttributeError
dataset/ETHPy150Open ProgVal/Limnoria/src/i18n.py/internationalizeDocstring
def get_field(d, key):
    try:
        rval = d[key]
    except __HOLE__:
        reraise_as(ValueError('Could not access "'+key+'" in \n'+str(d)))
    return rval
KeyError
dataset/ETHPy150Open lisa-lab/pylearn2/pylearn2/config/old_config.py/get_field
def resolve(d):
    """ given a dictionary d, returns the object described by the dictionary """
    tag = get_tag(d)
    try:
        resolver = resolvers[tag]
    except __HOLE__:
        reraise_as(TypeError('config does not know of any object type "'+tag+'"'))
    return resolver(d)
KeyError
dataset/ETHPy150Open lisa-lab/pylearn2/pylearn2/config/old_config.py/resolve
def __call_internal__(self, *args, **kwargs): """ Override the call to be able to scale automatically the axis. """ self.source = self._source_function(*args, **kwargs) kwargs.pop('name', None) # Deal with both explicit warp scale and extent, this is # slightly hairy. The wigner example is a good test case for # this. if not 'warp_scale' in kwargs and not 'extent' in kwargs: try: xi, xf, yi, yf, _, _ = self.source.data.bounds zi, zf = self.source.data.scalar_range except __HOLE__: xi, xf, yi, yf, _, _ = self.source.image_data.bounds zi, zf = self.source.image_data.scalar_range aspect_ratios = [(zf - zi) / (xf - xi), (zf - zi) / (yf - yi)] if min(aspect_ratios) < 0.01 or max(aspect_ratios) > 100: print('Warning: the range of your scalar values differs by ' \ 'more than a factor 100 than the range of the grid values ' \ 'and you did not '\ 'specify a warp_scale. You could try warp_scale="auto".') if 'warp_scale' in kwargs and not kwargs['warp_scale'] == 'auto' \ and 'extent' in kwargs: # XXX: I should use the logging module. print('Warning: both warp_scale and extent keyword argument ' \ 'specified, the z bounds of the extents will be overridden') xi, xf, yi, yf, zi, zf = kwargs['extent'] zo = 0.5 * (zi + zf) try: si, sf = self.source.data.scalar_range except AttributeError: si, sf = self.source.image_data.scalar_range z_span = kwargs['warp_scale'] * abs(sf - si) zi = zo + si * kwargs['warp_scale'] zf = zi + z_span kwargs['extent'] = (xi, xf, yi, yf, zi, zf) kwargs['warp_scale'] = 1 elif kwargs.get('warp_scale', 1) == 'auto': if 'extent' in kwargs: if 'warp_scale' in kwargs: print("Warning: extent specified, warp_scale='auto' " \ "ignored.") else: try: xi, xf, yi, yf, _, _ = self.source.data.bounds zi, zf = self.source.data.scalar_range except AttributeError: xi, xf, yi, yf, _, _ = self.source.image_data.bounds zi, zf = self.source.image_data.scalar_range z0 = zf - zi dz = 0.3 * ((xf - xi) + (yf - yi)) zi = z0 - 0.5 * dz zf = z0 + 0.5 * dz kwargs['extent'] = (xi, xf, yi, yf, zi, zf) kwargs['warp_scale'] = 1. self.store_kwargs(kwargs) # Copy the pipeline so as not to modify it for the next call self.pipeline = self._pipeline[:] return self.build_pipeline()
AttributeError
dataset/ETHPy150Open enthought/mayavi/mayavi/tools/helper_functions.py/Surf.__call_internal__
def get_graph_data(self,obj):
    date_start = obj.date_deliver_start.replace(minute=0,second=0,microsecond=0)
    try:
        from django.utils import timezone
    except __HOLE__:
        now = datetime.datetime.now()
    else:
        now = timezone.now()
    opened_serie = []
    for i in range(336):
        t = date_start + datetime.timedelta(hours=i)
        count_opened = obj.mails.exclude(viewed=None).filter(viewed__lt=t).count()
        opened_serie.append('[%s000,%s]' % (t.strftime('%s'),count_opened))
        if t > now:
            break
    return {
        'opened_serie': ','.join(opened_serie),
    }
ImportError
dataset/ETHPy150Open allink/pennyblack/pennyblack/models/job.py/JobStatisticAdmin.get_graph_data
def construct_mapping(self, node, deep=False):
    if isinstance(node, yaml.MappingNode):
        self.flatten_mapping(node)
    else:
        raise yaml.constructor.ConstructorError(None, None,
            'expected a mapping node, but found %s' % node.id, node.start_mark)
    mapping = OrderedDict()
    for key_node, value_node in node.value:
        key = self.construct_object(key_node, deep=deep)
        try:
            hash(key)
        except __HOLE__, exc:
            raise yaml.constructor.ConstructorError('while constructing a mapping',
                node.start_mark, 'found unacceptable key (%s)' % exc,
                key_node.start_mark)
        value = self.construct_object(value_node, deep=deep)
        mapping[key] = value
    return mapping
TypeError
dataset/ETHPy150Open mongolab/dex/dex/utils.py/OrderedDictYAMLLoader.construct_mapping
def __cacheit(maxsize): """caching decorator. important: the result of cached function must be *immutable* Examples ======== >>> from sympy.core.cache import cacheit >>> @cacheit ... def f(a, b): ... return a+b >>> @cacheit ... def f(a, b): ... return [a, b] # <-- WRONG, returns mutable object to force cacheit to check returned results mutability and consistency, set environment variable SYMPY_USE_CACHE to 'debug' """ def func_wrapper(func): cfunc = lru_cache(maxsize, typed=True)(func) # wraps here does not propagate all the necessary info # for py2.7, use update_wrapper below def wrapper(*args, **kwargs): try: retval = cfunc(*args, **kwargs) except __HOLE__: retval = func(*args, **kwargs) return retval wrapper.cache_info = cfunc.cache_info wrapper.cache_clear = cfunc.cache_clear # Some versions of update_wrapper erroneously assign the final # function of the wrapper chain to __wrapped__, see # https://bugs.python.org/issue17482 . # To work around this, we need to call update_wrapper first, then # assign to wrapper.__wrapped__. update_wrapper(wrapper, func) wrapper.__wrapped__ = cfunc.__wrapped__ CACHE.append(wrapper) return wrapper return func_wrapper
TypeError
dataset/ETHPy150Open sympy/sympy/sympy/core/cache.py/__cacheit
@property
def icon(self):
    try:
        return "{} {}".format(VELLUM_TYPES[self.type]['icon'],
                              VELLUM_TYPES[self.type]['icon_bs3'])
    except __HOLE__:
        return 'icon-question-sign'
KeyError
dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/reports/formdetails/readable.py/FormQuestion.icon
def get_form(self, form_id):
    try:
        form = next(form for form in self.forms if form.form_id == form_id)
    except __HOLE__:
        form = CaseFormMeta(form_id=form_id)
        self.forms.append(form)
    return form
StopIteration
dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/reports/formdetails/readable.py/CaseProperty.get_form
def get_property(self, name, allow_parent=False):
    if not allow_parent:
        assert '/' not in name, "Add parent properties to the correct case type"
    try:
        prop = next(prop for prop in self.properties if prop.name == name)
    except __HOLE__:
        prop = CaseProperty(name=name)
        self.properties.append(prop)
    return prop
StopIteration
dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/reports/formdetails/readable.py/CaseTypeMeta.get_property
def get_type(self, name):
    if not name:
        return CaseTypeMeta(name='')
    try:
        type_ = next(type_ for type_ in self.case_types if type_.name == name)
    except __HOLE__:
        type_ = CaseTypeMeta(name=name)
        self.case_types.append(type_)
    return type_
StopIteration
dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/reports/formdetails/readable.py/AppCaseMetadata.get_type
def pop_from_form_data(relative_data, absolute_data, path):
    path = path.split('/')
    if path and path[0] == '':
        data = absolute_data
        # path[:2] will be ['', 'data'] so remove
        path = path[2:]
    else:
        data = relative_data
    while path and data:
        key, path = path[0], path[1:]
        try:
            if path:
                data = data[key]
            elif isinstance(data, dict):
                return data.pop(key)
            else:
                return None
        except __HOLE__:
            return None
KeyError
dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/reports/formdetails/readable.py/pop_from_form_data
def getKeys(self):
    bits = 0
    try:
        packet = SerialGamePad._generateHeader(CMDTYPE.GET_BTNS, 0)
        self._com.write(packet)
        resp = self._com.read(1)
        if len(resp) == 0:
            SerialGamePad._comError()
        elif ord(resp) != RETURN_CODES.SUCCESS:
            SerialGamePad._printError(ord(resp))
        resp = self._com.read(2)
        if len(resp) != 2:
            SerialGamePad._comError()
        bits = ord(resp[0]) + (ord(resp[1]) << 8)
    except __HOLE__:
        log.error("IO Error Communicatng With Game Pad!")
    index = 0
    result = {}
    for m in self._map:
        result[m] = (bits & (1 << index) > 0)
        index += 1
    return d(result)
IOError
dataset/ETHPy150Open ManiacalLabs/BiblioPixel/bibliopixel/serial_gamepad.py/SerialGamePad.getKeys
def sync_fuzz(self, requests, delay=0, follow_cookies=True):
    ''' This is the synchronous fuzzing engine. Useful for fuzzing
    with delays and fuzzing that follows cookies'''
    self.reset()
    http_client = HTTPClient()
    cookie = None
    for request in requests:
        try:
            if follow_cookies and cookie:
                request.headers = HTTPHelper.add_header_param(
                    request.header, "Cookie", cookie)
            response = http_client.fetch(request)
        except __HOLE__ as e:
            if e.response:
                response = e.response
        self.responses.append(response)
        if follow_cookies:
            if "Set-Cookie" in response.headers:
                cookie = response.headers["Set-Cookie"]
        if delay:
            sleep(delay)
    return self.responses
HTTPError
dataset/ETHPy150Open owtf/owtf/framework/http/wafbypasser/core/fuzzer.py/Fuzzer.sync_fuzz
def get_plugins_by_name(self, *names):
    """
    Return a list of plugins by plugin class, or name.
    """
    self._import_plugins()
    plugin_instances = []
    for name in names:
        if isinstance(name, six.string_types):
            try:
                plugin_instances.append(self.plugins[name.lower()])
            except __HOLE__:
                raise PluginNotFound("No plugin named '{0}'.".format(name))
        elif issubclass(name, ContentPlugin):
            # Will also allow classes instead of strings.
            plugin_instances.append(self.plugins[self._name_for_model[name.model]])
        else:
            raise TypeError("get_plugins_by_name() expects a plugin name or class, not: {0}".format(name))
    return plugin_instances
KeyError
dataset/ETHPy150Open edoburu/django-fluent-contents/fluent_contents/extensions/pluginpool.py/PluginPool.get_plugins_by_name
def get_plugin_by_model(self, model_class):
    """
    Return the corresponding plugin for a given model.
    You can also use the :attr:`ContentItem.plugin <fluent_contents.models.ContentItem.plugin>` property directly.
    This is the low-level function that supports that feature.
    """
    self._import_plugins()  # could happen during rendering that no plugin scan happened yet.
    assert issubclass(model_class, ContentItem)  # avoid confusion between model instance and class here!
    try:
        name = self._name_for_model[model_class]
    except __HOLE__:
        raise PluginNotFound("No plugin found for model '{0}'.".format(model_class.__name__))
    return self.plugins[name]
KeyError
dataset/ETHPy150Open edoburu/django-fluent-contents/fluent_contents/extensions/pluginpool.py/PluginPool.get_plugin_by_model
def _get_plugin_by_content_type(self, contenttype):
    self._import_plugins()
    self._setup_lazy_indexes()
    ct_id = contenttype.id if isinstance(contenttype, ContentType) else int(contenttype)
    try:
        name = self._name_for_ctype_id[ct_id]
    except __HOLE__:
        # ContentType not found, likely a plugin is no longer registered or the app has been removed.
        try:
            # ContentType could be stale
            ct = contenttype if isinstance(contenttype, ContentType) else ContentType.objects.get_for_id(ct_id)
        except AttributeError:  # should return the stale type but Django <1.6 raises an AttributeError in fact.
            ct_name = 'stale content type'
        else:
            ct_name = '{0}.{1}'.format(ct.app_label, ct.model)
        raise PluginNotFound("No plugin found for content type #{0} ({1}).".format(contenttype, ct_name))
    return self.plugins[name]
KeyError
dataset/ETHPy150Open edoburu/django-fluent-contents/fluent_contents/extensions/pluginpool.py/PluginPool._get_plugin_by_content_type
def store(self): package, _ = Package.objects.get_or_create(name=self.name) for data in self.data.values(): try: release = Release.objects.get(package=package, version=data["version"]) except Release.DoesNotExist: release = Release(package=package, version=data["version"]) release.full_clean() release.save() # This is an extra database call but it should prevent ShareLocks Release.objects.filter(pk=release.pk).select_for_update() if release.hidden: release.hidden = False for key, value in data.iteritems(): if key in ["package", "version"]: # Short circuit package and version continue if key == "uris": ReleaseURI.objects.filter(release=release).delete() for label, uri in value.iteritems(): try: ReleaseURI.objects.get(release=release, label=label, uri=uri) except ReleaseURI.DoesNotExist: try: release_uri = ReleaseURI(release=release, label=label, uri=uri) release_uri.full_clean() release_uri.save(force_insert=True) except __HOLE__: logger.exception("%s, %s for %s-%s Invalid Data" % (label, uri, release.package.name, release.version)) elif key == "classifiers": release.classifiers.clear() for classifier in value: try: trove = TroveClassifier.objects.get(trove=classifier) except TroveClassifier.DoesNotExist: trove = TroveClassifier(trove=classifier) trove.full_clean() trove.save(force_insert=True) release.classifiers.add(trove) elif key in ["requires", "provides", "obsoletes"]: model = {"requires": ReleaseRequire, "provides": ReleaseProvide, "obsoletes": ReleaseObsolete}.get(key) model.objects.filter(release=release).delete() for item in value: try: model.objects.get(release=release, **item) except model.DoesNotExist: m = model(release=release, **item) m.full_clean() m.save(force_insert=True) elif key == "files": files = ReleaseFile.objects.filter(release=release) filenames = dict([(x.filename, x) for x in files]) for f in value: try: rf = ReleaseFile.objects.get( release=release, type=f["type"], filename=f["filename"], python_version=f["python_version"], ) for k, v in f.iteritems(): if k in ["digests", "file", "filename", "type", "python_version"]: continue setattr(rf, k, v) rf.hidden = False rf.full_clean() rf.save() except ReleaseFile.DoesNotExist: rf = ReleaseFile( release=release, type=f["type"], filename=f["filename"], python_version=f["python_version"], **dict([(k, v) for k, v in f.iteritems() if k not in ["digests", "file", "filename", "type", "python_version"]]) ) rf.hidden = False rf.full_clean() rf.save() if f["filename"] in filenames.keys(): del filenames[f["filename"]] if filenames: for rf in ReleaseFile.objects.filter(pk__in=[f.pk for f in filenames.values()]): rf.hidden = True rf.save() else: setattr(release, key, value) while True: try: release.full_clean() except ValidationError as e: if "download_uri" in e.message_dict: release.download_uri = "" logger.exception("%s-%s Release Validation Error %s" % (release.package.name, release.version, str(e.message_dict))) else: raise else: break release.save() # Mark unsynced as deleted when bulk processing if self.bulk: for release in Release.objects.filter(package=package).exclude(version__in=self.data.keys()): release.hidden = True release.save() self.stored = True
ValidationError
dataset/ETHPy150Open crate-archive/crate-site/crateweb/apps/pypi/processor.py/PyPIPackage.store
def download(self): # Check to Make sure fetch has been ran if not hasattr(self, "releases") or not hasattr(self, "release_data") or not hasattr(self, "release_url_data"): raise Exception("fetch and build must be called prior to running download") # @@@ Make a Custom Exception # Check to Make sure build has been ran if not hasattr(self, "data"): raise Exception("build must be called prior to running download") # @@@ Make a Custom Exception if not self.stored: raise Exception("package must be stored prior to downloading") # @@@ Make a Custom Exception pypi_pages = self.verify_and_sync_pages() for data in self.data.values(): try: if pypi_pages.get("has_sig"): simple_html = lxml.html.fromstring(pypi_pages["simple"]) simple_html.make_links_absolute(urlparse.urljoin(SIMPLE_URL, data["package"]) + "/") verified_md5_hashes = {} for link in simple_html.iterlinks(): m = _md5_re.search(link[2]) if m: url, md5_hash = m.groups() verified_md5_hashes[url] = md5_hash package = Package.objects.get(name=data["package"]) release = Release.objects.filter(package=package, version=data["version"]).select_for_update() for release_file in ReleaseFile.objects.filter(release=release, filename__in=[x["filename"] for x in data["files"]]).select_for_update(): file_data = [x for x in data["files"] if x["filename"] == release_file.filename][0] if pypi_pages.get("has_sig"): if verified_md5_hashes[file_data["file"]].lower() != file_data["digests"]["md5"].lower(): raise Exception("MD5 does not match simple API md5 [Verified by ServerSig]") # @@@ Custom Exception datastore_key = "crate:pypi:download:%(url)s" % {"url": file_data["file"]} stored_file_data = self.datastore.hgetall(datastore_key) headers = None if stored_file_data and self.skip_modified: # Stored data exists for this file if release_file.file: try: release_file.file.read() except __HOLE__: pass else: # We already have a file if stored_file_data["md5"].lower() == file_data["digests"]["md5"].lower(): # The supposed MD5 from PyPI matches our local headers = { "If-Modified-Since": stored_file_data["modified"], } resp = requests.get(file_data["file"], headers=headers, prefetch=True) if resp.status_code == 304: logger.info("[DOWNLOAD] skipping %(filename)s because it has not been modified" % {"filename": release_file.filename}) return logger.info("[DOWNLOAD] downloading %(filename)s" % {"filename": release_file.filename}) resp.raise_for_status() # Make sure the MD5 of the file we receive matched what we were told it is if hashlib.md5(resp.content).hexdigest().lower() != file_data["digests"]["md5"].lower(): raise PackageHashMismatch("%s does not match %s for %s %s" % ( hashlib.md5(resp.content).hexdigest().lower(), file_data["digests"]["md5"].lower(), file_data["type"], file_data["filename"], )) release_file.digest = "$".join(["sha256", hashlib.sha256(resp.content).hexdigest().lower()]) release_file.full_clean() release_file.file.save(file_data["filename"], ContentFile(resp.content), save=False) release_file.save() Event.objects.create( package=release_file.release.package.name, version=release_file.release.version, action=Event.ACTIONS.file_add, data={ "filename": release_file.filename, "digest": release_file.digest, "uri": release_file.get_absolute_url(), } ) # Store data relating to this file (if modified etc) stored_file_data = { "md5": file_data["digests"]["md5"].lower(), "modified": resp.headers.get("Last-Modified"), } if resp.headers.get("Last-Modified"): self.datastore.hmset(datastore_key, { "md5": file_data["digests"]["md5"].lower(), "modified": 
resp.headers["Last-Modified"], }) # Set a year expire on the key so that stale entries disappear self.datastore.expire(datastore_key, 31556926) else: self.datastore.delete(datastore_key) except requests.HTTPError: logger.exception("[DOWNLOAD ERROR]")
IOError
dataset/ETHPy150Open crate-archive/crate-site/crateweb/apps/pypi/processor.py/PyPIPackage.download
def verify_and_sync_pages(self): # Get the Server Key for PyPI if self.datastore.get(SERVERKEY_KEY): key = load_key(self.datastore.get(SERVERKEY_KEY)) else: serverkey = requests.get(SERVERKEY_URL, prefetch=True) key = load_key(serverkey.content) self.datastore.set(SERVERKEY_KEY, serverkey.content) try: # Download the "simple" page from PyPI for this package simple = requests.get(urlparse.urljoin(SIMPLE_URL, urllib.quote(self.name)), prefetch=True) simple.raise_for_status() except requests.HTTPError: if simple.status_code == 404: return {"has_sig": False} raise try: # Download the "serversig" page from PyPI for this package serversig = requests.get(urlparse.urljoin(SERVERSIG_URL, urllib.quote(self.name)), prefetch=True) serversig.raise_for_status() except requests.HTTPError: if serversig.status_code == 404: return {"has_sig": False} raise try: if not verify(key, simple.content, serversig.content): raise Exception("Simple API page does not match serversig") # @@@ This Should be Custom Exception except (__HOLE__, UnicodeEncodeError, ValueError): logger.exception("Exception trying to verify %s" % self.name) # @@@ Figure out a better way to handle this try: package = Package.objects.get(name=self.name) except Package.DoesNotExist: logger.exception("Error Trying To Verify %s (Querying Package)" % self.name) return simple_mirror, c = PyPIMirrorPage.objects.get_or_create(package=package, defaults={"content": simple.content}) if not c and simple_mirror.content != simple.content: simple_mirror.content = simple.content simple_mirror.save() serversig_mirror, c = PyPIServerSigPage.objects.get_or_create(package=package, defaults={"content": serversig.content.encode("base64")}) serversig_mirror.content = base64.b64encode(serversig.content) serversig_mirror.save() return { "simple": simple.content, "serversig": serversig.content, "has_sig": True, }
UnicodeDecodeError
dataset/ETHPy150Open crate-archive/crate-site/crateweb/apps/pypi/processor.py/PyPIPackage.verify_and_sync_pages
def validate(self, value):
    val = super(Integer, self).validate(value)
    if val is None:
        return
    try:
        return int(val)
    except (TypeError, __HOLE__):
        raise ValidationError("{0} {1} can't be converted to integral value".format(self.column_name, value))
ValueError
dataset/ETHPy150Open datastax/python-driver/cassandra/cqlengine/columns.py/Integer.validate
def validate(self, value):
    val = super(VarInt, self).validate(value)
    if val is None:
        return
    try:
        return int(val)
    except (TypeError, __HOLE__):
        raise ValidationError(
            "{0} {1} can't be converted to integral value".format(self.column_name, value))
ValueError
dataset/ETHPy150Open datastax/python-driver/cassandra/cqlengine/columns.py/VarInt.validate
def validate(self, value):
    val = super(UUID, self).validate(value)
    if val is None:
        return
    if isinstance(val, _UUID):
        return val
    if isinstance(val, six.string_types):
        try:
            return _UUID(val)
        except __HOLE__:
            # fall-through to error
            pass
    raise ValidationError("{0} {1} is not a valid uuid".format(
        self.column_name, value))
ValueError
dataset/ETHPy150Open datastax/python-driver/cassandra/cqlengine/columns.py/UUID.validate
def validate(self, value):
    value = super(BaseFloat, self).validate(value)
    if value is None:
        return
    try:
        return float(value)
    except (__HOLE__, ValueError):
        raise ValidationError("{0} {1} is not a valid float".format(self.column_name, value))
TypeError
dataset/ETHPy150Open datastax/python-driver/cassandra/cqlengine/columns.py/BaseFloat.validate
def paasta_log_line_passes_filter(line, levels, service, components, clusters):
    """Given a (JSON-formatted) log line, return True if the line should be
    displayed given the provided levels, components, and clusters; return False
    otherwise.
    """
    try:
        parsed_line = json.loads(line)
    except __HOLE__:
        log.debug('Trouble parsing line as json. Skipping. Line: %r' % line)
        return False
    return (
        parsed_line.get('level') in levels
        and parsed_line.get('component') in components
        and (
            parsed_line.get('cluster') in clusters
            or parsed_line.get('cluster') == ANY_CLUSTER
        )
    )
ValueError
dataset/ETHPy150Open Yelp/paasta/paasta_tools/cli/cmds/logs.py/paasta_log_line_passes_filter
def marathon_log_line_passes_filter(line, levels, service, components, clusters):
    """Given a (JSON-formatted) log line where the message is a Marathon log line,
    return True if the line should be displayed given the provided service;
    return False otherwise."""
    try:
        parsed_line = json.loads(line)
    except __HOLE__:
        log.debug('Trouble parsing line as json. Skipping. Line: %r' % line)
        return False
    return format_job_id(service, '') in parsed_line.get('message', '')
ValueError
dataset/ETHPy150Open Yelp/paasta/paasta_tools/cli/cmds/logs.py/marathon_log_line_passes_filter
def chronos_log_line_passes_filter(line, levels, service, components, clusters):
    """Given a (JSON-formatted) log line where the message is a Marathon log line,
    return True if the line should be displayed given the provided service;
    return False otherwise."""
    try:
        parsed_line = json.loads(line)
    except __HOLE__:
        log.debug('Trouble parsing line as json. Skipping. Line: %r' % line)
        return False
    return chronos_tools.compose_job_id(service, '') in parsed_line.get('message', '')
ValueError
dataset/ETHPy150Open Yelp/paasta/paasta_tools/cli/cmds/logs.py/chronos_log_line_passes_filter
def prettify_component(component):
    try:
        return LOG_COMPONENTS[component]['color']('[%s]' % component)
    except __HOLE__:
        return "UNPRETTIFIABLE COMPONENT %s" % component
KeyError
dataset/ETHPy150Open Yelp/paasta/paasta_tools/cli/cmds/logs.py/prettify_component
def prettify_log_line(line, requested_levels):
    """Given a line from the log, which is expected to be JSON and have all the
    things we expect, return a pretty formatted string containing relevant values.
    """
    pretty_line = ''
    try:
        parsed_line = json.loads(line)
        pretty_level = prettify_level(parsed_line['level'], requested_levels)
        pretty_line = "%(timestamp)s %(component)s %(cluster)s %(instance)s - %(level)s%(message)s" % ({
            'timestamp': prettify_timestamp(parsed_line['timestamp']),
            'component': prettify_component(parsed_line['component']),
            'cluster': '[%s]' % parsed_line['cluster'],
            'instance': '[%s]' % parsed_line['instance'],
            'level': '%s ' % pretty_level,
            'message': parsed_line['message'],
        })
    except __HOLE__:
        log.debug('Trouble parsing line as json. Skipping. Line: %r' % line)
        pretty_line = "Invalid JSON: %s" % line
    except KeyError:
        log.debug('JSON parsed correctly but was missing a key. Skipping. Line: %r' % line)
        pretty_line = "JSON missing keys: %s" % line
    return pretty_line


# The map of name -> LogReader subclasses, used by configure_log.
ValueError
dataset/ETHPy150Open Yelp/paasta/paasta_tools/cli/cmds/logs.py/prettify_log_line
def tail_logs(self, service, levels, components, clusters, raw_mode=False): """Sergeant function for spawning off all the right log tailing functions. NOTE: This function spawns concurrent processes and doesn't necessarily worry about cleaning them up! That's because we expect to just exit the main process when this function returns (as main() does). Someone calling this function directly with something like "while True: tail_paasta_logs()" may be very sad. NOTE: We try pretty hard to supress KeyboardInterrupts to prevent big useless stack traces, but it turns out to be non-trivial and we fail ~10% of the time. We decided we could live with it and we're shipping this to see how it fares in real world testing. Here are some things we read about this problem: * http://stackoverflow.com/questions/1408356/keyboard-interrupts-with-pythons-multiprocessing-pool * http://jtushman.github.io/blog/2014/01/14/python-%7C-multiprocessing-and-interrupts/ * http://bryceboe.com/2010/08/26/python-multiprocessing-and-keyboardinterrupt/ We could also try harder to terminate processes from more places. We could use process.join() to ensure things have a chance to die. We punted these things. It's possible this whole multiprocessing strategy is wrong-headed. If you are reading this code to curse whoever wrote it, see discussion in PAASTA-214 and https://reviewboard.yelpcorp.com/r/87320/ and feel free to implement one of the other options. """ scribe_envs = set([]) for cluster in clusters: scribe_envs.update(self.determine_scribereader_envs(components, cluster)) log.info("Would connect to these envs to tail scribe logs: %s" % scribe_envs) queue = Queue() spawned_processes = [] for scribe_env in scribe_envs: # Tail stream_paasta_<service> for build or deploy components if any([component in components for component in DEFAULT_COMPONENTS]): # Start a thread that tails scribe in this env kw = { 'scribe_env': scribe_env, 'stream_name': get_log_name_for_service(service), 'service': service, 'levels': levels, 'components': components, 'clusters': clusters, 'queue': queue, 'filter_fn': paasta_log_line_passes_filter, } process = Process(target=self.scribe_tail, kwargs=kw) spawned_processes.append(process) process.start() # Tail Marathon logs for the relevant clusters for this service if 'marathon' in components: for cluster in clusters: kw = { 'scribe_env': scribe_env, 'stream_name': 'stream_marathon_%s' % cluster, 'service': service, 'levels': levels, 'components': components, 'clusters': [cluster], 'queue': queue, 'parse_fn': parse_marathon_log_line, 'filter_fn': marathon_log_line_passes_filter, } process = Process(target=self.scribe_tail, kwargs=kw) spawned_processes.append(process) process.start() # Tail Chronos logs for the relevant clusters for this service if 'chronos' in components: for cluster in clusters: kw = { 'scribe_env': scribe_env, 'stream_name': 'stream_chronos_%s' % cluster, 'service': service, 'levels': levels, 'components': components, 'clusters': [cluster], 'queue': queue, 'parse_fn': parse_chronos_log_line, 'filter_fn': chronos_log_line_passes_filter, } process = Process(target=self.scribe_tail, kwargs=kw) spawned_processes.append(process) process.start() # Pull things off the queue and output them. If any thread dies we are no # longer presenting the user with the full picture so we quit. # # This is convenient for testing, where a fake scribe_tail() can emit a # fake log and exit. 
Without the thread aliveness check, we would just sit # here forever even though the threads doing the tailing are all gone. # # NOTE: A noisy tailer in one scribe_env (such that the queue never gets # empty) will prevent us from ever noticing that another tailer has died. while True: try: # This is a blocking call with a timeout for a couple reasons: # # * If the queue is empty and we get_nowait(), we loop very tightly # and accomplish nothing. # # * Testing revealed a race condition where print_log() is called # and even prints its message, but this action isn't recorded on # the patched-in print_log(). This resulted in test flakes. A short # timeout seems to soothe this behavior: running this test 10 times # with a timeout of 0.0 resulted in 2 failures; running it with a # timeout of 0.1 resulted in 0 failures. # # * There's a race where thread1 emits its log line and exits # before thread2 has a chance to do anything, causing us to bail # out via the Queue Empty and thread aliveness check. # # We've decided to live with this for now and see if it's really a # problem. The threads in test code exit pretty much immediately # and a short timeout has been enough to ensure correct behavior # there, so IRL with longer start-up times for each thread this # will surely be fine. # # UPDATE: Actually this is leading to a test failure rate of about # 1/10 even with timeout of 1s. I'm adding a sleep to the threads # in test code to smooth this out, then pulling the trigger on # moving that test to integration land where it belongs. line = queue.get(True, 0.1) print_log(line, levels, raw_mode) except Empty: try: # If there's nothing in the queue, take this opportunity to make # sure all the tailers are still running. running_processes = [tt.is_alive() for tt in spawned_processes] if not running_processes or not all(running_processes): log.warn('Quitting because I expected %d log tailers to be alive but only %d are alive.' % ( len(spawned_processes), running_processes.count(True), )) for process in spawned_processes: if process.is_alive(): process.terminate() break except KeyboardInterrupt: # Die peacefully rather than printing N threads worth of stack # traces. # # This extra nested catch is because it's pretty easy to be in # the above try block when the user hits Ctrl-C which otherwise # dumps a stack trace. log.warn('Terminating.') break except __HOLE__: # Die peacefully rather than printing N threads worth of stack # traces. log.warn('Terminating.') break
KeyboardInterrupt
dataset/ETHPy150Open Yelp/paasta/paasta_tools/cli/cmds/logs.py/ScribeLogReader.tail_logs
def scribe_tail(self, scribe_env, stream_name, service, levels, components, clusters, queue, filter_fn, parse_fn=None):
    """Creates a scribetailer for a particular environment.
    When it encounters a line that it should report, it sticks it into the
    provided queue.
    This code is designed to run in a thread as spawned by tail_paasta_logs().
    """
    try:
        log.debug("Going to tail %s scribe stream in %s" % (stream_name, scribe_env))
        host_and_port = scribereader.get_env_scribe_host(scribe_env, True)
        host = host_and_port['host']
        port = host_and_port['port']
        tailer = scribereader.get_stream_tailer(stream_name, host, port)
        for line in tailer:
            if parse_fn:
                line = parse_fn(line, clusters, service)
            if filter_fn(line, levels, service, components, clusters):
                queue.put(line)
    except __HOLE__:
        # Die peacefully rather than printing N threads worth of stack
        # traces.
        pass
    except StreamTailerSetupError:
        log.error("Failed to setup stream tailing for %s in %s" % (stream_name, scribe_env))
        log.error("Don't Panic! This can happen the first time a service is deployed because the log")
        log.error("doesn't exist yet. Please wait for the service to be deployed in %s and try again." % scribe_env)
        raise
KeyboardInterrupt
dataset/ETHPy150Open Yelp/paasta/paasta_tools/cli/cmds/logs.py/ScribeLogReader.scribe_tail
def guess(self, guessed_actor):
    try:
        actual_actor = next(self._actor_gen)
        if actual_actor == guessed_actor:
            self._good += 1
        else:
            self._bad += 1
        return actual_actor
    except __HOLE__:
        # End of scene
        self.add(self._act, self._scene, self._good, self._bad)
StopIteration
dataset/ETHPy150Open manahl/PythonTrainingExercises/Advanced/RomeoAndJuliet/util/result.py/Result.guess
def chi_square_p_value(matrix): """ Accepts a matrix (an array of arrays, where each child array represents a row) Example from http://math.hws.edu/javamath/ryan/ChiSquare.html: Suppose you conducted a drug trial on a group of animals and you hypothesized that the animals receiving the drug would survive better than those that did not receive the drug. You conduct the study and collect the following data: Ho: The survival of the animals is independent of drug treatment. Ha: The survival of the animals is associated with drug treatment. In that case, your matrix should be: [ [ Survivors in Test, Dead in Test ], [ Survivors in Control, Dead in Control ] ] Code adapted from http://codecomments.wordpress.com/2008/02/13/computing-chi-squared-p-value-from-contingency-table-in-python/ """ try: from scipy.stats import chisqprob except __HOLE__: from .stats import chisqprob num_rows = len(matrix) num_columns = len(matrix[0]) # Sanity checking if num_rows == 0: return None for row in matrix: if len(row) != num_columns: return None row_sums = [] # for each row for row in matrix: # add up all the values in the row row_sums.append(sum(row)) column_sums = [] # for each column i for i in range(num_columns): column_sum = 0.0 # get the i'th value from each row for row in matrix: column_sum += row[i] column_sums.append(column_sum) # the total sum could be calculated from either the rows or the columns # coerce to float to make subsequent division generate float results grand_total = float(sum(row_sums)) if grand_total <= 0: return None, None observed_test_statistic = 0.0 for i in range(num_rows): for j in range(num_columns): expected_value = (row_sums[i]/grand_total)*(column_sums[j]/grand_total)*grand_total if expected_value <= 0: return None, None observed_value = matrix[i][j] observed_test_statistic += ((observed_value - expected_value)**2) / expected_value degrees_freedom = (num_columns - 1) * (num_rows - 1) p_value = chisqprob(observed_test_statistic, degrees_freedom) return (observed_test_statistic, p_value)
ImportError
dataset/ETHPy150Open Miserlou/django-easy-split/easy_split/significance.py/chi_square_p_value
def __bootstrap(self): try: if _trace_hook: _sys.settrace(_trace_hook) if _profile_hook: _sys.setprofile(_profile_hook) try: self.run() except __HOLE__: pass except: # If sys.stderr is no more (most likely from interpreter # shutdown) use self.__stderr. Otherwise still use sys (as in # _sys) in case sys.stderr was redefined. if _sys: _sys.stderr.write("Exception in thread %s:" % self.getName()) _print_exc(file=_sys.stderr) else: # Do the best job possible w/o a huge amt. of code to # approx. a traceback stack trace exc_type, exc_value, exc_tb = self.__exc_info() try: print>>self.__stderr, ( "Exception in thread " + self.getName() + " (most likely raised during interpreter shutdown):") print>>self.__stderr, ( "Traceback (most recent call last):") while exc_tb: print>>self.__stderr, ( ' File "%s", line %s, in %s' % (exc_tb.tb_frame.f_code.co_filename, exc_tb.tb_lineno, exc_tb.tb_frame.f_code.co_name)) exc_tb = exc_tb.tb_next print>>self.__stderr, ("%s: %s" % (exc_type, exc_value)) # Make sure that exc_tb gets deleted since it is a memory # hog; deleting everything else is just for thoroughness finally: del exc_type, exc_value, exc_tb finally: self.__stop() try: self.__delete() except: pass
SystemExit
dataset/ETHPy150Open babble/babble/include/jython/Lib/threading.py/Thread.__bootstrap
def socketpair(family=None, type=SOCK_STREAM, proto=0):
    """socketpair([family[, type[, proto]]]) -> (socket object, socket object)

    Create a pair of socket objects from the sockets returned by the platform
    socketpair() function.
    The arguments are the same as for socket() except the default family is
    AF_UNIX if defined on the platform; otherwise, the default is AF_INET.
    """
    if family is None:
        try:
            family = AF_UNIX
        except __HOLE__:
            family = AF_INET
    a, b = _socket.socketpair(family, type, proto)
    a = socket(family, type, proto, a.detach())
    b = socket(family, type, proto, b.detach())
    return a, b
NameError
dataset/ETHPy150Open OpenCobolIDE/OpenCobolIDE/open_cobol_ide/extlibs/future/backports/socket.py/socketpair
def Link(self, name):
    """Update the repo metadata to use a different manifest.
    """
    self.Override(name)
    try:
        if os.path.lexists(self.manifestFile):
            os.remove(self.manifestFile)
        # os.symlink('manifests/%s' % name, self.manifestFile)
        portable.os_symlink('manifests/%s' % name, self.manifestFile)
    except __HOLE__ as e:
        raise ManifestParseError('cannot link manifest %s: %s' % (name, str(e)))
OSError
dataset/ETHPy150Open esrlabs/git-repo/manifest_xml.py/XmlManifest.Link
def _Load(self): if not self._loaded: m = self.manifestProject b = m.GetBranch(m.CurrentBranch).merge if b is not None and b.startswith(R_HEADS): b = b[len(R_HEADS):] self.branch = b nodes = [] nodes.append(self._ParseManifestXml(self.manifestFile, self.manifestProject.worktree)) local = os.path.join(self.repodir, LOCAL_MANIFEST_NAME) if os.path.exists(local): if not self.localManifestWarning: self.localManifestWarning = True print('warning: %s is deprecated; put local manifests in `%s` instead' % (LOCAL_MANIFEST_NAME, os.path.join(self.repodir, LOCAL_MANIFESTS_DIR_NAME)), file=sys.stderr) nodes.append(self._ParseManifestXml(local, self.repodir)) local_dir = os.path.abspath(os.path.join(self.repodir, LOCAL_MANIFESTS_DIR_NAME)) try: for local_file in sorted(os.listdir(local_dir)): if local_file.endswith('.xml'): local = os.path.join(local_dir, local_file) nodes.append(self._ParseManifestXml(local, self.repodir)) except __HOLE__: pass try: self._ParseManifest(nodes) except ManifestParseError as e: # There was a problem parsing, unload ourselves in case they catch # this error and try again later, we will show the correct error self._Unload() raise e if self.IsMirror: self._AddMetaProjectMirror(self.repoProject) self._AddMetaProjectMirror(self.manifestProject) self._loaded = True
OSError
dataset/ETHPy150Open esrlabs/git-repo/manifest_xml.py/XmlManifest._Load
def _ParseManifestXml(self, path, include_root): try: root = xml.dom.minidom.parse(path) except (__HOLE__, xml.parsers.expat.ExpatError) as e: raise ManifestParseError("error parsing manifest %s: %s" % (path, e)) if not root or not root.childNodes: raise ManifestParseError("no root node in %s" % (path,)) for manifest in root.childNodes: if manifest.nodeName == 'manifest': break else: raise ManifestParseError("no <manifest> in %s" % (path,)) nodes = [] for node in manifest.childNodes: # pylint:disable=W0631 # We only get here if manifest is initialised if node.nodeName == 'include': name = self._reqatt(node, 'name') fp = os.path.join(include_root, name) if not os.path.isfile(fp): raise ManifestParseError("include %s doesn't exist or isn't a file" % (name,)) try: nodes.extend(self._ParseManifestXml(fp, include_root)) # should isolate this to the exact exception, but that's # tricky. actual parsing implementation may vary. except (KeyboardInterrupt, RuntimeError, SystemExit): raise except Exception as e: raise ManifestParseError( "failed parsing included manifest %s: %s", (name, e)) else: nodes.append(node) return nodes
OSError
dataset/ETHPy150Open esrlabs/git-repo/manifest_xml.py/XmlManifest._ParseManifestXml