function | label | info
---|---|---
def import_app_sitetree_module(app):
    """Imports sitetree module from a given app."""
    module_name = settings.APP_MODULE_NAME
    module = import_module(app)
    try:
        sub_module = import_module('%s.%s' % (app, module_name))
        return sub_module
    except __HOLE__:
        if module_has_submodule(module, module_name):
            raise
        return None | ImportError | dataset/ETHPy150Open idlesign/django-sitetree/sitetree/utils.py/import_app_sitetree_module |
def get_app_n_model(settings_entry_name):
    """Returns tuple with application and tree[item] model class names."""
    try:
        app_name, model_name = getattr(settings, settings_entry_name).split('.')
    except __HOLE__:
        raise ImproperlyConfigured('`SITETREE_%s` must have the following format: `app_name.model_name`.' % settings_entry_name)
    return app_name, model_name | ValueError | dataset/ETHPy150Open idlesign/django-sitetree/sitetree/utils.py/get_app_n_model |
def get_model_class(settings_entry_name):
    """Returns a certain sitetree model as defined in the project settings."""
    app_name, model_name = get_app_n_model(settings_entry_name)
    if apps_get_model is None:
        model = get_model(app_name, model_name)
    else:
        try:
            model = apps_get_model(app_name, model_name)
        except (LookupError, __HOLE__):
            model = None
    if model is None:
        raise ImproperlyConfigured('`SITETREE_%s` refers to model `%s` that has not been installed.' % (settings_entry_name, model_name))
    return model | ValueError | dataset/ETHPy150Open idlesign/django-sitetree/sitetree/utils.py/get_model_class |
def _call(self, name, *args):
    # In the future (once we don't pass any weird arg, such as SessionId and so on), use JSON
    # First, serialize the data provided in the client side
    try:
        request_data = pickle.dumps(args)
    except:
        _, exc_instance, _ = sys.exc_info()
        raise InternalClientCommunicationError("Unknown client error contacting %s: %r" % (self.url, exc_instance))
    # Then, perform the request and deserialize the results
    t0 = time.time()
    try:
        kwargs = {}
        if name == 'test_me':
            kwargs['timeout'] = (10, 60)
        else:
            kwargs['timeout'] = (60, 600)
        content = requests.post(self.url + '/' + name, data = request_data, **kwargs).content
        result = pickle.loads(content)
    except:
        tf = time.time()
        _, exc_instance, _ = sys.exc_info()
        raise InternalServerCommunicationError("Unknown server error contacting %s with HTTP after %s seconds: %r" % (self.url, tf - t0, exc_instance))
    # result must be a dictionary which contains either 'result'
    # with the resulting object or 'is_error' and some data about
    # the exception
    if result.get('is_error'):
        error_type = result['error_type']
        error_args = result['error_args']
        if not isinstance(error_args, list) and not isinstance(error_args, tuple):
            error_args = [error_args]
        # If it's acceptable, raise the exception (e.g., don't raise a KeyboardInterrupt, a MemoryError, or a library error)
        if error_type.startswith(ACCEPTABLE_EXC_TYPES):
            exc_type = _load_type(error_type)
            try:
                exc_instance = exc_type(*error_args)
            except __HOLE__:
                # If we can't create it
                log.error(__name__, 'Error on instantiating an exception %s(%r)' % (exc_type, error_args))
                log.error_exc(__name__)
                raise InternalCapturedServerCommunicationError(error_type, error_args)
            else:
                raise exc_instance
        else:
            # Otherwise wrap it
            raise InternalCapturedServerCommunicationError(error_type, error_args)
    # No error? return the result
    return result['result'] | TypeError | dataset/ETHPy150Open weblabdeusto/weblabdeusto/server/src/voodoo/gen/clients.py/HttpClient._call |
def _raise_for_status(response):
    status = response.status_code
    if 400 <= response.status_code < 500:
        try:
            body = response.json()
            response.reason = body['errorMessage']
            return response.raise_for_status()
        except (__HOLE__, ValueError):
            pass
    try:
        response.reason = PEBBLE_CODES[status]
        return response.raise_for_status()
    except KeyError:
        pass
    response.raise_for_status() | KeyError | dataset/ETHPy150Open MarSoft/PebbleNotes/gae/pypebbleapi.repo/pypebbleapi/timeline.py/_raise_for_status |
def create_folders(folders):
    for folder in folders:
        try:
            new_folder = os.path.join(current_dir, 'devel', folder)
            print "Creating %s" % new_folder
            os.makedirs(new_folder)
        except __HOLE__:
            continue | OSError | dataset/ETHPy150Open comodit/synapse-agent/develop.py/create_folders |
def get_message(self):
    data = self.message
    if st_version == 3:
        return HTMLParser().unescape(data)
    else:
        try:
            data = data.decode('utf-8')
        except __HOLE__:
            data = data.decode(sublime.active_window().active_view().settings().get('fallback_encoding'))
        return HTMLParser().unescape(data) | UnicodeDecodeError | dataset/ETHPy150Open benmatselby/sublime-phpcs/phpcs.py/CheckstyleError.get_message |
@staticmethod
def import_json():
    """Import a module for JSON"""
    try:
        import json
    except __HOLE__:
        import simplejson as json
    else:
        return json | ImportError | dataset/ETHPy150Open mozilla/inventory/vendor-local/src/django-extensions/build/lib/django_extensions/management/commands/print_settings.py/Command.import_json |
def load_file(self, filename):
    try:
        with io.open(filename, 'r', encoding=config.get('encoding')) as f:
            return f.read()
    except __HOLE__ as e:
        raise I18nFileLoadError("error loading file {0}: {1}".format(filename, e.strerror)) | IOError | dataset/ETHPy150Open tuvistavie/python-i18n/i18n/loaders/loader.py/Loader.load_file |
def test_validation(self):
    obj = ValidatingDocument()
    try:
        obj.full_clean()
    except ValidationError as error:
        self.assertFalse('allow_null' in error.message_dict)
    else:
        self.fail('Validation is broken')
    obj.with_choices = 'c'
    obj.not_null = 'foo'
    obj.allow_blank = ''
    obj.not_blank = ''
    try:
        obj.subschema = 'Not a schema'
    except ValidationError as error:
        pass
    else:
        self.fail('Setting a subschema should evaluate immediately')
    try:
        obj.full_clean()
    except ValidationError as error:
        self.assertFalse('not_null' in error.message_dict, str(error))
        self.assertFalse('allow_blank' in error.message_dict, str(error))
        self.assertTrue('with_choices' in error.message_dict, str(error))
        self.assertTrue('not_blank' in error.message_dict, str(error))
        self.assertTrue('subschema' in error.message_dict, str(error))
    else:
        self.fail('Validation is broken')
    try:
        obj.subschema = SimpleSchema2()
    except __HOLE__ as error:
        pass
    else:
        self.fail('Setting a subschema should evaluate immediately')
    obj.with_choices = 'b'
    obj.not_blank = 'foo'
    obj.subschema = SimpleSchema()
    obj.full_clean() | ValidationError | dataset/ETHPy150Open zbyte64/django-dockit/dockit/tests/schema/schema_tests.py/DocumentValidationTestChase.test_validation |
def get(self, name=None, arg=None):
    """Handle GET."""
    if name == 'summary':
        summary = summary_module.GetComputerSummary()
        models.ReportsCache.SetStatsSummary(summary)
    elif name == 'installcounts':
        _GenerateInstallCounts()
    elif name == 'trendinginstalls':
        if arg:
            try:
                kwargs = {'since_hours': int(arg)}
            except __HOLE__:
                kwargs = {}
        else:
            kwargs = {}
        _GenerateTrendingInstallsCache(**kwargs)
    elif name == 'pendingcounts':
        self._GeneratePendingCounts()
    elif name == 'msu_user_summary':
        if arg:
            try:
                kwargs = {'since_days': int(arg)}
            except ValueError:
                kwargs = {}
        else:
            kwargs = {}
        self._GenerateMsuUserSummary(**kwargs)
    else:
        logging.warning('Unknown ReportsCache cron requested: %s', name)
        self.response.set_status(404) | ValueError | dataset/ETHPy150Open google/simian/src/simian/mac/cron/reports_cache.py/ReportsCache.get |
def acquire_next(self):
    """Return the next waiting request, if any.
    In-page requests are returned first.
    """
    try:
        request = self._wait_inpage_queue.popleft()
    except __HOLE__:
        try:
            request = self._wait_queue.popleft()
        except IndexError:
            return
    return self.acquire(request) | IndexError | dataset/ETHPy150Open brandicted/scrapy-webdriver/scrapy_webdriver/manager.py/WebdriverManager.acquire_next |
def using_git(cwd):
    """Test whether the directory cwd is contained in a git repository."""
    try:
        git_log = shell_out(["git", "log"], cwd=cwd)
        return True
    except (CalledProcessError, __HOLE__):  # pragma: no cover
        return False | OSError | dataset/ETHPy150Open hayd/pep8radius/pep8radius/vcs.py/using_git |
def using_hg(cwd):
    """Test whether the directory cwd is contained in a mercurial
    repository."""
    try:
        hg_log = shell_out(["hg", "log"], cwd=cwd)
        return True
    except (CalledProcessError, __HOLE__):
        return False | OSError | dataset/ETHPy150Open hayd/pep8radius/pep8radius/vcs.py/using_hg |
def using_bzr(cwd):
    """Test whether the directory cwd is contained in a bazaar repository."""
    try:
        bzr_log = shell_out(["bzr", "log"], cwd=cwd)
        return True
    except (CalledProcessError, __HOLE__):
        return False | OSError | dataset/ETHPy150Open hayd/pep8radius/pep8radius/vcs.py/using_bzr |
@staticmethod
def from_string(vc):
    """Return the VersionControl superclass from a string, for example
    VersionControl.from_string('git') will return Git."""
    try:
        # Note: this means all version controls must have
        # a title naming convention (!)
        vc = globals()[vc.title()]
        assert(issubclass(vc, VersionControl))
        return vc
    except (KeyError, __HOLE__):
        raise NotImplementedError("Unknown version control system.") | AssertionError | dataset/ETHPy150Open hayd/pep8radius/pep8radius/vcs.py/VersionControl.from_string |
def form(self):
    fields = OrderedDict((
        ('required_css_class', 'required'),
        ('error_css_class', 'error'),
    ))
    for field in self.fields.all():
        field.add_formfield(fields, self)
    validators = []
    cfg = dict(self.CONFIG_OPTIONS)
    for key, config in self.config.items():
        try:
            validators.append(cfg[key]['validate'])
        except __HOLE__:
            pass
    class Form(forms.Form):
        def clean(self):
            data = super(Form, self).clean()
            for validator in validators:
                validator(self, data)
            return data
    return type('Form%s' % self.pk, (Form,), fields) | KeyError | dataset/ETHPy150Open feincms/form_designer/form_designer/models.py/Form.form |
def process(self, form, request):
    ret = {}
    cfg = dict(self.CONFIG_OPTIONS)
    for key, config in self.config.items():
        try:
            process = cfg[key]['process']
        except __HOLE__:
            # ignore configs without process methods
            continue
        ret[key] = process(
            model_instance=self,
            form_instance=form,
            request=request,
            config=config)
    return ret | KeyError | dataset/ETHPy150Open feincms/form_designer/form_designer/models.py/Form.process |
def main():
    """
    %prog [-c] filename username
    %prog -b[c] filename username password
    %prog -D filename username
    """
    # For now, we only care about the use cases that affect tests/functional.py
    parser = optparse.OptionParser(usage=main.__doc__)
    parser.add_option('-b', action='store_true', dest='batch', default=False,
                      help='Batch mode; password is passed on the command line IN THE CLEAR.'
                      )
    parser.add_option('-c', action='store_true', dest='create', default=False,
                      help='Create a new htpasswd file, overwriting any existing file.')
    parser.add_option('-D', action='store_true', dest='delete_user',
                      default=False, help='Remove the given user from the password file.')
    if len(sys.argv) <= 1:
        parser.print_help()
        sys.exit(1)
    options, args = parser.parse_args()
    def syntax_error(msg):
        """Utility function for displaying fatal error messages with usage
        help.
        """
        printerr("Syntax error: " + msg, newline=True)
        printerr(parser.format_help(), newline=True)
        sys.exit(1)
    # Non-option arguments
    if len(args) < 2:
        syntax_error("Insufficient number of arguments.\n")
    filename, username = args[:2]
    password = None
    if options.delete_user:
        if len(args) != 2:
            syntax_error("Incorrect number of arguments.\n")
    else:
        if len(args) == 3 and options.batch:
            password = args[2]
        elif len(args) == 2 and not options.batch:
            first = getpass.getpass("New password:")
            second = getpass.getpass("Re-type new password:")
            if first == second:
                password = first
            else:
                printout("htpasswd: password verification error")
                return
        else:
            syntax_error("Incorrect number of arguments.\n")
    try:
        passwdfile = HtpasswdFile(filename, create=options.create)
    except __HOLE__:
        syntax_error("File not found.\n")
    else:
        if options.delete_user:
            passwdfile.delete(username)
        else:
            passwdfile.update(username, password)
        passwdfile.save() | IOError | dataset/ETHPy150Open edgewall/trac/contrib/htpasswd.py/main |
def install(show = False, force = False):
    git_exec_path = subprocess.Popen(["git", "--exec-path"],
                                     stdout = subprocess.PIPE).communicate()[0].strip()
    installed_link_path = os.path.join(git_exec_path, 'git-sap')
    if show:
        print(os.path.realpath(installed_link_path))
        return
    recreate = force and os.path.exists(installed_link_path)
    if recreate:
        try:
            os.remove(installed_link_path)
        except OSError as e:
            usage("failed to remove old symlink: %s", e)
    if not os.path.exists(installed_link_path):
        try:
            os.symlink(os.path.abspath(sys.argv[0]), installed_link_path)
            print("symlink %s at: %s" % ("re-installed" if recreate else "installed",
                                         installed_link_path))
        except __HOLE__ as e:
            usage("failed to install symlink: %s", e)
    else:
        print("symlink exists: %s" % installed_link_path) | OSError | dataset/ETHPy150Open jsirois/sapling/sapling.py/install |
def main():
    (options, args, ferror) = parse_args()
    if options.subcommand is "install":
        if len(args) != 0:
            ferror("list takes no arguments")
        install(options.show, options.force)
        return
    # Fail fast if we're not in a repo
    repo = open_repo(options.native)
    if options.debug:
        print("repo\t[%s]\t%s" % (repo.active_branch, repo.working_tree_dir))
    if options.subcommand is "list":
        # Fail fast if we don't have an invalid .saplings config
        split_config = open_config(repo)
        if len(args) != 0:
            ferror("list takes no arguments")
        list(repo, split_config, options.verbose)
    elif options.subcommand is "split":
        if options.branch:
            if len(args) == 0:
                ferror("At least 1 split path must be specified")
            try:
                splits = [ saplib.Split(repo, options.branch, args) ]
            except __HOLE__ as e:
                ferror(e)
        else:
            if len(args) == 0:
                ferror("At least 1 split must be specified")
            splits_by_name = open_config(repo).splits
            try:
                splits = [ splits_by_name[name] for name in args ]
            except KeyError as e:
                ferror("Split not defined: %s" % e)
        split(splits, options.verbose, options.dry_run) | KeyError | dataset/ETHPy150Open jsirois/sapling/sapling.py/main |
def __eq__(self, other):
    try:
        other = Point(other).value
    except __HOLE__:
        pass
    return self.value.__eq__(other) | ValueError | dataset/ETHPy150Open jerith/depixel/depixel/bspline.py/Point.__eq__ |
def _source(b, r, dirname, old_cwd):
    tmpname = os.path.join(os.getcwd(), dirname[1:].replace('/', '-'))
    exclude = []
    pattern_pip = re.compile(r'\.egg-info/installed-files.txt$')
    pattern_egg = re.compile(r'\.egg(?:-info)?(?:/|$)')
    pattern_pth = re.compile(
        r'lib/python[^/]+/(?:dist|site)-packages/easy-install.pth$')
    pattern_bin = re.compile(
        r'EASY-INSTALL(?:-ENTRY)?-SCRIPT|This file was generated by RubyGems')
    # Create a partial shallow copy of the directory.
    for dirpath, dirnames, filenames in os.walk(dirname):
        # Definitely ignore the shallow copy directory.
        if dirpath.startswith(tmpname):
            continue
        # Determine if this entire directory should be ignored by default.
        ignored = r.ignore_file(dirpath)
        dirpath2 = os.path.normpath(
            os.path.join(tmpname, os.path.relpath(dirpath, dirname)))
        # Create this directory in the shallow copy with matching mode, owner,
        # and owning group.  Suggest running as `root` if this doesn't work.
        os.mkdir(dirpath2)
        s = os.lstat(dirpath)
        try:
            try:
                os.lchown(dirpath2, s.st_uid, s.st_gid)
            except OverflowError:
                logging.warning('{0} has uid:gid {1}:{2} - using chown(1)'.
                                format(dirpath, s.st_uid, s.st_gid))
                p = subprocess.Popen(['chown',
                                      '{0}:{1}'.format(s.st_uid, s.st_gid),
                                      dirpath2],
                                     close_fds=True)
                p.communicate()
            os.chmod(dirpath2, s.st_mode)
        except OSError as e:
            logging.warning('{0} caused {1} - try running as root'.
                            format(dirpath, errno.errorcode[e.errno]))
            return
        for filename in filenames:
            pathname = os.path.join(dirpath, filename)
            if r.ignore_source(pathname, ignored):
                continue
            pathname2 = os.path.join(dirpath2, filename)
            # Exclude files that are part of the RubyGems package.
            for globname in (
                    os.path.join('/usr/lib/ruby/gems/*/gems/rubygems-update-*/lib',
                                 pathname[1:]),
                    os.path.join('/var/lib/gems/*/gems/rubygems-update-*/lib',
                                 pathname[1:])):
                if 0 < len(glob.glob(globname)):
                    continue
            # Remember the path to all of `pip`'s `installed_files.txt` files.
            if pattern_pip.search(pathname):
                exclude.extend([os.path.join(dirpath2, line.rstrip())
                                for line in open(pathname)])
            # Likewise remember the path to Python eggs.
            if pattern_egg.search(pathname):
                exclude.append(pathname2)
            # Exclude `easy_install`'s bookkeeping file, too.
            if pattern_pth.search(pathname):
                continue
            # Exclude executable placed by Python packages or RubyGems.
            if pathname.startswith('/usr/local/bin/'):
                try:
                    if pattern_bin.search(open(pathname).read()):
                        continue
                except IOError as e:
                    pass
            # Exclude share/applications/mimeinfo.cache, whatever that is.
            if '/usr/local/share/applications/mimeinfo.cache' == pathname:
                continue
            # Clean up dangling symbolic links.  This makes the assumption
            # that no one intends to leave dangling symbolic links hanging
            # around, which I think is a good assumption.
            s = os.lstat(pathname)
            if stat.S_ISLNK(s.st_mode):
                try:
                    os.stat(pathname)
                except OSError as e:
                    if errno.ENOENT == e.errno:
                        logging.warning('ignored dangling symbolic link {0}'.
                                        format(pathname))
                        continue
            # Hard link this file into the shallow copy.  Suggest running as
            # `root` if this doesn't work though in practice the check above
            # will have already caught this problem.
            try:
                os.link(pathname, pathname2)
            except OSError as e:
                logging.warning('{0} caused {1} - try running as root'.
                                format(pathname, errno.errorcode[e.errno]))
                return
    # Unlink files that were remembered for exclusion above.
    for pathname in exclude:
        try:
            os.unlink(pathname)
        except OSError as e:
            if e.errno not in (errno.EISDIR, errno.ENOENT):
                raise e
    # Remove empty directories.  For any that hang around, match their
    # access and modification times to the source, otherwise the hash of
    # the tarball will not be deterministic.
    for dirpath, dirnames, filenames in os.walk(tmpname, topdown=False):
        try:
            os.rmdir(dirpath)
        except __HOLE__:
            s = os.lstat(os.path.join(dirname, os.path.relpath(dirpath,
                                                               tmpname)))
            os.utime(dirpath, (s.st_atime, s.st_mtime))
    # If the shallow copy of still exists, create a tarball named by its
    # SHA1 sum and include it in the blueprint.
    try:
        tar = tarfile.open('tmp.tar', 'w')
        tar.add(tmpname, '.')
    except OSError:
        return
    finally:
        tar.close()
    sha1 = hashlib.sha1()
    f = open('tmp.tar', 'r')
    [sha1.update(buf) for buf in iter(lambda: f.read(4096), '')]
    f.close()
    tarname = '{0}.tar'.format(sha1.hexdigest())
    shutil.move('tmp.tar', os.path.join(old_cwd, tarname))
    b.add_source(dirname, tarname) | OSError | dataset/ETHPy150Open devstructure/blueprint/blueprint/backend/sources.py/_source |
def sources(b, r):
    logging.info('searching for software built from source')
    for pathname, negate in r['source']:
        if negate and os.path.isdir(pathname) \
                and not r.ignore_source(pathname):
            # Note before creating a working directory within pathname what
            # it's atime and mtime should be.
            s = os.lstat(pathname)
            # Create a working directory within pathname to avoid potential
            # EXDEV when creating the shallow copy and tarball.
            try:
                with context_managers.mkdtemp(pathname) as c:
                    # Restore the parent of the working directory to its
                    # original atime and mtime, as if pretending the working
                    # directory never actually existed.
                    os.utime(pathname, (s.st_atime, s.st_mtime))
                    # Create the shallow copy and possibly tarball of the
                    # relevant parts of pathname.
                    _source(b, r, pathname, c.cwd)
                    # Once more restore the atime and mtime after the working
                    # directory is destroyed.
                    os.utime(pathname, (s.st_atime, s.st_mtime))
            # If creating the temporary directory fails, bail with a warning.
            except __HOLE__ as e:
                logging.warning('{0} caused {1} - try running as root'.
                                format(pathname, errno.errorcode[e.errno]))
    if 0 < len(b.sources):
        b.arch = util.arch() | OSError | dataset/ETHPy150Open devstructure/blueprint/blueprint/backend/sources.py/sources |
def generate_qitest_json(self):
    """ The qitest.cmake is written from CMake """
    qitest_cmake_path = os.path.join(self.build_directory, "qitest.cmake")
    tests = list()
    if os.path.exists(qitest_cmake_path):
        with open(qitest_cmake_path, "r") as fp:
            lines = fp.readlines()
    else:
        lines = list()
    parser = argparse.ArgumentParser()
    parser.add_argument("cmd", nargs="+")
    parser.add_argument("--name", required=True)
    parser.add_argument("--gtest", action="store_true",
                        help="Tell qitest this is a test using gtest")
    parser.add_argument("--timeout", type=int)
    parser.add_argument("--nightly", action="store_true")
    parser.add_argument("--perf", action="store_true")
    parser.add_argument("--working-directory")
    parser.add_argument("--env", action="append",
                        dest="environment")
    parser.set_defaults(nightly=False, perf=False)
    def log_error(message, line):
        test_name = line.split(";")[1]
        mess = "Could not parse test options for test: '%s'\n" % test_name
        mess += "Error was: %s" % message
        ui.error(mess)
    for line in lines:
        parser.error = lambda message : log_error(message, line)
        line = line.strip()
        try:
            args = parser.parse_args(args=line.split(";"))
        except __HOLE__:
            break
        test = vars(args)
        as_list = test["environment"]
        if as_list:
            test["environment"] = dict(x.split("=") for x in as_list)
        tests.append(test)
    with open(self.qitest_json, "w") as fp:
        json.dump(tests, fp, indent=2) | SystemExit | dataset/ETHPy150Open aldebaran/qibuild/python/qibuild/project.py/BuildProject.generate_qitest_json |
def rebuild(filename, tag=None, format="gz", zonegroups=[], metadata=None):
    """Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar*
    filename is the timezone tarball from ftp.iana.org/tz.
    """
    tmpdir = tempfile.mkdtemp()
    zonedir = os.path.join(tmpdir, "zoneinfo")
    moduledir = os.path.dirname(__file__)
    try:
        with tar_open(filename) as tf:
            for name in zonegroups:
                tf.extract(name, tmpdir)
            filepaths = [os.path.join(tmpdir, n) for n in zonegroups]
            try:
                check_call(["zic", "-d", zonedir] + filepaths)
            except __HOLE__ as e:
                _print_on_nosuchfile(e)
                raise
        # write metadata file
        with open(os.path.join(zonedir, METADATA_FN), 'w') as f:
            json.dump(metadata, f, indent=4, sort_keys=True)
        target = os.path.join(moduledir, ZONEFILENAME)
        with tar_open(target, "w:%s" % format) as tf:
            for entry in os.listdir(zonedir):
                entrypath = os.path.join(zonedir, entry)
                tf.add(entrypath, entry)
    finally:
        shutil.rmtree(tmpdir) | OSError | dataset/ETHPy150Open SickRage/SickRage/lib/dateutil/zoneinfo/rebuild.py/rebuild |
def set_key(target, key, value=None):
    ctx = get_context()
    # clearing key
    if value is None:
        try:
            del target.metadata[key]
            target.user_set_metadata.remove(key)
        except (KeyError, __HOLE__):
            pass
    # setting key
    else:
        target.metadata[key] = value
        restricted_keys = ctx.conf.metadata.get('restricted_keys', [])
        if key not in target.user_set_metadata and key not in restricted_keys:
            target.user_set_metadata.append(key)
            target.metadata[key] = value | ValueError | dataset/ETHPy150Open mammon-ircd/mammon/mammon/core/ircv3/metadata.py/set_key |
def testLock():
    LOOPS = 50
    # The import lock may already be held, e.g. if the test suite is run
    # via "import test.autotest".
    lock_held_at_start = imp.lock_held()
    verify_lock_state(lock_held_at_start)
    for i in range(LOOPS):
        imp.acquire_lock()
    verify_lock_state(True)
    for i in range(LOOPS):
        imp.release_lock()
    # The original state should be restored now.
    verify_lock_state(lock_held_at_start)
    if not lock_held_at_start:
        try:
            imp.release_lock()
        except __HOLE__:
            pass
        else:
            raise TestFailed("release_lock() without lock should raise "
                             "RuntimeError") | RuntimeError | dataset/ETHPy150Open babble/babble/include/jython/Lib/test/test_imp.py/testLock |
def _have_module(module_name):
    try:
        import_module(module_name)
        return True
    except __HOLE__:
        return False | ImportError | dataset/ETHPy150Open pydata/pandas/pandas/io/tests/test_html.py/_have_module |
@network
def test_invalid_url(self):
    try:
        with tm.assertRaises(URLError):
            self.read_html('http://www.a23950sdfa908sd.com',
                           match='.*Water.*')
    except __HOLE__ as e:
        tm.assert_equal(str(e), 'No tables found') | ValueError | dataset/ETHPy150Open pydata/pandas/pandas/io/tests/test_html.py/TestReadHtml.test_invalid_url |
@slow
def test_banklist_header(self):
    from pandas.io.html import _remove_whitespace
    def try_remove_ws(x):
        try:
            return _remove_whitespace(x)
        except __HOLE__:
            return x
    df = self.read_html(self.banklist_data, 'Metcalf',
                        attrs={'id': 'table'})[0]
    ground_truth = read_csv(os.path.join(DATA_PATH, 'banklist.csv'),
                            converters={'Updated Date': Timestamp,
                                        'Closing Date': Timestamp})
    self.assertEqual(df.shape, ground_truth.shape)
    old = ['First Vietnamese American BankIn Vietnamese',
           'Westernbank Puerto RicoEn Espanol',
           'R-G Premier Bank of Puerto RicoEn Espanol',
           'EurobankEn Espanol', 'Sanderson State BankEn Espanol',
           'Washington Mutual Bank(Including its subsidiary Washington '
           'Mutual Bank FSB)',
           'Silver State BankEn Espanol',
           'AmTrade International BankEn Espanol',
           'Hamilton Bank, NAEn Espanol',
           'The Citizens Savings BankPioneer Community Bank, Inc.']
    new = ['First Vietnamese American Bank', 'Westernbank Puerto Rico',
           'R-G Premier Bank of Puerto Rico', 'Eurobank',
           'Sanderson State Bank', 'Washington Mutual Bank',
           'Silver State Bank', 'AmTrade International Bank',
           'Hamilton Bank, NA', 'The Citizens Savings Bank']
    dfnew = df.applymap(try_remove_ws).replace(old, new)
    gtnew = ground_truth.applymap(try_remove_ws)
    converted = dfnew._convert(datetime=True, numeric=True)
    date_cols = ['Closing Date', 'Updated Date']
    converted[date_cols] = converted[date_cols]._convert(datetime=True,
                                                         coerce=True)
    tm.assert_frame_equal(converted, gtnew) | AttributeError | dataset/ETHPy150Open pydata/pandas/pandas/io/tests/test_html.py/TestReadHtml.test_banklist_header |
def GetAPIScope(api_name):
    """Retrieves the scope for the given API name.
    Args:
      api_name: A string identifying the name of the API we want to retrieve a
        scope for.
    Returns:
      A string that is the scope for the given API name.
    Raises:
      GoogleAdsValueError: If the given api_name is invalid; accepted valus are
        "adwords" and "dfp".
    """
    try:
        return SCOPES[api_name]
    except __HOLE__:
        raise googleads.errors.GoogleAdsValueError(
            'Invalid API name "%s" provided. Acceptable values are: %s'
            % (api_name, SCOPES.keys())) | KeyError | dataset/ETHPy150Open googleads/googleads-python-lib/googleads/oauth2.py/GetAPIScope |
def __init__(self, scope, client_email, key_file,
             private_key_password='notasecret', sub=None, proxy_info=None,
             disable_ssl_certificate_validation=False, ca_certs=None):
    """Initializes a GoogleServiceAccountClient.
    Args:
      scope: The scope of the API you're authorizing for.
      client_email: A string containing your Service Account's email.
      key_file: A string containing the path to your key file.
      [optional]
      private_key_password: A string containing the password for your key file.
      sub: A string containing the email address of a user account you want to
        impersonate.
      proxy_info: A ProxyInfo instance identifying the proxy used for all
        requests.
      disable_ssl_certificate_validation: A boolean indicating whether ssl
        certificate validation should be disabled while using a proxy.
      ca_certs: A string identifying the path to a file containing root CA
        certificates for SSL server certificate validation.
    Raises:
      GoogleAdsValueError: If the given key file does not exist.
    """
    try:
        with open(key_file, 'rb') as f:
            private_key = f.read()
    except __HOLE__:
        raise googleads.errors.GoogleAdsValueError('The specified key file (%s)'
                                                   ' does not exist.' % key_file)
    self.oauth2credentials = oauth2client.client.SignedJwtAssertionCredentials(
        client_email, private_key, scope,
        private_key_password=private_key_password,
        user_agent=self._USER_AGENT, sub=sub)
    self.proxy_info = proxy_info
    self.disable_ssl_certificate_validation = disable_ssl_certificate_validation
    self.ca_certs = ca_certs
    self.Refresh() | IOError | dataset/ETHPy150Open googleads/googleads-python-lib/googleads/oauth2.py/GoogleServiceAccountClient.__init__ |
def open(self):
    '''
    Build the base query object for this wrapper,
    then add all of the counters required for the query.
    Raise a QueryError if we can't complete the functions.
    If we are already open, then do nothing.
    '''
    if not self.active:  # to prevent having multiple open queries
        # curpaths are made accessible here because of the possibility of volatile paths
        # which may be dynamically altered by subclasses.
        self.curpaths = copy.copy(self.paths)
        try:
            base = win32pdh.OpenQuery()
            for path in self.paths:
                try:
                    self.counters.append(win32pdh.AddCounter(base, path))
                except win32api.error:  # we passed a bad path
                    self.counters.append(0)
                    pass
            self._base = base
            self.active = 1
            return 0  # open succeeded
        except:  # if we encounter any errors, kill the Query
            try:
                self.killbase(base)
            except __HOLE__:  # failed in creating query
                pass
            self.active = 0
            self.curpaths = []
            raise QueryError(self)
    return 1  # already open | NameError | dataset/ETHPy150Open eBay/restcommander/play-1.2.4/python/Lib/site-packages/win32/lib/win32pdhquery.py/BaseQuery.open |
def close(self):
    '''
    Makes certain that the underlying query object has been closed,
    and that all counters have been removed from it. This is
    important for reference counting.
    You should only need to call close if you have previously called
    open. The collectdata methods all can handle opening and
    closing the query. Calling close multiple times is acceptable.
    '''
    try:
        self.killbase(self._base)
    except __HOLE__:
        self.killbase() | AttributeError | dataset/ETHPy150Open eBay/restcommander/play-1.2.4/python/Lib/site-packages/win32/lib/win32pdhquery.py/BaseQuery.close |
def getinstpaths(self,object,counter,machine=None,objtype='Process',format = win32pdh.PDH_FMT_LONG):
    '''
    ### Not an end-user function
    Calculate the paths for an instance object. Should alter
    to allow processing for lists of object-counter pairs.
    '''
    items, instances = win32pdh.EnumObjectItems(None,None,objtype, -1)
    # find out how many instances of this element we have...
    instances.sort()
    try:
        cur = instances.index(object)
    except __HOLE__:
        return []  # no instances of this object
    temp = [object]
    try:
        while instances[cur+1] == object:
            temp.append(object)
            cur = cur+1
    except IndexError:  # if we went over the end
        pass
    paths = []
    for ind in range(len(temp)):
        # can this raise an error?
        paths.append(win32pdh.MakeCounterPath( (machine,'Process',object,None,ind,counter) ) )
    return paths  # should also return the number of elements for naming purposes | ValueError | dataset/ETHPy150Open eBay/restcommander/play-1.2.4/python/Lib/site-packages/win32/lib/win32pdhquery.py/Query.getinstpaths |
@click.command()
@click.argument('message_file_path')
def commit_msg(message_file_path):
    """
    This hook is invoked by git commit, and can be bypassed with --no-verify option. It takes a single parameter,
    the name of the file that holds the proposed commit log message.
    Exiting with non-zero status causes the git commit to abort.
    :param message_file_path: the name of the file that holds the proposed commit log message
    :type message_file_path: string
    """
    repository = git.Repo()
    branch = repository.active_branch.name
    user_configuration = config.UserConfiguration()
    logger = output.get_root_logger('commit-msg')
    logger.setLevel(user_configuration.verbosity)
    logger.debug('Starting Commit-Msg Hook')
    logger.debug('Path to commit message file: %s', message_file_path)
    logger.debug('Repository Working Dir: %s', repository.working_dir)
    logger.debug('Current branch: %s', branch)
    try:
        repository_configuration = config.load_repository_configuration(repository.working_dir)
    except __HOLE__ as e:
        logger.error(str(e))
        raise click.Abort
    logger.debug('Loaded repository configuration: %s', repository_configuration['CONFIG_FILE'])
    logger.debug('Opening commit message file')
    try:
        with open(message_file_path) as message_file:
            str_commit_message = message_file.read()
    except IOError:
        logger.error('Commit message file (%s) not found', message_file_path)
        raise click.Abort
    logger.debug('Commit Message: %s', str_commit_message)
    commit_message = message.CommitMessage(branch, str_commit_message)
    failed_checks = checks.run_checks('commit_msg', user_configuration, repository_configuration, commit_message)
    if failed_checks:
        s = '' if failed_checks == 1 else 's'
        logger.error('%d check%s failed', failed_checks, s)
    sys.exit(failed_checks) | ValueError | dataset/ETHPy150Open zalando/turnstile/turnstile/commit_msg.py/commit_msg |
def __getitem__(self, key):
    try:
        return dict.__getitem__(self, key)
    except __HOLE__:
        return self.default | KeyError | dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_descrtut.py/defaultdict.__getitem__ |
def __getitem__(self, key):
    try:
        return dict.__getitem__(self, key)
    except __HOLE__:
        return self.default | KeyError | dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_descrtut.py/defaultdict2.__getitem__ |
def convert_data(_data):
    map = {
        DBAnnotation.vtType: Annotation,
        DBAbstraction.vtType: Abstraction,
        DBConnection.vtType: Connection,
        DBLocation.vtType: Location,
        DBModule.vtType: Module,
        DBFunction.vtType: ModuleFunction,
        DBGroup.vtType: Group,
        DBParameter.vtType: ModuleParam,
        DBPluginData.vtType: PluginData,
        DBPort.vtType: Port,
        DBPortSpec.vtType: PortSpec,
        DBControlParameter.vtType: ModuleControlParam,
    }
    try:
        map[_data.vtType].convert(_data)
    except __HOLE__:
        raise TypeError('cannot convert data of type %s' % _data.vtType) | KeyError | dataset/ETHPy150Open VisTrails/VisTrails/vistrails/core/vistrail/operation.py/convert_data |
def choices_from_params(self):
    out = []
    for p in self.params.getlist(self.query_param):
        try:
            choice = self.choice_from_param(p)
            out.append(choice)
        except __HOLE__:
            pass
    for p in self.params.getlist(self.query_param + '--isnull'):
        out.append(self.choice_from_param(None))
    return out | ValueError | dataset/ETHPy150Open ionelmc/django-easyfilters/src/django_easyfilters/filters.py/Filter.choices_from_params |
def choice_from_param(self, param):
    """
    Returns a native Python object representing something that has been
    chosen for a filter, converted from the string value in param.
    """
    try:
        return self.field_obj.to_python(param)
    except __HOLE__:
        raise ValueError() | ValidationError | dataset/ETHPy150Open ionelmc/django-easyfilters/src/django_easyfilters/filters.py/Filter.choice_from_param |
def choice_from_param(self, param):
    try:
        return self.rel_field.to_python(param)
    except __HOLE__:
        raise ValueError() | ValidationError | dataset/ETHPy150Open ionelmc/django-easyfilters/src/django_easyfilters/filters.py/RelatedObjectMixin.choice_from_param |
def make_numeric_range_choice(to_python, to_str):
    """
    Returns a Choice class that represents a numeric choice range,
    using the passed in 'to_python' and 'to_str' callables to do
    conversion to/from native data types.
    """
    @python_2_unicode_compatible
    @total_ordering
    class NumericRangeChoice(object):
        def __init__(self, values):
            # Values are instances of RangeEnd
            self.values = tuple(values)
        def display(self):
            return '-'.join([str(v.value) for v in self.values])
        @classmethod
        def from_param(cls, param):
            if param is None:
                return NullChoice
            vals = []
            for p in param.split('..', 1):
                inclusive = False
                if p.endswith('i'):
                    inclusive = True
                    p = p[:-1]
                try:
                    val = to_python(p)
                    vals.append(RangeEnd(val, inclusive))
                except __HOLE__:
                    raise ValueError()
            return cls(vals)
        def make_lookup(self, field_name):
            if self.values is None:
                return {field_name: None}
            elif len(self.values) == 1:
                return {field_name: self.values[0].value}
            else:
                start, end = self.values[0], self.values[1]
                return {field_name + '__gt' +
                        ('e' if start.inclusive else ''): start.value,
                        field_name + '__lt' +
                        ('e' if end.inclusive else ''): end.value}
        def __str__(self):
            return '..'.join([to_str(v.value) + ('i' if v.inclusive else '')
                              for v in self.values])
        def __repr__(self):
            return '<NumericRangeChoice %s>' % self
        def __eq__(self, other):
            return self.__cmp__(other) == 0
        def __lt__(self, other):
            return self.__cmp__(other) < 0
        def __cmp__(self, other):
            # 'greater' means more specific.
            if other is None:
                return cmp(self.values, ())
            else:
                if other is NullChoice:
                    return -1
                if len(self.values) != len(other.values):
                    # one value is more specific than two
                    return -cmp(len(self.values), len(other.values))
                elif len(self.values) == 1:
                    return 0
                else:
                    # Larger difference means less specific
                    return -cmp(self.values[1].value - self.values[0].value,
                                other.values[1].value - other.values[0].value)
    return NumericRangeChoice | ValidationError | dataset/ETHPy150Open ionelmc/django-easyfilters/src/django_easyfilters/filters.py/make_numeric_range_choice |
def decompose_field(field):
    from api.base.serializers import (
        HideIfRetraction, HideIfRegistration,
        HideIfDisabled, AllowMissing
    )
    WRAPPER_FIELDS = (HideIfRetraction, HideIfRegistration, HideIfDisabled, AllowMissing)
    while isinstance(field, WRAPPER_FIELDS):
        try:
            field = getattr(field, 'field')
        except __HOLE__:
            break
    return field | AttributeError | dataset/ETHPy150Open CenterForOpenScience/osf.io/api/base/utils.py/decompose_field |
def add_node(self, node):
    if not isinstance(node, Node):
        try:
            node = Node.registry[node]
        except __HOLE__:
            logger.error("Unable to find Node '%s' in registry.", node)
    logger.debug("Adding node '%s' to monitoring group '%s'.", node.name,
                 self.name)
    self.nodes[node.name] = node
    node.monitoring_groups[self.name] = self | KeyError | dataset/ETHPy150Open cloudtools/nymms/nymms/resources.py/MonitoringGroup.add_node |
def add_monitor(self, monitor):
    if not isinstance(monitor, Monitor):
        try:
            monitor = Monitor.registry[monitor]
        except __HOLE__:
            logger.error("Unable to find Monitor '%s' in registry.",
                         monitor)
    logger.debug("Adding monitor '%s' to monitoring group '%s'.",
                 monitor.name, self.name)
    self.monitors[monitor.name] = monitor
    monitor.monitoring_groups[self.name] = self | KeyError | dataset/ETHPy150Open cloudtools/nymms/nymms/resources.py/MonitoringGroup.add_monitor |
def __init__(self, name, realm=None, address=None, node_monitor=None,
             monitoring_groups=None, **kwargs):
    self.name = name
    self.realm = realm
    self.address = address or name
    self.node_monitor = node_monitor
    self.monitoring_groups = WeakValueDictionary()
    self._tasks = []
    if monitoring_groups:
        for group in monitoring_groups:
            if not isinstance(group, MonitoringGroup):
                try:
                    group = MonitoringGroup.registry[group]
                except __HOLE__:
                    logger.error("Unable to find MonitoringGroup '%s' "
                                 "in registry, skipping.", group)
            group.add_node(self)
    super(Node, self).__init__(name, **kwargs) | KeyError | dataset/ETHPy150Open cloudtools/nymms/nymms/resources.py/Node.__init__ |
def __init__(self, name, command, realm=None, monitoring_groups=None,
             **kwargs):
    self.name = name
    self.realm = realm
    if not isinstance(command, Command):
        try:
            command = Command.registry[command]
        except KeyError:
            logger.error("Unable to find Command '%s' in registry.",
                         command)
            raise
    self.command = command
    self.monitoring_groups = WeakValueDictionary()
    if monitoring_groups:
        for group in monitoring_groups:
            if not isinstance(group, MonitoringGroup):
                try:
                    group = MonitoringGroup.registry[group]
                except __HOLE__:
                    logger.error("Unable to find MonitoringGroup '%s' in "
                                 "registry.", group)
                    raise
            group.add_monitor(self)
    super(Monitor, self).__init__(name, **kwargs) | KeyError | dataset/ETHPy150Open cloudtools/nymms/nymms/resources.py/Monitor.__init__ |
def __ProcessProperties(self, kind_name, namespace, entity_key_size,
                        prop_lists):
    for prop_list in prop_lists:
        for prop in prop_list:
            try:
                value = datastore_types.FromPropertyPb(prop)
                self.__AggregateProperty(kind_name, namespace, entity_key_size,
                                         prop, value)
            except (AssertionError, AttributeError, TypeError, __HOLE__), e:
                logging.error('Cannot process property %r, exception %s' %
                              (prop, e)) | ValueError | dataset/ETHPy150Open AppScale/appscale/AppServer/google/appengine/datastore/datastore_stats_generator.py/DatastoreStatsProcessor.__ProcessProperties |
def do_wan_type(self, line):
    try:
        type=eval("LinksysSession.WAN_CONNECT_"+line.strip().upper())
        self.session.set_connection_type(type)
    except __HOLE__:
        print_stderr("linksys: unknown connection type.")
    return 0 | ValueError | dataset/ETHPy150Open cloudaice/simple-data/misc/virtenv/share/doc/pycurl/examples/linksys.py/LinksysInterpreter.do_wan_type |
def next_time(time_string):
    try:
        parsed = list(time.strptime(time_string, "%H:%M"))
    except (TypeError, __HOLE__):
        return float(time_string)
    now = time.localtime()
    current = list(now)
    current[3:6] = parsed[3:6]
    current_time = time.time()
    delta = time.mktime(current) - current_time
    if delta <= 0.0:
        current[2] += 1
        return time.mktime(current) - current_time
    return delta | ValueError | dataset/ETHPy150Open abusesa/abusehelper/abusehelper/core/mailer.py/next_time |
def _get_cpu_count():
    try:
        return cpu_count()
    except __HOLE__:
        raise RuntimeError('Could not determine CPU count and no '
                           '--instance-count supplied.') | NotImplementedError | dataset/ETHPy150Open mbr/flask-appconfig/flask_appconfig/server_backends.py/_get_cpu_count |
def __init__(self, *args, **kwargs):
    try:
        val = args[3]
    except __HOLE__:
        val = kwargs['releaselevel']
    if val not in ('', 'a', 'alpha', 'b', 'beta'):
        raise ValueError("Release-level must be one of 'a', 'alpha', 'b' or 'beta' but '{}' given".format(val))
    # other values must be numbers
    map(int, [a for a in args if a != args[3]])
    map(int, [a for a in kwargs.values() if a != kwargs['releaselevel']]) | IndexError | dataset/ETHPy150Open Aeronautics/aero/aero/__version__.py/V.__init__ |
@resource.register('.*\.json(\.gz)?')
def resource_json_ambiguous(path, **kwargs):
    """ Try to guess if this file is line-delimited or not """
    if os.path.exists(path):
        f = open(path)
        try:
            one = next(f)
        except __HOLE__:  # gzip
            f.close()
            return resource_json(path, **kwargs)
        try:
            next(f)
        except StopIteration:  # only one line
            f.close()
            return resource_json(path, **kwargs)
        try:
            json.loads(one)
            return resource_jsonlines(path, **kwargs)
        except:
            return resource_json(path, **kwargs)
        finally:
            f.close()
    # File doesn't exist, is the dshape variable length?
    dshape = kwargs.get('expected_dshape', None)
    if dshape and dshape[0] == var:
        return resource_jsonlines(path, **kwargs)
    else:
        return resource_json(path, **kwargs) | UnicodeDecodeError | dataset/ETHPy150Open blaze/odo/odo/backends/json.py/resource_json_ambiguous |
@register.tag
def gravatar_url(_parser, token):
    try:
        _tag_name, email = token.split_contents()
    except __HOLE__:
        raise template.TemplateSyntaxError(
            '{} tag requires a single argument'.format(
                token.contents.split()[0]))
    return GravatarUrlNode(email) | ValueError | dataset/ETHPy150Open deis/deis/controller/web/templatetags/gravatar_tags.py/gravatar_url |
def get_request_url(self, request_id):
    """Returns the URL the request e.g. 'http://localhost:8080/foo?bar=baz'.
    Args:
      request_id: The string id of the request making the API call.
    Returns:
      The URL of the request as a string.
    """
    try:
        host = os.environ['HTTP_HOST']
    except __HOLE__:
        host = os.environ['SERVER_NAME']
        port = os.environ['SERVER_PORT']
        if port != '80':
            host += ':' + port
    url = 'http://' + host
    url += urllib.quote(os.environ.get('PATH_INFO', '/'))
    if os.environ.get('QUERY_STRING'):
        url += '?' + os.environ['QUERY_STRING']
    return url | KeyError | dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/api/request_info.py/_LocalRequestInfo.get_request_url |
def read_string(self):
    len = self._read_size()
    byte_payload = self.trans.read(len)
    if self.decode_response:
        try:
            byte_payload = byte_payload.decode('utf-8')
        except __HOLE__:
            pass
    return byte_payload | UnicodeDecodeError | dataset/ETHPy150Open eleme/thriftpy/thriftpy/protocol/compact.py/TCompactProtocol.read_string |
def read_struct(self, obj):
    self.read_struct_begin()
    while True:
        fname, ftype, fid = self.read_field_begin()
        if ftype == TType.STOP:
            break
        if fid not in obj.thrift_spec:
            self.skip(ftype)
            continue
        try:
            field = obj.thrift_spec[fid]
        except __HOLE__:
            self.skip(ftype)
            raise
        else:
            if field is not None and ftype == field[0]:
                fname = field[1]
                fspec = field[2]
                val = self.read_val(ftype, fspec)
                setattr(obj, fname, val)
            else:
                self.skip(ftype)
        self.read_field_end()
    self.read_struct_end() | IndexError | dataset/ETHPy150Open eleme/thriftpy/thriftpy/protocol/compact.py/TCompactProtocol.read_struct |
def _get(self, name, obj):
    try:
        attr = getattr(obj, name)
    except __HOLE__:
        return
    if callable(attr):
        attr = attr(obj)
    return attr | AttributeError | dataset/ETHPy150Open quantmind/lux/lux/extensions/sitemap/__init__.py/BaseSitemap._get |
def _ball_drain_while_active(self, balls, **kwargs):
    if balls <= 0:
        return {'balls': balls}
    no_balls_in_play = False
    try:
        if not self.machine.game.balls_in_play:
            no_balls_in_play = True
    except __HOLE__:
        no_balls_in_play = True
    if no_balls_in_play:
        self.log.debug("Received request to save ball, but no balls are in"
                       " play. Discarding request.")
        return {'balls': balls}
    self.log.debug("Ball(s) drained while active. Requesting new one(s). "
                   "Autolaunch: %s", self.config['auto_launch'])
    self.machine.events.post('ball_save_{}_saving_ball'.format(self.name),
                             balls=balls)
    self.source_playfield.add_ball(balls=balls,
                                   player_controlled=self.config['auto_launch']^1)
    if not self.unlimited_saves:
        self.saves_remaining -= balls
        if self.debug:
            self.log.debug("Saves remaining: %s", self.saves_remaining)
    elif self.debug:
        self.log.debug("Unlimited Saves enabled")
    if self.saves_remaining <= 0:
        if self.debug:
            self.log.debug("Disabling since there are no saves remaining")
        self.disable()
    return {'balls': 0} | AttributeError | dataset/ETHPy150Open missionpinball/mpf/mpf/devices/ball_save.py/BallSave._ball_drain_while_active |
def set_recursion_limit(self):
    """Set explicit recursion limit if set in the environment.
    This is set here to make sure we're setting it always
    when we initialize Django, also when we're loading celery (which
    is calling django.setup too).
    This is only being used for the amo-validator so initializing this late
    should be fine.
    """
    if 'RECURSION_LIMIT' in os.environ:
        try:
            limit = int(os.environ['RECURSION_LIMIT'])
        except __HOLE__:
            log.warning('Unable to parse RECURSION_LIMIT "{}"'.format(
                os.environ['RECURSION_LIMIT']))
        else:
            sys.setrecursionlimit(limit)
            log.info('Set RECURSION_LIMIT to {}'.format(limit)) | TypeError | dataset/ETHPy150Open mozilla/addons-server/src/olympia/core/apps.py/CoreConfig.set_recursion_limit |
def _runlevel():
    '''
    Return the current runlevel
    '''
    if 'upstart._runlevel' in __context__:
        return __context__['upstart._runlevel']
    out = __salt__['cmd.run'](['runlevel', '{0}'.format(_find_utmp())], python_shell=False)
    try:
        ret = out.split()[1]
    except __HOLE__:
        # The runlevel is unknown, return the default
        ret = _default_runlevel()
    __context__['upstart._runlevel'] = ret
    return ret | IndexError | dataset/ETHPy150Open saltstack/salt/salt/modules/upstart.py/_runlevel |
def getInternalQueue(self, thread_id):
    """ returns intenal command queue for a given thread.
    if new queue is created, notify the RDB about it """
    try:
        return self.cmdQueue[thread_id]
    except __HOLE__:
        self.internalQueueLock.acquire()
        try:
            self.cmdQueue[thread_id] = PydevQueue.Queue()
            all_threads = threading.enumerate()
            cmd = None
            for t in all_threads:
                if GetThreadId(t) == thread_id:
                    if not hasattr(t, 'additionalInfo'):
                        #see http://sourceforge.net/tracker/index.php?func=detail&aid=1955428&group_id=85796&atid=577329
                        #Let's create the additional info right away!
                        t.additionalInfo = PyDBAdditionalThreadInfo()
                    self.RUNNING_THREAD_IDS[thread_id] = t
                    cmd = self.cmdFactory.makeThreadCreatedMessage(t)
                    break
            if cmd:
                PydevdLog(2, "found a new thread ", str(thread_id))
                self.writer.addCommand(cmd)
            else:
                PydevdLog(0, "could not find thread by id to register")
        finally:
            self.internalQueueLock.release()
    return self.cmdQueue[thread_id] | KeyError | dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/common/diagnostic/pydevDebug/pydevd.py/PyDB.getInternalQueue |
def processNetCommand(self, cmd_id, seq, text):
    '''Processes a command received from the Java side
    @param cmd_id: the id of the command
    @param seq: the sequence of the command
    @param text: the text received in the command
    @note: this method is run as a big switch... after doing some tests, it's not clear whether changing it for
    a dict id --> function call will have better performance result. A simple test with xrange(10000000) showed
    that the gains from having a fast access to what should be executed are lost because of the function call in
    a way that if we had 10 elements in the switch the if..elif are better -- but growing the number of choices
    makes the solution with the dispatch look better -- so, if this gets more than 20-25 choices at some time,
    it may be worth refactoring it (actually, reordering the ifs so that the ones used mostly come before
    probably will give better performance).
    '''
    self.acquire()
    try:
        try:
            cmd = None
            if cmd_id == CMD_RUN:
                self.readyToRun = True
            elif cmd_id == CMD_VERSION:
                # response is version number
                cmd = self.cmdFactory.makeVersionMessage(seq)
            elif cmd_id == CMD_LIST_THREADS:
                # response is a list of threads
                cmd = self.cmdFactory.makeListThreadsMessage(seq)
            elif cmd_id == CMD_THREAD_KILL:
                int_cmd = InternalTerminateThread(text)
                self.postInternalCommand(int_cmd, text)
            elif cmd_id == CMD_THREAD_SUSPEND:
                t = PydevdFindThreadById(text)
                if t:
                    additionalInfo = None
                    try:
                        additionalInfo = t.additionalInfo
                    except __HOLE__:
                        pass #that's ok, no info currently set
                    if additionalInfo is not None:
                        for frame in additionalInfo.IterFrames():
                            frame.f_trace = self.trace_dispatch
                            SetTraceForParents(frame, self.trace_dispatch)
                            del frame
                    self.setSuspend(t, CMD_THREAD_SUSPEND)
            elif cmd_id == CMD_THREAD_RUN:
                t = PydevdFindThreadById(text)
                if t:
                    t.additionalInfo.pydev_step_cmd = None
                    t.additionalInfo.pydev_step_stop = None
                    t.additionalInfo.pydev_state = STATE_RUN
            elif cmd_id == CMD_STEP_INTO or cmd_id == CMD_STEP_OVER or cmd_id == CMD_STEP_RETURN:
                #we received some command to make a single step
                t = PydevdFindThreadById(text)
                if t:
                    t.additionalInfo.pydev_step_cmd = cmd_id
                    t.additionalInfo.pydev_state = STATE_RUN
            elif cmd_id == CMD_RUN_TO_LINE:
                #we received some command to make a single step
                thread_id, line, func_name = text.split('\t', 2)
                t = PydevdFindThreadById(thread_id)
                if t:
                    t.additionalInfo.pydev_step_cmd = cmd_id
                    t.additionalInfo.pydev_next_line = int(line)
                    t.additionalInfo.pydev_func_name = func_name
                    t.additionalInfo.pydev_state = STATE_RUN
            elif cmd_id == CMD_RELOAD_CODE:
                #we received some command to make a reload of a module
                module_name = text.strip()
                from pydevd_reload import xreload
                if not DictContains(sys.modules, module_name):
                    if '.' in module_name:
                        new_module_name = module_name.split('.')[-1]
                        if DictContains(sys.modules, new_module_name):
                            module_name = new_module_name
                if not DictContains(sys.modules, module_name):
                    sys.stderr.write('pydev debugger: Unable to find module to reload: "'+module_name+'".\n')
                    sys.stderr.write('pydev debugger: This usually means you are trying to reload the __main__ module (which cannot be reloaded).\n')
                else:
                    sys.stderr.write('pydev debugger: Reloading: '+module_name+'\n')
                    xreload(sys.modules[module_name])
            elif cmd_id == CMD_CHANGE_VARIABLE:
                #the text is: thread\tstackframe\tFRAME|GLOBAL\tattribute_to_change\tvalue_to_change
                try:
                    thread_id, frame_id, scope, attr_and_value = text.split('\t', 3)
                    tab_index = attr_and_value.rindex('\t')
                    attr = attr_and_value[0:tab_index].replace('\t', '.')
                    value = attr_and_value[tab_index + 1:]
                    int_cmd = InternalChangeVariable(seq, thread_id, frame_id, scope, attr, value)
                    self.postInternalCommand(int_cmd, thread_id)
                except:
                    traceback.print_exc()
            elif cmd_id == CMD_GET_VARIABLE:
                #we received some command to get a variable
                #the text is: thread_id\tframe_id\tFRAME|GLOBAL\tattributes*
                try:
                    thread_id, frame_id, scopeattrs = text.split('\t', 2)
                    if scopeattrs.find('\t') != -1: # there are attributes beyond scope
                        scope, attrs = scopeattrs.split('\t', 1)
                    else:
                        scope, attrs = (scopeattrs, None)
                    int_cmd = InternalGetVariable(seq, thread_id, frame_id, scope, attrs)
                    self.postInternalCommand(int_cmd, thread_id)
                except:
                    traceback.print_exc()
            elif cmd_id == CMD_GET_COMPLETIONS:
                #we received some command to get a variable
                #the text is: thread_id\tframe_id\tactivation token
                try:
                    thread_id, frame_id, scope, act_tok = text.split('\t', 3)
                    int_cmd = InternalGetCompletions(seq, thread_id, frame_id, act_tok)
                    self.postInternalCommand(int_cmd, thread_id)
                except:
                    traceback.print_exc()
            elif cmd_id == CMD_GET_FRAME:
                thread_id, frame_id, scope = text.split('\t', 2)
                int_cmd = InternalGetFrame(seq, thread_id, frame_id)
                self.postInternalCommand(int_cmd, thread_id)
            elif cmd_id == CMD_SET_BREAK:
                #func name: 'None': match anything. Empty: match global, specified: only method context.
                #command to add some breakpoint.
                # text is file\tline. Add to breakpoints dictionary
                file, line, condition = text.split('\t', 2)
                if condition.startswith('**FUNC**'):
                    func_name, condition = condition.split('\t', 1)
                    #We must restore new lines and tabs as done in
                    #AbstractDebugTarget.breakpointAdded
                    condition = condition.replace("@_@NEW_LINE_CHAR@_@", '\n').\
                        replace("@_@TAB_CHAR@_@", '\t').strip()
                    func_name = func_name[8:]
                else:
                    func_name = 'None' #Match anything if not specified.
                file = NormFileToServer(file)
                if not os.path.exists(file):
                    sys.stderr.write('pydev debugger: warning: trying to add breakpoint'\
                        ' to file that does not exist: %s (will have no effect)\n' % (file,))
                line = int(line)
                if DEBUG_TRACE_BREAKPOINTS > 0:
                    sys.stderr.write('Added breakpoint:%s - line:%s - func_name:%s\n' % (file, line, func_name))
                if DictContains(self.breakpoints, file):
                    breakDict = self.breakpoints[file]
                else:
                    breakDict = {}
                if len(condition) <= 0 or condition == None or condition == "None":
                    breakDict[line] = (True, None, func_name)
                else:
                    breakDict[line] = (True, condition, func_name)
                self.breakpoints[file] = breakDict
                #and enable the tracing for existing threads (because there may be frames being executed that
                #are currently untraced).
                threads = threadingEnumerate()
                for t in threads:
                    if not t.getName().startswith('pydevd.'):
                        #TODoo: optimize so that we only actually add that tracing if it's in
                        #the new breakpoint context.
                        additionalInfo = None
                        try:
                            additionalInfo = t.additionalInfo
                        except AttributeError:
                            pass #that's ok, no info currently set
                        if additionalInfo is not None:
                            for frame in additionalInfo.IterFrames():
                                frame.f_trace = self.trace_dispatch
                                SetTraceForParents(frame, self.trace_dispatch)
                                del frame
            elif cmd_id == CMD_REMOVE_BREAK:
                #command to remove some breakpoint
                #text is file\tline. Remove from breakpoints dictionary
                file, line = text.split('\t', 1)
                file = NormFileToServer(file)
                try:
                    line = int(line)
                except ValueError:
                    pass
                else:
                    try:
                        del self.breakpoints[file][line] #remove the breakpoint in that line
                        if DEBUG_TRACE_BREAKPOINTS > 0:
                            sys.stderr.write('Removed breakpoint:%s\n' % (file,))
                    except KeyError:
                        #ok, it's not there...
                        if DEBUG_TRACE_BREAKPOINTS > 0:
                            #Sometimes, when adding a breakpoint, it adds a remove command before (don't really know why)
                            sys.stderr.write("breakpoint not found: %s - %s\n" % (file, line))
            elif cmd_id == CMD_EVALUATE_EXPRESSION or cmd_id == CMD_EXEC_EXPRESSION:
                #command to evaluate the given expression
                #text is: thread\tstackframe\tLOCAL\texpression
                thread_id, frame_id, scope, expression = text.split('\t', 3)
                int_cmd = InternalEvaluateExpression(seq, thread_id, frame_id, expression,
                                                     cmd_id == CMD_EXEC_EXPRESSION)
                self.postInternalCommand(int_cmd, thread_id)
            else:
                #I have no idea what this is all about
                cmd = self.cmdFactory.makeErrorMessage(seq, "unexpected command " + str(cmd_id))
            if cmd is not None:
                self.writer.addCommand(cmd)
                del cmd
        except Exception:
            traceback.print_exc()
            cmd = self.cmdFactory.makeErrorMessage(seq,
                "Unexpected exception in processNetCommand.\nInitial params: %s" % ((cmd_id, seq, text),))
            self.writer.addCommand(cmd)
    finally:
        self.release() | AttributeError | dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/common/diagnostic/pydevDebug/pydevd.py/PyDB.processNetCommand |
def trace_dispatch(self, frame, event, arg):
''' This is the callback used when we enter some context in the debugger.
We also decorate the thread we are in with info about the debugging.
The attributes added are:
pydev_state
pydev_step_stop
pydev_step_cmd
pydev_notify_kill
'''
try:
if self._finishDebuggingSession:
#that was not working very well because jython gave some socket errors
threads = threadingEnumerate()
for t in threads:
if hasattr(t, 'doKill'):
t.doKill()
return None
filename, base = GetFilenameAndBase(frame)
is_file_to_ignore = DictContains(DONT_TRACE, base) #we don't want to debug threading or anything related to pydevd
if not self.force_post_mortem_stop: #If we're in post mortem mode, we might not have another chance to show that info!
if is_file_to_ignore:
return None
#print('trace_dispatch', base, frame.f_lineno, event, frame.f_code.co_name)
try:
#this shouldn't give an exception, but it could happen... (python bug)
#see http://mail.python.org/pipermail/python-bugs-list/2007-June/038796.html
#and related bug: http://bugs.python.org/issue1733757
t = threadingCurrentThread()
except:
frame.f_trace = self.trace_dispatch
return self.trace_dispatch
try:
additionalInfo = t.additionalInfo
except:
additionalInfo = t.additionalInfo = PyDBAdditionalThreadInfo()
if self.force_post_mortem_stop: #If we're in post mortem mode, we might not have another chance to show that info!
if additionalInfo.pydev_force_stop_at_exception:
self.force_post_mortem_stop -= 1
frame, frames_byid = additionalInfo.pydev_force_stop_at_exception
thread_id = GetThreadId(t)
used_id = pydevd_vars.addAdditionalFrameById(thread_id, frames_byid)
try:
self.setSuspend(t, CMD_STEP_INTO)
self.doWaitSuspend(t, frame, 'exception', None)
finally:
additionalInfo.pydev_force_stop_at_exception = None
pydevd_vars.removeAdditionalFrameById(thread_id)
# if thread is not alive, cancel trace_dispatch processing
if not t.isAlive():
self.processThreadNotAlive(GetThreadId(t))
return None # suspend tracing
if is_file_to_ignore:
return None
#each new frame...
return additionalInfo.CreateDbFrame(self, filename, additionalInfo, t, frame).trace_dispatch(frame, event, arg)
except __HOLE__:
return None
except Exception:
#Log it
traceback.print_exc()
return None | SystemExit | dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/common/diagnostic/pydevDebug/pydevd.py/PyDB.trace_dispatch |
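trace_dispatch follows the standard sys.settrace contract: return a callable to keep tracing a frame, or None to stop. A minimal self-contained illustration of that contract, assuming nothing from pydevd:

import sys

def my_trace(frame, event, arg):
    # Return a callable to keep tracing this frame, None to silence it.
    if event == 'call':
        return my_trace
    return None

sys.settrace(my_trace)

def traced():
    return 1

traced()
sys.settrace(None)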
def settrace(host='localhost', stdoutToServer=False, stderrToServer=False, port=5678, suspend=True, trace_only_current_thread=True):
'''Sets the tracing function with the pydev debug function and initializes needed facilities.
@param host: the user may specify another host, if the debug server is not in the same machine
@param stdoutToServer: when this is true, the stdout is passed to the debug server
@param stderrToServer: when this is true, the stderr is passed to the debug server
so that they are printed in its console and not in this process console.
@param port: specifies which port to use for communicating with the server (note that the server must be started
in the same port). @note: currently it's hard-coded at 5678 in the client
@param suspend: whether a breakpoint should be emulated as soon as this function is called.
@param trace_only_current_thread: determines if only the current thread will be traced or all future threads will also have the tracing enabled.
'''
global connected
global bufferStdOutToServer
global bufferStdErrToServer
if not connected :
connected = True
bufferStdOutToServer = stdoutToServer
bufferStdErrToServer = stderrToServer
pydevd_vm_type.SetupType()
debugger = PyDB()
debugger.connect(host, port)
net = NetCommand(str(CMD_THREAD_CREATE), 0, '<xml><thread name="pydevd.reader" id="-1"/></xml>')
debugger.writer.addCommand(net)
net = NetCommand(str(CMD_THREAD_CREATE), 0, '<xml><thread name="pydevd.writer" id="-1"/></xml>')
debugger.writer.addCommand(net)
if bufferStdOutToServer:
sys.stdoutBuf = pydevd_io.IOBuf()
sys.stdout = pydevd_io.IORedirector(sys.stdout, sys.stdoutBuf) #@UndefinedVariable
if bufferStdErrToServer:
sys.stderrBuf = pydevd_io.IOBuf()
sys.stderr = pydevd_io.IORedirector(sys.stderr, sys.stderrBuf) #@UndefinedVariable
SetTraceForParents(GetFrame(), debugger.trace_dispatch)
t = threadingCurrentThread()
try:
additionalInfo = t.additionalInfo
except AttributeError:
additionalInfo = PyDBAdditionalThreadInfo()
t.additionalInfo = additionalInfo
while not debugger.readyToRun:
time.sleep(0.1) # busy wait until we receive run command
if suspend:
debugger.setSuspend(t, CMD_SET_BREAK)
        #note that we do this through pydevd_tracing.SetTrace so that the user
        #is not warned about the tracing!
pydevd_tracing.SetTrace(debugger.trace_dispatch)
if not trace_only_current_thread:
#Trace future threads?
try:
#not available in jython!
threading.settrace(debugger.trace_dispatch) # for all future threads
except:
pass
try:
thread.start_new_thread = pydev_start_new_thread
thread.start_new = pydev_start_new_thread
except:
pass
PyDBCommandThread(debugger).start()
else:
        #ok, we're already in debug mode, with all set, so let's just set the break
debugger = GetGlobalDebugger()
SetTraceForParents(GetFrame(), debugger.trace_dispatch)
t = threadingCurrentThread()
try:
additionalInfo = t.additionalInfo
except __HOLE__:
additionalInfo = PyDBAdditionalThreadInfo()
t.additionalInfo = additionalInfo
pydevd_tracing.SetTrace(debugger.trace_dispatch)
if not trace_only_current_thread:
#Trace future threads?
try:
#not available in jython!
threading.settrace(debugger.trace_dispatch) # for all future threads
except:
pass
try:
thread.start_new_thread = pydev_start_new_thread
thread.start_new = pydev_start_new_thread
except:
pass
if suspend:
debugger.setSuspend(t, CMD_SET_BREAK) | AttributeError | dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/common/diagnostic/pydevDebug/pydevd.py/settrace |
def AddTransfer(self, throttle_name, token_count):
"""Add a count to the amount this thread has transferred.
Each time a thread transfers some data, it should call this method to
note the amount sent. The counts may be rotated if sufficient time
has passed since the last rotation.
Args:
throttle_name: The name of the throttle to add to.
token_count: The number to add to the throttle counter.
"""
self.VerifyThrottleName(throttle_name)
transferred = self.transferred[throttle_name]
try:
transferred[id(threading.currentThread())] += token_count
except __HOLE__:
thread = threading.currentThread()
raise ThreadNotRegisteredError(
'Unregistered thread accessing throttled datastore stub: id = %s\n'
'name = %s' % (id(thread), thread.getName()))
if self.last_rotate[throttle_name] + self.ROTATE_PERIOD < self.get_time():
self._RotateCounts(throttle_name) | KeyError | dataset/ETHPy150Open CollabQ/CollabQ/.google_appengine/google/appengine/ext/remote_api/throttle.py/Throttle.AddTransfer |
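A sketch of the per-thread counter pattern above with the registration step made explicit; all names here are illustrative stand-ins, not the appengine API:

import threading

counters = {}  # thread id -> tokens transferred so far

def register_current_thread():
    counters[id(threading.current_thread())] = 0

def add_transfer(token_count):
    try:
        counters[id(threading.current_thread())] += token_count
    except KeyError:
        # An unregistered thread has no counter entry yet.
        raise RuntimeError('thread not registered with the throttle')

register_current_thread()
add_transfer(42)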
def add_driver(notification_driver):
"""Add a notification driver at runtime."""
# Make sure the driver list is initialized.
_get_drivers()
if isinstance(notification_driver, basestring):
# Load and add
try:
driver = importutils.import_module(notification_driver)
_drivers[notification_driver] = driver
except __HOLE__:
LOG.exception(_("Failed to load notifier %s. "
"These notifications will not be sent.") %
notification_driver)
else:
# Driver is already loaded; just add the object.
_drivers[notification_driver] = notification_driver | ImportError | dataset/ETHPy150Open Havate/havate-openstack/proto-build/gui/horizon/Horizon_GUI/openstack_dashboard/openstack/common/notifier/api.py/add_driver |
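A minimal sketch of the same lazy driver-loading pattern, using the standard importlib in place of the project's importutils helper:

import importlib
import logging

_drivers = {}

def add_driver(notification_driver):
    if isinstance(notification_driver, str):
        try:
            _drivers[notification_driver] = importlib.import_module(notification_driver)
        except ImportError:
            logging.exception('Failed to load notifier %s', notification_driver)
    else:
        # Already a loaded module/object: store it directly.
        _drivers[notification_driver] = notification_driver

add_driver('json')   # loaded by dotted path
add_driver(logging)  # already-imported object stored as-is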
def get_processes(self):
"""Get the processes running on the device.
Returns a dictionary (PID, Process)."""
processes = {}
# Get ps output
cmd = ["ps", "-Z"]
# Split by newlines and remove first line ("LABEL USER PID PPID NAME")
# TODO: surround with try/except?
psz = subprocess.check_output(self.shell + cmd).split('\n')[1:]
for line in psz:
line = line.strip("\r")
if line:
try:
p = Process(line, self.android_version)
except __HOLE__ as e:
self.log.warning(e)
else:
processes[p.pid] = p
return processes | ValueError | dataset/ETHPy150Open seandroid-analytics/seal/sealib/device.py/Device.get_processes |
def get_files(self, path="/"):
"""Get the files under the given path from a connected device.
The path must be a directory.
Returns a dictionary (filename, File)."""
files_dict = {}
listing = []
path = os.path.normpath(path)
cmd = ["ls", "-lRZ", "'" + path + "'"]
# Get the File object for the top-level path
# We could not get it otherwise
files_dict.update(self.get_dir(path))
# If the device is slow there can be errors produced when running down
# /proc (ls: /proc/10/exe: No such file or directory) particularly on
        # the emulator. On failure the command still outputs a string containing
        # all the entries, so on error we split the output into a list just as
        # in the success case.
try:
listing = subprocess.check_output(self.shell + cmd).split('\n')
except subprocess.CalledProcessError as e:
listing = e.output.split('\n')
# Parse ls -lRZ output for a directory
# In Android <=6.0 the output of ls -lRZ "<DIRECTORY>" begins
# with a blank line, in >=6.0.1 it doesn't.
# This is taken care of when parsing here.
new_dir = False
first_run = True
for line in listing:
line = line.strip("\r")
# Skip "total" line
if line.startswith("total "):
if first_run:
first_run = False
continue
# If the current line is empty, expect a new directory in the next
if not line:
new_dir = True
if first_run:
first_run = False
continue
# Initialise new directory
if new_dir or first_run:
directory = line.strip(':')
new_dir = False
if first_run:
first_run = False
continue
# Regular line describing a file
try:
f = File(line, directory, self.android_version)
except __HOLE__ as e:
if first_run:
# If this is the very first line of the output, the
# command failed outright
self.log.error(e)
return None
self.log.error("In directory \"%s\"", directory)
self.log.error(e)
else:
files_dict[f.absname] = f
return files_dict | ValueError | dataset/ETHPy150Open seandroid-analytics/seal/sealib/device.py/Device.get_files |
def get_file(self, path):
"""Get the file matching the given path from a connected device.
The path must be a file.
Returns a dictionary (filename, File)."""
path = os.path.normpath(path)
cmd = ["ls", "-lZ", "'" + path + "'"]
listing = subprocess.check_output(self.shell + cmd).split('\n')
line = listing[0].strip("\r")
# Parse ls -lZ output for a single file
try:
f = File(line, os.path.dirname(path), self.android_version)
except __HOLE__ as e:
self.log.error(e)
return None
else:
return {f.absname: f} | ValueError | dataset/ETHPy150Open seandroid-analytics/seal/sealib/device.py/Device.get_file |
def get_dir(self, path):
"""Get the directory matching the given path from a connected device.
The path must be a directory.
This only returns information on the single directory ("ls -ldZ"): to
get information about all the directory content recursively, use
get_files(path).
Returns a dictionary (filename, File)."""
path = os.path.normpath(path)
cmd = ["ls", "-ldZ", "'" + path + "'"]
listing = subprocess.check_output(self.shell + cmd).split('\n')
line = listing[0].strip("\r")
# Parse ls -ldZ output for a directory
try:
f = File(line, os.path.dirname(path), self.android_version)
except __HOLE__ as e:
self.log.error(e)
return None
else:
return {f.absname: f} | ValueError | dataset/ETHPy150Open seandroid-analytics/seal/sealib/device.py/Device.get_dir |
def __init__(self, data, table_name=None, default_dialect=None,
save_metadata_to=None, metadata_source=None,
varying_length_text=False, uniques=False,
pk_name=None, force_pk=False, data_size_cushion=0,
_parent_table=None, _fk_field_name=None, reorder=False,
loglevel=logging.WARN, limit=None):
"""
Initialize a Table and load its data.
If ``varying_length_text`` is ``True``,
text columns will be TEXT rather than VARCHAR.
This *improves* performance in PostgreSQL.
If a ``metadata<timestamp>`` YAML file generated
from a previous ddlgenerator run is
provided, *only* ``INSERT`` statements will be produced,
and the table structure
determined during the previous run will be assumed.
"""
self.source = data
logging.getLogger().setLevel(loglevel)
self.varying_length_text = varying_length_text
self.table_name = table_name
self.data_size_cushion = data_size_cushion
self._find_table_name(data)
# Send anything but Python data objects to
# data_dispenser.sources.Source
if isinstance(data, Source):
self.data = data
elif hasattr(data, 'lower') or hasattr(data, 'read'):
self.data = Source(data, limit=limit)
else:
try:
self.data = iter(data)
except __HOLE__:
self.data = Source(data)
if ( self.table_name.startswith('generated_table')
and hasattr(self.data, 'table_name')):
self.table_name = self.data.table_name
self.table_name = self.table_name.lower()
if hasattr(self.data, 'generator') and hasattr(self.data.generator, 'sqla_columns'):
children = {}
self.pk_name = next(col.name for col in self.data.generator.sqla_columns if col.primary_key)
else:
self.data = reshape.walk_and_clean(self.data)
(self.data, self.pk_name, children, child_fk_names
) = reshape.unnest_children(data=self.data,
parent_name=self.table_name,
pk_name=pk_name,
force_pk=force_pk)
self.default_dialect = default_dialect
self.comments = {}
child_metadata_sources = {}
if metadata_source:
if isinstance(metadata_source, OrderedDict):
logging.info('Column metadata passed in as OrderedDict')
self.columns = metadata_source
else:
logging.info('Pulling column metadata from file %s'
% metadata_source)
with open(metadata_source) as infile:
self.columns = yaml.load(infile.read())
for (col_name, col) in self.columns.items():
if isinstance(col, OrderedDict):
child_metadata_sources[col_name] = col
self.columns.pop(col_name)
else:
self._fill_metadata_from_sample(col)
else:
self._determine_types()
if reorder:
ordered_columns = OrderedDict()
if pk_name and pk_name in self.columns:
ordered_columns[pk_name] = self.columns.pop(pk_name)
for (c, v) in sorted(self.columns.items()):
ordered_columns[c] = v
self.columns = ordered_columns
if _parent_table:
fk = sa.ForeignKey('%s.%s' % (_parent_table.table_name,
_parent_table.pk_name))
else:
fk = None
self.table = sa.Table(self.table_name, metadata,
*[sa.Column(cname, col['satype'],
fk if fk and (_fk_field_name == cname)
else None,
primary_key=(cname == self.pk_name),
unique=(uniques and col['is_unique']),
nullable=col['is_nullable'],
doc=self.comments.get(cname))
                                for (cname, col) in self.columns.items()
                              ])
self.children = {child_name: Table(child_data, table_name=child_name,
default_dialect=self.default_dialect,
varying_length_text=varying_length_text,
uniques=uniques, pk_name=pk_name,
force_pk=force_pk, data_size_cushion=data_size_cushion,
_parent_table=self, reorder=reorder,
_fk_field_name=child_fk_names[child_name],
metadata_source=child_metadata_sources.get(child_name),
loglevel=loglevel)
for (child_name, child_data) in children.items()}
if save_metadata_to:
if not save_metadata_to.endswith(('.yml', 'yaml')):
save_metadata_to += '.yaml'
with open(save_metadata_to, 'w') as outfile:
outfile.write(yaml.dump(self._saveable_metadata()))
logging.info('Pass ``--save-metadata-to %s`` next time to re-use structure' %
save_metadata_to) | TypeError | dataset/ETHPy150Open catherinedevlin/ddl-generator/ddlgenerator/ddlgenerator.py/Table.__init__ |
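The constructor above decides how to treat its input by probing it with iter(); a stripped-down sketch of that duck-typing step, with a plain list standing in for Source:

def coerce_to_iterable(data):
    # Strings/files would be routed to Source first in the real code;
    # here only the iter() probe is shown, with a list as the fallback.
    try:
        return iter(data)
    except TypeError:
        return iter([data])  # stand-in for Source(data)

list(coerce_to_iterable([{'a': 1}, {'a': 2}]))  # already iterable
list(coerce_to_iterable(42))                    # wrapped instead of rejected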
def django_models(self, metadata_source=None):
sql = self.sql(dialect='postgresql', inserts=False, creates=True,
drops=True, metadata_source=metadata_source)
u = sql.split(';\n')
try:
import django
except __HOLE__:
print('Cannot find Django on the current path. Is it installed?')
django = None
if django:
from django.conf import settings
from django.core import management
from django import setup
import sqlite3
import os
db_filename = 'generated_db.db'
conn = sqlite3.connect(db_filename)
c = conn.cursor()
for i in u:
c.execute(i)
if not settings.configured:
settings.configure(
DEBUG='on',
SECRET_KEY='1234',
ALLOWED_HOSTS='localhost',
DATABASES = {'default' : {'NAME':db_filename,'ENGINE':'django.db.backends.sqlite3'}},
)
django.setup()
management.call_command('inspectdb', interactive=False)
os.remove(db_filename) | ImportError | dataset/ETHPy150Open catherinedevlin/ddl-generator/ddlgenerator/ddlgenerator.py/Table.django_models |
@classmethod
def do_request(self, method, url, *args, **kwargs):
auth_token = kwargs.get('token', None)
params = kwargs.get('params', None)
headers = kwargs.get('headers', None)
proxy = kwargs.get('proxy', None)
if not auth_token:
try:
config = ConfigParser.ConfigParser()
config.read(os.path.expanduser('~/.do.cfg'))
auth_token = config.get('docli', 'auth_token') or os.getenv('do_auth_token')
except:
auth_token = None
if not auth_token:
data = {'has_error':True, 'error_message':'Authentication token not provided.'}
return data
if headers:
headers.update({'Authorization': 'Bearer ' + auth_token})
else:
headers = {'Authorization': 'Bearer ' + auth_token}
if proxy:
proxy = {'http': proxy}
request_method = {'GET':requests.get, 'POST': requests.post, 'PUT': requests.put, 'DELETE': requests.delete}
request_url = self.api_url + url
req = request_method[method]
try:
res = req(request_url, headers=headers, params=params, proxies=proxy)
except (requests.exceptions.ConnectionError, requests.exceptions.RequestException) as e:
data = {'has_error':True, 'error_message':e.message}
return data
if res.status_code == 204:
data = {'has_error':False, 'error_message':''}
return data
try:
data = res.json()
data.update({'has_error':False, 'error_message':''})
except __HOLE__ as e:
msg = "Cannot read response, %s" %(e.message)
data = {'has_error':True, 'error_message':msg}
if not res.ok:
msg = data['message']
data.update({'has_error':True, 'error_message':msg})
return data | ValueError | dataset/ETHPy150Open yspanchal/docli/docli/commands/base_request.py/DigitalOcean.do_request |
def handle(self, *args, **options):
for permission in StoredPermission.objects.all():
try:
Permission.get(
{'pk': '%s.%s' % (permission.namespace, permission.name)},
proxy_only=True
)
except __HOLE__:
permission.delete() | KeyError | dataset/ETHPy150Open mayan-edms/mayan-edms/mayan/apps/permissions/management/commands/purgepermissions.py/Command.handle |
def __init__(self, n):
if n:
try:
value = int(n)
except __HOLE__ as e:
raise FilterValidationError(e)
now = timezone.now()
frm_dt = now - timedelta(seconds=value)
super(SecondsFilter, self).__init__(value, start_time__gt=frm_dt)
else:
# Empty query
super(SecondsFilter, self).__init__() | ValueError | dataset/ETHPy150Open django-silk/silk/silk/request_filters.py/SecondsFilter.__init__ |
def _parse(dt, fmt):
"""attempt to coerce dt into a datetime given fmt, otherwise raise
a FilterValidationError"""
try:
dt = datetime.strptime(dt, fmt)
except TypeError:
if not isinstance(dt, datetime):
raise FilterValidationError('Must be a datetime object')
except __HOLE__ as e:
raise FilterValidationError(e)
return dt | ValueError | dataset/ETHPy150Open django-silk/silk/silk/request_filters.py/_parse |
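A self-contained sketch of the coercion helper above; FilterValidationError is replaced by ValueError and the string-parse error branch is omitted to keep it short:

from datetime import datetime

def parse_dt(dt, fmt='%Y-%m-%d'):
    try:
        dt = datetime.strptime(dt, fmt)
    except TypeError:
        # Not a string: accept it only if it is already a datetime.
        if not isinstance(dt, datetime):
            raise ValueError('Must be a datetime object')
    return dt

parse_dt('2016-01-31')           # parsed from a string
parse_dt(datetime(2016, 1, 31))  # already a datetime, passed through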
def __init__(self, n):
try:
value = int(n)
except __HOLE__ as e:
raise FilterValidationError(e)
super(NumQueriesFilter, self).__init__(value, num_queries__gte=n) | ValueError | dataset/ETHPy150Open django-silk/silk/silk/request_filters.py/NumQueriesFilter.__init__ |
def __init__(self, n):
try:
value = int(n)
except __HOLE__ as e:
raise FilterValidationError(e)
super(TimeSpentOnQueriesFilter, self).__init__(value, db_time__gte=n) | ValueError | dataset/ETHPy150Open django-silk/silk/silk/request_filters.py/TimeSpentOnQueriesFilter.__init__ |
def __init__(self, n):
try:
value = int(n)
except __HOLE__ as e:
raise FilterValidationError(e)
super(OverallTimeFilter, self).__init__(value, time_taken__gte=n) | ValueError | dataset/ETHPy150Open django-silk/silk/silk/request_filters.py/OverallTimeFilter.__init__ |
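The three filters above share one int-coercion step; a condensed sketch with FilterValidationError reduced to a plain Exception subclass:

class FilterValidationError(Exception):
    pass

def validate_int(n):
    # Shared validation step for the duration/query-count filters.
    try:
        return int(n)
    except ValueError as e:
        raise FilterValidationError(e)

validate_int('15')  # -> 15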
def setColor(self, color):
# Check that the color does not already exist
exists = False
for c in self._colorHistory:
if color == c:
exists = True
break
# If the color does not exist then add it
if not exists:
self._colorHistory.append(color)
colorList = list(self._colorHistory)
# Invert order of colors
colorList.reverse()
# Move the selected color to the front of the list
        colorList.insert(0, colorList.pop(colorList.index(color)))
# Create 2d color map
colors = [([None] * self._columns) for _ in range(self._rows)]
iterator = iter(colorList)
for row in range(self._rows):
for col in range(self._columns):
try:
colors[row][col] = iterator.next()
except __HOLE__:
colors[row][col] = Color.WHITE
self._grid.setColorGrid(colors)
self._grid.requestRepaint() | StopIteration | dataset/ETHPy150Open rwl/muntjac/muntjac/addon/colorpicker/color_picker_history.py/ColorPickerHistory.setColor |
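A sketch of the grid-filling loop above in isolation: consume an iterator cell by cell and pad with a default once it is exhausted (Color.WHITE replaced by a plain string):

def fill_grid(values, rows, cols, default='white'):
    it = iter(values)
    grid = [[None] * cols for _ in range(rows)]
    for r in range(rows):
        for c in range(cols):
            try:
                grid[r][c] = next(it)
            except StopIteration:
                grid[r][c] = default  # palette exhausted: pad the rest
    return grid

fill_grid(['red', 'blue'], 2, 2)  # last two cells padded with 'white'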
@property
def json(self):
try:
return json.loads(self.value)
except (TypeError, __HOLE__):
return {} | ValueError | dataset/ETHPy150Open mozilla/addons-server/src/olympia/zadmin/models.py/Config.json |
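The same tolerant-JSON property, reduced to a standalone sketch:

import json

class Config(object):
    def __init__(self, value):
        self.value = value

    @property
    def json(self):
        # Unset (None -> TypeError) or malformed (ValueError) values
        # both degrade to an empty dict.
        try:
            return json.loads(self.value)
        except (TypeError, ValueError):
            return {}

Config('{"a": 1}').json  # {'a': 1}
Config(None).json        # {}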
def save_messages(self, msgs):
msg_ids = ['.'.join(msg['id']) for msg in msgs]
cache.set('validation.job_id:%s' % self.job_id, msg_ids)
for msg, key in zip(msgs, msg_ids):
if isinstance(msg['description'], list):
des = []
for _m in msg['description']:
if isinstance(_m, list):
for x in _m:
des.append(x)
else:
des.append(_m)
des = '; '.join(des)
else:
des = msg['description']
cache.set('validation.msg_key:' + key,
{'long_message': des,
'message': msg['message'],
'type': msg.get('compatibility_type',
msg.get('type'))
})
aa = ('validation.job_id:%s.msg_key:%s:addons_affected'
% (self.job_id, key))
try:
cache.incr(aa)
except __HOLE__:
cache.set(aa, 1) | ValueError | dataset/ETHPy150Open mozilla/addons-server/src/olympia/zadmin/models.py/ValidationJobTally.save_messages |
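The incr-then-set dance at the end exists because cache.incr raises ValueError for a missing key; a sketch with a toy cache standing in for Django's backend:

class ToyCache(object):
    # Mimics the cache contract: incr() on a missing key raises ValueError.
    def __init__(self):
        self.data = {}
    def incr(self, key):
        if key not in self.data:
            raise ValueError(key)
        self.data[key] += 1
    def set(self, key, value):
        self.data[key] = value

def incr_or_init(cache, key):
    try:
        cache.incr(key)
    except ValueError:
        cache.set(key, 1)  # first hit seeds the counter

cache = ToyCache()
incr_or_init(cache, 'affected')  # seeds to 1
incr_or_init(cache, 'affected')  # increments to 2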
def run_action(module_name, args=None, forward_args=None):
"""
Run an action using its module path and a list of arguments
If forward_args is given, it must be an argparse.Namespace object.
This namespace will be merged with args before being
passed to the do() method of module_name.
"""
if not args:
args = list()
ui.debug("running", module_name, " ".join(args))
action_name = module_name.split(".")[-1]
package_name = ".".join(module_name.split(".")[:-1])
try:
_tmp = __import__(package_name, globals(), locals(), [action_name])
except __HOLE__, err:
raise InvalidAction(module_name, str(err))
try:
module = getattr(_tmp, action_name)
except AttributeError, err:
raise InvalidAction(module_name, "Could not find module %s in package %s" %
(module_name, package_name))
check_module(module)
parser = argparse.ArgumentParser()
module.configure_parser(parser)
    # Quick hack to prevent argparse.parse_args from:
    # - printing usage to the console
    # - calling SystemExit
    # Instead, raise a nice Exception
def custom_exit():
return
parser.exit = custom_exit
def error(message):
mess = "Invalid arguments when calling run_action(%s)\n" % module_name
mess += message + "\n"
mess += "args: %s\n" % " ".join(args)
mess += "forward_args: %s\n" % forward_args
raise qisys.error.Error(mess)
parser.error = error
if forward_args:
parsed_args = parser.parse_args(args=args, namespace=copy.deepcopy(forward_args))
else:
parsed_args = parser.parse_args(args=args)
return module.do(parsed_args) | ImportError | dataset/ETHPy150Open aldebaran/qibuild/python/qisys/script.py/run_action |
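A sketch of the dynamic action lookup above, using importlib rather than the raw __import__ call; error handling is simplified to ValueError:

import importlib

def load_action(module_name):
    # e.g. 'qisys.actions.foo' -> import 'qisys.actions', then grab 'foo'
    package_name, _, action_name = module_name.rpartition('.')
    try:
        package = importlib.import_module(package_name)
    except ImportError as err:
        raise ValueError('invalid action %s: %s' % (module_name, err))
    return getattr(package, action_name)

load_action('os.path')  # resolves to the platform path module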
def main_wrapper(module, args):
"""This wraps the main method of an action so that:
    * when an exception is raised that derives from ``qisys.error.Error``,
      simply display the error message and exit
* when sys.exit() or ui.fatal() is called, just exit
* also handle KeyboardInterrupt
* Other cases imply there's a bug in qiBuild, so either:
      * Generate a full backtrace using cgitb
* Start a debugging session if ``--pdb`` was used
"""
try:
module.do(args)
except qisys.error.Error as e:
# Normal exception raised from qibuild, display a message
# and exit
message = message_from_exception(e)
ui.error(message)
sys.exit(2)
except SystemExit as e:
# sys.exit() or ui.fatal() has been called, assume
# message has already been displayed and exit
sys.exit(e.code)
except __HOLE__:
ui.info("Interrupted by user, quitting")
sys.exit(2)
except Exception as e:
tb = sys.exc_info()[2]
        # Oh, oh we have a crash:
if args.pdb:
print ""
print "### Exception:", e
print "### Starting a debugger"
try:
#pylint: disable-msg=F0401
import ipdb
ipdb.post_mortem(tb)
sys.exit(0)
except ImportError:
import pdb
pdb.post_mortem(tb)
sys.exit(0)
# Raise, this will be caught by cgitb that was enabled in qisys.main
raise | KeyboardInterrupt | dataset/ETHPy150Open aldebaran/qibuild/python/qisys/script.py/main_wrapper |
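A trimmed sketch of the same triage order: expected exits first, Ctrl-C next, real bugs last:

import sys
import traceback

def run(main):
    try:
        main()
    except SystemExit as e:
        sys.exit(e.code)       # already reported: just forward the code
    except KeyboardInterrupt:
        sys.exit(2)            # Ctrl-C: exit quietly
    except Exception:
        traceback.print_exc()  # genuine bug: show the backtrace
        sys.exit(1)

run(lambda: None)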
def action_modules_from_package(package_name):
"""Returns a suitable list of modules from
a package.
Example:
assuming you have:
actions/foo/__init__.py
actions/foo/spam.py
actions/foo/eggs.py
then
action_modules_from_package("actions.foo") returns:
[actions.foo.spam, actions.foo.eggs]
"""
res = list()
splitted = package_name.split(".")[1:]
last_part = ".".join(splitted)
package = __import__(package_name, globals(), locals(), [last_part])
base_path = os.path.dirname(package.__file__)
module_paths = os.listdir(base_path)
module_paths = [x[:-3] for x in module_paths if x.endswith(".py")]
module_paths.remove("__init__")
for module_path in module_paths:
try:
_tmp = __import__(package_name, globals(), locals(), [module_path], -1)
module = getattr(_tmp, module_path)
res.append(module)
except __HOLE__, err:
print "Skipping %s (%s)" % (module_path, err)
continue
res.sort(key=operator.attrgetter("__name__"))
return res | ImportError | dataset/ETHPy150Open aldebaran/qibuild/python/qisys/script.py/action_modules_from_package |
def test_parses_dates_with_better_error_message(self):
try:
parse_date('foo')
self.fail('Should have failed to parse')
except __HOLE__ as e:
self.assertIn('Unable to parse date value: foo', str(e)) | ValueError | dataset/ETHPy150Open aws/aws-cli/tests/unit/customizations/cloudtrail/test_validation.py/TestValidation.test_parses_dates_with_better_error_message |
def test_ensures_cloudtrail_arns_are_valid(self):
try:
assert_cloudtrail_arn_is_valid('foo:bar:baz')
self.fail('Should have failed')
except __HOLE__ as e:
self.assertIn('Invalid trail ARN provided: foo:bar:baz', str(e)) | ValueError | dataset/ETHPy150Open aws/aws-cli/tests/unit/customizations/cloudtrail/test_validation.py/TestValidation.test_ensures_cloudtrail_arns_are_valid |
def test_ensures_cloudtrail_arns_are_valid_when_missing_resource(self):
try:
assert_cloudtrail_arn_is_valid(
'arn:aws:cloudtrail:us-east-1:%s:foo' % TEST_ACCOUNT_ID)
self.fail('Should have failed')
except __HOLE__ as e:
self.assertIn('Invalid trail ARN provided', str(e)) | ValueError | dataset/ETHPy150Open aws/aws-cli/tests/unit/customizations/cloudtrail/test_validation.py/TestValidation.test_ensures_cloudtrail_arns_are_valid_when_missing_resource |
def subcheck__get_fake_values(self, cls):
res1 = get_fake_values(cls, annotate=False, seed=0)
res2 = get_fake_values(cls, annotate=True, seed=0)
if hasattr(cls, 'lower'):
cls = class_by_name[cls]
attrs = cls._necessary_attrs + cls._recommended_attrs
attrnames = [attr[0] for attr in attrs]
attrtypes = [attr[1] for attr in attrs]
attritems = zip(attrnames, attrtypes)
attrannnames = attrnames + list(self.annotations.keys())
self.assertEqual(sorted(attrnames), sorted(res1.keys()))
self.assertEqual(sorted(attrannnames), sorted(res2.keys()))
items11 = [(name, type(value)) for name, value in res1.items()]
self.assertEqual(sorted(attritems), sorted(items11))
for name, value in res1.items():
try:
self.assertEqual(res2[name], value)
except __HOLE__:
assert_arrays_equal(res2[name], value)
for name, value in self.annotations.items():
self.assertFalse(name in res1)
self.assertEqual(res2[name], value)
for attr in attrs:
name = attr[0]
if len(attr) < 3:
continue
dim = attr[2]
self.assertEqual(dim, res1[name].ndim)
self.assertEqual(dim, res2[name].ndim)
if len(attr) < 4:
continue
dtype = attr[3]
self.assertEqual(dtype.kind, res1[name].dtype.kind)
self.assertEqual(dtype.kind, res2[name].dtype.kind) | ValueError | dataset/ETHPy150Open NeuralEnsemble/python-neo/neo/test/coretest/test_generate_datasets.py/Test__get_fake_values.subcheck__get_fake_values |
def subcheck__generate_datasets(self, cls, cascade, seed=None):
self.annotations['seed'] = seed
if seed is None:
res = fake_neo(obj_type=cls, cascade=cascade)
else:
res = fake_neo(obj_type=cls, cascade=cascade, seed=seed)
if not hasattr(cls, 'lower'):
self.assertTrue(isinstance(res, cls))
else:
self.assertEqual(res.__class__.__name__, cls)
assert_neo_object_is_compliant(res)
self.assertEqual(res.annotations, self.annotations)
resattr = get_fake_values(cls, annotate=False, seed=0)
if seed is not None:
for name, value in resattr.items():
if name in ['channel_names',
'channel_indexes',
'channel_index']:
continue
try:
try:
resvalue = getattr(res, name)
except AttributeError:
if name == 'signal':
continue
raise
try:
self.assertEqual(resvalue, value)
except __HOLE__:
assert_arrays_equal(resvalue, value)
except BaseException as exc:
exc.args += ('from %s' % name,)
raise
if not getattr(res, '_child_objects', ()):
pass
elif not cascade:
self.assertEqual(res.children, ())
else:
self.assertNotEqual(res.children, ())
if cls in ['RecordingChannelGroup', RecordingChannelGroup]:
for i, rchan in enumerate(res.recordingchannels):
self.assertEqual(rchan.name, res.channel_names[i].astype(str))
self.assertEqual(rchan.index, res.channel_indexes[i])
for i, unit in enumerate(res.units):
for sigarr in res.analogsignalarrays:
self.assertEqual(unit.channel_indexes[0],
sigarr.channel_index[i]) | ValueError | dataset/ETHPy150Open NeuralEnsemble/python-neo/neo/test/coretest/test_generate_datasets.py/Test__generate_datasets.subcheck__generate_datasets |
def _get_linked_doctypes(doctype):
ret = {}
# find fields where this doctype is linked
ret.update(get_linked_fields(doctype))
ret.update(get_dynamic_linked_fields(doctype))
# find links of parents
links = frappe.db.sql("""select dt from `tabCustom Field`
where (fieldtype="Table" and options=%s)""", (doctype))
links += frappe.db.sql("""select parent from tabDocField
where (fieldtype="Table" and options=%s)""", (doctype))
for dt, in links:
if not dt in ret:
ret[dt] = {"get_parent": True}
for dt in ret.keys():
try:
doctype_module = load_doctype_module(dt)
except __HOLE__:
# in case of Custom DocType
continue
if getattr(doctype_module, "exclude_from_linked_with", False):
del ret[dt]
return ret | ImportError | dataset/ETHPy150Open frappe/frappe/frappe/desk/form/linked_with.py/_get_linked_doctypes |
def Encode(self):
try:
return self._CEncode()
except __HOLE__:
e = Encoder()
self.Output(e)
return e.buffer().tostring() | NotImplementedError | dataset/ETHPy150Open AppScale/appscale/AppServer/google/net/proto/ProtocolBuffer.py/ProtocolMessage.Encode |
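Encode tries a C-accelerated path and falls back to pure Python on NotImplementedError; the same pattern in miniature, with both implementations as hypothetical stand-ins:

def _fast_encode(msg):
    raise NotImplementedError  # stand-in: no C extension available

def _slow_encode(msg):
    return repr(msg).encode('utf-8')

def encode(msg):
    try:
        return _fast_encode(msg)
    except NotImplementedError:
        return _slow_encode(msg)  # pure-Python fallback

encode({'a': 1})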
def SerializePartialToString(self):
try:
return self._CEncodePartial()
except (NotImplementedError, __HOLE__):
e = Encoder()
self.OutputPartial(e)
return e.buffer().tostring() | AttributeError | dataset/ETHPy150Open AppScale/appscale/AppServer/google/net/proto/ProtocolBuffer.py/ProtocolMessage.SerializePartialToString |
def MergePartialFromString(self, s):
try:
self._CMergeFromString(s)
except __HOLE__:
a = array.array('B')
a.fromstring(s)
d = Decoder(a, 0, len(a))
self.TryMerge(d) | NotImplementedError | dataset/ETHPy150Open AppScale/appscale/AppServer/google/net/proto/ProtocolBuffer.py/ProtocolMessage.MergePartialFromString |
def _fetch(self, value, arg):
try:
message = self.spam[self._mailbox][value - 1]
message_id = value
except __HOLE__:
for x, item in enumerate(self.spam[self._mailbox]):
if item["uid"] == value:
message = item
message_id = x + 1
break
parts = "headers"
if arg in ("(ALL)", "(RFC822 FLAGS)"):
parts = "complete"
return ("OK", (("%s " % message_id, message[parts]), message["flags"])) | TypeError | dataset/ETHPy150Open uwdata/termite-data-server/web2py/gluon/contrib/mockimaplib.py/Connection._fetch |
def _get_messages(self, query):
if query.strip().isdigit():
return [self.spam[self._mailbox][int(query.strip()) - 1],]
elif query[1:-1].strip().isdigit():
return [self.spam[self._mailbox][int(query[1:-1].strip()) -1],]
elif query[1:-1].replace("UID", "").strip().isdigit():
for item in self.spam[self._mailbox]:
if item["uid"] == query[1:-1].replace("UID", "").strip():
return [item,]
messages = []
try:
for m in self.results[self._mailbox][query]:
try:
self.spam[self._mailbox][m - 1]["id"] = m
messages.append(self.spam[self._mailbox][m - 1])
except __HOLE__:
for x, item in enumerate(self.spam[self._mailbox]):
if item["uid"] == m:
item["id"] = x + 1
messages.append(item)
break
except IndexError:
# message removed
pass
return messages
except KeyError:
raise ValueError("The client issued an unexpected query: %s" % query) | TypeError | dataset/ETHPy150Open uwdata/termite-data-server/web2py/gluon/contrib/mockimaplib.py/Connection._get_messages |
def append(self, mailbox, flags, struct_time, message):
"""
result, data = self.connection.append(mailbox, flags, struct_time, message)
if result == "OK":
uid = int(re.findall("\d+", str(data))[-1])
"""
last = self.spam[mailbox][-1]
try:
uid = int(last["uid"]) +1
except __HOLE__:
alluids = []
for _mailbox in self.spam.keys():
for item in self.spam[_mailbox]:
try:
alluids.append(int(item["uid"]))
except:
pass
if len(alluids) > 0:
uid = max(alluids) + 1
else:
uid = 1
flags = "FLAGS " + flags
item = {"uid": str(uid), "headers": message, "complete": message, "flags": flags}
self.spam[mailbox].append(item)
return ("OK", "spam spam %s spam" % uid) | ValueError | dataset/ETHPy150Open uwdata/termite-data-server/web2py/gluon/contrib/mockimaplib.py/Connection.append |