function | label | info
---|---|---|
def _read_callback(self, data=None):
"""Callback called when some data are read on the socket.
The buffer is given to the hiredis parser. If a reply is complete,
we put the decoded reply on the reply queue.
Args:
data (str): string (buffer) read on the socket.
"""
try:
if data is not None:
self.__reader.feed(data)
while True:
reply = self.__reader.gets()
if reply is not False:
try:
callback = self.__callback_queue.popleft()
# normal client (1 reply = 1 callback)
callback(reply)
except __HOLE__:
# pubsub clients
self._reply_list.append(reply)
self._condition.notify_all()
else:
break
except hiredis.ProtocolError:
# something nasty occurred (corrupt stream => no way to recover)
LOG.warning("corrupted stream => disconnect")
self.disconnect() | IndexError | dataset/ETHPy150Open thefab/tornadis/tornadis/client.py/Client._read_callback |
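A minimal sketch of the hiredis feed/gets loop this callback is built around (it assumes only that the `hiredis` package is installed; the callback queue and condition plumbing above are tornadis-specific):

```python
import hiredis

reader = hiredis.Reader()
reader.feed(b"+PONG\r\n$3\r\nfoo\r\n")  # bytes as read off the socket
while True:
    reply = reader.gets()
    if reply is False:                   # False == no complete reply buffered yet
        break
    print(reply)                         # b'PONG', then b'foo'
```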
@staticmethod
def translate_to_python(c, top_class=None, text_query=True):
try:
i = c.index('(')
except __HOLE__:
if text_query:
return TextQuery(c)
raise ValueError("Invalid QueryCondition syntax")
clsname = c[:i]
cls = find_subclass(top_class or QueryCondition, clsname)
if cls is not None:
return cls(*eval(c[i+1:-1]))
elif text_query:
return TextQuery(c)
else:
raise ValueError("No such condition type: %s" % clsname) | ValueError | dataset/ETHPy150Open VisTrails/VisTrails/vistrails/packages/persistent_archive/queries.py/QueryCondition.translate_to_python |
def get(self, request, *args, **kwargs):
try:
import memcache
except __HOLE__:
return HttpResponseNotFound()
if not (request.user.is_authenticated() and request.user.is_staff):
return HttpResponseForbidden()
if settings.CACHES['default']['BACKEND'] != 'django.core.cache.backends.memcached.MemcachedCache':
return HttpResponseNotFound()
context = {'stats': self.make_stats()}
return self.render_to_response(context) | ImportError | dataset/ETHPy150Open niwinz/niwi-web/src/niwi/contrib/memcache_status/views.py/MemcacheStatusView.get |
def print_cycles(objects, outstream=sys.stdout, show_progress=False):
'''Find reference cycles
:param list objects:
A list of objects to find cycles in. It is often useful to pass in
gc.garbage to find the cycles that are preventing some objects from
being garbage collected.
:param file outstream:
The stream for output.
:param bool show_progress:
If True, print the number of objects reached as they are found.
'''
def print_path(path):
for i, step in enumerate(path):
# next “wraps around”
next = path[(i + 1) % len(path)]
outstream.write(' %s -- ' % str(type(step)))
written = False
if isinstance(step, dict):
for key, val in step.items():
if val is next:
outstream.write('[%s]' % repr(key))
written = True
break
if key is next:
outstream.write('[key] = %s' % repr(val))
written = True
break
elif isinstance(step, (list, tuple)):
for i, item in enumerate(step):
if item is next:
outstream.write('[%d]' % i)
written = True
elif getattr(type(step), '__getattribute__', None) in (object.__getattribute__, type.__getattribute__):
for attr in chain(dir(step), getattr(step, '__dict__', ())):
if getattr(step, attr, None) is next:
try:
outstream.write('%r.%s' % (step, attr))
except TypeError:
outstream.write('.%s' % (attr,))
written = True
break
if not written:
outstream.write(repr(step))
outstream.write(' ->\n')
outstream.write('\n')
def recurse(obj, start, all, current_path):
if show_progress:
outstream.write('%d\r' % len(all))
all[id(obj)] = None
referents = gc.get_referents(obj)
for referent in referents:
# If we’ve found our way back to the start, this is
# a cycle, so print it out
if referent is start:
try:
outstream.write('Cyclic reference: %r\n' % referent)
except TypeError:
try:
outstream.write('Cyclic reference: %i (%r)\n' % (id(referent), type(referent)))
except TypeError:
outstream.write('Cyclic reference: %i\n' % id(referent))
print_path(current_path)
# Don’t go back through the original list of objects, or
# through temporary references to the object, since those
# are just an artifact of the cycle detector itself.
elif referent is objects or isinstance(referent, FrameType):
continue
# We haven’t seen this object before, so recurse
elif id(referent) not in all:
recurse(referent, start, all, current_path + (obj,))
for obj in objects:
# We are not interested in non-powerline cyclic references
try:
if not type(obj).__module__.startswith('powerline'):
continue
except __HOLE__:
continue
recurse(obj, obj, {}, ()) | AttributeError | dataset/ETHPy150Open powerline/powerline/powerline/lib/debug.py/print_cycles |
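A possible way to drive the function above (a sketch; `DEBUG_SAVEALL` makes the collector keep otherwise-unreachable objects in `gc.garbage` so they can be inspected):

```python
import gc

gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
print_cycles(gc.garbage)   # only cycles through powerline modules are reported
```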
@staticmethod
def is_datetime(value):
"""Verifies that value is a valid Datetime type, or can be converted to it.
Returns:
bool
"""
try:
dt = Datetime(value)
dt # shut up pyflakes
return True
except __HOLE__:
return False | ValueError | dataset/ETHPy150Open bokeh/bokeh/bokeh/charts/data_source.py/ChartDataSource.is_datetime |
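Hypothetical calls, assuming bokeh's `Datetime` property raises `ValueError` for anything it cannot coerce:

```python
ChartDataSource.is_datetime('2016-01-01')   # True: coercible to a datetime
ChartDataSource.is_datetime('not a date')   # False: the ValueError is swallowed
```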
def main(main_name):
"""The main entry point to the bootstrapper. Call this with the module name to
use as your main app."""
# prelaunch should bypass full bootstrap
if prelaunch_client.is_prelaunch_client(sys.argv):
# This is a lightweight import due to lazy initialization of the message loop.
import message_loop
if message_loop.supports_prelaunch():
return sys.exit(prelaunch_client.main(sys.argv))
# Remove the prelaunch command from the argv and proceed as normal.
prelaunch_client.remove_prelaunch_from_sys_argv()
if sys.platform == 'darwin':
if ('--chrome' in sys.argv):
sys.argv.insert(1, '--main-name')
sys.argv.insert(2, main_name)
sys.exit(run())
if ('--curses' in sys.argv):
sys.argv.insert(1, '--main-name')
sys.argv.insert(2, main_name)
sys.exit(run())
# Try using chrome.
import message_loop_chrome
if message_loop_chrome.supported():
sys.argv.insert(1, '--main-name')
sys.argv.insert(2, main_name)
sys.exit(run())
# To use wx-widgets on darwin, we need to be in 32 bit mode. Import of wx
# will fail if you run python in 64 bit mode, which is default in 10.6+. :'(
# It is depressingly hard to force python into 32 bit mode reliably across
# computers, for some reason. So, we try two approaches known to work... one
# after the other.
wx_found_but_failed = False
try:
import wx
except __HOLE__:
if str(sys.exc_value).find("no appropriate 64-bit") != -1:
wx_found_but_failed = True
if wx_found_but_failed:
# Switch the executable to /usr/bin/python2.6 if we are implicitly running
# 2.6 via /usr/bin/python. For some reason, neither the arch trick nor the
# env trick work if you use /usr/bin/python
if sys.version.startswith("2.6") and sys.executable == '/usr/bin/python':
if os.path.exists('/usr/bin/python2.6'):
executable = '/usr/bin/python2.6'
else:
executable = sys.executable
else:
executable = sys.executable
# try using the versioner trick
if '--triedenv' not in sys.argv:
os.putenv('VERSIONER_PYTHON_PREFER_32_BIT', 'yes')
args = [executable, sys.argv[0], '--triedenv']
args.extend(sys.argv[1:])
os.execve(args[0], args, os.environ)
# last chance...
if '--triedarch' not in sys.argv:
args = ["/usr/bin/arch", "-i386", executable, sys.argv[0], '--triedarch']
args.extend(sys.argv[1:])
os.execv(args[0], args)
# did we already try one of the tricks below? Bail out to prevent recursion...
print "Your system's python is 64 bit, and all the tricks we know to get it into 32b mode failed."
sys.exit(255)
else:
try:
sys.argv.remove('--triedenv')
except:
pass
try:
sys.argv.remove('--triedarch')
except:
pass
sys.argv.insert(1, '--main-name')
sys.argv.insert(2, main_name)
sys.exit(run())
else:
sys.argv.insert(1, '--main-name')
sys.argv.insert(2, main_name)
sys.exit(run()) | ImportError | dataset/ETHPy150Open natduca/quickopen/src/bootstrap.py/main |
def next(self):
try:
x = self.rp[self.ix]
except __HOLE__:
raise StopIteration
self.ix += 1
return x | IndexError | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/guppy-0.1.10/guppy/heapy/RefPat.py/RefPatIter.next |
def __getattr__(self, s):
if not self.is_initialized:
raise AttributeError, s
try:
return getattr(self.__class__, s)
except __HOLE__:
pass
try:
row = self.get_row_named(s)
except ValueError:
raise AttributeError, s
return row.set | AttributeError | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/guppy-0.1.10/guppy/heapy/RefPat.py/ReferencePattern.__getattr__ |
def generate(self, ix=None):
while ix is None or ix < 0 or ix >= len(self.lines):
try:
self.lines.append(self.lg.next())
except __HOLE__:
self.isfullygenerated = 1
return
self.lines[-1].index = len(self.lines) - 1 | StopIteration | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/guppy-0.1.10/guppy/heapy/RefPat.py/ReferencePattern.generate |
def get_row(self, key):
try:
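# Indexing an empty list probes the key's type: a non-integer key raises
# TypeError (-> named lookup), an integer raises IndexError (-> indexed lookup).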
[][key]
except TypeError:
return self.get_row_named(key)
except __HOLE__:
return self.get_row_indexed(key) | IndexError | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/guppy-0.1.10/guppy/heapy/RefPat.py/ReferencePattern.get_row |
def get_row_named(self, name):
row = self.get_row_indexed(0)
for ix in str_as_ixl(name):
try:
row = row.getchild(ix)
except __HOLE__:
raise ValueError, 'Reference pattern has no row named %r'%name
return row | IndexError | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/guppy-0.1.10/guppy/heapy/RefPat.py/ReferencePattern.get_row_named |
def iterlines(self, start=None):
if start is None:
start = 0
while 1:
try:
yield self.get_row_indexed(start)
except __HOLE__:
return
start += 1 | IndexError | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/guppy-0.1.10/guppy/heapy/RefPat.py/ReferencePattern.iterlines |
def set(self, x, y, z):
"""Sets the components of this vector.
x -- x component
y -- y component
z -- z component
"""
v = self._v
try:
v[0] = x * 1.0
v[1] = y * 1.0
v[2] = z * 1.0
except __HOLE__:
raise TypeError("Must be a number")
return self | TypeError | dataset/ETHPy150Open PythonProgramming/Beginning-Game-Development-with-Python-and-Pygame/Chapter 10/gameobjects/vector3.py/Vector3.set |
def __getitem__(self, index):
"""Retrieves a component, given its index.
index -- 0, 1 or 2 for x, y or z
"""
try:
return self._v[index]
except __HOLE__:
raise IndexError("There are 3 values in this object, index should be 0, 1 or 2!") | IndexError | dataset/ETHPy150Open PythonProgramming/Beginning-Game-Development-with-Python-and-Pygame/Chapter 10/gameobjects/vector3.py/Vector3.__getitem__ |
def __setitem__(self, index, value):
"""Sets a component, given its index.
index -- 0, 1 or 2 for x, y or z
value -- New (float) value of component
"""
try:
self._v[index] = 1.0 * value
except IndexError:
raise IndexError("There are 3 values in this object, index should be 0, 1 or 2!")
except __HOLE__:
raise TypeError("Must be a number") | TypeError | dataset/ETHPy150Open PythonProgramming/Beginning-Game-Development-with-Python-and-Pygame/Chapter 10/gameobjects/vector3.py/Vector3.__setitem__ |
def _parse_date_w3dtf(datestr):
if not datestr.strip():
return None
parts = datestr.lower().split('t')
if len(parts) == 1:
# This may be a date only, or may be an MSSQL-style date
parts = parts[0].split()
if len(parts) == 1:
# Treat this as a date only
parts.append('00:00:00z')
elif len(parts) > 2:
return None
date = parts[0].split('-', 2)
if not date or len(date[0]) != 4:
return None
# Ensure that `date` has 3 elements. Using '1' sets the default
# month to January and the default day to the 1st of the month.
date.extend(['1'] * (3 - len(date)))
try:
year, month, day = [int(i) for i in date]
except ValueError:
# `date` may have more than 3 elements or may contain
# non-integer strings.
return None
if parts[1].endswith('z'):
parts[1] = parts[1][:-1]
parts.append('z')
# Append the numeric timezone offset, if any, to parts.
# If this is an MSSQL-style date then parts[2] already contains
# the timezone information, so `append()` will not affect it.
# Add 1 to each value so that if `find()` returns -1 it will be
# treated as False.
loc = parts[1].find('-') + 1 or parts[1].find('+') + 1 or len(parts[1]) + 1
loc = loc - 1
parts.append(parts[1][loc:])
parts[1] = parts[1][:loc]
time = parts[1].split(':', 2)
# Ensure that time has 3 elements. Using '0' means that the
# minutes and seconds, if missing, will default to 0.
time.extend(['0'] * (3 - len(time)))
tzhour = 0
tzmin = 0
if parts[2][:1] in ('-', '+'):
try:
tzhour = int(parts[2][1:3])
tzmin = int(parts[2][4:])
except ValueError:
return None
if parts[2].startswith('-'):
tzhour = tzhour * -1
tzmin = tzmin * -1
else:
tzhour = timezonenames.get(parts[2], 0)
try:
hour, minute, second = [int(float(i)) for i in time]
except ValueError:
return None
# Create the datetime object and timezone delta objects
try:
stamp = datetime.datetime(year, month, day, hour, minute, second)
except ValueError:
return None
delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour)
# Return the date and timestamp in a UTC 9-tuple
try:
return (stamp - delta).utctimetuple()
except (OverflowError, __HOLE__):
# IronPython throws ValueErrors instead of OverflowErrors
return None | ValueError | dataset/ETHPy150Open SickRage/SickRage/lib/feedparser/datetimes/w3dtf.py/_parse_date_w3dtf |
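Example inputs the parser above is written to accept (each returns a UTC 9-tuple, or `None` on failure):

```python
_parse_date_w3dtf('2003-12-31T10:14:55Z')        # explicit UTC
_parse_date_w3dtf('2003-12-31T10:14:55-08:00')   # numeric offset applied
_parse_date_w3dtf('2003-12-31 10:14:55')         # MSSQL-style separator
_parse_date_w3dtf('2003-12')                     # day defaults to the 1st
```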
def validate(self):
log.debug("Trying to validate stream descriptor for %s", str(self.raw_info['stream_name']))
try:
hex_stream_name = self.raw_info['stream_name']
key = self.raw_info['key']
hex_suggested_file_name = self.raw_info['suggested_file_name']
stream_hash = self.raw_info['stream_hash']
blobs = self.raw_info['blobs']
except __HOLE__ as e:
raise InvalidStreamDescriptorError("Missing '%s'" % (e.args[0]))
for c in hex_suggested_file_name:
if c not in '0123456789abcdef':
raise InvalidStreamDescriptorError("Suggested file name is not a hex-encoded string")
h = get_lbry_hash_obj()
h.update(hex_stream_name)
h.update(key)
h.update(hex_suggested_file_name)
def get_blob_hashsum(b):
length = b['length']
if length != 0:
blob_hash = b['blob_hash']
else:
blob_hash = None
blob_num = b['blob_num']
iv = b['iv']
blob_hashsum = get_lbry_hash_obj()
if length != 0:
blob_hashsum.update(blob_hash)
blob_hashsum.update(str(blob_num))
blob_hashsum.update(iv)
blob_hashsum.update(str(length))
return blob_hashsum.digest()
blobs_hashsum = get_lbry_hash_obj()
for blob in blobs:
blobs_hashsum.update(get_blob_hashsum(blob))
if blobs[-1]['length'] != 0:
raise InvalidStreamDescriptorError("Does not end with a zero-length blob.")
h.update(blobs_hashsum.digest())
if h.hexdigest() != stream_hash:
raise InvalidStreamDescriptorError("Stream hash does not match stream metadata")
log.debug("It is validated")
return defer.succeed(True) | KeyError | dataset/ETHPy150Open lbryio/lbry/lbrynet/lbryfile/StreamDescriptor.py/LBRYFileStreamDescriptorValidator.validate |
def __init__(self, gen):
self.content = None
self.gen = gen
github_url = GITHUB_API.format(self.gen.settings['GITHUB_USER'])
try:
f = urlopen(github_url)
# 3 vs 2 makes us have to do nasty stuff to get encoding without
# being 3 or 2 specific. So... Yeah.
encoding = f.headers['content-type'].split('charset=')[-1]
c = f.read().decode(encoding)
except __HOLE__:
logger.warning("unable to open {0}".format(github_url))
return
self.content = json.loads(c) | HTTPError | dataset/ETHPy150Open kura/pelican-githubprojects/pelican_githubprojects/github.py/GithubProjects.__init__ |
def status(self):
"""Print the status of the build system"""
is_locked = False
try:
self.lock_fd = open(self.LOCK_FILE, 'w+')
fcntl.flock(self.lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
fcntl.flock(self.lock_fd, fcntl.LOCK_UN)
except __HOLE__, exc_value:
if exc_value[0] == 11:
is_locked = True
state = {
'IS_LOCKED': True if is_locked else False,
'LOCK_FILE': self.LOCK_FILE,
'STOP_UPDATE_FILE_EXISTS': (
True if os.path.exists(self.STOP_UPDATE_FILE) else False
),
'STOP_UPDATE_FILE': self.STOP_UPDATE_FILE,
'STAGE_DIR_EXISTS': (
True if os.path.exists(self.STAGE_DIR) else False
),
'STAGE_DIR': self.STAGE_DIR,
'PROD_DIR_EXISTS': (
True if os.path.exists(self.PROD_DIR) else False
),
'PROD_DIR': self.PROD_DIR,
'LAST_RUN_FILE': self.LAST_RUN_FILE,
}
for k, v in state.iteritems():
print '{0}={1}'.format(k, v) | IOError | dataset/ETHPy150Open mozilla/inventory/mozdns/mozbind/builder.py/DNSBuilder.status |
def build_staging(self, force=False):
"""
Create the stage folder. Fail if it already exists unless
force=True.
"""
if os.path.exists(self.STAGE_DIR) and not force:
raise BuildError("The DNS build scripts tried to build the staging"
" area but the area already exists.")
try:
os.makedirs(self.STAGE_DIR)
except __HOLE__:
if not force:
raise | OSError | dataset/ETHPy150Open mozilla/inventory/mozdns/mozbind/builder.py/DNSBuilder.build_staging |
def clear_staging(self, force=False):
"""
rm -rf the staging area. Fail if the staging area doesn't exist.
"""
self.log("Attempting rm -rf staging "
"area. ({0})...".format(self.STAGE_DIR))
if os.path.exists(self.STAGE_DIR) or force:
try:
shutil.rmtree(self.STAGE_DIR)
except __HOLE__, e:
if e.errno == 2:
self.log("Staging was not present.",
log_level='LOG_WARNING')
else:
raise
self.log("Staging area cleared")
else:
if not force:
raise BuildError("The DNS build scripts tried to remove the "
"staging area but the staging area didn't "
"exist.") | OSError | dataset/ETHPy150Open mozilla/inventory/mozdns/mozbind/builder.py/DNSBuilder.clear_staging |
def lock(self):
"""
Tries to write a lock file. Returns True if we get the lock, else returns
False.
"""
try:
if not os.path.exists(os.path.dirname(self.LOCK_FILE)):
os.makedirs(os.path.dirname(self.LOCK_FILE))
self.log("Attempting acquire mutext "
"({0})...".format(self.LOCK_FILE))
self.lock_fd = open(self.LOCK_FILE, 'w+')
fcntl.flock(self.lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
self.log(self.format_title("Mutex Acquired"))
return True
except __HOLE__, exc_value:
self.lock_fd = None
# IOError: [Errno 11] Resource temporarily unavailable
if exc_value[0] == 11:
self.log(
"DNS build script attempted to acquire the "
"build mutux but another process already has it."
)
fail_mail(
"An attempt was made to start the DNS build script "
"while an instance of the script was already running. "
"The attempt was denied.",
subject="Concurrent DNS builds attempted.")
return False
else:
raise | IOError | dataset/ETHPy150Open mozilla/inventory/mozdns/mozbind/builder.py/DNSBuilder.lock |
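The non-blocking flock idiom the `status()` and `lock()` methods share, in isolation (errno 11 is `EAGAIN`/`EWOULDBLOCK` on Linux, which is what the `exc_value[0] == 11` checks test for):

```python
import errno
import fcntl

fd = open('/tmp/example.lock', 'w+')
try:
    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    print('mutex acquired')
except IOError as e:
    if e.errno == errno.EAGAIN:   # another process holds the lock
        print('mutex busy')
    else:
        raise
```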
def stage_to_prod(self, src):
"""
Copy file over to PROD_DIR. Return the new location of the
file.
"""
if not src.startswith(self.STAGE_DIR):
raise BuildError(
"Improper file '{0}' passed to stage_to_prod".format(src)
)
dst = src.replace(self.STAGE_DIR, self.PROD_DIR)
dst_dir = os.path.dirname(dst)
if self.STAGE_ONLY:
self.log("Did not copy {0} to {1}".format(src, dst))
return dst
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
# copy2 will copy file metadata
try:
shutil.copy2(src, dst)
self.log("Copied {0} to {1}".format(src, dst))
except (__HOLE__, os.error) as why:
raise BuildError(
"cp -p {0} {1} caused {2}".format(src, dst, str(why))
)
except shutil.Error:
raise
return dst | IOError | dataset/ETHPy150Open mozilla/inventory/mozdns/mozbind/builder.py/DNSBuilder.stage_to_prod |
def route(self, msg):
k = self._digest(msg)
if isinstance(k, (tuple, list)):
key, args = k[0], k[1:]
else:
key, args = k, ()
try:
fn = self._table[key]
except __HOLE__ as e:
# Check for default handler, key=None
if None in self._table:
fn = self._table[None]
else:
raise RuntimeError('No handler for key: %s, and default handler not defined' % str(e.args))
fn(msg, *args) | KeyError | dataset/ETHPy150Open nickoala/telepot/telepot/helper.py/Router.route |
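A hypothetical sketch of the contract this relies on, assuming telepot's `Router` constructor stores the key function as `_digest` and the handler table as `_table` (key `None` is the fallback handler, as the code above shows):

```python
import telepot.helper

def by_type(msg):
    return msg.get('type')   # may also return (key, extra_args...)

router = telepot.helper.Router(by_type, {
    'text': lambda msg: print('text:', msg['data']),
    None:   lambda msg: print('unhandled:', msg),   # default handler
})
router.route({'type': 'text', 'data': 'hi'})        # -> text: hi
router.route({'type': 'photo'})                     # -> unhandled: ...
```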
def notify(self):
try:
self.spinner = (self.spinner + 1) % 2
os.fchmod(self._tmp.fileno(), self.spinner)
except __HOLE__:
# python < 2.6
self._tmp.truncate(0)
os.write(self._tmp.fileno(), b"X") | AttributeError | dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/gunicorn/workers/workertmp.py/WorkerTmp.notify |
def write_to_files(self, base="."):
base = os.path.join(base, "records")
try:
os.mkdir(base)
except __HOLE__: pass
f = open(os.path.join(base, "%s"%self.id), "w")
self.model = bound_graph()
for m in self.meds:
self.add_all(self.rdf_med(m))
for p in self.problems:
self.add_all(self.rdf_problem(p))
for l in self.labs:
try:
self.add_all(self.rdf_lab(l))
except: pass
for v in self.vitals:
self.add_all(self.rdf_vital(v))
self.add_all(self.rdf_demographics())
f.write(serialize_rdf(self.model))
f.close() | OSError | dataset/ETHPy150Open smart-classic/smart_server/smart/lib/i2b2_export.py/i2b2Patient.write_to_files |
def run(self):
try:
if self.mode != "dir":
args = {}
if self.path:
args["InitialDir"] = os.path.dirname(self.path)
path = os.path.splitext(os.path.dirname(self.path))
args["File"] = path[0]
args["DefExt"] = path[1]
args["Title"] = self.title if self.title else "Pick a file..."
args["CustomFilter"] = 'Other file types\x00*.*\x00'
args["FilterIndex"] = 1
filters = ""
for f in self.filters:
if type(f) == str:
filters += (f + "\x00") * 2
else:
filters += f[0] + "\x00" + ";".join(f[1:]) + "\x00"
args["Filter"] = filters
flags = (win32con.OFN_EXTENSIONDIFFERENT |
win32con.OFN_OVERWRITEPROMPT)
if self.multiple:
flags |= win32con.OFN_ALLOWMULTISELECT | win32con.OFN_EXPLORER
if self.show_hidden:
flags |= win32con.OFN_FORCESHOWHIDDEN
args["Flags"] = flags
if self.mode == "open":
self.fname, _, _ = win32gui.GetOpenFileNameW(**args)
elif self.mode == "save":
self.fname, _, _ = win32gui.GetSaveFileNameW(**args)
if self.fname:
if self.multiple:
seq = str(self.fname).split("\x00")
dir_n, base_n = seq[0], seq[1:]
self.selection = [os.path.join(dir_n, i)
for i in base_n]
else:
self.selection = str(self.fname).split("\x00")
else:
# From http://goo.gl/UDqCqo
pidl, display_name, image_list = shell.SHBrowseForFolder(
win32gui.GetDesktopWindow(),
None,
self.title if self.title else "Pick a folder...",
0, None, None
)
self.selection = [str(shell.SHGetPathFromIDList(pidl))]
return self.selection
except (__HOLE__, pywintypes.error):
return None | RuntimeError | dataset/ETHPy150Open kivy/plyer/plyer/platforms/win/filechooser.py/Win32FileChooser.run |
def obj_get(self, bundle, **kwargs):
domain = kwargs['domain']
pk = kwargs['pk']
try:
user = self.Meta.object_class.get_by_user_id(pk, domain)
except __HOLE__:
user = None
return user | KeyError | dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/api/resources/v0_1.py/UserResource.obj_get |
def _safe_bool(bundle, param, default=False):
try:
return string_to_boolean(bundle.request.GET.get(param))
except __HOLE__:
return default | ValueError | dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/api/resources/v0_1.py/_safe_bool |
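Hypothetical request handling, assuming `string_to_boolean` accepts the usual true/false spellings and raises `ValueError` for anything else, including a missing parameter (URLs illustrative):

```python
# GET /v0.1/.../?footprint=true   -> _safe_bool(bundle, 'footprint') is True
# GET /v0.1/.../?footprint=bogus  -> ValueError is swallowed, default (False)
# GET /v0.1/.../                  -> parameter absent, default returned
```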
def _get_backend_from_engine(engine):
try:
engine_config = dict(utils.load_yaml(engine_cfg))[engine]
this_engine_cfg_file = engine_config['cfg']
this_engine_cfg = dict(utils.load_yaml(this_engine_cfg_file))
return Engine.get_backend(this_engine_cfg[engine]['backend'],
this_engine_cfg[engine])
except __HOLE__:
LOG.exception("Could not find engine's cfg script") | KeyError | dataset/ETHPy150Open openstack/entropy/entropy/__main__.py/_get_backend_from_engine |
def _add_to_list(engine, script_type, script_name, **script_args):
backend = _get_backend_from_engine(engine)
if backend.check_script_exists(script_type, script_name):
LOG.error('%s already exists, not registering', script_type)
return False
try:
data = {
script_name: script_args
}
backend.add_script(script_type, data)
return True
except __HOLE__:
LOG.exception("No %s script called %s", script_type, script_name)
except Exception:
LOG.exception("Could not register %s script %s", script_type,
script_name)
return False | KeyError | dataset/ETHPy150Open openstack/entropy/entropy/__main__.py/_add_to_list |
def prepare(self, data):
"""Complete string preparation procedure for 'stored' strings.
(includes checks for unassigned codes)
:Parameters:
- `data`: Unicode string to prepare.
:return: prepared string
:raise StringprepError: if the preparation fails
"""
ret = self.cache.get(data)
if ret is not None:
return ret
result = self.map(data)
if self.normalization:
result = self.normalization(result)
result = self.prohibit(result)
result = self.check_unassigned(result)
if self.bidi:
result = self.check_bidi(result)
if isinstance(result, list):
result = "".join()
if len(self.cache_items) >= _stringprep_cache_size:
remove = self.cache_items[: -_stringprep_cache_size // 2]
for profile, key in remove:
try:
del profile.cache[key]
except __HOLE__:
pass
self.cache_items[:] = self.cache_items[
-_stringprep_cache_size // 2 :]
self.cache_items.append((self, data))
self.cache[data] = result
return result | KeyError | dataset/ETHPy150Open kuri65536/python-for-android/python3-alpha/python-libs/pyxmpp2/xmppstringprep.py/Profile.prepare |
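A usage sketch, assuming the module's predefined XMPP profiles (pyxmpp2 ships a `nodeprep` profile for JID localparts):

```python
from pyxmpp2.xmppstringprep import nodeprep

print(nodeprep.prepare(u'UserName'))   # u'username' -- case-folded, then cached
nodeprep.prepare(u'UserName')          # second call is served from the cache
```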
def set_stringprep_cache_size(size):
"""Modify stringprep cache size.
:Parameters:
- `size`: new cache size
"""
# pylint: disable-msg=W0603
global _stringprep_cache_size
_stringprep_cache_size = size
if len(Profile.cache_items) > size:
remove = Profile.cache_items[:-size]
for profile, key in remove:
try:
del profile.cache[key]
except __HOLE__:
pass
Profile.cache_items = Profile.cache_items[-size:]
# vi: sts=4 et sw=4 | KeyError | dataset/ETHPy150Open kuri65536/python-for-android/python3-alpha/python-libs/pyxmpp2/xmppstringprep.py/set_stringprep_cache_size |
def main():
opts = parse_args()
lines = opts.lines
seconds = opts.seconds
force_start = opts.force_start
client = command.Client(opts.socket)
try:
if not opts.raw:
curses.wrapper(get_stats, client, limit=lines, seconds=seconds,
force_start=force_start)
else:
raw_stats(client, limit=lines, force_start=force_start)
except TraceNotStarted:
print("tracemalloc not started on qtile, start by setting "
"PYTHONTRACEMALLOC=1 before starting qtile")
print("or force start tracemalloc now, but you'll lose early traces")
exit(1)
except TraceCantStart:
print("Can't start tracemalloc on qtile, check the logs")
except __HOLE__:
exit(-1) | KeyboardInterrupt | dataset/ETHPy150Open qtile/qtile/libqtile/scripts/qtile_top.py/main |
def __init__(self, dir, host, rrd, period="day"):
api = twirrdy.RRDBasicAPI()
self.rrd = rrd
self.host = host
path = "%s/%s/%s" % (dir, host, rrd)
self.rrd_path = "%s.rrd" % path
self.info = api.info(self.rrd_path)
self.color = Colorator()
self.period = period
try:
coil_fd = open("%s.coil" % path)
try:
coil_stat = os.fstat(coil_fd.fileno())
self.private = not (coil_stat.st_mode & stat.S_IROTH)
self.conf = coil.parse(coil_fd.read())
finally:
coil_fd.close()
except (IOError, __HOLE__), ex:
raise errors.InitError("Unable to read coil file: %s" % ex)
if period not in ('day', 'week', 'month', 'year'):
raise ValueError("Invalid period parameter")
self.args = []
self.ds = []
self._init_args()
self._init_ds()
self._init_ds_args() | OSError | dataset/ETHPy150Open marineam/nagcat/python/nagcat/graph.py/Graph.__init__ |
@require_POST
@require_can_edit_data
def excel_commit(request, domain):
"""
Step three of three.
This page is submitted with the list of column to
case property mappings for this upload.
The config variable is an ImporterConfig object that
has everything gathered from previous steps, with the
addition of all the field data. See that class for
more information.
"""
config = importer_util.ImporterConfig.from_request(request)
excel_id = request.session.get(EXCEL_SESSION_ID)
excel_ref = DownloadBase.get(excel_id)
try:
importer_util.get_spreadsheet(excel_ref, config.named_columns)
except ImporterError as e:
return render_error(request, domain, _get_importer_error_message(e))
download = DownloadBase()
download.set_task(bulk_import_async.delay(
download.download_id,
config,
domain,
excel_id,
))
try:
del request.session[EXCEL_SESSION_ID]
except __HOLE__:
pass
return render(
request,
"importer/excel_commit.html", {
'download_id': download.download_id,
'template': 'importer/partials/import_status.html',
'domain': domain,
'report': {
'name': 'Import: Completed'
},
'slug': base.ImportCases.slug
}
) | KeyError | dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/importer/views.py/excel_commit |
def test_original_excepthook(self):
savestderr = sys.stderr
err = cStringIO.StringIO()
sys.stderr = err
eh = sys.__excepthook__
self.assertRaises(TypeError, eh)
try:
raise ValueError(42)
except __HOLE__, exc:
eh(*sys.exc_info())
sys.stderr = savestderr
self.assert_(err.getvalue().endswith("ValueError: 42\n"))
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky. | ValueError | dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_sys.py/SysModuleTest.test_original_excepthook |
def test_exc_clear(self):
self.assertRaises(TypeError, sys.exc_clear, 42)
# Verify that exc_info is present and matches exc, then clear it, and
# check that it worked.
def clear_check(exc):
typ, value, traceback = sys.exc_info()
self.assert_(typ is not None)
self.assert_(value is exc)
self.assert_(traceback is not None)
sys.exc_clear()
typ, value, traceback = sys.exc_info()
self.assert_(typ is None)
self.assert_(value is None)
self.assert_(traceback is None)
def clear():
try:
raise ValueError, 42
except __HOLE__, exc:
clear_check(exc)
# Raise an exception and check that it can be cleared
clear()
# Verify that a frame currently handling an exception is
# unaffected by calling exc_clear in a nested frame.
try:
raise ValueError, 13
except ValueError, exc:
typ1, value1, traceback1 = sys.exc_info()
clear()
typ2, value2, traceback2 = sys.exc_info()
self.assert_(typ1 is typ2)
self.assert_(value1 is exc)
self.assert_(value1 is value2)
self.assert_(traceback1 is traceback2)
# Check that an exception can be cleared outside of an except block
clear_check(exc) | ValueError | dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_sys.py/SysModuleTest.test_exc_clear |
def test_exit(self):
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
try:
sys.exit(0)
except SystemExit, exc:
self.assertEquals(exc.code, 0)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with tuple argument with one entry
# entry will be unpacked
try:
sys.exit(42)
except SystemExit, exc:
self.assertEquals(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with integer argument
try:
sys.exit((42,))
except __HOLE__, exc:
self.assertEquals(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with string argument
try:
sys.exit("exit")
except SystemExit, exc:
self.assertEquals(exc.code, "exit")
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with tuple argument with two entries
try:
sys.exit((17, 23))
except SystemExit, exc:
self.assertEquals(exc.code, (17, 23))
except:
self.fail("wrong exception")
else:
self.fail("no exception") | SystemExit | dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_sys.py/SysModuleTest.test_exit |
def get_pool(arg = None, opts = None, abort = False):
""" Returns pool to work with
Returns a pynipap.Pool object representing the pool we are working with.
"""
# yep, global variables are evil
global pool
try:
pool = Pool.list({ 'name': arg })[0]
except __HOLE__:
if abort:
print >> sys.stderr, "Pool '%s' not found." % str(arg)
sys.exit(1)
else:
pool = None
return pool | IndexError | dataset/ETHPy150Open SpriteLink/NIPAP/nipap-cli/nipap_cli/nipap_cli.py/get_pool |
def get_vrf(arg = None, default_var = 'default_vrf_rt', abort = False):
""" Returns VRF to work in
Returns a pynipap.VRF object representing the VRF we are working
in. If there is a VRF set globally, return this. If not, fetch the
VRF named 'arg'. If 'arg' is None, fetch the default_vrf
attribute from the config file and return this VRF.
"""
# yep, global variables are evil
global vrf
# if there is a VRF set, return it
if vrf is not None:
return vrf
if arg is None:
# fetch default vrf
try:
vrf_rt = cfg.get('global', default_var)
except ConfigParser.NoOptionError:
# default to all VRFs
vrf_rt = 'all'
else:
vrf_rt = arg
if vrf_rt.lower() == 'all':
vrf = VRF()
vrf.rt = 'all'
else:
if vrf_rt.lower() in ('-', 'none'):
vrf_rt = None
try:
vrf = VRF.search({ 'val1': 'rt',
'operator': 'equals',
'val2': vrf_rt
})['result'][0]
except (__HOLE__, IndexError):
if abort:
print >> sys.stderr, "VRF with [RT: %s] not found." % str(vrf_rt)
sys.exit(1)
else:
vrf = False
return vrf | KeyError | dataset/ETHPy150Open SpriteLink/NIPAP/nipap-cli/nipap_cli/nipap_cli.py/get_vrf |
def add_prefix(arg, opts, shell_opts):
""" Add prefix to NIPAP
"""
# sanity checks
if 'from-pool' not in opts and 'from-prefix' not in opts and 'prefix' not in opts:
print >> sys.stderr, "ERROR: 'prefix', 'from-pool' or 'from-prefix' must be specified."
sys.exit(1)
if len([opt for opt in opts if opt in ['from-pool', 'from-prefix', 'prefix']]) > 1:
print >> sys.stderr, "ERROR: Use either assignment 'from-pool', 'from-prefix' or manual mode (using 'prefix')"
sys.exit(1)
if 'from-pool' in opts:
return add_prefix_from_pool(arg, opts)
args = {}
p = _prefix_from_opts(opts)
p.vrf = get_vrf(opts.get('vrf_rt'), abort=True)
for avp in opts.get('extra-attribute', []):
try:
key, value = avp.split('=', 1)
except __HOLE__:
print >> sys.stderr, "ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp
return
p.avps[key] = value
if 'from-prefix' in opts:
args['from-prefix'] = [ opts['from-prefix'], ]
if 'prefix_length' in opts:
args['prefix_length'] = int(opts['prefix_length'])
if 'family' in opts:
if opts['family'] == 'ipv4':
family = 4
elif opts['family'] == 'ipv6':
family = 6
elif opts['family'] == 'dual-stack':
print >> sys.stderr, "ERROR: dual-stack mode only valid for from-pool assignments"
sys.exit(1)
args['family'] = family
# try to automatically figure out type for new prefix when not
# allocating from a pool
# get a list of prefixes that contain this prefix
vrf_id = 0
if p.vrf:
vrf_id = p.vrf.id
if 'from-prefix' in args:
parent_prefix = args['from-prefix'][0]
parent_op = 'equals'
else:
# If no prefix length is specified it is assumed to be a host and we do
# a search for prefixes that contains the specified prefix. The last
# entry will be the parent of the new prefix and we can look at it to
# determine type.
# If prefix length is specified (i.e. CIDR format) we check if prefix
# length equals max length in which case we assume a host prefix,
# otherwise we search for the network using an equal match and by
# zeroing out bits in the host part.
if len(opts.get('prefix').split("/")) == 2:
ip = IPy.IP(opts.get('prefix').split("/")[0])
plen = int(opts.get('prefix').split("/")[1])
if ip.version() == 4 and plen == 32 or ip.version() == 6 and plen == 128:
parent_prefix = str(ip)
parent_op = 'contains'
else:
parent_prefix = str(IPy.IP(opts.get('prefix'), make_net=True))
parent_op = 'equals'
else:
parent_prefix = opts.get('prefix')
parent_op = 'contains'
auto_type_query = {
'val1': {
'val1' : 'prefix',
'operator' : parent_op,
'val2' : parent_prefix
},
'operator': 'and',
'val2': {
'val1' : 'vrf_id',
'operator' : 'equals',
'val2' : vrf_id
}
}
res = Prefix.search(auto_type_query, { })
# no results, ie the requested prefix is a top level prefix
if len(res['result']) == 0:
if p.type is None:
print >> sys.stderr, "ERROR: Type of prefix must be specified ('assignment' or 'reservation')."
sys.exit(1)
else:
# last prefix in list will be the parent of the new prefix
parent = res['result'][-1]
# if the parent is an assignment, we can assume the new prefix to be
# a host and act accordingly
if parent.type == 'assignment':
# automatically set type
if p.type is None:
print >> sys.stderr, "WARNING: Parent prefix is of type 'assignment'. Automatically setting type 'host' for new prefix."
elif p.type == 'host':
pass
else:
print >> sys.stderr, "WARNING: Parent prefix is of type 'assignment'. Automatically overriding specified type '%s' with type 'host' for new prefix." % p.type
p.type = 'host'
# if it's a manually specified prefix
if 'prefix' in opts:
# fiddle prefix length to all bits set
if parent.family == 4:
p.prefix = p.prefix.split('/')[0] + '/32'
else:
p.prefix = p.prefix.split('/')[0] + '/128'
# for from-prefix, we set prefix_length to host length
elif 'from-prefix' in opts:
if parent.family == 4:
args['prefix_length'] = 32
else:
args['prefix_length'] = 128
try:
p.save(args)
except NipapError as exc:
print >> sys.stderr, "Could not add prefix to NIPAP: %s" % str(exc)
sys.exit(1)
if p.type == 'host':
print "Host %s added to %s: %s" % (p.display_prefix,
vrf_format(p.vrf), p.node or p.description)
else:
print "Network %s added to %s: %s" % (p.display_prefix,
vrf_format(p.vrf), p.description)
if opts.get('add-hosts') is not None:
if p.type != 'assignment':
print >> sys.stderr, "ERROR: Not possible to add hosts to non-assignment"
sys.exit(1)
for host in opts.get('add-hosts').split(','):
h_opts = {
'from-prefix': p.prefix,
'vrf_rt': p.vrf.rt,
'type': 'host',
'node': host
}
add_prefix({}, h_opts, {}) | ValueError | dataset/ETHPy150Open SpriteLink/NIPAP/nipap-cli/nipap_cli/nipap_cli.py/add_prefix |
def add_prefix_from_pool(arg, opts):
""" Add prefix using from-pool to NIPAP
"""
args = {}
# sanity checking
if 'from-pool' in opts:
res = Pool.list({ 'name': opts['from-pool'] })
if len(res) == 0:
print >> sys.stderr, "No pool named '%s' found." % opts['from-pool']
sys.exit(1)
args['from-pool'] = res[0]
if 'family' not in opts:
print >> sys.stderr, "ERROR: You have to specify the address family."
sys.exit(1)
if opts['family'] == 'ipv4':
afis = [4]
elif opts['family'] == 'ipv6':
afis = [6]
elif opts['family'] == 'dual-stack':
afis = [4, 6]
if 'prefix_length' in opts:
print >> sys.stderr, "ERROR: 'prefix_length' can not be specified for 'dual-stack' assignment"
sys.exit(1)
else:
print >> sys.stderr, "ERROR: 'family' must be one of: %s" % " ".join(valid_families)
sys.exit(1)
if 'prefix_length' in opts:
args['prefix_length'] = int(opts['prefix_length'])
for afi in afis:
p = _prefix_from_opts(opts)
if opts.get('vrf_rt') is None:
# if no VRF is specified use the pools implied VRF
p.vrf = args['from-pool'].vrf
else:
# use the specified VRF
p.vrf = get_vrf(opts.get('vrf_rt'), abort=True)
# set type to default type of pool unless already set
if p.type is None:
if args['from-pool'].default_type is None:
print >> sys.stderr, "ERROR: Type not specified and no default-type specified for pool: %s" % opts['from-pool']
p.type = args['from-pool'].default_type
for avp in opts.get('extra-attribute', []):
try:
key, value = avp.split('=', 1)
except __HOLE__:
print >> sys.stderr, "ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp
return
p.avps[key] = value
args['family'] = afi
try:
p.save(args)
except NipapError as exc:
print >> sys.stderr, "Could not add prefix to NIPAP: %s" % str(exc)
sys.exit(1)
if p.type == 'host':
print "Host %s added to %s: %s" % (p.display_prefix,
vrf_format(p.vrf), p.node or p.description)
else:
print "Network %s added to %s: %s" % (p.display_prefix,
vrf_format(p.vrf), p.description)
if opts.get('add-hosts') is not None:
if p.type != 'assignment':
print >> sys.stderr, "ERROR: Not possible to add hosts to non-assignment"
sys.exit(1)
for host in opts.get('add-hosts').split(','):
h_opts = {
'from-prefix': p.prefix,
'vrf_rt': p.vrf.rt,
'type': 'host',
'node': host
}
add_prefix({}, h_opts, {}) | ValueError | dataset/ETHPy150Open SpriteLink/NIPAP/nipap-cli/nipap_cli/nipap_cli.py/add_prefix_from_pool |
def add_vrf(arg, opts, shell_opts):
""" Add VRF to NIPAP
"""
v = VRF()
v.rt = opts.get('rt')
v.name = opts.get('name')
v.description = opts.get('description')
v.tags = list(csv.reader([opts.get('tags', '')], escapechar='\\'))[0]
for avp in opts.get('extra-attribute', []):
try:
key, value = avp.split('=', 1)
except __HOLE__:
print >> sys.stderr, "ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp
return
v.avps[key] = value
try:
v.save()
except pynipap.NipapError as exc:
print >> sys.stderr, "Could not add VRF to NIPAP: %s" % str(exc)
sys.exit(1)
print "Added %s" % (vrf_format(v)) | ValueError | dataset/ETHPy150Open SpriteLink/NIPAP/nipap-cli/nipap_cli/nipap_cli.py/add_vrf |
def add_pool(arg, opts, shell_opts):
""" Add a pool.
"""
p = Pool()
p.name = opts.get('name')
p.description = opts.get('description')
p.default_type = opts.get('default-type')
p.ipv4_default_prefix_length = opts.get('ipv4_default_prefix_length')
p.ipv6_default_prefix_length = opts.get('ipv6_default_prefix_length')
if 'tags' in opts:
tags = list(csv.reader([opts.get('tags', '')], escapechar='\\'))[0]
p.tags = {}
for tag_name in tags:
tag = Tag()
tag.name = tag_name
p.tags[tag_name] = tag
for avp in opts.get('extra-attribute', []):
try:
key, value = avp.split('=', 1)
except __HOLE__:
print >> sys.stderr, "ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp
return
p.avps[key] = value
try:
p.save()
except pynipap.NipapError as exc:
print >> sys.stderr, "Could not add pool to NIPAP: %s" % str(exc)
sys.exit(1)
print "Pool '%s' created." % (p.name) | ValueError | dataset/ETHPy150Open SpriteLink/NIPAP/nipap-cli/nipap_cli/nipap_cli.py/add_pool |
def view_vrf(arg, opts, shell_opts):
""" View a single VRF
"""
if arg is None:
print >> sys.stderr, "ERROR: Please specify the RT of the VRF to view."
sys.exit(1)
# interpret as default VRF (ie, RT = None)
if arg.lower() in ('-', 'none'):
arg = None
try:
v = VRF.search({
'val1': 'rt',
'operator': 'equals',
'val2': arg }
)['result'][0]
except (KeyError, __HOLE__):
print >> sys.stderr, "VRF with [RT: %s] not found." % str(arg)
sys.exit(1)
print "-- VRF"
print " %-26s : %d" % ("ID", v.id)
print " %-26s : %s" % ("RT", v.rt)
print " %-26s : %s" % ("Name", v.name)
print " %-26s : %s" % ("Description", v.description)
print "-- Extra Attributes"
if v.avps is not None:
for key in sorted(v.avps, key=lambda s: s.lower()):
print " %-26s : %s" % (key, v.avps[key])
print "-- Tags"
for tag_name in sorted(v.tags, key=lambda s: s.lower()):
print " %s" % tag_name
# statistics
if v.total_addresses_v4 == 0:
used_percent_v4 = 0
else:
used_percent_v4 = (float(v.used_addresses_v4)/v.total_addresses_v4)*100
if v.total_addresses_v6 == 0:
used_percent_v6 = 0
else:
used_percent_v6 = (float(v.used_addresses_v6)/v.total_addresses_v6)*100
print "-- Statistics"
print " %-26s : %s" % ("IPv4 prefixes", v.num_prefixes_v4)
print " %-26s : %.0f / %.0f (%.2f%% of %.0f)" % ("IPv4 addresses Used / Free",
v.used_addresses_v4, v.free_addresses_v4, used_percent_v4,
v.total_addresses_v4)
print " %-26s : %s" % ("IPv6 prefixes", v.num_prefixes_v6)
print " %-26s : %.4e / %.4e (%.2f%% of %.4e)" % ("IPv6 addresses Used / Free",
v.used_addresses_v6, v.free_addresses_v6, used_percent_v6,
v.total_addresses_v6) | IndexError | dataset/ETHPy150Open SpriteLink/NIPAP/nipap-cli/nipap_cli/nipap_cli.py/view_vrf |
def view_prefix(arg, opts, shell_opts):
""" View a single prefix.
"""
# Internally, this function searches in the prefix column which means that
# hosts have a prefix length of 32/128 while they are normally displayed
# with the prefix length of the network they are in. To allow the user to
# input either, e.g. 1.0.0.1 or 1.0.0.1/31 we strip the prefix length bits
# to assume /32 if the address is not a network address. If it is the
# network address we always search using the specified mask. In certain
# cases there is a host with the network address, typically when /31 or /127
# prefix lengths are used, for example 1.0.0.0/31 and 1.0.0.0/32 (first host
# in /31 network) in which case it becomes necessary to distinguish between
# the two using the mask.
try:
# this fails if bits are set on right side of mask
ip = IPy.IP(arg)
except __HOLE__:
arg = arg.split('/')[0]
q = { 'prefix': arg }
v = get_vrf(opts.get('vrf_rt'), abort=True)
if v.rt != 'all':
q['vrf_rt'] = v.rt
res = Prefix.list(q)
if len(res) == 0:
vrf_text = 'any VRF'
if v.rt != 'all':
vrf_text = vrf_format(v)
print >> sys.stderr, "Address %s not found in %s." % (arg, vrf_text)
sys.exit(1)
p = res[0]
vrf = p.vrf.rt
print "-- Address "
print " %-26s : %s" % ("Prefix", p.prefix)
print " %-26s : %s" % ("Display prefix", p.display_prefix)
print " %-26s : %s" % ("Type", p.type)
print " %-26s : %s" % ("Status", p.status)
print " %-26s : IPv%s" % ("Family", p.family)
print " %-26s : %s" % ("VRF", vrf)
print " %-26s : %s" % ("Description", p.description)
print " %-26s : %s" % ("Node", p.node)
print " %-26s : %s" % ("Country", p.country)
print " %-26s : %s" % ("Order", p.order_id)
print " %-26s : %s" % ("Customer", p.customer_id)
print " %-26s : %s" % ("VLAN", p.vlan)
print " %-26s : %s" % ("Alarm priority", p.alarm_priority)
print " %-26s : %s" % ("Monitor", p.monitor)
print " %-26s : %s" % ("Added", p.added)
print " %-26s : %s" % ("Last modified", p.last_modified)
print " %-26s : %s" % ("Expires", p.expires or '-')
if p.family == 4:
print " %-26s : %s / %s (%.2f%% of %s)" % ("Addresses Used / Free", p.used_addresses,
p.free_addresses, (float(p.used_addresses)/p.total_addresses)*100,
p.total_addresses)
else:
print " %-26s : %.4e / %.4e (%.2f%% of %.4e)" % ("Addresses Used / Free", p.used_addresses,
p.free_addresses, (float(p.used_addresses)/p.total_addresses)*100,
p.total_addresses)
print "-- Extra Attributes"
if p.avps is not None:
for key in sorted(p.avps, key=lambda s: s.lower()):
print " %-26s : %s" % (key, p.avps[key])
print "-- Tags"
for tag_name in sorted(p.tags, key=lambda s: s.lower()):
print " %s" % tag_name
print "-- Inherited Tags"
for tag_name in sorted(p.inherited_tags, key=lambda s: s.lower()):
print " %s" % tag_name
print "-- Comment"
print p.comment or '' | ValueError | dataset/ETHPy150Open SpriteLink/NIPAP/nipap-cli/nipap_cli/nipap_cli.py/view_prefix |
def modify_vrf(arg, opts, shell_opts):
""" Modify a VRF with the options set in opts
"""
res = VRF.list({ 'rt': arg })
if len(res) < 1:
print >> sys.stderr, "VRF with [RT: %s] not found." % arg
sys.exit(1)
v = res[0]
if 'rt' in opts:
v.rt = opts['rt']
if 'name' in opts:
v.name = opts['name']
if 'description' in opts:
v.description = opts['description']
if 'tags' in opts:
tags = list(csv.reader([opts.get('tags', '')], escapechar='\\'))[0]
v.tags = {}
for tag_name in tags:
tag = Tag()
tag.name = tag_name
v.tags[tag_name] = tag
for avp in opts.get('extra-attribute', []):
try:
key, value = avp.split('=', 1)
except __HOLE__:
print >> sys.stderr, "ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp
return
v.avps[key] = value
v.save()
print "%s saved." % vrf_format(v) | ValueError | dataset/ETHPy150Open SpriteLink/NIPAP/nipap-cli/nipap_cli/nipap_cli.py/modify_vrf |
def modify_pool(arg, opts, shell_opts):
""" Modify a pool with the options set in opts
"""
res = Pool.list({ 'name': arg })
if len(res) < 1:
print >> sys.stderr, "No pool with name '%s' found." % arg
sys.exit(1)
p = res[0]
if 'name' in opts:
p.name = opts['name']
if 'description' in opts:
p.description = opts['description']
if 'default-type' in opts:
p.default_type = opts['default-type']
if 'ipv4_default_prefix_length' in opts:
p.ipv4_default_prefix_length = opts['ipv4_default_prefix_length']
if 'ipv6_default_prefix_length' in opts:
p.ipv6_default_prefix_length = opts['ipv6_default_prefix_length']
if 'tags' in opts:
tags = list(csv.reader([opts.get('tags', '')], escapechar='\\'))[0]
p.tags = {}
for tag_name in tags:
tag = Tag()
tag.name = tag_name
p.tags[tag_name] = tag
for avp in opts.get('extra-attribute', []):
try:
key, value = avp.split('=', 1)
except __HOLE__:
print >> sys.stderr, "ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp
return
p.avps[key] = value
p.save()
print "Pool '%s' saved." % p.name | ValueError | dataset/ETHPy150Open SpriteLink/NIPAP/nipap-cli/nipap_cli/nipap_cli.py/modify_pool |
def modify_prefix(arg, opts, shell_opts):
""" Modify the prefix 'arg' with the options 'opts'
"""
modify_confirmed = shell_opts.force
spec = { 'prefix': arg }
v = get_vrf(opts.get('vrf_rt'), abort=True)
spec['vrf_rt'] = v.rt
res = Prefix.list(spec)
if len(res) == 0:
print >> sys.stderr, "Prefix %s not found in %s." % (arg, vrf_format(v))
return
p = res[0]
if 'prefix' in opts:
p.prefix = opts['prefix']
if 'description' in opts:
p.description = opts['description']
if 'comment' in opts:
p.comment = opts['comment']
if 'tags' in opts:
tags = list(csv.reader([opts.get('tags', '')], escapechar='\\'))[0]
p.tags = {}
for tag_name in tags:
tag = Tag()
tag.name = tag_name
p.tags[tag_name] = tag
if 'node' in opts:
p.node = opts['node']
if 'type' in opts:
p.type = opts['type']
if 'status' in opts:
p.status = opts['status']
if 'country' in opts:
p.country = opts['country']
if 'order_id' in opts:
p.order_id = opts['order_id']
if 'customer_id' in opts:
p.customer_id = opts['customer_id']
if 'vlan' in opts:
p.vlan = opts['vlan']
if 'alarm_priority' in opts:
p.alarm_priority = opts['alarm_priority']
if 'monitor' in opts:
p.monitor = _str_to_bool(opts['monitor'])
if 'expires' in opts:
p.expires = opts['expires']
for avp in opts.get('extra-attribute', []):
try:
key, value = avp.split('=', 1)
except __HOLE__:
print >> sys.stderr, "ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp
return
p.avps[key] = value
# Promt user if prefix has authoritative source != nipap
if not modify_confirmed and p.authoritative_source.lower() != 'nipap':
res = raw_input("Prefix %s in %s is managed by system '%s'. Are you sure you want to modify it? [y/n]: " %
(p.prefix, vrf_format(p.vrf), p.authoritative_source))
# If the user declines, short-circuit...
if res.lower() not in [ 'y', 'yes' ]:
print "Operation aborted."
return
try:
p.save()
except NipapError as exc:
print >> sys.stderr, "Could not save prefix changes: %s" % str(exc)
sys.exit(1)
print "Prefix %s in %s saved." % (p.display_prefix, vrf_format(p.vrf)) | ValueError | dataset/ETHPy150Open SpriteLink/NIPAP/nipap-cli/nipap_cli/nipap_cli.py/modify_prefix |
def prefix_attr_add(arg, opts, shell_opts):
""" Add attributes to a prefix
"""
spec = { 'prefix': arg }
v = get_vrf(opts.get('vrf_rt'), abort=True)
spec['vrf_rt'] = v.rt
res = Prefix.list(spec)
if len(res) == 0:
print >> sys.stderr, "Prefix %s not found in %s." % (arg, vrf_format(v))
return
p = res[0]
for avp in opts.get('extra-attribute', []):
try:
key, value = avp.split('=', 1)
except __HOLE__:
print >> sys.stderr, "ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp
sys.exit(1)
if key in p.avps:
print >> sys.stderr, "Unable to add extra-attribute: '%s' already exists." % key
sys.exit(1)
p.avps[key] = value
try:
p.save()
except NipapError as exc:
print >> sys.stderr, "Could not save prefix changes: %s" % str(exc)
sys.exit(1)
print "Prefix %s in %s saved." % (p.display_prefix, vrf_format(p.vrf)) | ValueError | dataset/ETHPy150Open SpriteLink/NIPAP/nipap-cli/nipap_cli/nipap_cli.py/prefix_attr_add |
def vrf_attr_add(arg, opts, shell_opts):
""" Add attributes to a VRF
"""
if arg is None:
print >> sys.stderr, "ERROR: Please specify the RT of the VRF to view."
sys.exit(1)
# interpret as default VRF (ie, RT = None)
if arg.lower() in ('-', 'none'):
arg = None
try:
v = VRF.search({
'val1': 'rt',
'operator': 'equals',
'val2': arg }
)['result'][0]
except (KeyError, __HOLE__):
print >> sys.stderr, "VRF with [RT: %s] not found." % str(arg)
sys.exit(1)
for avp in opts.get('extra-attribute', []):
try:
key, value = avp.split('=', 1)
except ValueError:
print >> sys.stderr, "ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp
sys.exit(1)
if key in v.avps:
print >> sys.stderr, "Unable to add extra-attribute: '%s' already exists." % key
sys.exit(1)
v.avps[key] = value
try:
v.save()
except NipapError as exc:
print >> sys.stderr, "Could not save VRF changes: %s" % str(exc)
sys.exit(1)
print "%s saved." % vrf_format(v) | IndexError | dataset/ETHPy150Open SpriteLink/NIPAP/nipap-cli/nipap_cli/nipap_cli.py/vrf_attr_add |
def vrf_attr_remove(arg, opts, shell_opts):
""" Remove attributes from a prefix
"""
if arg is None:
print >> sys.stderr, "ERROR: Please specify the RT of the VRF to view."
sys.exit(1)
# interpret as default VRF (ie, RT = None)
if arg.lower() in ('-', 'none'):
arg = None
try:
v = VRF.search({
'val1': 'rt',
'operator': 'equals',
'val2': arg }
)['result'][0]
except (__HOLE__, IndexError):
print >> sys.stderr, "VRF with [RT: %s] not found." % str(arg)
sys.exit(1)
for key in opts.get('extra-attribute', []):
if key not in v.avps:
print >> sys.stderr, "Unable to remove extra-attribute: '%s' does not exist." % key
sys.exit(1)
del v.avps[key]
try:
v.save()
except NipapError as exc:
print >> sys.stderr, "Could not save VRF changes: %s" % str(exc)
sys.exit(1)
print "%s saved." % vrf_format(v) | KeyError | dataset/ETHPy150Open SpriteLink/NIPAP/nipap-cli/nipap_cli/nipap_cli.py/vrf_attr_remove |
def pool_attr_add(arg, opts, shell_opts):
""" Add attributes to a pool
"""
res = Pool.list({ 'name': arg })
if len(res) < 1:
print >> sys.stderr, "No pool with name '%s' found." % arg
sys.exit(1)
p = res[0]
for avp in opts.get('extra-attribute', []):
try:
key, value = avp.split('=', 1)
except __HOLE__:
print >> sys.stderr, "ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp
sys.exit(1)
if key in p.avps:
print >> sys.stderr, "Unable to add extra-attribute: '%s' already exists." % key
sys.exit(1)
p.avps[key] = value
try:
p.save()
except NipapError as exc:
print >> sys.stderr, "Could not save pool changes: %s" % str(exc)
sys.exit(1)
print "Pool '%s' saved." % p.name | ValueError | dataset/ETHPy150Open SpriteLink/NIPAP/nipap-cli/nipap_cli/nipap_cli.py/pool_attr_add |
def _UploadFile(self, media_source, title, category, folder_or_uri=None):
"""Uploads a file to the Document List feed.
Args:
media_source: A gdata.MediaSource object containing the file to be
uploaded.
title: string The title of the document on the server after being
uploaded.
category: An atom.Category object specifying the appropriate document
type.
folder_or_uri: DocumentListEntry or string (optional) An object with a
link to a folder or a uri to a folder to upload to.
Note: A valid uri for a folder is of the form:
/feeds/folders/private/full/folder%3Afolder_id
Returns:
A DocumentListEntry containing information about the document created on
the Google Documents service.
"""
if folder_or_uri:
try:
uri = folder_or_uri.content.src
except __HOLE__:
uri = folder_or_uri
else:
uri = '/feeds/documents/private/full'
entry = gdata.docs.DocumentListEntry()
entry.title = atom.Title(text=title)
if category is not None:
entry.category.append(category)
entry = self.Post(entry, uri, media_source=media_source,
extra_headers={'Slug': media_source.file_name},
converter=gdata.docs.DocumentListEntryFromString)
return entry | AttributeError | dataset/ETHPy150Open acil-bwh/SlicerCIP/Scripted/attic/PicasaSnap/gdata/docs/service.py/DocsService._UploadFile |
def CreateFolder(self, title, folder_or_uri=None):
"""Creates a folder in the Document List feed.
Args:
title: string The title of the folder on the server after being created.
folder_or_uri: DocumentListEntry or string (optional) An object with a
link to a folder or a uri to a folder to upload to.
Note: A valid uri for a folder is of the form:
/feeds/folders/private/full/folder%3Afolder_id
Returns:
A DocumentListEntry containing information about the folder created on
the Google Documents service.
"""
if folder_or_uri:
try:
uri = folder_or_uri.content.src
except __HOLE__:
uri = folder_or_uri
else:
uri = '/feeds/documents/private/full'
folder_entry = gdata.docs.DocumentListEntry()
folder_entry.title = atom.Title(text=title)
folder_entry.category.append(self._MakeKindCategory(FOLDER_LABEL))
folder_entry = self.Post(folder_entry, uri,
converter=gdata.docs.DocumentListEntryFromString)
return folder_entry | AttributeError | dataset/ETHPy150Open acil-bwh/SlicerCIP/Scripted/attic/PicasaSnap/gdata/docs/service.py/DocsService.CreateFolder |
def __call__(self, argv):
if not argv or not argv[0] or argv[0].startswith("_"):
return 2
argv = self._expand_aliases(argv)
# special escape characters...
if argv[0].startswith("!"): # bang-escape reads pipe
argv[0] = argv[0][1:]
argv.insert(0, "pipe")
elif argv[0].startswith("%"): # percent-escape spawns pty
argv[0] = argv[0][1:]
argv.insert(0, "spawn")
elif argv[0].startswith("#"): # comment
return 0
elif argv[0].startswith("@"): # python exec escape
argv[0] = argv[0][1:]
argv.insert(0, "pyexec")
elif argv[0].startswith("="): # python eval escape
argv[0] = argv[0][1:]
argv.insert(0, "pyeval")
# ok, now fetch the real method...
try:
meth = getattr(self, argv[0])
except AttributeError:
meth = self.default_command
# ...and exec it.
try:
rv = meth(argv) # call the method
except (NewCommand, CommandQuit, CommandExit, KeyboardInterrupt):
raise # pass these through to parser
except CLISyntaxError:
self._print("Syntax error.")
self._print(meth.__doc__)
except IndexError: # may have tried to get non-existent argv value.
ex, val, tb = sys.exc_info()
lev = 0
t = tb
while t.tb_next is not None:
t = t.tb_next
lev += 1
if lev == 1: # Happened inside the command method.
self._print("Argument error.")
self._print(meth.__doc__)
else: # IndexError from something called by command method.
if _DEBUG:
from pycopia import debugger
debugger.post_mortem(tb, ex, val)
else:
self.except_hook(ex, val, tb)
except getopt.GetoptError, err:
self._print("option %r: %s" % (err.opt, err.msg))
except:
ex, val, tb = sys.exc_info()
if _DEBUG:
from pycopia import debugger # import here due to circular dependency
debugger.post_mortem(tb, ex, val)
else:
self.except_hook(ex, val, tb)
else:
if rv is not None:
try:
self._environ["?"] = int(rv)
except (__HOLE__, TypeError, AttributeError):
self._environ["?"] = 0
self._environ["_"] = rv
return rv | ValueError | dataset/ETHPy150Open kdart/pycopia/fepy/pycopia/fepy/CLI.py/BaseCommands.__call__ |
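The dispatch at the heart of __call__ is getattr-with-fallback: look the command method up by name and substitute a default handler on AttributeError. A stripped-down sketch (names here are illustrative, not pycopia's API):

class MiniCommands(object):
    def default_command(self, argv):
        print("unknown command: %s" % argv[0])
        return 2

    def echo(self, argv):
        print(" ".join(argv[1:]))
        return 0

    def __call__(self, argv):
        try:
            meth = getattr(self, argv[0])
        except AttributeError:
            meth = self.default_command
        return meth(argv)

cmds = MiniCommands()
cmds(["echo", "hello"])   # prints: hello
cmds(["bogus"])           # prints: unknown command: bogus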
def printenv(self, argv):
"""printenv [name ...]
Shows the shell environment that processes will run with. """
if len(argv) == 1:
names = self._environ.keys()
names.sort()
ms = reduce(max, map(len, names))
for name in names:
value = self._environ[name]
self._print("%*s = %s" % (ms, name, safe_repr(value)))
else:
s = []
for name in argv[1:]:
try:
s.append("%s = %s" % (name, safe_repr(self._environ[name])))
except __HOLE__:
pass
self._print("\n".join(s)) | KeyError | dataset/ETHPy150Open kdart/pycopia/fepy/pycopia/fepy/CLI.py/BaseCommands.printenv |
def help(self, argv):
"""help [-lLcia] [<commandname>]...
Print a list of available commands, or information about a command,
if the name is given. Options:
-l Shows only local (object specific) commands.
-c Shows only the dynamic commands.
-L shows only local and dynamic commands.
-i Shows only the inherited commands from the parent context.
-a Shows all commands (default)
"""
local=True ; created=True ; inherited=True
opts, longs, args = self.getopt(argv, "lLcia")
for opt, optarg in opts:
if opt =="-i":
local=False ; created=False ; inherited=True
elif opt == "-c":
local=False ; created=True ; inherited=False
elif opt == "-l":
local=True ; created=False ; inherited=False
elif opt == "-a":
local=True ; created=True ; inherited=True
elif opt == "-L":
local=True ; created=True ; inherited=False
if not args:
args = self.get_commands()
for name in args:
try:
doc = getattr(self, name).__doc__
except __HOLE__:
self._print("No command named %r found." % (name,))
continue
if not doc:
self._print("No docs for %r." % (name,))
elif local and self.__class__.__dict__.has_key(name):
self._ui.help_local(doc)
elif created and "*" in doc: # dynamic method from generic_cli
self._ui.help_created(doc)
elif inherited:
self._ui.help_inherited(doc) | AttributeError | dataset/ETHPy150Open kdart/pycopia/fepy/pycopia/fepy/CLI.py/BaseCommands.help |
def alias(self, argv):
"""alias [newalias]
With no argument prints the current set of aliases. With an argument of the
form name=value, sets a new alias. """
if len(argv) == 1:
for name, val in self._aliases.items():
self._print("alias %s='%s'" % (name, " ".join(val)))
return 0
elif len(argv) == 2 and '=' not in argv[1]:
name = argv[1]
try:
self._print("%s=%s" % (name, " ".join(self._aliases[name])))
except __HOLE__:
self._print("undefined alias.")
return 0
# else
try: # this icky code to handle different permutations of where the '=' is.
argv.pop(0) # discard 'alias'
name = argv.pop(0)
if "=" in name:
name, rh = name.split("=", 1)
argv.insert(0,rh)
elif argv[0].startswith("="):
if len(argv[0]) > 1: # if argv[1] is '=something'
argv[0] = argv[0][1:]
else:
del argv[0] # remove the '='
self._aliases[name] = argv
except:
ex, val = sys.exc_info()[:2]
self._print("alias: Could not set alias. Usage: alias name=value")
self._print("%s (%s)" % (ex, val))
return 1 | KeyError | dataset/ETHPy150Open kdart/pycopia/fepy/pycopia/fepy/CLI.py/BaseCommands.alias |
def cycle(self, argv):
"""cycle <range> <command> [<arg>...]
Cycle the % variable through range, and re-evaluate the command for
each value.
Range is of the form [start':']end[':' step]
Where start defaults to zero and step defaults to one.
Or, range is a list of values separated by ','."""
argv.pop(0) # eat name
rangetoken = argv.pop(0)
argv = self._expand_aliases(argv)
meth = getattr(self, argv[0])
for sloc, arg in enumerate(argv):
if arg.find("%") >= 0:
break
else:
self._ui.error("No %% substitution found.")
return
try:
therange = self._parse_range(rangetoken)
except __HOLE__, err:
raise CLISyntaxError, err
for i in therange:
newargs = argv[:]
newargs[sloc] = newargs[sloc].replace("%", str(i))
self._ui.Print(" ".join(newargs))
apply(meth, (newargs,)) | ValueError | dataset/ETHPy150Open kdart/pycopia/fepy/pycopia/fepy/CLI.py/BaseCommands.cycle |
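_parse_range is referenced above but not shown; this is one plausible implementation of the documented syntax ([start':']end[':' step], or a comma-separated list of values), raising ValueError on bad input exactly as the caller expects:

def _parse_range(token):
    if "," in token:
        return [int(v) for v in token.split(",")]      # explicit value list
    parts = [int(v) for v in token.split(":")]         # ValueError on junk
    if len(parts) == 1:
        return range(parts[0])                         # end
    elif len(parts) == 2:
        return range(parts[0], parts[1])               # start:end
    elif len(parts) == 3:
        return range(parts[0], parts[1], parts[2])     # start:end:step
    raise ValueError("bad range token: %r" % token)

print(list(_parse_range("2:8:2")))   # [2, 4, 6]
print(list(_parse_range("1,4,9")))   # [1, 4, 9]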
def python(self, argv):
import code
ns = self._get_ns()
console = code.InteractiveConsole(ns)
console.raw_input = self._ui.user_input
try:
saveps1, saveps2 = sys.ps1, sys.ps2
except __HOLE__:
saveps1, saveps2 = ">>> ", "... "
sys.ps1, sys.ps2 = "%%GPython%%N:%s> " % (self._obj.__class__.__name__,), "more> "
if readline:
oc = readline.get_completer()
readline.set_completer(Completer(ns).complete)
console.interact("You are now in Python. ^D exits.")
if readline:
readline.set_completer(oc)
sys.ps1, sys.ps2 = saveps1, saveps2
self._reset_scopes()
# This is needed to reset PagedIO so background events don't cause the pager to activate. | AttributeError | dataset/ETHPy150Open kdart/pycopia/fepy/pycopia/fepy/CLI.py/BaseCommands.python |
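The embedded-interpreter trick in isolation: code.InteractiveConsole runs a REPL over any namespace dict, and sys.ps1/sys.ps2 (which may not exist in a non-interactive process, hence the AttributeError fallback above) control its prompts. A minimal sketch:

import code
import sys

ns = {"answer": 42}
try:
    saveps1, saveps2 = sys.ps1, sys.ps2
except AttributeError:                 # not running interactively
    saveps1, saveps2 = ">>> ", "... "
sys.ps1, sys.ps2 = "demo> ", "more> "
code.InteractiveConsole(ns).interact("Type answer, then ^D to exit.")
sys.ps1, sys.ps2 = saveps1, saveps2    # restore the old prompts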
def complete(self, text, state):
if state == 0:
self.matches = []
if "." in text:
for name, obj in self.namespace.items():
for key in dir(obj):
if key.startswith("__"):
continue
lname = "%s.%s" % (name, key)
if lname.startswith(text):
self.matches.append(lname)
else:
for key in self.globalNamespace:
if key.startswith(text):
self.matches.append(key)
try:
return self.matches[state]
except __HOLE__:
return None | IndexError | dataset/ETHPy150Open kdart/pycopia/fepy/pycopia/fepy/CLI.py/Completer.complete |
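A completer like the one above is driven by readline calling complete(text, state) with state = 0, 1, 2, ... until it returns None -- hence the IndexError-terminated lookup. A standalone version over a fixed word list (assumes the readline module is available, i.e. not on stock Windows):

import readline

WORDS = ["alias", "cycle", "help", "printenv", "python"]

def complete(text, state):
    matches = [w for w in WORDS if w.startswith(text)]
    try:
        return matches[state]      # readline asks for match 0, 1, 2, ...
    except IndexError:
        return None                # ...until we signal "no more matches"

readline.set_completer(complete)
readline.parse_and_bind("tab: complete")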
def _rl_completer(self, text, state):
if state == 0:
curr = readline.get_line_buffer()
b = readline.get_begidx()
if b == 0:
complist = self._cmd.get_completion_scope("commands")
else: # complete based on scope keyed on previous word
word = curr[:b].split()[-1]
complist = self._cmd.get_completion_scope(word)
self._complist = filter(lambda s: s.startswith(text), complist)
try:
return self._complist[state]
except __HOLE__:
return None | IndexError | dataset/ETHPy150Open kdart/pycopia/fepy/pycopia/fepy/CLI.py/CommandParser._rl_completer |
def run_cli_wrapper(argv, wrappedclass=Shell, cliclass=GenericCLI, theme=None):
"""Instantiate a class object (the wrappedclass), and run a CLI wrapper on it."""
logfile = sourcefile = None
paged = False
try:
optlist, longopts, args = getopt.getopt(argv[1:], "?hgs:")
except getopt.GetoptError:
print wrappedclass.__doc__
return
for opt, val in optlist:
if opt in ("-?", "-h", "--help"):
print run_cli_wrapper.__doc__
return
elif opt == "-s":
sourcefile = val
elif opt == "-g":
paged = True
elif opt == "-l":
logfile = open(val, "w")
if args:
targs, kwargs = breakout_args(args)
else:
targs, kwargs = (), {}
try:
obj = apply(wrappedclass, targs, kwargs)
except (ValueError, __HOLE__):
print "Bad parameters."
print wrappedclass.__doc__
return
io = ConsoleIO()
ui = UserInterface(io, None, theme)
cmd = get_generic_cmd(obj, ui, cliclass)
cmd._export("PS1", "%%I%s%%N(%s%s%s)> " % (wrappedclass.__name__,
", ".join(map(repr, targs)), IF(kwargs, ", ", ""),
", ".join(map(lambda t: "%s=%r" % t, kwargs.items()))) )
cli = CommandParser(cmd, logfile)
if sourcefile:
cli.parse(sourcefile)
else:
cli.interact() | TypeError | dataset/ETHPy150Open kdart/pycopia/fepy/pycopia/fepy/CLI.py/run_cli_wrapper |
def status(name, sig=None):
'''
Return the status for a service via s6, return pid if running
CLI Example:
.. code-block:: bash
salt '*' s6.status <service name>
'''
cmd = 's6-svstat {0}'.format(_service_path(name))
out = __salt__['cmd.run_stdout'](cmd)
try:
pid = re.search(r'up \(pid (\d+)\)', out).group(1)
except __HOLE__:
pid = ''
return pid | AttributeError | dataset/ETHPy150Open saltstack/salt/salt/modules/s6.py/status |
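The AttributeError caught above arises because re.search() returns None when the pattern does not match, so .group(1) is invoked on None. The same behaviour in isolation:

import re

def extract_pid(out):
    try:
        return re.search(r'up \(pid (\d+)\)', out).group(1)
    except AttributeError:   # no match: re.search() returned None
        return ''

print(extract_pid('up (pid 1234) 42 seconds'))   # 1234
print(extract_pid('down 10 seconds'))            # (empty string)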
def verify_token_string(self, token_string, action=None, timeout=None,
current_time=None):
"""Generate a hash of the given token contents that can be verified.
:param token_string:
A string containing the hashed token (generated by
`generate_token_string`).
:param action:
A string containing the action that is being verified.
:param timeout:
An int or float representing the number of seconds that the token
is valid for. If None then tokens are valid forever.
:param current_time:
An int representing the number of seconds since the epoch. Will be
used to check for token expiry if `timeout` is set. If `None`
then the current time will be used.
:raises:
XSRFTokenMalformed if the given token_string cannot be parsed.
XSRFTokenExpiredException if the given token string is expired.
XSRFTokenInvalid if the given token string does not match the
contents of the `XSRFToken`.
"""
try:
decoded_token_string = base64.urlsafe_b64decode(token_string)
except TypeError:
raise XSRFTokenMalformed()
split_token = decoded_token_string.split(self._DELIMITER)
if len(split_token) != 2:
raise XSRFTokenMalformed()
try:
token_time = int(split_token[1])
except __HOLE__:
raise XSRFTokenMalformed()
if timeout is not None:
if current_time is None:
current_time = time.time()
# If an attacker modifies the plain text time then it will not match
# the hashed time so this check is sufficient.
if (token_time + timeout) < current_time:
raise XSRFTokenExpiredException()
expected_token = XSRFToken(self.user_id, self.secret, token_time)
expected_token_string = expected_token.generate_token_string(action)
if len(expected_token_string) != len(token_string):
raise XSRFTokenInvalid()
# Compare the two strings in constant time to prevent timing attacks.
different = 0
for a, b in zip(token_string, expected_token_string):
different |= ord(a) ^ ord(b)
if different:
raise XSRFTokenInvalid() | ValueError | dataset/ETHPy150Open gregorynicholas/flask-xsrf/flask_xsrf.py/XSRFToken.verify_token_string |
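The manual XOR loop above is the classic constant-time comparison guarding against timing attacks; on Python 2.7.7+/3.3+ the stdlib offers the same guarantee via hmac.compare_digest, which also handles unequal lengths safely. A sketch of the substitution:

import hmac

def tokens_match(token_string, expected_token_string):
    # Constant-time: does not return early at the first differing byte.
    return hmac.compare_digest(token_string, expected_token_string)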
@classmethod
def instance(cls, name, thread_count=1, idle_timeout=60):
'''A convenience method for accessing shared instances of ``TaskQueue``.
If ``name`` references an existing instance created with this method,
that instance will be returned. Otherwise, a new ``TaskQueue`` will be
instantiated with ``thread_count`` threads and the specified ``idle_timeout``
then stored under ``name``.
All arguments after name are ignored after the queue is created.
'''
if not hasattr(cls, '_task_queues'):
cls._task_queues = {}
try:
return cls._task_queues[name]
except __HOLE__:
cls._task_queues[name] = cls(thread_count, idle_timeout, name)
return cls._task_queues[name] | KeyError | dataset/ETHPy150Open JeremyOT/Toto/toto/tasks.py/TaskQueue.instance |
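instance() is a memoized-constructor pattern: try the lookup first and construct only on KeyError, so repeated calls with the same name share one object. The same shape reduced to essentials:

class Shared(object):
    def __init__(self, name):
        self.name = name

    @classmethod
    def instance(cls, name):
        if not hasattr(cls, '_instances'):
            cls._instances = {}
        try:
            return cls._instances[name]
        except KeyError:
            cls._instances[name] = cls(name)
            return cls._instances[name]

a = Shared.instance("queue1")
b = Shared.instance("queue1")
print(a is b)   # True: both names resolve to the same instance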
def run(self):
try:
while 1:
with self.condition:
if not len(self.tasks):
self.condition.wait(self.idle_timeout)
try:
task = self.tasks.popleft()
except __HOLE__:
logging.debug('Idle timeout: %s' % self.name)
self.threads.remove(self)
self.in_use = False
return
try:
task[0](*task[1], **task[2])
except Exception as e:
logging.error(traceback.format_exc())
finally:
with self.condition:
if self.in_use:
self.threads.remove(self)
self.in_use = False | IndexError | dataset/ETHPy150Open JeremyOT/Toto/toto/tasks.py/TaskQueue.__TaskLoop.run |
def open_report_file():
try:
FILE = open (reportfile,"r" )
entries = FILE.readlines()
FILE.close()
except __HOLE__:
print "[+] Can not find report file:", reportfile
sys.exit(1)
return entries | IOError | dataset/ETHPy150Open mertsarica/hack4career/codes/vt_reporter.py/open_report_file |
def convert_paramiko_errors(method):
"""Convert remote Paramiko errors to errors.RemoteError"""
def wrapper(self, *args, **kw):
try:
return method(self, *args, **kw)
except __HOLE__ as error:
if error.errno == errno.ENOENT:
raise errors.RemoteFileDoesNotExist(str(error))
else:
raise errors.RemoteError("%s: %s" % (error.__class__.__name__,
error))
except (socket.error, paramiko.SSHException, EOFError) as error:
raise errors.RemoteError("%s: %s" % (error.__class__.__name__,
error))
wrapper.__doc__ = method.__doc__
wrapper.__name__ = method.__name__
return wrapper | IOError | dataset/ETHPy150Open ohmu/poni/poni/rcontrol_paramiko.py/convert_paramiko_errors |
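The decorator above follows a general recipe: wrap the call, catch a family of low-level exceptions, and re-raise them as a single library-specific error so callers only deal with one type. A self-contained sketch (DemoError stands in for errors.RemoteError):

import socket

class DemoError(Exception):
    pass

def convert_socket_errors(method):
    def wrapper(*args, **kw):
        try:
            return method(*args, **kw)
        except (socket.error, EOFError) as error:
            raise DemoError("%s: %s" % (error.__class__.__name__, error))
    wrapper.__doc__ = method.__doc__
    wrapper.__name__ = method.__name__
    return wrapper

@convert_socket_errors
def connect(host, port):
    socket.create_connection((host, port), timeout=1).close()

# connect("localhost", 1) now raises DemoError instead of socket.error
# when nothing is listening on the port.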
@convert_paramiko_errors
def execute_command(self, cmd, pseudo_tty=False):
def get_channel(ssh):
channel = ssh.get_transport().open_session()
if not channel:
raise paramiko.SSHException("channel opening failed")
return channel
channel = self.get_ssh(get_channel)
if not channel:
raise errors.RemoteError("failed to open an SSH session to %s" % (
self.node.name))
if pseudo_tty:
channel.get_pty()
channel.set_combine_stderr(True) # TODO: separate stdout/stderr?
BS = 2**16
rx_time = time.time()
log_name = "%s (%s): %r" % (self.node.name, self.node.get("host"), cmd)
next_warn = time.time() + self.warn_timeout
next_ping = time.time() + self.ping_interval
def available_output():
"""read all the output that is immediately available"""
while channel.recv_ready():
chunk = channel.recv(BS)
yield rcontrol.STDOUT, chunk
channel.exec_command(cmd)
channel.shutdown_write()
exit_code = None
if epoll:
poll = select.epoll()
poll.register(channel.fileno(), select.EPOLLIN)
else:
poll = None
try:
while True:
if (exit_code is None) and channel.exit_status_ready():
# process has finished executing, but there still may be
# output to read from stdout or stderr
exit_code = channel.recv_exit_status()
# wait for input, note that the results are not used for anything
if poll:
try:
poll.poll(timeout=1.0) # just poll, not interested in the fileno
except __HOLE__ as ex:
if ex.errno != errno.EINTR:
raise
continue
else:
select.select([channel], [], [], 1.0)
for output in available_output():
rx_time = time.time()
next_warn = time.time() + self.warn_timeout
yield output
if channel.closed and (exit_code is not None):
yield rcontrol.DONE, exit_code
break # everything done!
now = time.time()
if now > (rx_time + self.terminate_timeout):
# no output in a long time, terminate connection
raise errors.RemoteError(
"%s: no output in %.1f seconds, terminating" % (
log_name, self.terminate_timeout))
if now > next_warn:
elapsed_since = time.time() - rx_time
self.log.warning("%s: no output in %.1fs", log_name,
elapsed_since)
next_warn = time.time() + self.warn_timeout
if now > next_ping:
channel.transport.send_ignore()
next_ping = time.time() + self.ping_interval
finally:
if poll:
poll.close() | IOError | dataset/ETHPy150Open ohmu/poni/poni/rcontrol_paramiko.py/ParamikoRemoteControl.execute_command |
@convert_paramiko_errors
def makedirs(self, dir_path):
sftp = self.get_sftp()
create_dirs = []
while 1:
try:
sftp.stat(dir_path)
break # dir exists
except (paramiko.SSHException, __HOLE__):
create_dirs.insert(0, dir_path)
dir_path, rest = os.path.split(dir_path)
if not dir_path or not rest:
break
for dir_path in create_dirs:
sftp.mkdir(dir_path) | IOError | dataset/ETHPy150Open ohmu/poni/poni/rcontrol_paramiko.py/ParamikoRemoteControl.makedirs |
def _pretty_frame(frame):
"""
Helper function for pretty-printing a frame.
:param frame: The frame to be printed.
:type frame: AttrDict
:return: A nicely formatted string representation of the frame.
:rtype: str
"""
outstr = ""
outstr += "frame ({0.ID}): {0.name}\n\n".format(frame)
outstr += "[definition]\n"
outstr += _pretty_longstring(frame.definition, ' ') + '\n'
outstr += "[semTypes] {0} semantic types\n".format(len(frame.semTypes))
outstr += " "*(len(frame.semTypes)>0) + ", ".join("{0}({1})".format(x.name, x.ID) for x in frame.semTypes) + '\n'*(len(frame.semTypes)>0)
outstr += "\n[frameRelations] {0} frame relations\n".format(len(frame.frameRelations))
outstr += ' ' + '\n '.join(repr(frel) for frel in frame.frameRelations) + '\n'
outstr += "\n[lexUnit] {0} lexical units\n".format(len(frame.lexUnit))
lustrs = []
for luName,lu in sorted(frame.lexUnit.items()):
tmpstr = '{0} ({1})'.format(luName, lu.ID)
lustrs.append(tmpstr)
outstr += "{0}\n".format(_pretty_longstring(', '.join(lustrs),prefix=' '))
outstr += "\n[FE] {0} frame elements\n".format(len(frame.FE))
fes = {}
for feName,fe in sorted(frame.FE.items()):
try:
fes[fe.coreType].append("{0} ({1})".format(feName, fe.ID))
except __HOLE__:
fes[fe.coreType] = []
fes[fe.coreType].append("{0} ({1})".format(feName, fe.ID))
for ct in sorted(fes.keys(), key=lambda ct2: ['Core','Core-Unexpressed','Peripheral','Extra-Thematic'].index(ct2)):
outstr += "{0:>16}: {1}\n".format(ct, ', '.join(sorted(fes[ct])))
outstr += "\n[FEcoreSets] {0} frame element core sets\n".format(len(frame.FEcoreSets))
outstr += " " + '\n '.join(", ".join([x.name for x in coreSet]) for coreSet in frame.FEcoreSets) + '\n'
return outstr | KeyError | dataset/ETHPy150Open nltk/nltk/nltk/corpus/reader/framenet.py/_pretty_frame |
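The try/append/except-KeyError/create dance used above to group FEs by coreType is the hand-rolled form of grouping; collections.defaultdict expresses the same thing directly:

from collections import defaultdict

fes = defaultdict(list)
for feName, coreType in [("Agent", "Core"), ("Time", "Peripheral"),
                         ("Theme", "Core")]:
    fes[coreType].append(feName)   # missing keys start as empty lists
print(dict(fes))   # e.g. {'Core': ['Agent', 'Theme'], 'Peripheral': ['Time']}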
def _short_repr(self):
if '_type' in self:
if self['_type'].endswith('relation'):
return self.__repr__()
try:
return "<{0} ID={1} name={2}>".format(self['_type'], self['ID'], self['name'])
except __HOLE__: # no ID--e.g., for _type=lusubcorpus
return "<{0} name={1}>".format(self['_type'], self['name'])
else:
return self.__repr__() | KeyError | dataset/ETHPy150Open nltk/nltk/nltk/corpus/reader/framenet.py/AttrDict._short_repr |
def __repr__(self):
parts = []
for k,v in sorted(self.items()):
kv = repr(k)+': '
try:
kv += v._short_repr()
except __HOLE__:
kv += repr(v)
parts.append(kv)
return '{'+(',\n ' if self._BREAK_LINES else ', ').join(parts)+'}' | AttributeError | dataset/ETHPy150Open nltk/nltk/nltk/corpus/reader/framenet.py/PrettyDict.__repr__ |
def readme(self):
"""
Return the contents of the corpus README.txt (or README) file.
"""
try:
return self.open("README.txt").read()
except __HOLE__:
return self.open("README").read() | IOError | dataset/ETHPy150Open nltk/nltk/nltk/corpus/reader/framenet.py/FramenetCorpusReader.readme |
def annotated_document(self, fn_docid):
"""
Returns the annotated document whose id number is
``fn_docid``. This id number can be obtained by calling the
documents() function.
The dict that is returned from this function will contain the
following keys:
- '_type' : 'fulltextannotation'
- 'sentence' : a list of sentences in the document
- Each item in the list is a dict containing the following keys:
- 'ID' : the ID number of the sentence
- '_type' : 'sentence'
- 'text' : the text of the sentence
- 'paragNo' : the paragraph number
- 'sentNo' : the sentence number
- 'docID' : the document ID number
- 'corpID' : the corpus ID number
- 'aPos' : the annotation position
- 'annotationSet' : a list of annotation layers for the sentence
- Each item in the list is a dict containing the following keys:
- 'ID' : the ID number of the annotation set
- '_type' : 'annotationset'
- 'status' : either 'MANUAL' or 'UNANN'
- 'luName' : (only if status is 'MANUAL')
- 'luID' : (only if status is 'MANUAL')
- 'frameID' : (only if status is 'MANUAL')
- 'frameName': (only if status is 'MANUAL')
- 'layer' : a list of labels for the layer
- Each item in the layer is a dict containing the
following keys:
- '_type': 'layer'
- 'rank'
- 'name'
- 'label' : a list of labels in the layer
- Each item is a dict containing the following keys:
- 'start'
- 'end'
- 'name'
- 'feID' (optional)
:param fn_docid: The Framenet id number of the document
:type fn_docid: int
:return: Information about the annotated document
:rtype: dict
"""
try:
xmlfname = self._fulltext_idx[fn_docid].filename
except __HOLE__: # happens when self._fulltext_idx == None
# build the index
self._buildcorpusindex()
xmlfname = self._fulltext_idx[fn_docid].filename
except KeyError: # probably means that fn_docid was not in the index
raise FramenetError("Unknown document id: {0}".format(fn_docid))
# construct the path name for the xml file containing the document info
locpath = os.path.join(
"{0}".format(self._root), self._fulltext_dir, xmlfname)
# Grab the top-level xml element containing the fulltext annotation
elt = XMLCorpusView(locpath, 'fullTextAnnotation')[0]
return self._handle_fulltextannotation_elt(elt) | TypeError | dataset/ETHPy150Open nltk/nltk/nltk/corpus/reader/framenet.py/FramenetCorpusReader.annotated_document |
def frame_by_id(self, fn_fid, ignorekeys=[]):
"""
Get the details for the specified Frame using the frame's id
number.
Usage examples:
>>> from nltk.corpus import framenet as fn
>>> f = fn.frame_by_id(256)
>>> f.ID
256
>>> f.name
'Medical_specialties'
>>> f.definition
"This frame includes words that name ..."
:param fn_fid: The Framenet id number of the frame
:type fn_fid: int
:param ignorekeys: The keys to ignore. These keys will not be
included in the output. (optional)
:type ignorekeys: list(str)
:return: Information about a frame
:rtype: dict
Also see the ``frame()`` function for details about what is
contained in the dict that is returned.
"""
# get the name of the frame with this id number
try:
fentry = self._frame_idx[fn_fid]
if '_type' in fentry:
return fentry # full frame object is cached
name = fentry['name']
except TypeError:
self._buildframeindex()
name = self._frame_idx[fn_fid]['name']
except __HOLE__:
raise FramenetError('Unknown frame id: {0}'.format(fn_fid))
return self.frame_by_name(name, ignorekeys, check_cache=False) | KeyError | dataset/ETHPy150Open nltk/nltk/nltk/corpus/reader/framenet.py/FramenetCorpusReader.frame_by_id |
def frame_by_name(self, fn_fname, ignorekeys=[], check_cache=True):
"""
Get the details for the specified Frame using the frame's name.
Usage examples:
>>> from nltk.corpus import framenet as fn
>>> f = fn.frame_by_name('Medical_specialties')
>>> f.ID
256
>>> f.name
'Medical_specialties'
>>> f.definition
"This frame includes words that name ..."
:param fn_fname: The name of the frame
:type fn_fname: str
:param ignorekeys: The keys to ignore. These keys will not be
included in the output. (optional)
:type ignorekeys: list(str)
:return: Information about a frame
:rtype: dict
Also see the ``frame()`` function for details about what is
contained in the dict that is returned.
"""
if check_cache and fn_fname in self._cached_frames:
return self._frame_idx[self._cached_frames[fn_fname]]
elif not self._frame_idx:
self._buildframeindex()
# construct the path name for the xml file containing the Frame info
locpath = os.path.join(
"{0}".format(self._root), self._frame_dir, fn_fname + ".xml")
#print(locpath, file=sys.stderr)
# Grab the xml for the frame
try:
elt = XMLCorpusView(locpath, 'frame')[0]
except __HOLE__:
raise FramenetError('Unknown frame: {0}'.format(fn_fname))
fentry = self._handle_frame_elt(elt, ignorekeys)
assert fentry
# INFERENCE RULE: propagate lexical semtypes from the frame to all its LUs
for st in fentry.semTypes:
if st.rootType.name=='Lexical_type':
for lu in fentry.lexUnit.values():
if st not in lu.semTypes:
lu.semTypes.append(st)
self._frame_idx[fentry.ID] = fentry
self._cached_frames[fentry.name] = fentry.ID
'''
# now set up callables to resolve the LU pointers lazily.
# (could also do this here--caching avoids infinite recursion.)
for luName,luinfo in fentry.lexUnit.items():
fentry.lexUnit[luName] = (lambda luID: Future(lambda: self.lu(luID)))(luinfo.ID)
'''
return fentry | IOError | dataset/ETHPy150Open nltk/nltk/nltk/corpus/reader/framenet.py/FramenetCorpusReader.frame_by_name |
def _lu_file(self, lu, ignorekeys=[]):
"""
Augment the LU information that was loaded from the frame file
with additional information from the LU file.
"""
fn_luid = lu.ID
fname = "lu{0}.xml".format(fn_luid)
locpath = os.path.join("{0}".format(self._root), self._lu_dir, fname)
#print(locpath, file=sys.stderr)
if not self._lu_idx:
self._buildluindex()
try:
elt = XMLCorpusView(locpath, 'lexUnit')[0]
except __HOLE__:
raise FramenetError('Unknown LU id: {0}'.format(fn_luid))
lu2 = self._handle_lexunit_elt(elt, ignorekeys)
lu.subCorpus = lu2.subCorpus
return lu.subCorpus | IOError | dataset/ETHPy150Open nltk/nltk/nltk/corpus/reader/framenet.py/FramenetCorpusReader._lu_file |
def propagate_semtypes(self):
"""
Apply inference rules to distribute semtypes over relations between FEs.
For FrameNet 1.5, this results in 1011 semtypes being propagated.
(Not done by default because it requires loading all frame files,
which takes several seconds. If this needed to be fast, it could be rewritten
to traverse the neighboring relations on demand for each FE semtype.)
>>> from nltk.corpus import framenet as fn
>>> sum(1 for f in fn.frames() for fe in f.FE.values() if fe.semType)
4241
>>> fn.propagate_semtypes()
>>> sum(1 for f in fn.frames() for fe in f.FE.values() if fe.semType)
5252
"""
if not self._semtypes:
self._loadsemtypes()
if not self._ferel_idx:
self._buildrelationindex()
changed = True
i = 0
nPropagations = 0
while changed:
# make a pass and see if anything needs to be propagated
i += 1
changed = False
for ferel in self.fe_relations():
superST = ferel.superFE.semType
subST = ferel.subFE.semType
try:
if superST and superST is not subST:
# propagate downward
assert subST is None or self.semtype_inherits(subST, superST),(superST.name,ferel,subST.name)
if subST is None:
ferel.subFE.semType = subST = superST
changed = True
nPropagations += 1
if ferel.type.name in ['Perspective_on', 'Subframe', 'Precedes'] and subST \
and subST is not superST:
# propagate upward
assert superST is None,(superST.name,ferel,subST.name)
ferel.superFE.semType = superST = subST
changed = True
nPropagations += 1
except __HOLE__ as ex:
# bug in the data! ignore
#print(ex, file=sys.stderr)
continue
#print(i, nPropagations, file=sys.stderr) | AssertionError | dataset/ETHPy150Open nltk/nltk/nltk/corpus/reader/framenet.py/FramenetCorpusReader.propagate_semtypes |
def semtype(self, key):
"""
>>> from nltk.corpus import framenet as fn
>>> fn.semtype(233).name
'Temperature'
>>> fn.semtype(233).abbrev
'Temp'
>>> fn.semtype('Temperature').ID
233
:param key: The name, abbreviation, or id number of the semantic type
:type key: string or int
:return: Information about a semantic type
:rtype: dict
"""
if isinstance(key, int):
stid = key
else:
try:
stid = self._semtypes[key]
except TypeError:
self._loadsemtypes()
stid = self._semtypes[key]
try:
st = self._semtypes[stid]
except __HOLE__:
self._loadsemtypes()
st = self._semtypes[stid]
return st | TypeError | dataset/ETHPy150Open nltk/nltk/nltk/corpus/reader/framenet.py/FramenetCorpusReader.semtype |
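semtype() relies on a build-on-first-use index: attempt the lookup, and on TypeError (the index is still None) build it and retry. A generic sketch of that idiom:

class LazyIndex(object):
    def __init__(self):
        self._idx = None             # deliberately not built yet

    def _build(self):
        self._idx = {"Temperature": 233, 233: "Temperature"}

    def lookup(self, key):
        try:
            return self._idx[key]
        except TypeError:            # None[key] -> index not built yet
            self._build()
            return self._idx[key]

idx = LazyIndex()
print(idx.lookup("Temperature"))     # 233, building the index on the way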
def frames(self, name=None):
"""
Obtain details for a specific frame.
>>> from nltk.corpus import framenet as fn
>>> len(fn.frames())
1019
>>> PrettyList(fn.frames(r'(?i)medical'), maxReprSize=0, breakLines=True)
[<frame ID=256 name=Medical_specialties>,
<frame ID=257 name=Medical_instruments>,
<frame ID=255 name=Medical_professionals>,
<frame ID=239 name=Medical_conditions>]
A brief intro to Frames (excerpted from "FrameNet II: Extended
Theory and Practice" by Ruppenhofer et al., 2010):
A Frame is a script-like conceptual structure that describes a
particular type of situation, object, or event along with the
participants and props that are needed for that Frame. For
example, the "Apply_heat" frame describes a common situation
involving a Cook, some Food, and a Heating_Instrument, and is
evoked by words such as bake, blanch, boil, broil, brown,
simmer, steam, etc.
We call the roles of a Frame "frame elements" (FEs) and the
frame-evoking words are called "lexical units" (LUs).
FrameNet includes relations between Frames. Several types of
relations are defined, of which the most important are:
- Inheritance: An IS-A relation. The child frame is a subtype
of the parent frame, and each FE in the parent is bound to
a corresponding FE in the child. An example is the
"Revenge" frame which inherits from the
"Rewards_and_punishments" frame.
- Using: The child frame presupposes the parent frame as
background, e.g the "Speed" frame "uses" (or presupposes)
the "Motion" frame; however, not all parent FEs need to be
bound to child FEs.
- Subframe: The child frame is a subevent of a complex event
represented by the parent, e.g. the "Criminal_process" frame
has subframes of "Arrest", "Arraignment", "Trial", and
"Sentencing".
- Perspective_on: The child frame provides a particular
perspective on an un-perspectivized parent frame. A pair of
examples consists of the "Hiring" and "Get_a_job" frames,
which perspectivize the "Employment_start" frame from the
Employer's and the Employee's point of view, respectively.
:param name: A regular expression pattern used to match against
Frame names. If 'name' is None, then a list of all
Framenet Frames will be returned.
:type name: str
:return: A list of matching Frames (or all Frames).
:rtype: list(AttrDict)
"""
try:
fIDs = list(self._frame_idx.keys())
except __HOLE__:
self._buildframeindex()
fIDs = list(self._frame_idx.keys())
if name is not None:
return PrettyList(self.frame(fID) for fID,finfo in self.frame_ids_and_names(name).items())
else:
return PrettyLazyMap(self.frame, fIDs) | AttributeError | dataset/ETHPy150Open nltk/nltk/nltk/corpus/reader/framenet.py/FramenetCorpusReader.frames |
def lus(self, name=None):
"""
Obtain details for a specific lexical unit.
>>> from nltk.corpus import framenet as fn
>>> len(fn.lus())
11829
>>> PrettyList(fn.lus(r'(?i)a little'), maxReprSize=0, breakLines=True)
[<lu ID=14744 name=a little bit.adv>,
<lu ID=14733 name=a little.n>,
<lu ID=14743 name=a little.adv>]
A brief intro to Lexical Units (excerpted from "FrameNet II:
Extended Theory and Practice" by Ruppenhofer et al., 2010):
A lexical unit (LU) is a pairing of a word with a meaning. For
example, the "Apply_heat" Frame describes a common situation
involving a Cook, some Food, and a Heating Instrument, and is
_evoked_ by words such as bake, blanch, boil, broil, brown,
simmer, steam, etc. These frame-evoking words are the LUs in the
Apply_heat frame. Each sense of a polysemous word is a different
LU.
We have used the word "word" in talking about LUs. The reality
is actually rather complex. When we say that the word "bake" is
polysemous, we mean that the lemma "bake.v" (which has the
word-forms "bake", "bakes", "baked", and "baking") is linked to
three different frames:
- Apply_heat: "Michelle baked the potatoes for 45 minutes."
- Cooking_creation: "Michelle baked her mother a cake for her birthday."
- Absorb_heat: "The potatoes have to bake for more than 30 minutes."
These constitute three different LUs, with different
definitions.
Multiword expressions such as "given name" and hyphenated words
like "shut-eye" can also be LUs. Idiomatic phrases such as
"middle of nowhere" and "give the slip (to)" are also defined as
LUs in the appropriate frames ("Isolated_places" and "Evading",
respectively), and their internal structure is not analyzed.
Framenet provides multiple annotated examples of each sense of a
word (i.e. each LU). Moreover, the set of examples
(approximately 20 per LU) illustrates all of the combinatorial
possibilities of the lexical unit.
Each LU is linked to a Frame, and hence to the other words which
evoke that Frame. This makes the FrameNet database similar to a
thesaurus, grouping together semantically similar words.
In the simplest case, frame-evoking words are verbs such as
"fried" in:
"Matilde fried the catfish in a heavy iron skillet."
Sometimes event nouns may evoke a Frame. For example,
"reduction" evokes "Cause_change_of_scalar_position" in:
"...the reduction of debt levels to $665 million from $2.6 billion."
Adjectives may also evoke a Frame. For example, "asleep" may
evoke the "Sleep" frame as in:
"They were asleep for hours."
Many common nouns, such as artifacts like "hat" or "tower",
typically serve as dependents rather than clearly evoking their
own frames.
:param name: A regular expression pattern used to search the LU
names. Note that LU names take the form of a dotted
string (e.g. "run.v" or "a little.adv") in which a
lemma preceeds the "." and a POS follows the
dot. The lemma may be composed of a single lexeme
(e.g. "run") or of multiple lexemes (e.g. "a
little"). If 'name' is not given, then all LUs will
be returned.
The valid POSes are:
v - verb
n - noun
a - adjective
adv - adverb
prep - preposition
num - numbers
intj - interjection
art - article
c - conjunction
scon - subordinating conjunction
:type name: str
:return: A list of selected (or all) lexical units
:rtype: list of LU objects (dicts). See the lu() function for info
about the specifics of LU objects.
"""
try:
luIDs = list(self._lu_idx.keys())
except __HOLE__:
self._buildluindex()
luIDs = list(self._lu_idx.keys())
if name is not None:
return PrettyList(self.lu(luID) for luID,luName in self.lu_ids_and_names(name).items())
else:
return PrettyLazyMap(self.lu, luIDs) | AttributeError | dataset/ETHPy150Open nltk/nltk/nltk/corpus/reader/framenet.py/FramenetCorpusReader.lus |
def documents(self, name=None):
"""
Return a list of the annotated documents in Framenet.
Details for a specific annotated document can be obtained using this
class's annotated_document() function and pass it the value of the 'ID' field.
>>> from nltk.corpus import framenet as fn
>>> len(fn.documents())
78
>>> set([x.corpname for x in fn.documents()])==set(['ANC', 'C-4', 'KBEval', \
'LUCorpus-v0.3', 'Miscellaneous', 'NTI', 'PropBank', 'QA', 'SemAnno'])
True
:param name: A regular expression pattern used to search the
file name of each annotated document. The document's
file name contains the name of the corpus that the
document is from, followed by two underscores "__"
followed by the document name. So, for example, the
file name "LUCorpus-v0.3__20000410_nyt-NEW.xml" is
from the corpus named "LUCorpus-v0.3" and the
document name is "20000410_nyt-NEW.xml".
:type name: str
:return: A list of selected (or all) annotated documents
:rtype: list of dicts, where each dict object contains the following
keys:
- 'name'
- 'ID'
- 'corpid'
- 'corpname'
- 'description'
- 'filename'
"""
try:
ftlist = PrettyList(self._fulltext_idx.values())
except __HOLE__:
self._buildcorpusindex()
ftlist = PrettyList(self._fulltext_idx.values())
if name is None:
return ftlist
else:
return PrettyList(x for x in ftlist if re.search(name, x['filename']) is not None) | AttributeError | dataset/ETHPy150Open nltk/nltk/nltk/corpus/reader/framenet.py/FramenetCorpusReader.documents |
def _load_xml_attributes(self, d, elt):
"""
Extracts a subset of the attributes from the given element and
returns them in a dictionary.
:param d: A dictionary in which to store the attributes.
:type d: dict
:param elt: An ElementTree Element
:type elt: Element
:return: Returns the input dict ``d`` possibly including attributes from ``elt``
:rtype: dict
"""
d = type(d)(d)
try:
attr_dict = elt.attrib
except __HOLE__:
return d
if attr_dict is None:
return d
# Ignore these attributes when loading attributes from an xml node
ignore_attrs = ['cBy', 'cDate', 'mDate', 'xsi',
'schemaLocation', 'xmlns', 'bgColor', 'fgColor']
for attr in attr_dict:
if any(attr.endswith(x) for x in ignore_attrs):
continue
val = attr_dict[attr]
if val.isdigit():
d[attr] = int(val)
else:
d[attr] = val
return d | AttributeError | dataset/ETHPy150Open nltk/nltk/nltk/corpus/reader/framenet.py/FramenetCorpusReader._load_xml_attributes |
def _strip_tags(self, data):
"""
Gets rid of all tags and newline characters from the given input
:return: A cleaned-up version of the input string
:rtype: str
"""
try:
data = data.replace('<t>', '')
data = data.replace('</t>', '')
data = re.sub('<fex name="[^"]+">', '', data)
data = data.replace('</fex>', '')
data = data.replace('<fen>', '')
data = data.replace('</fen>', '')
data = data.replace('<m>', '')
data = data.replace('</m>', '')
data = data.replace('<ment>', '')
data = data.replace('</ment>', '')
data = data.replace('<ex>', "'")
data = data.replace('</ex>', "'")
data = data.replace('<gov>', '')
data = data.replace('</gov>', '')
data = data.replace('<x>', '')
data = data.replace('</x>', '')
# Get rid of <def-root> and </def-root> tags
data = data.replace('<def-root>', '')
data = data.replace('</def-root>', '')
data = data.replace('\n', ' ')
except __HOLE__:
pass
return data | AttributeError | dataset/ETHPy150Open nltk/nltk/nltk/corpus/reader/framenet.py/FramenetCorpusReader._strip_tags |
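The long chain of str.replace calls above can be collapsed into two regex passes: one for the tags that are simply dropped and one for <ex>/</ex>, which become quotes. A sketch that should behave equivalently on well-formed input:

import re

_DROP = re.compile(r'</?(?:t|fen|m|ment|gov|x|def-root)>'
                   r'|<fex name="[^"]+">|</fex>')

def strip_tags(data):
    data = _DROP.sub('', data)            # tags that vanish outright
    data = re.sub(r'</?ex>', "'", data)   # example markers become quotes
    return data.replace('\n', ' ')

print(strip_tags('<def-root>Cook <fex name="Food">food</fex>.\n'
                 '<ex>He baked.</ex></def-root>'))
# Cook food. 'He baked.'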
def _handle_lusubcorpus_elt(self, elt):
"""Load a subcorpus of a lexical unit from the given xml."""
sc = AttrDict()
try:
sc['name'] = elt.get('name')
except __HOLE__:
return None
sc['_type'] = "lusubcorpus"
sc['sentence'] = []
for sub in elt:
if sub.tag.endswith('sentence'):
s = self._handle_lusentence_elt(sub)
if s is not None:
sc['sentence'].append(s)
return sc | AttributeError | dataset/ETHPy150Open nltk/nltk/nltk/corpus/reader/framenet.py/FramenetCorpusReader._handle_lusubcorpus_elt |
def generate_data(self, count, offset):
"""
Generates training data in the CRF++ format for the ingredient
tagging task
"""
df = pd.read_csv(self.opts.data_path)
df = df.fillna("")
start = int(offset)
end = int(offset) + int(count)
df_slice = df.iloc[start: end]
for index, row in df_slice.iterrows():
try:
# extract the display name
display_input = utils.cleanUnicodeFractions(row["input"])
tokens = utils.tokenize(display_input)
del(row["input"])
rowData = self.addPrefixes([(t, self.matchUp(t, row)) for t in tokens])
for i, (token, tags) in enumerate(rowData):
features = utils.getFeatures(token, i+1, tokens)
print utils.joinLine([token] + features + [self.bestTag(tags)])
# ToDo: deal with this
except __HOLE__:
pass
print | UnicodeDecodeError | dataset/ETHPy150Open NYTimes/ingredient-phrase-tagger/lib/training/cli.py/Cli.generate_data |
def check_non_negative_int(value):
try:
value = int(value)
except __HOLE__:
raise argparse.ArgumentTypeError(_("invalid int value: %r") % value)
if value < 0:
raise argparse.ArgumentTypeError(_("input value %d is negative") %
value)
return value | ValueError | dataset/ETHPy150Open openstack/python-neutronclient/neutronclient/shell.py/check_non_negative_int |
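A validator like the one above plugs straight into argparse as a type= callable; argparse reports both ArgumentTypeError and a plain ValueError from int() as ordinary usage errors. A sketch (the _() gettext wrapper is dropped here for self-containment):

import argparse

def non_negative_int(value):
    value = int(value)   # a ValueError here also becomes a usage error
    if value < 0:
        raise argparse.ArgumentTypeError("input value %d is negative" % value)
    return value

parser = argparse.ArgumentParser()
parser.add_argument("--retries", type=non_negative_int, default=0)
print(parser.parse_args(["--retries", "3"]).retries)   # 3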
def _extend_shell_commands(self, name, module, version):
classes = inspect.getmembers(module, inspect.isclass)
for cls_name, cls in classes:
if (issubclass(cls, client_extension.NeutronClientExtension) and
hasattr(cls, 'shell_command')):
cmd = cls.shell_command
if hasattr(cls, 'versions'):
if version not in cls.versions:
continue
try:
name_prefix = "[%s]" % name
cls.__doc__ = ("%s %s" % (name_prefix, cls.__doc__) if
cls.__doc__ else name_prefix)
self.command_manager.add_command(cmd, cls)
self.commands[version][cmd] = cls
except __HOLE__:
pass | TypeError | dataset/ETHPy150Open openstack/python-neutronclient/neutronclient/shell.py/NeutronShell._extend_shell_commands |
def run_subcommand(self, argv):
subcommand = self.command_manager.find_command(argv)
cmd_factory, cmd_name, sub_argv = subcommand
cmd = cmd_factory(self, self.options)
try:
self.prepare_to_run_command(cmd)
full_name = (cmd_name
if self.interactive_mode
else ' '.join([self.NAME, cmd_name])
)
cmd_parser = cmd.get_parser(full_name)
return run_command(cmd, cmd_parser, sub_argv)
except __HOLE__:
print(_("Try 'neutron help %s' for more information.") %
cmd_name, file=sys.stderr)
raise
except Exception as e:
if self.options.verbose_level >= self.DEBUG_LEVEL:
self.log.exception("%s", e)
raise
self.log.error("%s", e)
return 1 | SystemExit | dataset/ETHPy150Open openstack/python-neutronclient/neutronclient/shell.py/NeutronShell.run_subcommand |
def main(argv=sys.argv[1:]):
try:
return NeutronShell(NEUTRON_API_VERSION).run(
list(map(encodeutils.safe_decode, argv)))
except __HOLE__:
print("... terminating neutron client", file=sys.stderr)
return 130
except exc.NeutronClientException:
return 1
except Exception as e:
print(e)
return 1 | KeyboardInterrupt | dataset/ETHPy150Open openstack/python-neutronclient/neutronclient/shell.py/main |
def test_stress(repeat=1000):
sdict = SortedDict((val, -val) for val in range(1000))
for rpt in range(repeat):
action = random.choice(actions)
action(sdict)
try:
sdict._check()
except __HOLE__:
print(action)
raise
start_len = len(sdict)
while len(sdict) < 500:
key = random.randrange(0, 2000)
sdict[key] = -key
while len(sdict) > 2000:
key = random.randrange(0, 2000)
if key in sdict:
del sdict[key]
if start_len != len(sdict):
sdict._check() | AssertionError | dataset/ETHPy150Open grantjenks/sorted_containers/tests/test_stress_sorteddict.py/test_stress |
def rm_xmltag(statement):
try:
_t = statement.startswith(XMLTAG)
except __HOLE__:
statement = statement.decode("utf8")
_t = statement.startswith(XMLTAG)
if _t:
statement = statement[len(XMLTAG):]
if statement[0] == '\n':
statement = statement[1:]
elif statement.startswith(PREFIX1):
statement = statement[len(PREFIX1):]
if statement[0] == '\n':
statement = statement[1:]
elif statement.startswith(PREFIX2):
statement = statement[len(PREFIX2):]
if statement[0] == '\n':
statement = statement[1:]
return statement | TypeError | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/pysaml2-2.4.0/src/saml2/sigver.py/rm_xmltag |
def get_xmlsec_binary(paths=None):
"""
Tries to find the xmlsec1 binary.
:param paths: Non-system path paths which should be searched when
looking for xmlsec1
:return: full name of the xmlsec1 binary found. If no binaries are
found then an exception is raised.
"""
if os.name == "posix":
bin_name = ["xmlsec1"]
elif os.name == "nt":
bin_name = ["xmlsec.exe", "xmlsec1.exe"]
else: # Default !?
bin_name = ["xmlsec1"]
if paths:
for bname in bin_name:
for path in paths:
fil = os.path.join(path, bname)
try:
if os.lstat(fil):
return fil
except OSError:
pass
for path in os.environ["PATH"].split(os.pathsep):
for bname in bin_name:
fil = os.path.join(path, bname)
try:
if os.lstat(fil):
return fil
except __HOLE__:
pass
raise SigverError("Can't find %s" % bin_name) | OSError | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/pysaml2-2.4.0/src/saml2/sigver.py/get_xmlsec_binary |
def _make_vals(val, klass, seccont, klass_inst=None, prop=None, part=False,
base64encode=False, elements_to_sign=None):
"""
Creates a class instance with a specified value, the specified
class instance may be a value on a property in a defined class instance.
:param val: The value
:param klass: The value class
:param klass_inst: The class instance which has a property on which
what this function returns is a value.
:param prop: The property which the value should be assigned to.
:param part: If the value is one of a possible list of values it should be
handled slightly differently than if it isn't.
:return: Value class instance
"""
cinst = None
#print "make_vals(%s, %s)" % (val, klass)
if isinstance(val, dict):
cinst = _instance(klass, val, seccont, base64encode=base64encode,
elements_to_sign=elements_to_sign)
else:
try:
cinst = klass().set_text(val)
except __HOLE__:
if not part:
cis = [_make_vals(sval, klass, seccont, klass_inst, prop,
True, base64encode, elements_to_sign) for sval
in val]
setattr(klass_inst, prop, cis)
else:
raise
if part:
return cinst
else:
if cinst:
cis = [cinst]
setattr(klass_inst, prop, cis) | ValueError | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/pysaml2-2.4.0/src/saml2/sigver.py/_make_vals |
def active_cert(key):
"""
Verifies that a key is active, i.e. that the present time is after
not_before and before not_after.
:param key: The Key
:return: True if the key is active else False
"""
cert_str = pem_format(key)
certificate = importKey(cert_str)
try:
not_before = to_time(str(certificate.get_not_before()))
not_after = to_time(str(certificate.get_not_after()))
assert not_before < utc_now()
assert not_after > utc_now()
return True
except __HOLE__:
return False
except AttributeError:
return False | AssertionError | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/pysaml2-2.4.0/src/saml2/sigver.py/active_cert |
def verify_redirect_signature(saml_msg, cert=None, sigkey=None):
"""
:param saml_msg: A dictionary with strings as values, *NOT* lists as
produced by parse_qs.
:param cert: A certificate to use when verifying the signature
:return: True, if signature verified
"""
try:
signer = SIGNER_ALGS[saml_msg["SigAlg"]]
except __HOLE__:
raise Unsupported("Signature algorithm: %s" % saml_msg["SigAlg"])
else:
if saml_msg["SigAlg"] in SIGNER_ALGS:
if "SAMLRequest" in saml_msg:
_order = REQ_ORDER
elif "SAMLResponse" in saml_msg:
_order = RESP_ORDER
else:
raise Unsupported(
"Verifying signature on something that should not be "
"signed")
_args = saml_msg.copy()
del _args["Signature"] # everything but the signature
string = "&".join(
[urllib.urlencode({k: _args[k]}) for k in _order if k in _args])
if cert:
_key = extract_rsa_key_from_x509_cert(pem_format(cert))
else:
_key = sigkey
_sign = base64.b64decode(saml_msg["Signature"])
return bool(signer.verify(string, _sign, _key)) | KeyError | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/pysaml2-2.4.0/src/saml2/sigver.py/verify_redirect_signature |