function (string, 79-138k chars) | label (20 classes) | info (string, 42-261 chars) |
---|---|---|
def render(self, environ, start_response):
verb = environ.get('REQUEST_METHOD', 'GET').strip().upper()
if verb not in ('GET', 'POST'):
response = Response('405 Method Not Allowed', status=405,
mimetype='text/plain')
response.headers['Allow'] = 'GET, POST'
return response(environ, start_response)
# handle on/off switch
if verb == 'POST':
try:
clen = int(environ.get('CONTENT_LENGTH', '0'))
except __HOLE__:
clen = 0
body = environ['wsgi.input'].read(clen).decode('utf-8')
body = urlparse.parse_qs(body)
clear = body.get('clear', None)
if clear:
del self.stats[:]
return self.render_response(environ, start_response)
turn = body.get('turn', ' ')[0].strip().lower()
if turn not in ('on', 'off'):
response = Response('400 Bad Request: parameter '
'"turn=(on|off)" required',
status='400', mimetype='text/plain')
return response(environ, start_response)
if turn == 'on':
self.start()
else:
self.stop()
try:
while True:
self.stats.append(self.collector.get(block=False))
except queue.Empty:
pass
return self.render_response(environ, start_response) | ValueError | dataset/ETHPy150Open inconshreveable/sqltap/sqltap/wsgi.py/SQLTapMiddleware.render |
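The masked exception above guards the standard non-blocking drain idiom: call get(block=False) in a loop until the queue reports it is empty. A minimal, self-contained sketch of the same idiom (Python 3 `queue` module; names are illustrative, not part of sqltap):

```python
import queue  # Python 2 code like the snippet above would import Queue instead

def drain(q):
    """Collect everything currently waiting in a queue without blocking."""
    items = []
    try:
        while True:
            items.append(q.get(block=False))  # raises queue.Empty when drained
    except queue.Empty:
        pass
    return items

q = queue.Queue()
for n in (1, 2, 3):
    q.put(n)
assert drain(q) == [1, 2, 3]
assert drain(q) == []
```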
def test_ftest_pvalues(self):
res = self.results
use_t = res.use_t
k_vars = len(res.params)
# check default use_t
pvals = [res.wald_test(np.eye(k_vars)[k], use_f=use_t).pvalue
for k in range(k_vars)]
assert_allclose(pvals, res.pvalues, rtol=5e-10, atol=1e-25)
# automatic use_f based on results class use_t
pvals = [res.wald_test(np.eye(k_vars)[k]).pvalue
for k in range(k_vars)]
assert_allclose(pvals, res.pvalues, rtol=5e-10, atol=1e-25)
# label for pvalues in summary
string_use_t = 'P>|z|' if use_t is False else 'P>|t|'
summ = str(res.summary())
assert_(string_use_t in summ)
# try except for models that don't have summary2
try:
summ2 = str(res.summary2())
except __HOLE__:
summ2 = None
if summ2 is not None:
assert_(string_use_t in summ2)
# TODO The following is not (yet) guaranteed across models
#@knownfailureif(True) | AttributeError | dataset/ETHPy150Open statsmodels/statsmodels/statsmodels/base/tests/test_generic_methods.py/CheckGenericMixin.test_ftest_pvalues |
def HAS(module):
try:
return __import__(module)
except __HOLE__:
return None | ImportError | dataset/ETHPy150Open facebook/sparts/sparts/deps.py/HAS |
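HAS() is the classic optional-dependency probe: return the module if it imports, None otherwise. A hedged sketch of the same probe using importlib (the helper name here is hypothetical, not part of sparts):

```python
import importlib

def has_module(name):
    """Return the imported module, or None if it is not installed."""
    try:
        return importlib.import_module(name)
    except ImportError:  # covers ModuleNotFoundError on Python 3
        return None

assert has_module("json") is not None               # stdlib module
assert has_module("module_that_is_missing") is None
```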
def __exit__(self, exc_type, exc_value, tb):
if exc_type is None:
try:
exc_name = self.expected.__name__
except __HOLE__:
exc_name = str(self.expected)
raise self.failureException(
"%s not raised" % (exc_name,))
if not issubclass(exc_type, self.expected):
# let unexpected exceptions pass through
return False
self.exception = exc_value # store for later retrieval
if self.expected_regexp is None:
return True
expected_regexp = self.expected_regexp
if isinstance(expected_regexp, basestring):
expected_regexp = re.compile(expected_regexp)
if not expected_regexp.search(str(exc_value)):
raise self.failureException('"%s" does not match "%s"' %
(expected_regexp.pattern, str(exc_value)))
return True | AttributeError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.3/django/utils/unittest/case.py/_AssertRaisesContext.__exit__ |
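The AttributeError handled above exists because `expected` may be a tuple of exception classes, which has no __name__. A minimal sketch of the same context-manager shape (hypothetical `expect_raises`, not the unittest implementation):

```python
import re

class expect_raises(object):
    """Illustrative assertRaises-style context manager, not the unittest one."""
    def __init__(self, expected, pattern=None):
        self.expected = expected
        self.pattern = pattern
        self.exception = None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tb):
        if exc_type is None:
            try:
                name = self.expected.__name__
            except AttributeError:        # e.g. a tuple of exception classes
                name = str(self.expected)
            raise AssertionError("%s not raised" % name)
        if not issubclass(exc_type, self.expected):
            return False                  # let unexpected exceptions propagate
        self.exception = exc_value
        if self.pattern and not re.search(self.pattern, str(exc_value)):
            raise AssertionError("%r does not match %r" % (self.pattern, str(exc_value)))
        return True                       # swallow the expected exception

with expect_raises(ValueError, pattern="invalid"):
    int("invalid literal")
```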
def __init__(self, methodName='runTest'):
"""Create an instance of the class that will use the named test
method when executed. Raises a ValueError if the instance does
not have a method with the specified name.
"""
self._testMethodName = methodName
self._resultForDoCleanups = None
try:
testMethod = getattr(self, methodName)
except __HOLE__:
raise ValueError("no such test method in %s: %s" % \
(self.__class__, methodName))
self._testMethodDoc = testMethod.__doc__
self._cleanups = []
# Map types to custom assertEqual functions that will compare
# instances of said type in more detail to generate a more useful
# error message.
self._type_equality_funcs = _TypeEqualityDict(self)
self.addTypeEqualityFunc(dict, 'assertDictEqual')
self.addTypeEqualityFunc(list, 'assertListEqual')
self.addTypeEqualityFunc(tuple, 'assertTupleEqual')
self.addTypeEqualityFunc(set, 'assertSetEqual')
self.addTypeEqualityFunc(frozenset, 'assertSetEqual')
self.addTypeEqualityFunc(unicode, 'assertMultiLineEqual') | AttributeError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.3/django/utils/unittest/case.py/TestCase.__init__ |
def _formatMessage(self, msg, standardMsg):
"""Honour the longMessage attribute when generating failure messages.
If longMessage is False this means:
* Use only an explicit message if it is provided
* Otherwise use the standard message for the assert
If longMessage is True:
* Use the standard message
* If an explicit message is provided, plus ' : ' and the explicit message
"""
if not self.longMessage:
return msg or standardMsg
if msg is None:
return standardMsg
try:
return '%s : %s' % (standardMsg, msg)
except __HOLE__:
return '%s : %s' % (safe_str(standardMsg), safe_str(msg)) | UnicodeDecodeError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.3/django/utils/unittest/case.py/TestCase._formatMessage |
def assertSequenceEqual(self, seq1, seq2,
msg=None, seq_type=None, max_diff=80*8):
"""An equality assertion for ordered sequences (like lists and tuples).
For the purposes of this function, a valid ordered sequence type is one
which can be indexed, has a length, and has an equality operator.
Args:
seq1: The first sequence to compare.
seq2: The second sequence to compare.
seq_type: The expected datatype of the sequences, or None if no
datatype should be enforced.
msg: Optional message to use on failure instead of a list of
differences.
max_diff: Maximum size of the diff, larger diffs are not shown
"""
if seq_type is not None:
seq_type_name = seq_type.__name__
if not isinstance(seq1, seq_type):
raise self.failureException('First sequence is not a %s: %s'
% (seq_type_name, safe_repr(seq1)))
if not isinstance(seq2, seq_type):
raise self.failureException('Second sequence is not a %s: %s'
% (seq_type_name, safe_repr(seq2)))
else:
seq_type_name = "sequence"
differing = None
try:
len1 = len(seq1)
except (TypeError, NotImplementedError):
differing = 'First %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
try:
len2 = len(seq2)
except (TypeError, NotImplementedError):
differing = 'Second %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
if seq1 == seq2:
return
seq1_repr = repr(seq1)
seq2_repr = repr(seq2)
if len(seq1_repr) > 30:
seq1_repr = seq1_repr[:30] + '...'
if len(seq2_repr) > 30:
seq2_repr = seq2_repr[:30] + '...'
elements = (seq_type_name.capitalize(), seq1_repr, seq2_repr)
differing = '%ss differ: %s != %s\n' % elements
for i in xrange(min(len1, len2)):
try:
item1 = seq1[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of first %s\n' %
(i, seq_type_name))
break
try:
item2 = seq2[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of second %s\n' %
(i, seq_type_name))
break
if item1 != item2:
differing += ('\nFirst differing element %d:\n%s\n%s\n' %
(i, item1, item2))
break
else:
if (len1 == len2 and seq_type is None and
type(seq1) != type(seq2)):
# The sequences are the same, but have differing types.
return
if len1 > len2:
differing += ('\nFirst %s contains %d additional '
'elements.\n' % (seq_type_name, len1 - len2))
try:
differing += ('First extra element %d:\n%s\n' %
(len2, seq1[len2]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of first %s\n' % (len2, seq_type_name))
elif len1 < len2:
differing += ('\nSecond %s contains %d additional '
'elements.\n' % (seq_type_name, len2 - len1))
try:
differing += ('First extra element %d:\n%s\n' %
(len1, seq2[len1]))
except (__HOLE__, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of second %s\n' % (len1, seq_type_name))
standardMsg = differing
diffMsg = '\n' + '\n'.join(
difflib.ndiff(pprint.pformat(seq1).splitlines(),
pprint.pformat(seq2).splitlines()))
standardMsg = self._truncateMessage(standardMsg, diffMsg)
msg = self._formatMessage(msg, standardMsg)
self.fail(msg) | TypeError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.3/django/utils/unittest/case.py/TestCase.assertSequenceEqual |
def assertSetEqual(self, set1, set2, msg=None):
"""A set-specific equality assertion.
Args:
set1: The first set to compare.
set2: The second set to compare.
msg: Optional message to use on failure instead of a list of
differences.
assertSetEqual uses ducktyping to support
different types of sets, and is optimized for sets specifically
(parameters must support a difference method).
"""
try:
difference1 = set1.difference(set2)
except TypeError, e:
self.fail('invalid type when attempting set difference: %s' % e)
except AttributeError, e:
self.fail('first argument does not support set difference: %s' % e)
try:
difference2 = set2.difference(set1)
except TypeError, e:
self.fail('invalid type when attempting set difference: %s' % e)
except __HOLE__, e:
self.fail('second argument does not support set difference: %s' % e)
if not (difference1 or difference2):
return
lines = []
if difference1:
lines.append('Items in the first set but not the second:')
for item in difference1:
lines.append(repr(item))
if difference2:
lines.append('Items in the second set but not the first:')
for item in difference2:
lines.append(repr(item))
standardMsg = '\n'.join(lines)
self.fail(self._formatMessage(msg, standardMsg)) | AttributeError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.3/django/utils/unittest/case.py/TestCase.assertSetEqual |
def assertItemsEqual(self, expected_seq, actual_seq, msg=None):
"""An unordered sequence specific comparison. It asserts that
expected_seq and actual_seq contain the same elements. It is
the equivalent of::
self.assertEqual(sorted(expected_seq), sorted(actual_seq))
Raises with an error message listing which elements of expected_seq
are missing from actual_seq and vice versa if any.
Asserts that each element has the same count in both sequences.
Example:
- [0, 1, 1] and [1, 0, 1] compare equal.
- [0, 0, 1] and [0, 1] compare unequal.
"""
try:
expected = sorted(expected_seq)
actual = sorted(actual_seq)
except __HOLE__:
# Unsortable items (example: set(), complex(), ...)
expected = list(expected_seq)
actual = list(actual_seq)
missing, unexpected = unorderable_list_difference(
expected, actual, ignore_duplicate=False
)
else:
return self.assertSequenceEqual(expected, actual, msg=msg)
errors = []
if missing:
errors.append('Expected, but missing:\n %s' %
safe_repr(missing))
if unexpected:
errors.append('Unexpected, but present:\n %s' %
safe_repr(unexpected))
if errors:
standardMsg = '\n'.join(errors)
self.fail(self._formatMessage(msg, standardMsg)) | TypeError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.3/django/utils/unittest/case.py/TestCase.assertItemsEqual |
def _scanDirectory(self, dirIter, f):
while len(f) < 250:
try:
info = dirIter.next()
except __HOLE__:
if not f:
raise EOFError
return f
if isinstance(info, defer.Deferred):
info.addCallback(self._cbScanDirectory, dirIter, f)
return
else:
f.append(info)
return f | StopIteration | dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/conch/ssh/filetransfer.py/FileTransferServer._scanDirectory |
def dump_content(self, content, from_addr, to_addr, direction='sending'):
"""Dump content to stdout."""
self.dump_title(from_addr, to_addr, direction, 'content')
if self.content_encoding:
print "(%d bytes of %s with %s encoding)" % (len(content),
repr(self.content_type), repr(self.content_encoding))
else:
print "(%d bytes of %s)" % (len(content), repr(self.content_type))
if self.content_encoding == 'gzip':
if options.gzip_size_limit == 0 or \
self.gzip_data.tell() < options.gzip_size_limit:
self.gzip_data.write(content)
try:
content = self.gunzip()
except __HOLE__, error:
content = 'Could not gunzip: ' + str(error)
if self.content_type.startswith('text/'):
limit = options.text_dump_limit
elif self.content_type.startswith('application/') and \
self.content_type.count('xml'):
limit = options.text_dump_limit
else:
limit = options.data_dump_limit
content = repr(content)
if len(content) < limit or limit == 0:
print content
else:
print content[:limit] + '(showing only %d bytes)' % limit
print | IOError | dataset/ETHPy150Open jcrocholl/throxy/throxy.py/Header.dump_content |
@property
def local_device_name(self):
"""Bluetooth Local Name encoded as sequence of characters in
the given order. Received as complete (EIR type 0x09) or
shortened (EIR type 0x08) local name. Transmitted as complete
local name. Set to None if not received or not to be
transmitted."""
try: return self.eir[0x09]
except __HOLE__: return self.eir.get(0x08, None) | KeyError | dataset/ETHPy150Open javgh/greenaddress-pos-tools/nfc/ndef/bt_record.py/BluetoothConfigRecord.local_device_name |
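This property (and several that follow) all use the same pattern: try one EIR type and fall back to another via KeyError. A small hedged sketch of that lookup chain (the helper is illustrative, not part of nfcpy):

```python
def first_eir_value(eir, keys, default=None):
    """Return the value of the first EIR type present in `keys`."""
    for key in keys:
        try:
            return eir[key]
        except KeyError:
            continue
    return default

# complete local name (0x09) preferred over the shortened name (0x08)
assert first_eir_value({0x08: b"short"}, (0x09, 0x08)) == b"short"
assert first_eir_value({}, (0x09, 0x08)) is None
```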
@property
def simple_pairing_hash(self):
"""Simple Pairing Hash C. Received and transmitted as EIR type
0x0E. Set to None if not received or not to be transmitted.
Raises nfc.ndef.DecodeError if the received value or
nfc.ndef.EncodeError if the assigned value is not a sequence
of 16 octets."""
try:
if len(self.eir[0x0E]) != 16:
raise DecodeError("wrong length of simple pairing hash")
return bytearray(self.eir[0x0E])
except __HOLE__:
return None | KeyError | dataset/ETHPy150Open javgh/greenaddress-pos-tools/nfc/ndef/bt_record.py/BluetoothConfigRecord.simple_pairing_hash |
@property
def simple_pairing_rand(self):
"""Simple Pairing Randomizer R. Received and transmitted as
EIR type 0x0F. Set to None if not received or not to be
transmitted. Raises nfc.ndef.DecodeError if the received value
or nfc.ndef.EncodeError if the assigned value is not a
sequence of 16 octets."""
try:
if len(self.eir[0x0F]) != 16:
raise DecodeError("wrong length of simple pairing hash")
return bytearray(self.eir[0x0F])
except __HOLE__:
return None | KeyError | dataset/ETHPy150Open javgh/greenaddress-pos-tools/nfc/ndef/bt_record.py/BluetoothConfigRecord.simple_pairing_rand |
@property
def service_class_uuid_list(self):
"""Listq of Service Class UUIDs. Set and retrieved as a list
of complete 128-bit UUIDs. Decoded from and encoded as EIR
types 0x02/0x03 (16-bit partial/complete UUIDs), 0x04/0x05
(32-bit partial/complete UUIDs), 0x06/0x07 (128-bit
partial/complete UUIDs)."""
L = list()
try: uuid_list = self.eir[0x03]
except KeyError: uuid_list = self.eir.get(0x02, '')
for x in struct.unpack("<"+"H"*(len(uuid_list)/2), uuid_list):
L.append("{0:08x}-0000-1000-8000-00805f9b34fb".format(x))
try: uuid_list = self.eir[0x05]
except KeyError: uuid_list = self.eir.get(0x04, '')
for x in struct.unpack("<"+"L"*(len(uuid_list)/4), uuid_list):
L.append("{0:08x}-0000-1000-8000-00805f9b34fb".format(x))
try: uuid_list = self.eir[0x07]
except __HOLE__: uuid_list = self.eir.get(0x06, '')
for i in range(0, len(uuid_list), 16):
L.append(str(UUID(bytes_le=uuid_list[i:i+16])))
return L | KeyError | dataset/ETHPy150Open javgh/greenaddress-pos-tools/nfc/ndef/bt_record.py/BluetoothConfigRecord.service_class_uuid_list |
@property
def class_of_device(self):
"""Class of Device encoded as unsigned long integer. Received
and transmitted as EIR type 0x0D in little endian byte
order. Set to None if not received or not to be
transmitted."""
try: return int(self.eir[0x0D][::-1].encode("hex"), 16)
except __HOLE__: return None | KeyError | dataset/ETHPy150Open javgh/greenaddress-pos-tools/nfc/ndef/bt_record.py/BluetoothConfigRecord.class_of_device |
def pretty(self, indent=0):
lines = list()
if self.name:
lines.append(("identifier", repr(self.name)))
lines.append(("device address", self.device_address))
if self.local_device_name:
lines.append(("device name", self.local_device_name))
if self.class_of_device:
cod = self.class_of_device
if cod & 0x003 == 0:
lines.append(("device class", decode_class_of_device(cod)))
msc = [major_service_class[mask]
for mask in sorted(major_service_class)
if self.class_of_device >> 13 & mask]
lines.append(("major service", ", ".join(msc)))
else:
lines.append(("class of device", "{0:b}".format(cod)))
if self.simple_pairing_hash:
simple_pairing_hash = str(self.simple_pairing_hash)
lines.append(("pubkey hash", simple_pairing_hash.encode("hex")))
if self.simple_pairing_rand:
simple_pairing_rand = str(self.simple_pairing_rand)
lines.append(("randomizer", simple_pairing_rand.encode("hex")))
for service_class_uuid in self.service_class_uuid_list:
try: service_class = service_class_uuid_map[service_class_uuid]
except __HOLE__: service_class = service_class_uuid
lines.append(("service class", service_class))
for key, value in self.eir.items():
if key not in (3, 5, 7, 8, 9, 13, 14, 15):
lines.append(("EIR 0x%02x" % key, repr(value)))
indent = indent * ' '
lwidth = max([len(line[0]) for line in lines])
lines = [line[0].ljust(lwidth) + " = " + line[1] for line in lines]
return ("\n").join([indent + line for line in lines]) | KeyError | dataset/ETHPy150Open javgh/greenaddress-pos-tools/nfc/ndef/bt_record.py/BluetoothConfigRecord.pretty |
def decode_class_of_device(cod):
mdc, sdc = cod >> 8 & 0x1f, cod >> 2 & 0x3f
if mdc == 0:
mdc = "Miscellaneous"
sdc = "{0:06b}".format(sdc)
elif mdc == 1:
mdc = "Computer"
minor_device_class = (
"Uncategorized",
"Desktop workstation",
"Server-class computer",
"Laptop",
"Handheld PC/PDA (clam shell)",
"Palm sized PC/PDA",
"Wearable computer (Watch sized)")
try: sdc = minor_device_class[sdc]
except IndexError: sdc = "{0:06b}".format(sdc)
elif mdc == 2:
mdc = "Phone"
minor_device_class = (
"Uncategorized",
"Cellular",
"Cordless",
"Smart phone",
"Wired modem or voice gateway",
"Common ISDN Access")
try: sdc = minor_device_class[sdc]
except IndexError: sdc = "{0:06b}".format(sdc)
elif mdc == 3:
mdc = "Access Point"
minor_device_class = (
"fully available",
"1 - 17% utilized",
"17 - 33% utilized",
"33 - 50% utilized",
"50 - 67% utilized",
"67 - 83% utilized",
"83 - 99% utilized",
"no service available")
try: sdc = minor_device_class[sdc >> 3]
except IndexError: sdc = "{0:06b}".format(sdc)
elif mdc == 4:
mdc = "Audio/Video"
minor_device_class = (
"Uncategorized",
"Wearable Headset Device",
"Hands-free Device",
"Reserved",
"Microphone",
"Loudspeaker",
"Headphones",
"Portable Audio",
"Car audio",
"Set-top box",
"HiFi Audio Device",
"VCR",
"Video Camera",
"Camcorder",
"Video Monitor",
"Video Display and Loudspeaker",
"Video Conferencing",
"Reserved",
"Gaming/Toy")
try: sdc = minor_device_class[sdc]
except IndexError: sdc = "{0:06b}".format(sdc)
elif mdc == 5:
mdc = "Peripheral"
minor_device_class = (
"uncategorized",
"joystick",
"gamepad",
"remote control",
"sensing device",
"digitizer tablet",
"card reader",
"digital pen",
"handheld scanner",
"handheld pointer")
kbd_mouse = ("", " keyboard", " mouse", " keyboard/mouse")[sdc >> 4]
try: sdc = minor_device_class[sdc & 0x0f]
except __HOLE__: sdc = "{0:06b}".format(sdc)
sdc = sdc + kbd_mouse
elif mdc == 6:
mdc = "Imaging"
minor_device_class = {
0b0001: "display",
0b0010: "camera",
0b0100: "scanner",
0b1000: "printer"}
sdc = ', '.join([minor_device_class[mask]
for mask in minor_device_class
if sdc >> 2 & mask])
elif mdc == 7:
mdc = "Wearable"
minor_device_class = (
"Wrist Watch",
"Pager",
"Jacket",
"Helmet",
"Glasses")
try: sdc = minor_device_class[sdc & 0x0f]
except IndexError: sdc = "{0:06b}".format(sdc)
elif mdc == 8:
mdc = "Toy"
minor_device_class = (
"Robot",
"Vehicle",
"Doll / Action Figure",
"Controller",
"Game")
try: sdc = minor_device_class[sdc & 0x0f]
except IndexError: sdc = "{0:06b}".format(sdc)
elif mdc == 9:
mdc = "Health"
minor_device_class = (
"Undefined",
"Blood Pressure Monitor",
"Thermometer",
"Weighing Scale",
"Glucose Meter",
"Pulse Oximeter",
"Heart/Pulse Rate Monitor",
"Health Data Display",
"Step Counter",
"Body Composition Analyzer",
"Peak Flow Monitor",
"Medication Monitor",
"Knee Prosthesis",
"Ankle Prosthesis",
"Generic Health Manager",
"Personal Mobility Device")
try: sdc = minor_device_class[sdc & 0x0f]
except IndexError: sdc = "{0:06b}".format(sdc)
elif mdc == 31:
mdc = "Uncategorized"
sdc = "{0:06b}".format(sdc)
else:
mdc = "{0:05b}".format(mdc)
sdc = "{0:06b}".format(sdc)
return "{0} ({1})".format(mdc, sdc) | IndexError | dataset/ETHPy150Open javgh/greenaddress-pos-tools/nfc/ndef/bt_record.py/decode_class_of_device |
def read_la_file(path):
sp = re.compile(r'^([^=]+)=\'(.*)\'$')
dc={}
file = open(path, "r")
for line in file.readlines():
try:
#print sp.split(line.strip())
_, left, right, _ = sp.split(line.strip())
dc[left]=right
except __HOLE__:
pass
file.close()
return dc | ValueError | dataset/ETHPy150Open appcelerator-archive/poc-nodejs-desktop/Resources/nodejs/builds/linux/node/lib/node/wafadmin/Tools/libtool.py/read_la_file |
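read_la_file splits each line with a grouped regex and catches ValueError when the unpack fails. An alternative sketch that matches instead of splitting, so no exception handling is needed (names are illustrative, not from the waf source):

```python
import re

_ASSIGNMENT = re.compile(r"^([^=]+)='(.*)'$")

def parse_la_line(line):
    """Return (key, value) for a libtool key='value' line, or None otherwise."""
    match = _ASSIGNMENT.match(line.strip())
    if match is None:
        return None
    return match.group(1), match.group(2)

assert parse_la_line("dlname='libfoo.so.1'\n") == ("dlname", "libfoo.so.1")
assert parse_la_line("# just a comment") is None
```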
def __parse(self):
"Retrieve the variables from a file"
if not os.path.isfile(self.__la_filename): return 0
la_file=open(self.__la_filename, 'r')
for line in la_file:
ln = line.strip()
if not ln: continue
if ln[0]=='#': continue
(key, value) = str(ln).split('=', 1)
key = key.strip()
value = value.strip()
if value == "no": value = False
elif value == "yes": value = True
else:
try: value = int(value)
except __HOLE__: value = value.strip("'")
setattr(self, key, value)
la_file.close()
return 1 | ValueError | dataset/ETHPy150Open appcelerator-archive/poc-nodejs-desktop/Resources/nodejs/builds/linux/node/lib/node/wafadmin/Tools/libtool.py/libtool_la_file.__parse |
def migrate_old_comments(apps, schema_editor):
# https://code.djangoproject.com/ticket/24282
# If someone has a better solution until Django 1.8, would appreciate
# a pull-request :)
try:
from django.contrib.comments.models import Comment as OldComment
except __HOLE__:
# Django > 1.7
pass
else:
Comment = apps.get_model('document_comments', 'Comment')
Document = apps.get_model('documents', 'Document')
User = apps.get_model(*settings.AUTH_USER_MODEL.split('.'))
for old_comment in OldComment.objects.all():
comment = Comment(
document=Document.objects.get(pk=old_comment.object_pk),
user=User(old_comment.user.pk),
comment=old_comment.comment,
submit_date=old_comment.submit_date,
)
comment.save() | ImportError | dataset/ETHPy150Open mayan-edms/mayan-edms/mayan/apps/document_comments/migrations/0003_auto_20150729_2144.py/migrate_old_comments |
def run():
FrozenPriceCurrency.objects.no_cache().all().delete()
for k in sorted(tiers.keys()):
v = tiers[k]
try:
tier = Price.objects.filter(price=k).no_transforms()[0]
except __HOLE__:
print 'Tier does not exist: {0}'.format(k)
continue
for country, values in v.items():
FrozenPriceCurrency.objects.create(
tier=tier,
carrier=None,
provider=1,
price=values['price'],
region=values['region'],
currency=values['currency']
)
print 'Creating: {0}, {1}'.format(k, country) | IndexError | dataset/ETHPy150Open mozilla/addons-server/src/olympia/migrations/607-reboot-price-currencies.py/run |
def getInterfaceConnection(self, interface, protocol):
""" Get the connection between an interface and a protocol.
@param interface: Interface which belongs to this endpoint and
which is on one side of the connection.
@type interface: rce.core.network.Interface
@param protocol: Protocol which belongs to this endpoint and
which is on one side of the connection.
@type protocol: rce.core.network.Protocol
@return: Connection between the interface and the
protocol.
@rtype: rce.core.network.InterfaceConnection
"""
try:
connectionI = self._interfaces[interface]
except KeyError:
raise InternalError('Interface does not belong to this endpoint.')
try:
connectionP = self._protocols[protocol]
except __HOLE__:
raise InternalError('Protocol does not belong to this endpoint.')
candidates = connectionP.intersection(connectionI)
if candidates:
if len(candidates) != 1:
raise InternalError('There are more than one possible '
'interface-protocol connections.')
return candidates.pop()
else:
connection = InterfaceConnection(interface, protocol)
connectionI.add(connection)
connectionP.add(connection)
return connection | KeyError | dataset/ETHPy150Open rapyuta/rce/rce-core/rce/core/network.py/Endpoint.getInterfaceConnection |
def notifyOnDeath(self, cb):
""" Method is used to to register a callback which will be called
when the connection died.
@param cb: Callback which should be registered. The
callback should take the died connection as
only argument.
@type cb: callable
"""
assert callable(cb)
try:
self._cbs.add(cb)
except __HOLE__:
raise AlreadyDead('{0} is already '
'dead.'.format(self.__class__.__name__)) | AttributeError | dataset/ETHPy150Open rapyuta/rce/rce-core/rce/core/network.py/Connection.notifyOnDeath |
def dontNotifyOnDeath(self, cb):
""" Method is used to unregister a callback which should have been
called when the connection died.
@param cb: Callback which should be unregistered.
@type cb: callable
"""
try:
self._cbs.remove(cb)
except __HOLE__:
pass | AttributeError | dataset/ETHPy150Open rapyuta/rce/rce-core/rce/core/network.py/Connection.dontNotifyOnDeath |
def tail_avg(timeseries):
"""
This is a utility function used to calculate the average of the last three
datapoints in the series as a measure, instead of just the last datapoint.
It reduces noise, but it also reduces sensitivity and increases the delay
to detection.
"""
try:
t = (timeseries[-1][1] + timeseries[-2][1] + timeseries[-3][1]) / 3
return t
except __HOLE__:
return timeseries[-1][1] | IndexError | dataset/ETHPy150Open etsy/skyline/src/analyzer/algorithms.py/tail_avg |
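tail_avg relies on IndexError to fall back to the last datapoint when fewer than three exist. An equivalent hedged sketch using slicing, which degrades gracefully without the exception (illustrative, not the skyline implementation):

```python
def tail_avg_sliced(timeseries, n=3):
    """Average the last n values of [(timestamp, value), ...] pairs."""
    tail = [value for _, value in timeseries[-n:]]
    if not tail:
        raise ValueError("empty timeseries")
    return sum(tail) / float(len(tail))

assert tail_avg_sliced([(1, 2.0), (2, 4.0), (3, 6.0)]) == 4.0
assert tail_avg_sliced([(1, 2.0)]) == 2.0   # short series: no IndexError needed
```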
@property
def _view(self):
'''
Returns the view that should receive any actions.
'''
view = None
try:
view = self.view
except AttributeError:
try:
view = self.window.active_view()
except __HOLE__:
raise AttributeError(
'ViCommandMixin must be used with a TextCommand or a WindowCommand class')
return view | AttributeError | dataset/ETHPy150Open guillermooo/Vintageous/vi/core.py/ViCommandMixin._view |
@property
def _window(self):
'''
Returns the view that should receive any actions.
'''
window = None
try:
window = self.window
except AttributeError:
try:
window = self.view.window()
except __HOLE__:
raise AttributeError(
'ViCommandMixin must be used with a TextCommand or a WindowCommand class')
return window | AttributeError | dataset/ETHPy150Open guillermooo/Vintageous/vi/core.py/ViCommandMixin._window |
def is_equal_to_old_sel(self, new_sel):
try:
return (tuple((s.a, s.b) for s in self.old_sel) ==
tuple((s.a, s.b) for s in tuple(self._view.sel())))
except __HOLE__:
raise AttributeError('have you forgotten to call .save_sel()?') | AttributeError | dataset/ETHPy150Open guillermooo/Vintageous/vi/core.py/ViCommandMixin.is_equal_to_old_sel |
@staticmethod
def _getInputs(collada, localscope, inputnodes):
try:
inputs = [(int(i.get('offset')), i.get('semantic'),
i.get('source'), i.get('set'))
for i in inputnodes]
except __HOLE__ as ex:
raise DaeMalformedError('Corrupted offsets in primitive')
return Primitive._getInputsFromList(collada, localscope, inputs) | ValueError | dataset/ETHPy150Open pycollada/pycollada/collada/primitive.py/Primitive._getInputs |
def __str__(self):
try:
return self.__bytes__()
except __HOLE__:
return self.__unicode__().encode('utf-8') | AttributeError | dataset/ETHPy150Open openstack/taskflow/taskflow/utils/mixins.py/StrMixin.__str__ |
def scan(self, **kwargs):
"""Implemnets the scan method in BasePlugin class.
.. note::
In this module, mac addesses were retrieved by ssh.
"""
try:
user = self.credential['username']
pwd = self.credential['password']
except __HOLE__:
logging.error("Cannot find username and password in credential")
return None
cmd = ("BRIDGES=$(ovs-vsctl show |grep Bridge |cut -f 2 -d '\"');"
"for br in $BRIDGES; do"
"PORTS=$(ovs-ofctl show $br |grep addr |cut -f 1 -d ':' "
"|egrep -v 'eth|wlan|LOCAL'|awk -F '(' '{print $1}');"
"for port in $PORTS; do"
"RESULT=$(ovs-appctl fdb/show $br |"
"awk '$1 == '$port' {print $1" "$2" "$3}');"
"echo '$RESULT'"
"done;"
"done;")
output = None
try:
output = utils.ssh_remote_execute(self.host, user, pwd, cmd)
except Exception as error:
logging.exception(error)
return None
logging.debug("[scan][output] output is %s", output)
if not output:
return None
fields_arr = ['port', 'vlan', 'mac']
result = []
for line in output:
if not line or line == '\n':
continue
values_arr = line.split()
temp = {}
for field, value in zip(fields_arr, values_arr):
temp[field] = value
result.append(temp.copy())
return result | KeyError | dataset/ETHPy150Open openstack/compass-core/compass/hdsdiscovery/vendors/ovswitch/plugins/mac.py/Mac.scan |
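The tail of scan() pairs the fixed field names with whitespace-split values via zip(). A compact standalone sketch of just that parsing step (helper name is hypothetical):

```python
def parse_fdb_lines(output, fields=("port", "vlan", "mac")):
    """Turn 'port vlan mac' lines into dicts, skipping blank lines."""
    rows = []
    for line in output:
        values = line.split()
        if not values:
            continue
        rows.append(dict(zip(fields, values)))
    return rows

assert parse_fdb_lines(["1 0 aa:bb:cc:dd:ee:ff", "\n"]) == [
    {"port": "1", "vlan": "0", "mac": "aa:bb:cc:dd:ee:ff"},
]
```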
def __getattr__(self, name):
try:
return self.fields[name]
except __HOLE__:
raise AttributeError(name) | KeyError | dataset/ETHPy150Open ARM-software/workload-automation/wlauto/utils/trace_cmd.py/TraceCmdEvent.__getattr__ |
def __getattr__(self, name):
try:
return self.fields[name]
except __HOLE__:
raise AttributeError(name) | KeyError | dataset/ETHPy150Open ARM-software/workload-automation/wlauto/utils/trace_cmd.py/DroppedEventsEvent.__getattr__ |
def try_convert_to_numeric(v):
try:
if isiterable(v):
return map(numeric, v)
else:
return numeric(v)
except __HOLE__:
return v | ValueError | dataset/ETHPy150Open ARM-software/workload-automation/wlauto/utils/trace_cmd.py/try_convert_to_numeric |
def default_body_parser(event, text):
"""
Default parser that attempts to parse the body text of an event (i.e. after
the "header" common to all events has been parsed). This assumes that the body is
a whitespace-separated list of key=value pairs. The parser will attempt to convert
the value into a numeric type, and failing that, keep it as string.
"""
parts = [e.rsplit(' ', 1) for e in text.strip().split('=')]
parts = [p.strip() for p in chain.from_iterable(parts)]
if not len(parts) % 2:
i = iter(parts)
for k, v in zip(i, i):
try:
v = int(v)
except __HOLE__:
pass
event.fields[k] = v | ValueError | dataset/ETHPy150Open ARM-software/workload-automation/wlauto/utils/trace_cmd.py/default_body_parser |
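default_body_parser turns "key=value key=value" text into event fields, keeping the value as a string when int() raises ValueError. A simplified hedged sketch of that conversion (unlike the parser above it does not handle values containing spaces; the function name is illustrative):

```python
def parse_kv_body(text):
    """Parse 'key=value key2=value2' into a dict, converting ints where possible."""
    fields = {}
    for token in text.split():
        key, sep, value = token.partition('=')
        if not sep:
            continue                      # not a key=value token
        try:
            value = int(value)
        except ValueError:
            pass                          # keep non-numeric values as strings
        fields[key] = value
    return fields

assert parse_kv_body("cpu_id=2 comm=swapper") == {"cpu_id": 2, "comm": "swapper"}
```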
def regex_body_parser(regex, flags=0):
"""
Creates an event body parser from the specified regular expression (could be an
``re.RegexObject``, or a string). The regular expression should contain some named
groups, as those will be extracted as the event attributes (unnamed groups and the
rest of the match will be ignored).
If the specified regex is a string, it will be compiled, in which case ``flags`` may
be provided for the resulting regex object (see ``re`` standard module documentation).
If regex is a pre-compiled object, flags will be ignored.
"""
if isinstance(regex, basestring):
regex = re.compile(regex, flags)
def regex_parser_func(event, text):
match = regex.search(text)
if match:
for k, v in match.groupdict().iteritems():
try:
event.fields[k] = int(v)
except __HOLE__:
event.fields[k] = v
return regex_parser_func | ValueError | dataset/ETHPy150Open ARM-software/workload-automation/wlauto/utils/trace_cmd.py/regex_body_parser |
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
parser = E.OptionParser(
version="%prog version: $Id: psl2map.py 2781 2009-09-10 11:33:14Z andreas $", usage=globals()["__doc__"])
parser.add_option("--queries-tsv-file", dest="input_filename_queries", type="string",
help="fasta filename with queries - required for polyA analysis [%default].")
parser.add_option("--polyA", dest="polyA", action="store_true",
help="detect polyA tails [%default].")
parser.add_option("-p", "--output-filename-pattern", dest="output_filename_pattern", type="string",
help="OUTPUT filename with histogram information on aggregate coverages [%default].")
parser.add_option("--output-filename-empty", dest="output_filename_empty", type="string",
help="OUTPUT filename with queries for which all matches have been discarded [%default].")
parser.add_option("-o", "--output-format", dest="output_format", type="choice",
choices=("map", "psl"),
help="output format to choose [%default].")
parser.add_option("-z", "--from-zipped", dest="from_zipped", action="store_true",
help="input is zipped.")
parser.add_option("--threshold-min-pid", dest="threshold_min_pid", type="float",
help="minimum thresholds for pid [%default].")
parser.add_option("--threshold-min-matches", dest="threshold_min_matches", type="int",
help="minimum threshold for number of matching residues [%default].")
parser.add_option("--threshold-max-error-rate", dest="threshold_max_error_rate", type="float",
help="maximum threshold for error of aligned part [%default].")
parser.add_option("--threshold-good-query-coverage", dest="threshold_good_query_coverage", type="float",
help="minimum query coverage for segments to be counted as good [%default].")
parser.add_option("--threshold-min-query-coverage", dest="threshold_min_query_coverage", type="float",
help="minimum query coverage for segments to be accepted [%default].")
parser.add_option("--threshold-max-query-gapchars", dest="threshold_max_query_gapchars", type="int",
help="maximum number of gap characters in query[%default].")
parser.add_option("--threshold-max-query-gaps", dest="threshold_max_query_gaps", type="int",
help="maximum number of gaps in query[%default].")
parser.add_option("--threshold-max-sbjct-gapchars", dest="threshold_max_sbjct_gapchars", type="int",
help="maximum number of gap characters in sbjct[%default].")
parser.add_option("--keep-unique-matches", dest="keep_unique_matches", action="store_true",
help="ignore filters for unique matches [%default].")
parser.add_option("--keep-all-best", dest="keep_all_best", action="store_true",
help="when sorting matches, keep all matches within the collection threshold [%default].")
parser.add_option("--output-best-per-subject", dest="best_per_sbjct", action="store_true",
help="keep only the best entry per sbjct (for transcript mapping) [%default].")
parser.add_option("--threshold-max-sbjct-gaps", dest="threshold_max_sbjct_gaps", type="int",
help="maximum number of gaps in sbjct[%default].")
parser.add_option("--test", dest="test", type="int",
help="test - stop after # rows of parsing[%default].")
parser.add_option("-m", "--matching-mode", dest="matching_mode", type="choice",
choices=("best-coverage", "best-query-coverage", "best-sbjct-coverage",
"best-pid", "best-covpid", "best-query-covpid", "best-sbjct-covpid",
"best-min-covpid", "best-query-min-covpid", "best-sbjct-min-covpid",
"unique", "all"),
help="determines how to selecte the best match [%default].")
parser.add_option("--subjctfilter-tsv-file", dest="filename_filter_sbjct", type="string",
help="gff file for filtering sbjct matches. Matches overlapping these regions are discarded, but see --keep-forbidden [%default].")
parser.add_option("--keep-forbidden", dest="keep_forbidden", action="store_true",
help="if set, keep only matches that overlap the regions supplied with --subjctfilter-tsv-file [%default].")
parser.add_option("--query-forward-coordinates", dest="query_forward_coordinates", action="store_true",
help="use forward coordinates for query, strand will refer to sbjct [%default].")
parser.add_option("--ignore-all-random", dest="ignore_all_random", action="store_true",
help="if there are multiple best matches, ignore all those to chrUn and _random [%default].")
parser.add_option("--collection-threshold", dest="collection_threshold", type="float",
help="threshold for collecting matches, percent of best score [%default].")
parser.add_option("--collection-distance", dest="collection_distance", type="float",
help="threshold for collecting matches, difference to best score [%default].")
parser.set_defaults(input_filename_domains=None,
input_filename_queries=None,
threshold_good_query_coverage=90.0,
threshold_min_pid=30.0,
threshold_min_matches=0,
threshold_max_error_rate=None,
output_filename_pattern="%s",
keep_unique_matches=False,
output_format="map",
print_matched=["full", "partial", "good"],
from_zipped=False,
combine_overlaps=True,
min_length_domain=30,
threshold_min_query_coverage=50,
min_length_singletons=30,
new_family_id=10000000,
add_singletons=False,
matching_mode="best-coverage",
best_per_sbjct=False,
threshold_max_query_gapchars=None,
threshold_max_query_gaps=None,
threshold_max_sbjct_gapchars=None,
threshold_max_sbjct_gaps=None,
filename_filter_sbjct=None,
keep_forbidden=False,
keep_all_best=False,
test=None,
query_forward_coordinates=False,
output_filename_empty=None,
collection_threshold=1.0,
collection_distance=0,
polyA=False,
# max residues missing from non polyA end
polyA_max_unaligned=3,
# min residues in tail
polyA_min_unaligned=10,
# min percent residues that are A/T in tail
polyA_min_percent=70.0,
# ignore duplicate matches if they are on Un or
# _random
ignore_all_random=False,
)
(options, args) = E.Start(parser, add_pipe_options=True)
if len(args) == 1:
if options.from_zipped or args[0][-3:] == ".gz":
import gzip
infile = gzip.open(args[0], "r")
else:
infile = open(args[0], "r")
else:
infile = sys.stdin
if options.input_filename_queries:
queries_fasta = IndexedFasta.IndexedFasta(
options.input_filename_queries)
else:
queries_fasta = None
if options.filename_filter_sbjct:
try:
import bx.intervals.intersection
except __HOLE__:
raise "filtering for intervals requires the bx tools."
intervals = GTF.readGFFFromFileAsIntervals(
open(options.filename_filter_sbjct, "r"))
intersectors = {}
for contig, values in intervals.items():
intersector = bx.intervals.intersection.Intersecter()
for start, end in values:
intersector.add_interval(bx.intervals.Interval(start, end))
intersectors[contig] = intersector
if options.loglevel >= 1:
options.stdlog.write("# read %i intervals for %i contigs.\n" %
(sum([len(x) for x in intervals.values()]),
len(intersectors)))
else:
intersectors = None
################################################
################################################
################################################
# processing of a chunk (matches of same query)
################################################
ninput, noutput, nskipped = 0, 0, 0
# number of sequences with full/partial/good matches
nfull_matches, npartial_matches, ngood_matches = 0, 0, 0
# number of sequences which are fully/good/partially matched
# i.e., after combining all aligned regions
nfully_matched, npartially_matched, nwell_matched = 0, 0, 0
nremoved_pid, nremoved_query_coverage, nempty = 0, 0, 0
nremoved_gaps, nremoved_nmatches = 0, 0
nremoved_regions = 0
nqueries_removed_region = 0
aggregate_coverages = []
mapped_coverages = []
fully_matched = []
well_matched = []
partially_matched = []
new_family_id = options.new_family_id
if options.output_filename_empty:
outfile_empty = open(options.output_filename_empty, "w")
outfile_empty.write("read_id\tcomment\n")
else:
outfile_empty = None
if options.polyA:
options.outfile_polyA = open(
options.output_filename_pattern % "polyA", "w")
options.outfile_polyA.write("query_id\tstart\tend\tpA+N\tpT+N\ttail\n")
def processChunk(query_id, matches):
"""process a set of matches from query_id"""
global ninput, noutput, nskipped
global nfull_matches, npartial_matches, ngood_matches
global nremoved_pid, nremoved_query_coverage, nempty, nremoved_gaps, nremoved_nmatches
global nremoved_regions, nqueries_removed_region
global outfile_empty
ninput += 1
full_matches = []
good_matches = []
partial_matches = []
x_nremoved_pid, x_nquery_coverage, x_nremoved_gaps, x_nremoved_nmatches = 0, 0, 0, 0
nmatches = len(matches)
new_matches = []
# absolute filters applicable to non-fragmentory matches
for match in matches:
if match.mPid < options.threshold_min_pid:
nremoved_pid += 1
continue
if match.mNMatches < options.threshold_min_matches:
nremoved_nmatches += 1
continue
if options.threshold_max_error_rate:
r = 100.0 * \
math.pow(
options.threshold_max_error_rate, match.mNMatches + match.mNMismatches)
if match.mPid < r:
nremoved_pid += 1
x_nremoved_pid += 1
continue
new_matches.append(match)
matches = new_matches
# filter matches
if len(matches) == 0:
if outfile_empty:
outfile_empty.write("%s\tall matches removed after applying thresholds: before=%i, npid=%i, nqcoverage=%i, ngaps=%i, nmatches=%i\n" %
(query_id, nmatches, x_nremoved_pid, x_nquery_coverage, x_nremoved_gaps, x_nremoved_nmatches))
nskipped += 1
return
if options.keep_unique_matches and len(matches) == 1:
pass
else:
new_matches = []
for match in matches:
if match.mQueryCoverage < options.threshold_min_query_coverage:
nremoved_query_coverage += 1
x_nquery_coverage += 1
continue
if options.threshold_max_query_gaps and options.threshold_max_query_gaps > match.mQueryNGapsCounts:
nremoved_gaps += 1
x_nremoved_gaps += 1
continue
if options.threshold_max_query_gapchars and options.threshold_max_query_gapchars > match.mQueryNGapsBases:
nremoved_gaps += 1
x_nremoved_gaps += 1
continue
if options.threshold_max_sbjct_gaps and options.threshold_max_sbjct_gaps > match.mSbjctNGapsCounts:
nremoved_gaps += 1
x_nremoved_gaps += 1
continue
if options.threshold_max_sbjct_gapchars and options.threshold_max_sbjct_gapchars > match.mSbjctNGapsBases:
nremoved_gaps += 1
x_nremoved_gaps += 1
continue
new_matches.append(match)
matches = new_matches
if len(matches) == 0:
if outfile_empty:
outfile_empty.write("%s\tall matches removed after applying thresholds: before=%i, npid=%i, nqcoverage=%i, ngaps=%i, nmatches=%i\n" %
(query_id, nmatches, x_nremoved_pid, x_nquery_coverage, x_nremoved_gaps, x_nremoved_nmatches))
nskipped += 1
return
# Remove queries matching to a forbidden region. This section
# will remove the full query if any of its matches matches in a
# forbidden region.
keep = True
for match in matches:
if intersectors and match.mSbjctId in intersectors:
found = intersectors[match.mSbjctId].find(
match.mSbjctFrom, match.mSbjctTo)
if (found and not options.keep_forbidden) or (not found and options.keep_forbidden):
nremoved_regions += 1
keep = False
continue
if not keep:
nqueries_removed_region += 1
if outfile_empty:
outfile_empty.write(
"%s\toverlap with forbidden region\n" % query_id)
return
# check for full length matches
for match in matches:
if match.mQueryCoverage >= 99.9:
full_matches.append(match)
if match.mQueryCoverage > options.threshold_good_query_coverage:
good_matches.append(match)
else:
partial_matches.append(match)
if full_matches:
nfull_matches += 1
elif good_matches:
ngood_matches += 1
elif partial_matches:
npartial_matches += 1
# compute coverage of sequence with matches
intervals = []
for match in full_matches + good_matches + partial_matches:
intervals.append((match.mQueryFrom, match.mQueryTo))
rest = Intervals.complement(intervals, 0, match.mQueryLength)
query_coverage = 100.0 * \
(match.mQueryLength -
sum(map(lambda x: x[1] - x[0], rest))) / match.mQueryLength
if query_coverage >= 99.9:
fully_matched.append(query_id)
elif query_coverage > options.threshold_good_query_coverage:
well_matched.append(query_id)
else:
partially_matched.append(query_id)
aggregate_coverages.append(query_coverage)
# select matches to output
matches, msg = selectMatches(query_id, matches, options, queries_fasta)
if len(matches) > 0:
for match in matches:
if options.query_forward_coordinates:
match.convertCoordinates()
if options.output_format == "map":
options.stdout.write("%s\n" %
"\t".join(map(str, (
match.mQueryId, match.mSbjctId,
match.strand,
"%5.2f" % match.mQueryCoverage,
"%5.2f" % match.mSbjctCoverage,
"%5.2f" % match.mPid,
match.mQueryLength,
match.mSbjctLength,
match.mQueryFrom, match.mQueryTo,
match.mSbjctFrom, match.mSbjctTo,
",".join(
map(str, match.mBlockSizes)),
",".join(
map(str, match.mQueryBlockStarts)),
",".join(
map(str, match.mSbjctBlockStarts)),
))))
elif options.output_format == "psl":
options.stdout.write(str(match) + "\n")
noutput += 1
else:
if outfile_empty:
outfile_empty.write(
"%s\tno matches selected: %s\n" % (query_id, msg))
nempty += 1
if options.output_format == "map":
options.stdout.write("\t".join(("query_id", "sbjct_id", "sstrand", "qcoverage", "scoverage",
"pid", "qlen", "slen", "qfrom", "qto", "sfrom", "sto", "blocks", "qstarts", "sstarts")) + "\n")
elif options.output_format == "psl":
options.stdout.write(Blat.Match().getHeader() + "\n")
################################################
################################################
################################################
# main loop
################################################
nfully_covered = None
matches = []
last_query_id = None
is_complete = True
ninput_lines = 0
skip = 0
iterator = Blat.BlatIterator(infile)
while 1:
try:
match = iterator.next()
except Blat.ParsingError:
iterator = Blat.BlatIterator(infile)
continue
if match is None:
break
ninput_lines += 1
if options.test and ninput_lines > options.test:
break
if match.mQueryId != last_query_id:
if last_query_id:
processChunk(last_query_id, matches)
matches = []
last_query_id = match.mQueryId
matches.append(match)
processChunk(last_query_id, matches)
printHistogram(aggregate_coverages, "aggregate", options)
printHistogram(mapped_coverages, "mapped", options)
if "full" in options.print_matched:
printMatched(fully_matched, "full", options)
if "good" in options.print_matched:
printMatched(well_matched, "good", options)
if "partial" in options.print_matched:
printMatched(partially_matched, "partial", options)
if options.loglevel >= 1:
options.stdlog.write(
"# alignments: ninput=%i, is_complete=%s\n" % (ninput_lines, str(is_complete)))
options.stdlog.write(
"# queries: ninput=%i, noutput=%i\n" % (ninput, noutput))
options.stdlog.write("# individual coverage: full=%i, good=%i, partial=%i\n" % (
nfull_matches, ngood_matches, npartial_matches))
options.stdlog.write("# aggregate coverage: full=%i, good=%i, partial=%i\n" % (
len(fully_matched), len(well_matched), len(partially_matched)))
options.stdlog.write("# omitted queries: total=%i, thresholds=%i, regions=%i, selection=%i\n" %
(nskipped + nqueries_removed_region + nempty,
nskipped, nqueries_removed_region, nempty))
options.stdlog.write("# omitted matches: pid=%i, query_coverage=%i, gaps=%i, regions=%i, nmatches=%i\n" % (
nremoved_pid, nremoved_query_coverage, nremoved_gaps, nremoved_regions, nremoved_nmatches))
E.Stop() | ImportError | dataset/ETHPy150Open CGATOxford/cgat/scripts/psl2map.py/main |
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(version="%prog version: $Id: cgat_script_template.py 2871 2010-03-03 10:20:44Z andreas $",
usage=globals()["__doc__"])
parser.add_option("-c", "--create", dest="create", type="string",
help="create substitution list [%default] ")
parser.add_option("-r", "--regex-token", dest="regex_token", type="string",
help="regular expression for tokens (has to create one pair of brackets) [%default] ")
parser.add_option("-p", "--pattern-sub", dest="pattern_sub", type="string",
help="pattern for substitution [%default] ")
parser.add_option("-a", "--map-tsv-file", dest="apply", type="string",
help="apply substitution list [%default] ")
parser.add_option("-x", "--extended", dest="extended", action="store_true",
help="replace not just with second column in map, but all columns. [%default] ")
parser.add_option("-i", "--invert", dest="invert", action="store_true",
help="pairs of substitution patterns is to,from [%default] ")
parser.add_option("-m", "--multiple", dest="multiple", action="store_true",
help="do multiple substitutions per row [%default] ")
parser.add_option("-e", "--echo", dest="echo", action="store_true",
help="echo susbstituted column [%default] ")
parser.add_option("-k", "--keep", dest="keep", action="store_true",
help="keep column that is substituted [%default] ")
parser.add_option("-f", "--method=filter --filter-method", dest="filter", action="store_true",
help="remove lines not matching [%default] ")
parser.add_option("-y", "--reverse-filter", dest="reverse_filter", action="store_true",
help="remove lines matching [%default] ")
parser.add_option("-n", "--inplace", dest="inplace", action="store_true",
help="do inplace subsitutions of all files on command line [%default] ")
parser.add_option("-b", "--backup", dest="backup", action="store_true",
help="keep backup (with ending .bak) [%default] ")
parser.add_option("--keep-header", dest="keep_header", action="store_true",
help="do not apply transformation to header [%default] ")
parser.add_option("-o", "--columns-token", dest="columns_token", type="string",
help="substitute tokens in columns [%default] ")
parser.add_option("-s", "--select-rows", dest="regex_rows", type="string",
help="regular expression for rows to use. [%default] ")
parser.set_defaults(create=None,
regex_token=None,
pattern_sub="%s",
apply=None,
invert=False,
multiple=False,
columns_token=None,
filter=None,
reverse_filter=None,
inplace=False,
backup=False,
regex_rows=None,
echo=False,
extended=False,
keep=False,
keep_header=False)
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv)
if options.regex_token:
options.regex_token = re.compile(options.regex_token)
if options.regex_rows:
options.regex_rows = re.compile(options.regex_rows)
if options.columns_token:
if options.columns_token != "all":
options.columns_token = map(
lambda x: int(x) - 1, string.split(options.columns_token, ","))
file_id = 0
keys = {}
if options.apply:
infile = IOTools.openFile(options.apply, "r")
for line in infile:
if line[0] == "#":
continue
d = line[:-1].split("\t")
try:
a, b = d[:2]
except __HOLE__:
print "# invalid map skipped in line: %s" % line
continue
if options.invert:
a, b = b, a
if options.extended:
b = "\t".join(d[0] + d[2:])
else:
if options.extended:
b = "\t".join(d[1:])
if not keys.has_key(a):
keys[a] = []
if options.keep:
b = a + "\t" + b
keys[a].append(b)
files = args
if not options.inplace and len(args) == 0:
files = ["-"]
for file in files:
close_infile = False
close_outfile = False
if file == "-":
infile = sys.stdin
outfile = sys.stdout
else:
if options.inplace:
os.rename(file, file + ".bak")
infile = IOTools.openFile(file + ".bak", "r")
outfile = IOTools.openFile(file, "w")
close_infile = True
close_outfile = True
else:
infile = IOTools.openFile(file, "r")
outfile = sys.stdout
close_infile = True
first = True
for line in infile:
if line[0] == "#":
outfile.write(line)
continue
if first:
first = False
if options.keep_header:
outfile.write(line)
continue
if options.regex_rows:
if options.regex_rows.search(line):
outfile.write(line)
continue
new_lines = []
if options.regex_token:
r = options.regex_token.search(line[:-1])
while r:
key = r.group(1)
if key not in keys:
if options.create:
keys[key] = [options.pattern_sub % str(len(keys))]
else:
new_lines.append(line[:-1])
break
for k in keys[key]:
new_lines.append(
line[:r.start(1)] + k + line[r.end(1):-1])
if options.multiple:
r = options.regex_token.search(line[r.end(1):-1])
else:
break
else:
if not options.filter:
new_lines.append(line[:-1])
elif options.columns_token:
data = line[:-1].split("\t")
if options.columns_token == "all":
columns = range(len(data))
else:
columns = options.columns_token
keep = not options.reverse_filter
first_multiple = True
for c in columns:
k = data[c]
if k in keys:
if len(keys[k]) > 1:
if not first_multiple:
raise "warning: could not substitute multiple keys for %s in multiple columns in line: %s" % (
k, line)
first_multiple = False
for v in keys[k]:
if options.echo:
data.append(data[c])
# multiple substitutions: write data now
data[c] = v
if keep:
new_lines.append(string.join(data, "\t"))
keep = False
else:
if options.create:
keys[k] = [options.pattern_sub % str(len(keys))]
data[c] = keys[k][0]
elif options.filter:
keep = False
elif options.reverse_filter:
keep = True
if keep:
new_lines.append(string.join(data, "\t"))
elif options.apply:
for key in keys:
for k in keys[key]:
line = line.replace(key, k)
new_lines.append(line[:-1])
if new_lines:
outfile.write(string.join(new_lines, "\n") + "\n")
if options.create:
create_file = IOTools.openFile(options.create, "w")
for key in keys:
for k in keys[key]:
create_file.write("%s\t%s\n" % (key, str(k)))
create_file.close()
if close_outfile:
outfile.close()
if close_infile:
infile.close()
if options.inplace and not options.backup:
os.remove(file + ".bak")
E.Stop() | ValueError | dataset/ETHPy150Open CGATOxford/cgat/scripts/substitute_tokens.py/main |
def model_choices():
try:
return tuple(sorted([
(u"{0}.{1}".format(app._meta.app_label, app._meta.object_name),
u"{0} - {1}".format(app._meta.app_label, app._meta.object_name))
for app in models.get_models() if 'opps.' in app.__module__]))
except __HOLE__:
return tuple([]) | ImportError | dataset/ETHPy150Open opps/opps/opps/core/forms.py/model_choices |
def test_extend(self):
x = np.arange(0., 11.)
y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
ak = Akima1DInterpolator(x, y)
try:
ak.extend(None, None)
except __HOLE__ as e:
if str(e) != ("Extending a 1D Akima interpolator is not "
"yet implemented"):
raise
except:
raise | NotImplementedError | dataset/ETHPy150Open scipy/scipy/scipy/interpolate/tests/test_interpolate.py/TestAkima1DInterpolator.test_extend |
def get_int_opt(options, optname, default=None):
string = options.get(optname, default)
try:
return int(string)
except TypeError:
raise OptionError('Invalid type %r for option %s; you '
'must give an integer value' % (
string, optname))
except __HOLE__:
raise OptionError('Invalid value %r for option %s; you '
'must give an integer value' % (
string, optname)) | ValueError | dataset/ETHPy150Open joeyb/joeyb-blog/externals/pygments/util.py/get_int_opt |
def shebang_matches(text, regex):
"""
Check if the given regular expression matches the last part of the
shebang if one exists.
>>> from pygments.util import shebang_matches
>>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/startsomethingwith python',
... r'python(2\.\d)?')
True
It also checks for common windows executable file extensions::
>>> shebang_matches('#!C:\\Python2.4\\Python.exe', r'python(2\.\d)?')
True
Parameters (``'-f'`` or ``'--foo'`` are ignored so ``'perl'`` does
the same as ``'perl -e'``)
Note that this method automatically searches the whole string (eg:
the regular expression is wrapped in ``'^$'``)
"""
index = text.find('\n')
if index >= 0:
first_line = text[:index].lower()
else:
first_line = text.lower()
if first_line.startswith('#!'):
try:
found = [x for x in split_path_re.split(first_line[2:].strip())
if x and not x.startswith('-')][-1]
except __HOLE__:
return False
regex = re.compile('^%s(\.(exe|cmd|bat|bin))?$' % regex, re.IGNORECASE)
if regex.search(found) is not None:
return True
return False | IndexError | dataset/ETHPy150Open joeyb/joeyb-blog/externals/pygments/util.py/shebang_matches |
def looks_like_xml(text):
"""
Check if a doctype exists or if we have some tags.
"""
key = hash(text)
try:
return _looks_like_xml_cache[key]
except __HOLE__:
m = doctype_lookup_re.match(text)
if m is not None:
return True
rv = tag_re.search(text[:1000]) is not None
_looks_like_xml_cache[key] = rv
return rv | KeyError | dataset/ETHPy150Open joeyb/joeyb-blog/externals/pygments/util.py/looks_like_xml |
def overall_stats_json(request):
"""Generate JSON containing overall data for use by graphs. The data can
be generated however, and just needs to be a series of x,y points. Any
number of graphs can be generated. The time range is specified as a GET
parameter indicating the number of minutes before now.
The response is a list of graph definitions as dictionaries, and a list of
annotations, and looks something like:
{
'graphs': [
{
'title' : 'Graph Title',
'stacked' : True,
'series' : [
{
'label': 'series-1',
'data': [ [x1,y1], [x2,y2], ... ]
},
...
]
},
...
],
'annotations': [
{
'x': 1332009326000,
'label': 'Some event happened'
},
...
]
}
To add a graph, generate the needed list(s) of [x,y] points and include that
list in a structure with title and label information similar to that described
above. Then append that "graph definition" to the list of graphs to be
displayed.
Note: dates should be sent as ms since epoch (unix time * 1000). Also,
annotations are applied to all of the graphs.
Currently, the data is Endpoint overall_bitrate and overall_clients,
aggregated as an average for each unique lastseen.
"""
graphs = []
annotations = []
DEFAULT_RANGE = 120 # minutes
# Get the requested time range for the data, in minutes.
view_range = request.GET.get('range', DEFAULT_RANGE)
# Ensure the range is an int, falling back to the default if it's not.
try:
view_range = int(view_range)
except __HOLE__:
view_range = DEFAULT_RANGE
range_start_datetime = datetime.datetime.now() - datetime.timedelta(minutes=view_range)
# Prepare the graphs to be sent back.
bitrate_graph = {
'title': 'Overall Bitrate',
'stacked': True,
'series': [],
}
client_graph = {
'title': 'Overall Clients',
'stacked': True,
'series': [],
}
# Retrieve endpoints from within the requested data range.
recent_endpoints = models.Endpoint.objects.filter(
lastseen__gte=range_start_datetime,
lastseen__lte=datetime.datetime.now()
).order_by('-lastseen')
# Assemble the data for each endpoint by group.
endpoints_by_group = {}
# Attributes that can be copied directly.
raw_attrs = ('overall_bitrate', 'overall_clients',)
for endpoint in recent_endpoints:
if endpoint.group not in endpoints_by_group:
endpoints_by_group[endpoint.group] = []
# Send time as a unix timestamp.
endpoint_data = {
'lastseen': int(endpoint.lastseen.strftime('%s')) * 1000,
}
for attr in raw_attrs:
endpoint_data[attr] = getattr(endpoint, attr)
endpoints_by_group[endpoint.group].append(endpoint_data)
for group, endpoints in endpoints_by_group.items():
bitrate_data = []
client_data = []
for point in endpoints:
bitrate_data.append([point['lastseen'], point['overall_bitrate'] / (1000000)])
client_data.append([point['lastseen'], point['overall_clients']])
bitrate_graph['series'].append({
'label': group,
'data': bitrate_data,
})
client_graph['series'].append({
'label': group,
'data': client_data,
})
graphs.append(bitrate_graph)
graphs.append(client_graph)
# SAMPLE GRAPH AND ANNOTATION GENERATION
# Uncomment these to see sample graphs and annotations using data generated
# based on the current time.
# Graphs:
# now = datetime.datetime.now()
# graphs.append({
# 'title': 'Test graph',
# 'stacked': True,
# 'series': [{
# 'label': 'series-' + str(i),
# 'data': [[int((now - datetime.timedelta(minutes=j)).strftime('%s')) * 1000,random.randint(1,11)] for j in range(200)]
# } for i in range(5)]
# })
# Annotations:
# annotations.append({
# 'x': int((datetime.datetime.now() - datetime.timedelta(minutes=12)).strftime('%s')) * 1000,
# 'label': 'Chow!'
# })
# Send the data back as JSON data.
response = http.HttpResponse(content_type='application/json')
response.write(json.dumps({
'graphs': graphs,
'annotations': annotations,
}))
return response
###########################################################################################
# Code which collects the client side system reporting.
########################################################################################### | ValueError | dataset/ETHPy150Open timvideos/streaming-system/website/tracker/views.py/overall_stats_json |
@csrf_exempt
@never_cache
@transaction.atomic
def client_stats(request, group, _now=None):
"""
Save stats about a client.
Args:
request: Django request object.
group: Group to save stats about.
_now: A datetime.datetime object to pretend is "now". Used for testing
only.
Returns:
Django response object.
"""
response, group, user = client_common(request, group)
if response is not None:
return response
try:
data = json.loads(request.POST.get('data', "{}"))
except __HOLE__, e:
response = http.HttpResponse(content_type='application/javascript')
response.write(json.dumps({
'code': error.ERROR_JSON,
'error': 'Invalid JSON: %s' % e,
'next': -1,
}))
return response
data['user-agent'] = request.META['HTTP_USER_AGENT']
data['ip'] = request.META[settings.HTTP_REMOTE_ADDR_META]
if 'HTTP_REFERER' in request.META:
data['referrer'] = request.META['HTTP_REFERER']
# Save the stats
s = models.ClientStats(
group=group,
created_by=user)
if _now is not None:
s.created_on = _now
s.save()
s.from_dict(data)
# Return success
response = http.HttpResponse(content_type='application/javascript')
response.write(json.dumps({
'code': error.SUCCESS,
'next': 5,
}))
return response
###########################################################################################
# Code which collects the endpoint system reporting.
########################################################################################### | ValueError | dataset/ETHPy150Open timvideos/streaming-system/website/tracker/views.py/client_stats |
@csrf_exempt
@never_cache
def endpoint_logs(request):
"""Saves an endpoint server Apache logs."""
response, group, ip = endpoint_common(request)
if response is not None:
return response
# Take a lock on the file
while True:
logfile = file(os.path.join(CONFIG['config']['logdir'], "access-%s-%s.log" % (group, ip)), 'a')
try:
fcntl.lockf(logfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
except __HOLE__:
time.sleep(1)
else:
break
# Write out the log lines
logfile.write(request.POST['data'])
logfile.flush()
# Unlock the file
fcntl.lockf(logfile, fcntl.LOCK_UN)
# Close the file
logfile.close()
# Write out that everything went okay
response = http.HttpResponse(content_type='text/plain')
response.write('OK\n')
return response
# Save some data about flumotion | IOError | dataset/ETHPy150Open timvideos/streaming-system/website/tracker/views.py/endpoint_logs |
@csrf_exempt
@never_cache
def flumotion_logging(request):
"""Saves the client's log files."""
response, group, ip = endpoint_common(request, check_group=False)
if response is not None:
return response
try:
data = json.loads(request.POST.get('data', "{}"))
except __HOLE__, e:
response = http.HttpResponse(content_type='text/plain')
response.write('ERROR %s' % e)
return response
s = models.Flumotion(
identifier=request.POST['identifier'],
recorded_time=request.POST['recorded_time'],
type=request.POST.get('type', ''),
ip=request.META[settings.HTTP_REMOTE_ADDR_META],
data=json.dumps(data),
)
s.save()
# Write out that everything went okay
response = http.HttpResponse(content_type='text/plain')
response.write('OK\n')
return response | ValueError | dataset/ETHPy150Open timvideos/streaming-system/website/tracker/views.py/flumotion_logging |
def handle_noargs(self, **options):
try:
for line in self.handle_inspection(options):
self.stdout.write("%s\n" % line)
except __HOLE__:
raise CommandError("Database inspection isn't supported for the currently selected database backend.") | NotImplementedError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/core/management/commands/inspectdb.py/Command.handle_noargs |
def handle_inspection(self, options):
connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
table2model = lambda table_name: table_name.title().replace('_', '').replace(' ', '').replace('-', '')
cursor = connection.cursor()
yield "# This is an auto-generated Django model module."
yield "# You'll have to do the following manually to clean this up:"
yield "# * Rearrange models' order"
yield "# * Make sure each model has one field with primary_key=True"
yield "# Feel free to rename the models, but don't rename db_table values or field names."
yield "#"
yield "# Also note: You'll have to insert the output of 'django-admin.py sqlcustom [appname]'"
yield "# into your database."
yield ''
yield 'from %s import models' % self.db_module
yield ''
for table_name in connection.introspection.get_table_list(cursor):
yield 'class %s(models.Model):' % table2model(table_name)
try:
relations = connection.introspection.get_relations(cursor, table_name)
except __HOLE__:
relations = {}
try:
indexes = connection.introspection.get_indexes(cursor, table_name)
except NotImplementedError:
indexes = {}
for i, row in enumerate(connection.introspection.get_table_description(cursor, table_name)):
column_name = row[0]
att_name = column_name.lower()
comment_notes = [] # Holds Field notes, to be displayed in a Python comment.
extra_params = {} # Holds Field parameters such as 'db_column'.
# If the column name can't be used verbatim as a Python
# attribute, set the "db_column" for this Field.
if ' ' in att_name or '-' in att_name or keyword.iskeyword(att_name) or column_name != att_name:
extra_params['db_column'] = column_name
# Modify the field name to make it Python-compatible.
if ' ' in att_name:
att_name = att_name.replace(' ', '_')
comment_notes.append('Field renamed to remove spaces.')
if '-' in att_name:
att_name = att_name.replace('-', '_')
comment_notes.append('Field renamed to remove dashes.')
if column_name != att_name:
comment_notes.append('Field name made lowercase.')
if i in relations:
rel_to = relations[i][1] == table_name and "'self'" or table2model(relations[i][1])
field_type = 'ForeignKey(%s' % rel_to
if att_name.endswith('_id'):
att_name = att_name[:-3]
else:
extra_params['db_column'] = column_name
else:
# Calling `get_field_type` to get the field type string and any
# additional parameters and notes.
field_type, field_params, field_notes = self.get_field_type(connection, table_name, row)
extra_params.update(field_params)
comment_notes.extend(field_notes)
# Add primary_key and unique, if necessary.
if column_name in indexes:
if indexes[column_name]['primary_key']:
extra_params['primary_key'] = True
elif indexes[column_name]['unique']:
extra_params['unique'] = True
field_type += '('
if keyword.iskeyword(att_name):
att_name += '_field'
comment_notes.append('Field renamed because it was a Python reserved word.')
# Don't output 'id = meta.AutoField(primary_key=True)', because
# that's assumed if it doesn't exist.
if att_name == 'id' and field_type == 'AutoField(' and extra_params == {'primary_key': True}:
continue
# Add 'null' and 'blank', if the 'null_ok' flag was present in the
# table description.
if row[6]: # If it's NULL...
extra_params['blank'] = True
if not field_type in ('TextField(', 'CharField('):
extra_params['null'] = True
field_desc = '%s = models.%s' % (att_name, field_type)
if extra_params:
if not field_desc.endswith('('):
field_desc += ', '
field_desc += ', '.join(['%s=%r' % (k, v) for k, v in extra_params.items()])
field_desc += ')'
if comment_notes:
field_desc += ' # ' + ' '.join(comment_notes)
yield ' %s' % field_desc
for meta_line in self.get_meta(table_name):
yield meta_line | NotImplementedError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/core/management/commands/inspectdb.py/Command.handle_inspection |
def get_field_type(self, connection, table_name, row):
"""
Given the database connection, the table name, and the cursor row
description, this routine will return the given field type name, as
well as any additional keyword parameters and notes for the field.
"""
field_params = {}
field_notes = []
try:
field_type = connection.introspection.get_field_type(row[1], row)
except __HOLE__:
field_type = 'TextField'
field_notes.append('This field type is a guess.')
# This is a hook for DATA_TYPES_REVERSE to return a tuple of
# (field_type, field_params_dict).
if type(field_type) is tuple:
field_type, new_params = field_type
field_params.update(new_params)
# Add max_length for all CharFields.
if field_type == 'CharField' and row[3]:
field_params['max_length'] = row[3]
if field_type == 'DecimalField':
field_params['max_digits'] = row[4]
field_params['decimal_places'] = row[5]
return field_type, field_params, field_notes | KeyError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/core/management/commands/inspectdb.py/Command.get_field_type |
def weave(self, target, **kwargs):
try:
import aspectlib
except __HOLE__ as exc:
raise ImportError(exc.args, "Please install aspectlib or pytest-benchmark[aspect]")
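# Build an aspect whose wrapper routes every call to the woven target through this benchmark fixture.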
def aspect(function):
def wrapper(*args, **kwargs):
return self(function, *args, **kwargs)
return wrapper
self._cleanup_callbacks.append(aspectlib.weave(target, aspect, **kwargs).rollback) | ImportError | dataset/ETHPy150Open ionelmc/pytest-benchmark/src/pytest_benchmark/fixture.py/BenchmarkFixture.weave |
def parse(self, path):
self.keys = {}
try:
with open(path, "r") as fp:
for line in fp:
line = line.strip()
if not line:
continue
if line.startswith("command="):
# TODO: This is a bit of a hack.. not sure what else could be here
# TODO: Do something with cmd? It'll get overwritten
line = line[line.find("ssh-"):]
l = line.split(' ')
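# Entries have the form 'keytype key [name]'; the trailing name/comment is optional.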
cmd = None
if len(l) == 3:
keytype, key, name = l
else:
keytype, key = l
name = ""
self.keys[(keytype, key)] = name
except __HOLE__ as exc:
if exc.errno != 2: # No such file
raise | IOError | dataset/ETHPy150Open samuel/kokki/kokki/cookbooks/ssh/libraries/utils.py/SSHAuthorizedKeysFile.parse |
def remove_key(self, keytype, key):
try:
self.keys.pop((keytype, key))
except __HOLE__:
return False
return True | KeyError | dataset/ETHPy150Open samuel/kokki/kokki/cookbooks/ssh/libraries/utils.py/SSHAuthorizedKeysFile.remove_key |
def __call__(self, environ, start_response):
"""
Hijack the main loop from the original thread and listen for events on the Redis
and WebSocket file descriptors.
"""
websocket = None
subscriber = self.Subscriber(self._redis_connection)
try:
self.assure_protocol_requirements(environ)
request = WSGIRequest(environ)
if callable(private_settings.WS4REDIS_PROCESS_REQUEST):
private_settings.WS4REDIS_PROCESS_REQUEST(request)
else:
self.process_request(request)
channels, echo_message = self.process_subscriptions(request)
if callable(private_settings.WS4REDIS_ALLOWED_CHANNELS):
channels = list(private_settings.WS4REDIS_ALLOWED_CHANNELS(request, channels))
elif private_settings.WS4REDIS_ALLOWED_CHANNELS is not None:
try:
mod, callback = private_settings.WS4REDIS_ALLOWED_CHANNELS.rsplit('.', 1)
callback = getattr(import_module(mod), callback, None)
if callable(callback):
channels = list(callback(request, channels))
except __HOLE__:
pass
websocket = self.upgrade_websocket(environ, start_response)
logger.debug('Subscribed to channels: {0}'.format(', '.join(channels)))
subscriber.set_pubsub_channels(request, channels)
websocket_fd = websocket.get_file_descriptor()
listening_fds = [websocket_fd]
redis_fd = subscriber.get_file_descriptor()
if redis_fd:
listening_fds.append(redis_fd)
subscriber.send_persited_messages(websocket)
recvmsg = None
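# Main loop: multiplex between the websocket and the Redis subscription until the client disconnects.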
while websocket and not websocket.closed:
ready = self.select(listening_fds, [], [], 4.0)[0]
if not ready:
# flush empty socket
websocket.flush()
for fd in ready:
if fd == websocket_fd:
recvmsg = RedisMessage(websocket.receive())
if recvmsg:
subscriber.publish_message(recvmsg)
elif fd == redis_fd:
sendmsg = RedisMessage(subscriber.parse_response())
if sendmsg and (echo_message or sendmsg != recvmsg):
websocket.send(sendmsg)
else:
logger.error('Invalid file descriptor: {0}'.format(fd))
# Check again that the websocket is closed before sending the heartbeat,
# because the websocket can closed previously in the loop.
if private_settings.WS4REDIS_HEARTBEAT and not websocket.closed:
websocket.send(private_settings.WS4REDIS_HEARTBEAT)
except WebSocketError as excpt:
logger.warning('WebSocketError: {}'.format(excpt), exc_info=sys.exc_info())
response = http.HttpResponse(status=1001, content='Websocket Closed')
except UpgradeRequiredError as excpt:
logger.info('Websocket upgrade required')
response = http.HttpResponseBadRequest(status=426, content=excpt)
except HandshakeError as excpt:
logger.warning('HandshakeError: {}'.format(excpt), exc_info=sys.exc_info())
response = http.HttpResponseBadRequest(content=excpt)
except PermissionDenied as excpt:
logger.warning('PermissionDenied: {}'.format(excpt), exc_info=sys.exc_info())
response = http.HttpResponseForbidden(content=excpt)
except Exception as excpt:
logger.error('Other Exception: {}'.format(excpt), exc_info=sys.exc_info())
response = http.HttpResponseServerError(content=excpt)
else:
response = http.HttpResponse()
finally:
subscriber.release()
if websocket:
websocket.close(code=1001, message='Websocket Closed')
else:
logger.warning('Starting late response on websocket')
status_text = http_client.responses.get(response.status_code, 'UNKNOWN STATUS CODE')
status = '{0} {1}'.format(response.status_code, status_text)
headers = response._headers.values()
if six.PY3:
headers = list(headers)
start_response(force_str(status), headers)
logger.info('Finish non-websocket response with status code: {}'.format(response.status_code))
return response | AttributeError | dataset/ETHPy150Open jrief/django-websocket-redis/ws4redis/wsgi_server.py/WebsocketWSGIServer.__call__ |
def _read(self):
try:
profile_f = open(self.fname)
except __HOLE__:
return
for lineno, line in enumerate(profile_f):
line = line.strip()
if not line or line.startswith("#"):
continue
test_key, platform_key, counts = line.split()
per_fn = self.data[test_key]
per_platform = per_fn[platform_key]
c = [int(count) for count in counts.split(",")]
per_platform['counts'] = c
per_platform['lineno'] = lineno + 1
per_platform['current_count'] = 0
profile_f.close() | IOError | dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/sqlalchemy/testing/profiling.py/ProfileStatsFile._read |
def anchor(parser, token):
"""
Parses a tag that's supposed to be in this format: {% anchor field title %}
"""
bits = [b.strip('"\'') for b in token.split_contents()]
if len(bits) < 2:
raise TemplateSyntaxError, "anchor tag takes at least 1 argument"
try:
title = bits[2]
except __HOLE__:
title = bits[1].capitalize()
return SortAnchorNode(bits[1].strip(), title.strip()) | IndexError | dataset/ETHPy150Open directeur/django-sorting/django_sorting/templatetags/sorting_tags.py/anchor |
def convert_cells_to_floats(csv_cont):
for row in range(len(csv_cont)):
for cell in range(len(csv_cont[row])):
try:
csv_cont[row][cell] = float(csv_cont[row][cell])
except __HOLE__:
pass | ValueError | dataset/ETHPy150Open goyalsid/phageParser/cleanPhages.py/convert_cells_to_floats |
def load_known_hosts(self, filename=None):
"""Load host keys from an openssh :file:`known_hosts`-style file. Can
be called multiple times.
If *filename* is not specified, looks in the default locations i.e. :file:`~/.ssh/known_hosts` and :file:`~/ssh/known_hosts` for Windows.
"""
if filename is None:
filename = os.path.expanduser('~/.ssh/known_hosts')
try:
self._host_keys.load(filename)
except __HOLE__:
# for windows
filename = os.path.expanduser('~/ssh/known_hosts')
try:
self._host_keys.load(filename)
except IOError:
pass
else:
self._host_keys.load(filename) | IOError | dataset/ETHPy150Open ncclient/ncclient/ncclient/transport/ssh.py/SSHSession.load_known_hosts |
def _test_restorer(stack, data):
# We need to test the request's specific Registry. Initialize it here so we
# can use it later (RegistryManager will re-use one preexisting in the
# environ)
registry = Registry()
extra_environ={'paste.throw_errors': False,
'paste.registry': registry}
request_id = restorer.get_request_id(extra_environ)
app = TestApp(stack)
res = app.get('/', extra_environ=extra_environ, expect_errors=True)
# Ensure all the StackedObjectProxies are empty after the RegistryUsingApp
# raises an Exception
for stacked, proxied_obj, test_cleanup in data:
only_key = list(proxied_obj.keys())[0]
try:
assert only_key not in stacked
assert False
except __HOLE__:
# Definitely empty
pass
# Ensure the StackedObjectProxies & Registry 'work' in the simulated
# EvalException context
replace = {'replace': 'dict'}
new = {'new': 'object'}
restorer.restoration_begin(request_id)
try:
for stacked, proxied_obj, test_cleanup in data:
# Ensure our original data magically re-appears in this context
only_key, only_val = list(proxied_obj.items())[0]
assert only_key in stacked and stacked[only_key] == only_val
# Ensure the Registry still works
registry.prepare()
registry.register(stacked, new)
assert 'new' in stacked and stacked['new'] == 'object'
registry.cleanup()
# Back to the original (pre-prepare())
assert only_key in stacked and stacked[only_key] == only_val
registry.replace(stacked, replace)
assert 'replace' in stacked and stacked['replace'] == 'dict'
if test_cleanup:
registry.cleanup()
try:
stacked._current_obj()
assert False
except TypeError:
# Definitely empty
pass
finally:
restorer.restoration_end() | TypeError | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Paste-2.0.1/tests/test_registry.py/_test_restorer |
def clean_addon(self):
addons = []
for a in self.data.getlist('addon'):
try:
addons.append(int(a))
except __HOLE__:
pass
return addons | ValueError | dataset/ETHPy150Open mozilla/addons-server/src/olympia/bandwagon/forms.py/AddonsForm.clean_addon |
def clean_addon_comment(self):
fields = 'addon', 'addon_comment'
rv = {}
for addon, comment in zip(*map(self.data.getlist, fields)):
try:
rv[int(addon)] = comment
except __HOLE__:
pass
return rv | ValueError | dataset/ETHPy150Open mozilla/addons-server/src/olympia/bandwagon/forms.py/AddonsForm.clean_addon_comment |
def make_KeyPress(char,state,keycode):
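# Decode the key-state bitmask: bits 2-3 set the control flag, bits 0-1 the meta flag, and bit 4 the shift flag.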
control = (state & (4+8)) != 0
meta = (state & (1+2)) != 0
shift = (state & 0x10) != 0
if control and char !="\x00":
char = chr(VkKeyScan(ord(char)) & 0xff)
elif control:
char=chr(keycode)
try:
keyname=code2sym_map[keycode]
except __HOLE__:
keyname=""
return KeyPress(char,shift,control,meta,keyname) | KeyError | dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/site-packages/pyreadline/keysyms/keysyms.py/make_KeyPress |
def get_system_username():
"""
Try to determine the current system user's username.
:returns: The username as a unicode string, or an empty string if the
username could not be determined.
"""
try:
result = getpass.getuser()
except (__HOLE__, KeyError):
# KeyError will be raised by os.getpwuid() (called by getuser())
# if there is no corresponding entry in the /etc/passwd file
# (a very restricted chroot environment, for example).
return ''
if not six.PY3:
default_locale = locale.getdefaultlocale()[1]
if not default_locale:
return ''
try:
result = result.decode(default_locale)
except UnicodeDecodeError:
# UnicodeDecodeError - preventive treatment for non-latin Windows.
return ''
return result | ImportError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/contrib/auth/management/__init__.py/get_system_username |
def get_default_username(check_db=True):
"""
Try to determine the current system user's username to use as a default.
:param check_db: If ``True``, requires that the username does not match an
existing ``auth.User`` (otherwise returns an empty string).
:returns: The username, or an empty string if no username can be
determined.
"""
# If the User model has been swapped out, we can't make any assumptions
# about the default user name.
if auth_app.User._meta.swapped:
return ''
default_username = get_system_username()
try:
default_username = unicodedata.normalize('NFKD', default_username)\
.encode('ascii', 'ignore').decode('ascii').replace(' ', '').lower()
except __HOLE__:
return ''
# Run the username validator
try:
auth_app.User._meta.get_field('username').run_validators(default_username)
except exceptions.ValidationError:
return ''
# Don't return the default username if it is already taken.
if check_db and default_username:
try:
auth_app.User._default_manager.get(username=default_username)
except auth_app.User.DoesNotExist:
pass
else:
return ''
return default_username | UnicodeDecodeError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/contrib/auth/management/__init__.py/get_default_username |
def __del__(self):
# remove file for memmap
if hasattr(self, '_data') and hasattr(self._data, 'filename'):
# First, close the file out; happens automatically on del
filename = self._data.filename
del self._data
# Now file can be removed
try:
os.remove(filename)
except __HOLE__:
pass # ignore file that no longer exists | OSError | dataset/ETHPy150Open mne-tools/mne-python/mne/io/base.py/_BaseRaw.__del__ |
@classmethod
def crawl_local(cls, link):
try:
dirents = os.listdir(link.local_path)
except __HOLE__ as e:
TRACER.log('Failed to read %s: %s' % (link.local_path, e), V=1)
return set(), set()
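# Split the directory entries into plain files and subdirectories so the crawler can recurse into the latter.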
files, dirs = partition([os.path.join(link.local_path, fn) for fn in dirents], os.path.isdir)
return set(map(Link.from_filename, files)), set(map(Link.from_filename, dirs)) | OSError | dataset/ETHPy150Open pantsbuild/pex/pex/crawler.py/Crawler.crawl_local |
def _is_redirect(response):
try:
# 2.0.0 <= requests <= 2.2
return response.is_redirect
except __HOLE__:
# requests > 2.2
return (
# use request.sessions conditional
response.status_code in REDIRECT_STATI and
'location' in response.headers
) | AttributeError | dataset/ETHPy150Open getsentry/responses/responses.py/_is_redirect |
def _on_request(self, adapter, request, **kwargs):
match = self._find_match(request)
# TODO(dcramer): find the correct class for this
if match is None:
error_msg = 'Connection refused: {0} {1}'.format(request.method,
request.url)
response = ConnectionError(error_msg)
response.request = request
self._calls.add(request, response)
raise response
if 'body' in match and isinstance(match['body'], Exception):
self._calls.add(request, match['body'])
raise match['body']
headers = {
'Content-Type': match['content_type'],
}
if 'callback' in match: # use callback
status, r_headers, body = match['callback'](request)
if isinstance(body, six.text_type):
body = body.encode('utf-8')
body = BufferIO(body)
headers.update(r_headers)
elif 'body' in match:
if match['adding_headers']:
headers.update(match['adding_headers'])
status = match['status']
body = BufferIO(match['body'])
response = HTTPResponse(
status=status,
body=body,
headers=headers,
preload_content=False,
)
response = adapter.build_response(request, response)
if not match.get('stream'):
response.content # NOQA
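# Mirror real requests behaviour by exposing any Set-Cookie headers as a cookie jar on the mocked response.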
try:
resp_cookies = Cookies.from_request(response.headers['set-cookie'])
response.cookies = cookiejar_from_dict(dict(
(v.name, v.value)
for _, v
in resp_cookies.items()
))
except (KeyError, __HOLE__):
pass
self._calls.add(request, response)
return response | TypeError | dataset/ETHPy150Open getsentry/responses/responses.py/RequestsMock._on_request |
def start(self):
try:
from unittest import mock
except __HOLE__:
import mock
def unbound_on_send(adapter, request, *a, **kwargs):
return self._on_request(adapter, request, *a, **kwargs)
self._patcher = mock.patch('requests.adapters.HTTPAdapter.send',
unbound_on_send)
self._patcher.start() | ImportError | dataset/ETHPy150Open getsentry/responses/responses.py/RequestsMock.start |
def createRequirement(self, name, manPath=None):
if manPath is not None:
reqpath = os.path.join(manPath, name)
else:
reqpath = os.path.join(tempfile.TemporaryDirectory().name, name)
try:
os.makedirs(os.path.join(reqpath, "class"))
except __HOLE__ as e:
pass
self.writeFile(os.path.join(reqpath, "class"), "Base.js", ";")
requirement = ("""
- source: %s
config:
name: %s""" % (reqpath, name))
return requirement
def update_dataset_record(dataset_dir, db_cursor, refresh=True, debug=False):
if debug:
console_handler.setLevel(logging.DEBUG)
logger.debug('update_dataset_record(dataset_dir=%s, db_cursor=%s, refresh=%s, debug=%s) called', dataset_dir, db_cursor, refresh, debug)
def get_directory_size(directory):
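# `du -sk` reports the directory size in kilobytes; `cut -f1` keeps just the number.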
command = "du -sk %s | cut -f1" % directory
logger.debug('executing "%s"', command)
result = execute(command)
assert not result['returncode'], '"%s" failed: %s' % (command, result['stderr'])
logger.debug('stdout = %s', result['stdout'])
return int(result['stdout'])
dataset_dir = os.path.abspath(dataset_dir)
dataset = SceneDataset(default_metadata_required=False, utm_fix=True)
assert dataset.Open(dataset_dir), 'Unable to open %s' % dataset_dir
dataset_size = get_directory_size(dataset_dir)
gcp_count = None
mtl_text = None
if dataset.processor_level.upper() in ['ORTHO', 'L1T', 'MAP']:
logger.debug('Dataset %s is Level 1', dataset_dir)
try:
gcp_path = glob(os.path.join(dataset_dir, 'scene01', '*_GCP.txt'))[0]
gcp_file = open(gcp_path)
# Count the number of lines consisting of 8 numbers with the first number being positive
gcp_count = len([line for line in gcp_file.readlines() if re.match('\d+(\s+-?\d+\.?\d*){7}', line)])
gcp_file.close()
except IndexError: # No GCP file exists
logger.debug('No GCP.txt file found')
try:
mtl_path = glob(os.path.join(dataset_dir, 'scene01', '*_MTL.txt'))[0]
mtl_file = open(mtl_path)
mtl_text = mtl_file.read()
mtl_file.close()
except IndexError: # No MTL file exists
logger.debug('No MTL.txt file found')
try:
xml_path = glob(os.path.join(dataset_dir, 'metadata.xml'))[0]
xml_file = open(xml_path)
xml_text = xml_file.read()
xml_file.close()
except __HOLE__: # No XML file exists
logger.debug('No metadata.xml file found')
xml_text = None
sql = """-- Find dataset_id and acquisition_id for given path
select dataset_id, acquisition_id
from dataset
inner join acquisition using(acquisition_id)
where dataset_path = %s
"""
db_cursor.execute(sql, (dataset_dir,))
result = db_cursor.fetchone()
if result: # Record already exists
if refresh:
logger.info('Updating existing record for %s', dataset_dir)
dataset_id = result[0]
acquisition_id = result[1]
sql = """
insert into processing_level(level_id, level_name)
select nextval('level_id_seq'), upper(%(level_name)s)
where not exists (select level_id from processing_level where level_name = upper(%(level_name)s));
-- Update existing acquisition record if required
update acquisition
set gcp_count = %(gcp_count)s
where acquisition_id = %(acquisition_id)s
and %(gcp_count)s is not null;
update acquisition
set mtl_text = %(mtl_text)s
where acquisition_id = %(acquisition_id)s
and %(mtl_text)s is not null;
update acquisition
set cloud_cover = %(cloud_cover)s
where acquisition_id = %(acquisition_id)s
and %(cloud_cover)s is not null;
update dataset
set level_id = (select level_id from processing_level where upper(level_name) = upper(%(processing_level)s)),
datetime_processed = %(datetime_processed)s,
dataset_size = %(dataset_size)s,
crs = %(crs)s,
ll_x = %(ll_x)s,
ll_y = %(ll_y)s,
lr_x = %(lr_x)s,
lr_y = %(lr_y)s,
ul_x = %(ul_x)s,
ul_y = %(ul_y)s,
ur_x = %(ur_x)s,
ur_y = %(ur_y)s,
x_pixels = %(x_pixels)s,
y_pixels = %(y_pixels)s,
xml_text = %(xml_text)s
where dataset_id = %(dataset_id)s
"""
else:
logger.info('Skipping existing record for %s', dataset_dir)
return
else: # Record doesn't already exist
logger.info('Creating new record for %s', dataset_dir)
dataset_id = None
acquisition_id = None
sql = """-- Create new processing level record if needed
insert into processing_level(level_id, level_name)
select nextval('level_id_seq'), upper(%(level_name)s)
where not exists (select level_id from processing_level where level_name = upper(%(level_name)s));
-- Create new acquisition record if needed
insert into acquisition(
acquisition_id,
satellite_id,
sensor_id,
x_ref,
y_ref,
start_datetime,
end_datetime,
ll_lon,
ll_lat,
lr_lon,
lr_lat,
ul_lon,
ul_lat,
ur_lon,
ur_lat"""
if gcp_count is not None:
sql += """,
gcp_count"""
if mtl_text is not None:
sql += """,
mtl_text"""
sql += """
)
select
nextval('acquisition_id_seq'),
(select satellite_id from satellite where upper(satellite_tag) = upper(%(satellite_tag)s)),
(select sensor_id from sensor inner join satellite using(satellite_id)
where upper(satellite_tag) = upper(%(satellite_tag)s) and upper(sensor_name) = upper(%(sensor_name)s)),
%(x_ref)s,
%(y_ref)s,
%(start_datetime)s,
%(end_datetime)s,
%(ll_lon)s,
%(ll_lat)s,
%(lr_lon)s,
%(lr_lat)s,
%(ul_lon)s,
%(ul_lat)s,
%(ur_lon)s,
%(ur_lat)s"""
if gcp_count is not None:
sql += """,
%(gcp_count)s"""
if mtl_text is not None:
sql += """,
%(mtl_text)s"""
sql += """
where not exists
(select acquisition_id
from acquisition
where satellite_id = (select satellite_id
from satellite
where upper(satellite_tag) = upper(%(satellite_tag)s)
)
and sensor_id = (select sensor_id
from sensor
inner join satellite using(satellite_id)
where upper(satellite_tag) = upper(%(satellite_tag)s)
and upper(sensor_name) = upper(%(sensor_name)s)
)
and x_ref = %(x_ref)s
and y_ref = %(y_ref)s
and start_datetime = %(start_datetime)s
and end_datetime = %(end_datetime)s
);
-- Create new dataset record
insert into dataset(
dataset_id,
acquisition_id,
dataset_path,
level_id,
datetime_processed,
dataset_size,
crs,
ll_x,
ll_y,
lr_x,
lr_y,
ul_x,
ul_y,
ur_x,
ur_y,
x_pixels,
y_pixels,
xml_text
)
select
nextval('dataset_id_seq') as dataset_id,
(select acquisition_id
from acquisition
where satellite_id = (select satellite_id from satellite where upper(satellite_tag) = upper(%(satellite_tag)s))
and sensor_id = (select sensor_id from sensor inner join satellite using(satellite_id)
where upper(satellite_tag) = upper(%(satellite_tag)s)
and upper(sensor_name) = upper(%(sensor_name)s))
and x_ref = %(x_ref)s
and y_ref = %(y_ref)s
and start_datetime = %(start_datetime)s
and end_datetime = %(end_datetime)s
) as acquisition_id,
%(dataset_path)s,
(select level_id from processing_level where upper(level_name) = upper(%(processing_level)s)),
%(datetime_processed)s,
%(dataset_size)s,
%(crs)s,
%(ll_x)s,
%(ll_y)s,
%(lr_x)s,
%(lr_y)s,
%(ul_x)s,
%(ul_y)s,
%(ur_x)s,
%(ur_y)s,
%(x_pixels)s,
%(y_pixels)s,
%(xml_text)s
where not exists
(select dataset_id
from dataset
where dataset_path = %(dataset_path)s
)
;
"""
# same params for insert or update
params = {'acquisition_id': acquisition_id,
'dataset_id': dataset_id,
'satellite_tag': dataset.satellite.TAG,
'sensor_name': dataset.satellite.sensor,
'x_ref': dataset.path_number,
'y_ref': dataset.row_number,
'start_datetime': dataset.scene_start_datetime,
'end_datetime': dataset.scene_end_datetime,
'dataset_path': dataset_dir,
'processing_level': dataset.processor_level,
'datetime_processed': dataset.completion_datetime,
'dataset_size': dataset_size,
'level_name': dataset.processor_level.upper(),
'll_lon': dataset.ll_lon,
'll_lat': dataset.ll_lat,
'lr_lon': dataset.lr_lon,
'lr_lat': dataset.lr_lat,
'ul_lon': dataset.ul_lon,
'ul_lat': dataset.ul_lat,
'ur_lon': dataset.ur_lon,
'ur_lat': dataset.ur_lat,
'crs': dataset.GetProjection(),
'll_x': dataset.ll_x,
'll_y': dataset.ll_y,
'lr_x': dataset.lr_x,
'lr_y': dataset.lr_y,
'ul_x': dataset.ul_x,
'ul_y': dataset.ul_y,
'ur_x': dataset.ur_x,
'ur_y': dataset.ur_y,
'x_pixels': dataset.image_pixels,
'y_pixels': dataset.image_lines,
'gcp_count': gcp_count,
'mtl_text': mtl_text,
'cloud_cover': dataset.cloud_cover_percentage,
'xml_text': xml_text
}
log_multiline(logger.debug, db_cursor.mogrify(sql, params), 'SQL', '\t')
db_cursor.execute(sql, params) | IndexError | dataset/ETHPy150Open GeoscienceAustralia/agdc/deprecated/update_dataset_record.py/update_dataset_record |
def get_method_name(self, raw_post_data, request_format='xml'):
'''
Gets the name of the method to be called given the post data
and the format of the data
'''
if request_format == 'xml':
# xmlrpclib.loads could throw an exception, but this is fine
# since _marshaled_dispatch would throw the same thing
try:
params, method = xmlrpclib.loads(raw_post_data)
return method
except Fault:
return None
else:
try:
# attempt to do a json decode on the data
jsondict = json.loads(raw_post_data)
if not isinstance(jsondict, dict) or 'method' not in jsondict:
return None
else:
return jsondict['method']
except __HOLE__:
return None | ValueError | dataset/ETHPy150Open fp7-ofelia/ocf/optin_manager/src/python/openflow/common/rpc4django/rpcdispatcher.py/RPCDispatcher.get_method_name |
def test_closes_image_on_exception(self):
# This tests that willow closes images when the with is exited with an exception
try:
with self.image.get_willow_image():
self.assertFalse(self.image.file.closed)
raise ValueError("Something went wrong!")
except __HOLE__:
pass
self.assertTrue(self.image.file.closed) | ValueError | dataset/ETHPy150Open torchbox/wagtail/wagtail/wagtailimages/tests/test_models.py/TestGetWillowImage.test_closes_image_on_exception |
def instanciate_class(item, modules):
m = NS_AND_TAG.match(item.tag)
ns, tag = m.groups()
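# Find the module registered for this XML namespace and build the mapped class from the element tree.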
for module in modules:
if module.NAMESPACE == ns:
try:
target = module.ELEMENT_BY_TAG[tag]
return create_class_from_element_tree(target, item)
except __HOLE__:
continue
raise Exception("Unknown class: ns='%s', tag='%s'" % (ns, tag)) | KeyError | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/pysaml2-2.4.0/src/saml2/soap.py/instanciate_class |
def target_function(self, running, data):
module_verbosity = boolify(self.verbosity)
name = threading.current_thread().name
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
print_status(name, 'process is starting...', verbose=module_verbosity)
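# Keep pulling 'user:password' pairs from the shared iterator until it is exhausted or another thread clears the running flag.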
while running.is_set():
try:
line = data.next().split(":")
user = line[0].strip()
password = line[1].strip()
ssh.connect(self.target, int(self.port), timeout=5, username=user, password=password)
except __HOLE__:
break
except paramiko.ssh_exception.SSHException as err:
ssh.close()
print_error("Target: {}:{} {}: {} Username: '{}' Password: '{}'".format(self.target, self.port, name, err, user, password), verbose=module_verbosity)
else:
if boolify(self.stop_on_success):
running.clear()
print_success("Target: {}:{} {} Authentication Succeed - Username: '{}' Password: '{}'".format(self.target, self.port, name, user, password), verbose=module_verbosity)
self.credentials.append((self.target, self.port, user, password))
print_status(name, 'process is terminated.', verbose=module_verbosity) | StopIteration | dataset/ETHPy150Open reverse-shell/routersploit/routersploit/modules/creds/ssh_default.py/Exploit.target_function |
def item(*args, **kwargs):
'''
Return one or more grains
CLI Example:
.. code-block:: bash
salt '*' grains.item os
salt '*' grains.item os osrelease oscodename
Sanitized CLI Example:
.. code-block:: bash
salt '*' grains.item host sanitize=True
'''
ret = {}
for arg in args:
try:
ret[arg] = __grains__[arg]
except __HOLE__:
pass
if salt.utils.is_true(kwargs.get('sanitize')):
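# Scrub grains that have a registered sanitizer (e.g. host identifiers) before returning them.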
for arg, func in six.iteritems(_SANITIZERS):
if arg in ret:
ret[arg] = func(ret[arg])
return ret | KeyError | dataset/ETHPy150Open saltstack/salt/salt/client/ssh/wrapper/grains.py/item |
def _get_interesting_headers(self, lines, start, end, is_modified_file):
"""Returns all headers for a region of a diff.
This scans for all headers that fall within the specified range
of the specified lines on both the original and modified files.
"""
possible_functions = \
self.differ.get_interesting_lines('header', is_modified_file)
if not possible_functions:
raise StopIteration
try:
if is_modified_file:
last_index = self._last_header_index[1]
i1 = lines[start][4]
i2 = lines[end - 1][4]
else:
last_index = self._last_header_index[0]
i1 = lines[start][1]
i2 = lines[end - 1][1]
except __HOLE__:
raise StopIteration
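# Resume scanning from the last header emitted; candidates are ordered by line number, so stop once past the end of the chunk.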
for i in range(last_index, len(possible_functions)):
linenum, line = possible_functions[i]
linenum += 1
if linenum > i2:
break
elif linenum >= i1:
last_index = i
yield linenum, line
if is_modified_file:
self._last_header_index[1] = last_index
else:
self._last_header_index[0] = last_index | IndexError | dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/diffviewer/chunk_generator.py/RawDiffChunkGenerator._get_interesting_headers |
def find_library(name):
possible = ['lib%s.dylib' % name,
'%s.dylib' % name,
'%s.framework/%s' % (name, name)]
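# Try each naming convention against the macOS dynamic linker's search paths.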
for name in possible:
try:
return _dyld_find(name)
except __HOLE__:
continue
return None | ValueError | dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/ctypes/util.py/find_library |
def _findLib_gcc(name):
expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name)
fdout, ccout = tempfile.mkstemp()
os.close(fdout)
cmd = 'if type gcc >/dev/null 2>&1; then CC=gcc; else CC=cc; fi;' \
'$CC -Wl,-t -o ' + ccout + ' 2>&1 -l' + name
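# Linking a throwaway output with -Wl,-t makes the linker trace the full path of each library it resolves.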
try:
f = os.popen(cmd)
trace = f.read()
f.close()
finally:
try:
os.unlink(ccout)
except __HOLE__, e:
if e.errno != errno.ENOENT:
raise
res = re.search(expr, trace)
if not res:
return None
return res.group(0) | OSError | dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/ctypes/util.py/_findLib_gcc |
def _num_version(libname):
# "libxyz.so.MAJOR.MINOR" => [ MAJOR, MINOR ]
parts = libname.split(".")
nums = []
try:
while parts:
nums.insert(0, int(parts.pop()))
except __HOLE__:
pass
return nums or [ sys.maxint ] | ValueError | dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/ctypes/util.py/_num_version |
def _table_needs_update(self, table, temp):
update_table = True
if not os.path.exists(self.table_files_path):
os.makedirs(self.table_files_path)
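# Compare the freshly generated rules against the cached copy on disk; only flag an update when they differ.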
try:
current_table = open("%s/%s" % (self.table_files_path, table), "r+")
if self._tables_match(current_table, temp):
logger.debug("%s table matches current...skipping" % table)
update_table = False
except __HOLE__:
current_table = open("%s/%s" % (self.table_files_path, table), "w")
finally:
current_table.close()
return (current_table, update_table) | IOError | dataset/ETHPy150Open facebookarchive/neti/neti/iptables.py/IPtables._table_needs_update |
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--nth', metavar='N',
help='select the Nth entry (1-based)',
type=int, default=None)
parser.add_argument('-f', '--first', metavar='N',
help='select the N first entires',
type=int, default=None)
parser.add_argument('-v', '--invert-match', default=False, action='store_true',
help='select all entries but the ones that match')
parser.add_argument('-k', '--key', default='name',
help='change the key used for the match (default is name)')
parser.add_argument('-s', '--case-sensitive', default=False, action='store_true')
parser.add_argument('value', metavar='VALUE', nargs='?', default=None,
help='string or numerical value to match')
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('-e', '--numeric-equals', default=False, action='store_true')
group.add_argument('-d', '--numeric-differs', default=False, action='store_true')
group.add_argument('-G', '--greater', default=False, action='store_true')
group.add_argument('-g', '--greater-or-equal', default=False, action='store_true')
group.add_argument('-l', '--less-or-equal', default=False, action='store_true')
group.add_argument('-L', '--less', default=False, action='store_true')
group.add_argument('-T', '--true', default=False, action='store_true')
group.add_argument('-F', '--false', default=False, action='store_true')
args = parser.parse_args()
if (args.value is not None) and (args.false or args.true):
parser.error('cannot specify value for boolean matching')
if sys.stdin.isatty():
parser.error('no input, pipe another btc command output into this command')
l = sys.stdin.read()
if len(l.strip()) == 0:
exit(1)
try:
l = decoder.decode(l)
except __HOLE__:
error('unexpected input: %s' % l)
new = list()
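# Apply the selected comparison (numeric, boolean, or glob match on the chosen key) to every entry.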
for o in l:
try:
if args.numeric_equals:
if float(o[args.key]) == float(args.value):
new.append(o)
elif args.numeric_differs:
if float(o[args.key]) != float(args.value):
new.append(o)
elif args.greater:
if float(o[args.key]) > float(args.value):
new.append(o)
elif args.greater_or_equal:
if float(o[args.key]) >= float(args.value):
new.append(o)
elif args.less_or_equal:
if float(o[args.key]) <= float(args.value):
new.append(o)
elif args.less:
if float(o[args.key]) < float(args.value):
new.append(o)
elif args.true:
if bool(o[args.key]):
new.append(o)
elif args.false:
if not bool(o[args.key]):
new.append(o)
elif args.value is not None:
def case(x):
if args.case_sensitive:
return x
return x.lower()
if fnmatch.fnmatch(case(unicode(o[args.key])), case(unicode(args.value))):
new.append(o)
else:
new.append(o)
except KeyError:
pass
except ValueError as e:
error('value error: %s' % e)
if args.first is not None:
new = new[0:min(args.first,len(new))]
if args.nth is not None:
new = [new[args.nth - 1]]
if args.invert_match:
new = [o for o in l if o not in new]
print(encoder.encode(new)) | ValueError | dataset/ETHPy150Open bittorrent/btc/btc/btc_filter.py/main |
def check_enough_semaphores():
"""Check that the system supports enough semaphores to run the test."""
# minimum number of semaphores available according to POSIX
nsems_min = 256
try:
nsems = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, __HOLE__):
# sysconf not available or setting not available
return
if nsems == -1 or nsems >= nsems_min:
return
raise unittest.SkipTest("The OS doesn't support enough semaphores "
"to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
# | ValueError | dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_multiprocessing.py/check_enough_semaphores |
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except __HOLE__:
pass
else:
return self.assertEqual(value, res)
# For the sanity of Windows users, rather than crashing or freezing in
# multiple ways. | NotImplementedError | dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_multiprocessing.py/BaseTestCase.assertReturnsIfImplemented |
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except __HOLE__:
try:
return self._value
except AttributeError:
raise NotImplementedError
#
# Testcases
# | AttributeError | dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_multiprocessing.py/get_value |
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except __HOLE__:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1) | NotImplementedError | dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_multiprocessing.py/_TestProcess.test_cpu_count |
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except __HOLE__:
return
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0) | NotImplementedError | dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_multiprocessing.py/_TestQueue.test_qsize |
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except __HOLE__:
pass | NotImplementedError | dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_multiprocessing.py/_TestCondition.check_invariant |
@classmethod
def _is_fd_assigned(cls, fd):
try:
os.fstat(fd)
except __HOLE__ as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True | OSError | dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_multiprocessing.py/_TestConnection._is_fd_assigned |
def test_main(run=None):
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except __HOLE__:
raise unittest.SkipTest("OSError raises on RLock creation, see issue 3111!")
check_enough_semaphores()
if run is None:
from test.test_support import run_unittest as run
util.get_temp_dir() # creates temp directory for use by all processes
multiprocessing.get_logger().setLevel(LOG_LEVEL)
ProcessesMixin.pool = multiprocessing.Pool(4)
ThreadsMixin.pool = multiprocessing.dummy.Pool(4)
ManagerMixin.manager.__init__()
ManagerMixin.manager.start()
ManagerMixin.pool = ManagerMixin.manager.Pool(4)
testcases = (
sorted(testcases_processes.values(), key=lambda tc:tc.__name__) +
sorted(testcases_threads.values(), key=lambda tc:tc.__name__) +
sorted(testcases_manager.values(), key=lambda tc:tc.__name__) +
testcases_other
)
loadTestsFromTestCase = unittest.defaultTestLoader.loadTestsFromTestCase
suite = unittest.TestSuite(loadTestsFromTestCase(tc) for tc in testcases)
# (ncoghlan): Whether or not sys.exc_clear is executed by the threading
# module during these tests is at least platform dependent and possibly
# non-deterministic on any given platform. So we don't mind if the listed
# warnings aren't actually raised.
with test_support.check_py3k_warnings(
(".+__(get|set)slice__ has been removed", DeprecationWarning),
(r"sys.exc_clear\(\) not supported", DeprecationWarning),
quiet=True):
run(suite)
ThreadsMixin.pool.terminate()
ProcessesMixin.pool.terminate()
ManagerMixin.pool.terminate()
ManagerMixin.manager.shutdown()
del ProcessesMixin.pool, ThreadsMixin.pool, ManagerMixin.pool | OSError | dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_multiprocessing.py/test_main |
def test_cannot_create_with_bad_type(self):
try:
self.batch.create("")
except __HOLE__:
assert True
else:
assert False | TypeError | dataset/ETHPy150Open nigelsmall/py2neo/test_ext/batman/test_batch.py/WriteBatchTestCase.test_cannot_create_with_bad_type |
def test_cannot_create_with_none(self):
try:
self.batch.create(None)
except __HOLE__:
assert True
else:
assert False | TypeError | dataset/ETHPy150Open nigelsmall/py2neo/test_ext/batman/test_batch.py/WriteBatchTestCase.test_cannot_create_with_none |
def mapping_for_file(self, filename):
"""Get the mapping for a generated filename"""
year = self._filename_year(filename)
try:
return next(m for m in self.mappings(year)
if m['generated_filename'] == filename)
except __HOLE__:
msg = "Mapping for standardized filename {} could not be found"
msg = msg.format(filename)
raise LookupError(msg) | StopIteration | dataset/ETHPy150Open openelections/openelections-core/openelex/base/datasource.py/BaseDatasource.mapping_for_file |
def jurisdiction_mappings(self, filename=None):
"""
Retrieve jurisdictional mappings based on OCD IDs.
Args:
filename: Filename of the CSV file containing jurisdictional
mappings. Default is
openelex/us/{state_abbrev}/mappings/{state_abbrev}.csv.
Returns:
A list of dictionaries containing jurisdiction Open Civic Data
identifiers, jurisdiction names and other metadata about
the jurisdiction. The return dictionaries include a
value for each column in the input CSV.
Example jurisdiction mapping dictionary:
```
{
'ocd_id': 'ocd-division/country:us/state:ar/county:washington',
'fips': '05143',
'name': 'Washington'
}
```
"""
try:
return self._cached_jurisdiction_mappings
except __HOLE__:
if filename is None:
filename = join(self.mappings_dir, self.state + '.csv')
with open(filename, 'rU') as csvfile:
reader = unicodecsv.DictReader(csvfile)
self._cached_jurisdiction_mappings = [row for row in reader]
return self._cached_jurisdiction_mappings | AttributeError | dataset/ETHPy150Open openelections/openelections-core/openelex/base/datasource.py/BaseDatasource.jurisdiction_mappings |
def place_mappings(self, filename=None):
try:
return self._cached_place_mappings
except __HOLE__:
if filename is None:
filename = join(self.mappings_dir, self.state + '_places.csv')
with open(filename, 'rU') as csvfile:
reader = unicodecsv.DictReader(csvfile)
self._cached_place_mappings = [row for row in reader]
return self._cached_place_mappings | AttributeError | dataset/ETHPy150Open openelections/openelections-core/openelex/base/datasource.py/BaseDatasource.place_mappings |
def _counties(self):
"""
Retrieve jurisdictional mappings for a state's counties.
Returns:
A list of dictionaries containing jurisdiction metadata, as
returned by ``jurisdiction_mappings()``.
"""
try:
return self._cached_counties
except __HOLE__:
county_ocd_re = re.compile(r'ocd-division/country:us/state:' +
self.state.lower() + r'/county:[^/]+$')
self._cached_counties = [m for m in self.jurisdiction_mappings()
if county_ocd_re.match(m['ocd_id'])]
return self._cached_counties | AttributeError | dataset/ETHPy150Open openelections/openelections-core/openelex/base/datasource.py/BaseDatasource._counties |
def _election_slug(self, election):
"""
Generate a slug for an election.
Args:
election: Dictionary of election attributes as returned by the
metadata API.
Returns:
A string containing a unique identifier for an election. For
example, "ar-2012-05-22-primary".
"""
# Delete the 'state' key in the election attrs, because it's a
# dict with multiple values we don't care about and we want
# to just pass the value of self.state to election_slug.
# We can probably delete the key from the argument without consequence,
# but to be safe and avoid side effects, copy the argument first.
election_attrs = election.copy()
try:
del election_attrs['state']
except __HOLE__:
pass
return election_slug(self.state, **election_attrs) | KeyError | dataset/ETHPy150Open openelections/openelections-core/openelex/base/datasource.py/BaseDatasource._election_slug |
def _url_paths(self, filename=None):
"""
Load URL metadata from a CSV file.
The CSV file should follow the conventions described at
http://docs.openelections.net/guide/#populating-urlpathscsv
Args:
filename: Path to a URL paths CSV file. Default is
openelex/{state_abbrev}/mappings/url_paths.csv
Returns:
A list of dictionaries, with each dict corresponding to a row
in the CSV file.
"""
if filename is None:
filename = join(self.mappings_dir, 'url_paths.csv')
try:
# We cache the URL paths to avoid having to do multiple filesystem
# reads. We also cache per origin filename to accommodate states
# like Arkansas where we generate multiple URL path files
# from scraping
return self._cached_url_paths[filename]
except __HOLE__:
cached = self._cached_url_paths[filename] = []
with open(filename, 'rU') as csvfile:
reader = unicodecsv.DictReader(csvfile)
for row in reader:
cached.append(self._parse_url_path(row))
return cached | KeyError | dataset/ETHPy150Open openelections/openelections-core/openelex/base/datasource.py/BaseDatasource._url_paths |
def _url_paths_for_election(self, election, filename=None):
"""
Retrieve URL metadata entries for a single election.
Args:
election: Election metadata dictionary as returned by the
elections() method or string containing an election slug.
Returns:
A list of dictionaries, like the return value of
``_url_paths()``.
"""
try:
slug = election['slug']
except __HOLE__:
slug = election
return [p for p in self._url_paths(filename) if p['election_slug'] == slug] | TypeError | dataset/ETHPy150Open openelections/openelections-core/openelex/base/datasource.py/BaseDatasource._url_paths_for_election |