function (string, lengths 79–138k) | label (string, 20 classes) | info (string, lengths 42–261)
---|---|---
def setProperty(self, name, value):
if name == 'voice':
token = self._tokenFromId(value)
self._tts.Voice = token
a, b = E_REG.get(value, E_REG[MSMARY])
self._tts.Rate = int(math.log(self._rateWpm/a, b))
elif name == 'rate':
id = self._tts.Voice.Id
a, b = E_REG.get(id, E_REG[MSMARY])
try:
self._tts.Rate = int(math.log(value/a, b))
except __HOLE__, e:
raise ValueError(str(e))
self._rateWpm = value
elif name == 'volume':
try:
self._tts.Volume = int(round(value*100, 2))
except TypeError, e:
raise ValueError(str(e))
else:
raise KeyError('unknown property %s' % name) | TypeError | dataset/ETHPy150Open parente/pyttsx/pyttsx/drivers/sapi5.py/SAPI5Driver.setProperty |
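The row above is the Windows SAPI5 backend of pyttsx; callers normally reach `setProperty` through the engine front end. A minimal, hedged usage sketch, assuming the usual pyttsx entry points (`init()`, `setProperty()`, `say()`, `runAndWait()`):

```python
# Hedged sketch of the pyttsx engine API that ultimately dispatches to
# SAPI5Driver.setProperty on Windows; the property names match the
# 'voice', 'rate' and 'volume' branches handled above.
import pyttsx

engine = pyttsx.init()              # selects the SAPI5 driver on Windows
engine.setProperty('rate', 150)     # words per minute
engine.setProperty('volume', 0.8)   # 0.0-1.0, scaled to 0-100 by the driver
engine.say('Testing the SAPI5 driver')
engine.runAndWait()
```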
def memoize(fun):
"""A simple memoize decorator for functions supporting (hashable)
positional arguments.
It also provides a cache_clear() function for clearing the cache:
>>> @memoize
... def foo():
... return 1
...
>>> foo()
1
>>> foo.cache_clear()
>>>
"""
@functools.wraps(fun)
def wrapper(*args, **kwargs):
key = (args, frozenset(sorted(kwargs.items())))
with lock:
try:
return cache[key]
except __HOLE__:
ret = cache[key] = fun(*args, **kwargs)
return ret
def cache_clear():
"""Clear cache."""
lock.acquire()
try:
cache.clear()
finally:
lock.release()
lock = threading.RLock()
cache = {}
wrapper.cache_clear = cache_clear
return wrapper | KeyError | dataset/ETHPy150Open giampaolo/psutil/psutil/_common.py/memoize |
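A short usage sketch for the decorator above; keyword arguments are folded into a `frozenset` so the cache key stays hashable, and `cache_clear()` empties the cache. The decorated function here is purely illustrative:

```python
# Usage sketch for the memoize decorator defined above.
@memoize
def add(a, b=0):
    print('computing')   # printed only on a cache miss
    return a + b

add(1, b=2)          # computes and caches -> 3
add(1, b=2)          # served from the cache, nothing printed
add.cache_clear()    # empty the cache
add(1, b=2)          # recomputed
```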
def isfile_strict(path):
"""Same as os.path.isfile() but does not swallow EACCES / EPERM
exceptions, see:
http://mail.python.org/pipermail/python-dev/2012-June/120787.html
"""
try:
st = os.stat(path)
except __HOLE__ as err:
if err.errno in (errno.EPERM, errno.EACCES):
raise
return False
else:
return stat.S_ISREG(st.st_mode) | OSError | dataset/ETHPy150Open giampaolo/psutil/psutil/_common.py/isfile_strict |
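The point of the strict variant is that a permission failure propagates instead of being reported as "not a file". A hedged comparison, where the path is purely illustrative (chosen as one the current user may not be allowed to stat):

```python
import os

path = '/root/.bashrc'      # illustrative path the current user may not be able to stat

os.path.isfile(path)        # returns False even if os.stat failed with EACCES
try:
    isfile_strict(path)     # re-raises OSError(EACCES/EPERM) instead of hiding it
except OSError as err:
    print('permission problem:', err)
```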
def path_exists_strict(path):
"""Same as os.path.exists() but does not swallow EACCES / EPERM
exceptions, see:
http://mail.python.org/pipermail/python-dev/2012-June/120787.html
"""
try:
os.stat(path)
except __HOLE__ as err:
if err.errno in (errno.EPERM, errno.EACCES):
raise
return False
else:
return True | OSError | dataset/ETHPy150Open giampaolo/psutil/psutil/_common.py/path_exists_strict |
def sockfam_to_enum(num):
"""Convert a numeric socket family value to an IntEnum member.
If it's not a known member, return the numeric value itself.
"""
if enum is None:
return num
else: # pragma: no cover
try:
return socket.AddressFamily(num)
except (__HOLE__, AttributeError):
return num | ValueError | dataset/ETHPy150Open giampaolo/psutil/psutil/_common.py/sockfam_to_enum |
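A small sketch of the fallback behaviour, assuming the module-level `enum` is available (Python 3.4+): known numeric families map to `socket.AddressFamily` members, anything else is returned unchanged:

```python
import socket

sockfam_to_enum(int(socket.AF_INET))   # -> <AddressFamily.AF_INET: ...>
sockfam_to_enum(99999)                 # not a valid member -> 99999 returned as-is
```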
def socktype_to_enum(num):
"""Convert a numeric socket type value to an IntEnum member.
If it's not a known member, return the numeric value itself.
"""
if enum is None:
return num
else: # pragma: no cover
try:
return socket.AddressType(num)
except (__HOLE__, AttributeError):
return num | ValueError | dataset/ETHPy150Open giampaolo/psutil/psutil/_common.py/socktype_to_enum |
def get_model(self, file_path):
"""
Loads the model from okapi style file.
>>> from pkg_resources import resource_filename
>>> import testfm
>>> path = resource_filename(testfm.__name__, "data/okapi.tsv")
>>> okapi = Load_Okapi()
>>> model = okapi.get_model(path)
>>> len(model._users)
4
>>> len(model._items)
2
>>> model.getScore(30331, 6731)
0.11887863463700001
@param file_path: str, path of the file to load
@return:
"""
data = self._parse_file(file_path)
# 0 for user factors and 1 for item factors
factors = {0: {}, 1: {}}
for node_type, (data_key, data_value) in data:
try:
factors[node_type][data_key] = data_value
except __HOLE__:
pass
fm = FactorModel(userf=factors[0], itemf=factors[1])
return fm | KeyError | dataset/ETHPy150Open grafos-ml/test.fm/src/testfm/fmio/load_model.py/Load_Okapi.get_model |
def getMessageFromUser(self, prompt=''):
editor = os.environ.get("EDITOR", "/bin/vi")
(fd, name) = tempfile.mkstemp()
if not prompt:
prompt = 'Enter your change log message.'
msg = "\n-----\n%s\n" % prompt
os.write(fd, msg)
os.close(fd)
def _getMessageNoEditor():
sys.stderr.write("Error executing %s. Please set the EDITOR\n"
"environment variable to a valid editor, or enter log message,\n"
"terminated with single '.' (or CTRL+D to cancel)\n" % editor)
rows = []
while 1:
try:
row = raw_input('>> ')
except EOFError:
return None
if row == '.':
# We need a trailing newline
rows.append('')
break
rows.append(row)
return '\n'.join(rows)
class EditorError(Exception):
pass
cmdargs = [editor, name]
try:
try:
# Capture stderr and discard it
retcode = subprocess.call(" ".join(cmdargs), shell=True,
stderr=subprocess.PIPE)
except __HOLE__:
raise EditorError
if retcode != 0:
raise EditorError
except EditorError:
# Error running the editor
msg = _getMessageNoEditor()
if msg is None:
return False
self.message.set(msg)
return True
newMsg = open(name).read()
os.unlink(name)
if newMsg == msg:
return False
if newMsg[-len(msg):]:
newMsg = newMsg[:-len(msg)]
newMsg = string.strip(newMsg)
newMsg += '\n'
self.setMessage(newMsg)
return True | OSError | dataset/ETHPy150Open sassoftware/conary/conary/changelog.py/ChangeLog.getMessageFromUser |
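The method above follows a common pattern: write a template to a temporary file, hand it to `$EDITOR`, and fall back to reading stdin when the editor fails. A condensed, hedged sketch of that pattern (an illustrative helper, not the conary API):

```python
import os
import subprocess
import tempfile

def edit_message(template='\n-----\nEnter your change log message.\n'):
    """Illustrative $EDITOR-on-a-tempfile helper; returns None if the editor fails."""
    editor = os.environ.get('EDITOR', '/bin/vi')
    fd, name = tempfile.mkstemp()
    os.write(fd, template.encode())
    os.close(fd)
    try:
        if subprocess.call('%s %s' % (editor, name), shell=True) != 0:
            return None                  # caller can fall back to reading stdin
        with open(name) as fh:
            return fh.read()
    finally:
        os.unlink(name)
```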
def prof_leftup(self, event=None):
if len(self.map.shape) != 2:
return
if self.rbbox is not None:
zdc = wx.ClientDC(self.panel.canvas)
zdc.SetLogicalFunction(wx.XOR)
zdc.SetBrush(wx.TRANSPARENT_BRUSH)
zdc.SetPen(wx.Pen('White', 2, wx.SOLID))
zdc.ResetBoundingBox()
if not is_wxPhoenix:
zdc.BeginDrawing()
zdc.DrawLine(*self.rbbox)
if not is_wxPhoenix:
zdc.EndDrawing()
self.rbbox = None
if self.zoom_ini is None or self.lastpoint[0] is None:
return
x0 = int(self.zoom_ini[2])
x1 = int(self.lastpoint[0])
y0 = int(self.zoom_ini[3])
y1 = int(self.lastpoint[1])
dx, dy = abs(x1-x0), abs(y1-y0)
self.lastpoint, self.zoom_ini = [None, None], None
if dx < 2 and dy < 2:
self.zoom_ini = None
return
outdat = []
if dy > dx:
_y0 = min(int(y0), int(y1+0.5))
_y1 = max(int(y0), int(y1+0.5))
for iy in range(_y0, _y1):
ix = int(x0 + (iy-int(y0))*(x1-x0)/(y1-y0))
outdat.append((ix, iy))
else:
_x0 = min(int(x0), int(x1+0.5))
_x1 = max(int(x0), int(x1+0.5))
for ix in range(_x0, _x1):
iy = int(y0 + (ix-int(x0))*(y1-y0)/(x1-x0))
outdat.append((ix, iy))
x, y, z = [], [], []
for ix, iy in outdat:
x.append(ix)
y.append(iy)
z.append(self.panel.conf.data[iy,ix])
self.prof_dat = dy>dx, outdat
if self.prof_plotter is not None:
try:
self.prof_plotter.Raise()
self.prof_plotter.clear()
except (__HOLE__, PyDeadObjectError):
self.prof_plotter = None
if self.prof_plotter is None:
self.prof_plotter = PlotFrame(self, title='Profile')
self.prof_plotter.panel.report_leftdown = self.prof_report_coords
xlabel, y2label = 'Pixel (x)', 'Pixel (y)'
if dy > dx:
x, y = y, x
xlabel, y2label = y2label, xlabel
self.prof_plotter.panel.clear() # reset_config()
if len(self.title) < 1:
self.title = os.path.split(self.xrmfile.filename)[1]
opts = dict(linewidth=2, marker='+', markersize=3,
show_legend=True, xlabel=xlabel)
self.prof_plotter.plot(x, z, title=self.title, color='blue',
zorder=20, xmin=min(x)-3, xmax=max(x)+3,
ylabel='counts', label='counts', **opts)
self.prof_plotter.oplot(x, y, y2label=y2label, label=y2label,
zorder=3, side='right', color='#771111', **opts)
self.prof_plotter.panel.unzoom_all()
self.prof_plotter.Show()
self.zoom_ini = None
self.zoom_mode.SetSelection(0)
self.panel.cursor_mode = 'zoom' | AttributeError | dataset/ETHPy150Open xraypy/xraylarch/plugins/wx/mapimageframe.py/MapImageFrame.prof_leftup |
def run(self):
logger.info(u'Starting update loop.')
try:
while True:
self.update()
time.sleep(self.refresh_rate)
except (KeyboardInterrupt, __HOLE__):
pass
except Exception as e:
logger.exception(u"Found exception %s, exiting.", e)
finally:
self.quit() | SystemExit | dataset/ETHPy150Open rbarrois/mpdlcd/mpdlcd/lcdrunner.py/MpdRunner.run |
def gen_zonal_stats(
vectors, raster,
layer=0,
band_num=1,
nodata=None,
affine=None,
stats=None,
all_touched=False,
categorical=False,
category_map=None,
add_stats=None,
raster_out=False,
prefix=None,
geojson_out=False, **kwargs):
"""Zonal statistics of raster values aggregated to vector geometries.
Parameters
----------
vectors: path to a vector source or geo-like python objects
raster: ndarray or path to a GDAL raster source
If ndarray is passed, the ``affine`` kwarg is required.
layer: int or string, optional
If `vectors` is a path to a fiona source,
specify the vector layer to use either by name or number.
defaults to 0
band_num: int, optional
If `raster` is a GDAL source, the band number to use (counting from 1).
defaults to 1.
nodata: float, optional
If `raster` is a GDAL source, this value overrides any NODATA value
specified in the file's metadata.
If `None`, the file's metadata's NODATA value (if any) will be used.
defaults to `None`.
affine: Affine instance
required only for ndarrays, otherwise it is read from src
stats: list of str, or space-delimited str, optional
Which statistics to calculate for each zone.
All possible choices are listed in ``utils.VALID_STATS``.
defaults to ``DEFAULT_STATS``, a subset of these.
all_touched: bool, optional
Whether to include every raster cell touched by a geometry, or only
those having a center point within the polygon.
defaults to `False`
categorical: bool, optional
category_map: dict
A dictionary mapping raster values to human-readable categorical names.
Only applies when categorical is True
add_stats: dict
with names and functions of additional stats to compute, optional
raster_out: boolean
Whether to include the masked numpy array for each feature, optional
Each feature dictionary will have the following additional keys:
mini_raster_array: The clipped and masked numpy array
mini_raster_affine: Affine transformation
mini_raster_nodata: NoData Value
prefix: string
add a prefix to the keys (default: None)
geojson_out: boolean
Return list of GeoJSON-like features (default: False)
Original feature geometry and properties will be retained
with zonal stats appended as additional properties.
Use with `prefix` to ensure unique and meaningful property names.
Returns
-------
generator of dicts (if geojson_out is False)
Each item corresponds to a single vector feature and
contains keys for each of the specified stats.
generator of geojson features (if geojson_out is True)
GeoJSON-like Feature as python dict
"""
stats, run_count = check_stats(stats, categorical)
# Handle 1.0 deprecations
transform = kwargs.get('transform')
if transform:
warnings.warn("GDAL-style transforms will disappear in 1.0. "
"Use affine=Affine.from_gdal(*transform) instead",
DeprecationWarning)
if not affine:
affine = Affine.from_gdal(*transform)
ndv = kwargs.get('nodata_value')
if ndv:
warnings.warn("Use `nodata` instead of `nodata_value`", DeprecationWarning)
if not nodata:
nodata = ndv
cp = kwargs.get('copy_properties')
if cp:
warnings.warn("Use `geojson_out` to preserve feature properties",
DeprecationWarning)
with Raster(raster, affine, nodata, band_num) as rast:
features_iter = read_features(vectors, layer)
for i, feat in enumerate(features_iter):
geom = shape(feat['geometry'])
if 'Point' in geom.type:
geom = boxify_points(geom, rast)
geom_bounds = tuple(geom.bounds)
fsrc = rast.read(bounds=geom_bounds)
# create ndarray of rasterized geometry
rv_array = rasterize_geom(geom, like=fsrc, all_touched=all_touched)
assert rv_array.shape == fsrc.shape
# Mask the source data array with our current feature
# we take the logical_not to flip 0<->1 for the correct mask effect
# we also mask out nodata values explicitly
masked = np.ma.MaskedArray(
fsrc.array,
mask=np.logical_or(
fsrc.array == fsrc.nodata,
np.logical_not(rv_array)))
if masked.compressed().size == 0:
# nothing here, fill with None and move on
feature_stats = dict([(stat, None) for stat in stats])
if 'count' in stats: # special case, zero makes sense here
feature_stats['count'] = 0
else:
if run_count:
keys, counts = np.unique(masked.compressed(), return_counts=True)
pixel_count = dict(zip([np.asscalar(k) for k in keys],
[np.asscalar(c) for c in counts]))
if categorical:
feature_stats = dict(pixel_count)
if category_map:
feature_stats = remap_categories(category_map, feature_stats)
else:
feature_stats = {}
if 'min' in stats:
feature_stats['min'] = float(masked.min())
if 'max' in stats:
feature_stats['max'] = float(masked.max())
if 'mean' in stats:
feature_stats['mean'] = float(masked.mean())
if 'count' in stats:
feature_stats['count'] = int(masked.count())
# optional
if 'sum' in stats:
feature_stats['sum'] = float(masked.sum())
if 'std' in stats:
feature_stats['std'] = float(masked.std())
if 'median' in stats:
feature_stats['median'] = float(np.median(masked.compressed()))
if 'majority' in stats:
feature_stats['majority'] = float(key_assoc_val(pixel_count, max))
if 'minority' in stats:
feature_stats['minority'] = float(key_assoc_val(pixel_count, min))
if 'unique' in stats:
feature_stats['unique'] = len(list(pixel_count.keys()))
if 'range' in stats:
try:
rmin = feature_stats['min']
except __HOLE__:
rmin = float(masked.min())
try:
rmax = feature_stats['max']
except KeyError:
rmax = float(masked.max())
feature_stats['range'] = rmax - rmin
for pctile in [s for s in stats if s.startswith('percentile_')]:
q = get_percentile(pctile)
pctarr = masked.compressed()
feature_stats[pctile] = np.percentile(pctarr, q)
if 'nodata' in stats:
featmasked = np.ma.MaskedArray(fsrc.array, mask=np.logical_not(rv_array))
feature_stats['nodata'] = float((featmasked == fsrc.nodata).sum())
if add_stats is not None:
for stat_name, stat_func in add_stats.items():
feature_stats[stat_name] = stat_func(masked)
if raster_out:
feature_stats['mini_raster_array'] = masked
feature_stats['mini_raster_affine'] = fsrc.affine
feature_stats['mini_raster_nodata'] = fsrc.nodata
if prefix is not None:
prefixed_feature_stats = {}
for key, val in feature_stats.items():
newkey = "{}{}".format(prefix, key)
prefixed_feature_stats[newkey] = val
feature_stats = prefixed_feature_stats
if geojson_out:
for key, val in feature_stats.items():
if 'properties' not in feat:
feat['properties'] = {}
feat['properties'][key] = val
yield feat
else:
yield feature_stats | KeyError | dataset/ETHPy150Open perrygeo/python-rasterstats/src/rasterstats/main.py/gen_zonal_stats |
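`gen_zonal_stats` is the generator behind the list-returning `zonal_stats` helper exported by the same package; a hedged usage sketch, where the shapefile and raster paths are placeholders:

```python
from rasterstats import zonal_stats

# 'polygons.shp' and 'elevation.tif' are placeholder paths.
stats = zonal_stats('polygons.shp', 'elevation.tif',
                    stats=['count', 'min', 'max', 'mean'])
print(stats[0])   # e.g. {'count': 75, 'min': 12.0, 'max': 34.0, 'mean': 22.4}

# Per-category pixel counts instead of summary statistics:
categories = zonal_stats('polygons.shp', 'landcover.tif', categorical=True)
```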
def convert(self, value):
value = _force_dict(value)
errors = {}
result = {}
for name, field in self.fields.iteritems():
try:
result[name] = field(value.get(name))
except __HOLE__, e:
errors[name] = e
if errors:
raise MultipleValidationErrors(errors)
return result | ValidationError | dataset/ETHPy150Open mitsuhiko/fungiform/fungiform/forms.py/Mapping.convert |
def convert(self, value):
value = self._remove_empty(_force_list(value))
if self.min_size is not None and len(value) < self.min_size:
message = self.messages['too_small']
if message is None:
message = self.ngettext(
u'Please provide at least %d item.',
u'Please provide at least %d items.',
self.min_size) % self.min_size
raise ValidationError(message)
if self.max_size is not None and len(value) > self.max_size:
message = self.messages['too_big']
if message is None:
message = self.ngettext(
u'Please provide no more than %d item.',
u'Please provide no more than %d items.',
self.max_size) % self.max_size
raise ValidationError(message)
result = []
errors = {}
for idx, item in value:
try:
result.append(self.field(item))
except __HOLE__, e:
errors[idx] = e
if errors:
raise MultipleValidationErrors(errors)
return result | ValidationError | dataset/ETHPy150Open mitsuhiko/fungiform/fungiform/forms.py/Multiple.convert |
def convert(self, value):
if isinstance(value, datetime):
return value
value = _to_string(value)
if not value:
if self.required:
message = self.messages['required']
if message is None:
message = self.gettext(u'This field is required.')
raise ValidationError(message)
return None
try:
return parse_datetime(value, tzinfo=self.tzinfo,
date_formats=self.date_formats,
time_formats=self.time_formats)
except __HOLE__:
message = self.messages['invalid_date']
if message is None:
message = self.gettext('Please enter a valid date.')
raise ValidationError(message) | ValueError | dataset/ETHPy150Open mitsuhiko/fungiform/fungiform/forms.py/DateTimeField.convert |
def convert(self, value):
if isinstance(value, date):
return value
value = _to_string(value)
if not value:
if self.required:
message = self.messages['required']
if message is None:
message = self.gettext(u'This field is required.')
raise ValidationError(message)
return None
try:
return parse_date(value, date_formats=self.date_formats)
except __HOLE__:
message = self.messages['invalid_date']
if message is None:
message = self.gettext('Please enter a valid date.')
raise ValidationError(message) | ValueError | dataset/ETHPy150Open mitsuhiko/fungiform/fungiform/forms.py/DateField.convert |
def convert(self, value):
value = _to_string(value)
if not value:
if self.required:
message = self.messages['required']
if message is None:
message = self.gettext(u'This field is required.')
raise ValidationError(message)
return None
try:
value = float(value)
except __HOLE__:
message = self.messages['no_float']
if message is None:
message = self.gettext('Please enter a floating-point number.')
raise ValidationError(message)
if self.min_value is not None and value < self.min_value:
message = self.messages['too_small']
if message is None:
message = self.gettext(u'Ensure this value is greater than or '
u'equal to %s.') % self.min_value
raise ValidationError(message)
if self.max_value is not None and value > self.max_value:
message = self.messages['too_big']
if message is None:
message = self.gettext(u'Ensure this value is less than or '
u'equal to %s.') % self.max_value
raise ValidationError(message)
return float(value) | ValueError | dataset/ETHPy150Open mitsuhiko/fungiform/fungiform/forms.py/FloatField.convert |
def convert(self, value):
value = _to_string(value)
if not value:
if self.required:
message = self.messages['required']
if message is None:
message = self.gettext(u'This field is required.')
raise ValidationError(message)
return None
try:
value = int(value)
except __HOLE__:
message = self.messages['no_integer']
if message is None:
message = self.gettext('Please enter a whole number.')
raise ValidationError(message)
if self.min_value is not None and value < self.min_value:
message = self.messages['too_small']
if message is None:
message = self.gettext(u'Ensure this value is greater than or '
u'equal to %s.') % self.min_value
raise ValidationError(message)
if self.max_value is not None and value > self.max_value:
message = self.messages['too_big']
if message is None:
message = self.gettext(u'Ensure this value is less than or '
u'equal to %s.') % self.max_value
raise ValidationError(message)
return int(value) | ValueError | dataset/ETHPy150Open mitsuhiko/fungiform/fungiform/forms.py/IntegerField.convert |
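A hedged sketch of how the bounded-integer conversion above is exercised, assuming the field constructor accepts the `min_value`/`max_value` it reads and that fields are invoked by calling them, as `Mapping.convert` earlier in this file does:

```python
# Assumed constructor keywords; convert() above only reads self.min_value,
# self.max_value and self.required.
field = IntegerField(min_value=1, max_value=10)

field('7')        # -> 7
field('')         # -> None (a ValidationError if the field were required)
try:
    field('42')   # above max_value
except ValidationError as exc:
    print(exc)
```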
def __get__(self, obj, type=None):
try:
return (obj or type).fields[self.name]
except __HOLE__:
raise AttributeError(self.name) | KeyError | dataset/ETHPy150Open mitsuhiko/fungiform/fungiform/forms.py/FieldDescriptor.__get__ |
def validate(self, data=None, from_flat=True):
"""Validate the form against the data passed. If no data is provided
the form data of the current request is taken. By default a flat
representation of the data is assumed. If you already have a non-flat
representation of the data (JSON for example) you can disable that
with ``from_flat=False``.
"""
if data is None:
data = self._autodiscover_data()
if from_flat:
data = decode_form_data(data)
self.raw_data = data
# for each field in the root that requires validation on value
# omission we add `None` into the raw data dict. Because the
# implicit switch between initial data and user submitted data
# only happens on the "root level" for obvious reasons we only
# have to hook the data in here.
for name, field in self._root_field.fields.iteritems():
if field.validate_on_omission and name not in self.raw_data:
self.raw_data.setdefault(name)
d = self.data.copy()
d.update(self.raw_data)
errors = {}
try:
data = self._root_field(d)
except __HOLE__, e:
errors = e.unpack(self)
self.errors = errors
# every time we validate, we invalidate the csrf token if there
# was one.
if self.csrf_protected:
# FIXME: do we really want action here?
invalidate_csrf_token(self._get_session(), self.action)
if errors:
return False
self.data.update(data)
return True
# extra functionality that has to be implemented | ValidationError | dataset/ETHPy150Open mitsuhiko/fungiform/fungiform/forms.py/FormBase.validate |
def handle_charref(self, name):
# XXX workaround for a bug in HTMLParser. Remove this once
# it's fixed in all supported versions.
# http://bugs.python.org/issue13633
if name.startswith('x'):
real_name = int(name.lstrip('x'), 16)
elif name.startswith('X'):
real_name = int(name.lstrip('X'), 16)
else:
real_name = int(name)
try:
data = chr(real_name)
except (__HOLE__, OverflowError) as e:
data = "\N{REPLACEMENT CHARACTER}"
self.handle_data(data) | ValueError | dataset/ETHPy150Open akalongman/sublimetext-codeformatter/codeformatter/lib/htmlbeautifier/bs4/builder/_htmlparser.py/BeautifulSoupHTMLParser.handle_charref |
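The workaround decodes hex (`&#x41;`) and decimal (`&#65;`) character references itself and substitutes U+FFFD when the code point is out of range; a standalone sketch of the same decoding logic:

```python
def decode_charref(name):
    """Decode the numeric part of an HTML character reference, e.g. 'x41' or '65'."""
    if name.lower().startswith('x'):
        codepoint = int(name[1:], 16)
    else:
        codepoint = int(name)
    try:
        return chr(codepoint)
    except (ValueError, OverflowError):
        return '\N{REPLACEMENT CHARACTER}'

decode_charref('x41')       # 'A'   (from '&#x41;')
decode_charref('65')        # 'A'   (from '&#65;')
decode_charref('1114112')   # 0x110000 is past the Unicode range -> U+FFFD
```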
def test_cons_slicing():
"""Check that cons slicing works as expected"""
cons = HyCons("car", "cdr")
assert cons[0] == "car"
assert cons[1:] == "cdr"
try:
cons[:]
assert True is False
except IndexError:
pass
try:
cons[1]
assert True is False
except __HOLE__:
pass | IndexError | dataset/ETHPy150Open hylang/hy/tests/models/test_cons.py/test_cons_slicing |
def test_cons_replacing():
"""Check that assigning to a cons works as expected"""
cons = HyCons("foo", "bar")
cons[0] = "car"
assert cons == HyCons("car", "bar")
cons[1:] = "cdr"
assert cons == HyCons("car", "cdr")
try:
cons[:] = "foo"
assert True is False
except __HOLE__:
pass | IndexError | dataset/ETHPy150Open hylang/hy/tests/models/test_cons.py/test_cons_replacing |
def get_converter(self, x):
'get the converter interface instance for x, or None'
if not len(self):
return None # nothing registered
#DISABLED idx = id(x)
#DISABLED cached = self._cached.get(idx)
#DISABLED if cached is not None: return cached
converter = None
classx = getattr(x, '__class__', None)
if classx is not None:
converter = self.get(classx)
if isinstance(x, np.ndarray) and x.size:
xravel = x.ravel()
try:
# pass the first value of x that is not masked back to
# get_converter
if not np.all(xravel.mask):
# some elements are not masked
converter = self.get_converter(
xravel[np.argmin(xravel.mask)])
return converter
except __HOLE__:
# not a masked_array
# Make sure we don't recurse forever -- it's possible for
# ndarray subclasses to continue to return subclasses and
# not ever return a non-subclass for a single element.
next_item = xravel[0]
if (not isinstance(next_item, np.ndarray) or
next_item.shape != x.shape):
converter = self.get_converter(next_item)
return converter
if converter is None and iterable(x):
for thisx in x:
# Make sure that recursing might actually lead to a solution,
# if we are just going to re-examine another item of the same
# kind, then do not look at it.
if classx and classx != getattr(thisx, '__class__', None):
converter = self.get_converter(thisx)
return converter
#DISABLED self._cached[idx] = converter
return converter | AttributeError | dataset/ETHPy150Open nipy/nitime/nitime/_mpl_units.py/Registry.get_converter |
def preBuildPage(self, site, page, context, data):
"""
Special call as we have changed the API for this.
We have two calling conventions:
- The new one, which passes page, context, data
- The deprecated one, which also passes the site (Now accessible via the page)
"""
for plugin in self.plugins:
# Find the correct calling convention
new = [page, context, data]
deprecated = [site, page, context, data]
arg_lists = dict((len(l), l) for l in [deprecated, new])
try:
# Try to find the best calling convention
n_args = len(getargspec(plugin.preBuildPage).args)
# Just use the new calling convention if there's fancy usage of
# *args, **kwargs that we can't control.
arg_list = arg_lists.get(n_args, new)
except __HOLE__:
# If we can't get the number of args, use the new one.
arg_list = new
# Call with the best calling convention we have.
# If that doesn't work, then we'll let the error escalate.
context, data = plugin.preBuildPage(*arg_list)
return context, data | NotImplementedError | dataset/ETHPy150Open koenbok/Cactus/cactus/plugin/manager.py/PluginManager.preBuildPage |
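The arity check above is a general way to support two calling conventions at once. A hedged standalone sketch of the same idea using `inspect` (the helper name is illustrative, not the Cactus API):

```python
import inspect

def call_with_best_convention(hook, site, page, context, data):
    """Pick the deprecated (site, page, context, data) signature or the newer
    (page, context, data) one based on the hook's declared argument count."""
    new = [page, context, data]
    deprecated = [site, page, context, data]
    try:
        n_args = len(inspect.getfullargspec(hook).args)
    except TypeError:            # C callables without introspection support
        n_args = len(new)
    args = deprecated if n_args == len(deprecated) else new
    return hook(*args)
```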
@requires(str, 'uri')
def fetch(self, environ, request, uri):
args = {
'uri': uri,
'after': request.args.get('after', 0)
}
try:
args['limit'] = int(request.args.get('limit'))
except __HOLE__:
args['limit'] = None
except ValueError:
return BadRequest("limit should be integer")
if request.args.get('parent') is not None:
try:
args['parent'] = int(request.args.get('parent'))
root_id = args['parent']
except ValueError:
return BadRequest("parent should be integer")
else:
args['parent'] = None
root_id = None
plain = request.args.get('plain', '0') == '0'
reply_counts = self.comments.reply_count(uri, after=args['after'])
if args['limit'] == 0:
root_list = []
else:
root_list = list(self.comments.fetch(**args))
if not root_list:
raise NotFound
if root_id not in reply_counts:
reply_counts[root_id] = 0
try:
nested_limit = int(request.args.get('nested_limit'))
except TypeError:
nested_limit = None
except ValueError:
return BadRequest("nested_limit should be integer")
rv = {
'id' : root_id,
'total_replies' : reply_counts[root_id],
'hidden_replies' : reply_counts[root_id] - len(root_list),
'replies' : self._process_fetched_list(root_list, plain)
}
# We are only checking for one level deep comments
if root_id is None:
for comment in rv['replies']:
if comment['id'] in reply_counts:
comment['total_replies'] = reply_counts[comment['id']]
if nested_limit is not None:
if nested_limit > 0:
args['parent'] = comment['id']
args['limit'] = nested_limit
replies = list(self.comments.fetch(**args))
else:
replies = []
else:
args['parent'] = comment['id']
replies = list(self.comments.fetch(**args))
else:
comment['total_replies'] = 0
replies = []
comment['hidden_replies'] = comment['total_replies'] - len(replies)
comment['replies'] = self._process_fetched_list(replies, plain)
return JSON(rv, 200) | TypeError | dataset/ETHPy150Open posativ/isso/isso/views/comments.py/API.fetch |
def __init__(self, walkers=100, **kwargs):
try:
import emcee
except __HOLE__:
raise ImportError("The emcee package needs to be installed in order to use EmceeSampler")
self.emcee = emcee
self.walkers = walkers
super(EmceeSampler, self).__init__(**kwargs) | ImportError | dataset/ETHPy150Open tensorprob/tensorprob/tensorprob/samplers/emcee.py/EmceeSampler.__init__ |
def sample(self, variables, cost, gradient=None, samples=None):
# Check if variables is iterable
try:
iter(variables)
except __HOLE__:
raise ValueError("Variables parameter is not iterable")
inits = self.session.run(variables)
for v in variables:
if not isinstance(v, tf.Variable):
raise ValueError("Parameter {} is not a tensorflow variable".format(v))
def objective(xs):
feed_dict = { k: v for k, v in zip(variables, xs) }
out = self.session.run(cost, feed_dict=feed_dict)
if np.isnan(out):
return np.inf
return -out
all_inits = self.emcee.utils.sample_ball(inits, [1e-1] * len(inits), self.walkers)
sampler = self.emcee.EnsembleSampler(self.walkers, len(variables), objective)
samples = 1 if samples is None else samples
sampler.random_state = np.random.mtrand.RandomState(np.random.randint(1)).get_state()
pos, lnprob, rstate = sampler.run_mcmc(all_inits, samples)
return sampler.chain | TypeError | dataset/ETHPy150Open tensorprob/tensorprob/tensorprob/samplers/emcee.py/EmceeSampler.sample |
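A hedged sketch of the underlying emcee API the wrapper drives; the Gaussian log-probability is only an illustrative target, and argument and attribute names have shifted between emcee 2.x and 3.x:

```python
import numpy as np
import emcee

def log_prob(x):
    return -0.5 * np.sum(x ** 2)              # standard normal, illustrative target

ndim, nwalkers, nsteps = 2, 100, 500
p0 = 0.1 * np.random.randn(nwalkers, ndim)    # small ball of initial walkers

sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob)
sampler.run_mcmc(p0, nsteps)
samples = sampler.chain    # emcee 2.x layout; emcee 3.x prefers sampler.get_chain()
```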
def startGraph():
# We maintain this globally to make it accessible, pylint: disable=W0603
global graph
if Options.shouldCreateGraph():
try:
from graphviz import Digraph # pylint: disable=F0401,I0021
graph = Digraph('G')
except __HOLE__:
warning("Cannot import graphviz module, no graphing capability.") | ImportError | dataset/ETHPy150Open kayhayen/Nuitka/nuitka/optimizations/Graphs.py/startGraph |
@unittest.skip("FIXME: broken")
def test_avg_std(self):
# Use integration to test distribution average and standard deviation.
# Only works for distributions which do not consume variates in pairs
g = random.Random()
N = 5000
x = [i/float(N) for i in xrange(1,N)]
for variate, args, mu, sigmasqrd in [
(g.uniform, (1.0,10.0), (10.0+1.0)/2, (10.0-1.0)**2/12),
(g.triangular, (0.0, 1.0, 1.0/3.0), 4.0/9.0, 7.0/9.0/18.0),
(g.expovariate, (1.5,), 1/1.5, 1/1.5**2),
(g.paretovariate, (5.0,), 5.0/(5.0-1),
5.0/((5.0-1)**2*(5.0-2))),
(g.weibullvariate, (1.0, 3.0), gamma(1+1/3.0),
gamma(1+2/3.0)-gamma(1+1/3.0)**2) ]:
g.random = x[:].pop
y = []
for i in xrange(len(x)):
try:
y.append(variate(*args))
except __HOLE__:
pass
s1 = s2 = 0
for e in y:
s1 += e
s2 += (e - mu) ** 2
N = len(y)
self.assertAlmostEqual(s1/N, mu, 2)
self.assertAlmostEqual(s2/(N-1), sigmasqrd, 2) | IndexError | dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_random.py/TestDistributions.test_avg_std |
def test_main(verbose=None):
testclasses = [WichmannHill_TestBasicOps,
MersenneTwister_TestBasicOps,
TestDistributions,
TestModule]
if test_support.is_jython:
del MersenneTwister_TestBasicOps.test_genrandbits
del MersenneTwister_TestBasicOps.test_referenceImplementation
del MersenneTwister_TestBasicOps.test_setstate_middle_arg
del MersenneTwister_TestBasicOps.test_strong_reference_implementation
try:
random.SystemRandom().random()
except __HOLE__:
pass
else:
testclasses.append(SystemRandom_TestBasicOps)
test_support.run_unittest(*testclasses)
# verify reference counting
import sys
if verbose and hasattr(sys, "gettotalrefcount"):
counts = [None] * 5
for i in xrange(len(counts)):
test_support.run_unittest(*testclasses)
counts[i] = sys.gettotalrefcount()
print counts | NotImplementedError | dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_random.py/test_main |
def write(data):
""" Write data to STDOUT """
if not isinstance(data, str):
data = json.dumps(data)
sys.stdout.write(data)
if not data.endswith('\n'):
sys.stdout.write('\n')
try:
sys.stdout.flush()
except __HOLE__:
sys.exit() | IOError | dataset/ETHPy150Open srusskih/SublimeJEDI/sublime_jedi/daemon.py/write |
def _parameters_for_completion(self):
""" Get function / class' constructor parameters completions list
:rtype: list of str
"""
completions = []
try:
in_call = self.script.call_signatures()[0]
except IndexError:
in_call = None
parameters = get_function_parameters(in_call)
for parameter in parameters:
try:
name, value = parameter
except __HOLE__:
name = parameter[0]
value = None
if value is None:
completions.append((name, '${1:%s}' % name))
else:
completions.append((name + '\t' + value,
'%s=${1:%s}' % (name, value)))
return completions | ValueError | dataset/ETHPy150Open srusskih/SublimeJEDI/sublime_jedi/daemon.py/JediFacade._parameters_for_completion |
def _complete_call_assigments(self):
""" Get function or class parameters and build Sublime Snippet string
for completion
:rtype: str
"""
completions = []
complete_all = auto_complete_function_params == 'all'
try:
call_definition = self.script.call_signatures()[0]
except IndexError:
call_definition = None
parameters = get_function_parameters(call_definition)
for index, parameter in enumerate(parameters):
try:
name, value = parameter
except __HOLE__:
name = parameter[0]
value = None
if value is None:
completions.append('${%d:%s}' % (index + 1, name))
elif complete_all:
completions.append('%s=${%d:%s}' % (name, index + 1, value))
return ", ".join(completions) | ValueError | dataset/ETHPy150Open srusskih/SublimeJEDI/sublime_jedi/daemon.py/JediFacade._complete_call_assigments |
def context_builder(request, **kwargs):
''' (request object, [kwargs...]) -> response dict
Builds query via request item contents overriden by kwargs.
links_from and instance properties
----------------------------------
* Request object SHOULD HAVE "links_from" property as string which holds
type of what we are trying to list. For ex: if "links_from" is "user"
that means, we're going to list links from user.
* If "links_from" is "subscriptions", returns links from subscripted
channels of current user. If current user is not authenticated, returns
all links.
* If "links_from" is "user", returns links posted by user. "instance"
keyword supplied and it must be an instance of User model.
* If "links_from" is "channel", returns links posted to channel.
"instance" keyword supplied and it must contain an instance of Channel
model.
* If "links_from" is "likes", returns links liked by user. "instance"
keyword supplied and it must contain an instance of User model.
* Request object can have "days" property which is integer, that limits
query in time.
ordering and limiting
---------------------
* Request object can be supplied with "days" property which is positive
integer to limit links in time.
* Request object can be supplied with "ordering" property which is
string that can contain "contreversial", "top" or "latest" to get
links in that way.
'''
from links.models import Link
from preferences.models import UserPreferences
from channels.models import Subscription
from datetime import datetime
from datetime import timedelta
from django.core.paginator import Paginator
from django.core.paginator import EmptyPage
from django.core.paginator import PageNotAnInteger
from django.utils.translation import ugettext as _
from qhonuskan_votes.utils import get_vote_model
response = {
"links_from": kwargs.get("links_from"),
"instance": kwargs.get("instance"),
"days": kwargs.get("days"),
"ordering": get_in(
request.GET, "ordering", [
"controversial", "top", "latest"], "hot"),
}
if 'highlight' in request.GET:
try:
response['highlight'] = int(request.GET['highlight'])
except __HOLE__:
pass
response['title'] = {
"subscriptions": _("Posts From Your Subscripted Channels"),
"user": _("Posts From %s") % response['instance'],
"channel": _("Posts From %s Channel") % response['instance'],
"likes": _("Posts liked by %s") % response['instance'],
}.get(response['links_from'], _("All Posts Shared on Linkfloyd"))
# is_authenticated method hits db on every call, so i cached it with this.
user_is_authenticated = request.user.is_authenticated()
query = Q()
if response['links_from'] == "subscriptions" and user_is_authenticated:
# TODO: this line can be optimised:
query = query & Q(channel_id__in=[subscription.channel for \
subscription in Subscription.objects.filter(user=request.user).select_related("channel")])
elif response['links_from'] == "channel":
query = query & Q(channel=response['instance'])
elif response['links_from'] == "user":
query = query & Q(posted_by=response['instance'])
elif response['links_from'] == "likes":
# TODO: this line can be optimised:
vote_model = get_vote_model('links.LinkVote')
votes = vote_model.objects.filter(voter=response['instance'], value=1
).select_related("object")
query = query & Q(id__in=[vote.object.id for vote in votes])
if response['days']:
query = query & Q(
posted_at__gte=datetime.today() - timedelta(days=response['days']))
if user_is_authenticated:
preferences = UserPreferences.objects.get(user=request.user)
query = query & Q(rating__lte=preferences.max_rating)
else:
query = query & Q(rating__lte=1)
links = Link.objects.filter(query)
if user_is_authenticated:
links = links.extra(select={
'is_owned': 'posted_by_id=%s' % request.user.id,
'is_subscribed': 'SELECT COUNT(*) FROM links_subscription WHERE '
'user_id=%s '
'AND '
'id=links_subscription.link_id' % request.user.id,
'is_voted_up': 'SELECT COUNT(*) FROM links_linkvote WHERE '
'voter_id=%s '
'AND '
'object_id=links_link.id '
'AND '
'value=1' % request.user.id,
'is_voted_down': 'SELECT COUNT(*) FROM links_linkvote WHERE '
'voter_id=%s '
'AND '
'object_id=links_link.id '
'AND '
'value=-1' % request.user.id})
links = links.select_related("posted_by", "channel").order_by({
"hot": "-updated_at",
"controversial": "-comment_score",
"top": "-vote_score",
"latest": "-posted_at",
}[response['ordering']])
paginator = Paginator(links, 25)
page = request.GET.get('page', 1)
try:
response['links'] = paginator.page(page)
except PageNotAnInteger:
response['links'] = paginator.page(1)
except EmptyPage:
response['links'] = paginator.page(paginator.num_pages)
return response | ValueError | dataset/ETHPy150Open linkfloyd/linkfloyd/linkfloyd/links/utils.py/context_builder |
def ustr(s, encoding="utf-8"):
""" Convert argument to unicode string.
"""
if isinstance(s, str):
return s
try:
return s.decode(encoding)
except __HOLE__:
return str(s) | AttributeError | dataset/ETHPy150Open nigelsmall/py2neo/py2neo/compat.py/ustr |
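On Python 3 the helper above behaves as follows: text passes through, bytes are decoded, and anything without a `.decode` method falls back to `str()`:

```python
ustr('köln')             # already text -> returned unchanged
ustr(b'k\xc3\xb6ln')     # bytes -> decoded as UTF-8 -> 'köln'
ustr(42)                 # no .decode attribute -> str(42) -> '42'
```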
def __get__(self, instance, instance_type=None):
try:
return super(WidgyGenericForeignKey, self).__get__(instance, instance_type)
except __HOLE__:
# The model for this content type couldn't be loaded. Use an
# UnknownWidget instead.
from widgy.models import UnknownWidget
ret = UnknownWidget(getattr(instance, self.ct_field), getattr(instance, self.fk_field), instance)
ret.node = instance
ret.warn()
return ret | AttributeError | dataset/ETHPy150Open fusionbox/django-widgy/widgy/generic/__init__.py/WidgyGenericForeignKey.__get__ |
def find_template(self, name, dirs=None):
"""
RemovedInDjango20Warning: An internal method to lookup the template
name in all the configured loaders.
"""
key = self.cache_key(name, dirs)
try:
result = self.find_template_cache[key]
except __HOLE__:
result = None
for loader in self.loaders:
try:
template, display_name = loader(name, dirs)
except TemplateDoesNotExist:
pass
else:
origin = Origin(
name=display_name,
template_name=name,
loader=loader,
)
result = template, origin
break
self.find_template_cache[key] = result
if result:
return result
else:
self.template_cache[key] = TemplateDoesNotExist
raise TemplateDoesNotExist(name) | KeyError | dataset/ETHPy150Open django/django/django/template/loaders/cached.py/Loader.find_template |
@staticmethod
def lookup_class(xsi_type):
try:
return stix.lookup_extension(xsi_type, default=VocabString)
except __HOLE__:
return VocabString | ValueError | dataset/ETHPy150Open STIXProject/python-stix/stix/common/vocabs.py/VocabString.lookup_class |
@property
def path(self):
"""Return the config file path creating basedir, if needed."""
path = self._get_path()
try:
os.makedirs(os.path.dirname(path), mode=0o700)
except __HOLE__ as e:
if e.errno != errno.EEXIST:
raise
return path | OSError | dataset/ETHPy150Open jkbrzt/httpie/httpie/config.py/BaseConfigDict.path |
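The `errno.EEXIST` guard is the pre-3.2 spelling of "create the directory if it is missing"; on newer interpreters the same effect can be had with `exist_ok` (the directory name below is illustrative):

```python
import os

config_dir = os.path.expanduser('~/.example-cli')     # illustrative location
os.makedirs(config_dir, mode=0o700, exist_ok=True)    # Python 3.2+ equivalent of the EEXIST guard
```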
def load(self):
try:
with open(self.path, 'rt') as f:
try:
data = json.load(f)
except ValueError as e:
raise ValueError(
'Invalid %s JSON: %s [%s]' %
(type(self).__name__, str(e), self.path)
)
self.update(data)
except __HOLE__ as e:
if e.errno != errno.ENOENT:
raise | IOError | dataset/ETHPy150Open jkbrzt/httpie/httpie/config.py/BaseConfigDict.load |
def delete(self):
try:
os.unlink(self.path)
except __HOLE__ as e:
if e.errno != errno.ENOENT:
raise | OSError | dataset/ETHPy150Open jkbrzt/httpie/httpie/config.py/BaseConfigDict.delete |
def _migrate_implicit_content_type(self):
"""Migrate the removed implicit_content_type config option"""
try:
implicit_content_type = self.pop('implicit_content_type')
except __HOLE__:
pass
else:
if implicit_content_type == 'form':
self['default_options'].insert(0, '--form')
self.save()
self.load() | KeyError | dataset/ETHPy150Open jkbrzt/httpie/httpie/config.py/Config._migrate_implicit_content_type |
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Title':
Title_ = child_.text
Title_ = self.gds_validate_string(Title_, node, 'Title')
self.Title = Title_
elif nodeName_ == 'Description':
obj_ = stix_common_binding.StructuredTextType.factory()
obj_.build(child_)
self.add_Description(obj_)
elif nodeName_ == 'Short_Description':
obj_ = stix_common_binding.StructuredTextType.factory()
obj_.build(child_)
self.add_Short_Description(obj_)
elif nodeName_ == 'CVE_ID':
CVE_ID_ = child_.text
CVE_ID_ = self.gds_validate_string(CVE_ID_, node, 'CVE_ID')
self.CVE_ID = CVE_ID_
elif nodeName_ == 'OSVDB_ID':
sval_ = child_.text
try:
ival_ = int(sval_)
except (__HOLE__, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ <= 0:
raise_parse_error(child_, 'requires positiveInteger')
ival_ = self.gds_validate_integer(ival_, node, 'OSVDB_ID')
self.OSVDB_ID = ival_
elif nodeName_ == 'Source':
Source_ = child_.text
Source_ = self.gds_validate_string(Source_, node, 'Source')
self.Source = Source_
elif nodeName_ == 'CVSS_Score':
obj_ = CVSSVectorType.factory()
obj_.build(child_)
self.set_CVSS_Score(obj_)
elif nodeName_ == 'Discovered_DateTime':
obj_ = stix_common_binding.DateTimeWithPrecisionType.factory()
obj_.build(child_)
self.set_Discovered_DateTime(obj_)
elif nodeName_ == 'Published_DateTime':
obj_ = stix_common_binding.DateTimeWithPrecisionType.factory()
obj_.build(child_)
self.set_Published_DateTime(obj_)
elif nodeName_ == 'Affected_Software':
obj_ = AffectedSoftwareType.factory()
obj_.build(child_)
self.set_Affected_Software(obj_)
elif nodeName_ == 'References':
obj_ = stix_common_binding.ReferencesType.factory()
obj_.build(child_)
self.set_References(obj_)
# end class VulnerabilityType | TypeError | dataset/ETHPy150Open STIXProject/python-stix/stix/bindings/exploit_target.py/VulnerabilityType.buildChildren |
def addItemBefore(self, caption, icon, command, itemToAddBefore):
"""Add an item before some item. If the given item does not exist the
item is added at the end of the menu. Icon and command can be null,
but a caption must be given.
@param caption:
the text for the menu item
@param icon:
the icon for the menu item
@param command:
the command for the menu item
@param itemToAddBefore:
the item that will be after the new item
@raise ValueError:
"""
if caption is None:
raise ValueError, 'caption cannot be null'
newItem = MenuItem(caption, icon, command, self)
if itemToAddBefore in self._menuItems:
try:
index = self._menuItems.index(itemToAddBefore)
except __HOLE__:
index = -1
self._menuItems.insert(index, newItem)
else:
self._menuItems.append(newItem)
self.requestRepaint()
return newItem | ValueError | dataset/ETHPy150Open rwl/muntjac/muntjac/ui/menu_bar.py/MenuBar.addItemBefore |
def addItemBefore(self, caption, icon, command, itemToAddBefore):
"""Add an item before some item. If the given item does not exist the
item is added at the end of the menu. Icon and command can be null,
but a caption must be given.
@param caption:
the text for the menu item
@param icon:
the icon for the menu item
@param command:
the command for the menu item
@param itemToAddBefore:
the item that will be after the new item
@raise ValueError:
If the item is checkable and thus cannot have children.
"""
if self.isCheckable():
raise ValueError, 'A checkable item cannot have children'
newItem = None
if self.hasChildren() and itemToAddBefore in self._itsChildren:
try:
index = self._itsChildren.index(itemToAddBefore)
except __HOLE__:
index = -1
newItem = MenuItem(caption, icon, command, self._menu)
newItem.setParent(self)
self._itsChildren.append(index, newItem)
else:
newItem = self.addItem(caption, icon, command)
self._menu.requestRepaint()
return newItem | ValueError | dataset/ETHPy150Open rwl/muntjac/muntjac/ui/menu_bar.py/MenuItem.addItemBefore |
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(DOES_NOT_EXIST, INVALID_FORM_DATA,
NOT_LOGGED_IN, PERMISSION_DENIED)
@webapi_request_fields(
required=dict({
'filediff_id': {
'type': int,
'description': 'The ID of the file diff the comment is on.',
},
'first_line': {
'type': int,
'description': 'The line number the comment starts at.',
},
'num_lines': {
'type': int,
'description': 'The number of lines the comment spans.',
},
}, **BaseDiffCommentResource.REQUIRED_CREATE_FIELDS),
optional=dict({
'interfilediff_id': {
'type': int,
'description': 'The ID of the second file diff in the '
'interdiff the comment is on.',
},
}, **BaseDiffCommentResource.OPTIONAL_CREATE_FIELDS),
allow_unknown=True,
)
def create(self, request, filediff_id, interfilediff_id=None,
*args, **kwargs):
"""Creates a new diff comment.
This will create a new diff comment on this review. The review
must be a draft review.
"""
try:
review_request = \
resources.review_request.get_object(request, *args, **kwargs)
review = resources.review.get_object(request, *args, **kwargs)
except __HOLE__:
return DOES_NOT_EXIST
if not resources.review.has_modify_permissions(request, review):
return self.get_no_access_error(request)
filediff = None
interfilediff = None
invalid_fields = {}
try:
filediff = FileDiff.objects.get(
pk=filediff_id,
diffset__history__review_request=review_request)
except ObjectDoesNotExist:
invalid_fields['filediff_id'] = \
['This is not a valid filediff ID']
if filediff and interfilediff_id:
if interfilediff_id == filediff.id:
invalid_fields['interfilediff_id'] = \
['This cannot be the same as filediff_id']
else:
try:
interfilediff = FileDiff.objects.get(
pk=interfilediff_id,
diffset__history=filediff.diffset.history)
except ObjectDoesNotExist:
invalid_fields['interfilediff_id'] = \
['This is not a valid interfilediff ID']
if invalid_fields:
return INVALID_FORM_DATA, {
'fields': invalid_fields,
}
new_comment = self.create_comment(
review=review,
filediff=filediff,
interfilediff=interfilediff,
fields=('filediff', 'interfilediff', 'first_line', 'num_lines'),
**kwargs)
review.comments.add(new_comment)
return 201, {
self.item_result_key: new_comment,
} | ObjectDoesNotExist | dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/webapi/resources/review_diff_comment.py/ReviewDiffCommentResource.create |
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED)
@webapi_request_fields(
optional=dict({
'first_line': {
'type': int,
'description': 'The line number the comment starts at.',
},
'num_lines': {
'type': int,
'description': 'The number of lines the comment spans.',
},
}, **BaseDiffCommentResource.OPTIONAL_UPDATE_FIELDS),
allow_unknown=True,
)
def update(self, request, *args, **kwargs):
"""Updates a diff comment.
This can update the text or line range of an existing comment.
"""
try:
resources.review_request.get_object(request, *args, **kwargs)
review = resources.review.get_object(request, *args, **kwargs)
diff_comment = self.get_object(request, *args, **kwargs)
except __HOLE__:
return DOES_NOT_EXIST
# Determine whether or not we're updating the issue status.
if self.should_update_issue_status(diff_comment, **kwargs):
return self.update_issue_status(request, self, *args, **kwargs)
if not resources.review.has_modify_permissions(request, review):
return self.get_no_access_error(request)
self.update_comment(diff_comment, ('first_line', 'num_lines'),
**kwargs)
return 200, {
self.item_result_key: diff_comment,
} | ObjectDoesNotExist | dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/webapi/resources/review_diff_comment.py/ReviewDiffCommentResource.update |
def __getitem__(self, key):
"""Return portion of self defined by key. If the key involves a slice
then a list will be returned (if key is a single slice) or a matrix
(if key was a tuple involving a slice).
Examples
========
>>> from sympy import Matrix, I
>>> m = Matrix([
... [1, 2 + I],
... [3, 4 ]])
If the key is a tuple that doesn't involve a slice then that element
is returned:
>>> m[1, 0]
3
When a tuple key involves a slice, a matrix is returned. Here, the
first column is selected (all rows, column 0):
>>> m[:, 0]
Matrix([
[1],
[3]])
If the slice is not a tuple then it selects from the underlying
list of elements that are arranged in row order and a list is
returned if a slice is involved:
>>> m[0]
1
>>> m[::2]
[1, 3]
"""
if isinstance(key, tuple):
i, j = key
try:
i, j = self.key2ij(key)
return self._mat[i*self.cols + j]
except (__HOLE__, IndexError):
if isinstance(i, slice):
# XXX remove list() when PY2 support is dropped
i = list(range(self.rows))[i]
elif is_sequence(i):
pass
else:
i = [i]
if isinstance(j, slice):
# XXX remove list() when PY2 support is dropped
j = list(range(self.cols))[j]
elif is_sequence(j):
pass
else:
j = [j]
return self.extract(i, j)
else:
# row-wise decomposition of matrix
if isinstance(key, slice):
return self._mat[key]
return self._mat[a2idx(key)] | TypeError | dataset/ETHPy150Open sympy/sympy/sympy/matrices/dense.py/DenseMatrix.__getitem__ |
def equals(self, other, failing_expression=False):
"""Applies ``equals`` to corresponding elements of the matrices,
trying to prove that the elements are equivalent, returning True
if they are, False if any pair is not, and None (or the first
failing expression if failing_expression is True) if it cannot
be decided if the expressions are equivalent or not. This is, in
general, an expensive operation.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy.abc import x
>>> from sympy import cos
>>> A = Matrix([x*(x - 1), 0])
>>> B = Matrix([x**2 - x, 0])
>>> A == B
False
>>> A.simplify() == B.simplify()
True
>>> A.equals(B)
True
>>> A.equals(2)
False
See Also
========
sympy.core.expr.equals
"""
try:
if self.shape != other.shape:
return False
rv = True
for i in range(self.rows):
for j in range(self.cols):
ans = self[i, j].equals(other[i, j], failing_expression)
if ans is False:
return False
elif ans is not True and rv is True:
rv = ans
return rv
except __HOLE__:
return False | AttributeError | dataset/ETHPy150Open sympy/sympy/sympy/matrices/dense.py/DenseMatrix.equals |
def __eq__(self, other):
try:
if self.shape != other.shape:
return False
if isinstance(other, Matrix):
return self._mat == other._mat
elif isinstance(other, MatrixBase):
return self._mat == Matrix(other)._mat
except __HOLE__:
return False | AttributeError | dataset/ETHPy150Open sympy/sympy/sympy/matrices/dense.py/DenseMatrix.__eq__ |
def _get_raw_data(self, image, format_, quality, image_info=None, progressive=False):
# Increase (but never decrease) PIL buffer size
ImageFile.MAXBLOCK = max(ImageFile.MAXBLOCK, image.size[0] * image.size[1])
bf = BufferIO()
params = {
'format': format_,
'quality': quality,
'optimize': 1,
}
# keeps icc_profile
if 'icc_profile' in image_info:
params['icc_profile'] = image_info['icc_profile']
raw_data = None
if format_ == 'JPEG' and progressive:
params['progressive'] = True
try:
# Do not save unnecessary exif data for smaller thumbnail size
params.pop('exif', {})
image.save(bf, **params)
except (IOError, __HOLE__):
# Try without optimization.
params.pop('optimize')
image.save(bf, **params)
else:
raw_data = bf.getvalue()
finally:
bf.close()
return raw_data | OSError | dataset/ETHPy150Open mariocesar/sorl-thumbnail/sorl/thumbnail/engines/pil_engine.py/Engine._get_raw_data |
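A hedged sketch of the save-with-fallback pattern above, using a synthetic image: try an optimized progressive JPEG first, then retry without `optimize` if Pillow rejects the combination:

```python
from io import BytesIO
from PIL import Image

image = Image.new('RGB', (200, 200), 'white')   # synthetic, illustrative image
buf = BytesIO()
try:
    image.save(buf, format='JPEG', quality=85, optimize=True, progressive=True)
except (IOError, OSError):
    # Retry without optimization, mirroring the fallback above.
    image.save(buf, format='JPEG', quality=85, progressive=True)
raw = buf.getvalue()
```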
@classmethod
def initialize(cls):
"""Initialize the class after plugin load."""
super().initialize()
if cls.module is None:
return
# This is tricky. Unfortunately pyflakes chooses to store
# builtins in a class variable and union that with the builtins option
# on every execution. This results in the builtins never being removed.
# To fix that, we get a reference to the pyflakes.checker module and
# pyflakes.checker.Checker class used by flake8. We can then reset
# the Checker.builtIns class variable on each execution.
try:
from pkg_resources import iter_entry_points
except __HOLE__:
persist.printf('WARNING: {} could not import pkg_resources.iter_entry_points'.format(cls.name))
else:
for entry in iter_entry_points('flake8.extension'):
check = entry.load()
if check.name == 'pyflakes':
from pyflakes import checker
cls.pyflakes_checker_module = checker
cls.pyflakes_checker_class = check
break | ImportError | dataset/ETHPy150Open SublimeLinter/SublimeLinter-flake8/linter.py/Flake8.initialize |
@must_be_valid_project
@must_be_logged_in
@must_have_permission(READ)
def node_setting(auth, node, **kwargs):
#check institutions:
try:
email_domains = [email.split('@')[1] for email in auth.user.emails]
inst = Institution.find_one(Q('email_domains', 'in', email_domains))
if inst not in auth.user.affiliated_institutions:
auth.user.affiliated_institutions.append(inst)
auth.user.save()
except (__HOLE__, NoResultsFound):
pass
ret = _view_project(node, auth, primary=True)
addons_enabled = []
addon_enabled_settings = []
for addon in node.get_addons():
addons_enabled.append(addon.config.short_name)
if 'node' in addon.config.configs:
config = addon.to_json(auth.user)
# inject the MakoTemplateLookup into the template context
# TODO inject only short_name and render fully client side
config['template_lookup'] = addon.config.template_lookup
config['addon_icon_url'] = addon.config.icon_url
addon_enabled_settings.append(config)
addon_enabled_settings = sorted(addon_enabled_settings, key=lambda addon: addon['addon_full_name'].lower())
ret['addon_categories'] = settings.ADDON_CATEGORIES
ret['addons_available'] = sorted([
addon
for addon in settings.ADDONS_AVAILABLE
if 'node' in addon.owners
and addon.short_name not in settings.SYSTEM_ADDED_ADDONS['node'] and addon.short_name != 'wiki'
], key=lambda addon: addon.full_name.lower())
for addon in settings.ADDONS_AVAILABLE:
if 'node' in addon.owners and addon.short_name not in settings.SYSTEM_ADDED_ADDONS['node'] and addon.short_name == 'wiki':
ret['wiki'] = addon
break
ret['addons_enabled'] = addons_enabled
ret['addon_enabled_settings'] = addon_enabled_settings
ret['addon_capabilities'] = settings.ADDON_CAPABILITIES
ret['addon_js'] = collect_node_config_js(node.get_addons())
ret['include_wiki_settings'] = node.include_wiki_settings(auth.user)
ret['comments'] = {
'level': node.comment_level,
}
ret['categories'] = Node.CATEGORY_MAP
ret['categories'].update({
'project': 'Project'
})
return ret | IndexError | dataset/ETHPy150Open CenterForOpenScience/osf.io/website/project/views/node.py/node_setting |
@must_be_valid_project
@must_be_contributor_or_public
@must_not_be_registration
def watch_post(auth, node, **kwargs):
user = auth.user
watch_config = WatchConfig(node=node,
digest=request.json.get('digest', False),
immediate=request.json.get('immediate', False))
try:
user.watch(watch_config)
except __HOLE__: # Node is already being watched
raise HTTPError(http.BAD_REQUEST)
user.save()
return {
'status': 'success',
'watchCount': node.watches.count()
} | ValueError | dataset/ETHPy150Open CenterForOpenScience/osf.io/website/project/views/node.py/watch_post |
@must_be_valid_project
@must_be_contributor_or_public
@must_not_be_registration
def unwatch_post(auth, node, **kwargs):
user = auth.user
watch_config = WatchConfig(node=node,
digest=request.json.get('digest', False),
immediate=request.json.get('immediate', False))
try:
user.unwatch(watch_config)
except __HOLE__: # Node isn't being watched
raise HTTPError(http.BAD_REQUEST)
return {
'status': 'success',
'watchCount': node.watches.count()
} | ValueError | dataset/ETHPy150Open CenterForOpenScience/osf.io/website/project/views/node.py/unwatch_post |
@must_be_valid_project
@must_be_contributor_or_public
@must_not_be_registration
def togglewatch_post(auth, node, **kwargs):
'''View for toggling watch mode for a node.'''
# TODO: refactor this, watch_post, unwatch_post (@mambocab)
user = auth.user
watch_config = WatchConfig(
node=node,
digest=request.json.get('digest', False),
immediate=request.json.get('immediate', False)
)
try:
if user.is_watching(node):
user.unwatch(watch_config)
else:
user.watch(watch_config)
except __HOLE__:
raise HTTPError(http.BAD_REQUEST)
user.save()
return {
'status': 'success',
'watchCount': node.watches.count(),
'watched': user.is_watching(node)
} | ValueError | dataset/ETHPy150Open CenterForOpenScience/osf.io/website/project/views/node.py/togglewatch_post |
@collect_auth
def move_pointers(auth):
"""Move pointer from one node to another node.
"""
from_node_id = request.json.get('fromNodeId')
to_node_id = request.json.get('toNodeId')
pointers_to_move = request.json.get('pointerIds')
if from_node_id is None or to_node_id is None or pointers_to_move is None:
raise HTTPError(http.BAD_REQUEST)
from_node = Node.load(from_node_id)
to_node = Node.load(to_node_id)
if to_node is None or from_node is None:
raise HTTPError(http.BAD_REQUEST)
for pointer_to_move in pointers_to_move:
pointer_id = from_node.pointing_at(pointer_to_move)
pointer_node = Node.load(pointer_to_move)
pointer = Pointer.load(pointer_id)
if pointer is None:
raise HTTPError(http.BAD_REQUEST)
try:
from_node.rm_pointer(pointer, auth=auth)
except ValueError:
raise HTTPError(http.BAD_REQUEST)
from_node.save()
try:
_add_pointers(to_node, [pointer_node], auth)
except __HOLE__:
raise HTTPError(http.BAD_REQUEST)
return {}, 200, None | ValueError | dataset/ETHPy150Open CenterForOpenScience/osf.io/website/project/views/node.py/move_pointers |
@collect_auth
def add_pointer(auth):
"""Add a single pointer to a node using only JSON parameters
"""
to_node_id = request.json.get('toNodeID')
pointer_to_move = request.json.get('pointerID')
if not (to_node_id and pointer_to_move):
raise HTTPError(http.BAD_REQUEST)
pointer = Node.load(pointer_to_move)
to_node = Node.load(to_node_id)
try:
_add_pointers(to_node, [pointer], auth)
except __HOLE__:
raise HTTPError(http.BAD_REQUEST) | ValueError | dataset/ETHPy150Open CenterForOpenScience/osf.io/website/project/views/node.py/add_pointer |
@must_have_permission(WRITE)
@must_not_be_registration
def add_pointers(auth, node, **kwargs):
"""Add pointers to a node.
"""
node_ids = request.json.get('nodeIds')
if not node_ids:
raise HTTPError(http.BAD_REQUEST)
nodes = [
Node.load(node_id)
for node_id in node_ids
]
try:
_add_pointers(node, nodes, auth)
except __HOLE__:
raise HTTPError(http.BAD_REQUEST)
return {} | ValueError | dataset/ETHPy150Open CenterForOpenScience/osf.io/website/project/views/node.py/add_pointers |
@must_have_permission(WRITE)
@must_not_be_registration
def remove_pointer(auth, node, **kwargs):
"""Remove a pointer from a node, raising a 400 if the pointer is not
in `node.nodes`.
"""
    # TODO: since this is a delete request, it shouldn't use the request body; put
    # the pointer id in the URL instead
pointer_id = request.json.get('pointerId')
if pointer_id is None:
raise HTTPError(http.BAD_REQUEST)
pointer = Pointer.load(pointer_id)
if pointer is None:
raise HTTPError(http.BAD_REQUEST)
try:
node.rm_pointer(pointer, auth=auth)
except __HOLE__:
raise HTTPError(http.BAD_REQUEST)
node.save() | ValueError | dataset/ETHPy150Open CenterForOpenScience/osf.io/website/project/views/node.py/remove_pointer |
@must_be_valid_project # injects project
@must_have_permission(WRITE)
@must_not_be_registration
def remove_pointer_from_folder(auth, node, pointer_id, **kwargs):
"""Remove a pointer from a node, raising a 400 if the pointer is not
in `node.nodes`.
"""
if pointer_id is None:
raise HTTPError(http.BAD_REQUEST)
pointer_id = node.pointing_at(pointer_id)
pointer = Pointer.load(pointer_id)
if pointer is None:
raise HTTPError(http.BAD_REQUEST)
try:
node.rm_pointer(pointer, auth=auth)
except __HOLE__:
raise HTTPError(http.BAD_REQUEST)
node.save() | ValueError | dataset/ETHPy150Open CenterForOpenScience/osf.io/website/project/views/node.py/remove_pointer_from_folder |
@must_have_permission(WRITE)
@must_not_be_registration
def fork_pointer(auth, node, **kwargs):
"""Fork a pointer. Raises BAD_REQUEST if pointer not provided, not found,
or not present in `nodes`.
"""
pointer_id = request.json.get('pointerId')
pointer = Pointer.load(pointer_id)
if pointer is None:
# TODO: Change this to 404?
raise HTTPError(http.BAD_REQUEST)
try:
node.fork_pointer(pointer, auth=auth, save=True)
except __HOLE__:
raise HTTPError(http.BAD_REQUEST) | ValueError | dataset/ETHPy150Open CenterForOpenScience/osf.io/website/project/views/node.py/fork_pointer |
def tearDown(self):
self.top = None
os.chdir(self.startdir)
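        # Remove the temporary directory unless OPENMDAO_KEEPDIRS is set (handy for inspecting failures).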
if not os.environ.get('OPENMDAO_KEEPDIRS', False):
try:
shutil.rmtree(self.tempdir)
except __HOLE__:
pass | OSError | dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.lib/src/openmdao/lib/casehandlers/test/test_json_filevar.py/TestCase.tearDown |
def test_unique_for_date_with_nullable_date(self):
p1 = FlexibleDatePost.objects.create(title="Django 1.0 is released",
slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))
p = FlexibleDatePost(title="Django 1.0 is released")
try:
p.full_clean()
except __HOLE__:
self.fail("unique_for_date checks shouldn't trigger when the associated DateField is None.")
p = FlexibleDatePost(slug="Django 1.0")
try:
p.full_clean()
except ValidationError:
self.fail("unique_for_year checks shouldn't trigger when the associated DateField is None.")
p = FlexibleDatePost(subtitle="Finally")
try:
p.full_clean()
except ValidationError:
self.fail("unique_for_month checks shouldn't trigger when the associated DateField is None.") | ValidationError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/tests/modeltests/validation/test_unique.py/PerformUniqueChecksTest.test_unique_for_date_with_nullable_date |
def _os_supports_avx():
"""
Whether the current OS supports AVX, regardless of the CPU.
This is necessary because the user may be running a very old Linux
kernel (e.g. CentOS 5) on a recent CPU.
"""
if (not sys.platform.startswith('linux')
or platform.machine() not in ('i386', 'i586', 'i686', 'x86_64')):
return True
# Executing the CPUID instruction may report AVX available even though
# the kernel doesn't support it, so parse /proc/cpuinfo instead.
try:
f = open('/proc/cpuinfo', 'r')
except __HOLE__:
# If /proc isn't available, assume yes
return True
with f:
for line in f:
head, _, body = line.partition(':')
if head.strip() == 'flags' and 'avx' in body.split():
return True
else:
return False | OSError | dataset/ETHPy150Open numba/numba/numba/config.py/_os_supports_avx |
def __init__(self, filename):
"""Initialize writer with a file descriptor."""
self._data = None
self._data_queue = deque()
self._file = filename
self._fd = None
self._timer = None
try:
self._open_file()
except __HOLE__, (errno, errmsg):
raise errors.InitError("Failed to open nagios pipe %s: %s"
% (self._file, errmsg))
reactor.addSystemEventTrigger('after', 'shutdown', self.shutdown) | OSError | dataset/ETHPy150Open marineam/nagcat/python/nagcat/nagios_api.py/NagiosWriter.__init__ |
def doWrite(self):
"""Write data out to the pipe."""
while self._data or self._data_queue:
if not self._data:
self._data = self._data_queue.popleft()
log.trace("Writing Nagios command to fifo: %s", self._data)
try:
data_written = os.write(self._fd, self._data)
except __HOLE__, (errno, errmsg):
if errno == 11:
# EAGAIN, pause writing until next doWrite()
return
else:
log.warn("Failed to write to nagios pipe: %s" % errmsg)
self._reopen_file()
return
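            # A short write leaves a tail; keep the unwritten part and resume on the next doWrite() call.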
if len(self._data) != data_written:
self._data = self._data[data_written:]
return
else:
self._data = None
self.stopWriting() | OSError | dataset/ETHPy150Open marineam/nagcat/python/nagcat/nagios_api.py/NagiosWriter.doWrite |
def _reopen_file(self):
"""Attempt to reopen the pipe."""
if self._timer:
if not self._timer.called:
self._timer.cancel()
self._timer = None
try:
self._open_file()
except __HOLE__, (errno, errmsg):
log.warn("Failed to reopen nagios pipe: %s" % errmsg)
self._timer = reactor.callLater(10.0, self._reopen_file)
else:
log.info("Reopened nagios pipe, resuming writes.") | OSError | dataset/ETHPy150Open marineam/nagcat/python/nagcat/nagios_api.py/NagiosWriter._reopen_file |
def _close_file(self):
"""Close the named pipe if open"""
if self._fd is not None:
self.stopWriting()
try:
os.close(self._fd)
except __HOLE__:
pass
self._fd = None | OSError | dataset/ETHPy150Open marineam/nagcat/python/nagcat/nagios_api.py/NagiosWriter._close_file |
def __init__(self, command_file, spool_dir):
"""Create writer and add it to the reactor.
command_file is the path to the nagios pipe
spool_dir is where to write large commands to
"""
self.spool_dir = spool_dir
# Create or cleanup the spool dir
if os.path.isdir(spool_dir):
self._cleanup_spool()
else:
assert not os.path.exists(spool_dir)
try:
os.makedirs(spool_dir)
except __HOLE__, ex:
raise errors.InitError(
"Cannot create directory %s: %s" % (spool_dir, ex))
info = os.stat(command_file)
if not stat.S_ISFIFO(info.st_mode):
raise errors.InitError(
"Command file %s is not a fifo" % command_file)
self.writer = NagiosWriter(command_file) | OSError | dataset/ETHPy150Open marineam/nagcat/python/nagcat/nagios_api.py/NagiosCommander.__init__ |
def _cleanup_spool(self):
"""Periodically clean up old things in the spool dir.
This shouldn't normally be required but if things get screwed
up we don't want the directory to get so huge that it keeps
things slow after nagios is handling results again.
"""
# Note: It is entirely possible that the command to submit
# this file is still in the writer queue, if that's the case
# nagios will also log an error when it gets around to
# reading from the queue.
# Set the threshold to 5 minutes ago, if nagios hasn't been
# able to keep up for the past 5 minutes we have problems.
threshold = time.time() - 300
count = 0
for item in os.listdir(self.spool_dir):
path = "%s/%s" % (self.spool_dir, item)
try:
info = os.stat(path)
except:
continue
if info.st_mtime < threshold:
try:
os.unlink(path)
except __HOLE__, ex:
log.error("Failed to remove %s: %s" % (path, ex))
else:
count += 1
if count:
log.warn("Removed %d stale nagios command files" % count)
# Schedule the next cleanup to run from a thread in 1 minute
reactor.callLater(60, reactor.callInThread, self._cleanup_spool) | OSError | dataset/ETHPy150Open marineam/nagcat/python/nagcat/nagios_api.py/NagiosCommander._cleanup_spool |
def _groupTokenizer(self, string):
string = cStringIO.StringIO(string)
lex = shlex.shlex(string, posix=True)
lex.escape = ""
lex.wordchars = (
"abcdfeghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789_.-:[]?*+^$," )
valid = lex.wordchars + "()"
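        # Yield tokens until EOF; any lone character outside the allowed set is rejected.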
while True:
try:
token = lex.get_token()
except __HOLE__, ex:
raise xmlrpc.Fault(1, "Invalid expression: %s" % ex)
if token is lex.eof:
break
if len(token) == 1 and token not in valid:
raise xmlrpc.Fault(1, "Unexpected character: %s" % token)
yield token | ValueError | dataset/ETHPy150Open marineam/nagcat/python/nagcat/nagios_api.py/NagiosXMLRPC._groupTokenizer |
def apply(self, fgraph):
tasks = defaultdict(list)
if self.max_use_ratio is not None:
max_uses = self.max_use_ratio * len(fgraph.apply_nodes)
runs = defaultdict(int)
else:
runs = None
def importer(node):
# print 'IMPORTING', node
self.backtrack(node, tasks)
def pruner(node):
try:
del tasks[node]
except __HOLE__:
pass
def chin(node, i, r, new_r):
if new_r.owner and not r.clients:
self.backtrack(new_r.owner, tasks)
# # == NOT IDEAL == #
# for node in fgraph.apply_nodes:
# importer(node)
for node in fgraph.toposort():
tasks[node].extend(lopt for track, i, lopt in self.fetch_tracks0(node.op))
u = self.attach_updater(fgraph, importer, pruner, chin)
print('KEYS', [hash(t) for t in tasks.keys()])
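        # Apply pending local optimizers node by node until the task queue is empty.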
while tasks:
for node in tasks:
todo = tasks.pop(node)
break
for lopt in todo:
if runs is not None and runs[lopt] >= max_uses:
print('Warning: optimization exceeded its maximal use ratio: %s, %s' % (lopt, max_uses), file=sys.stderr)
continue
success = self.process_node(fgraph, node, lopt)
if success:
if runs is not None: runs[lopt] += 1
break
self.detach_updater(fgraph, u)
# def match(self, node, candidates):
# candidates[:] = [candidate
# for candidate in candidates
# if candidate.current.op is None or candidate.current.op == node.op]
# for candidate in candidates:
# if candidate.current.inputs is not None:
# for in1, in2 in zip(candidate.current.inputs, node.inputs):
# if isinstance(in1, string_types):
# candidate.match[in1] = in2
# for client in node.clients:
# op = node.op
# patterns = self.pattern_base[(depth, op)].union(self.pattern_base[(depth, WILDCARD)])
# if not patterns:
# return patterns
# return self.match(node, depth + 1).intersection(patterns)
# def backtrack(self, node, q):
# for node2, i in node.clients:
# op2 = node2.op | KeyError | dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/gof/sandbox/equilibrium.py/_EquilibriumOptimizer.apply |
def get(self, name, default=None):
"""Return a key or the default value if no value exists."""
try:
return self[name]
except __HOLE__:
return default | KeyError | dataset/ETHPy150Open IanLewis/kay/kay/utils/datastructures.py/ReadOnlyMultiMapping.get |
def __contains__(self, name):
try:
self[name]
except __HOLE__:
return False
return True | KeyError | dataset/ETHPy150Open IanLewis/kay/kay/utils/datastructures.py/ReadOnlyMultiMapping.__contains__ |
def get_standard_processors():
from django.conf import settings
global _standard_context_processors
if _standard_context_processors is None:
processors = []
collect = []
collect.extend(_builtin_context_processors)
collect.extend(settings.TEMPLATE_CONTEXT_PROCESSORS)
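        # Import each dotted path and resolve the processor callable it names.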
for path in collect:
i = path.rfind('.')
module, attr = path[:i], path[i+1:]
try:
mod = import_module(module)
except __HOLE__, e:
raise ImproperlyConfigured('Error importing request processor module %s: "%s"' % (module, e))
try:
func = getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" callable request processor' % (module, attr))
processors.append(func)
_standard_context_processors = tuple(processors)
return _standard_context_processors | ImportError | dataset/ETHPy150Open adieu/django-nonrel/django/template/context.py/get_standard_processors |
def __init__(self, app, conf):
self.app = app
self.memcache_servers = conf.get('memcache_servers')
serialization_format = conf.get('memcache_serialization_support')
try:
# Originally, while we documented using memcache_max_connections
# we only accepted max_connections
max_conns = int(conf.get('memcache_max_connections',
conf.get('max_connections', 0)))
except __HOLE__:
max_conns = 0
memcache_options = {}
if (not self.memcache_servers
or serialization_format is None
or max_conns <= 0):
path = os.path.join(conf.get('swift_dir', '/etc/swift'),
'memcache.conf')
memcache_conf = ConfigParser()
if memcache_conf.read(path):
# if memcache.conf exists we'll start with those base options
try:
memcache_options = dict(memcache_conf.items('memcache'))
except NoSectionError:
pass
if not self.memcache_servers:
try:
self.memcache_servers = \
memcache_conf.get('memcache', 'memcache_servers')
except (NoSectionError, NoOptionError):
pass
if serialization_format is None:
try:
serialization_format = \
memcache_conf.get('memcache',
'memcache_serialization_support')
except (NoSectionError, NoOptionError):
pass
if max_conns <= 0:
try:
new_max_conns = \
memcache_conf.get('memcache',
'memcache_max_connections')
max_conns = int(new_max_conns)
except (NoSectionError, NoOptionError, ValueError):
pass
# while memcache.conf options are the base for the memcache
# middleware, if you set the same option also in the filter
# section of the proxy config it is more specific.
memcache_options.update(conf)
connect_timeout = float(memcache_options.get(
'connect_timeout', CONN_TIMEOUT))
pool_timeout = float(memcache_options.get(
'pool_timeout', POOL_TIMEOUT))
tries = int(memcache_options.get('tries', TRY_COUNT))
io_timeout = float(memcache_options.get('io_timeout', IO_TIMEOUT))
if not self.memcache_servers:
self.memcache_servers = '127.0.0.1:11211'
if max_conns <= 0:
max_conns = 2
if serialization_format is None:
serialization_format = 2
else:
serialization_format = int(serialization_format)
self.memcache = MemcacheRing(
[s.strip() for s in self.memcache_servers.split(',') if s.strip()],
connect_timeout=connect_timeout,
pool_timeout=pool_timeout,
tries=tries,
io_timeout=io_timeout,
allow_pickle=(serialization_format == 0),
allow_unpickle=(serialization_format <= 1),
max_conns=max_conns) | ValueError | dataset/ETHPy150Open openstack/swift/swift/common/middleware/memcache.py/MemcacheMiddleware.__init__ |
def setup(vim, level, output_file=None):
"""Setup logging for Deoplete
"""
global init
if init:
return
init = True
if output_file:
formatter = logging.Formatter(log_format)
handler = logging.FileHandler(filename=output_file)
handler.setFormatter(formatter)
handler.addFilter(DeopleteLogFilter(vim))
root.addHandler(handler)
level = str(level).upper()
if level not in ('DEBUG', 'INFO', 'WARN', 'WARNING', 'ERROR',
'CRITICAL', 'FATAL'):
level = 'DEBUG'
root.setLevel(getattr(logging, level))
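    # Best-effort lookup of the installed neovim client version for the startup log message.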
try:
import pkg_resources
neovim_version = pkg_resources.get_distribution('neovim').version
except __HOLE__:
neovim_version = 'unknown'
log = getLogger('logging')
log.info('--- Deoplete Log Start ---')
log.info('%s, Python %s, neovim client %s',
vim.call('deoplete#util#neovim_version'),
'.'.join(map(str, sys.version_info[:3])),
neovim_version)
vim.call('deoplete#util#print_warning', 'Logging to %s' % output_file) | ImportError | dataset/ETHPy150Open Shougo/deoplete.nvim/rplugin/python3/deoplete/logger.py/setup |
def get_request_id(self):
"""
This must be called while self.lock is held.
"""
try:
return self.request_ids.popleft()
except __HOLE__:
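            # No released ids are available to reuse; hand out the next id in sequence.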
self.highest_request_id += 1
# in_flight checks should guarantee this
assert self.highest_request_id <= self.max_request_id
return self.highest_request_id | IndexError | dataset/ETHPy150Open datastax/python-driver/cassandra/connection.py/Connection.get_request_id |
@property
def next_timeout(self):
try:
return self._queue[0][0]
except __HOLE__:
pass | IndexError | dataset/ETHPy150Open datastax/python-driver/cassandra/connection.py/TimerManager.next_timeout |
def read(self, location):
"""
Read file from disk.
"""
location = os.path.expanduser(location)
# Try to open this file, using different encodings.
for e in ENCODINGS:
try:
with codecs.open(location, 'r', e) as f:
return f.read(), e
except __HOLE__:
pass # Try next codec.
# Unable to open.
raise Exception('Unable to open file: %r' % location) | UnicodeDecodeError | dataset/ETHPy150Open jonathanslenders/pyvim/pyvim/io/backends.py/FileIO.read |
def _auto_decode(data):
"""
Decode bytes. Return a (text, encoding) tuple.
"""
assert isinstance(data, six.binary_type)
for e in ENCODINGS:
try:
return data.decode(e), e
except __HOLE__:
pass
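    # None of the preferred encodings worked; fall back to UTF-8 and drop undecodable bytes.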
return data.decode('utf-8', 'ignore') | UnicodeDecodeError | dataset/ETHPy150Open jonathanslenders/pyvim/pyvim/io/backends.py/_auto_decode |
def get_template_sources(self, template_name, template_dirs=None):
"""
Returns the absolute paths to "template_name", when appended to each
directory in "template_dirs". Any paths that don't lie inside one of the
template dirs are excluded from the result set, for security reasons.
"""
if not connection.tenant or isinstance(connection.tenant, FakeTenant):
return
if not template_dirs:
try:
template_dirs = settings.MULTITENANT_TEMPLATE_DIRS
except AttributeError:
raise ImproperlyConfigured('To use %s.%s you must define the MULTITENANT_TEMPLATE_DIRS' %
(__name__, FilesystemLoader.__name__))
for template_dir in template_dirs:
try:
if '%s' in template_dir:
yield safe_join(template_dir % connection.tenant.domain_url, template_name)
else:
yield safe_join(template_dir, connection.tenant.domain_url, template_name)
except UnicodeDecodeError:
# The template dir name was a bytestring that wasn't valid UTF-8.
raise
except __HOLE__:
# The joined path was located outside of this particular
# template_dir (it might be inside another one, so this isn't
# fatal).
pass | ValueError | dataset/ETHPy150Open bernardopires/django-tenant-schemas/tenant_schemas/template_loaders.py/FilesystemLoader.get_template_sources |
def load_template_source(self, template_name, template_dirs=None):
tried = []
for filepath in self.get_template_sources(template_name, template_dirs):
try:
with open(filepath, 'rb') as fp:
return (fp.read().decode(settings.FILE_CHARSET), filepath)
except __HOLE__:
tried.append(filepath)
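        # Build an informative error message from the paths that were attempted.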
if tried:
error_msg = "Tried %s" % tried
else:
error_msg = "Your TEMPLATE_DIRS setting is empty. Change it to point to at least one template directory."
raise TemplateDoesNotExist(error_msg) | IOError | dataset/ETHPy150Open bernardopires/django-tenant-schemas/tenant_schemas/template_loaders.py/FilesystemLoader.load_template_source |
@attribute
def schema(self):
try:
m = self._schema
except __HOLE__:
pass
else:
return m()
self.schema = schema = datashape.dshape(self.dshape.measure)
return schema | AttributeError | dataset/ETHPy150Open blaze/blaze/blaze/expr/expressions.py/Expr.schema |
def _len(self):
try:
return int(self.dshape[0])
except __HOLE__:
raise ValueError('Can not determine length of table with the '
'following datashape: %s' % self.dshape) | TypeError | dataset/ETHPy150Open blaze/blaze/blaze/expr/expressions.py/Expr._len |
def __getattr__(self, key):
assert key != '_hash', \
'%s expressions should set _hash in __init__' % type(self).__name__
try:
result = object.__getattribute__(self, key)
except __HOLE__:
fields = dict(zip(map(valid_identifier, self.fields), self.fields))
# prefer the method if there's a field with the same name
methods = toolz.merge(
schema_methods(self.dshape.measure),
dshape_methods(self.dshape)
)
if key in methods:
func = methods[key]
if func in method_properties:
result = func(self)
else:
result = boundmethod(func, self)
elif self.fields and key in fields:
if isscalar(self.dshape.measure): # t.foo.foo is t.foo
result = self
else:
result = self[fields[key]]
else:
raise
# cache the attribute lookup, getattr will not be invoked again.
setattr(self, key, result)
return result | AttributeError | dataset/ETHPy150Open blaze/blaze/blaze/expr/expressions.py/Expr.__getattr__ |
def __exit__(self, *args):
""" Exit context
Close any open resource if we are called in context
"""
for value in self._resources().values():
try:
value.close()
except __HOLE__:
pass
return True | AttributeError | dataset/ETHPy150Open blaze/blaze/blaze/expr/expressions.py/Expr.__exit__ |
@dispatch(object)
def shape(expr):
""" Shape of expression
>>> symbol('s', '3 * 5 * int32').shape
(3, 5)
Works on anything discoverable
>>> shape([[1, 2], [3, 4]])
(2, 2)
"""
s = list(discover(expr).shape)
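    # Convert fixed dimensions to plain ints; symbolic dimensions are left unchanged.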
for i, elem in enumerate(s):
try:
s[i] = int(elem)
except __HOLE__:
pass
return tuple(s) | TypeError | dataset/ETHPy150Open blaze/blaze/blaze/expr/expressions.py/shape |
def importBundle(org, name, data):
hdrs = { 'Content-Type': 'application/octet-stream' }
uri = '/v1/organizations/%s/apis?action=import&name=%s' \
% (org, name)
print 'Importing new application %s' % name
resp = None
try:
resp = httptools.httpCall('POST', uri, hdrs, data)
except __HOLE__, e:
print traceback.format_exc()
err_message = IOError_messages.get(e.errno)
if err_message:
print '%s uploading API Bundle!\nHINT: %s' % (e, err_message)
return -1
except Exception, e:
print traceback.format_exc()
print e
return -1
if resp.status != 200 and resp.status != 201:
message = HTTP_messages.get(resp.status)
if not message:
message = resp.read()
print 'Import failed to %s with status %i:\nHINT: %s' % (uri, resp.status, message)
return -1
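    # Parse the import response and return the new revision number.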
deployment = json.load(resp)
revision = int(deployment['revision'])
return revision | IOError | dataset/ETHPy150Open apigee/api-platform-tools/ApigeePlatformTools/deploytools.py/importBundle |
def iter_query(query):
"""Accept a filename, stream, or string.
Returns an iterator over lines of the query."""
try:
itr = click.open_file(query).readlines()
except __HOLE__:
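        # Not a readable file; treat the query itself as a single literal line.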
itr = [query]
return itr | IOError | dataset/ETHPy150Open mapbox/mapbox-cli-py/mapboxcli/scripts/geocoding.py/iter_query |
def coords_from_query(query):
"""Transform a query line into a (lng, lat) pair of coordinates."""
try:
coords = json.loads(query)
except __HOLE__:
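        # Not JSON; split the query on commas and/or whitespace instead.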
vals = re.split(r"\,*\s*", query.strip())
coords = [float(v) for v in vals]
return tuple(coords[:2]) | ValueError | dataset/ETHPy150Open mapbox/mapbox-cli-py/mapboxcli/scripts/geocoding.py/coords_from_query |
@conftest
def find_sxx(conf):
v = conf.env
cc = None
if v['CXX']: cc = v['CXX']
elif 'CXX' in conf.environ: cc = conf.environ['CXX']
if not cc: cc = conf.find_program('c++', var='CXX')
if not cc: conf.fatal('sunc++ was not found')
cc = conf.cmd_to_list(cc)
try:
if not Utils.cmd_output(cc + ['-flags']):
conf.fatal('sunc++ %r was not found' % cc)
except __HOLE__:
conf.fatal('sunc++ -flags could not be executed')
v['CXX'] = cc
v['CXX_NAME'] = 'sun' | ValueError | dataset/ETHPy150Open appcelerator-archive/poc-nodejs-desktop/Resources/nodejs/builds/linux/node/lib/node/wafadmin/Tools/suncxx.py/find_sxx |
def __new__(cls, application, request, **kwargs):
# http://stackoverflow.com/questions/3209233/how-to-replace-an-instance-in-init-with-a-different-object
# Based on upgrade header, websocket request handler must be used
try:
if request.headers['Upgrade'].lower() == 'websocket':
return CustomWebSocketHandler(application, request, **kwargs)
except __HOLE__:
pass
return tornado.web.RequestHandler.__new__(cls, application, request, **kwargs) | KeyError | dataset/ETHPy150Open owtf/owtf/framework/http/proxy/proxy.py/ProxyHandler.__new__ |
def set_status(self, status_code, reason=None):
"""Sets the status code for our response.
Overriding is done so as to handle unknown response codes gracefully.
"""
self._status_code = status_code
if reason is not None:
self._reason = tornado.escape.native_str(reason)
else:
try:
self._reason = tornado.httputil.responses[status_code]
except __HOLE__:
self._reason = tornado.escape.native_str("Server Not Found") | KeyError | dataset/ETHPy150Open owtf/owtf/framework/http/proxy/proxy.py/ProxyHandler.set_status |
@tornado.web.asynchronous
@tornado.gen.coroutine
def get(self):
"""Handle all requests except the connect request.
        Once an SSL stream is formed between the browser and the proxy, requests are then processed by this function.
"""
# The flow starts here
self.request.local_timestamp = datetime.datetime.now()
self.request.response_buffer = ''
# The requests that come through ssl streams are relative requests, so transparent proxying is required. The
# following snippet decides the url that should be passed to the async client
if self.request.uri.startswith(self.request.protocol, 0): # Normal Proxy Request.
self.request.url = self.request.uri
else: # Transparent Proxy Request.
self.request.url = self.request.protocol + "://" + self.request.host
if self.request.uri != '/': # Add uri only if needed.
self.request.url += self.request.uri
# This block here checks for already cached response and if present returns one
self.cache_handler = CacheHandler(
self.application.cache_dir,
self.request,
self.application.cookie_regex,
self.application.cookie_blacklist)
request_hash = yield tornado.gen.Task(self.cache_handler.calculate_hash)
self.cached_response = self.cache_handler.load()
if self.cached_response:
if self.cached_response.body:
self.write(self.cached_response.body)
self.finish_response(self.cached_response)
else:
# Request header cleaning
for header in ProxyHandler.restricted_request_headers:
try:
del self.request.headers[header]
except:
continue
# HTTP auth if exists
http_auth_username = None
http_auth_password = None
http_auth_mode = None
if self.application.http_auth:
host = self.request.host
# If default ports are not provided, they are added
if ':' not in self.request.host:
default_ports = {'http': '80', 'https': '443'}
if self.request.protocol in default_ports:
host = self.request.host + ':' + default_ports[self.request.protocol]
# Check if auth is provided for that host
try:
index = self.application.http_auth_hosts.index(host)
http_auth_username = self.application.http_auth_usernames[index]
http_auth_password = self.application.http_auth_passwords[index]
http_auth_mode = self.application.http_auth_modes[index]
except __HOLE__:
pass
# pycurl is needed for curl client
async_client = tornado.curl_httpclient.CurlAsyncHTTPClient()
# httprequest object is created and then passed to async client with a callback
success_response = False # is used to check the response in the botnet mode
while not success_response:
# Proxy Switching (botnet_mode) code
if self.application.proxy_manager:
proxy = self.application.proxy_manager.get_next_available_proxy()
self.application.outbound_ip = proxy["proxy"][0]
self.application.outbound_port = int(proxy["proxy"][1])
# httprequest object is created and then passed to async client with a callback
callback = None
if self.application.outbound_proxy_type == 'socks':
callback = prepare_curl_callback # socks callback function.
body = self.request.body or None
request = tornado.httpclient.HTTPRequest(
url=self.request.url,
method=self.request.method,
body=body,
headers=self.request.headers,
auth_username=http_auth_username,
auth_password=http_auth_password,
auth_mode=http_auth_mode,
follow_redirects=False,
use_gzip=True,
streaming_callback=self.handle_data_chunk,
header_callback=None,
proxy_host=self.application.outbound_ip,
proxy_port=self.application.outbound_port,
proxy_username=self.application.outbound_username,
proxy_password=self.application.outbound_password,
allow_nonstandard_methods=True,
prepare_curl_callback=callback,
validate_cert=False)
try:
response = yield tornado.gen.Task(async_client.fetch, request)
except Exception:
response = None
pass
# Request retries
for i in range(0, 3):
if (response is None) or response.code in [408, 599]:
self.request.response_buffer = ''
response = yield tornado.gen.Task(async_client.fetch, request)
else:
success_response = True
break
# Botnet mode code (proxy switching).
# Checking the status of the proxy (asynchronous).
if self.application.proxy_manager and not success_response:
proxy_check_req = tornado.httpclient.HTTPRequest(
url=self.application.proxy_manager.testing_url, # testing url is google.com.
use_gzip=True,
proxy_host=self.application.outbound_ip,
proxy_port=self.application.outbound_port,
proxy_username=self.application.outbound_username,
proxy_password=self.application.outbound_password,
prepare_curl_callback=callback, # socks callback function.
validate_cert=False)
try:
proxy_check_resp = yield tornado.gen.Task(async_client.fetch, proxy_check_req)
except Exception:
pass
if proxy_check_resp.code != 200:
self.application.proxy_manager.remove_proxy(proxy["index"])
else:
success_response = True
else:
success_response = True
self.finish_response(response)
# Cache the response after finishing the response, so caching time is not included in response time
self.cache_handler.dump(response)
###
# The following 5 methods can be handled through the above implementation.
### | ValueError | dataset/ETHPy150Open owtf/owtf/framework/http/proxy/proxy.py/ProxyHandler.get |
def store_upstream_data(self, message):
"""Save websocket data sent from client to server.
        i.e. add it to HTTPRequest.response_buffer with direction (>>)
"""
try: # Cannot write binary content as a string, so catch it
self.handshake_request.response_buffer += (">>> %s\r\n" % message)
except __HOLE__:
self.handshake_request.response_buffer += (">>> May be binary\r\n") | TypeError | dataset/ETHPy150Open owtf/owtf/framework/http/proxy/proxy.py/CustomWebSocketHandler.store_upstream_data |
def store_downstream_data(self, message):
        """Save websocket data sent from server to client.
        i.e. add it to HTTPRequest.response_buffer with direction (<<)
"""
try: # Cannot write binary content as a string, so catch it.
self.handshake_request.response_buffer += ("<<< %s\r\n" % message)
except __HOLE__:
self.handshake_request.response_buffer += ("<<< May be binary\r\n") | TypeError | dataset/ETHPy150Open owtf/owtf/framework/http/proxy/proxy.py/CustomWebSocketHandler.store_downstream_data |
def initialize(self, outbound_options=[], outbound_auth=""):
# The tornado application, which is used to pass variables to request handler
self.application = tornado.web.Application(handlers=[(r'.*', ProxyHandler)], debug=False, gzip=True,)
self.config = self.get_component("config")
self.db_config = self.get_component("db_config")
# All required variables in request handler
# Required variables are added as attributes to application, so that request handler can access these
self.application.Core = self.get_component("core")
try:
self.proxy_manager = self.get_component("proxy_manager")
except ComponentNotFoundException:
self.proxy_manager = None
self.application.proxy_manager = self.proxy_manager
# ctypes object allocated from shared memory to verify if proxy must inject probe code or not
# 'i' means ctypes type is integer, initialization value is 0
# if lock is True then a new recursive lock object is created to
# synchronize access to the value
self.application.Core.pnh_inject = Value('i', 0, lock=True)
self.application.inbound_ip = self.db_config.Get('INBOUND_PROXY_IP')
self.application.inbound_port = int(self.db_config.Get('INBOUND_PROXY_PORT'))
if self.proxy_manager:
self.instances = "1" # Botnet mode needs only one proxy process.
else:
self.instances = self.db_config.Get("INBOUND_PROXY_PROCESSES")
# Proxy CACHE
# Cache related settings, including creating required folders according to cache folder structure
self.application.cache_dir = self.db_config.Get("INBOUND_PROXY_CACHE_DIR")
# Clean possible older cache directory.
if os.path.exists(self.application.cache_dir):
FileOperations.rm_tree(self.application.cache_dir)
FileOperations.make_dirs(self.application.cache_dir)
# SSL MiTM
# SSL certs, keys and other settings (os.path.expanduser because they are stored in users home directory
# ~/.owtf/proxy)
self.application.ca_cert = os.path.expanduser(self.db_config.Get('CA_CERT'))
self.application.ca_key = os.path.expanduser(self.db_config.Get('CA_KEY'))
# To stop OWTF from breaking for our beloved users :P
try:
self.application.ca_key_pass = FileOperations.open(
os.path.expanduser(self.db_config.Get('CA_PASS_FILE')),
'r',
owtf_clean=False).read().strip()
except IOError:
self.application.ca_key_pass = "owtf" # XXX: Legacy CA key pass for older versions.
self.application.proxy_folder = os.path.dirname(self.application.ca_cert)
self.application.certs_folder = os.path.expanduser(self.db_config.Get('CERTS_FOLDER'))
try: # Ensure CA.crt and Key exist.
assert os.path.exists(self.application.ca_cert)
assert os.path.exists(self.application.ca_key)
except __HOLE__:
self.get_component("error_handler").FrameworkAbort(
"Files required for SSL MiTM are missing. Please run the install script")
try: # If certs folder missing, create that.
assert os.path.exists(self.application.certs_folder)
except AssertionError:
FileOperations.make_dirs(self.application.certs_folder)
# Blacklist (or) Whitelist Cookies
# Building cookie regex to be used for cookie filtering for caching
if self.db_config.Get('WHITELIST_COOKIES') == 'None':
cookies_list = self.db_config.Get('BLACKLIST_COOKIES').split(',')
self.application.cookie_blacklist = True
else:
cookies_list = self.db_config.Get('WHITELIST_COOKIES').split(',')
self.application.cookie_blacklist = False
if self.application.cookie_blacklist:
regex_cookies_list = [cookie + "=([^;]+;?)" for cookie in cookies_list]
else:
regex_cookies_list = ["(" + cookie + "=[^;]+;?)" for cookie in self.db_config.Get('COOKIES_LIST')]
regex_string = '|'.join(regex_cookies_list)
self.application.cookie_regex = re.compile(regex_string)
# Outbound Proxy
# Outbound proxy settings to be used inside request handler
if outbound_options:
if len(outbound_options) == 3:
self.application.outbound_proxy_type = outbound_options[0]
self.application.outbound_ip = outbound_options[1]
self.application.outbound_port = int(outbound_options[2])
else:
self.application.outbound_proxy_type = "http"
self.application.outbound_ip = outbound_options[0]
self.application.outbound_port = int(outbound_options[1])
else:
self.application.outbound_ip = None
self.application.outbound_port = None
self.application.outbound_proxy_type = None
if outbound_auth:
self.application.outbound_username, self.application.outbound_password = outbound_auth.split(":")
else:
self.application.outbound_username = None
self.application.outbound_password = None
self.server = tornado.httpserver.HTTPServer(self.application)
# server has to be a class variable, because it is used inside request handler to attach sockets for monitoring
ProxyHandler.server = self.server
# Header filters
# Restricted headers are picked from framework/config/framework_config.cfg
# These headers are removed from the response obtained from webserver, before sending it to browser
restricted_response_headers = self.config.FrameworkConfigGet("PROXY_RESTRICTED_RESPONSE_HEADERS").split(",")
ProxyHandler.restricted_response_headers = restricted_response_headers
# These headers are removed from request obtained from browser, before sending it to webserver
restricted_request_headers = self.config.FrameworkConfigGet("PROXY_RESTRICTED_REQUEST_HEADERS").split(",")
ProxyHandler.restricted_request_headers = restricted_request_headers
# HTTP Auth options
if self.db_config.Get("HTTP_AUTH_HOST") != "None":
self.application.http_auth = True
# All the variables are lists
self.application.http_auth_hosts = self.db_config.Get("HTTP_AUTH_HOST").strip().split(',')
self.application.http_auth_usernames = self.db_config.Get("HTTP_AUTH_USERNAME").strip().split(',')
self.application.http_auth_passwords = self.db_config.Get("HTTP_AUTH_PASSWORD").strip().split(',')
self.application.http_auth_modes = self.db_config.Get("HTTP_AUTH_MODE").strip().split(',')
else:
self.application.http_auth = False | AssertionError | dataset/ETHPy150Open owtf/owtf/framework/http/proxy/proxy.py/ProxyProcess.initialize |
def __init__(self, *args, **kwargs):
super(StyledLinkForm, self).__init__(*args, **kwargs)
#
        # Combine object_type and object_id into a single 'int_destination'
        # field. Get all the objects that we want the user to be able to
        # choose from.
        #
        # For the objects, if it's not convenient to sort in the queryset (I'm
        # looking at you, django-cms), then just set 'sorted=False' and we'll
        # do it below, based on the sort values.
#
available_objects = []
for item in STYLEDLINK_MODELS:
if 'type' in item:
model = item['type']
else:
model = item['_cls_name']
parts = item['class_path'].rsplit('.', 1)
cls = getattr(import_module(parts[0]), parts[1])
queryset = cls.objects
if 'manager_method' in item:
queryset = getattr(queryset, item['manager_method'])()
if 'filter' in item:
for (k, v) in item['filter'].items():
try:
# Attempt to execute any callables in the filter dict.
item['filter'][k] = v()
except __HOLE__:
# OK, it wasn't a callable, so, leave it be
pass
queryset = queryset.filter(**item['filter'])
else:
if not 'manager_method' in item:
queryset = queryset.all()
if 'order_by' in item:
queryset = queryset.order_by(item['order_by'])
available_objects.append({
'model': model,
'objects': list(queryset),
})
# Now create our list of choices for the <select> field
object_choices = []
object_choices.append(("", "--", ))
for group in sorted(available_objects):
obj_list = []
for obj in group['objects']:
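                # Encode each choice value as 'type:<content_type_id>-id:<object_id>'.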
type_class = ContentType.objects.get_for_model(obj.__class__)
type_id = type_class.id
obj_id = obj.id
form_value = "type:%s-id:%s" % (type_id, obj_id)
display_text = str(obj)
obj_list.append((form_value, display_text))
object_choices.append(( group['model'], obj_list, ))
self.fields['int_destination'].choices = object_choices
# If there is an existing value, pre-select it
if self.instance.int_destination:
type_class = ContentType.objects.get_for_model(self.instance.int_destination.__class__)
type_id = type_class.id
obj_id = self.instance.int_destination.id
current_value = "type:%s-id:%s" % (type_id, obj_id)
self.fields['int_destination'].initial = current_value | TypeError | dataset/ETHPy150Open mkoistinen/djangocms-styledlink/djangocms_styledlink/forms.py/StyledLinkForm.__init__ |