| function | label | info |
|---|---|---|
def _get_max_io_ops_per_host(self, host_state, spec_obj):
aggregate_vals = utils.aggregate_values_from_key(
host_state,
'max_io_ops_per_host')
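# Aggregate metadata values are cast to int here; a malformed value raises
# ValueError, which is caught below and replaced with the configured default.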
try:
value = utils.validate_num_values(
aggregate_vals, CONF.max_io_ops_per_host, cast_to=int)
except __HOLE__ as e:
LOG.warning(_LW("Could not decode max_io_ops_per_host: '%s'"), e)
value = CONF.max_io_ops_per_host
return value | ValueError | dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/scheduler/filters/io_ops_filter.py/AggregateIoOpsFilter._get_max_io_ops_per_host |
def isSameTree(self, p, q):
"""
dfs
:param p: TreeNode
:param q: TreeNode
:return: boolean
"""
# trivial
if not p and not q:
return True
# dfs
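# If either node is None here, accessing .val raises AttributeError,
# which is treated as a structural mismatch between the two trees.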
try:
if p.val==q.val and self.isSameTree(p.left, q.left) and self.isSameTree(p.right, q.right):
return True
except __HOLE__:
return False
return False | AttributeError | dataset/ETHPy150Open algorhythms/LeetCode/100 Same Tree.py/Solution.isSameTree |
def _spectral_helper(x, y, fs=1.0, window='hann', nperseg=256,
noverlap=None, nfft=None, detrend='constant',
return_onesided=True, scaling='spectrum', axis=-1,
mode='psd'):
"""
Calculate various forms of windowed FFTs for PSD, CSD, etc.
This is a helper function that implements the commonality between the
psd, csd, and spectrogram functions. It is not designed to be called
externally. The windows are not averaged over; the result from each window
is returned.
Parameters
----------
x : array_like
Array or sequence containing the data to be analyzed.
y : array_like
Array or sequence containing the data to be analyzed. If this is
the same object in memory as x (i.e. _spectral_helper(x, x, ...)),
the extra computations are spared.
fs : float, optional
Sampling frequency of the time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hann'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross spectrum
('spectrum') where `Pxy` has units of V**2, if `x` and `y` are
measured in V and fs is measured in Hz. Defaults to 'density'.
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
mode : str, optional
Defines what kind of return values are expected. Options are ['psd',
'complex', 'magnitude', 'angle', 'phase'].
Returns
-------
freqs : ndarray
Array of sample frequencies.
t : ndarray
Array of times corresponding to each data segment.
result : ndarray
Array of output data, contents dependent on *mode* kwarg.
References
----------
.. [1] Stack Overflow, "Rolling window for 1D arrays in Numpy?",
http://stackoverflow.com/a/6811241
.. [2] Stack Overflow, "Using strides for an efficient moving average
filter", http://stackoverflow.com/a/4947453
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
"""
if mode not in ['psd', 'complex', 'magnitude', 'angle', 'phase']:
raise ValueError("Unknown value for mode %s, must be one of: "
"'default', 'psd', 'complex', "
"'magnitude', 'angle', 'phase'" % mode)
# If x and y are the same object we can save ourselves some computation.
same_data = y is x
if not same_data and mode != 'psd':
raise ValueError("x and y must be equal if mode is not 'psd'")
axis = int(axis)
# Ensure we have np.arrays, get outdtype
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
outdtype = np.result_type(x,y,np.complex64)
else:
outdtype = np.result_type(x,np.complex64)
if not same_data:
# Check if we can broadcast the outer axes together
xouter = list(x.shape)
youter = list(y.shape)
xouter.pop(axis)
youter.pop(axis)
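# np.broadcast raises ValueError when the outer shapes are incompatible;
# it is re-raised below with a clearer message.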
try:
outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape
except __HOLE__:
raise ValueError('x and y cannot be broadcast together.')
if same_data:
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape)
else:
if x.size == 0 or y.size == 0:
outshape = outershape + (min([x.shape[axis], y.shape[axis]]),)
emptyout = np.rollaxis(np.empty(outshape), -1, axis)
return emptyout, emptyout, emptyout
if x.ndim > 1:
if axis != -1:
x = np.rollaxis(x, axis, len(x.shape))
if not same_data and y.ndim > 1:
y = np.rollaxis(y, axis, len(y.shape))
# Check if x and y are the same length, zero-pad if necessary
if not same_data:
if x.shape[-1] != y.shape[-1]:
if x.shape[-1] < y.shape[-1]:
pad_shape = list(x.shape)
pad_shape[-1] = y.shape[-1] - x.shape[-1]
x = np.concatenate((x, np.zeros(pad_shape)), -1)
else:
pad_shape = list(y.shape)
pad_shape[-1] = x.shape[-1] - y.shape[-1]
y = np.concatenate((y, np.zeros(pad_shape)), -1)
# X and Y are same length now, can test nperseg with either
if x.shape[-1] < nperseg:
warnings.warn('nperseg = {0:d}, is greater than input length = {1:d}, '
'using nperseg = {1:d}'.format(nperseg, x.shape[-1]))
nperseg = x.shape[-1]
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
if nfft is None:
nfft = nperseg
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
else:
nfft = int(nfft)
if noverlap is None:
noverlap = nperseg//2
elif noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
else:
noverlap = int(noverlap)
# Handle detrending and window functions
if not detrend:
def detrend_func(d):
return d
elif not hasattr(detrend, '__call__'):
def detrend_func(d):
return signaltools.detrend(d, type=detrend, axis=-1)
elif axis != -1:
# Wrap this function so that it receives a shape that it could
# reasonably expect to receive.
def detrend_func(d):
d = np.rollaxis(d, -1, axis)
d = detrend(d)
return np.rollaxis(d, axis, len(d.shape))
else:
detrend_func = detrend
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] != nperseg:
raise ValueError('window must have length of nperseg')
if np.result_type(win,np.complex64) != outdtype:
win = win.astype(outdtype)
if mode == 'psd':
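# 'density' scaling normalizes by fs and the window energy (units of V**2/Hz),
# while 'spectrum' normalizes by the squared window sum (units of V**2).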
if scaling == 'density':
scale = 1.0 / (fs * (win*win).sum())
elif scaling == 'spectrum':
scale = 1.0 / win.sum()**2
else:
raise ValueError('Unknown scaling: %r' % scaling)
else:
scale = 1
if return_onesided is True:
if np.iscomplexobj(x):
sides = 'twosided'
else:
sides = 'onesided'
if not same_data:
if np.iscomplexobj(y):
sides = 'twosided'
else:
sides = 'twosided'
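# A one-sided spectrum keeps only the non-negative frequency bins;
# a two-sided spectrum keeps all nfft bins.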
if sides == 'twosided':
num_freqs = nfft
elif sides == 'onesided':
if nfft % 2:
num_freqs = (nfft + 1)//2
else:
num_freqs = nfft//2 + 1
# Perform the windowed FFTs
result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft)
result = result[..., :num_freqs]
freqs = fftpack.fftfreq(nfft, 1/fs)[:num_freqs]
if not same_data:
# All the same operations on the y data
result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft)
result_y = result_y[..., :num_freqs]
result = np.conjugate(result) * result_y
elif mode == 'psd':
result = np.conjugate(result) * result
elif mode == 'magnitude':
result = np.absolute(result)
elif mode == 'angle' or mode == 'phase':
result = np.angle(result)
elif mode == 'complex':
pass
result *= scale
if sides == 'onesided':
if nfft % 2:
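# Odd nfft: every bin except DC has a conjugate counterpart, so double them all.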
result[...,1:] *= 2
else:
# Last point is unpaired Nyquist freq point, don't double
result[...,1:-1] *= 2
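# Times of the segment centres: successive segments advance by
# (nperseg - noverlap) samples.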
t = np.arange(nperseg/2, x.shape[-1] - nperseg/2 + 1, nperseg - noverlap)/float(fs)
if sides != 'twosided' and not nfft % 2:
# get the last value correctly, it is negative otherwise
freqs[-1] *= -1
# we unwrap the phase here to handle the onesided vs. twosided case
if mode == 'phase':
result = np.unwrap(result, axis=-1)
result = result.astype(outdtype)
# All imaginary parts are zero anyways
if same_data and mode != 'complex':
result = result.real
# Output is going to have new last axis for window index
if axis != -1:
# Specify as positive axis index
if axis < 0:
axis = len(result.shape)-1-axis
# Roll frequency axis back to axis where the data came from
result = np.rollaxis(result, -1, axis)
else:
# Make sure window/time index is last axis
result = np.rollaxis(result, -1, -2)
return freqs, t, result | ValueError | dataset/ETHPy150Open scipy/scipy/scipy/signal/spectral.py/_spectral_helper |
def test_js_file(err, filename, data, line=0, context=None):
"Tests a JS file by parsing and analyzing its tokens"
# Don't even try to run files bigger than 1MB.
if len(data) > 1024 * 1024:
err.warning(
err_id=("js", "skip", "didnt_even_try"),
warning="Didn't even try to validate large JS file.",
description="A very large JS file was skipped in the validation "
"process. It's over a megabyte.",
filename=filename)
return
# Set the tier to 4 (Security Tests)
if err is not None:
before_tier = err.tier
err.set_tier(3)
tree = None
get_tree = spidermonkey.get_tree
spidermonkey_path = (err and err.get_resource("SPIDERMONKEY") or
SPIDERMONKEY_INSTALLATION)
if err.get_resource("acorn") or not spidermonkey_path:
get_tree = acorn.get_tree
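# Parsing can fail at runtime (e.g. SpiderMonkey running out of memory);
# a RuntimeError here is reported as a validation warning rather than a crash.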
try:
tree = get_tree(data, err, filename, spidermonkey_path)
except __HOLE__ as exc:
warning ="JS: Unknown runtime error"
if "out of memory" in str(exc):
warning = "JS: Out of memory exception"
err.warning(
err_id=("js", "parse", "runtimeerror"),
warning=warning,
description="An error was encountered while trying to validate a "
"JS file.",
filename=filename)
if not tree:
err.metadata["ran_js_tests"] = "no;missing ast"
if err is not None:
err.set_tier(before_tier)
return
trav = traverser.Traverser(
err, filename, line, context=context or ContextGenerator(data))
trav.run(tree)
err.metadata["ran_js_tests"] = "yes"
# Reset the tier so we don't break the world
if err is not None:
err.set_tier(before_tier) | RuntimeError | dataset/ETHPy150Open mozilla/app-validator/appvalidator/testcases/scripting.py/test_js_file |
def __call__(self, request, *args, **kwargs):
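# get_object() raises ObjectDoesNotExist for an unknown feed object;
# translate that into a 404 response.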
try:
obj = self.get_object(request, *args, **kwargs)
except __HOLE__:
raise Http404('Feed object does not exist.')
feedgen = self.get_feed(obj, request)
response = HttpResponse(mimetype=feedgen.mime_type)
feedgen.write(response, 'utf-8')
return response | ObjectDoesNotExist | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/contrib/syndication/views.py/Feed.__call__ |
def item_link(self, item):
try:
return item.get_absolute_url()
except __HOLE__:
raise ImproperlyConfigured('Give your %s class a get_absolute_url() method, or define an item_link() method in your Feed class.' % item.__class__.__name__) | AttributeError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/contrib/syndication/views.py/Feed.item_link |
def __get_dynamic_attr(self, attname, obj, default=None):
try:
attr = getattr(self, attname)
except __HOLE__:
return default
if callable(attr):
# Check func_code.co_argcount rather than try/excepting the
# function and catching the TypeError, because something inside
# the function may raise the TypeError. This technique is more
# accurate.
if hasattr(attr, 'func_code'):
argcount = attr.func_code.co_argcount
else:
argcount = attr.__call__.func_code.co_argcount
if argcount == 2: # one argument is 'self'
return attr(obj)
else:
return attr()
return attr | AttributeError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/contrib/syndication/views.py/Feed.__get_dynamic_attr |
def feed(request, url, feed_dict=None):
"""Provided for backwards compatibility."""
from django.contrib.syndication.feeds import Feed as LegacyFeed
import warnings
warnings.warn('The syndication feed() view is deprecated. Please use the '
'new class based view API.',
category=PendingDeprecationWarning)
if not feed_dict:
raise Http404("No feeds are registered.")
try:
slug, param = url.split('/', 1)
except ValueError:
slug, param = url, ''
try:
f = feed_dict[slug]
except __HOLE__:
raise Http404("Slug %r isn't registered." % slug)
# Backwards compatibility within the backwards compatibility;
# Feeds can be updated to be class-based, but still be deployed
# using the legacy feed view. This only works if the feed takes
# no arguments (i.e., get_object returns None). Refs #14176.
if not issubclass(f, LegacyFeed):
instance = f()
instance.feed_url = getattr(f, 'feed_url', None) or request.path
instance.title_template = f.title_template or ('feeds/%s_title.html' % slug)
instance.description_template = f.description_template or ('feeds/%s_description.html' % slug)
return instance(request)
try:
feedgen = f(slug, request).get_feed(param)
except FeedDoesNotExist:
raise Http404("Invalid feed parameters. Slug %r is valid, but other parameters, or lack thereof, are not." % slug)
response = HttpResponse(mimetype=feedgen.mime_type)
feedgen.write(response, 'utf-8')
return response | KeyError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/contrib/syndication/views.py/feed |
def get_server_name():
try:
return settings.TRANSFER_SERVER
except __HOLE__:
raise ImproperlyConfigured('Please specify settings.TRANSFER_SERVER') | AttributeError | dataset/ETHPy150Open smartfile/django-transfer/django_transfer/__init__.py/get_server_name |
def get_header_name():
server_name = get_server_name()
try:
return SERVER_HEADERS[server_name]
except __HOLE__:
raise ImproperlyConfigured('Invalid server name "%s" for '
'settings.TRANSFER_SERVER' % server_name) | KeyError | dataset/ETHPy150Open smartfile/django-transfer/django_transfer/__init__.py/get_header_name |
def get_header_value(path):
if get_server_name() == SERVER_NGINX:
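# nginx needs the filesystem path remapped to an internal location via
# settings.TRANSFER_MAPPINGS; a missing setting raises AttributeError and is
# reported as a configuration error.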
try:
mappings = settings.TRANSFER_MAPPINGS
except __HOLE__:
raise ImproperlyConfigured('Please specify settings.TRANSFER_MAPPINGS')
found = False
for root, location in mappings.items():
if path.startswith(root):
found = True
path = os.path.relpath(path, root).strip('/')
path = os.path.join(location, path)
break
if not found:
raise ImproperlyConfigured('Cannot map path "%s"' % path)
return quote(path.encode('utf-8')) | AttributeError | dataset/ETHPy150Open smartfile/django-transfer/django_transfer/__init__.py/get_header_value |
def process_request(self, request):
if request.method != 'POST':
return
if not is_enabled():
return
if get_server_name() != SERVER_NGINX:
return
# Find uploads in request.POST and copy them to request.FILES. Such
# fields are expected to be named as:
# __original_field_name__[__attribute__]
# We will build a unique list of the __original_field_name__'s we find
# that contain a valid __attribute__ name.
fields = set()
for name in request.POST.keys():
field, bracket, attr = name.partition('[')
if attr in ('filename]', 'path]', 'size]', 'content_type]'):
fields.add(field)
# If we found any field names that match the expected naming scheme, we
# can now loop through the names, and try to extract the attributes.
# The original fields will be pop()ed off request.POST, to clean up.
if fields:
# We will be modifying these objects, so make them mutable.
request.POST._mutable, request.FILES._mutable = True, True
for field in fields:
# Get required fields. If these are missing, we will fail.
data = []
try:
fields = enumerate(zip(request.POST.pop('%s[filename]' % field), request.POST.pop('%s[path]' % field)))
except __HOLE__:
raise Exception('Missing required field "%s", please '
'configure mod_upload properly')
# Get optional fields. If these are missing, we will try to
# determine the value from the temporary file.
try:
content_types = dict(enumerate(request.POST.pop('%s[content_type]' % field)))
except (KeyError, ValueError):
content_types = {}
try:
sizes = dict((request.POST.pop('%s[size]' % field)))
except (KeyError, ValueError):
sizes = {}
# Iterate over the possibly multiple uploaded files
for i, (name, temp) in fields:
content_type = content_types[i] if i in content_types else mimetypes.guess_type(name)[0]
size = sizes[i] if i in sizes else os.path.getsize(temp)
data.append(ProxyUploadedFile(temp, name, content_type, size))
# Now add a new UploadedFile object so that the web application
# can handle these "files" that were uploaded in the same
# fashion as a regular file upload.
if not data:
continue
request.FILES.setlist(field, data)
# We are done modifying these objects, make them immutable once
# again.
request.POST._mutable, request.FILES._mutable = False, False | KeyError | dataset/ETHPy150Open smartfile/django-transfer/django_transfer/__init__.py/TransferMiddleware.process_request |
def yearFilter(queryset, querydict):
try:
year=int(querydict['year_covered'])
queryset = queryset.filter(Q(coverage_from_date__gte=date(year,1,1), coverage_to_date__lte=date(year,12,31)))
except (__HOLE__, ValueError):
pass
return queryset | KeyError | dataset/ETHPy150Open sunlightlabs/read_FEC/fecreader/api/filters.py/yearFilter |
def DWDistrictFilter(queryset, querydict):
try:
district_list=querydict['districts']
# if there's no commas, it's just a single district
if district_list.find(',') < 0:
queryset = queryset.filter(district__pk=district_list)
# if there's a comma, it's a comma-delimited list
else:
district_ids = district_list.split(',')
queryset = queryset.filter(district__pk__in=district_ids)
except __HOLE__:
pass
return queryset | KeyError | dataset/ETHPy150Open sunlightlabs/read_FEC/fecreader/api/filters.py/DWDistrictFilter |
def candidatedistrictFilter(queryset, querydict):
try:
id=int(querydict['district'])
queryset = queryset.filter(district__pk=id)
except (__HOLE__, ValueError):
pass
return queryset | KeyError | dataset/ETHPy150Open sunlightlabs/read_FEC/fecreader/api/filters.py/candidatedistrictFilter |
def districtIDFilter(queryset, querydict):
try:
id=int(querydict['pk'])
queryset = queryset.filter(pk=id)
except (__HOLE__, ValueError):
pass
return queryset | KeyError | dataset/ETHPy150Open sunlightlabs/read_FEC/fecreader/api/filters.py/districtIDFilter |
def weekFilter(queryset, querydict):
try:
week=querydict['week']
# if there's no commas, it's just a single district
if week.upper() == "NOW":
queryset = queryset.filter(cycle_week_number = get_week_number(date.today()) )
if week.upper() == "LAST":
queryset = queryset.filter(cycle_week_number = get_week_number(date.today())-1 )
except __HOLE__:
pass
return queryset | KeyError | dataset/ETHPy150Open sunlightlabs/read_FEC/fecreader/api/filters.py/weekFilter |
def periodTypeFilter(queryset, querydict):
try:
period_type=querydict['period_type']
if period_type.startswith('Q'):
if period_type == 'Q1':
queryset = queryset.filter(coverage_from_date__month=1, coverage_from_date__day=1, coverage_to_date__month=3, coverage_to_date__day=31)
elif period_type == 'Q2':
queryset = queryset.filter(coverage_from_date__month=4, coverage_from_date__day=1, coverage_to_date__month=6, coverage_to_date__day=30)
elif period_type == 'Q3':
queryset = queryset.filter(coverage_from_date__month=7, coverage_from_date__day=1, coverage_to_date__month=9, coverage_to_date__day=30)
elif period_type == 'Q4':
queryset = queryset.filter(coverage_from_date__month=10, coverage_from_date__day=1, coverage_to_date__month=12, coverage_to_date__day=31)
elif period_type.startswith('M'):
if period_type == 'M1':
queryset = queryset.filter(coverage_from_date__month=1, coverage_from_date__day=1, coverage_to_date__month=1, coverage_to_date__day=31)
elif period_type == 'M2':
# leap years!
queryset = queryset.filter(Q(coverage_from_date__month=2, coverage_from_date__day=1, coverage_to_date__month=2, coverage_to_date__day=28)|Q(coverage_from_date__month=2, coverage_from_date__day=1, coverage_to_date__month=2, coverage_to_date__day=29))
elif period_type == 'M3':
queryset = queryset.filter(coverage_from_date__month=3, coverage_from_date__day=1, coverage_to_date__month=3, coverage_to_date__day=31)
elif period_type == 'M4':
queryset = queryset.filter(coverage_from_date__month=4, coverage_from_date__day=1, coverage_to_date__month=4, coverage_to_date__day=30)
elif period_type == 'M5':
queryset = queryset.filter(coverage_from_date__month=5, coverage_from_date__day=1, coverage_to_date__month=5, coverage_to_date__day=31)
elif period_type == 'M6':
queryset = queryset.filter(coverage_from_date__month=6, coverage_from_date__day=1, coverage_to_date__month=6, coverage_to_date__day=30)
elif period_type == 'M7':
queryset = queryset.filter(coverage_from_date__month=7, coverage_from_date__day=1, coverage_to_date__month=7, coverage_to_date__day=31)
elif period_type == 'M8':
queryset = queryset.filter(coverage_from_date__month=8, coverage_from_date__day=1, coverage_to_date__month=8, coverage_to_date__day=31)
elif period_type == 'M9':
queryset = queryset.filter(coverage_from_date__month=9, coverage_from_date__day=1, coverage_to_date__month=9, coverage_to_date__day=30)
elif period_type == 'M10':
queryset = queryset.filter(coverage_from_date__month=10, coverage_from_date__day=1, coverage_to_date__month=10, coverage_to_date__day=31)
elif period_type == 'M11':
queryset = queryset.filter(coverage_from_date__month=11, coverage_from_date__day=1, coverage_to_date__month=11, coverage_to_date__day=30)
elif period_type == 'M12':
queryset = queryset.filter(coverage_from_date__month=12, coverage_from_date__day=1, coverage_to_date__month=12, coverage_to_date__day=31)
elif period_type == 'PRE':
queryset = queryset.filter(Q(coverage_from_date=date(2014,10,1), coverage_to_date=date(2014,10,15)))
elif period_type == 'POS':
queryset = queryset.filter(Q(coverage_from_date=date(2014,10,16), coverage_to_date=date(2014,11,24)))
elif period_type == 'EOY':
queryset = queryset.filter(coverage_to_date=date(2014,12,31))
elif period_type.startswith('S'):
if period_type == 'S1':
queryset = queryset.filter(coverage_from_date__month=1, coverage_from_date__day=1, coverage_to_date__month=6, coverage_to_date__day=30)
elif period_type == 'S2':
queryset = queryset.filter(coverage_from_date__month=7, coverage_from_date__day=1, coverage_to_date__month=12, coverage_to_date__day=31)
except __HOLE__:
pass
return queryset | KeyError | dataset/ETHPy150Open sunlightlabs/read_FEC/fecreader/api/filters.py/periodTypeFilter |
def reportTypeFilter(queryset, querydict):
try:
report_type=querydict['report_type']
if report_type == 'monthly':
queryset = queryset.filter(form_type__in=['F3X', 'F3XN', 'F3XA', 'F3', 'F3A', 'F3N', 'F3P', 'F3PA', 'F3PN'])
elif report_type == 'ies':
queryset = queryset.filter(form_type__in=['F5', 'F5N', 'F5A', 'F24', 'F24A', 'F24N'])
elif report_type == 'F6':
queryset = queryset.filter(form_type__in=['F6', 'F6N', 'F6A'])
elif report_type == 'F9':
queryset = queryset.filter(form_type__in=['F9', 'F9N', 'F9A'])
elif report_type == 'F2':
queryset = queryset.filter(form_type__in=['F2', 'F2N', 'F2A'])
elif report_type == 'F1':
queryset = queryset.filter(form_type__in=['F1', 'F1N', 'F1A'])
elif report_type == 'F13':
queryset = queryset.filter(form_type__in=['F13', 'F13N', 'F13A'])
elif report_type == 'F4':
queryset = queryset.filter(form_type__in=['F4', 'F4N', 'F4A'])
except __HOLE__:
pass
return queryset | KeyError | dataset/ETHPy150Open sunlightlabs/read_FEC/fecreader/api/filters.py/reportTypeFilter |
def orderingFilter(queryset, querydict, fields):
"""
Only works if the ordering hasn't already been set. Which it hasn't, but...
"""
try:
ordering=querydict['ordering']
if ordering.lstrip('-') in fields:
orderlist = [ordering]
queryset = queryset.order_by(*orderlist)
except __HOLE__:
pass
return queryset | KeyError | dataset/ETHPy150Open sunlightlabs/read_FEC/fecreader/api/filters.py/orderingFilter |
def committeeSearchSlow(queryset, querydict):
"""
Table scan--maybe some sorta dropdown in front of this?
"""
try:
search_term = querydict['search_term']
queryset = queryset.filter(committee_name__icontains=search_term)
except __HOLE__:
pass
return queryset | KeyError | dataset/ETHPy150Open sunlightlabs/read_FEC/fecreader/api/filters.py/committeeSearchSlow |
def candidateidSearch(queryset, querydict):
try:
candidate_id = querydict['candidate_id']
authorized_committee_list = Authorized_Candidate_Committees.objects.filter(candidate_id=candidate_id)
committee_list = [x.get('committee_id') for x in authorized_committee_list.values('committee_id')]
queryset = queryset.filter(fec_id__in=committee_list)
except __HOLE__:
pass
return queryset | KeyError | dataset/ETHPy150Open sunlightlabs/read_FEC/fecreader/api/filters.py/candidateidSearch |
def filingTimeFilter(queryset, querydict):
try:
time_range=querydict['time_range']
if time_range == 'day':
today = date.today()
queryset = queryset.filter(filed_date=today)
elif time_range == 'week':
today = date.today()
one_week_ago = today-timedelta(days=7)
queryset = queryset.filter(filed_date__gte=one_week_ago)
elif time_range == '2014_cycle':
queryset = queryset.filter(filed_date__gte=date(2013,1,1), filed_date__lte=date(2014,12,31))
elif time_range == '2016_cycle':
queryset = queryset.filter(filed_date__gte=date(2015,1,1), filed_date__lte=date(2016,12,31))
except __HOLE__:
pass
return queryset
# create a phony keyword committee class that is just a list of committee types allowed
# so superpacs would be 'UO' -- but if we wanted to included hybrids, it would be 'UOVW' | KeyError | dataset/ETHPy150Open sunlightlabs/read_FEC/fecreader/api/filters.py/filingTimeFilter |
def multiCommitteeTypeFilter(queryset, querydict):
try:
committee_class = querydict['committee_class']
committee_class = committee_class.upper()
if committee_class == 'J':
queryset = queryset.filter(committee_designation=committee_class)
elif committee_class == 'L':
# a D committee type is a delegate, so use L instead.
queryset = queryset.filter(committee_designation='D')
else:
committee_type_list = list(committee_class)
queryset = queryset.filter(committee_type__in=committee_type_list)
except __HOLE__:
pass
return queryset
# variant with different name for committee type. | KeyError | dataset/ETHPy150Open sunlightlabs/read_FEC/fecreader/api/filters.py/multiCommitteeTypeFilter |
def multiCTypeFilter(queryset, querydict):
try:
committee_class = querydict['committee_class']
committee_class = committee_class.upper()
if committee_class == 'J':
queryset = queryset.filter(designation=committee_class)
elif committee_class == 'L':
# a D committee type is a delegate, so use L instead.
queryset = queryset.filter(designation='D')
else:
committee_type_list = list(committee_class)
queryset = queryset.filter(ctype__in=committee_type_list)
except __HOLE__:
pass
return queryset | KeyError | dataset/ETHPy150Open sunlightlabs/read_FEC/fecreader/api/filters.py/multiCTypeFilter |
def candidateCommitteeSearchSlow(queryset, querydict):
"""
Table scan--maybe some sorta dropdown in front of this?
"""
try:
search_term = querydict['search_term']
queryset = queryset.filter(Q(name__icontains=search_term)|Q(curated_candidate__name__icontains=search_term))
except __HOLE__:
pass
return queryset | KeyError | dataset/ETHPy150Open sunlightlabs/read_FEC/fecreader/api/filters.py/candidateCommitteeSearchSlow |
@staticmethod
def from_bundle(bundle_path):
"""Collect run data from two CSV files."""
run_data = RunData(None)
# load runs
runs_csv_path = os.path.join(bundle_path, "all_runs.csv.gz")
logger.info("reading run data from %s", runs_csv_path)
solver_names = set()
with borg.util.openz(runs_csv_path) as csv_file:
csv_reader = csv.reader(csv_file)
columns = csv_reader.next()
if columns[:5] != ["instance", "solver", "budget", "cost", "succeeded"]:
raise Exception("unexpected columns in run data CSV file")
for (instance, solver, budget_str, cost_str, succeeded_str) in csv_reader:
run_data.add_run(
instance,
RunRecord(
solver,
float(budget_str),
float(cost_str),
succeeded_str.lower() == "true",
),
)
solver_names.add(solver)
run_data.solver_names = sorted(solver_names)
# load features
features_csv_path = os.path.join(bundle_path, "all_features.csv.gz")
logger.info("reading feature data from %s", features_csv_path)
with borg.util.openz(features_csv_path) as csv_file:
csv_reader = csv.reader(csv_file)
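# An empty features file makes csv_reader.next() raise StopIteration;
# that case is treated as having no feature rows at all.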
try:
columns = csv_reader.next()
except __HOLE__:
pass
else:
if columns[0] != "instance":
raise Exception("unexpected columns in features CSV file")
for row in csv_reader:
feature_dict = dict(zip(columns[1:], map(float, row[1:])))
run_data.add_feature_vector(row[0], feature_dict)
assert set(run_data.run_lists) == set(run_data.feature_vectors)
return run_data | StopIteration | dataset/ETHPy150Open borg-project/borg/borg/storage.py/RunData.from_bundle |
def _import_svn():
global fs, repos, core, delta, _kindmap, _svn_uri_canonicalize
from svn import fs, repos, core, delta
_kindmap = {core.svn_node_dir: Node.DIRECTORY,
core.svn_node_file: Node.FILE}
try:
_svn_uri_canonicalize = core.svn_uri_canonicalize # Subversion 1.7+
except __HOLE__:
_svn_uri_canonicalize = lambda v: v
# Protect svn.core methods from GC
Pool.apr_pool_clear = staticmethod(core.apr_pool_clear)
Pool.apr_pool_destroy = staticmethod(core.apr_pool_destroy) | AttributeError | dataset/ETHPy150Open edgewall/trac/tracopt/versioncontrol/svn/svn_fs.py/_import_svn |
def __init__(self):
self._version = None
try:
_import_svn()
except __HOLE__ as e:
self.error = e
self.log.info('Failed to load Subversion bindings', exc_info=True)
else:
self.log.debug("Subversion bindings imported")
version = (core.SVN_VER_MAJOR, core.SVN_VER_MINOR,
core.SVN_VER_MICRO)
self._version = '%d.%d.%d' % version + core.SVN_VER_TAG
if version[0] < 1:
self.error = _("Subversion >= 1.0 required, found %(version)s",
version=self._version)
Pool()
# ISystemInfoProvider methods | ImportError | dataset/ETHPy150Open edgewall/trac/tracopt/versioncontrol/svn/svn_fs.py/SubversionConnector.__init__ |
def normalize_rev(self, rev):
"""Take any revision specification and produce a revision suitable
for the rest of the API
"""
if rev is None or isinstance(rev, basestring) and \
rev.lower() in ('', 'head', 'latest', 'youngest'):
return self.youngest_rev
else:
try:
rev = int(rev)
if 0 <= rev <= self.youngest_rev:
return rev
except (ValueError, __HOLE__):
pass
raise NoSuchChangeset(rev) | TypeError | dataset/ETHPy150Open edgewall/trac/tracopt/versioncontrol/svn/svn_fs.py/SubversionRepository.normalize_rev |
def get_annotations(self):
"""Return a list the last changed revision for each line.
(wraps ``client.blame2``)
"""
annotations = []
if self.isfile:
def blame_receiver(line_no, revision, author, date, line, pool):
annotations.append(revision)
try:
rev = _svn_rev(self.rev)
start = _svn_rev(0)
file_url_utf8 = posixpath.join(self.repos.ra_url_utf8,
quote(self._scoped_path_utf8))
# svn_client_blame2() requires a canonical uri since
# Subversion 1.7 (#11167)
file_url_utf8 = _svn_uri_canonicalize(file_url_utf8)
self.repos.log.info('opening ra_local session to %r',
file_url_utf8)
from svn import client
client.blame2(file_url_utf8, rev, start, rev, blame_receiver,
client.create_context(), self.pool())
except (core.SubversionException, __HOLE__) as e:
# svn thinks file is a binary or blame not supported
raise TracError(_('svn blame failed on %(path)s: %(error)s',
path=self.path, error=to_unicode(e)))
return annotations
# def get_previous(self):
# # FIXME: redo it with fs.node_history | AttributeError | dataset/ETHPy150Open edgewall/trac/tracopt/versioncontrol/svn/svn_fs.py/SubversionNode.get_annotations |
@classmethod
def from_json(cls, json_thing):
"""
Given a JSON object or JSON string that was created by `self.to_json`,
return the corresponding markovify.Chain.
"""
# Python3 compatibility
try:
basestring
except __HOLE__:
basestring = str
if isinstance(json_thing, basestring):
obj = json.loads(json_thing)
else:
obj = json_thing
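# The chain may be stored either as a list of [state, follow_dict] pairs
# (states need rehydrating into tuples) or as an already-keyed dict.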
if isinstance(obj, list):
rehydrated = dict((tuple(item[0]), item[1]) for item in obj)
elif isinstance(obj, dict):
rehydrated = obj
else:
raise ValueError("Object should be dict or list")
state_size = len(list(rehydrated.keys())[0])
inst = cls(None, state_size, rehydrated)
return inst | NameError | dataset/ETHPy150Open jsvine/markovify/markovify/chain.py/Chain.from_json |
def __getattr__(self, name):
# Now that we've intercepted __getattr__, we can't access our own
# attributes directly. Use __getattribute__ to access them.
obj = self.__getattribute__('_wrapped')
exceptions = self.__getattribute__('_exceptions')
if (obj is None) or (name in exceptions):
self.__getattribute__(name)
else:
try:
attr = getattr(obj, name)
if isinstance(obj, MethodType):
return new.instancemethod(attr.im_func, self, obj.__class__)
else:
return attr
except __HOLE__:
# Recast error message as being from this class.
raise AttributeError("'%s' object has no attribute '%s'" %
(self.__class__.__name__, name)) | AttributeError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/grizzled/grizzled/proxy.py/Forwarder.__getattr__ |
def divide_gaussians(mean_precision_num, mean_precision_den):
"""
mean_precision_num are parameters of gaussian in the numerator
mean_precision_den are parameters of gaussian in the denominator
output is a valid gaussian only if the variance of output is non-negative
"""
precision_op = mean_precision_num[1] - mean_precision_den[1]
try:
assert precision_op >= 0. # making it > so that mean_op is not inf
except __HOLE__:
print 'inputs = %s, %s' % (mean_precision_num, mean_precision_den)
print 'precision_op = %s' % (precision_op)
raise AssertionError
if precision_op == 0.:
mean_op = 0.
else:
mean_op = (mean_precision_num[0] * mean_precision_num[1] \
- mean_precision_den[0] * mean_precision_den[1] ) / precision_op
return np.array([mean_op, precision_op]) | AssertionError | dataset/ETHPy150Open balajiln/mondrianforest/src/utils.py/divide_gaussians |
def assert_no_nan(mat, name='matrix'):
try:
assert(not any(np.isnan(mat)))
except __HOLE__:
print '%s contains NaN' % name
print mat
raise AssertionError | AssertionError | dataset/ETHPy150Open balajiln/mondrianforest/src/utils.py/assert_no_nan |
def check_if_one(val):
try:
assert(np.abs(val - 1) < 1e-9)
except __HOLE__:
print 'val = %s (needs to be equal to 1)' % val
raise AssertionError | AssertionError | dataset/ETHPy150Open balajiln/mondrianforest/src/utils.py/check_if_one |
def check_if_zero(val):
try:
assert(np.abs(val) < 1e-9)
except __HOLE__:
print 'val = %s (needs to be equal to 0)' % val
raise AssertionError | AssertionError | dataset/ETHPy150Open balajiln/mondrianforest/src/utils.py/check_if_zero |
def sample_multinomial(prob):
try:
k = int(np.where(np.random.multinomial(1, prob, size=1)[0]==1)[0])
except __HOLE__:
print 'problem in sample_multinomial: prob = '
print prob
raise TypeError
except:
raise Exception
return k | TypeError | dataset/ETHPy150Open balajiln/mondrianforest/src/utils.py/sample_multinomial |
def main():
"""
Sends an API AT command to read the lower-order address bits from
an XBee Series 1 and looks for a response
"""
try:
# Open serial port
ser = serial.Serial('/dev/ttyUSB0', 9600)
# Create XBee Series 1 object
xbee = XBee(ser)
# Send AT packet
xbee.send('at', frame_id='A', command='DH')
# Wait for response
response = xbee.wait_read_frame()
print response
# Send AT packet
xbee.send('at', frame_id='B', command='DL')
# Wait for response
response = xbee.wait_read_frame()
print response
# Send AT packet
xbee.send('at', frame_id='C', command='MY')
# Wait for response
response = xbee.wait_read_frame()
print response
# Send AT packet
xbee.send('at', frame_id='D', command='CE')
# Wait for response
response = xbee.wait_read_frame()
print response
except __HOLE__:
pass
finally:
ser.close() | KeyboardInterrupt | dataset/ETHPy150Open thom-nic/python-xbee/examples/serial_example_series_1.py/main |
def createQuestionWindow(self):
""" Create a question window (well, duh) """
try:
questionArray = self.questionBank[self.questionPointer]
questionArrayLength = len(questionArray) - 3
#### QUESTION WINDOW FONTS
defaultfont = tkFont.Font(root=self.master, name="defaultfont", family="pixelgamefont", size=14)
titlefont = tkFont.Font(root=self.master, name="titlefont", family="pixelmix", size=14)
monaco12 = tkFont.Font(root=self.master, name="monaco12", family="monaco", size=12)
#### INITIALIZE FRAME OF LABELS WITHIN SELF.CANVAS
self.fr = Frame(bg='bisque', borderwidth=10, relief='ridge')
self.canvas.create_window(650, 300, window=self.fr, tags='window')
self.questionLabel = Label(self.fr, text=questionArray[0], bg="bisque", fg='#9152a1', font='titlefont', wraplength=800)
self.questionLabel.grid(row=0, columnspan=2, padx=10, pady=10, ipadx=2, ipady=2)
self.codeLabel = Label(self.fr, text=questionArray[1], fg='white', bg="#3c3c3c", font='monaco12', anchor=W, justify=LEFT, padx=10)
self.codeLabel.grid(row=1, columnspan=2, padx=10, pady=10, ipadx=0, ipady=10)
self.sLabel = Label(self.fr, text="S: " + str(questionArray[2]), bg="bisque", font='defaultfont', fg='#4e793f', wraplength=250, anchor=N)
self.sLabel.grid(row=2, column=0, padx=10, pady=10, ipadx=2, ipady=2)
self.kLabel = Label(self.fr, text="K: " + str(questionArray[3]), bg="bisque", font='defaultfont', fg='#4e793f', wraplength=250, anchor=N)
self.kLabel.grid(row=2, column=1, padx=10, pady=10, ipadx=2, ipady=2)
# questions may have 2-5 choices
if questionArrayLength > 2:
self.xLabel = Label(self.fr, text="X: " + str(questionArray[4]), bg="bisque", font='defaultfont', fg='#4e793f', wraplength=250, anchor=N)
self.xLabel.grid(row=3, column=0, padx=10, pady=10, ipadx=2, ipady=2)
if questionArrayLength > 3:
self.mLabel = Label(self.fr, text="M: " + str(questionArray[5]), bg="bisque", font='defaultfont', fg='#4e793f', wraplength=250, anchor=N)
self.mLabel.grid(row=3, column=1, padx=10, pady=10, ipadx=2, ipady=2)
if questionArrayLength > 4:
self.spaceLabel = Label(self.fr, text="SPACE: " + str(questionArray[6]), bg="bisque", font='defaultfont', fg='#4e793f', wraplength=250, anchor=N)
self.spaceLabel.grid(row=4, column=0, columnspan=2, padx=10, pady=10, ipadx=2, ipady=2)
pass
except __HOLE__:
# reached the end of level-specific self.questionBank
# pokemon player has boosted enough to get to the finish line
# you can pop up something like "your {SUICUNE} IS CLOSING IN!!!""
# awesome! full speed ahead!
# HOME RUN!!
# this is the final stretch!
# your {pokemon} is racing towards the finish line!
print "passing"
pass | KeyError | dataset/ETHPy150Open christabella/pokepython/gamelib/gui.py/applicationGUI.createQuestionWindow |
def write_error(self, status_code, **kwargs):
try:
log_message = kwargs.get('exc_info')[1].log_message
except (TypeError, AttributeError, __HOLE__):
log_message = 'unknown reason'
self.finish(log_message+'\r\n') | IndexError | dataset/ETHPy150Open EnigmaCurry/curlbomb/curlbomb/server.py/CurlbombBaseRequestHandler.write_error |
def run_server(settings):
settings['state'] = {'num_gets': 0, 'num_posts': 0, 'num_posts_in_progress': 0}
curlbomb_args = dict(
resource=settings['resource'],
state=settings['state'],
allowed_gets=settings['num_gets'],
knock=settings['knock'],
mime_type=settings['mime_type'],
allow_post_backs=settings['receive_postbacks'],
log_post_backs=settings['log_post_backs'],
log_file=settings['log_file'],
get_callback=settings.get('get_callback', None)
)
unwrapped_script = settings['get_curlbomb_command'](settings, unwrapped=True)
if not settings['client_quiet'] and settings['time_command']:
unwrapped_script = "time "+unwrapped_script
app = tornado.web.Application(
[
(r"/", CurlbombResourceWrapperRequestHandler,
dict(curlbomb_command=unwrapped_script)),
(r"/r", CurlbombResourceRequestHandler, curlbomb_args),
(r"/s", CurlbombStreamRequestHandler, curlbomb_args)
], default_handler_class=ErrorRequestHandler
)
global httpd
httpd = app.listen(
settings['port'],
ssl_options=settings['ssl_context'],
max_buffer_size=1024E9)
## Start SSH tunnel if requested:
httpd.ssh_conn = None
if settings['ssh']:
if settings['ssl'] is False:
log.warn("Using --ssh without --ssl is probably not a great idea")
httpd.ssh_conn = SSHRemoteForward(
settings['ssh_host'], settings['ssh_forward'], settings['ssh_port'])
httpd.ssh_conn.start()
if not httpd.ssh_conn.wait_connected():
log.error(httpd.ssh_conn.last_msg)
sys.exit(1)
cmd = settings['get_curlbomb_command'](settings)
if not settings['quiet']:
if settings['stdout'].isatty():
sys.stderr.write("Paste this command on the client:\n")
sys.stderr.write("\n")
sys.stderr.write(" {}\n".format(cmd))
sys.stderr.write("\n")
else:
sys.stderr.write("{}\n".format(cmd))
# Disable port forward checker for now. Good idea, but it doesn't work reliably.
#
# if settings['ssh'] and not settings['args'].domain:
# "Check the SSH forward works"
# def check_port_forward(timeout=5):
# time.sleep(5)
# try:
# url = "http{ssl}://{host}:{port}".format(
# ssl="s" if settings['ssl'] is not False else "",
# host=settings['display_host'],
# port=settings['display_port'])
# log.info("Testing port forward is functioning properly - {}".format(url))
# r = requests.head(url, timeout=timeout)
# except (requests.ConnectionError, requests.exceptions.ReadTimeout):
# log.warn("Could not contact server throuh SSH forward. You may need to check your sshd_config and enable 'GatwayPorts clientspecified'")
# _thread.start_new_thread(check_port_forward, ())
try:
log.debug("server ready")
tornado.ioloop.IOLoop.current().start()
except __HOLE__:
if settings['verbose']:
traceback.print_exc()
finally:
httpd.stop()
if httpd.ssh_conn is not None:
httpd.ssh_conn.kill()
settings['resource'].close()
if settings['log_process']:
settings['log_file'].close()
settings['log_process'].wait()
log.info("run_server done")
return settings.get('return_code', 0) | KeyboardInterrupt | dataset/ETHPy150Open EnigmaCurry/curlbomb/curlbomb/server.py/run_server |
def _await_socket(self, timeout):
"""Blocks for the nailgun subprocess to bind and emit a listening port in the nailgun stdout."""
with safe_open(self._ng_stdout, 'r') as ng_stdout:
start_time = time.time()
while 1:
readable, _, _ = select.select([ng_stdout], [], [], self._SELECT_WAIT)
if readable:
line = ng_stdout.readline() # TODO: address deadlock risk here.
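# Until the port line appears, the regex match is None and .group(1) raises
# AttributeError, so we keep polling until the timeout expires.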
try:
return self._NG_PORT_REGEX.match(line).group(1)
except __HOLE__:
pass
if (time.time() - start_time) > timeout:
raise NailgunClient.NailgunError(
'Failed to read nailgun output after {sec} seconds!'.format(sec=timeout)) | AttributeError | dataset/ETHPy150Open pantsbuild/pants/src/python/pants/java/nailgun_executor.py/NailgunExecutor._await_socket |
def __init__(self, formats=None, content_types=None, datetime_formatting=None):
self.supported_formats = []
self.datetime_formatting = getattr(settings, 'TASTYPIE_DATETIME_FORMATTING', 'iso-8601')
if formats is not None:
self.formats = formats
if content_types is not None:
self.content_types = content_types
if datetime_formatting is not None:
self.datetime_formatting = datetime_formatting
for format in self.formats:
try:
self.supported_formats.append(self.content_types[format])
except __HOLE__:
raise ImproperlyConfigured("Content type for specified type '%s' not found. Please provide it at either the class level or via the arguments." % format) | KeyError | dataset/ETHPy150Open mozilla/inventory/vendor-local/src/django-tastypie/tastypie/serializers.py/Serializer.__init__ |
def get_mime_for_format(self, format):
"""
Given a format, attempts to determine the correct MIME type.
If not available on the current ``Serializer``, returns
``application/json`` by default.
"""
try:
return self.content_types[format]
except __HOLE__:
return 'application/json' | KeyError | dataset/ETHPy150Open mozilla/inventory/vendor-local/src/django-tastypie/tastypie/serializers.py/Serializer.get_mime_for_format |
def AskUser(text, choices=None):
"""Ask the user a question.
@param text: the question to ask
@param choices: list with elements tuples (input_char, return_value,
description); if not given, it will default to: [('y', True,
'Perform the operation'), ('n', False, 'Do not do the operation')];
note that the '?' char is reserved for help
@return: one of the return values from the choices list; if input is
not possible (i.e. not running with a tty), we return the last
entry from the list
"""
if choices is None:
choices = [("y", True, "Perform the operation"),
("n", False, "Do not perform the operation")]
if not choices or not isinstance(choices, list):
raise errors.ProgrammerError("Invalid choices argument to AskUser")
for entry in choices:
if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
raise errors.ProgrammerError("Invalid choices element to AskUser")
answer = choices[-1][1]
new_text = []
for line in text.splitlines():
new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
text = "\n".join(new_text)
try:
f = file("/dev/tty", "a+")
except __HOLE__:
return answer
try:
chars = [entry[0] for entry in choices]
chars[-1] = "[%s]" % chars[-1]
chars.append("?")
maps = dict([(entry[0], entry[1]) for entry in choices])
while True:
f.write(text)
f.write("\n")
f.write("/".join(chars))
f.write(": ")
line = f.readline(2).strip().lower()
if line in maps:
answer = maps[line]
break
elif line == "?":
for entry in choices:
f.write(" %s - %s\n" % (entry[0], entry[2]))
f.write("\n")
continue
finally:
f.close()
return answer | IOError | dataset/ETHPy150Open ganeti/ganeti/lib/cli.py/AskUser |
def GenericMain(commands, override=None, aliases=None,
env_override=frozenset()):
"""Generic main function for all the gnt-* commands.
@param commands: a dictionary with a special structure, see the design doc
for command line handling.
@param override: if not None, we expect a dictionary with keys that will
override command line options; this can be used to pass
options from the scripts to generic functions
@param aliases: dictionary with command aliases {'alias': 'target', ...}
@param env_override: list of environment names which are allowed to submit
default args for commands
"""
# save the program name and the entire command line for later logging
if sys.argv:
binary = os.path.basename(sys.argv[0])
if not binary:
binary = sys.argv[0]
if len(sys.argv) >= 2:
logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
else:
logname = binary
cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
else:
binary = "<unknown program>"
cmdline = "<unknown>"
if aliases is None:
aliases = {}
try:
(func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
env_override)
except _ShowVersion:
ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
constants.RELEASE_VERSION)
return constants.EXIT_SUCCESS
except _ShowUsage, err:
for line in _FormatUsage(binary, commands):
ToStdout(line)
if err.exit_error:
return constants.EXIT_FAILURE
else:
return constants.EXIT_SUCCESS
except errors.ParameterError, err:
result, err_msg = FormatError(err)
ToStderr(err_msg)
return 1
if func is None: # parse error
return 1
if override is not None:
for key, val in override.iteritems():
setattr(options, key, val)
utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
stderr_logging=True)
logging.debug("Command line: %s", cmdline)
try:
result = func(options, args)
except (errors.GenericError, rpcerr.ProtocolError,
JobSubmittedException), err:
result, err_msg = FormatError(err)
logging.exception("Error during command processing")
ToStderr(err_msg)
except KeyboardInterrupt:
result = constants.EXIT_FAILURE
ToStderr("Aborted. Note that if the operation created any jobs, they"
" might have been submitted and"
" will continue to run in the background.")
except __HOLE__, err:
if err.errno == errno.EPIPE:
# our terminal went away, we'll exit
sys.exit(constants.EXIT_FAILURE)
else:
raise
return result | IOError | dataset/ETHPy150Open ganeti/ganeti/lib/cli.py/GenericMain |
def ParseNicOption(optvalue):
"""Parses the value of the --net option(s).
"""
try:
nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
except (TypeError, __HOLE__), err:
raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
errors.ECODE_INVAL)
nics = [{}] * nic_max
for nidx, ndict in optvalue:
nidx = int(nidx)
if not isinstance(ndict, dict):
raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
" got %s" % (nidx, ndict), errors.ECODE_INVAL)
utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
nics[nidx] = ndict
return nics | ValueError | dataset/ETHPy150Open ganeti/ganeti/lib/cli.py/ParseNicOption |
def FixHvParams(hvparams):
# In Ganeti 2.8.4 the separator for the usb_devices hvparam was changed from
# comma to space because commas cannot be accepted on the command line
# (they already act as the separator between different hvparams). Still,
# RAPI should be able to accept commas for backwards compatibility.
# Therefore, we convert spaces into commas here, and we keep the old
# parsing logic everywhere else.
try:
new_usb_devices = hvparams[constants.HV_USB_DEVICES].replace(" ", ",")
hvparams[constants.HV_USB_DEVICES] = new_usb_devices
except __HOLE__:
#No usb_devices, no modification required
pass | KeyError | dataset/ETHPy150Open ganeti/ganeti/lib/cli.py/FixHvParams |
def GenericInstanceCreate(mode, opts, args):
"""Add an instance to the cluster via either creation or import.
@param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the new instance name
@rtype: int
@return: the desired exit code
"""
instance = args[0]
forthcoming = opts.ensure_value("forthcoming", False)
commit = opts.ensure_value("commit", False)
if forthcoming and commit:
raise errors.OpPrereqError("Creating an instance only forthcoming and"
" commiting it are mutally exclusive",
errors.ECODE_INVAL)
(pnode, snode) = SplitNodeOption(opts.node)
hypervisor = None
hvparams = {}
if opts.hypervisor:
hypervisor, hvparams = opts.hypervisor
if opts.nics:
nics = ParseNicOption(opts.nics)
elif opts.no_nics:
# no nics
nics = []
elif mode == constants.INSTANCE_CREATE:
# default of one nic, all auto
nics = [{}]
else:
# mode == import
nics = []
if opts.disk_template == constants.DT_DISKLESS:
if opts.disks or opts.sd_size is not None:
raise errors.OpPrereqError("Diskless instance but disk"
" information passed", errors.ECODE_INVAL)
disks = []
else:
if (not opts.disks and not opts.sd_size
and mode == constants.INSTANCE_CREATE):
raise errors.OpPrereqError("No disk information specified",
errors.ECODE_INVAL)
if opts.disks and opts.sd_size is not None:
raise errors.OpPrereqError("Please use either the '--disk' or"
" '-s' option", errors.ECODE_INVAL)
if opts.sd_size is not None:
opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
if opts.disks:
try:
disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
except __HOLE__, err:
raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
errors.ECODE_INVAL)
disks = [{}] * disk_max
else:
disks = []
for didx, ddict in opts.disks:
didx = int(didx)
if not isinstance(ddict, dict):
msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
elif constants.IDISK_SIZE in ddict:
if constants.IDISK_ADOPT in ddict:
raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
" (disk %d)" % didx, errors.ECODE_INVAL)
try:
ddict[constants.IDISK_SIZE] = \
utils.ParseUnit(ddict[constants.IDISK_SIZE])
except ValueError, err:
raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
(didx, err), errors.ECODE_INVAL)
elif constants.IDISK_ADOPT in ddict:
if constants.IDISK_SPINDLES in ddict:
raise errors.OpPrereqError("spindles is not a valid option when"
" adopting a disk", errors.ECODE_INVAL)
if mode == constants.INSTANCE_IMPORT:
raise errors.OpPrereqError("Disk adoption not allowed for instance"
" import", errors.ECODE_INVAL)
ddict[constants.IDISK_SIZE] = 0
else:
raise errors.OpPrereqError("Missing size or adoption source for"
" disk %d" % didx, errors.ECODE_INVAL)
if constants.IDISK_SPINDLES in ddict:
ddict[constants.IDISK_SPINDLES] = int(ddict[constants.IDISK_SPINDLES])
disks[didx] = ddict
if opts.tags is not None:
tags = opts.tags.split(",")
else:
tags = []
utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
FixHvParams(hvparams)
osparams_private = opts.osparams_private or serializer.PrivateDict()
osparams_secret = opts.osparams_secret or serializer.PrivateDict()
helper_startup_timeout = opts.helper_startup_timeout
helper_shutdown_timeout = opts.helper_shutdown_timeout
if mode == constants.INSTANCE_CREATE:
start = opts.start
os_type = opts.os
force_variant = opts.force_variant
src_node = None
src_path = None
no_install = opts.no_install
identify_defaults = False
compress = constants.IEC_NONE
if opts.instance_communication is None:
instance_communication = False
else:
instance_communication = opts.instance_communication
elif mode == constants.INSTANCE_IMPORT:
if forthcoming:
raise errors.OpPrereqError("forthcoming instances can only be created,"
" not imported")
start = False
os_type = None
force_variant = False
src_node = opts.src_node
src_path = opts.src_dir
no_install = None
identify_defaults = opts.identify_defaults
compress = opts.compress
instance_communication = False
else:
raise errors.ProgrammerError("Invalid creation mode %s" % mode)
op = opcodes.OpInstanceCreate(
forthcoming=forthcoming,
commit=commit,
instance_name=instance,
disks=disks,
disk_template=opts.disk_template,
group_name=opts.nodegroup,
nics=nics,
conflicts_check=opts.conflicts_check,
pnode=pnode, snode=snode,
ip_check=opts.ip_check,
name_check=opts.name_check,
wait_for_sync=opts.wait_for_sync,
file_storage_dir=opts.file_storage_dir,
file_driver=opts.file_driver,
iallocator=opts.iallocator,
hypervisor=hypervisor,
hvparams=hvparams,
beparams=opts.beparams,
osparams=opts.osparams,
osparams_private=osparams_private,
osparams_secret=osparams_secret,
mode=mode,
opportunistic_locking=opts.opportunistic_locking,
start=start,
os_type=os_type,
force_variant=force_variant,
src_node=src_node,
src_path=src_path,
compress=compress,
tags=tags,
no_install=no_install,
identify_defaults=identify_defaults,
ignore_ipolicy=opts.ignore_ipolicy,
instance_communication=instance_communication,
helper_startup_timeout=helper_startup_timeout,
helper_shutdown_timeout=helper_shutdown_timeout)
SubmitOrSend(op, opts)
return 0 | ValueError | dataset/ETHPy150Open ganeti/ganeti/lib/cli.py/GenericInstanceCreate |
def GenerateTable(headers, fields, separator, data,
numfields=None, unitfields=None,
units=None):
"""Prints a table with headers and different fields.
@type headers: dict
@param headers: dictionary mapping field names to headers for
the table
@type fields: list
@param fields: the field names corresponding to each row in
the data field
@param separator: the separator to be used; if this is None,
the default 'smart' algorithm is used which computes optimal
field width, otherwise just the separator is used between
each field
@type data: list
@param data: a list of lists, each sublist being one row to be output
@type numfields: list
@param numfields: a list with the fields that hold numeric
values and thus should be right-aligned
@type unitfields: list
@param unitfields: a list with the fields that hold numeric
values that should be formatted with the units field
@type units: string or None
@param units: the units we should use for formatting, or None for
automatic choice (human-readable for non-separator usage, otherwise
megabytes); this is a one-letter string
"""
if units is None:
if separator:
units = "m"
else:
units = "h"
if numfields is None:
numfields = []
if unitfields is None:
unitfields = []
numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
format_fields = []
for field in fields:
if headers and field not in headers:
# TODO: handle better unknown fields (either revert to old
# style of raising exception, or deal more intelligently with
# variable fields)
headers[field] = field
if separator is not None:
format_fields.append("%s")
elif numfields.Matches(field):
format_fields.append("%*s")
else:
format_fields.append("%-*s")
if separator is None:
mlens = [0 for name in fields]
format_str = " ".join(format_fields)
else:
format_str = separator.replace("%", "%%").join(format_fields)
for row in data:
if row is None:
continue
for idx, val in enumerate(row):
if unitfields.Matches(fields[idx]):
try:
val = int(val)
except (TypeError, __HOLE__):
pass
else:
val = row[idx] = utils.FormatUnit(val, units)
val = row[idx] = str(val)
if separator is None:
mlens[idx] = max(mlens[idx], len(val))
result = []
if headers:
args = []
for idx, name in enumerate(fields):
hdr = headers[name]
if separator is None:
mlens[idx] = max(mlens[idx], len(hdr))
args.append(mlens[idx])
args.append(hdr)
result.append(format_str % tuple(args))
if separator is None:
assert len(mlens) == len(fields)
if fields and not numfields.Matches(fields[-1]):
mlens[-1] = 0
for line in data:
args = []
if line is None:
line = ["-" for _ in fields]
for idx in range(len(fields)):
if separator is None:
args.append(mlens[idx])
args.append(line[idx])
result.append(format_str % tuple(args))
return result | ValueError | dataset/ETHPy150Open ganeti/ganeti/lib/cli.py/GenerateTable |
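# A minimal, standalone sketch of the "smart" column-width idea described in the
# docstring above: measure the widest cell per column (headers included), then pad.
# Names and sample data below are illustrative only, not ganeti code.
def format_rows(headers, rows):
    widths = [max(len(str(cell)) for cell in column)
              for column in zip(headers, *rows)]
    lines = ["  ".join("%-*s" % (w, h) for w, h in zip(widths, headers))]
    for row in rows:
        lines.append("  ".join("%-*s" % (w, str(c)) for w, c in zip(widths, row)))
    return lines
print("\n".join(format_rows(["Node", "Mem"], [["node1", 512], ["node2", 2048]])))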
def FormatResultError(status, verbose):
"""Formats result status other than L{constants.RS_NORMAL}.
@param status: The result status
@type verbose: boolean
@param verbose: Whether to return the verbose text
@return: Text of result status
"""
assert status != constants.RS_NORMAL, \
"FormatResultError called with status equal to constants.RS_NORMAL"
try:
(verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
except __HOLE__:
raise NotImplementedError("Unknown status %s" % status)
else:
if verbose:
return verbose_text
return normal_text | KeyError | dataset/ETHPy150Open ganeti/ganeti/lib/cli.py/FormatResultError |
def ParseTimespec(value):
"""Parse a time specification.
The following suffixes will be recognized:
- s: seconds
- m: minutes
- h: hours
- d: days
- w: weeks
Without any suffix, the value will be taken to be in seconds.
"""
value = str(value)
if not value:
raise errors.OpPrereqError("Empty time specification passed",
errors.ECODE_INVAL)
suffix_map = {
"s": 1,
"m": 60,
"h": 3600,
"d": 86400,
"w": 604800,
}
if value[-1] not in suffix_map:
try:
value = int(value)
except (__HOLE__, ValueError):
raise errors.OpPrereqError("Invalid time specification '%s'" % value,
errors.ECODE_INVAL)
else:
multiplier = suffix_map[value[-1]]
value = value[:-1]
if not value: # no data left after stripping the suffix
raise errors.OpPrereqError("Invalid time specification (only"
" suffix passed)", errors.ECODE_INVAL)
try:
value = int(value) * multiplier
except (TypeError, ValueError):
raise errors.OpPrereqError("Invalid time specification '%s'" % value,
errors.ECODE_INVAL)
return value | TypeError | dataset/ETHPy150Open ganeti/ganeti/lib/cli.py/ParseTimespec |
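# A self-contained sketch of the same suffix-to-seconds parsing idea (stdlib only;
# the helper name is hypothetical and error handling is omitted).
_TIME_SUFFIXES = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800}
def parse_timespec(value):
    value = str(value)
    if value and value[-1] in _TIME_SUFFIXES:
        return int(value[:-1]) * _TIME_SUFFIXES[value[-1]]
    return int(value)
assert parse_timespec("2h") == 7200
assert parse_timespec("90") == 90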
def _ToStream(stream, txt, *args):
"""Write a message to a stream, bypassing the logging system
@type stream: file object
@param stream: the file to which we should write
@type txt: str
@param txt: the message
"""
try:
if args:
args = tuple(args)
stream.write(txt % args)
else:
stream.write(txt)
stream.write("\n")
stream.flush()
except __HOLE__, err:
if err.errno == errno.EPIPE:
# our terminal went away, we'll exit
sys.exit(constants.EXIT_FAILURE)
else:
raise | IOError | dataset/ETHPy150Open ganeti/ganeti/lib/cli.py/_ToStream |
def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
ispecs_disk_count, ispecs_disk_size,
ispecs_nic_count, group_ipolicy, fill_all):
try:
if ispecs_mem_size:
ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
if ispecs_disk_size:
ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
except (__HOLE__, ValueError, errors.UnitParseError), err:
raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
" in policy: %s" %
(ispecs_disk_size, ispecs_mem_size, err),
errors.ECODE_INVAL)
# prepare ipolicy dict
ispecs_transposed = {
constants.ISPEC_MEM_SIZE: ispecs_mem_size,
constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
constants.ISPEC_DISK_COUNT: ispecs_disk_count,
constants.ISPEC_DISK_SIZE: ispecs_disk_size,
constants.ISPEC_NIC_COUNT: ispecs_nic_count,
}
# first, check that the values given are correct
if group_ipolicy:
forced_type = TISPECS_GROUP_TYPES
else:
forced_type = TISPECS_CLUSTER_TYPES
for specs in ispecs_transposed.values():
assert type(specs) is dict
utils.ForceDictType(specs, forced_type)
# then transpose
ispecs = {
constants.ISPECS_MIN: {},
constants.ISPECS_MAX: {},
constants.ISPECS_STD: {},
}
for (name, specs) in ispecs_transposed.iteritems():
assert name in constants.ISPECS_PARAMETERS
for key, val in specs.items(): # {min: .. ,max: .., std: ..}
assert key in ispecs
ispecs[key][name] = val
minmax_out = {}
for key in constants.ISPECS_MINMAX_KEYS:
if fill_all:
minmax_out[key] = \
objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
else:
minmax_out[key] = ispecs[key]
ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
if fill_all:
ipolicy[constants.ISPECS_STD] = \
objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
ispecs[constants.ISPECS_STD])
else:
ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD] | TypeError | dataset/ETHPy150Open ganeti/ganeti/lib/cli.py/_InitISpecsFromSplitOpts |
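# A standalone sketch of the "transpose" step above: per-parameter {min,max,std}
# dicts become per-bound dicts keyed by parameter name (values are illustrative).
specs_by_param = {
    "memory-size": {"min": 128, "max": 4096, "std": 512},
    "cpu-count": {"min": 1, "max": 8, "std": 2},
}
ispecs = {"min": {}, "max": {}, "std": {}}
for name, spec in specs_by_param.items():
    for bound, val in spec.items():
        ispecs[bound][name] = val
print(ispecs["min"])  # {'memory-size': 128, 'cpu-count': 1}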
def _ParseSpecUnit(spec, keyname):
ret = spec.copy()
for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
if k in ret:
try:
ret[k] = utils.ParseUnit(ret[k])
except (__HOLE__, ValueError, errors.UnitParseError), err:
raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
" specs: %s" % (k, ret[k], keyname, err)),
errors.ECODE_INVAL)
return ret | TypeError | dataset/ETHPy150Open ganeti/ganeti/lib/cli.py/_ParseSpecUnit |
def _prep_values(self, values=None, kill_inf=True, how=None):
if values is None:
values = getattr(self._selected_obj, 'values', self._selected_obj)
# GH #12373 : rolling functions error on float32 data
# make sure the data is coerced to float64
if com.is_float_dtype(values.dtype):
values = com._ensure_float64(values)
elif com.is_integer_dtype(values.dtype):
values = com._ensure_float64(values)
elif com.needs_i8_conversion(values.dtype):
raise NotImplementedError("ops for {action} for this "
"dtype {dtype} are not "
"implemented".format(
action=self._window_type,
dtype=values.dtype))
else:
try:
values = com._ensure_float64(values)
except (__HOLE__, TypeError):
raise TypeError("cannot handle this type -> {0}"
"".format(values.dtype))
if kill_inf:
values = values.copy()
values[np.isinf(values)] = np.NaN
return values | ValueError | dataset/ETHPy150Open pydata/pandas/pandas/core/window.py/_Window._prep_values |
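# The essential coercion performed above, as a standalone numpy sketch: promote to
# float64 for the cython kernels, then blank out infinities (the kill_inf step).
import numpy as np
vals = np.array([1.0, np.inf, 3.0], dtype="float32")
vals = vals.astype("float64")
vals[np.isinf(vals)] = np.nan
print(vals)  # [  1.  nan   3.]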
def validate(self):
super(Window, self).validate()
window = self.window
if isinstance(window, (list, tuple, np.ndarray)):
pass
elif com.is_integer(window):
try:
import scipy.signal as sig
except __HOLE__:
raise ImportError('Please install scipy to generate window '
'weight')
if not isinstance(self.win_type, compat.string_types):
raise ValueError('Invalid win_type {0}'.format(self.win_type))
if getattr(sig, self.win_type, None) is None:
raise ValueError('Invalid win_type {0}'.format(self.win_type))
else:
raise ValueError('Invalid window {0}'.format(window)) | ImportError | dataset/ETHPy150Open pydata/pandas/pandas/core/window.py/Window.validate |
def _apply_window(self, mean=True, how=None, **kwargs):
"""
Applies a moving window of type ``win_type`` on the data.
Parameters
----------
mean : boolean, default True
If True computes weighted mean, else weighted sum
how : string, default to None (DEPRECATED)
how to resample
Returns
-------
y : type of input argument
"""
window = self._prep_window(**kwargs)
center = self.center
blocks, obj = self._create_blocks(how=how)
results = []
for b in blocks:
try:
values = self._prep_values(b.values)
except __HOLE__:
results.append(b.values.copy())
continue
if values.size == 0:
results.append(values.copy())
continue
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, len(window))
return algos.roll_window(np.concatenate((arg, additional_nans))
if center else arg, window, minp,
avg=mean)
result = np.apply_along_axis(f, self.axis, values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, blocks, obj) | TypeError | dataset/ETHPy150Open pydata/pandas/pandas/core/window.py/Window._apply_window |
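# Illustrative use of the public API that reaches this code path (a sketch; the
# win_type windows additionally require scipy to be installed).
import pandas as pd
s = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])
print(s.rolling(window=3, win_type="triang").mean())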
def _apply(self, func, name=None, window=None, center=None,
check_minp=None, how=None, **kwargs):
"""
Rolling statistical measure using supplied function. Designed to be
used with passed-in Cython array-based functions.
Parameters
----------
func : string/callable to apply
name : string, optional
name of this function
window : int/array, default to _get_window()
center : boolean, default to self.center
check_minp : function, default to _use_window
how : string, default to None (DEPRECATED)
how to resample
Returns
-------
y : type of input
"""
if center is None:
center = self.center
if window is None:
window = self._get_window()
if check_minp is None:
check_minp = _use_window
blocks, obj = self._create_blocks(how=how)
results = []
for b in blocks:
try:
values = self._prep_values(b.values)
except __HOLE__:
results.append(b.values.copy())
continue
if values.size == 0:
results.append(values.copy())
continue
# if we have a string function name, wrap it
if isinstance(func, compat.string_types):
if not hasattr(algos, func):
raise ValueError("we do not support this function "
"algos.{0}".format(func))
cfunc = getattr(algos, func)
def func(arg, window, min_periods=None):
minp = check_minp(min_periods, window)
# GH #12373: rolling functions error on float32 data
return cfunc(com._ensure_float64(arg),
window, minp, **kwargs)
# calculation function
if center:
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def calc(x):
return func(np.concatenate((x, additional_nans)),
window, min_periods=self.min_periods)
else:
def calc(x):
return func(x, window, min_periods=self.min_periods)
if values.ndim > 1:
result = np.apply_along_axis(calc, self.axis, values)
else:
result = calc(values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, blocks, obj) | TypeError | dataset/ETHPy150Open pydata/pandas/pandas/core/window.py/_Rolling._apply |
def count(self):
obj = self._convert_freq()
window = self._get_window()
window = min(window, len(obj)) if not self.center else window
blocks, obj = self._create_blocks(how=None)
results = []
for b in blocks:
if com.needs_i8_conversion(b.values):
result = b.notnull().astype(int)
else:
try:
result = np.isfinite(b).astype(float)
except __HOLE__:
result = np.isfinite(b.astype(float)).astype(float)
result[pd.isnull(result)] = 0
result = self._constructor(result, window=window, min_periods=0,
center=self.center).sum()
results.append(result)
return self._wrap_results(results, blocks, obj) | TypeError | dataset/ETHPy150Open pydata/pandas/pandas/core/window.py/_Rolling_and_Expanding.count |
def _apply(self, func, how=None, **kwargs):
"""Rolling statistical measure using supplied function. Designed to be
used with passed-in Cython array-based functions.
Parameters
----------
func : string/callable to apply
how : string, default to None (DEPRECATED)
how to resample
Returns
-------
y : type of input argument
"""
blocks, obj = self._create_blocks(how=how)
results = []
for b in blocks:
try:
values = self._prep_values(b.values)
except __HOLE__:
results.append(b.values.copy())
continue
if values.size == 0:
results.append(values.copy())
continue
# if we have a string function name, wrap it
if isinstance(func, compat.string_types):
if not hasattr(algos, func):
raise ValueError("we do not support this function "
"algos.{0}".format(func))
cfunc = getattr(algos, func)
def func(arg):
return cfunc(arg, self.com, int(self.adjust),
int(self.ignore_na), int(self.min_periods))
results.append(np.apply_along_axis(func, self.axis, values))
return self._wrap_results(results, blocks, obj) | TypeError | dataset/ETHPy150Open pydata/pandas/pandas/core/window.py/EWM._apply |
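# A hedged sketch of the exponentially weighted API whose per-block work is done above.
import pandas as pd
s = pd.Series([1.0, 2.0, 3.0, 4.0])
print(s.ewm(com=0.5).mean())  # exponentially weighted moving average
print(s.ewm(span=3).std())    # same machinery, different decay specification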
def render(input, saltenv='base', sls='', argline='', **kws):
gen_start_state = False
no_goal_state = False
implicit_require = False
def process_sls_data(data, context=None, extract=False):
sls_dir = ospath.dirname(sls.replace('.', ospath.sep)) if '.' in sls else sls
ctx = dict(sls_dir=sls_dir if sls_dir else '.')
if context:
ctx.update(context)
tmplout = render_template(
StringIO(data), saltenv, sls, context=ctx,
argline=rt_argline.strip(), **kws
)
high = render_data(tmplout, saltenv, sls, argline=rd_argline.strip())
return process_high_data(high, extract)
def process_high_data(high, extract):
# make a copy so that the original, un-preprocessed highstate data
# structure can be used later for error checking if anything goes
# wrong during the preprocessing.
data = copy.deepcopy(high)
try:
rewrite_single_shorthand_state_decl(data)
rewrite_sls_includes_excludes(data, sls, saltenv)
if not extract and implicit_require:
sid = has_names_decls(data)
if sid:
raise SaltRenderError(
'\'names\' declaration(found in state id: {0}) is '
'not supported with implicitly ordered states! You '
'should generate the states in a template for-loop '
'instead.'.format(sid)
)
add_implicit_requires(data)
if gen_start_state:
add_start_state(data, sls)
if not extract and not no_goal_state:
add_goal_state(data)
rename_state_ids(data, sls)
# We must extract no matter what so extending a stateconf sls file
# works!
extract_state_confs(data)
except SaltRenderError:
raise
except Exception as err:
log.exception(
'Error found while pre-processing the salt file '
'{0}:\n{1}'.format(sls, err)
)
from salt.state import State
state = State(__opts__)
errors = state.verify_high(high)
if errors:
raise SaltRenderError('\n'.join(errors))
raise SaltRenderError('sls preprocessing/rendering failed!')
return data
# ----------------------
renderers = kws['renderers']
opts, args = getopt.getopt(argline.split(), 'Gosp')
argline = ' '.join(args) if args else 'yaml . jinja'
if ('-G', '') in opts:
no_goal_state = True
if ('-o', '') in opts:
implicit_require = True
if ('-s', '') in opts:
gen_start_state = True
if ('-p', '') in opts:
data = process_high_data(input, extract=False)
else:
# Split on the first dot surrounded by spaces but not preceded by a
# backslash. A backslash preceded dot will be replaced with just dot.
args = [
arg.strip().replace('\\.', '.')
for arg in re.split(r'\s+(?<!\\)\.\s+', argline, 1)
]
try:
name, rd_argline = (args[0] + ' ').split(' ', 1)
render_data = renderers[name] # e.g., the yaml renderer
if implicit_require:
if name == 'yaml':
rd_argline = '-o ' + rd_argline
else:
raise SaltRenderError(
'Implicit ordering is only supported if the yaml renderer '
'is used!'
)
name, rt_argline = (args[1] + ' ').split(' ', 1)
render_template = renderers[name] # e.g., the mako renderer
except __HOLE__ as err:
raise SaltRenderError('Renderer: {0} is not available!'.format(err))
except IndexError:
raise INVALID_USAGE_ERROR
if isinstance(input, six.string_types):
with salt.utils.fopen(input, 'r') as ifile:
sls_templ = ifile.read()
else: # assume file-like
sls_templ = input.read()
# first pass to extract the state configuration
match = re.search(__opts__['stateconf_end_marker'], sls_templ)
if match:
process_sls_data(sls_templ[:match.start()], extract=True)
# if some config has been extracted then remove the sls-name prefix
# of the keys in the extracted stateconf.set context to make them easier
# to use in the salt file.
if STATE_CONF:
tmplctx = STATE_CONF.copy()
if tmplctx:
prefix = sls + '::'
for k in six.iterkeys(tmplctx): # iterate over a copy of keys
if k.startswith(prefix):
tmplctx[k[len(prefix):]] = tmplctx[k]
del tmplctx[k]
else:
tmplctx = {}
# do a second pass that provides the extracted conf as template context
data = process_sls_data(sls_templ, tmplctx)
if log.isEnabledFor(logging.DEBUG):
import pprint # FIXME: pprint OrderedDict
log.debug('Rendered sls: {0}'.format(pprint.pformat(data)))
return data | KeyError | dataset/ETHPy150Open saltstack/salt/salt/renderers/stateconf.py/render |
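# A tiny sketch of how this renderer's argline is split by the getopt call above
# (values are hypothetical): "-os" enables implicit ordering and a start state,
# and the remainder names the data and template renderers.
import getopt
opts, args = getopt.getopt("-os yaml . jinja".split(), "Gosp")
print(opts)  # [('-o', ''), ('-s', '')]
print(args)  # ['yaml', '.', 'jinja']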
def add_implicit_requires(data):
def T(sid, state): # pylint: disable=C0103
return '{0}:{1}'.format(sid, state_name(state))
states_before = set()
states_after = set()
for sid in data:
for state in data[sid]:
states_after.add(T(sid, state))
prev_state = (None, None) # (state_name, sid)
for sid, states, sname, args in statelist(data):
if sid == 'extend':
for esid, _, _, eargs in statelist(states):
for _, rstate, rsid in nvlist2(eargs, REQUIRE):
EXTENDED_REQUIRE.setdefault(
T(esid, rstate), []).append((None, rstate, rsid))
for _, rstate, rsid in nvlist2(eargs, REQUIRE_IN):
EXTENDED_REQUIRE_IN.setdefault(
T(esid, rstate), []).append((None, rstate, rsid))
continue
tag = T(sid, sname)
states_after.remove(tag)
reqs = nvlist2(args, REQUIRE)
if tag in EXTENDED_REQUIRE:
reqs = chain(reqs, EXTENDED_REQUIRE[tag])
for _, rstate, rsid in reqs:
if T(rsid, rstate) in states_after:
raise SaltRenderError(
'State({0}) can\'t require/watch a state({1}) defined '
'after it!'.format(tag, T(rsid, rstate))
)
reqs = nvlist2(args, REQUIRE_IN)
if tag in EXTENDED_REQUIRE_IN:
reqs = chain(reqs, EXTENDED_REQUIRE_IN[tag])
for _, rstate, rsid in reqs:
if T(rsid, rstate) in states_before:
raise SaltRenderError(
'State({0}) can\'t require_in/watch_in a state({1}) '
'defined before it!'.format(tag, T(rsid, rstate))
)
# add a (- state: sid) item, at the beginning of the require of this
# state if there's a state before this one.
if prev_state[0] is not None:
try:
next(nvlist(args, ['require']))[2].insert(0, dict([prev_state]))
except __HOLE__: # i.e., there's no require
args.append(dict(require=[dict([prev_state])]))
states_before.add(tag)
prev_state = (state_name(sname), sid) | StopIteration | dataset/ETHPy150Open saltstack/salt/salt/renderers/stateconf.py/add_implicit_requires |
def get_version_delta_info(self, block_version):
if block_version.time == -1:
return None
try:
return self._dev_versions[block_version]
except __HOLE__:
pass
assert block_version.time is not None
try:
delta = self._store.get_delta_info(block_version)
if delta.tag == DEV:
try:
ndelta = self._restapi_manager.get_version_delta_info(block_version)
if delta != ndelta:
self._store.remove_dev_references(block_version)
self._store.upsert_delta_info(block_version, ndelta)
delta = ndelta
if ndelta.tag == DEV:
self._out.info("Dev version of %s has been updated"
% str(block_version))
except (ConnectionErrorException, NotFoundException) as e:
self._out.warn('You depend on DEV version "%s", but unable to '
'check updates in server: %s'
% (str(block_version), str(e)))
except NotInStoreException:
delta = self._restapi_manager.get_version_delta_info(block_version)
if delta.tag == DEV: # Ensure we delete the references we can have because they can be outdated
self._store.remove_dev_references(block_version)
self._store.upsert_delta_info(block_version, delta)
self._dev_versions[block_version] = delta
return delta | KeyError | dataset/ETHPy150Open biicode/client/api/biiapi_proxy.py/BiiAPIProxy.get_version_delta_info |
def qs_delete(self, *keys):
'''Delete value from QuerySet MultiDict'''
query = self.query.copy()
for key in set(keys):
try:
del query[key]
except __HOLE__:
pass
return self._copy(query=query) | KeyError | dataset/ETHPy150Open SmartTeleMax/iktomi/iktomi/web/url.py/URL.qs_delete |
def _get_range(self, dim, start, end):
"""Create a :class:`Range` for the :class:`Chart`.
Args:
dim (str): the name of the dimension, which is an attribute of the builder
start: the starting value of the range
end: the ending value of the range
Returns:
:class:`Range`
"""
dim_ref = getattr(self, dim)
values = dim_ref.data
dtype = dim_ref.dtype.name
sort = self.sort_dim.get(dim)
# object data or single value
if dtype == 'object':
factors = values.drop_duplicates()
if sort:
# TODO (fpliger): this handles pandas API change so users do not experience
# the related annoying deprecation warning. This is probably worth
# removing when pandas deprecated version (0.16) is "old" enough
try:
factors.sort_values(inplace=True)
except __HOLE__:
factors.sort(inplace=True)
setattr(self, dim + 'scale', 'categorical')
return FactorRange(factors=factors.tolist())
elif 'datetime' in dtype:
setattr(self, dim + 'scale', 'datetime')
return Range1d(start=start, end=end)
else:
if end == 'None' or (end - start) == 0:
setattr(self, dim + 'scale', 'categorical')
return FactorRange(factors=['None'])
else:
diff = end - start
setattr(self, dim + 'scale', 'linear')
return Range1d(start=start - 0.1 * diff, end=end + 0.1 * diff) | AttributeError | dataset/ETHPy150Open bokeh/bokeh/bokeh/charts/builder.py/XYBuilder._get_range |
def read_manifest(self):
"""Read the manifest file (named by 'self.manifest') and use it to
fill in 'self.filelist', the list of files to include in the source
distribution.
"""
log.info("reading manifest file '%s'", self.manifest)
manifest = open(self.manifest, 'rbU')
for line in manifest:
# The manifest must contain UTF-8. See #303.
if sys.version_info >= (3,):
try:
line = line.decode('UTF-8')
except __HOLE__:
log.warn("%r not UTF-8 decodable -- skipping" % line)
continue
# ignore comments and blank lines
line = line.strip()
if line.startswith('#') or not line:
continue
self.filelist.append(line)
manifest.close()
# | UnicodeDecodeError | dataset/ETHPy150Open balanced/status.balancedpayments.com/venv/lib/python2.7/site-packages/distribute-0.6.34-py2.7.egg/setuptools/command/sdist.py/sdist.read_manifest |
def __init__(self, bulk_insert=1000, **kwargs):
try:
mysql = __import__('MySQLdb')
filterwarnings('ignore', category=mysql.Warning)
self.to_buffer = lambda buf: buf
except __HOLE__:
try:
mysql = __import__('pymysql')
self.to_buffer = str # pylint: disable=redefined-variable-type
except ImportError:
raise ImportError('The mysql-python or pymysql library '
'is required')
self.db = mysql.connect(**kwargs)
self.bulk_insert = bulk_insert
self.cursor = self.db.cursor() | ImportError | dataset/ETHPy150Open chriso/gauged/gauged/drivers/mysql.py/MySQLDriver.__init__ |
def preexec_setpgid_setrlimit(memory_limit):
if resource is not None:
def _preexec():
os.setpgid(0, 0)
try:
resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
except __HOLE__:
pass # No permission
if memory_limit:
try:
(soft, hard) = resource.getrlimit(resource.RLIMIT_AS)
resource.setrlimit(resource.RLIMIT_AS, (min(soft, memory_limit),
min(hard, memory_limit)))
except ValueError:
pass # No permission
return _preexec | ValueError | dataset/ETHPy150Open jansel/opentuner/opentuner/measurement/interface.py/preexec_setpgid_setrlimit |
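# A hedged, POSIX-only sketch of how such a preexec hook is typically handed to
# subprocess; the command and the 256 MiB limit are arbitrary examples.
import resource
import subprocess
def _limit_address_space(nbytes):
    def _preexec():
        resource.setrlimit(resource.RLIMIT_AS, (nbytes, nbytes))
    return _preexec
subprocess.call(["true"], preexec_fn=_limit_address_space(256 * 1024 * 1024))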
def goodwait(p):
"""
python doesn't check if its system calls return EINTR, retry if it does
"""
while True:
try:
rv = p.wait()
return rv
except __HOLE__, e:
if e.errno != errno.EINTR:
raise | OSError | dataset/ETHPy150Open jansel/opentuner/opentuner/measurement/interface.py/goodwait |
def optimizeTextures(mesh):
previous_images = []
for cimg in mesh.images:
previous_images.append(cimg.path)
pilimg = cimg.pilimage
#PIL doesn't support DDS, so if loading failed, try and load it as a DDS with panda3d
if pilimg is None:
imgdata = cimg.data
#if we can't even load the image's data, can't convert
if imgdata is None:
print >> sys.stderr, "Couldn't load image data"
continue
try:
from panda3d.core import Texture
from panda3d.core import StringStream
from panda3d.core import PNMImage
except ImportError:
#if panda3d isn't installed and PIL failed, can't convert
print >> sys.stderr, 'Tried loading image with PIL and DDS and both failed'
continue
t = Texture()
success = t.readDds(StringStream(imgdata))
if success == 0:
#failed to load as DDS, so let's give up
print >> sys.stderr, 'Tried loading image as DDS and failed'
continue
#convert DDS to PNG
outdata = t.getRamImageAs('RGB').getData()
try:
im = Image.fromstring('RGB', (t.getXSize(), t.getYSize()), outdata)
im.load()
except IOError:
#Any problem with panda3d might generate an invalid image buffer, so don't convert this
print >> sys.stderr, 'Problem loading DDS file with PIL'
continue
pilimg = im
if pilimg.format == 'JPEG':
#PIL image is already in JPG format so don't convert
continue
if 'A' in pilimg.getbands():
alpha = numpy.array(pilimg.split()[-1].getdata())
if not numpy.any(alpha < 255):
alpha = None
#this means that none of the pixels are using alpha, so convert to RGB
pilimg = pilimg.convert('RGB')
if 'A' in pilimg.getbands():
#save textures with an alpha channel in PNG
output_format = 'PNG'
output_extension = '.png'
output_options = {'optimize':True}
else:
if pilimg.format != 'RGB':
pilimg = pilimg.convert("RGB")
#otherwise save as JPEG since it gets better compression
output_format = 'JPEG'
output_extension = '.jpg'
output_options = {'quality':95, 'optimize':True}
if cimg.path.lower()[-len(output_extension):] != output_extension:
dot = cimg.path.rfind('.')
before_ext = cimg.path[0:dot] if dot != -1 else cimg.path
while before_ext + output_extension in previous_images:
before_ext = before_ext + '-x'
cimg.path = before_ext + output_extension
previous_images.append(cimg.path)
outbuf = StringIO()
try:
pilimg.save(outbuf, output_format, **output_options)
except __HOLE__, ex:
print ex
cimg.data = outbuf.getvalue() | IOError | dataset/ETHPy150Open pycollada/meshtool/meshtool/filters/optimize_filters/optimize_textures.py/optimizeTextures |
def on_mouse_motion(self, x, y, dx, dy):
cx, cy = x//hw, y//hh
try:
if (self.play_field[cx, cy] or self.play_field[cx+1, cy] or
self.play_field[cx, cy+1] or self.play_field[cx+1, cy+1]):
self.show_highlight = False
return True
except __HOLE__:
self.show_highlight = False
return True
self.show_highlight = True
self.highlight.position = cx*hw, cy*hh
return True | KeyError | dataset/ETHPy150Open ardekantur/pyglet/contrib/spryte/dtd/dtd.py/Game.on_mouse_motion |
def body_complete(self):
"""
Called when the body of the message is complete
NOINDEX
"""
try:
self.body = _decode_encoded(self._data_obj.body,
self._encoding_type)
except __HOLE__:
# Screw handling it gracefully, this is the server's fault.
print 'Error decoding request, storing raw data in body instead'
self.body = self._data_obj.body | IOError | dataset/ETHPy150Open roglew/pappy-proxy/pappyproxy/http.py/HTTPMessage.body_complete |
@property
def saved(self):
"""
If the request is saved in the data file
:getter: Returns True if the request is saved in the data file
:type: Bool
"""
if self.reqid is None:
return False
try:
_ = int(self.reqid)
return True
except (__HOLE__, TypeError):
return False | ValueError | dataset/ETHPy150Open roglew/pappy-proxy/pappyproxy/http.py/Request.saved |
@defer.inlineCallbacks
def async_save(self, cust_dbpool=None, cust_cache=None):
"""
async_save()
Save/update the request in the data file. Returns a twisted deferred which
fires when the save is complete.
:rtype: twisted.internet.defer.Deferred
"""
from .pappy import main_context
global dbpool
if cust_dbpool:
use_dbpool = cust_dbpool
use_cache = cust_cache
else:
use_dbpool = dbpool
use_cache = Request.cache
assert(use_dbpool)
if not self.reqid:
self.reqid = '--'
try:
# Check for intyness
_ = int(self.reqid)
# If we have reqid, we're updating
yield use_dbpool.runInteraction(self._update)
assert(self.reqid is not None)
yield use_dbpool.runInteraction(self._update_tags)
except (__HOLE__, TypeError):
# Either no id or in-memory
yield use_dbpool.runInteraction(self._insert)
assert(self.reqid is not None)
yield use_dbpool.runInteraction(self._update_tags)
if use_cache:
use_cache.add(self)
main_context.cache_reset() | ValueError | dataset/ETHPy150Open roglew/pappy-proxy/pappyproxy/http.py/Request.async_save |
@staticmethod
@defer.inlineCallbacks
def load_request(to_load, allow_special=True, use_cache=True, cust_dbpool=None, cust_cache=None):
"""
load_request(to_load)
Load a request with the given request id and return it.
Returns a deferred which calls back with the request when complete.
:param allow_special: Whether to allow special IDs such as ``u##`` or ``s##``
:type allow_special: bool
:param use_cache: Whether to use the cache. If set to false, it will always query the data file to get the request
:type use_cache: bool
:rtype: twisted.internet.defer.Deferred
"""
global dbpool
if cust_dbpool:
use_dbpool = cust_dbpool
cache_to_use = cust_cache
else:
use_dbpool = dbpool
cache_to_use = Request.cache
if not use_dbpool:
raise PappyException('No database connection to load from')
if to_load == '--':
raise PappyException('Invalid request ID. Wait for it to save first.')
if not allow_special:
try:
int(to_load)
except (ValueError, __HOLE__):
raise PappyException('Cannot load special id %s' % to_load)
ret_unmangled = False
rsp_unmangled = False
if to_load[0] == 'u':
ret_unmangled = True
loadid = to_load[1:]
elif to_load[0] == 's':
rsp_unmangled = True
loadid = to_load[1:]
else:
loadid = to_load
def retreq(r):
if ret_unmangled:
if not r.unmangled:
raise PappyException("Request %s was not mangled"%r.reqid)
return r.unmangled
if rsp_unmangled:
if not r.response:
raise PappyException("Request %s does not have a response" % r.reqid)
if not r.response.unmangled:
raise PappyException("Response to request %s was not mangled" % r.reqid)
r.response = r.response.unmangled
return r
else:
return r
# Get it through the cache
if use_cache and cache_to_use:
# If it's not cached, load_request will be called again and be told
# not to use the cache.
r = yield cache_to_use.get(loadid)
defer.returnValue(retreq(r))
# Load it from the data file
rows = yield use_dbpool.runQuery(
"""
SELECT %s
FROM requests
WHERE id=?;
""" % Request._gen_sql_row(),
(loadid,)
)
if len(rows) != 1:
raise PappyException("Request with id %s does not exist" % loadid)
req = yield Request._from_sql_row(rows[0], cust_dbpool=cust_dbpool, cust_cache=cust_cache)
assert req.reqid == loadid
if cache_to_use:
cache_to_use.add(req)
defer.returnValue(retreq(req))
######################
## Submitting Requests | TypeError | dataset/ETHPy150Open roglew/pappy-proxy/pappyproxy/http.py/Request.load_request |
@defer.inlineCallbacks
def async_save(self, cust_dbpool=None, cust_cache=None):
"""
async_save()
Save/update just the request in the data file. Returns a twisted deferred which
fires when the save is complete. It is suggested that you use
:func:`~pappyproxy.http.Request.async_deep_save` instead to save responses.
:rtype: twisted.internet.defer.Deferred
"""
global dbpool
if cust_dbpool:
use_dbpool = cust_dbpool
else:
use_dbpool = dbpool
assert(use_dbpool)
try:
# Check for intyness
_ = int(self.rspid)
# If we have rspid, we're updating
yield use_dbpool.runInteraction(self._update)
except (ValueError, __HOLE__):
yield use_dbpool.runInteraction(self._insert)
assert(self.rspid is not None)
# Right now responses without requests are unviewable
# @crochet.wait_for(timeout=180.0)
# @defer.inlineCallbacks
# def save(self):
# yield self.save() | TypeError | dataset/ETHPy150Open roglew/pappy-proxy/pappyproxy/http.py/Response.async_save |
def run(self):
print 'JPGVisLoadingThread.run called'
while not self.is_timed_out():
with self.state.lock:
if self.state.quit:
break
#print 'JPGVisLoadingThread.run: caffe_net_state is:', self.state.caffe_net_state
#print 'JPGVisLoadingThread.run loop: next_frame: %s, caffe_net_state: %s, back_enabled: %s' % (
# 'None' if self.state.next_frame is None else 'Avail',
# self.state.caffe_net_state,
# self.state.back_enabled)
jpgvis_to_load_key = self.state.jpgvis_to_load_key
if jpgvis_to_load_key is None:
time.sleep(self.loop_sleep)
continue
state_layer, state_selected_unit, data_shape = jpgvis_to_load_key
# Load three images:
images = [None] * 3
# Resize each component images only using one direction as
# a constraint. This is straightforward but could be very
# wasteful (making an image much larger then much smaller)
# if the proportions of the stacked image are very
# different from the proportions of the data pane.
#resize_shape = (None, data_shape[1]) if self.settings.caffevis_jpgvis_stack_vert else (data_shape[0], None)
# As a heuristic, instead just assume the three images are of the same shape.
if self.settings.caffevis_jpgvis_stack_vert:
resize_shape = (data_shape[0]/3, data_shape[1])
else:
resize_shape = (data_shape[0], data_shape[1]/3)
# 0. e.g. regularized_opt/conv1/conv1_0037_montage.jpg
jpg_path = os.path.join(self.settings.caffevis_unit_jpg_dir,
'regularized_opt',
state_layer,
'%s_%04d_montage.jpg' % (state_layer, state_selected_unit))
try:
img = caffe_load_image(jpg_path, color = True)
img_corner = crop_to_corner(img, 2)
images[0] = ensure_uint255_and_resize_to_fit(img_corner, resize_shape)
except IOError:
print '\nAttempted to load file %s but failed. To supress this warning, remove layer "%s" from settings.caffevis_jpgvis_layers' % (jpg_path, state_layer)
pass
# 1. e.g. max_im/conv1/conv1_0037.jpg
jpg_path = os.path.join(self.settings.caffevis_unit_jpg_dir,
'max_im',
state_layer,
'%s_%04d.jpg' % (state_layer, state_selected_unit))
try:
img = caffe_load_image(jpg_path, color = True)
images[1] = ensure_uint255_and_resize_to_fit(img, resize_shape)
except IOError:
pass
# 2. e.g. max_deconv/conv1/conv1_0037.jpg
try:
jpg_path = os.path.join(self.settings.caffevis_unit_jpg_dir,
'max_deconv',
state_layer,
'%s_%04d.jpg' % (state_layer, state_selected_unit))
img = caffe_load_image(jpg_path, color = True)
images[2] = ensure_uint255_and_resize_to_fit(img, resize_shape)
except __HOLE__:
pass
# Prune images that were not found:
images = [im for im in images if im is not None]
# Stack together
if len(images) > 0:
#print 'Stacking:', [im.shape for im in images]
stack_axis = 0 if self.settings.caffevis_jpgvis_stack_vert else 1
img_stacked = np.concatenate(images, axis = stack_axis)
#print 'Stacked:', img_stacked.shape
img_resize = ensure_uint255_and_resize_to_fit(img_stacked, data_shape)
#print 'Resized:', img_resize.shape
else:
img_resize = np.zeros(shape=(0,)) # Sentinal value when image is not found.
self.cache.set(jpgvis_to_load_key, img_resize)
with self.state.lock:
self.state.jpgvis_to_load_key = None
self.state.drawing_stale = True
print 'JPGVisLoadingThread.run: finished' | IOError | dataset/ETHPy150Open yosinski/deep-visualization-toolbox/caffevis/jpg_vis_loading_thread.py/JPGVisLoadingThread.run |
def curve_to_lut(colorspace, gamma, outlutfile, out_type=None, out_format=None,
input_range=None, output_range=None, out_bit_depth=None,
out_cube_size=None, verbose=False, direction=Direction.ENCODE,
preset=None, overwrite_preset=False,
process_input_range=False):
"""Export a LUT from a colorspace gradation function
Args:
colorspace (str): input colorspace. Mutually exclusive with gamma.
See list of colorspaces in utils.colorspaces
gamma (float): input gamma. Mutually exclusive with colorspace.
out_type (str): 1D, 2D or 3D
out_format (str): '3dl', 'csp', 'cube', 'lut', 'spi', 'clcc', 'json'...
outlutfile (str): path to output LUT
Kwargs:
input_range ([int/float, int/float]): input range.
Ex: [0.0, 1.0] or [0, 4095]
output_range ([int/float, int/float]): output range.
Ex: [0.0, 1.0] or [0, 4095]
out_bit_depth (int): output lut bit precision (1D only).
Ex : 10, 16, 32.
out_cube_size (int): output cube size (3D only). Ex : 17, 32.
verbose (bool): print log if true
direction (Direction): encode or decode
preset (dict): lut generic and sampling informations
process_input_range (bool): If true, input range will be computed from
colorspace gradation functions. Colorspace only.
"""
# get colorspace function
if colorspace is None and gamma is None:
raise AttributeError("A colorspace or a gamma should be specified")
if colorspace is not None and gamma is not None:
raise AttributeError("Choose between a colorspace or a gamma")
elif gamma is not None:
# gamma mode
if direction == Direction.DECODE:
gradation = lambda value: gamma_to_lin(value, gamma)
title = "Gamma{0}_to_lin".format(gamma)
else:
gradation = lambda value: lin_to_gamma(value, gamma)
title = "Lin_to_gamma{0}".format(gamma)
else:
# colorspace mode
try:
colorspace_obj = dict(COLORSPACES.items() +
PRIVATE_COLORSPACES.items())[colorspace]
except __HOLE__:
raise CurveToLUTException(("Unsupported {0} "
"Colorspace!").format(colorspace))
if direction == Direction.DECODE:
gradation = colorspace_obj.decode_gradation
title = "{0}_to_lin".format(colorspace)
else:
gradation = colorspace_obj.encode_gradation
title = "Lin_to_{0}".format(colorspace)
# get preset and write function
if preset:
write_function = get_write_function(preset, overwrite_preset,
out_type, out_format,
input_range,
output_range,
out_bit_depth,
out_cube_size,
verbose)
elif out_type is None or out_format is None:
raise CurveToLUTException("Specify out_type/out_format or a preset.")
else:
preset, write_function = get_preset_and_write_function(out_type,
out_format,
input_range,
output_range,
out_bit_depth,
out_cube_size)
if preset[presets.TYPE] == '3D':
print_warning_message(("Gradations and gamma functions are 1D / 2D"
" transformations. Baking them in a 3D LUT "
"may not be efficient. Are you sure ?"))
# process file output
if os.path.isdir(outlutfile):
filename = "{0}{1}".format(title,
preset[presets.EXT])
outlutfile = os.path.join(outlutfile, filename)
else:
try:
check_extension(outlutfile, preset[presets.EXT])
outlutfile = outlutfile
except LUTException as error:
raise CurveToLUTException(("Directory doesn't exist "
"or {0}").format(error))
preset[presets.TITLE] = title
if process_input_range:
if colorspace:
preset[presets.IN_RANGE] = get_input_range(colorspace_obj,
direction,
8)
else:
raise CurveToLUTException(("--process-input-range must be used"
" with --colorspace."))
if verbose:
print "{0} will be written in {1}.".format(title, outlutfile)
print "Final setting:\n{0}".format(presets.string_preset(preset))
# write
message = write_function(gradation, outlutfile, preset)
if verbose:
print_success_message(message) | KeyError | dataset/ETHPy150Open mikrosimage/ColorPipe-tools/lutLab/curve_to_lut.py/curve_to_lut |
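# Hypothetical invocation (the path and values are made up, and the lutLab package
# must be importable): bake a pure gamma-2.2 decode curve into a 16-bit 1D LUT.
curve_to_lut(colorspace=None, gamma=2.2, outlutfile="/tmp/gamma22_to_lin.csp",
             out_type="1D", out_format="csp", out_bit_depth=16,
             direction=Direction.DECODE, verbose=True)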
@classmethod
def from_uri(cls, uri):
# TODO(termie): very simplistic
#m = cls._re_jid.search(uri)
node, rest = uri.split('@', 1)
try:
host, rest = rest.split('/', 1)
resource = '/' + rest
except __HOLE__:
host = rest
resource = '/'
return cls(node, host, resource) | ValueError | dataset/ETHPy150Open CollabQ/CollabQ/common/protocol/xmpp.py/JID.from_uri |
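# An equivalent standalone parse of node@host/resource (sketch only; error handling
# for malformed JIDs is omitted).
def split_jid(uri):
    node, rest = uri.split("@", 1)
    host, _, resource = rest.partition("/")
    return node, host, ("/" + resource if resource else "/")
assert split_jid("user@example.com/work") == ("user", "example.com", "/work")
assert split_jid("user@example.com") == ("user", "example.com", "/")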
def scale_from_matrix(matrix):
"""Return scaling factor, origin and direction from scaling matrix.
>>> factor = random.random() * 10 - 5
>>> origin = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> S0 = scale_matrix(factor, origin)
>>> factor, origin, direction = scale_from_matrix(S0)
>>> S1 = scale_matrix(factor, origin, direction)
>>> is_same_transform(S0, S1)
True
>>> S0 = scale_matrix(factor, origin, direct)
>>> factor, origin, direction = scale_from_matrix(S0)
>>> S1 = scale_matrix(factor, origin, direction)
>>> is_same_transform(S0, S1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
M33 = M[:3, :3]
factor = numpy.trace(M33) - 2.0
try:
# direction: unit eigenvector corresponding to eigenvalue factor
w, V = numpy.linalg.eig(M33)
i = numpy.where(abs(numpy.real(w) - factor) < 1e-8)[0][0]
direction = numpy.real(V[:, i]).squeeze()
direction /= vector_norm(direction)
except __HOLE__:
# uniform scaling
factor = (factor + 2.0) / 3.0
direction = None
# origin: any eigenvector corresponding to eigenvalue 1
w, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no eigenvector corresponding to eigenvalue 1")
origin = numpy.real(V[:, i[-1]]).squeeze()
origin /= origin[3]
return factor, origin, direction | IndexError | dataset/ETHPy150Open IEEERobotics/bot/bot/hardware/complex_hardware/transformations.py/scale_from_matrix |
def euler_matrix(ai, aj, ak, axes='sxyz'):
"""Return homogeneous rotation matrix from Euler angles and axis sequence.
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
>>> R = euler_matrix(1, 2, 3, 'syxz')
>>> numpy.allclose(numpy.sum(R[0]), -1.34786452)
True
>>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1))
>>> numpy.allclose(numpy.sum(R[0]), -0.383436184)
True
>>> ai, aj, ak = (4*math.pi) * (numpy.random.random(3) - 0.5)
>>> for axes in _AXES2TUPLE.keys():
... R = euler_matrix(ai, aj, ak, axes)
>>> for axes in _TUPLE2AXES.keys():
... R = euler_matrix(ai, aj, ak, axes)
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes]
except (AttributeError, __HOLE__):
_TUPLE2AXES[axes] # validation
firstaxis, parity, repetition, frame = axes
i = firstaxis
j = _NEXT_AXIS[i+parity]
k = _NEXT_AXIS[i-parity+1]
if frame:
ai, ak = ak, ai
if parity:
ai, aj, ak = -ai, -aj, -ak
si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak)
ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak)
cc, cs = ci*ck, ci*sk
sc, ss = si*ck, si*sk
M = numpy.identity(4)
if repetition:
M[i, i] = cj
M[i, j] = sj*si
M[i, k] = sj*ci
M[j, i] = sj*sk
M[j, j] = -cj*ss+cc
M[j, k] = -cj*cs-sc
M[k, i] = -sj*ck
M[k, j] = cj*sc+cs
M[k, k] = cj*cc-ss
else:
M[i, i] = cj*ck
M[i, j] = sj*sc-cs
M[i, k] = sj*cc+ss
M[j, i] = cj*sk
M[j, j] = sj*ss+cc
M[j, k] = sj*cs-sc
M[k, i] = -sj
M[k, j] = cj*si
M[k, k] = cj*ci
return M | KeyError | dataset/ETHPy150Open IEEERobotics/bot/bot/hardware/complex_hardware/transformations.py/euler_matrix |
def euler_from_matrix(matrix, axes='sxyz'):
"""Return Euler angles from rotation matrix for specified axis sequence.
axes : One of 24 axis sequences as string or encoded tuple
Note that many Euler angle triplets can describe one matrix.
>>> R0 = euler_matrix(1, 2, 3, 'syxz')
>>> al, be, ga = euler_from_matrix(R0, 'syxz')
>>> R1 = euler_matrix(al, be, ga, 'syxz')
>>> numpy.allclose(R0, R1)
True
>>> angles = (4*math.pi) * (numpy.random.random(3) - 0.5)
>>> for axes in _AXES2TUPLE.keys():
... R0 = euler_matrix(axes=axes, *angles)
... R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes))
... if not numpy.allclose(R0, R1): print(axes, "failed")
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
except (__HOLE__, KeyError):
_TUPLE2AXES[axes] # validation
firstaxis, parity, repetition, frame = axes
i = firstaxis
j = _NEXT_AXIS[i+parity]
k = _NEXT_AXIS[i-parity+1]
M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:3, :3]
if repetition:
sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k])
if sy > _EPS:
ax = math.atan2( M[i, j], M[i, k])
ay = math.atan2( sy, M[i, i])
az = math.atan2( M[j, i], -M[k, i])
else:
ax = math.atan2(-M[j, k], M[j, j])
ay = math.atan2( sy, M[i, i])
az = 0.0
else:
cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i])
if cy > _EPS:
ax = math.atan2( M[k, j], M[k, k])
ay = math.atan2(-M[k, i], cy)
az = math.atan2( M[j, i], M[i, i])
else:
ax = math.atan2(-M[j, k], M[j, j])
ay = math.atan2(-M[k, i], cy)
az = 0.0
if parity:
ax, ay, az = -ax, -ay, -az
if frame:
ax, az = az, ax
return ax, ay, az | AttributeError | dataset/ETHPy150Open IEEERobotics/bot/bot/hardware/complex_hardware/transformations.py/euler_from_matrix |
def quaternion_from_euler(ai, aj, ak, axes='sxyz'):
"""Return quaternion from Euler angles and axis sequence.
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
>>> q = quaternion_from_euler(1, 2, 3, 'ryxz')
>>> numpy.allclose(q, [0.435953, 0.310622, -0.718287, 0.444435])
True
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
except (__HOLE__, KeyError):
_TUPLE2AXES[axes] # validation
firstaxis, parity, repetition, frame = axes
i = firstaxis + 1
j = _NEXT_AXIS[i+parity-1] + 1
k = _NEXT_AXIS[i-parity] + 1
if frame:
ai, ak = ak, ai
if parity:
aj = -aj
ai /= 2.0
aj /= 2.0
ak /= 2.0
ci = math.cos(ai)
si = math.sin(ai)
cj = math.cos(aj)
sj = math.sin(aj)
ck = math.cos(ak)
sk = math.sin(ak)
cc = ci*ck
cs = ci*sk
sc = si*ck
ss = si*sk
q = numpy.empty((4, ))
if repetition:
q[0] = cj*(cc - ss)
q[i] = cj*(cs + sc)
q[j] = sj*(cc + ss)
q[k] = sj*(cs - sc)
else:
q[0] = cj*cc + sj*ss
q[i] = cj*sc - sj*cs
q[j] = cj*ss + sj*cc
q[k] = cj*cs - sj*sc
if parity:
q[j] *= -1.0
return q | AttributeError | dataset/ETHPy150Open IEEERobotics/bot/bot/hardware/complex_hardware/transformations.py/quaternion_from_euler |
def _import_module(name, package=None, warn=True, prefix='_py_', ignore='_'):
"""Try import all public attributes from module into global namespace.
Existing attributes with name clashes are renamed with prefix.
Attributes starting with underscore are ignored by default.
Return True on successful import.
"""
import warnings
from importlib import import_module
try:
if not package:
module = import_module(name)
else:
module = import_module('.' + name, package=package)
except __HOLE__:
if warn:
warnings.warn("failed to import module %s" % name)
else:
for attr in dir(module):
if ignore and attr.startswith(ignore):
continue
if prefix:
if attr in globals():
globals()[prefix + attr] = globals()[attr]
elif warn:
warnings.warn("no Python implementation of " + attr)
globals()[attr] = getattr(module, attr)
return True | ImportError | dataset/ETHPy150Open IEEERobotics/bot/bot/hardware/complex_hardware/transformations.py/_import_module |
def safe_finish(self):
"""Finish session. If it will blow up - connection was set to Keep-Alive and
client dropped connection, ignore any IOError or socket error."""
try:
self.finish()
except (socket.error, __HOLE__):
# We don't want to raise IOError exception if finish() call fails.
# It can happen if connection is set to Keep-Alive, but client
# closes connection after receiving response.
logging.debug('Ignoring IOError in safe_finish()')
pass | IOError | dataset/ETHPy150Open benoitc/gaffer/gaffer/sockjs/basehandler.py/BaseHandler.safe_finish |
def oauth2_callback(request):
""" View that handles the user's return from OAuth2 provider.
This view verifies the CSRF state and OAuth authorization code, and on
success stores the credentials obtained in the storage provider,
and redirects to the return_url specified in the authorize view and
stored in the session.
:param request: Django request
:return: A redirect response back to the return_url
"""
if 'error' in request.GET:
reason = request.GET.get(
'error_description', request.GET.get('error', ''))
return http.HttpResponseBadRequest(
'Authorization failed %s' % reason)
try:
encoded_state = request.GET['state']
code = request.GET['code']
except __HOLE__:
return http.HttpResponseBadRequest(
"Request missing state or authorization code")
try:
server_csrf = request.session[_CSRF_KEY]
except KeyError:
return http.HttpResponseBadRequest("No existing session for this flow.")
try:
state = json.loads(encoded_state)
client_csrf = state['csrf_token']
return_url = state['return_url']
except (ValueError, KeyError):
return http.HttpResponseBadRequest('Invalid state parameter.')
if client_csrf != server_csrf:
return http.HttpResponseBadRequest('Invalid CSRF token.')
flow = _get_flow_for_token(client_csrf, request)
if not flow:
return http.HttpResponseBadRequest("Missing Oauth2 flow.")
try:
credentials = flow.step2_exchange(code)
except client.FlowExchangeError as exchange_error:
return http.HttpResponseBadRequest(
"An error has occurred: {0}".format(exchange_error))
storage.get_storage(request).put(credentials)
signals.oauth2_authorized.send(sender=signals.oauth2_authorized,
request=request, credentials=credentials)
return shortcuts.redirect(return_url) | KeyError | dataset/ETHPy150Open google/oauth2client/oauth2client/contrib/django_util/views.py/oauth2_callback |
def _command(self, name, *args):
if self.state not in Commands[name]:
self.literal = None
raise self.error("command %s illegal in state %s, "
"only allowed in states %s" %
(name, self.state,
', '.join(Commands[name])))
for typ in ('OK', 'NO', 'BAD'):
if typ in self.untagged_responses:
del self.untagged_responses[typ]
if 'READ-ONLY' in self.untagged_responses \
and not self.is_readonly:
raise self.readonly('mailbox status changed to READ-ONLY')
tag = self._new_tag()
data = '%s %s' % (tag, name)
for arg in args:
if arg is None: continue
data = '%s %s' % (data, self._checkquote(arg))
literal = self.literal
if literal is not None:
self.literal = None
if type(literal) is type(self._command):
literator = literal
else:
literator = None
data = '%s {%s}' % (data, len(literal))
if __debug__:
if self.debug >= 4:
self._mesg('> %s' % data)
else:
self._log('> %s' % data)
try:
self.send('%s%s' % (data, CRLF))
except (socket.error, OSError), val:
raise self.abort('socket error: %s' % val)
if literal is None:
return tag
while 1:
# Wait for continuation response
while self._get_response():
if self.tagged_commands[tag]: # BAD/NO?
return tag
# Send literal
if literator:
literal = literator(self.continuation_response)
if __debug__:
if self.debug >= 4:
self._mesg('write literal size %s' % len(literal))
try:
self.send(literal)
self.send(CRLF)
except (socket.error, __HOLE__), val:
raise self.abort('socket error: %s' % val)
if not literator:
break
return tag | OSError | dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/imaplib.py/IMAP4._command |
def on_done(self, s):
self._restore_sel()
try:
SearchImpl(self.view, s, start_sel=self.original_sel).search()
ex_commands.VintageExState.search_buffer_type = 'pattern_search'
except __HOLE__, e:
if 'parsing' in str(e):
print "VintageEx: Regex parsing error. Incomplete pattern: %s" % s
else:
raise e
self.original_sel = None
self._restore_sel() | RuntimeError | dataset/ETHPy150Open SublimeText/VintageEx/ex_search_cmd.py/ViSearch.on_done |
def on_change(self, s):
if s in ("/", "?"):
return
self._restore_sel()
try:
SearchImpl(self.view, s, remember=False,
start_sel=self.original_sel).search()
except __HOLE__, e:
if 'parsing' in str(e):
print "VintageEx: Regex parsing error. Expected error."
else:
raise e | RuntimeError | dataset/ETHPy150Open SublimeText/VintageEx/ex_search_cmd.py/ViSearch.on_change |
def _handle_callback(self, message):
if self.host_id == message.get('host_id'):
try:
sid = message['sid']
namespace = message['namespace']
id = message['id']
args = message['args']
except __HOLE__:
return
self.trigger_callback(sid, namespace, id, args) | KeyError | dataset/ETHPy150Open miguelgrinberg/python-socketio/socketio/pubsub_manager.py/PubSubManager._handle_callback |
def set(self, key, value):
key = make_md5(make_hashable(key))
self.cache[key] = value
try:
self.keys.remove(key)
except __HOLE__:
pass
self.keys.append(key)
# limit cache to the given capacity
to_delete = self.keys[0:max(0, len(self.keys)-self.capacity)]
self.keys = self.keys[len(to_delete):]
for item in to_delete:
del self.cache[item] | ValueError | dataset/ETHPy150Open miracle2k/webassets/src/webassets/cache.py/MemoryCache.set |
def get(self, key):
filename = path.join(self.directory, '%s' % make_md5(self.V, key))
try:
f = open(filename, 'rb')
except __HOLE__ as e:
if e.errno != errno.ENOENT:
raise
return None
try:
result = f.read()
finally:
f.close()
unpickled = safe_unpickle(result)
if unpickled is None:
warnings.warn('Ignoring corrupted cache file %s' % filename)
return unpickled | IOError | dataset/ETHPy150Open miracle2k/webassets/src/webassets/cache.py/FilesystemCache.get |
def fetch_command(self, global_options, subcommand):
"""
Tries to fetch the given subcommand, printing a message with the
appropriate command called from the command line (usually
"uliweb") if it can't be found.
"""
commands = self.get_commands(global_options)
try:
klass = commands[subcommand]
except __HOLE__:
sys.stderr.write("Unknown command: %r\nType '%s help' for usage.\nMany commands will only run at project directory, maybe the directory is not right.\n" % \
(subcommand, self.prog_name))
sys.exit(1)
return klass | KeyError | dataset/ETHPy150Open limodou/uliweb/uliweb/core/commands.py/CommandManager.fetch_command |
def do_command(self, args, global_options):
try:
subcommand = args[0]
except __HOLE__:
subcommand = 'help' # Display help if no arguments were given.
if subcommand == 'help':
if len(args) > 1:
command = self.fetch_command(global_options, args[1])
if issubclass(command, CommandManager):
cmd = command(['help'], None, '%s %s' % (self.prog_name, args[1]), global_options=global_options)
cmd.execute()
else:
command().print_help(self.prog_name, args[1])
sys.exit(0)
else:
self.print_help(global_options)
if global_options.help:
self.print_help(global_options)
else:
command = self.fetch_command(global_options, subcommand)
if issubclass(command, CommandManager):
cmd = command(args[1:], None, '%s %s' % (self.prog_name, subcommand), global_options=global_options)
cmd.execute()
else:
cmd = command()
cmd.run_from_argv(self.prog_name, subcommand, global_options, args[1:]) | IndexError | dataset/ETHPy150Open limodou/uliweb/uliweb/core/commands.py/CommandManager.do_command |
def __read_id(self):
'''
Read the next ID number and do the appropriate task with it.
Returns nothing.
'''
try:
# float32 -- ID of the first data sequence
objid = np.fromfile(self._fsrc, dtype=np.float32, count=1)[0]
except __HOLE__:
# if we have a previous segment, save it
self.__save_segment()
# if there are no more Segments, return
return False
if objid == -2:
self.__read_condition()
elif objid == -1:
self.__read_segment()
else:
self.__spiketimes.append(objid)
return True | IndexError | dataset/ETHPy150Open NeuralEnsemble/python-neo/neo/io/brainwaref32io.py/BrainwareF32IO.__read_id |
def RunTest():
try:
arcpy.AddMessage("Starting Test: TestLocalPeaks")
#TEST_IMPLEMENTED = False
#
#if not TEST_IMPLEMENTED :
# arcpy.AddWarning("***Test Not Yet Implemented***")
# return
# TODO: once model has a version that works with local surface data
# (rather than image service), then finish this test/implementation below
#
# alternately you can add an image service connection in Catalog and
# fill in the parameter below
if arcpy.CheckExtension("Spatial") == "Available":
print("Checking out Spatial Analyst license...")
arcpy.CheckOutExtension("Spatial")
else:
# Raise a custom exception
raise Exception("LicenseError")
if arcpy.CheckExtension("3D") == "Available":
print("Checking out 3D Analyst license...")
arcpy.CheckOutExtension("3D")
else:
raise Exception("LicenseError")
try:
print("Getting Advanced license...")
import arcinfo
except __HOLE__:
print("Could not use ArcGIS Advanced license...")
raise Exception
arcpy.env.overwriteOutput = True
arcpy.env.scratchWorkspace = TestUtilities.scratchGDB
# WORKAROUND
print("Creating New Scratch Workspace (Workaround)")
TestUtilities.createScratch()
# Getting inputs
print("Getting inputs...")
print("inputPolygonFC...")
inputPolygonFC = os.path.join(TestUtilities.inputGDB, "samplePolygonArea")
print("inputSurface...")
inputSurface = TestUtilities.inputElevationURL
print("outputPointsFC")
outputPointsFC = os.path.join(TestUtilities.outputGDB, "LocalPeaks")
print("ImportToolbox--MAoT...")
arcpy.ImportToolbox(TestUtilities.toolbox, "MAoT")
# mf - these have been tested
# # Check For Valid Input
# print("Checking valid inputs...")
# objects2Check = []
# #objects2Check.extend([inputPolygonFC, inputSurface, toolbox])
# objects2Check.extend([inputPolygonFC, toolbox])
# for object2Check in objects2Check :
# desc = arcpy.Describe(object2Check)
# if desc == None :
# raise Exception("Bad Input")
# else :
# print("Valid Object: " + desc.Name)
# Set environment settings
print("Running from: " + str(TestUtilities.currentPath))
print("Geodatabase path: " + str(TestUtilities.geodatabasePath))
inputFeatureCount = int(arcpy.GetCount_management(inputPolygonFC).getOutput(0))
print("Input FeatureClass: " + str(inputPolygonFC))
print("Input Feature Count: " + str(inputFeatureCount))
# if (inputFeatureCount < 1):
# print("Invalid Input Polygon Feature Count: " + str(inputFeatureCount))
# Convert input elevation service to local dataset surface
print("Converting input image service into a local raster surface")
polygonExtent = arcpy.Describe(inputPolygonFC).extent
print("Using extent: " + str(polygonExtent))
cellSize = CellSize(inputPolygonFC)
print("Using cell size: " + str(cellSize))
localSurf = None
srWGS84 = arcpy.SpatialReference(4326) # GCS_WGS_1984
srWebMerc = arcpy.SpatialReference(3857) #Web_Mercator_Auxiliary_Sphere
print("Reworking inputs from image service...")
try:
tempClipExtent = os.path.join(TestUtilities.scratchGDB,"tempClipExtent")
localSurf = os.path.join(TestUtilities.scratchGDB,"localSurf")
print(" projecting input clip to WGS 1984 to match service...")
arcpy.Project_management(inputPolygonFC,tempClipExtent,srWGS84)
tempCellSize = CellSize(tempClipExtent)
#MakeImageServerLayer_management (in_image_service, out_imageserver_layer, {template},
#{band_index}, {mosaic_method}, {order_field},
#{order_base_value}, {lock_rasterid}, {cell_size},
#{where_clause}, {processing_template})
print(" getting image service layer with cell size " + str(tempCellSize) + "...")
arcpy.MakeImageServerLayer_management(inputSurface, "inputSurface", tempClipExtent,
"#", "#", "#",
"#", "#", tempCellSize)
print(" projecting image service layer to match target data...")
arcpy.ProjectRaster_management("inputSurface",localSurf,srWebMerc)
#arcpy.CopyRaster_management("inputSurface", localSurf)
except arcpy.ExecuteError:
print("Error converting image service...")
msgs = arcpy.GetMessages()
print(msgs)
sys.exit(-1)
numberOfPeaks = 3
########################################################
# Execute the Model under test:
arcpy.FindLocalPeaks_MAoT(inputPolygonFC, numberOfPeaks, localSurf, outputPointsFC)
########################################################
# Verify the results
outputFeatureCount = int(arcpy.GetCount_management(outputPointsFC).getOutput(0))
print("Output FeatureClass: " + str(outputPointsFC))
print("Output Feature Count: " + str(outputFeatureCount))
if (outputPointsFC < 3):
print("Invalid Output Feature Count: " + str(outputFeatureCount))
raise Exception("Test Failed")
# WORKAROUND: delete scratch db
print("Deleting Scratch Workspace (Workaround)")
TestUtilities.deleteScratch()
print("Test Successful")
except arcpy.ExecuteError:
# Get the tool error messages
msgs = arcpy.GetMessages()
arcpy.AddError(msgs)
# return a system error code
sys.exit(-1)
except Exception as e:
# Get the traceback object
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
# Concatenate information together concerning the error into a message string
pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"
# Return python error messages for use in script tool or Python Window
arcpy.AddError(pymsg)
arcpy.AddError(msgs)
# return a system error code
sys.exit(-1)
finally:
# Check in the 3D Analyst extension
arcpy.CheckInExtension("Spatial")
arcpy.CheckInExtension("3D")
# if arcpy.Exists(localSurf):
# arcpy.Delete_management(localSurf) | ImportError | dataset/ETHPy150Open Esri/solutions-geoprocessing-toolbox/suitability/test/test_maot/TestModelLocalPeaks.py/RunTest |
def _convert(self, n):
# n is the native node produced by the ast module
if n is None:
return None # but some node attributes can be None
assert isinstance(n, ast.AST)
# Get converter function
type = n.__class__.__name__
try:
converter = getattr(self, '_convert_' + type)
except __HOLE__: # pragma: no cover
raise RuntimeError('Cannot convert %s nodes.' % type)
# Convert node
val = converter(n)
assert isinstance(val, Node)
# Set its position
val.lineno = getattr(n, 'lineno', 1)
val.col_offset = getattr(n, 'col_offset', 0)
return val | AttributeError | dataset/ETHPy150Open zoofIO/flexx/flexx/pyscript/commonast.py/NativeAstConverter._convert |
def __get__(self, instance, model):
# override TaggableManager's requirement for instance to have a primary key
# before we can access its tags
try:
manager = _ClusterTaggableManager(
through=self.through, model=model, instance=instance, prefetch_cache_name=self.name
)
except __HOLE__: # fallback for django-taggit pre 0.11
manager = _ClusterTaggableManager(
through=self.through, model=model, instance=instance
)
return manager | TypeError | dataset/ETHPy150Open torchbox/django-modelcluster/modelcluster/contrib/taggit.py/ClusterTaggableManager.__get__ |